Unless the LiteralType is a type parameter,
-its underlying type
+its underlying type
must be a struct, array, slice, or map type
(the syntax enforces this constraint except when the type is given
as a TypeName).
@@ -4873,7 +4873,7 @@
var prints []func()
-for i := 0; i < 5; i++ {
+for i := 0; i < 5; i++ {
prints = append(prints, func() { println(i) })
i++
}
@@ -6772,7 +6772,7 @@
For statements with range clause
variable, which must be of integer type.
Otherwise, if the iteration variable is declared by the "range" clause or is absent,
the type of the iteration values is the default type for n.
-If n <= 0, the loop does not run any iterations.
+If n <= 0, the loop does not run any iterations.
@@ -7799,7 +7799,7 @@
Min and max
-min(x, y) == if x <= y then x else y
+min(x, y) == if x <= y then x else y
min(x, y, z) == min(min(x, y), z)
diff --git a/doc/godebug.md b/doc/godebug.md
index aaa0f9dd55e570..c12ce5311d90d1 100644
--- a/doc/godebug.md
+++ b/doc/godebug.md
@@ -153,6 +153,16 @@ for example,
see the [runtime documentation](/pkg/runtime#hdr-Environment_Variables)
and the [go command documentation](/cmd/go#hdr-Build_and_test_caching).
+### Go 1.26
+
+Go 1.26 added a new `httpcookiemaxnum` setting that controls the maximum number
+of cookies that net/http will accept when parsing HTTP headers. If the number of
+cookies in a header exceeds the number set in `httpcookiemaxnum`, cookie parsing
+will fail early. The default value is `httpcookiemaxnum=3000`. Setting
+`httpcookiemaxnum=0` allows cookie parsing to accept an unlimited number
+of cookies. To avoid denial-of-service attacks, this setting and default
+were backported to Go 1.25.2 and Go 1.24.8.
+
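As a quick illustration, a program can opt out of the limit either with the `GODEBUG` environment variable or with the standard `//go:debug` directive; the server below is a minimal sketch:

```
//go:debug httpcookiemaxnum=0

package main

import "net/http"

func main() {
	// With the directive above (or GODEBUG=httpcookiemaxnum=0 at run time),
	// cookie parsing in this binary accepts any number of cookies.
	http.ListenAndServe(":8080", nil)
}
```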
### Go 1.25
Go 1.25 added a new `decoratemappings` setting that controls whether the Go
diff --git a/doc/next/3-tools.md b/doc/next/3-tools.md
index 9459a5490e7904..c0a4601c0b9e74 100644
--- a/doc/next/3-tools.md
+++ b/doc/next/3-tools.md
@@ -7,5 +7,15 @@
a replacement for `go tool doc`: it takes the same flags and arguments and
has the same behavior.
+
+The `go fix` command, following the pattern of `go vet` in Go 1.10,
+now uses the Go analysis framework (`golang.org/x/tools/go/analysis`).
+This means the same analyzers that provide diagnostics in `go vet`
+can be used to suggest and apply fixes in `go fix`.
+The `go fix` command's historical fixers, all of which were obsolete,
+have been removed and replaced by a suite of new analyzers that
+offer fixes to use newer features of the language and library.
+
+
### Cgo {#cgo}
diff --git a/doc/next/6-stdlib/99-minor/errors/51945.md b/doc/next/6-stdlib/99-minor/errors/51945.md
new file mode 100644
index 00000000000000..44ac7222e6d990
--- /dev/null
+++ b/doc/next/6-stdlib/99-minor/errors/51945.md
@@ -0,0 +1,2 @@
+The new [AsType] function is a generic version of [As]. It is type-safe, faster,
+and, in most cases, easier to use.
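A minimal sketch of the difference, assuming the `AsType[E error](err error) (E, bool)` signature from the proposal (the `*fs.PathError` target is illustrative):

```
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func main() {
	_, err := os.Open("missing.txt")

	// With errors.As, the target is passed as a pointer:
	var pathErr *fs.PathError
	if errors.As(err, &pathErr) {
		fmt.Println("failed path:", pathErr.Path)
	}

	// With errors.AsType, the target type is a type argument and the
	// matched value is returned directly:
	if pathErr, ok := errors.AsType[*fs.PathError](err); ok {
		fmt.Println("failed path:", pathErr.Path)
	}
}
```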
diff --git a/doc/next/6-stdlib/99-minor/net/http/httputil/73161.md b/doc/next/6-stdlib/99-minor/net/http/httputil/73161.md
new file mode 100644
index 00000000000000..f6318f85534746
--- /dev/null
+++ b/doc/next/6-stdlib/99-minor/net/http/httputil/73161.md
@@ -0,0 +1,11 @@
+The [ReverseProxy.Director] configuration field is deprecated
+in favor of [ReverseProxy.Rewrite].
+
+A malicious client can remove headers added by a `Director` function
+by designating those headers as hop-by-hop. Since there is no way to address
+this problem within the scope of the `Director` API, we added a new
+`Rewrite` hook in Go 1.20. `Rewrite` hooks are provided with both the
+unmodified inbound request received by the proxy and the outbound request
+which will be sent by the proxy.
+
+Since the `Director` hook is fundamentally unsafe, we are now deprecating it.
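A hedged sketch of moving from `Director` to `Rewrite`; the backend URL and header name are illustrative:

```
package main

import (
	"net/http/httputil"
	"net/url"
)

func newProxy() *httputil.ReverseProxy {
	target, _ := url.Parse("http://backend.internal") // illustrative backend
	return &httputil.ReverseProxy{
		Rewrite: func(pr *httputil.ProxyRequest) {
			pr.SetURL(target)  // route the outbound request to the backend
			pr.SetXForwarded() // set X-Forwarded-* headers from the inbound request pr.In
			// Unlike a header added by a Director, a header added to pr.Out here
			// cannot be stripped by a client that lists it as hop-by-hop.
			pr.Out.Header.Set("X-Internal-Auth", "example-token")
		},
	}
}

func main() { _ = newProxy() }
```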
diff --git a/doc/next/6-stdlib/99-minor/testing/71287.md b/doc/next/6-stdlib/99-minor/testing/71287.md
new file mode 100644
index 00000000000000..82cac638101099
--- /dev/null
+++ b/doc/next/6-stdlib/99-minor/testing/71287.md
@@ -0,0 +1,18 @@
+The new methods [T.ArtifactDir], [B.ArtifactDir], and [F.ArtifactDir]
+return a directory in which to write test output files (artifacts).
+
+When the `-artifacts` flag is provided to `go test`,
+this directory will be located under the output directory
+(specified with `-outputdir`, or the current directory by default).
+Otherwise, artifacts are stored in a temporary directory
+which is removed after the test completes.
+
+The first call to `ArtifactDir` when `-artifacts` is provided
+writes the location of the directory to the test log.
+
+For example, in a test named `TestArtifacts`,
+`t.ArtifactDir()` emits:
+
+```
+=== ARTIFACTS Test /path/to/artifact/dir
+```
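A short sketch of a test that writes an artifact, assuming the `ArtifactDir` methods behave as described above (the file name is illustrative):

```
package example_test

import (
	"os"
	"path/filepath"
	"testing"
)

func TestArtifacts(t *testing.T) {
	// The directory is kept under -outputdir when `go test -artifacts` is used,
	// and is a temporary directory removed after the test otherwise.
	dir := t.ArtifactDir()
	out := filepath.Join(dir, "trace.txt")
	if err := os.WriteFile(out, []byte("captured output"), 0o644); err != nil {
		t.Fatal(err)
	}
}
```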
diff --git a/src/archive/tar/common.go b/src/archive/tar/common.go
index 7b3945ff153144..ad31bbb64aaa5c 100644
--- a/src/archive/tar/common.go
+++ b/src/archive/tar/common.go
@@ -39,6 +39,7 @@ var (
errMissData = errors.New("archive/tar: sparse file references non-existent data")
errUnrefData = errors.New("archive/tar: sparse file contains unreferenced data")
errWriteHole = errors.New("archive/tar: write non-NUL byte in sparse hole")
+ errSparseTooLong = errors.New("archive/tar: sparse map too long")
)
type headerError []string
diff --git a/src/archive/tar/reader.go b/src/archive/tar/reader.go
index 8483fb52a28f66..16ac2f5b17c28b 100644
--- a/src/archive/tar/reader.go
+++ b/src/archive/tar/reader.go
@@ -531,12 +531,17 @@ func readGNUSparseMap1x0(r io.Reader) (sparseDatas, error) {
cntNewline int64
buf bytes.Buffer
blk block
+ totalSize int
)
// feedTokens copies data in blocks from r into buf until there are
// at least cnt newlines in buf. It will not read more blocks than needed.
feedTokens := func(n int64) error {
for cntNewline < n {
+ totalSize += len(blk)
+ if totalSize > maxSpecialFileSize {
+ return errSparseTooLong
+ }
if _, err := mustReadFull(r, blk[:]); err != nil {
return err
}
@@ -569,8 +574,8 @@ func readGNUSparseMap1x0(r io.Reader) (sparseDatas, error) {
}
// Parse for all member entries.
- // numEntries is trusted after this since a potential attacker must have
- // committed resources proportional to what this library used.
+ // numEntries is trusted after this since feedTokens limits the number of
+ // tokens based on maxSpecialFileSize.
if err := feedTokens(2 * numEntries); err != nil {
return nil, err
}
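For illustration, a reader that hits the new limit sees an error from `Next` instead of the sparse map being read without bound; the input file name here is hypothetical:

```
package main

import (
	"archive/tar"
	"fmt"
	"io"
	"os"
)

func main() {
	f, err := os.Open("huge-sparse-map.tar") // hypothetical crafted archive
	if err != nil {
		return
	}
	defer f.Close()
	tr := tar.NewReader(f)
	// An oversized GNU 1.0 sparse map now causes Next to fail early
	// (internally errSparseTooLong) rather than consuming unbounded input.
	if _, err := tr.Next(); err != nil && err != io.EOF {
		fmt.Println("refused archive:", err)
	}
}
```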
diff --git a/src/archive/tar/reader_test.go b/src/archive/tar/reader_test.go
index 99340a30471914..fca53dae741bd5 100644
--- a/src/archive/tar/reader_test.go
+++ b/src/archive/tar/reader_test.go
@@ -621,6 +621,11 @@ func TestReader(t *testing.T) {
},
Format: FormatPAX,
}},
+ }, {
+ // Small compressed file that uncompresses to
+ // a file with a very large GNU 1.0 sparse map.
+ file: "testdata/gnu-sparse-many-zeros.tar.bz2",
+ err: errSparseTooLong,
}}
for _, v := range vectors {
diff --git a/src/archive/tar/testdata/gnu-sparse-many-zeros.tar.bz2 b/src/archive/tar/testdata/gnu-sparse-many-zeros.tar.bz2
new file mode 100644
index 00000000000000..751d7fd4b68be1
Binary files /dev/null and b/src/archive/tar/testdata/gnu-sparse-many-zeros.tar.bz2 differ
diff --git a/src/cmd/asm/internal/arch/arm64.go b/src/cmd/asm/internal/arch/arm64.go
index 87ccb8c0409313..d562e5907d6fc1 100644
--- a/src/cmd/asm/internal/arch/arm64.go
+++ b/src/cmd/asm/internal/arch/arm64.go
@@ -195,149 +195,6 @@ func ARM64RegisterShift(reg, op, count int16) (int64, error) {
return int64(reg&31)<<16 | int64(op)<<22 | int64(uint16(count)), nil
}
-// ARM64RegisterExtension constructs an ARM64 register with extension or arrangement.
-func ARM64RegisterExtension(a *obj.Addr, ext string, reg, num int16, isAmount, isIndex bool) error {
- Rnum := (reg & 31) + int16(num<<5)
- if isAmount {
- if num < 0 || num > 7 {
- return errors.New("index shift amount is out of range")
- }
- }
- if reg <= arm64.REG_R31 && reg >= arm64.REG_R0 {
- if !isAmount {
- return errors.New("invalid register extension")
- }
- switch ext {
- case "UXTB":
- if a.Type == obj.TYPE_MEM {
- return errors.New("invalid shift for the register offset addressing mode")
- }
- a.Reg = arm64.REG_UXTB + Rnum
- case "UXTH":
- if a.Type == obj.TYPE_MEM {
- return errors.New("invalid shift for the register offset addressing mode")
- }
- a.Reg = arm64.REG_UXTH + Rnum
- case "UXTW":
- // effective address of memory is a base register value and an offset register value.
- if a.Type == obj.TYPE_MEM {
- a.Index = arm64.REG_UXTW + Rnum
- } else {
- a.Reg = arm64.REG_UXTW + Rnum
- }
- case "UXTX":
- if a.Type == obj.TYPE_MEM {
- return errors.New("invalid shift for the register offset addressing mode")
- }
- a.Reg = arm64.REG_UXTX + Rnum
- case "SXTB":
- if a.Type == obj.TYPE_MEM {
- return errors.New("invalid shift for the register offset addressing mode")
- }
- a.Reg = arm64.REG_SXTB + Rnum
- case "SXTH":
- if a.Type == obj.TYPE_MEM {
- return errors.New("invalid shift for the register offset addressing mode")
- }
- a.Reg = arm64.REG_SXTH + Rnum
- case "SXTW":
- if a.Type == obj.TYPE_MEM {
- a.Index = arm64.REG_SXTW + Rnum
- } else {
- a.Reg = arm64.REG_SXTW + Rnum
- }
- case "SXTX":
- if a.Type == obj.TYPE_MEM {
- a.Index = arm64.REG_SXTX + Rnum
- } else {
- a.Reg = arm64.REG_SXTX + Rnum
- }
- case "LSL":
- a.Index = arm64.REG_LSL + Rnum
- default:
- return errors.New("unsupported general register extension type: " + ext)
-
- }
- } else if reg <= arm64.REG_V31 && reg >= arm64.REG_V0 {
- switch ext {
- case "B8":
- if isIndex {
- return errors.New("invalid register extension")
- }
- a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_8B & 15) << 5)
- case "B16":
- if isIndex {
- return errors.New("invalid register extension")
- }
- a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_16B & 15) << 5)
- case "H4":
- if isIndex {
- return errors.New("invalid register extension")
- }
- a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_4H & 15) << 5)
- case "H8":
- if isIndex {
- return errors.New("invalid register extension")
- }
- a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_8H & 15) << 5)
- case "S2":
- if isIndex {
- return errors.New("invalid register extension")
- }
- a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_2S & 15) << 5)
- case "S4":
- if isIndex {
- return errors.New("invalid register extension")
- }
- a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_4S & 15) << 5)
- case "D1":
- if isIndex {
- return errors.New("invalid register extension")
- }
- a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_1D & 15) << 5)
- case "D2":
- if isIndex {
- return errors.New("invalid register extension")
- }
- a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_2D & 15) << 5)
- case "Q1":
- if isIndex {
- return errors.New("invalid register extension")
- }
- a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_1Q & 15) << 5)
- case "B":
- if !isIndex {
- return nil
- }
- a.Reg = arm64.REG_ELEM + (reg & 31) + ((arm64.ARNG_B & 15) << 5)
- a.Index = num
- case "H":
- if !isIndex {
- return nil
- }
- a.Reg = arm64.REG_ELEM + (reg & 31) + ((arm64.ARNG_H & 15) << 5)
- a.Index = num
- case "S":
- if !isIndex {
- return nil
- }
- a.Reg = arm64.REG_ELEM + (reg & 31) + ((arm64.ARNG_S & 15) << 5)
- a.Index = num
- case "D":
- if !isIndex {
- return nil
- }
- a.Reg = arm64.REG_ELEM + (reg & 31) + ((arm64.ARNG_D & 15) << 5)
- a.Index = num
- default:
- return errors.New("unsupported simd register extension type: " + ext)
- }
- } else {
- return errors.New("invalid register and extension combination")
- }
- return nil
-}
-
// ARM64RegisterArrangement constructs an ARM64 vector register arrangement.
func ARM64RegisterArrangement(reg int16, name, arng string) (int64, error) {
var curQ, curSize uint16
diff --git a/src/cmd/asm/internal/asm/parse.go b/src/cmd/asm/internal/asm/parse.go
index 8f8f6dcc346a44..545f6c7553351b 100644
--- a/src/cmd/asm/internal/asm/parse.go
+++ b/src/cmd/asm/internal/asm/parse.go
@@ -775,7 +775,7 @@ func (p *Parser) registerExtension(a *obj.Addr, name string, prefix rune) {
switch p.arch.Family {
case sys.ARM64:
- err := arch.ARM64RegisterExtension(a, ext, reg, num, isAmount, isIndex)
+ err := arm64.ARM64RegisterExtension(a, ext, reg, num, isAmount, isIndex)
if err != nil {
p.errorf("%v", err)
}
diff --git a/src/cmd/asm/internal/asm/testdata/arm64.s b/src/cmd/asm/internal/asm/testdata/arm64.s
index 236f1a66979099..109a3d8316678b 100644
--- a/src/cmd/asm/internal/asm/testdata/arm64.s
+++ b/src/cmd/asm/internal/asm/testdata/arm64.s
@@ -1894,4 +1894,12 @@ next:
BTI J // 9f2403d5
BTI JC // df2403d5
+// Pointer Authentication Codes (PAC)
+ PACIASP // 3f2303d5
+ AUTIASP // bf2303d5
+ PACIBSP // 7f2303d5
+ AUTIBSP // ff2303d5
+ AUTIA1716 // 9f2103d5
+ AUTIB1716 // df2103d5
+
END
diff --git a/src/cmd/asm/internal/asm/testdata/arm64error.s b/src/cmd/asm/internal/asm/testdata/arm64error.s
index 55890ce3e631a4..ce88e3ca540f13 100644
--- a/src/cmd/asm/internal/asm/testdata/arm64error.s
+++ b/src/cmd/asm/internal/asm/testdata/arm64error.s
@@ -422,4 +422,10 @@ TEXT errors(SB),$0
SHA1H V1.B16, V2.B16 // ERROR "invalid operands"
BTI // ERROR "missing operand"
BTI PLDL1KEEP // ERROR "illegal argument"
+ PACIASP C // ERROR "illegal combination"
+ AUTIASP R2 // ERROR "illegal combination"
+ PACIBSP R0 // ERROR "illegal combination"
+ AUTIBSP C // ERROR "illegal combination"
+ AUTIA1716 $45 // ERROR "illegal combination"
+ AUTIB1716 R0 // ERROR "illegal combination"
RET
diff --git a/src/cmd/cgo/internal/testcshared/cshared_test.go b/src/cmd/cgo/internal/testcshared/cshared_test.go
index f1c30f8f9a2b2a..2ce705adba44f3 100644
--- a/src/cmd/cgo/internal/testcshared/cshared_test.go
+++ b/src/cmd/cgo/internal/testcshared/cshared_test.go
@@ -8,6 +8,7 @@ import (
"bufio"
"bytes"
"cmd/cgo/internal/cgotest"
+ "cmp"
"debug/elf"
"debug/pe"
"encoding/binary"
@@ -272,7 +273,7 @@ func createHeaders() error {
// which results in the linkers output implib getting overwritten at each step. So instead build the
// import library the traditional way, using a def file.
err = os.WriteFile("libgo.def",
- []byte("LIBRARY libgo.dll\nEXPORTS\n\tDidInitRun\n\tDidMainRun\n\tDivu\n\tFromPkg\n\t_cgo_dummy_export\n"),
+ []byte("LIBRARY libgo.dll\nEXPORTS\n\tDidInitRun\n\tDidMainRun\n\tDivu\n\tFromPkg\n"),
0644)
if err != nil {
return fmt.Errorf("unable to write def file: %v", err)
@@ -375,9 +376,23 @@ func TestExportedSymbols(t *testing.T) {
}
}
-func checkNumberOfExportedFunctionsWindows(t *testing.T, prog string, exportedFunctions int, wantAll bool) {
+func checkNumberOfExportedSymbolsWindows(t *testing.T, exportedSymbols int, wantAll bool) {
+ t.Parallel()
tmpdir := t.TempDir()
+ prog := `
+package main
+import "C"
+func main() {}
+`
+
+ for i := range exportedSymbols {
+ prog += fmt.Sprintf(`
+//export GoFunc%d
+func GoFunc%d() {}
+`, i, i)
+ }
+
srcfile := filepath.Join(tmpdir, "test.go")
objfile := filepath.Join(tmpdir, "test.dll")
if err := os.WriteFile(srcfile, []byte(prog), 0666); err != nil {
@@ -443,18 +458,19 @@ func checkNumberOfExportedFunctionsWindows(t *testing.T, prog string, exportedFu
t.Fatalf("binary.Read failed: %v", err)
}
- // Only the two exported functions and _cgo_dummy_export should be exported.
+ exportedSymbols = cmp.Or(exportedSymbols, 1) // _cgo_stub_export is exported if there are no other symbols exported
+
// NumberOfNames is the number of functions exported with a unique name.
// NumberOfFunctions can be higher than that because it also counts
// functions exported only by ordinal, a unique number assigned by the linker,
// and linkers might add an unknown number of their own ordinal-only functions.
if wantAll {
- if e.NumberOfNames <= uint32(exportedFunctions) {
- t.Errorf("got %d exported names, want > %d", e.NumberOfNames, exportedFunctions)
+ if e.NumberOfNames <= uint32(exportedSymbols) {
+ t.Errorf("got %d exported names, want > %d", e.NumberOfNames, exportedSymbols)
}
} else {
- if e.NumberOfNames > uint32(exportedFunctions) {
- t.Errorf("got %d exported names, want <= %d", e.NumberOfNames, exportedFunctions)
+ if e.NumberOfNames != uint32(exportedSymbols) {
+ t.Errorf("got %d exported names, want %d", e.NumberOfNames, exportedSymbols)
}
}
}
@@ -470,43 +486,14 @@ func TestNumberOfExportedFunctions(t *testing.T) {
t.Parallel()
- const prog0 = `
-package main
-
-import "C"
-
-func main() {
-}
-`
-
- const prog2 = `
-package main
-
-import "C"
-
-//export GoFunc
-func GoFunc() {
- println(42)
-}
-
-//export GoFunc2
-func GoFunc2() {
- println(24)
-}
-
-func main() {
-}
-`
- // All programs export _cgo_dummy_export, so add 1 to the expected counts.
- t.Run("OnlyExported/0", func(t *testing.T) {
- checkNumberOfExportedFunctionsWindows(t, prog0, 0+1, false)
- })
- t.Run("OnlyExported/2", func(t *testing.T) {
- checkNumberOfExportedFunctionsWindows(t, prog2, 2+1, false)
- })
- t.Run("All", func(t *testing.T) {
- checkNumberOfExportedFunctionsWindows(t, prog2, 2+1, true)
- })
+ for i := range 3 {
+ t.Run(fmt.Sprintf("OnlyExported/%d", i), func(t *testing.T) {
+ checkNumberOfExportedSymbolsWindows(t, i, false)
+ })
+ t.Run(fmt.Sprintf("All/%d", i), func(t *testing.T) {
+ checkNumberOfExportedSymbolsWindows(t, i, true)
+ })
+ }
}
// test1: shared library can be dynamically loaded and exported symbols are accessible.
diff --git a/src/cmd/cgo/internal/testout/out_test.go b/src/cmd/cgo/internal/testout/out_test.go
new file mode 100644
index 00000000000000..81dfa365871372
--- /dev/null
+++ b/src/cmd/cgo/internal/testout/out_test.go
@@ -0,0 +1,144 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package out_test
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "internal/testenv"
+ "internal/goarch"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strconv"
+ "strings"
+ "testing"
+)
+
+type methodAlign struct {
+ Method string
+ Align int
+}
+
+var wantAligns = map[string]int{
+ "ReturnEmpty": 1,
+ "ReturnOnlyUint8": 1,
+ "ReturnOnlyUint16": 2,
+ "ReturnOnlyUint32": 4,
+ "ReturnOnlyUint64": goarch.PtrSize,
+ "ReturnOnlyInt": goarch.PtrSize,
+ "ReturnOnlyPtr": goarch.PtrSize,
+ "ReturnByteSlice": goarch.PtrSize,
+ "ReturnString": goarch.PtrSize,
+ "InputAndReturnUint8": 1,
+ "MixedTypes": goarch.PtrSize,
+}
+
+// TestAligned tests that the generated _cgo_export.c file has the expected
+// alignment attributes for struct types used as arguments or results of
+// functions marked with //export.
+func TestAligned(t *testing.T) {
+ testenv.MustHaveGoRun(t)
+ testenv.MustHaveCGO(t)
+
+ testdata, err := filepath.Abs("testdata")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ objDir := t.TempDir()
+
+ cmd := testenv.Command(t, testenv.GoToolPath(t), "tool", "cgo",
+ "-objdir", objDir,
+ filepath.Join(testdata, "aligned.go"))
+ cmd.Stderr = new(bytes.Buffer)
+
+ err = cmd.Run()
+ if err != nil {
+ t.Fatalf("%#q: %v\n%s", cmd, err, cmd.Stderr)
+ }
+
+ haveAligns, err := parseAlign(filepath.Join(objDir, "_cgo_export.c"))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Check that we have all the wanted methods
+ if len(haveAligns) != len(wantAligns) {
+ t.Fatalf("have %d methods with aligned, want %d", len(haveAligns), len(wantAligns))
+ }
+
+ for i := range haveAligns {
+ method := haveAligns[i].Method
+ haveAlign := haveAligns[i].Align
+
+ wantAlign, ok := wantAligns[method]
+ if !ok {
+ t.Errorf("method %s: have aligned %d, want missing entry", method, haveAlign)
+ } else if haveAlign != wantAlign {
+ t.Errorf("method %s: have aligned %d, want %d", method, haveAlign, wantAlign)
+ }
+ }
+}
+
+func parseAlign(filename string) ([]methodAlign, error) {
+ file, err := os.Open(filename)
+ if err != nil {
+ return nil, fmt.Errorf("failed to open file: %w", err)
+ }
+ defer file.Close()
+
+ var results []methodAlign
+ scanner := bufio.NewScanner(file)
+
+ // Regex to match function declarations like "struct MethodName_return MethodName("
+ funcRegex := regexp.MustCompile(`^struct\s+(\w+)_return\s+(\w+)\(`)
+ // Regex to match simple function declarations like "GoSlice MethodName("
+ simpleFuncRegex := regexp.MustCompile(`^Go\w+\s+(\w+)\(`)
+ // Regex to match void-returning exported functions like "void ReturnEmpty("
+ voidFuncRegex := regexp.MustCompile(`^void\s+(\w+)\(`)
+ // Regex to match align attributes like "__attribute__((aligned(8)))"
+ alignRegex := regexp.MustCompile(`__attribute__\(\(aligned\((\d+)\)\)\)`)
+
+ var currentMethod string
+
+ for scanner.Scan() {
+ line := strings.TrimSpace(scanner.Text())
+
+ // Check if this line declares a function with struct return type
+ if matches := funcRegex.FindStringSubmatch(line); matches != nil {
+ currentMethod = matches[2] // Extract the method name
+ } else if matches := simpleFuncRegex.FindStringSubmatch(line); matches != nil {
+ // Check if this line declares a function with simple return type (like GoSlice)
+ currentMethod = matches[1] // Extract the method name
+ } else if matches := voidFuncRegex.FindStringSubmatch(line); matches != nil {
+ // Check if this line declares a void-returning function
+ currentMethod = matches[1] // Extract the method name
+ }
+
+ // Check if this line contains align information
+ if alignMatches := alignRegex.FindStringSubmatch(line); alignMatches != nil && currentMethod != "" {
+ alignStr := alignMatches[1]
+ align, err := strconv.Atoi(alignStr)
+ if err != nil {
+ // Skip this entry if we can't parse the align as integer
+ currentMethod = ""
+ continue
+ }
+ results = append(results, methodAlign{
+ Method: currentMethod,
+ Align: align,
+ })
+ currentMethod = "" // Reset for next method
+ }
+ }
+
+ if err := scanner.Err(); err != nil {
+ return nil, fmt.Errorf("error reading file: %w", err)
+ }
+
+ return results, nil
+}
diff --git a/src/cmd/cgo/internal/testout/testdata/aligned.go b/src/cmd/cgo/internal/testout/testdata/aligned.go
new file mode 100644
index 00000000000000..cea6f2889a0cad
--- /dev/null
+++ b/src/cmd/cgo/internal/testout/testdata/aligned.go
@@ -0,0 +1,63 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "C"
+
+//export ReturnEmpty
+func ReturnEmpty() {
+ return
+}
+
+//export ReturnOnlyUint8
+func ReturnOnlyUint8() (uint8, uint8, uint8) {
+ return 1, 2, 3
+}
+
+//export ReturnOnlyUint16
+func ReturnOnlyUint16() (uint16, uint16, uint16) {
+ return 1, 2, 3
+}
+
+//export ReturnOnlyUint32
+func ReturnOnlyUint32() (uint32, uint32, uint32) {
+ return 1, 2, 3
+}
+
+//export ReturnOnlyUint64
+func ReturnOnlyUint64() (uint64, uint64, uint64) {
+ return 1, 2, 3
+}
+
+//export ReturnOnlyInt
+func ReturnOnlyInt() (int, int, int) {
+ return 1, 2, 3
+}
+
+//export ReturnOnlyPtr
+func ReturnOnlyPtr() (*int, *int, *int) {
+ a, b, c := 1, 2, 3
+ return &a, &b, &c
+}
+
+//export ReturnString
+func ReturnString() string {
+ return "hello"
+}
+
+//export ReturnByteSlice
+func ReturnByteSlice() []byte {
+ return []byte{1, 2, 3}
+}
+
+//export InputAndReturnUint8
+func InputAndReturnUint8(a, b, c uint8) (uint8, uint8, uint8) {
+ return a, b, c
+}
+
+//export MixedTypes
+func MixedTypes(a uint8, b uint16, c uint32, d uint64, e int, f *int) (uint8, uint16, uint32, uint64, int, *int) {
+ return a, b, c, d, e, f
+}
diff --git a/src/cmd/cgo/out.go b/src/cmd/cgo/out.go
index dfa54e41d33399..a2bcdf89c5ad44 100644
--- a/src/cmd/cgo/out.go
+++ b/src/cmd/cgo/out.go
@@ -949,6 +949,8 @@ func (p *Package) writeExports(fgo2, fm, fgcc, fgcch io.Writer) {
fmt.Fprintf(gotype, "struct {\n")
off := int64(0)
npad := 0
+ // The alignment is at least 1 (for char).
+ maxAlign := int64(1)
argField := func(typ ast.Expr, namePat string, args ...interface{}) {
name := fmt.Sprintf(namePat, args...)
t := p.cgoType(typ)
@@ -963,6 +965,11 @@ func (p *Package) writeExports(fgo2, fm, fgcc, fgcch io.Writer) {
noSourceConf.Fprint(gotype, fset, typ)
fmt.Fprintf(gotype, "\n")
off += t.Size
+ // keep track of the maximum alignment among all fields
+ // so that we can align the struct correctly
+ if t.Align > maxAlign {
+ maxAlign = t.Align
+ }
}
if fn.Recv != nil {
argField(fn.Recv.List[0].Type, "recv")
@@ -1005,12 +1012,8 @@ func (p *Package) writeExports(fgo2, fm, fgcc, fgcch io.Writer) {
}
// Build the wrapper function compiled by gcc.
- gccExport := ""
- if goos == "windows" {
- gccExport = "__declspec(dllexport) "
- }
var s strings.Builder
- fmt.Fprintf(&s, "%s%s %s(", gccExport, gccResult, exp.ExpName)
+ fmt.Fprintf(&s, "%s %s(", gccResult, exp.ExpName)
if fn.Recv != nil {
s.WriteString(p.cgoType(fn.Recv.List[0].Type).C.String())
s.WriteString(" recv")
@@ -1051,7 +1054,11 @@ func (p *Package) writeExports(fgo2, fm, fgcc, fgcch io.Writer) {
// string.h for memset, and is also robust to C++
// types with constructors. Both GCC and LLVM optimize
// this into just zeroing _cgo_a.
- fmt.Fprintf(fgcc, "\ttypedef %s %v _cgo_argtype;\n", ctype.String(), p.packedAttribute())
+ //
+ // The struct should be aligned to the maximum alignment
+ // of any of its fields, to avoid alignment issues.
+ fmt.Fprintf(fgcc, "\ttypedef %s %v __attribute__((aligned(%d))) _cgo_argtype;\n", ctype.String(), p.packedAttribute(), maxAlign)
fmt.Fprintf(fgcc, "\tstatic _cgo_argtype _cgo_zero;\n")
fmt.Fprintf(fgcc, "\t_cgo_argtype _cgo_a = _cgo_zero;\n")
if gccResult != "void" && (len(fntype.Results.List) > 1 || len(fntype.Results.List[0].Names) > 1) {
diff --git a/src/cmd/compile/README.md b/src/cmd/compile/README.md
index 1089348030d78b..02429d5688659e 100644
--- a/src/cmd/compile/README.md
+++ b/src/cmd/compile/README.md
@@ -289,9 +289,9 @@ dependencies, so is not suitable for distributed build systems.)
```
After that, your edit/compile/test cycle can be similar to:
```
- <... make edits to cmd/compile source ...>
+ [... make edits to cmd/compile source ...]
$ toolstash restore && go install cmd/compile # restore known good tools to build compiler
- <... 'go build', 'go test', etc. ...> # use freshly built compiler
+ [... 'go build', 'go test', etc. ...] # use freshly built compiler
```
* toolstash also allows comparing the installed vs. stashed copy of
diff --git a/src/cmd/compile/internal/arm64/ssa.go b/src/cmd/compile/internal/arm64/ssa.go
index 7bc0e536e941e6..43ecb6b4b715b4 100644
--- a/src/cmd/compile/internal/arm64/ssa.go
+++ b/src/cmd/compile/internal/arm64/ssa.go
@@ -1189,8 +1189,9 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
if dstReg == srcReg {
break
}
- tmpReg1 := int16(arm64.REG_R24)
- tmpReg2 := int16(arm64.REG_R25)
+ tmpReg1 := int16(arm64.REG_R25)
+ tmpFReg1 := int16(arm64.REG_F16)
+ tmpFReg2 := int16(arm64.REG_F17)
n := v.AuxInt
if n < 16 {
v.Fatalf("Move too small %d", n)
@@ -1198,10 +1199,17 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
// Generate copying instructions.
var off int64
+ for n >= 32 {
+ // FLDPQ off(srcReg), (tmpFReg1, tmpFReg2)
+ // FSTPQ (tmpFReg1, tmpFReg2), off(dstReg)
+ move32(s, srcReg, dstReg, tmpFReg1, tmpFReg2, off, false)
+ off += 32
+ n -= 32
+ }
for n >= 16 {
- // LDP off(srcReg), (tmpReg1, tmpReg2)
- // STP (tmpReg1, tmpReg2), off(dstReg)
- move16(s, srcReg, dstReg, tmpReg1, tmpReg2, off, false)
+ // FMOVQ off(src), tmpFReg1
+ // FMOVQ tmpFReg1, off(dst)
+ move16(s, srcReg, dstReg, tmpFReg1, off, false)
off += 16
n -= 16
}
@@ -1223,9 +1231,10 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
if dstReg == srcReg {
break
}
- countReg := int16(arm64.REG_R23)
- tmpReg1 := int16(arm64.REG_R24)
- tmpReg2 := int16(arm64.REG_R25)
+ countReg := int16(arm64.REG_R24)
+ tmpReg1 := int16(arm64.REG_R25)
+ tmpFReg1 := int16(arm64.REG_F16)
+ tmpFReg2 := int16(arm64.REG_F17)
n := v.AuxInt
loopSize := int64(64)
if n < 3*loopSize {
@@ -1251,10 +1260,10 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
// Move loopSize bytes starting at srcReg to dstReg.
// Increment srcReg and destReg by loopSize as a side effect.
- for range loopSize / 16 {
- // LDP.P 16(srcReg), (tmpReg1, tmpReg2)
- // STP.P (tmpReg1, tmpReg2), 16(dstReg)
- move16(s, srcReg, dstReg, tmpReg1, tmpReg2, 0, true)
+ for range loopSize / 32 {
+ // FLDPQ.P 32(srcReg), (tmpFReg1, tmpFReg2)
+ // FSTPQ.P (tmpFReg1, tmpFReg2), 32(dstReg)
+ move32(s, srcReg, dstReg, tmpFReg1, tmpFReg2, 0, true)
}
// Decrement loop count.
// SUB $1, countReg
@@ -1276,10 +1285,17 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
// Copy any fractional portion.
var off int64
+ for n >= 32 {
+ // FLDPQ off(srcReg), (tmpFReg1, tmpFReg2)
+ // FSTPQ (tmpFReg1, tmpFReg2), off(dstReg)
+ move32(s, srcReg, dstReg, tmpFReg1, tmpFReg2, off, false)
+ off += 32
+ n -= 32
+ }
for n >= 16 {
- // LDP off(srcReg), (tmpReg1, tmpReg2)
- // STP (tmpReg1, tmpReg2), off(dstReg)
- move16(s, srcReg, dstReg, tmpReg1, tmpReg2, off, false)
+ // FMOVQ off(src), tmpFReg1
+ // FMOVQ tmpFReg1, off(dst)
+ move16(s, srcReg, dstReg, tmpFReg1, off, false)
off += 16
n -= 16
}
@@ -1699,26 +1715,55 @@ func zero8(s *ssagen.State, reg int16, off int64) {
p.To.Offset = off
}
-// move16 copies 16 bytes at src+off to dst+off.
+// move32 copies 32 bytes at src+off to dst+off.
// Uses registers tmp1 and tmp2.
-// If postInc is true, increment src and dst by 16.
-func move16(s *ssagen.State, src, dst, tmp1, tmp2 int16, off int64, postInc bool) {
- // LDP off(src), (tmp1, tmp2)
- ld := s.Prog(arm64.ALDP)
+// If postInc is true, increment src and dst by 32.
+func move32(s *ssagen.State, src, dst, tmp1, tmp2 int16, off int64, postInc bool) {
+ // FLDPQ off(src), (tmp1, tmp2)
+ ld := s.Prog(arm64.AFLDPQ)
ld.From.Type = obj.TYPE_MEM
ld.From.Reg = src
ld.From.Offset = off
ld.To.Type = obj.TYPE_REGREG
ld.To.Reg = tmp1
ld.To.Offset = int64(tmp2)
- // STP (tmp1, tmp2), off(dst)
- st := s.Prog(arm64.ASTP)
+ // FSTPQ (tmp1, tmp2), off(dst)
+ st := s.Prog(arm64.AFSTPQ)
st.From.Type = obj.TYPE_REGREG
st.From.Reg = tmp1
st.From.Offset = int64(tmp2)
st.To.Type = obj.TYPE_MEM
st.To.Reg = dst
st.To.Offset = off
+ if postInc {
+ if off != 0 {
+ panic("can't postinc with non-zero offset")
+ }
+ ld.Scond = arm64.C_XPOST
+ st.Scond = arm64.C_XPOST
+ ld.From.Offset = 32
+ st.To.Offset = 32
+ }
+}
+
+// move16 copies 16 bytes at src+off to dst+off.
+// Uses register tmp1.
+// If postInc is true, increment src and dst by 16.
+func move16(s *ssagen.State, src, dst, tmp1 int16, off int64, postInc bool) {
+ // FMOVQ off(src), tmp1
+ ld := s.Prog(arm64.AFMOVQ)
+ ld.From.Type = obj.TYPE_MEM
+ ld.From.Reg = src
+ ld.From.Offset = off
+ ld.To.Type = obj.TYPE_REG
+ ld.To.Reg = tmp1
+ // FMOVQ tmp1, off(dst)
+ st := s.Prog(arm64.AFMOVQ)
+ st.From.Type = obj.TYPE_REG
+ st.From.Reg = tmp1
+ st.To.Type = obj.TYPE_MEM
+ st.To.Reg = dst
+ st.To.Offset = off
if postInc {
if off != 0 {
panic("can't postinc with non-zero offset")
diff --git a/src/cmd/compile/internal/base/debug.go b/src/cmd/compile/internal/base/debug.go
index 85873dcc40e1b3..9e8ab2f488bb4d 100644
--- a/src/cmd/compile/internal/base/debug.go
+++ b/src/cmd/compile/internal/base/debug.go
@@ -20,6 +20,7 @@ type DebugFlags struct {
Append int `help:"print information about append compilation"`
Checkptr int `help:"instrument unsafe pointer conversions\n0: instrumentation disabled\n1: conversions involving unsafe.Pointer are instrumented\n2: conversions to unsafe.Pointer force heap allocation" concurrent:"ok"`
Closure int `help:"print information about closure compilation"`
+ Converthash string `help:"hash value for use in debugging changes to platform-dependent float-to-[u]int conversion" concurrent:"ok"`
Defer int `help:"print information about defer compilation"`
DisableNil int `help:"disable nil checks" concurrent:"ok"`
DumpInlFuncProps string `help:"dump function properties from inl heuristics to specified file"`
diff --git a/src/cmd/compile/internal/base/flag.go b/src/cmd/compile/internal/base/flag.go
index a0ed876cfc8e0e..1d211e0a2dd9f4 100644
--- a/src/cmd/compile/internal/base/flag.go
+++ b/src/cmd/compile/internal/base/flag.go
@@ -262,6 +262,12 @@ func ParseFlags() {
Debug.LoopVar = 1
}
+ if Debug.Converthash != "" {
+ ConvertHash = NewHashDebug("converthash", Debug.Converthash, nil)
+ } else {
+ // quietly disable the convert hash changes
+ ConvertHash = NewHashDebug("converthash", "qn", nil)
+ }
if Debug.Fmahash != "" {
FmaHash = NewHashDebug("fmahash", Debug.Fmahash, nil)
}
diff --git a/src/cmd/compile/internal/base/hashdebug.go b/src/cmd/compile/internal/base/hashdebug.go
index fa63deb46a3c01..edf567457cb04b 100644
--- a/src/cmd/compile/internal/base/hashdebug.go
+++ b/src/cmd/compile/internal/base/hashdebug.go
@@ -53,6 +53,7 @@ func (d *HashDebug) SetInlineSuffixOnly(b bool) *HashDebug {
// The default compiler-debugging HashDebug, for "-d=gossahash=..."
var hashDebug *HashDebug
+var ConvertHash *HashDebug // for debugging float-to-[u]int conversion changes
var FmaHash *HashDebug // for debugging fused-multiply-add floating point changes
var LoopVarHash *HashDebug // for debugging shared/private loop variable changes
var PGOHash *HashDebug // for debugging PGO optimization decisions
diff --git a/src/cmd/compile/internal/base/print.go b/src/cmd/compile/internal/base/print.go
index 119f06fbc03351..9e3348c1ecca89 100644
--- a/src/cmd/compile/internal/base/print.go
+++ b/src/cmd/compile/internal/base/print.go
@@ -220,7 +220,7 @@ func FatalfAt(pos src.XPos, format string, args ...interface{}) {
fmt.Printf("\n")
// If this is a released compiler version, ask for a bug report.
- if Debug.Panic == 0 && strings.HasPrefix(buildcfg.Version, "go") {
+ if Debug.Panic == 0 && strings.HasPrefix(buildcfg.Version, "go") && !strings.Contains(buildcfg.Version, "devel") {
fmt.Printf("\n")
fmt.Printf("Please file a bug report including a short program that triggers the error.\n")
fmt.Printf("https://go.dev/issue/new\n")
diff --git a/src/cmd/compile/internal/devirtualize/devirtualize.go b/src/cmd/compile/internal/devirtualize/devirtualize.go
index 372d05809401ff..cb4608a0246574 100644
--- a/src/cmd/compile/internal/devirtualize/devirtualize.go
+++ b/src/cmd/compile/internal/devirtualize/devirtualize.go
@@ -18,9 +18,11 @@ import (
"cmd/compile/internal/types"
)
+const go126ImprovedConcreteTypeAnalysis = true
+
// StaticCall devirtualizes the given call if possible when the concrete callee
// is available statically.
-func StaticCall(call *ir.CallExpr) {
+func StaticCall(s *State, call *ir.CallExpr) {
// For promoted methods (including value-receiver methods promoted
// to pointer-receivers), the interface method wrapper may contain
// expressions that can panic (e.g., ODEREF, ODOTPTR,
@@ -40,15 +42,31 @@ func StaticCall(call *ir.CallExpr) {
}
sel := call.Fun.(*ir.SelectorExpr)
- r := ir.StaticValue(sel.X)
- if r.Op() != ir.OCONVIFACE {
- return
- }
- recv := r.(*ir.ConvExpr)
+ var typ *types.Type
+ if go126ImprovedConcreteTypeAnalysis {
+ typ = concreteType(s, sel.X)
+ if typ == nil {
+ return
+ }
- typ := recv.X.Type()
- if typ.IsInterface() {
- return
+ // Don't create type-assertions that would be impossible at compile-time.
+ // This can happen in a case like any(0).(interface{ A() }).A(), which typechecks
+ // without errors but panics at runtime. We statically know that int(0) does not
+ // implement that interface, so we skip the devirtualization, as it is not possible
+ // to make the assertion any(0).(interface{ A() }).(int) (int does not implement interface{ A() }).
+ if !typecheck.Implements(typ, sel.X.Type()) {
+ return
+ }
+ } else {
+ r := ir.StaticValue(sel.X)
+ if r.Op() != ir.OCONVIFACE {
+ return
+ }
+ recv := r.(*ir.ConvExpr)
+ typ = recv.X.Type()
+ if typ.IsInterface() {
+ return
+ }
}
// If typ is a shape type, then it was a type argument originally
@@ -99,8 +117,27 @@ func StaticCall(call *ir.CallExpr) {
return
}
- dt := ir.NewTypeAssertExpr(sel.Pos(), sel.X, nil)
- dt.SetType(typ)
+ dt := ir.NewTypeAssertExpr(sel.Pos(), sel.X, typ)
+
+ if go126ImprovedConcreteTypeAnalysis {
+ // Consider:
+ //
+ // var v Iface
+ // v.A()
+ // v = &Impl{}
+ //
+ // Here in the devirtualizer, we determine the concrete type of v to be *Impl,
+ // but v can still be a nil interface, which we have not detected. The v.(*Impl)
+ // type assertion that we make here would also fail, but with a different
+ // panic ("pkg.Iface is nil, not *pkg.Impl") than the nil panic we would previously get.
+ // We fix this by introducing an additional nil check on the itab.
+ // Calling a method on a nil interface is (in most cases) a bug in the program, so it is fine
+ // to devirtualize and further (possibly) inline such calls, even though we would never reach
+ // the called function.
+ dt.UseNilPanic = true
+ dt.SetPos(call.Pos())
+ }
+
x := typecheck.XDotMethod(sel.Pos(), dt, sel.Sel, true)
switch x.Op() {
case ir.ODOTMETH:
@@ -138,3 +175,413 @@ func StaticCall(call *ir.CallExpr) {
// Desugar OCALLMETH, if we created one (#57309).
typecheck.FixMethodCall(call)
}
+
+const concreteTypeDebug = false
+
+// concreteType determines the concrete type of n, following OCONVIFACEs and type asserts.
+// Returns nil when the concrete type could not be determined, or when there are multiple
+// (different) types assigned to an interface.
+func concreteType(s *State, n ir.Node) (typ *types.Type) {
+ typ = concreteType1(s, n, make(map[*ir.Name]struct{}))
+ if typ == &noType {
+ return nil
+ }
+ if typ != nil && typ.IsInterface() {
+ base.FatalfAt(n.Pos(), "typ.IsInterface() = true; want = false; typ = %v", typ)
+ }
+ return typ
+}
+
+// noType is a sentinel value returned by [concreteType1].
+var noType types.Type
+
+// concreteType1 analyzes the node n and returns its concrete type if it is statically known.
+// Otherwise, it returns a nil Type, indicating that a concrete type was not determined.
+// When n is known to be statically nil or a self-assignment is detected, it returns the sentinel [noType] type instead.
+func concreteType1(s *State, n ir.Node, seen map[*ir.Name]struct{}) (outT *types.Type) {
+ nn := n // for debug messages
+
+ if concreteTypeDebug {
+ defer func() {
+ t := "&noType"
+ if outT != &noType {
+ t = outT.String()
+ }
+ base.Warn("concreteType1(%v) -> %v", nn, t)
+ }()
+ }
+
+ for {
+ if concreteTypeDebug {
+ base.Warn("concreteType1(%v): analyzing %v", nn, n)
+ }
+
+ if !n.Type().IsInterface() {
+ return n.Type()
+ }
+
+ switch n1 := n.(type) {
+ case *ir.ConvExpr:
+ if n1.Op() == ir.OCONVNOP {
+ if !n1.Type().IsInterface() || !types.Identical(n1.Type().Underlying(), n1.X.Type().Underlying()) {
+ // We check (directly before this switch) whether n is an interface, so we should only
+ // reach here for iface conversions where both operands have identical underlying interface types.
+ base.FatalfAt(n1.Pos(), "not identical/interface types found n1.Type = %v; n1.X.Type = %v", n1.Type(), n1.X.Type())
+ }
+ n = n1.X
+ continue
+ }
+ if n1.Op() == ir.OCONVIFACE {
+ n = n1.X
+ continue
+ }
+ case *ir.InlinedCallExpr:
+ if n1.Op() == ir.OINLCALL {
+ n = n1.SingleResult()
+ continue
+ }
+ case *ir.ParenExpr:
+ n = n1.X
+ continue
+ case *ir.TypeAssertExpr:
+ n = n1.X
+ continue
+ }
+ break
+ }
+
+ if n.Op() != ir.ONAME {
+ return nil
+ }
+
+ name := n.(*ir.Name).Canonical()
+ if name.Class != ir.PAUTO {
+ return nil
+ }
+
+ if name.Op() != ir.ONAME {
+ base.FatalfAt(name.Pos(), "name.Op = %v; want = ONAME", name.Op())
+ }
+
+ // name.Curfn must be set, as we checked name.Class != ir.PAUTO before.
+ if name.Curfn == nil {
+ base.FatalfAt(name.Pos(), "name.Curfn = nil; want not nil")
+ }
+
+ if name.Addrtaken() {
+ return nil // conservatively assume it's reassigned with a different type indirectly
+ }
+
+ if _, ok := seen[name]; ok {
+ return &noType // Already analyzed assignments to name, no need to do that twice.
+ }
+ seen[name] = struct{}{}
+
+ if concreteTypeDebug {
+ base.Warn("concreteType1(%v): analyzing assignments to %v", nn, name)
+ }
+
+ var typ *types.Type
+ for _, v := range s.assignments(name) {
+ var t *types.Type
+ switch v := v.(type) {
+ case *types.Type:
+ t = v
+ case ir.Node:
+ t = concreteType1(s, v, seen)
+ if t == &noType {
+ continue
+ }
+ }
+ if t == nil || (typ != nil && !types.Identical(typ, t)) {
+ return nil
+ }
+ typ = t
+ }
+
+ if typ == nil {
+ // Variable either declared with zero value, or only assigned with nil.
+ return &noType
+ }
+
+ return typ
+}
+
+// assignment can be one of:
+// - nil - assignment from an interface type.
+// - *types.Type - assignment from a concrete type (non-interface).
+// - ir.Node - assignment from a ir.Node.
+//
+// In most cases assignment should be an [ir.Node], but in cases where we
+// do not follow the data flow, we return either a concrete type (*types.Type) or nil.
+// For example, in a range over a slice, if the slice elem is of an interface type we return
+// nil, otherwise the elem's concrete type (we do so because we do not analyze assignments to the
+// slice being ranged over).
+type assignment any
+
+// State holds precomputed state for use in [StaticCall].
+type State struct {
+ // ifaceAssignments maps interface variables to all their assignments
+ // defined inside functions stored in the analyzedFuncs set.
+ // Note: it does not include direct assignments to nil.
+ ifaceAssignments map[*ir.Name][]assignment
+
+ // ifaceCallExprAssigns stores every [*ir.CallExpr], which has an interface
+ // result, that is assigned to a variable.
+ ifaceCallExprAssigns map[*ir.CallExpr][]ifaceAssignRef
+
+ // analyzedFuncs is a set of Funcs that were analyzed for iface assignments.
+ analyzedFuncs map[*ir.Func]struct{}
+}
+
+type ifaceAssignRef struct {
+ name *ir.Name // ifaceAssignments[name]
+ assignmentIndex int // ifaceAssignments[name][assignmentIndex]
+ returnIndex int // (*ir.CallExpr).Result(returnIndex)
+}
+
+// InlinedCall updates the [State] to take into account a newly inlined call.
+func (s *State) InlinedCall(fun *ir.Func, origCall *ir.CallExpr, inlinedCall *ir.InlinedCallExpr) {
+ if _, ok := s.analyzedFuncs[fun]; !ok {
+ // A full analysis has not yet been executed for the provided function, so we can skip it for now.
+ // When no devirtualization happens in a function, it is unnecessary to analyze it.
+ return
+ }
+
+ // Analyze assignments in the newly inlined function.
+ s.analyze(inlinedCall.Init())
+ s.analyze(inlinedCall.Body)
+
+ refs, ok := s.ifaceCallExprAssigns[origCall]
+ if !ok {
+ return
+ }
+ delete(s.ifaceCallExprAssigns, origCall)
+
+ // Update assignments to reference the new ReturnVars of the inlined call.
+ for _, ref := range refs {
+ vt := &s.ifaceAssignments[ref.name][ref.assignmentIndex]
+ if *vt != nil {
+ base.Fatalf("unexpected non-nil assignment")
+ }
+ if concreteTypeDebug {
+ base.Warn(
+ "InlinedCall(%v, %v): replacing interface node in (%v,%v) to %v (typ %v)",
+ origCall, inlinedCall, ref.name, ref.assignmentIndex,
+ inlinedCall.ReturnVars[ref.returnIndex],
+ inlinedCall.ReturnVars[ref.returnIndex].Type(),
+ )
+ }
+
+ // Update ifaceAssignments with an ir.Node from the inlined function’s ReturnVars.
+ // This may enable future devirtualization of calls that reference ref.name.
+ // We will get calls to [StaticCall] from the interleaved package,
+ // to try to devirtualize such calls afterwards.
+ *vt = inlinedCall.ReturnVars[ref.returnIndex]
+ }
+}
+
+// assignments returns all assignments to n.
+func (s *State) assignments(n *ir.Name) []assignment {
+ fun := n.Curfn
+ if fun == nil {
+ base.FatalfAt(n.Pos(), "n.Curfn = ")
+ }
+ if n.Class != ir.PAUTO {
+ base.FatalfAt(n.Pos(), "n.Class = %v; want = PAUTO", n.Class)
+ }
+
+ if !n.Type().IsInterface() {
+ base.FatalfAt(n.Pos(), "name passed to assignments is not of an interface type: %v", n.Type())
+ }
+
+ // Analyze assignments in func, if not analyzed before.
+ if _, ok := s.analyzedFuncs[fun]; !ok {
+ if concreteTypeDebug {
+ base.Warn("assignments(): analyzing assignments in %v func", fun)
+ }
+ if s.analyzedFuncs == nil {
+ s.ifaceAssignments = make(map[*ir.Name][]assignment)
+ s.ifaceCallExprAssigns = make(map[*ir.CallExpr][]ifaceAssignRef)
+ s.analyzedFuncs = make(map[*ir.Func]struct{})
+ }
+ s.analyzedFuncs[fun] = struct{}{}
+ s.analyze(fun.Init())
+ s.analyze(fun.Body)
+ }
+
+ return s.ifaceAssignments[n]
+}
+
+// analyze analyzes every assignment to interface variables in nodes, updating [State].
+func (s *State) analyze(nodes ir.Nodes) {
+ assign := func(name ir.Node, assignment assignment) (*ir.Name, int) {
+ if name == nil || name.Op() != ir.ONAME || ir.IsBlank(name) {
+ return nil, -1
+ }
+
+ n, ok := ir.OuterValue(name).(*ir.Name)
+ if !ok || n.Curfn == nil {
+ return nil, -1
+ }
+
+ // Do not track variables that are not of interface types.
+ // For devirtualization they are unnecessary, we will not even look them up.
+ if !n.Type().IsInterface() {
+ return nil, -1
+ }
+
+ n = n.Canonical()
+ if n.Op() != ir.ONAME {
+ base.FatalfAt(n.Pos(), "n.Op = %v; want = ONAME", n.Op())
+ }
+ if n.Class != ir.PAUTO {
+ return nil, -1
+ }
+
+ switch a := assignment.(type) {
+ case nil:
+ case *types.Type:
+ if a != nil && a.IsInterface() {
+ assignment = nil // non-concrete type
+ }
+ case ir.Node:
+ // nil assignments can be safely ignored, see [StaticCall].
+ if ir.IsNil(a) {
+ return nil, -1
+ }
+ default:
+ base.Fatalf("unexpected type: %v", assignment)
+ }
+
+ if concreteTypeDebug {
+ base.Warn("analyze(): assignment found %v = %v", name, assignment)
+ }
+
+ s.ifaceAssignments[n] = append(s.ifaceAssignments[n], assignment)
+ return n, len(s.ifaceAssignments[n]) - 1
+ }
+
+ var do func(n ir.Node)
+ do = func(n ir.Node) {
+ switch n.Op() {
+ case ir.OAS:
+ n := n.(*ir.AssignStmt)
+ if rhs := n.Y; rhs != nil {
+ for {
+ if r, ok := rhs.(*ir.ParenExpr); ok {
+ rhs = r.X
+ continue
+ }
+ break
+ }
+ if call, ok := rhs.(*ir.CallExpr); ok && call.Fun != nil {
+ retTyp := call.Fun.Type().Results()[0].Type
+ n, idx := assign(n.X, retTyp)
+ if n != nil && retTyp.IsInterface() {
+ // We have a call expression that returns an interface; store it for later evaluation.
+ // In case this func gets inlined later, we will update the assignment (added above)
+ // with a reference to ReturnVars (see [State.InlinedCall]), which might allow future devirtualization of n.X.
+ s.ifaceCallExprAssigns[call] = append(s.ifaceCallExprAssigns[call], ifaceAssignRef{n, idx, 0})
+ }
+ } else {
+ assign(n.X, rhs)
+ }
+ }
+ case ir.OAS2:
+ n := n.(*ir.AssignListStmt)
+ for i, p := range n.Lhs {
+ if n.Rhs[i] != nil {
+ assign(p, n.Rhs[i])
+ }
+ }
+ case ir.OAS2DOTTYPE:
+ n := n.(*ir.AssignListStmt)
+ if n.Rhs[0] == nil {
+ base.FatalfAt(n.Pos(), "n.Rhs[0] == nil; n = %v", n)
+ }
+ assign(n.Lhs[0], n.Rhs[0])
+ assign(n.Lhs[1], nil) // boolean does not have methods to devirtualize
+ case ir.OAS2MAPR, ir.OAS2RECV, ir.OSELRECV2:
+ n := n.(*ir.AssignListStmt)
+ if n.Rhs[0] == nil {
+ base.FatalfAt(n.Pos(), "n.Rhs[0] == nil; n = %v", n)
+ }
+ assign(n.Lhs[0], n.Rhs[0].Type())
+ assign(n.Lhs[1], nil) // boolean does not have methods to devirtualize
+ case ir.OAS2FUNC:
+ n := n.(*ir.AssignListStmt)
+ rhs := n.Rhs[0]
+ for {
+ if r, ok := rhs.(*ir.ParenExpr); ok {
+ rhs = r.X
+ continue
+ }
+ break
+ }
+ if call, ok := rhs.(*ir.CallExpr); ok {
+ for i, p := range n.Lhs {
+ retTyp := call.Fun.Type().Results()[i].Type
+ n, idx := assign(p, retTyp)
+ if n != nil && retTyp.IsInterface() {
+ // We have a call expression that returns an interface; store it for later evaluation.
+ // In case this func gets inlined later, we will update the assignment (added above)
+ // with a reference to ReturnVars (see [State.InlinedCall]), which might allow future devirtualization of n.X.
+ s.ifaceCallExprAssigns[call] = append(s.ifaceCallExprAssigns[call], ifaceAssignRef{n, idx, i})
+ }
+ }
+ } else if call, ok := rhs.(*ir.InlinedCallExpr); ok {
+ for i, p := range n.Lhs {
+ assign(p, call.ReturnVars[i])
+ }
+ } else {
+ base.FatalfAt(n.Pos(), "unexpected type %T in OAS2FUNC Rhs[0]", call)
+ }
+ case ir.ORANGE:
+ n := n.(*ir.RangeStmt)
+ xTyp := n.X.Type()
+
+ // Range over an array pointer.
+ if xTyp.IsPtr() && xTyp.Elem().IsArray() {
+ xTyp = xTyp.Elem()
+ }
+
+ if xTyp.IsArray() || xTyp.IsSlice() {
+ assign(n.Key, nil) // integer does not have methods to devirtualize
+ assign(n.Value, xTyp.Elem())
+ } else if xTyp.IsChan() {
+ assign(n.Key, xTyp.Elem())
+ base.AssertfAt(n.Value == nil, n.Pos(), "n.Value != nil in range over chan")
+ } else if xTyp.IsMap() {
+ assign(n.Key, xTyp.Key())
+ assign(n.Value, xTyp.Elem())
+ } else if xTyp.IsInteger() || xTyp.IsString() {
+ // Range over int/string, results do not have methods, so nothing to devirtualize.
+ assign(n.Key, nil)
+ assign(n.Value, nil)
+ } else {
+ // We will not reach here in the case of a range-over-func, as it is
+ // rewritten to function calls in the noder package.
+ base.FatalfAt(n.Pos(), "range over unexpected type %v", n.X.Type())
+ }
+ case ir.OSWITCH:
+ n := n.(*ir.SwitchStmt)
+ if guard, ok := n.Tag.(*ir.TypeSwitchGuard); ok {
+ for _, v := range n.Cases {
+ if v.Var == nil {
+ base.Assert(guard.Tag == nil)
+ continue
+ }
+ assign(v.Var, guard.X)
+ }
+ }
+ case ir.OCLOSURE:
+ n := n.(*ir.ClosureExpr)
+ if _, ok := s.analyzedFuncs[n.Func]; !ok {
+ s.analyzedFuncs[n.Func] = struct{}{}
+ ir.Visit(n.Func, do)
+ }
+ }
+ }
+ ir.VisitList(nodes, do)
+}
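As a rough illustration of what the new assignment analysis enables (the types below are illustrative, not taken from the patch): when every assignment to an interface-typed local uses the same concrete type, the indirect call can be rewritten into a direct, possibly inlined call, guarded as described in the comments above.

```
package p

type Iface interface{ A() }

type Impl struct{}

func (Impl) A() {}

func f() {
	var v Iface
	v = Impl{}
	// All assignments to v use the concrete type Impl, so the devirtualizer
	// can turn this interface call into a direct call on Impl, guarded by a
	// type assertion plus a nil check on the itab.
	v.A()
}
```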
diff --git a/src/cmd/compile/internal/escape/escape.go b/src/cmd/compile/internal/escape/escape.go
index 6b34830b3dd5e6..59250edfef0078 100644
--- a/src/cmd/compile/internal/escape/escape.go
+++ b/src/cmd/compile/internal/escape/escape.go
@@ -563,7 +563,10 @@ func (b *batch) rewriteWithLiterals(n ir.Node, fn *ir.Func) {
if ro == nil {
base.Fatalf("no ReassignOracle for function %v with closure parent %v", fn, fn.ClosureParent)
}
- if s := ro.StaticValue(*r); s.Op() == ir.OLITERAL {
+
+ s := ro.StaticValue(*r)
+ switch s.Op() {
+ case ir.OLITERAL:
lit, ok := s.(*ir.BasicLit)
if !ok || lit.Val().Kind() != constant.Int {
base.Fatalf("unexpected BasicLit Kind")
@@ -577,6 +580,14 @@ func (b *batch) rewriteWithLiterals(n ir.Node, fn *ir.Func) {
assignTemp(n.Pos(), *r, n.PtrInit())
*r = ir.NewBasicLit(n.Pos(), (*r).Type(), lit.Val())
}
+ case ir.OLEN:
+ x := ro.StaticValue(s.(*ir.UnaryExpr).X)
+ if x.Op() == ir.OSLICELIT {
+ x := x.(*ir.CompLitExpr)
+ // Preserve any side effects of the original expression, then update the value.
+ assignTemp(n.Pos(), *r, n.PtrInit())
+ *r = ir.NewBasicLit(n.Pos(), types.Types[types.TINT], constant.MakeInt64(x.Len))
+ }
}
}
case ir.OCONVIFACE:
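A hedged sketch of the pattern the new `OLEN` case targets, assuming a closure whose `make` length resolves to `len` of a slice literal in the enclosing function (names are illustrative):

```
package p

func enclosing() []byte {
	words := []string{"a", "b", "c"}
	f := func() []byte {
		// With the change above, len(words) can be rewritten to the constant 3
		// while analyzing the closure, so the buffer size is statically known.
		return make([]byte, len(words))
	}
	return f()
}
```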
diff --git a/src/cmd/compile/internal/inline/interleaved/interleaved.go b/src/cmd/compile/internal/inline/interleaved/interleaved.go
index 954cc306fc81d3..c83bbdb718df56 100644
--- a/src/cmd/compile/internal/inline/interleaved/interleaved.go
+++ b/src/cmd/compile/internal/inline/interleaved/interleaved.go
@@ -45,6 +45,8 @@ func DevirtualizeAndInlinePackage(pkg *ir.Package, profile *pgoir.Profile) {
inlState := make(map[*ir.Func]*inlClosureState)
calleeUseCounts := make(map[*ir.Func]int)
+ var state devirtualize.State
+
// Pre-process all the functions, adding parentheses around call sites and starting their "inl state".
for _, fn := range typecheck.Target.Funcs {
bigCaller := base.Flag.LowerL != 0 && inline.IsBigFunc(fn)
@@ -58,7 +60,7 @@ func DevirtualizeAndInlinePackage(pkg *ir.Package, profile *pgoir.Profile) {
// Do a first pass at counting call sites.
for i := range s.parens {
- s.resolve(i)
+ s.resolve(&state, i)
}
}
@@ -102,10 +104,11 @@ func DevirtualizeAndInlinePackage(pkg *ir.Package, profile *pgoir.Profile) {
for {
for i := l0; i < l1; i++ { // can't use "range parens" here
paren := s.parens[i]
- if new := s.edit(i); new != nil {
+ if origCall, inlinedCall := s.edit(&state, i); inlinedCall != nil {
// Update AST and recursively mark nodes.
- paren.X = new
- ir.EditChildren(new, s.mark) // mark may append to parens
+ paren.X = inlinedCall
+ ir.EditChildren(inlinedCall, s.mark) // mark may append to parens
+ state.InlinedCall(s.fn, origCall, inlinedCall)
done = false
}
}
@@ -114,7 +117,7 @@ func DevirtualizeAndInlinePackage(pkg *ir.Package, profile *pgoir.Profile) {
break
}
for i := l0; i < l1; i++ {
- s.resolve(i)
+ s.resolve(&state, i)
}
}
@@ -188,7 +191,7 @@ type inlClosureState struct {
// resolve attempts to resolve a call to a potentially inlineable callee
// and updates use counts on the callees. Returns the call site count
// for that callee.
-func (s *inlClosureState) resolve(i int) (*ir.Func, int) {
+func (s *inlClosureState) resolve(state *devirtualize.State, i int) (*ir.Func, int) {
p := s.parens[i]
if i < len(s.resolved) {
if callee := s.resolved[i]; callee != nil {
@@ -200,7 +203,7 @@ func (s *inlClosureState) resolve(i int) (*ir.Func, int) {
if !ok { // previously inlined
return nil, -1
}
- devirtualize.StaticCall(call)
+ devirtualize.StaticCall(state, call)
if callee := inline.InlineCallTarget(s.fn, call, s.profile); callee != nil {
for len(s.resolved) <= i {
s.resolved = append(s.resolved, nil)
@@ -213,23 +216,23 @@ func (s *inlClosureState) resolve(i int) (*ir.Func, int) {
return nil, 0
}
-func (s *inlClosureState) edit(i int) ir.Node {
+func (s *inlClosureState) edit(state *devirtualize.State, i int) (*ir.CallExpr, *ir.InlinedCallExpr) {
n := s.parens[i].X
call, ok := n.(*ir.CallExpr)
if !ok {
- return nil
+ return nil, nil
}
// This is redundant with earlier calls to
// resolve, but because things can change it
// must be re-checked.
- callee, count := s.resolve(i)
+ callee, count := s.resolve(state, i)
if count <= 0 {
- return nil
+ return nil, nil
}
if inlCall := inline.TryInlineCall(s.fn, call, s.bigCaller, s.profile, count == 1 && callee.ClosureParent != nil); inlCall != nil {
- return inlCall
+ return call, inlCall
}
- return nil
+ return nil, nil
}
// Mark inserts parentheses, and is called repeatedly.
@@ -338,16 +341,18 @@ func (s *inlClosureState) unparenthesize() {
// returns.
func (s *inlClosureState) fixpoint() bool {
changed := false
+ var state devirtualize.State
ir.WithFunc(s.fn, func() {
done := false
for !done {
done = true
for i := 0; i < len(s.parens); i++ { // can't use "range parens" here
paren := s.parens[i]
- if new := s.edit(i); new != nil {
+ if origCall, inlinedCall := s.edit(&state, i); inlinedCall != nil {
// Update AST and recursively mark nodes.
- paren.X = new
- ir.EditChildren(new, s.mark) // mark may append to parens
+ paren.X = inlinedCall
+ ir.EditChildren(inlinedCall, s.mark) // mark may append to parens
+ state.InlinedCall(s.fn, origCall, inlinedCall)
done = false
changed = true
}
diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go
index 9d34f27ce53363..037957b676a033 100644
--- a/src/cmd/compile/internal/ir/expr.go
+++ b/src/cmd/compile/internal/ir/expr.go
@@ -677,6 +677,11 @@ type TypeAssertExpr struct {
// An internal/abi.TypeAssert descriptor to pass to the runtime.
Descriptor *obj.LSym
+
+ // When set to true, if this assert would panic, then use a nil pointer panic
+ // instead of an interface conversion panic.
+ // It must not be set for type asserts using the commaok form.
+ UseNilPanic bool
}
func NewTypeAssertExpr(pos src.XPos, x Node, typ *types.Type) *TypeAssertExpr {
diff --git a/src/cmd/compile/internal/ir/symtab.go b/src/cmd/compile/internal/ir/symtab.go
index ee0f52fbf3f3b8..f8eb4578809312 100644
--- a/src/cmd/compile/internal/ir/symtab.go
+++ b/src/cmd/compile/internal/ir/symtab.go
@@ -13,47 +13,50 @@ import (
var Syms symsStruct
type symsStruct struct {
- AssertE2I *obj.LSym
- AssertE2I2 *obj.LSym
- Asanread *obj.LSym
- Asanwrite *obj.LSym
- CgoCheckMemmove *obj.LSym
- CgoCheckPtrWrite *obj.LSym
- CheckPtrAlignment *obj.LSym
- Deferproc *obj.LSym
- Deferprocat *obj.LSym
- DeferprocStack *obj.LSym
- Deferreturn *obj.LSym
- Duffcopy *obj.LSym
- Duffzero *obj.LSym
- GCWriteBarrier [8]*obj.LSym
- Goschedguarded *obj.LSym
- Growslice *obj.LSym
- InterfaceSwitch *obj.LSym
- MallocGC *obj.LSym
- Memmove *obj.LSym
- Msanread *obj.LSym
- Msanwrite *obj.LSym
- Msanmove *obj.LSym
- Newobject *obj.LSym
- Newproc *obj.LSym
- PanicBounds *obj.LSym
- PanicExtend *obj.LSym
- Panicdivide *obj.LSym
- Panicshift *obj.LSym
- PanicdottypeE *obj.LSym
- PanicdottypeI *obj.LSym
- Panicnildottype *obj.LSym
- Panicoverflow *obj.LSym
- Racefuncenter *obj.LSym
- Racefuncexit *obj.LSym
- Raceread *obj.LSym
- Racereadrange *obj.LSym
- Racewrite *obj.LSym
- Racewriterange *obj.LSym
- TypeAssert *obj.LSym
- WBZero *obj.LSym
- WBMove *obj.LSym
+ AssertE2I *obj.LSym
+ AssertE2I2 *obj.LSym
+ Asanread *obj.LSym
+ Asanwrite *obj.LSym
+ CgoCheckMemmove *obj.LSym
+ CgoCheckPtrWrite *obj.LSym
+ CheckPtrAlignment *obj.LSym
+ Deferproc *obj.LSym
+ Deferprocat *obj.LSym
+ DeferprocStack *obj.LSym
+ Deferreturn *obj.LSym
+ Duffcopy *obj.LSym
+ Duffzero *obj.LSym
+ GCWriteBarrier [8]*obj.LSym
+ Goschedguarded *obj.LSym
+ Growslice *obj.LSym
+ InterfaceSwitch *obj.LSym
+ MallocGC *obj.LSym
+ MallocGCSmallNoScan [27]*obj.LSym
+ MallocGCSmallScanNoHeader [27]*obj.LSym
+ MallocGCTiny [16]*obj.LSym
+ Memmove *obj.LSym
+ Msanread *obj.LSym
+ Msanwrite *obj.LSym
+ Msanmove *obj.LSym
+ Newobject *obj.LSym
+ Newproc *obj.LSym
+ PanicBounds *obj.LSym
+ PanicExtend *obj.LSym
+ Panicdivide *obj.LSym
+ Panicshift *obj.LSym
+ PanicdottypeE *obj.LSym
+ PanicdottypeI *obj.LSym
+ Panicnildottype *obj.LSym
+ Panicoverflow *obj.LSym
+ Racefuncenter *obj.LSym
+ Racefuncexit *obj.LSym
+ Raceread *obj.LSym
+ Racereadrange *obj.LSym
+ Racewrite *obj.LSym
+ Racewriterange *obj.LSym
+ TypeAssert *obj.LSym
+ WBZero *obj.LSym
+ WBMove *obj.LSym
// Wasm
SigPanic *obj.LSym
Staticuint64s *obj.LSym
diff --git a/src/cmd/compile/internal/noder/reader.go b/src/cmd/compile/internal/noder/reader.go
index 41eb2dce1cc50f..a8a45b02697079 100644
--- a/src/cmd/compile/internal/noder/reader.go
+++ b/src/cmd/compile/internal/noder/reader.go
@@ -2961,6 +2961,7 @@ func (r *reader) multiExpr() []ir.Node {
as.Def = true
for i := range results {
tmp := r.temp(pos, r.typ())
+ tmp.Defn = as
as.PtrInit().Append(ir.NewDecl(pos, ir.ODCL, tmp))
as.Lhs.Append(tmp)
diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64.rules b/src/cmd/compile/internal/ssa/_gen/AMD64.rules
index 7d3efef5cdc837..0bea99e38de1bc 100644
--- a/src/cmd/compile/internal/ssa/_gen/AMD64.rules
+++ b/src/cmd/compile/internal/ssa/_gen/AMD64.rules
@@ -162,10 +162,19 @@
(Cvt64to32F ...) => (CVTSQ2SS ...)
(Cvt64to64F ...) => (CVTSQ2SD ...)
-(Cvt32Fto32 ...) => (CVTTSS2SL ...)
-(Cvt32Fto64 ...) => (CVTTSS2SQ ...)
-(Cvt64Fto32 ...) => (CVTTSD2SL ...)
-(Cvt64Fto64 ...) => (CVTTSD2SQ ...)
+// Float to int.
+// To make AMD64 "overflow" return max positive instead of max negative, compute
+// y AND NOT x, smear the sign bit, and xor.
+(Cvt32Fto32 x) && base.ConvertHash.MatchPos(v.Pos, nil) => (XORL y (SARLconst [31] (ANDL y:(CVTTSS2SL x) (NOTL (MOVLf2i x)))))
+(Cvt64Fto32 x) && base.ConvertHash.MatchPos(v.Pos, nil) => (XORL y (SARLconst [31] (ANDL y:(CVTTSD2SL x) (NOTL (MOVLf2i (CVTSD2SS x))))))
+
+(Cvt32Fto64 x) && base.ConvertHash.MatchPos(v.Pos, nil) => (XORQ y (SARQconst [63] (ANDQ y:(CVTTSS2SQ x) (NOTQ (MOVQf2i (CVTSS2SD x))) )))
+(Cvt64Fto64 x) && base.ConvertHash.MatchPos(v.Pos, nil) => (XORQ y (SARQconst [63] (ANDQ y:(CVTTSD2SQ x) (NOTQ (MOVQf2i x)))))
+
+(Cvt32Fto32 x) && !base.ConvertHash.MatchPos(v.Pos, nil) => (CVTTSS2SL x)
+(Cvt32Fto64 x) && !base.ConvertHash.MatchPos(v.Pos, nil) => (CVTTSS2SQ x)
+(Cvt64Fto32 x) && !base.ConvertHash.MatchPos(v.Pos, nil) => (CVTTSD2SL x)
+(Cvt64Fto64 x) && !base.ConvertHash.MatchPos(v.Pos, nil) => (CVTTSD2SQ x)
(Cvt32Fto64F ...) => (CVTSS2SD ...)
(Cvt64Fto32F ...) => (CVTSD2SS ...)
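As a rough illustration of what the new lowering changes (not part of the patch): the Go spec leaves out-of-range float-to-int conversions implementation-specific, and on amd64 the plain CVTT* instructions return the hardware "integer indefinite" value (the minimum integer) on overflow. The rules above, when enabled via `base.ConvertHash`, steer positive overflow to the maximum integer instead. A minimal sketch, assuming an amd64 build with the new lowering active:

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	big := math.MaxFloat64 // far above the int64 range
	// Old lowering: CVTTSD2SQ yields the "integer indefinite" value,
	// i.e. math.MinInt64, for any positive overflow.
	// New lowering: the and/smear/xor sequence above flips that to math.MaxInt64.
	fmt.Println(int64(big))

	small := -math.MaxFloat64 // far below the int64 range
	// Negative overflow still produces math.MinInt64 under both lowerings.
	fmt.Println(int64(small))
}
```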
diff --git a/src/cmd/compile/internal/ssa/_gen/ARM64Ops.go b/src/cmd/compile/internal/ssa/_gen/ARM64Ops.go
index 43072ae9130ede..cc3758d10956d4 100644
--- a/src/cmd/compile/internal/ssa/_gen/ARM64Ops.go
+++ b/src/cmd/compile/internal/ssa/_gen/ARM64Ops.go
@@ -144,8 +144,9 @@ func init() {
gpspsbg = gpspg | buildReg("SB")
fp = buildReg("F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31")
callerSave = gp | fp | buildReg("g") // runtime.setg (and anything calling it) may clobber g
+ r25 = buildReg("R25")
r24to25 = buildReg("R24 R25")
- r23to25 = buildReg("R23 R24 R25")
+ f16to17 = buildReg("F16 F17")
rz = buildReg("ZERO")
first16 = buildReg("R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15")
)
@@ -599,8 +600,8 @@ func init() {
aux: "Int64",
argLength: 3,
reg: regInfo{
- inputs: []regMask{gp &^ r24to25, gp &^ r24to25},
- clobbers: r24to25, // TODO: figure out needIntTemp x2
+ inputs: []regMask{gp &^ r25, gp &^ r25},
+ clobbers: r25 | f16to17, // TODO: figure out needIntTemp + x2 for floats
},
faultOnNilArg0: true,
faultOnNilArg1: true,
@@ -617,8 +618,8 @@ func init() {
aux: "Int64",
argLength: 3,
reg: regInfo{
- inputs: []regMask{gp &^ r23to25, gp &^ r23to25},
- clobbers: r23to25, // TODO: figure out needIntTemp x3
+ inputs: []regMask{gp &^ r24to25, gp &^ r24to25},
+ clobbers: r24to25 | f16to17, // TODO: figure out needIntTemp x2 + x2 for floats
clobbersArg0: true,
clobbersArg1: true,
},
diff --git a/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go b/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go
index a85a566660eee6..7e8b8bf497b8ff 100644
--- a/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go
+++ b/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go
@@ -143,6 +143,7 @@ func init() {
gp2load = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{gp}}
gpstore = regInfo{inputs: []regMask{gpspsbg, gpg}}
gpstore2 = regInfo{inputs: []regMask{gpspsbg, gpg, gpg | rz}}
+ gpoldatom = regInfo{inputs: []regMask{gpspsbg, gpg}}
gpxchg = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{gp}}
gpcas = regInfo{inputs: []regMask{gpspsbg, gpg, gpg}, outputs: []regMask{gp}}
preldreg = regInfo{inputs: []regMask{gpspg}}
@@ -431,6 +432,12 @@ func init() {
faultOnNilArg1: true,
},
+ // Atomic operations.
+ //
+ // resultNotInArgs is needed by all ops lowering to LoongArch
+ // atomic memory access instructions, because these instructions
+ // are defined to require rd != rj && rd != rk per the ISA spec.
+
// atomic loads.
// load from arg0. arg1=mem.
// returns so they can be properly ordered with other loads.
@@ -500,8 +507,8 @@ func init() {
// Atomic 32 bit AND/OR.
// *arg0 &= (|=) arg1. arg2=mem. returns nil.
- {name: "LoweredAtomicAnd32", argLength: 3, reg: gpxchg, asm: "AMANDDBW", resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
- {name: "LoweredAtomicOr32", argLength: 3, reg: gpxchg, asm: "AMORDBW", resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicAnd32", argLength: 3, reg: gpoldatom, asm: "AMANDDBW", resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicOr32", argLength: 3, reg: gpoldatom, asm: "AMORDBW", resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
// Atomic 32,64 bit AND/OR.
// *arg0 &= (|=) arg1. arg2=mem. returns . auxint must be zero.
diff --git a/src/cmd/compile/internal/ssa/_gen/Wasm.rules b/src/cmd/compile/internal/ssa/_gen/Wasm.rules
index f3bd8d8b4f18f1..f632a01109f764 100644
--- a/src/cmd/compile/internal/ssa/_gen/Wasm.rules
+++ b/src/cmd/compile/internal/ssa/_gen/Wasm.rules
@@ -55,12 +55,9 @@
(ZeroExt32to64 x:(I64Load32U _ _)) => x
(ZeroExt16to(64|32) x:(I64Load16U _ _)) => x
(ZeroExt8to(64|32|16) x:(I64Load8U _ _)) => x
-(SignExt32to64 x) && buildcfg.GOWASM.SignExt => (I64Extend32S x)
-(SignExt8to(64|32|16) x) && buildcfg.GOWASM.SignExt => (I64Extend8S x)
-(SignExt16to(64|32) x) && buildcfg.GOWASM.SignExt => (I64Extend16S x)
-(SignExt32to64 x) => (I64ShrS (I64Shl x (I64Const [32])) (I64Const [32]))
-(SignExt16to(64|32) x) => (I64ShrS (I64Shl x (I64Const [48])) (I64Const [48]))
-(SignExt8to(64|32|16) x) => (I64ShrS (I64Shl x (I64Const [56])) (I64Const [56]))
+(SignExt32to64 x) => (I64Extend32S x)
+(SignExt8to(64|32|16) x) => (I64Extend8S x)
+(SignExt16to(64|32) x) => (I64Extend16S x)
(ZeroExt32to64 x) => (I64And x (I64Const [0xffffffff]))
(ZeroExt16to(64|32) x) => (I64And x (I64Const [0xffff]))
(ZeroExt8to(64|32|16) x) => (I64And x (I64Const [0xff]))
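For context (not part of the patch): with the GOWASM sign-extension fallback removed, small signed widenings always lower to the dedicated Wasm sign-extension instructions rather than a shift pair. A minimal sketch of the kind of code affected:

```go
// widen now compiles to a single i64.extend8_s on wasm, since the
// shift-based fallback rules have been removed.
func widen(b int8) int64 {
	return int64(b)
}
```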
diff --git a/src/cmd/compile/internal/ssa/_gen/generic.rules b/src/cmd/compile/internal/ssa/_gen/generic.rules
index 58872ca85a3961..af9c24f53fd245 100644
--- a/src/cmd/compile/internal/ssa/_gen/generic.rules
+++ b/src/cmd/compile/internal/ssa/_gen/generic.rules
@@ -50,10 +50,10 @@
(Cvt32to64F (Const32 [c])) => (Const64F [float64(c)])
(Cvt64to32F (Const64 [c])) => (Const32F [float32(c)])
(Cvt64to64F (Const64 [c])) => (Const64F [float64(c)])
-(Cvt32Fto32 (Const32F [c])) => (Const32 [int32(c)])
-(Cvt32Fto64 (Const32F [c])) => (Const64 [int64(c)])
-(Cvt64Fto32 (Const64F [c])) => (Const32 [int32(c)])
-(Cvt64Fto64 (Const64F [c])) => (Const64 [int64(c)])
+(Cvt32Fto32 (Const32F [c])) && c >= -1<<31 && c < 1<<31 => (Const32 [int32(c)])
+(Cvt32Fto64 (Const32F [c])) && c >= -1<<63 && c < 1<<63 => (Const64 [int64(c)])
+(Cvt64Fto32 (Const64F [c])) && c >= -1<<31 && c < 1<<31 => (Const32 [int32(c)])
+(Cvt64Fto64 (Const64F [c])) && c >= -1<<63 && c < 1<<63 => (Const64 [int64(c)])
(Round32F x:(Const32F)) => x
(Round64F x:(Const64F)) => x
(CvtBoolToUint8 (ConstBool [false])) => (Const8 [0])
@@ -347,6 +347,22 @@
(OrB ((Less|Leq)16U (Const16 [c]) x) (Leq16U x (Const16 [d]))) && uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d) => ((Less|Leq)16U (Const16 [c-d-1]) (Sub16 x (Const16 [d+1])))
(OrB ((Less|Leq)8U (Const8 [c]) x) (Leq8U x (Const8 [d]))) && uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d) => ((Less|Leq)8U (Const8 [c-d-1]) (Sub8 x (Const8 [d+1])))
+// NaN check: ( x != x || x (>|>=|<|<=) c ) -> ( !(c (>=|>|<=|<) x) )
+(OrB (Neq64F x x) ((Less|Leq)64F x y:(Const64F [c]))) => (Not ((Leq|Less)64F y x))
+(OrB (Neq64F x x) ((Less|Leq)64F y:(Const64F [c]) x)) => (Not ((Leq|Less)64F x y))
+(OrB (Neq32F x x) ((Less|Leq)32F x y:(Const32F [c]))) => (Not ((Leq|Less)32F y x))
+(OrB (Neq32F x x) ((Less|Leq)32F y:(Const32F [c]) x)) => (Not ((Leq|Less)32F x y))
+
+// NaN check: ( x != x || Abs(x) (>|>=|<|<=) c ) -> ( !(c (>=|>|<=|<) Abs(x) )
+(OrB (Neq64F x x) ((Less|Leq)64F abs:(Abs x) y:(Const64F [c]))) => (Not ((Leq|Less)64F y abs))
+(OrB (Neq64F x x) ((Less|Leq)64F y:(Const64F [c]) abs:(Abs x))) => (Not ((Leq|Less)64F abs y))
+
+// NaN check: ( x != x || -x (>|>=|<|<=) c ) -> ( !(c (>=|>|<=|<) -x) )
+(OrB (Neq64F x x) ((Less|Leq)64F neg:(Neg64F x) y:(Const64F [c]))) => (Not ((Leq|Less)64F y neg))
+(OrB (Neq64F x x) ((Less|Leq)64F y:(Const64F [c]) neg:(Neg64F x))) => (Not ((Leq|Less)64F neg y))
+(OrB (Neq32F x x) ((Less|Leq)32F neg:(Neg32F x) y:(Const32F [c]))) => (Not ((Leq|Less)32F y neg))
+(OrB (Neq32F x x) ((Less|Leq)32F y:(Const32F [c]) neg:(Neg32F x))) => (Not ((Leq|Less)32F neg y))
+
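A minimal source-level sketch of the pattern these NaN-check rules (together with fuseNanCheck, added later in this change in fuse_comparisons.go) target; the names are illustrative only:

```go
// reject reports whether v is NaN or below the threshold. Because every
// ordered comparison involving NaN is false, the two tests can be fused:
// if v is NaN then 0.5 <= v is false, so !(0.5 <= v) already covers the
// v != v branch, and the whole condition becomes a single comparison.
func reject(v float64) bool {
	return v != v || v < 0.5 // rewritten to !(0.5 <= v)
}
```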
// Canonicalize x-const to x+(-const)
(Sub64 x (Const64 [c])) && x.Op != OpConst64 => (Add64 (Const64 [-c]) x)
(Sub32 x (Const32 [c])) && x.Op != OpConst32 => (Add32 (Const32 [-c]) x)
@@ -989,6 +1005,10 @@
(Const64 [0])
(Const64 [0]))
+// Special rule to help constant slicing; len > 0 implies cap > 0 implies Slicemask is all 1
+(SliceMake (AddPtr x (And64 y (Slicemask _))) w:(Const64 [c]) z) && c > 0 => (SliceMake (AddPtr x y) w z)
+(SliceMake (AddPtr x (And32 y (Slicemask _))) w:(Const32 [c]) z) && c > 0 => (SliceMake (AddPtr x y) w z)
+
// interface ops
(ConstInterface) =>
(IMake
@@ -2045,28 +2065,32 @@
// for rewriting results of some late-expanded rewrites (below)
(SelectN [n] m:(MakeResult ___)) => m.Args[n]
+// TODO(matloob): Try out having non-zeroing mallocs for pointerless
+// memory, and leaving the zeroing here. Then the compiler can remove
+// the zeroing if the user has explicit writes to the whole object.
+
// for late-expanded calls, recognize newobject and remove zeroing and nilchecks
-(Zero (SelectN [0] call:(StaticLECall _ _)) mem:(SelectN [1] call))
- && isSameCall(call.Aux, "runtime.newobject")
+(Zero (SelectN [0] call:(StaticLECall ___)) mem:(SelectN [1] call))
+ && isMalloc(call.Aux)
=> mem
-(Store (SelectN [0] call:(StaticLECall _ _)) x mem:(SelectN [1] call))
+(Store (SelectN [0] call:(StaticLECall ___)) x mem:(SelectN [1] call))
&& isConstZero(x)
- && isSameCall(call.Aux, "runtime.newobject")
+ && isMalloc(call.Aux)
=> mem
-(Store (OffPtr (SelectN [0] call:(StaticLECall _ _))) x mem:(SelectN [1] call))
+(Store (OffPtr (SelectN [0] call:(StaticLECall ___))) x mem:(SelectN [1] call))
&& isConstZero(x)
- && isSameCall(call.Aux, "runtime.newobject")
+ && isMalloc(call.Aux)
=> mem
-(NilCheck ptr:(SelectN [0] call:(StaticLECall _ _)) _)
- && isSameCall(call.Aux, "runtime.newobject")
+(NilCheck ptr:(SelectN [0] call:(StaticLECall ___)) _)
+ && isMalloc(call.Aux)
&& warnRule(fe.Debug_checknil(), v, "removed nil check")
=> ptr
-(NilCheck ptr:(OffPtr (SelectN [0] call:(StaticLECall _ _))) _)
- && isSameCall(call.Aux, "runtime.newobject")
+(NilCheck ptr:(OffPtr (SelectN [0] call:(StaticLECall ___))) _)
+ && isMalloc(call.Aux)
&& warnRule(fe.Debug_checknil(), v, "removed nil check")
=> ptr
@@ -2079,6 +2103,9 @@
&& warnRule(fe.Debug_checknil(), v, "removed nil check")
=> ptr
+// .dict args are always non-nil.
+(NilCheck ptr:(Arg {sym}) _) && isDictArgSym(sym) => ptr
+
// Nil checks of nil checks are redundant.
// See comment at the end of https://go-review.googlesource.com/c/go/+/537775.
(NilCheck ptr:(NilCheck _ _) _ ) => ptr
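For illustration (hypothetical names, not from the patch): the rules above now recognize the size-specialized malloc entry points as well as runtime.newobject, so redundant zero stores into freshly allocated memory are dropped in more cases. A minimal sketch:

```go
type record struct {
	id  int64
	buf [48]byte
}

func newRecord() *record {
	r := new(record) // lowered to runtime.newobject or a specialized malloc; both return zeroed memory
	*r = record{}    // redundant zeroing; the Zero/Store rules above fold it into the allocation
	return r
}
```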
diff --git a/src/cmd/compile/internal/ssa/deadstore.go b/src/cmd/compile/internal/ssa/deadstore.go
index 9e67e8339992c7..d0adff788c0a4f 100644
--- a/src/cmd/compile/internal/ssa/deadstore.go
+++ b/src/cmd/compile/internal/ssa/deadstore.go
@@ -7,6 +7,7 @@ package ssa
import (
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
+ "cmd/internal/obj"
)
// dse does dead-store elimination on the Function.
@@ -213,7 +214,7 @@ func elimDeadAutosGeneric(f *Func) {
case OpAddr, OpLocalAddr:
// Propagate the address if it points to an auto.
n, ok := v.Aux.(*ir.Name)
- if !ok || n.Class != ir.PAUTO {
+ if !ok || (n.Class != ir.PAUTO && !isABIInternalParam(f, n)) {
return
}
if addr[v] == nil {
@@ -224,7 +225,7 @@ func elimDeadAutosGeneric(f *Func) {
case OpVarDef:
// v should be eliminated if we eliminate the auto.
n, ok := v.Aux.(*ir.Name)
- if !ok || n.Class != ir.PAUTO {
+ if !ok || (n.Class != ir.PAUTO && !isABIInternalParam(f, n)) {
return
}
if elim[v] == nil {
@@ -240,7 +241,7 @@ func elimDeadAutosGeneric(f *Func) {
// may not be used by the inline code, but will be used by
// panic processing).
n, ok := v.Aux.(*ir.Name)
- if !ok || n.Class != ir.PAUTO {
+ if !ok || (n.Class != ir.PAUTO && !isABIInternalParam(f, n)) {
return
}
if !used.Has(n) {
@@ -373,7 +374,7 @@ func elimUnreadAutos(f *Func) {
if !ok {
continue
}
- if n.Class != ir.PAUTO {
+ if n.Class != ir.PAUTO && !isABIInternalParam(f, n) {
continue
}
@@ -413,3 +414,16 @@ func elimUnreadAutos(f *Func) {
store.Op = OpCopy
}
}
+
+// isABIInternalParam returns whether n is a parameter of an ABIInternal
+// function. For dead store elimination, we can treat parameters the same
+// way as autos. Storing to a parameter can be removed if it is not read
+// or address-taken.
+//
+// We check ABI here because for a cgo_unsafe_arg function (which is ABI0),
+// all the args are effectively address-taken, but do not necessarily have
+// an Addr or LocalAddr op. We could probably just check for cgo_unsafe_arg,
+// but ABIInternal is mostly what matters.
+func isABIInternalParam(f *Func, n *ir.Name) bool {
+ return n.Class == ir.PPARAM && f.ABISelf.Which() == obj.ABIInternal
+}
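A minimal sketch (illustrative only) of the kind of store this change lets dead-store elimination remove: a write to a parameter that is never read back and never has its address taken can now be dropped just like a write to a local:

```go
// The assignment to n is dead: n is an ABIInternal parameter, it is not
// read after the store and its address is never taken, so the store can
// be eliminated.
func discard(n int) int {
	n = 0
	return 42
}
```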
diff --git a/src/cmd/compile/internal/ssa/func.go b/src/cmd/compile/internal/ssa/func.go
index 5736f0b8126484..fc8cb3f2fef0af 100644
--- a/src/cmd/compile/internal/ssa/func.go
+++ b/src/cmd/compile/internal/ssa/func.go
@@ -102,6 +102,7 @@ func (c *Config) NewFunc(fe Frontend, cache *Cache) *Func {
NamedValues: make(map[LocalSlot][]*Value),
CanonicalLocalSlots: make(map[LocalSlot]*LocalSlot),
CanonicalLocalSplits: make(map[LocalSlotSplitKey]*LocalSlot),
+ OwnAux: &AuxCall{},
}
}
diff --git a/src/cmd/compile/internal/ssa/func_test.go b/src/cmd/compile/internal/ssa/func_test.go
index a1e639e0486b62..4639d674e145fa 100644
--- a/src/cmd/compile/internal/ssa/func_test.go
+++ b/src/cmd/compile/internal/ssa/func_test.go
@@ -250,6 +250,11 @@ func Exit(arg string) ctrl {
return ctrl{BlockExit, arg, []string{}}
}
+// Ret specifies a BlockRet.
+func Ret(arg string) ctrl {
+ return ctrl{BlockRet, arg, []string{}}
+}
+
// Eq specifies a BlockAMD64EQ.
func Eq(cond, sub, alt string) ctrl {
return ctrl{BlockAMD64EQ, cond, []string{sub, alt}}
diff --git a/src/cmd/compile/internal/ssa/fuse.go b/src/cmd/compile/internal/ssa/fuse.go
index 68defde7b4b956..0cee91b532b101 100644
--- a/src/cmd/compile/internal/ssa/fuse.go
+++ b/src/cmd/compile/internal/ssa/fuse.go
@@ -9,8 +9,8 @@ import (
"fmt"
)
-// fuseEarly runs fuse(f, fuseTypePlain|fuseTypeIntInRange).
-func fuseEarly(f *Func) { fuse(f, fuseTypePlain|fuseTypeIntInRange) }
+// fuseEarly runs fuse(f, fuseTypePlain|fuseTypeIntInRange|fuseTypeNanCheck).
+func fuseEarly(f *Func) { fuse(f, fuseTypePlain|fuseTypeIntInRange|fuseTypeNanCheck) }
// fuseLate runs fuse(f, fuseTypePlain|fuseTypeIf|fuseTypeBranchRedirect).
func fuseLate(f *Func) { fuse(f, fuseTypePlain|fuseTypeIf|fuseTypeBranchRedirect) }
@@ -21,6 +21,7 @@ const (
fuseTypePlain fuseType = 1 << iota
fuseTypeIf
fuseTypeIntInRange
+ fuseTypeNanCheck
fuseTypeBranchRedirect
fuseTypeShortCircuit
)
@@ -38,7 +39,10 @@ func fuse(f *Func, typ fuseType) {
changed = fuseBlockIf(b) || changed
}
if typ&fuseTypeIntInRange != 0 {
- changed = fuseIntegerComparisons(b) || changed
+ changed = fuseIntInRange(b) || changed
+ }
+ if typ&fuseTypeNanCheck != 0 {
+ changed = fuseNanCheck(b) || changed
}
if typ&fuseTypePlain != 0 {
changed = fuseBlockPlain(b) || changed
diff --git a/src/cmd/compile/internal/ssa/fuse_comparisons.go b/src/cmd/compile/internal/ssa/fuse_comparisons.go
index f5fb84b0d73532..b6eb8fcb90dfbc 100644
--- a/src/cmd/compile/internal/ssa/fuse_comparisons.go
+++ b/src/cmd/compile/internal/ssa/fuse_comparisons.go
@@ -4,21 +4,36 @@
package ssa
-// fuseIntegerComparisons optimizes inequalities such as '1 <= x && x < 5',
-// which can be optimized to 'unsigned(x-1) < 4'.
-//
-// Look for branch structure like:
+// fuseIntInRange transforms integer range checks to remove the short-circuit operator. For example,
+// it would convert `if 1 <= x && x < 5 { ... }` into `if (1 <= x) & (x < 5) { ... }`. Rewrite rules
+// can then optimize these into unsigned range checks, `if unsigned(x-1) < 4 { ... }` in this case.
+func fuseIntInRange(b *Block) bool {
+ return fuseComparisons(b, canOptIntInRange)
+}
+
+// fuseNanCheck replaces the short-circuit operators between NaN checks and comparisons with
+// constants. For example, it would transform `if x != x || x > 1.0 { ... }` into
+// `if (x != x) | (x > 1.0) { ... }`. Rewrite rules can then merge the NaN check with the comparison,
+// in this case generating `if !(x <= 1.0) { ... }`.
+func fuseNanCheck(b *Block) bool {
+ return fuseComparisons(b, canOptNanCheck)
+}
+
+// fuseComparisons looks for control graphs that match this pattern:
//
-// p
+// p - predecessor
// |\
-// | b
+// | b - block
// |/ \
-// s0 s1
+// s0 s1 - successors
//
-// In our example, p has control '1 <= x', b has control 'x < 5',
-// and s0 and s1 are the if and else results of the comparison.
+// This pattern is typical for if statements such as `if x || y { ... }` and `if x && y { ... }`.
//
-// This will be optimized into:
+// If canOptControls returns true when passed the control values for p and b, then fuseComparisons
+// will try to convert p into a plain block with only one successor (b) and modify b's control
+// value to include p's control value (effectively causing b to be speculatively executed).
+//
+// This transformation results in a control graph that will now look like this:
//
// p
// \
@@ -26,9 +41,12 @@ package ssa
// / \
// s0 s1
//
-// where b has the combined control value 'unsigned(x-1) < 4'.
// Later passes will then fuse p and b.
-func fuseIntegerComparisons(b *Block) bool {
+//
+// In other words `if x || y { ... }` will become `if x | y { ... }` and `if x && y { ... }` will
+// become `if x & y { ... }`. This is a useful transformation because we can then use rewrite
+// rules to optimize `x | y` and `x & y`.
+func fuseComparisons(b *Block, canOptControls func(a, b *Value, op Op) bool) bool {
if len(b.Preds) != 1 {
return false
}
@@ -45,14 +63,6 @@ func fuseIntegerComparisons(b *Block) bool {
return false
}
- // Check if the control values combine to make an integer inequality that
- // can be further optimized later.
- bc := b.Controls[0]
- pc := p.Controls[0]
- if !areMergeableInequalities(bc, pc) {
- return false
- }
-
// If the first (true) successors match then we have a disjunction (||).
// If the second (false) successors match then we have a conjunction (&&).
for i, op := range [2]Op{OpOrB, OpAndB} {
@@ -60,6 +70,13 @@ func fuseIntegerComparisons(b *Block) bool {
continue
}
+ // Check if the control values can be usefully combined.
+ bc := b.Controls[0]
+ pc := p.Controls[0]
+ if !canOptControls(bc, pc, op) {
+ return false
+ }
+
// TODO(mundaym): should we also check the cost of executing b?
// Currently we might speculatively execute b even if b contains
// a lot of instructions. We could just check that len(b.Values)
@@ -125,7 +142,7 @@ func isUnsignedInequality(v *Value) bool {
return false
}
-func areMergeableInequalities(x, y *Value) bool {
+func canOptIntInRange(x, y *Value, op Op) bool {
// We need both inequalities to be either in the signed or unsigned domain.
// TODO(mundaym): it would also be good to merge when we have an Eq op that
// could be transformed into a Less/Leq. For example in the unsigned
@@ -155,3 +172,60 @@ func areMergeableInequalities(x, y *Value) bool {
}
return false
}
+
+// canOptNanCheck reports whether one of the arguments is a NaN check and the other
+// is a comparison with a constant that can be combined together.
+//
+// Examples (c must be a constant):
+//
+// v != v || v < c => !(c <= v)
+// v != v || v <= c => !(c < v)
+// v != v || c < v => !(v <= c)
+// v != v || c <= v => !(v < c)
+func canOptNanCheck(x, y *Value, op Op) bool {
+ if op != OpOrB {
+ return false
+ }
+
+ for i := 0; i <= 1; i, x, y = i+1, y, x {
+ if len(x.Args) != 2 || x.Args[0] != x.Args[1] {
+ continue
+ }
+ v := x.Args[0]
+ switch x.Op {
+ case OpNeq64F:
+ if y.Op != OpLess64F && y.Op != OpLeq64F {
+ return false
+ }
+ for j := 0; j <= 1; j++ {
+ a, b := y.Args[j], y.Args[j^1]
+ if a.Op != OpConst64F {
+ continue
+ }
+ // Sign bit operations do not affect NaN check results. This special case allows us
+ // to optimize statements like `if v != v || Abs(v) > c { ... }`.
+ if (b.Op == OpAbs || b.Op == OpNeg64F) && b.Args[0] == v {
+ return true
+ }
+ return b == v
+ }
+ case OpNeq32F:
+ if y.Op != OpLess32F && y.Op != OpLeq32F {
+ return false
+ }
+ for j := 0; j <= 1; j++ {
+ a, b := y.Args[j], y.Args[j^1]
+ if a.Op != OpConst32F {
+ continue
+ }
+ // Sign bit operations do not affect NaN check results. This special case allows us
+ // to optimize statements like `if v != v || -v > c { ... }`.
+ if b.Op == OpNeg32F && b.Args[0] == v {
+ return true
+ }
+ return b == v
+ }
+ }
+ }
+ return false
+}
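A minimal sketch of the integer-range pattern that fuseIntInRange (the renamed fuseIntegerComparisons) continues to handle; the NaN-check case for fuseNanCheck is illustrated alongside the generic rules earlier in this change:

```go
// isDigit is the classic range-check shape: after the && short-circuit is
// replaced by a bitwise AND, rewrite rules turn the pair of signed
// comparisons into a single unsigned check, roughly uint(c-'0') <= 9.
func isDigit(c int) bool {
	return '0' <= c && c <= '9'
}
```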
diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go
index 92adf5341b2574..061f1333382af4 100644
--- a/src/cmd/compile/internal/ssa/opGen.go
+++ b/src/cmd/compile/internal/ssa/opGen.go
@@ -23199,10 +23199,10 @@ var opcodeTable = [...]opInfo{
faultOnNilArg1: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 310378495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R26 R30
- {1, 310378495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R26 R30
+ {0, 318767103}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R26 R30
+ {1, 318767103}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R26 R30
},
- clobbers: 25165824, // R24 R25
+ clobbers: 422212481843200, // R25 F16 F17
},
},
{
@@ -23213,10 +23213,10 @@ var opcodeTable = [...]opInfo{
faultOnNilArg1: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 306184191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R26 R30
- {1, 306184191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R26 R30
+ {0, 310378495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R26 R30
+ {1, 310378495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R26 R30
},
- clobbers: 29360128, // R23 R24 R25
+ clobbers: 422212490231808, // R24 R25 F16 F17
clobbersArg0: true,
clobbersArg1: true,
},
@@ -26375,9 +26375,6 @@ var opcodeTable = [...]opInfo{
{1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
{0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
},
- outputs: []outputInfo{
- {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
- },
},
},
{
@@ -26392,9 +26389,6 @@ var opcodeTable = [...]opInfo{
{1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
{0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
},
- outputs: []outputInfo{
- {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
- },
},
},
{
diff --git a/src/cmd/compile/internal/ssa/prove.go b/src/cmd/compile/internal/ssa/prove.go
index 309229b4d753b7..c9bce23b286476 100644
--- a/src/cmd/compile/internal/ssa/prove.go
+++ b/src/cmd/compile/internal/ssa/prove.go
@@ -1240,6 +1240,173 @@ func (ft *factsTable) cleanup(f *Func) {
f.Cache.freeBoolSlice(ft.recurseCheck)
}
+// addSlicesOfSameLen finds OpPhi slice values in the same block that always
+// have the same length and adds the equality relationship between them to ft.
+// If two slices start out with the same length and decrease in length by the
+// same amount on each round of the loop (or in the if block), then we conclude
+// that their lengths are always equal.
+//
+// See https://go.dev/issues/75144
+//
+// In fact, we are just propagating the equality
+//
+// if len(a) == len(b) { // from here
+// for len(a) > 4 {
+// a = a[4:]
+// b = b[4:]
+// }
+// if len(a) == len(b) { // to here
+// return true
+// }
+// }
+//
+// or change the for to if:
+//
+// if len(a) == len(b) { // from here
+// if len(a) > 4 {
+// a = a[4:]
+// b = b[4:]
+// }
+// if len(a) == len(b) { // to here
+// return true
+// }
+// }
+func addSlicesOfSameLen(ft *factsTable, b *Block) {
+ // Let w point to the first value we're interested in; then we only
+ // process values that appear to have the same length as w, looping
+ // only once. This should be enough in most cases. u is similar to w;
+ // see the comment for predIndex.
+ var u, w *Value
+ var i, j, k sliceInfo
+ isInterested := func(v *Value) bool {
+ j = getSliceInfo(v)
+ return j.sliceWhere != sliceUnknown
+ }
+ for _, v := range b.Values {
+ if v.Uses == 0 {
+ continue
+ }
+ if v.Op == OpPhi && len(v.Args) == 2 && ft.lens[v.ID] != nil && isInterested(v) {
+ if j.predIndex == 1 && ft.lens[v.Args[0].ID] != nil {
+ // found v = (Phi x (SliceMake _ (Add64 (Const64 [n]) (SliceLen x)) _))) or
+ // v = (Phi x (SliceMake _ (Add64 (Const64 [n]) (SliceLen v)) _)))
+ if w == nil {
+ k = j
+ w = v
+ continue
+ }
+ // propagate the equality
+ if j == k && ft.orderS.Equal(ft.lens[v.Args[0].ID], ft.lens[w.Args[0].ID]) {
+ ft.update(b, ft.lens[v.ID], ft.lens[w.ID], signed, eq)
+ }
+ } else if j.predIndex == 0 && ft.lens[v.Args[1].ID] != nil {
+ // found v = (Phi (SliceMake _ (Add64 (Const64 [n]) (SliceLen x)) _)) x) or
+ // v = (Phi (SliceMake _ (Add64 (Const64 [n]) (SliceLen v)) _)) x)
+ if u == nil {
+ i = j
+ u = v
+ continue
+ }
+ // propagate the equality
+ if j == i && ft.orderS.Equal(ft.lens[v.Args[1].ID], ft.lens[u.Args[1].ID]) {
+ ft.update(b, ft.lens[v.ID], ft.lens[u.ID], signed, eq)
+ }
+ }
+ }
+ }
+}
+
+type sliceWhere int
+
+const (
+ sliceUnknown sliceWhere = iota
+ sliceInFor
+ sliceInIf
+)
+
+// predIndex is used to indicate the branch represented by the predecessor
+// block in which the slicing operation occurs.
+type predIndex int
+
+type sliceInfo struct {
+ lengthDiff int64
+ sliceWhere
+ predIndex
+}
+
+// getSliceInfo returns the negative increment of the slice length in a slice
+// operation by examining the Phi node at the merge block. We are only interested
+// in the slice operation if it is inside a for block or an if block.
+// Otherwise it returns sliceInfo{0, sliceUnknown, 0}.
+//
+// For the following for block:
+//
+// for len(a) > 4 {
+// a = a[4:]
+// }
+//
+// vp = (Phi v3 v9)
+// v5 = (SliceLen vp)
+// v7 = (Add64 (Const64 [-4]) v5)
+// v9 = (SliceMake _ v7 _)
+//
+// returns sliceInfo{-4, sliceInFor, 1}
+//
+// For a subsequent merge block after an if block:
+//
+// if len(a) > 4 {
+// a = a[4:]
+// }
+// a // here
+//
+// vp = (Phi v3 v9)
+// v5 = (SliceLen v3)
+// v7 = (Add64 (Const64 [-4]) v5)
+// v9 = (SliceMake _ v7 _)
+//
+// returns sliceInfo{-4, sliceInIf, 1}
+//
+// Returns sliceInfo{0, sliceUnknown, 0} if it is not the slice
+// operation we are interested in.
+func getSliceInfo(vp *Value) (inf sliceInfo) {
+ if vp.Op != OpPhi || len(vp.Args) != 2 {
+ return
+ }
+ var i predIndex
+ var l *Value // length for OpSliceMake
+ if vp.Args[0].Op != OpSliceMake && vp.Args[1].Op == OpSliceMake {
+ l = vp.Args[1].Args[1]
+ i = 1
+ } else if vp.Args[0].Op == OpSliceMake && vp.Args[1].Op != OpSliceMake {
+ l = vp.Args[0].Args[1]
+ i = 0
+ } else {
+ return
+ }
+ var op Op
+ switch l.Op {
+ case OpAdd64:
+ op = OpConst64
+ case OpAdd32:
+ op = OpConst32
+ default:
+ return
+ }
+ if l.Args[0].Op == op && l.Args[1].Op == OpSliceLen && l.Args[1].Args[0] == vp {
+ return sliceInfo{l.Args[0].AuxInt, sliceInFor, i}
+ }
+ if l.Args[1].Op == op && l.Args[0].Op == OpSliceLen && l.Args[0].Args[0] == vp {
+ return sliceInfo{l.Args[1].AuxInt, sliceInFor, i}
+ }
+ if l.Args[0].Op == op && l.Args[1].Op == OpSliceLen && l.Args[1].Args[0] == vp.Args[1-i] {
+ return sliceInfo{l.Args[0].AuxInt, sliceInIf, i}
+ }
+ if l.Args[1].Op == op && l.Args[0].Op == OpSliceLen && l.Args[0].Args[0] == vp.Args[1-i] {
+ return sliceInfo{l.Args[1].AuxInt, sliceInIf, i}
+ }
+ return
+}
+
// prove removes redundant BlockIf branches that can be inferred
// from previous dominating comparisons.
//
@@ -1505,6 +1672,9 @@ func prove(f *Func) {
addBranchRestrictions(ft, parent, branch)
}
+ // Add equality facts for slices of the same length, starting from the current block.
+ addSlicesOfSameLen(ft, node.block)
+
if ft.unsat {
// node.block is unreachable.
// Remove it and don't visit
@@ -1766,7 +1936,8 @@ func (ft *factsTable) flowLimit(v *Value) bool {
b := ft.limits[v.Args[1].ID]
sub := ft.newLimit(v, a.sub(b, uint(v.Type.Size())*8))
mod := ft.detectSignedMod(v)
- return sub || mod
+ inferred := ft.detectSliceLenRelation(v)
+ return sub || mod || inferred
case OpNeg64, OpNeg32, OpNeg16, OpNeg8:
a := ft.limits[v.Args[0].ID]
bitsize := uint(v.Type.Size()) * 8
@@ -1947,6 +2118,68 @@ func (ft *factsTable) detectSignedMod(v *Value) bool {
// TODO: non-powers-of-2
return false
}
+
+// detectSliceLenRelation matches the pattern where
+// 1. v := slicelen - index, OR v := slicecap - index
+// AND
+// 2. index <= slicelen - K
+// THEN
+//
+// slicecap - index >= slicelen - index >= K
+//
+// Note that "index" is not used for indexing in this pattern, but
+// in the motivating example (chunked slice iteration) it is.
+func (ft *factsTable) detectSliceLenRelation(v *Value) (inferred bool) {
+ if v.Op != OpSub64 {
+ return false
+ }
+
+ if !(v.Args[0].Op == OpSliceLen || v.Args[0].Op == OpSliceCap) {
+ return false
+ }
+
+ slice := v.Args[0].Args[0]
+ index := v.Args[1]
+
+ for o := ft.orderings[index.ID]; o != nil; o = o.next {
+ if o.d != signed {
+ continue
+ }
+ or := o.r
+ if or != lt && or != lt|eq {
+ continue
+ }
+ ow := o.w
+ if ow.Op != OpAdd64 && ow.Op != OpSub64 {
+ continue
+ }
+ var lenOffset *Value
+ if bound := ow.Args[0]; bound.Op == OpSliceLen && bound.Args[0] == slice {
+ lenOffset = ow.Args[1]
+ } else if bound := ow.Args[1]; bound.Op == OpSliceLen && bound.Args[0] == slice {
+ lenOffset = ow.Args[0]
+ }
+ if lenOffset == nil || lenOffset.Op != OpConst64 {
+ continue
+ }
+ K := lenOffset.AuxInt
+ if ow.Op == OpAdd64 {
+ K = -K
+ }
+ if K < 0 {
+ continue
+ }
+ if or == lt {
+ K++
+ }
+ if K < 0 { // We hate thinking about overflow
+ continue
+ }
+ inferred = inferred || ft.signedMin(v, K)
+ }
+ return inferred
+}
+
func (ft *factsTable) detectSignedModByPowerOfTwo(v *Value) bool {
// We're looking for:
//
@@ -2174,6 +2407,76 @@ func unsignedSubUnderflows(a, b uint64) bool {
return a < b
}
+// checkForChunkedIndexBounds looks for index expressions of the form
+// A[i+delta] where delta < K and i <= len(A)-K. That is, this is a chunked
+// iteration where the index is not directly compared to the length.
+// If isReslice, then delta can be equal to K.
+func checkForChunkedIndexBounds(ft *factsTable, b *Block, index, bound *Value, isReslice bool) bool {
+ if bound.Op != OpSliceLen && bound.Op != OpSliceCap {
+ return false
+ }
+
+ // this is a slice bounds check against len or capacity,
+ // and refers back to a prior check against length, which
+ // will also work for the cap since that is not smaller
+ // than the length.
+
+ slice := bound.Args[0]
+ lim := ft.limits[index.ID]
+ if lim.min < 0 {
+ return false
+ }
+ i, delta := isConstDelta(index)
+ if i == nil {
+ return false
+ }
+ if delta < 0 {
+ return false
+ }
+ // special case for blocked iteration over a slice.
+ // slicelen > i + delta && <==== if clauses above
+ // && index >= 0 <==== if clause above
+ // delta >= 0 && <==== if clause above
+ // slicelen-K >/>= x <==== checked below
+ // && K >=/> delta <==== checked below
+ // then v > w
+ // example: i <=/< len - 4/3 means i+{0,1,2,3} are legal indices
+ for o := ft.orderings[i.ID]; o != nil; o = o.next {
+ if o.d != signed {
+ continue
+ }
+ if ow := o.w; ow.Op == OpAdd64 {
+ var lenOffset *Value
+ if bound := ow.Args[0]; bound.Op == OpSliceLen && bound.Args[0] == slice {
+ lenOffset = ow.Args[1]
+ } else if bound := ow.Args[1]; bound.Op == OpSliceLen && bound.Args[0] == slice {
+ lenOffset = ow.Args[0]
+ }
+ if lenOffset == nil || lenOffset.Op != OpConst64 {
+ continue
+ }
+ if K := -lenOffset.AuxInt; K >= 0 {
+ or := o.r
+ if isReslice {
+ K++
+ }
+ if or == lt {
+ or = lt | eq
+ K++
+ }
+ if K < 0 { // We hate thinking about overflow
+ continue
+ }
+
+ if delta < K && or == lt|eq {
+ return true
+ }
+ }
+ }
+ }
+ return false
+}
+
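A minimal sketch (illustrative, not from the patch) of the chunked-iteration shape that detectSliceLenRelation and checkForChunkedIndexBounds are aimed at; whether the bounds checks are actually removed depends on the prove pass:

```go
// The loop condition establishes i <= len(a)-4, so the indexes a[i]..a[i+3]
// (delta < K) and the reslice a[i:i+4] (delta == K, the isReslice case) are
// provably in bounds.
func sum4(a []int) (s int) {
	for i := 0; i <= len(a)-4; i += 4 {
		s += a[i] + a[i+1] + a[i+2] + a[i+3]
	}
	return s
}

func chunks(a []byte) [][]byte {
	var out [][]byte
	for i := 0; i <= len(a)-4; i += 4 {
		out = append(out, a[i:i+4])
	}
	return out
}
```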
func addLocalFacts(ft *factsTable, b *Block) {
// Propagate constant ranges among values in this block.
// We do this before the second loop so that we have the
@@ -2285,6 +2588,20 @@ func addLocalFacts(ft *factsTable, b *Block) {
if v.Args[0].Op == OpSliceMake {
ft.update(b, v, v.Args[0].Args[2], signed, eq)
}
+ case OpIsInBounds:
+ if checkForChunkedIndexBounds(ft, b, v.Args[0], v.Args[1], false) {
+ if b.Func.pass.debug > 0 {
+ b.Func.Warnl(v.Pos, "Proved %s for blocked indexing", v.Op)
+ }
+ ft.booleanTrue(v)
+ }
+ case OpIsSliceInBounds:
+ if checkForChunkedIndexBounds(ft, b, v.Args[0], v.Args[1], true) {
+ if b.Func.pass.debug > 0 {
+ b.Func.Warnl(v.Pos, "Proved %s for blocked reslicing", v.Op)
+ }
+ ft.booleanTrue(v)
+ }
case OpPhi:
addLocalFactsPhi(ft, v)
}
@@ -2382,24 +2699,38 @@ func simplifyBlock(sdom SparseTree, ft *factsTable, b *Block) {
switch v.Op {
case OpSlicemask:
// Replace OpSlicemask operations in b with constants where possible.
- x, delta := isConstDelta(v.Args[0])
- if x == nil {
+ cap := v.Args[0]
+ x, delta := isConstDelta(cap)
+ if x != nil {
+ // slicemask(x + y)
+ // if x is larger than -y (y is negative), then slicemask is -1.
+ lim := ft.limits[x.ID]
+ if lim.umin > uint64(-delta) {
+ if cap.Op == OpAdd64 {
+ v.reset(OpConst64)
+ } else {
+ v.reset(OpConst32)
+ }
+ if b.Func.pass.debug > 0 {
+ b.Func.Warnl(v.Pos, "Proved slicemask not needed")
+ }
+ v.AuxInt = -1
+ }
break
}
- // slicemask(x + y)
- // if x is larger than -y (y is negative), then slicemask is -1.
- lim := ft.limits[x.ID]
- if lim.umin > uint64(-delta) {
- if v.Args[0].Op == OpAdd64 {
+ lim := ft.limits[cap.ID]
+ if lim.umin > 0 {
+ if cap.Type.Size() == 8 {
v.reset(OpConst64)
} else {
v.reset(OpConst32)
}
if b.Func.pass.debug > 0 {
- b.Func.Warnl(v.Pos, "Proved slicemask not needed")
+ b.Func.Warnl(v.Pos, "Proved slicemask not needed (by limit)")
}
v.AuxInt = -1
}
+
case OpCtz8, OpCtz16, OpCtz32, OpCtz64:
// On some architectures, notably amd64, we can generate much better
// code for CtzNN if we know that the argument is non-zero.
diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go
index 1ce85a8f63b76a..0009de4fa69608 100644
--- a/src/cmd/compile/internal/ssa/regalloc.go
+++ b/src/cmd/compile/internal/ssa/regalloc.go
@@ -609,6 +609,29 @@ func (s *regAllocState) allocValToReg(v *Value, mask regMask, nospill bool, pos
} else if v.rematerializeable() {
// Rematerialize instead of loading from the spill location.
c = v.copyIntoWithXPos(s.curBlock, pos)
+ // We need to consider its output mask and potentially issue a Copy
+ // if there are register mask conflicts.
+ // This currently happens for the SIMD package, only between GP and FP
+ // registers, because Intel's vector extensions can put an integer value
+ // into an FP register, where it is seen as a vector. Example instruction:
+ // VPSLL[BWDQ].
+ // Because GP and FP masks do not overlap, mask & outputMask == 0
+ // detects this situation reliably.
+ sourceMask := s.regspec(c).outputs[0].regs
+ if mask&sourceMask == 0 && !onWasmStack {
+ s.setOrig(c, v)
+ s.assignReg(s.allocReg(sourceMask, v), v, c)
+ // v.Type for the new OpCopy is likely wrong and it might delay the problem
+ // until ssa to asm lowering, which might need the types to generate the right
+ // assembly for OpCopy. For Intel's GP to FP move, it happens to be that
+ // MOV instruction has such a variant so it happens to be right.
+ // But it's unclear for other architectures or situations, and the problem
+ // might be exposed when the assembler sees illegal instructions.
+ // Right now make we still pick v.Type, because at least its size should be correct
+ // for the rematerialization case the amd64 SIMD package exposed.
+ // TODO: We might need to figure out a way to find the correct type or make
+ // the asm lowering use reg info only for OpCopy.
+ c = s.curBlock.NewValue1(pos, OpCopy, v.Type, c)
+ }
} else {
// Load v from its spill location.
spill := s.makeSpill(v, s.curBlock)
@@ -2538,7 +2561,29 @@ func (e *edgeState) processDest(loc Location, vid ID, splice **Value, pos src.XP
e.s.f.Fatalf("can't find source for %s->%s: %s\n", e.p, e.b, v.LongString())
}
if dstReg {
- x = v.copyInto(e.p)
+ // We want to rematerialize v into a register that is incompatible with v's op's register mask.
+ // Instead of setting the wrong register for the rematerialized v, we should find the right register
+ // for it and emit an additional copy to move to the desired register.
+ // For #70451.
+ if e.s.regspec(v).outputs[0].regs&regMask(1< regDebug {
@@ -2764,6 +2806,7 @@ func (s *regAllocState) computeLive() {
s.live = make([][]liveInfo, f.NumBlocks())
s.desired = make([]desiredState, f.NumBlocks())
var phis []*Value
+ rematIDs := make([]ID, 0, 64)
live := f.newSparseMapPos(f.NumValues())
defer f.retSparseMapPos(live)
@@ -2816,9 +2859,20 @@ func (s *regAllocState) computeLive() {
continue
}
if opcodeTable[v.Op].call {
+ rematIDs = rematIDs[:0]
c := live.contents()
for i := range c {
c[i].val += unlikelyDistance
+ vid := c[i].key
+ if s.values[vid].rematerializeable {
+ rematIDs = append(rematIDs, vid)
+ }
+ }
+ // We don't spill rematerializeable values, and assuming they
+ // are live across a call would only force shuffle to add some
+ // (dead) constant rematerialization. Remove them.
+ for _, r := range rematIDs {
+ live.remove(r)
}
}
for _, a := range v.Args {
diff --git a/src/cmd/compile/internal/ssa/regalloc_test.go b/src/cmd/compile/internal/ssa/regalloc_test.go
index 0f69b852d12971..12f5820f1ff81d 100644
--- a/src/cmd/compile/internal/ssa/regalloc_test.go
+++ b/src/cmd/compile/internal/ssa/regalloc_test.go
@@ -6,6 +6,7 @@ package ssa
import (
"cmd/compile/internal/types"
+ "cmd/internal/obj/x86"
"fmt"
"testing"
)
@@ -264,6 +265,48 @@ func TestClobbersArg1(t *testing.T) {
}
}
+func TestNoRematerializeDeadConstant(t *testing.T) {
+ c := testConfigARM64(t)
+ f := c.Fun("b1",
+ Bloc("b1",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("addr", OpArg, c.config.Types.Int32.PtrTo(), 0, c.Temp(c.config.Types.Int32.PtrTo())),
+ Valu("const", OpARM64MOVDconst, c.config.Types.Int32, -1, nil), // Original constant
+ Valu("cmp", OpARM64CMPconst, types.TypeFlags, 0, nil, "const"),
+ Goto("b2"),
+ ),
+ Bloc("b2",
+ Valu("phi_mem", OpPhi, types.TypeMem, 0, nil, "mem", "callmem"),
+ Eq("cmp", "b6", "b3"),
+ ),
+ Bloc("b3",
+ Valu("call", OpARM64CALLstatic, types.TypeMem, 0, AuxCallLSym("_"), "phi_mem"),
+ Valu("callmem", OpSelectN, types.TypeMem, 0, nil, "call"),
+ Eq("cmp", "b5", "b4"),
+ ),
+ Bloc("b4", // A block where we don't really need to rematerialize the constant -1
+ Goto("b2"),
+ ),
+ Bloc("b5",
+ Valu("user", OpAMD64MOVQstore, types.TypeMem, 0, nil, "addr", "const", "callmem"),
+ Exit("user"),
+ ),
+ Bloc("b6",
+ Exit("phi_mem"),
+ ),
+ )
+
+ regalloc(f.f)
+ checkFunc(f.f)
+
+ // Check that in block b4, there's no dead rematerialization of the constant -1
+ for _, v := range f.blocks["b4"].Values {
+ if v.Op == OpARM64MOVDconst && v.AuxInt == -1 {
+ t.Errorf("constant -1 rematerialized in loop block b4: %s", v.LongString())
+ }
+ }
+}
+
func numSpills(b *Block) int {
return numOps(b, OpStoreReg)
}
@@ -279,3 +322,27 @@ func numOps(b *Block, op Op) int {
}
return n
}
+
+func TestRematerializeableRegCompatible(t *testing.T) {
+ c := testConfig(t)
+ f := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("x", OpAMD64MOVLconst, c.config.Types.Int32, 1, nil),
+ Valu("a", OpAMD64POR, c.config.Types.Float32, 0, nil, "x", "x"),
+ Valu("res", OpMakeResult, types.NewResults([]*types.Type{c.config.Types.Float32, types.TypeMem}), 0, nil, "a", "mem"),
+ Ret("res"),
+ ),
+ )
+ regalloc(f.f)
+ checkFunc(f.f)
+ moveFound := false
+ for _, v := range f.f.Blocks[0].Values {
+ if v.Op == OpCopy && x86.REG_X0 <= v.Reg() && v.Reg() <= x86.REG_X31 {
+ moveFound = true
+ }
+ }
+ if !moveFound {
+ t.Errorf("Expects an Copy to be issued, but got: %+v", f.f)
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go
index 6d83ba565317a3..f02019df384f5f 100644
--- a/src/cmd/compile/internal/ssa/rewrite.go
+++ b/src/cmd/compile/internal/ssa/rewrite.go
@@ -6,9 +6,11 @@ package ssa
import (
"cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/reflectdata"
"cmd/compile/internal/rttype"
+ "cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/s390x"
@@ -454,6 +456,26 @@ func isSameCall(aux Aux, name string) bool {
return fn != nil && fn.String() == name
}
+func isMalloc(aux Aux) bool {
+ return isNewObject(aux) || isSpecializedMalloc(aux)
+}
+
+func isNewObject(aux Aux) bool {
+ fn := aux.(*AuxCall).Fn
+ return fn != nil && fn.String() == "runtime.newobject"
+}
+
+func isSpecializedMalloc(aux Aux) bool {
+ fn := aux.(*AuxCall).Fn
+ if fn == nil {
+ return false
+ }
+ name := fn.String()
+ return strings.HasPrefix(name, "runtime.mallocgcSmallNoScanSC") ||
+ strings.HasPrefix(name, "runtime.mallocgcSmallScanNoHeaderSC") ||
+ strings.HasPrefix(name, "runtime.mallocTiny")
+}
+
// canLoadUnaligned reports if the architecture supports unaligned load operations.
func canLoadUnaligned(c *Config) bool {
return c.ctxt.Arch.Alignment == 1
@@ -2057,12 +2079,12 @@ func isFixedLoad(v *Value, sym Sym, off int64) bool {
return false
}
- if strings.HasPrefix(lsym.Name, "type:") {
+ if ti := lsym.TypeInfo(); ti != nil {
// Type symbols do not contain information about their fields, unlike the cases above.
// Hand-implement field accesses.
// TODO: can this be replaced with reflectdata.writeType and just use the code above?
- t := (*lsym.Extra).(*obj.TypeInfo).Type.(*types.Type)
+ t := ti.Type.(*types.Type)
for _, f := range rttype.Type.Fields() {
if f.Offset == off && copyCompatibleType(v.Type, f.Type) {
@@ -2116,12 +2138,12 @@ func rewriteFixedLoad(v *Value, sym Sym, sb *Value, off int64) *Value {
base.Fatalf("fixedLoad data not known for %s:%d", sym, off)
}
- if strings.HasPrefix(lsym.Name, "type:") {
+ if ti := lsym.TypeInfo(); ti != nil {
// Type symbols do not contain information about their fields, unlike the cases above.
// Hand-implement field accesses.
// TODO: can this be replaced with reflectdata.writeType and just use the code above?
- t := (*lsym.Extra).(*obj.TypeInfo).Type.(*types.Type)
+ t := ti.Type.(*types.Type)
ptrSizedOpConst := OpConst64
if f.Config.PtrSize == 4 {
@@ -2611,10 +2633,7 @@ func isDirectType1(v *Value) bool {
return isDirectType2(v.Args[0])
case OpAddr:
lsym := v.Aux.(*obj.LSym)
- if lsym.Extra == nil {
- return false
- }
- if ti, ok := (*lsym.Extra).(*obj.TypeInfo); ok {
+ if ti := lsym.TypeInfo(); ti != nil {
return types.IsDirectIface(ti.Type.(*types.Type))
}
}
@@ -2647,10 +2666,7 @@ func isDirectIface1(v *Value, depth int) bool {
return isDirectIface2(v.Args[0], depth-1)
case OpAddr:
lsym := v.Aux.(*obj.LSym)
- if lsym.Extra == nil {
- return false
- }
- if ii, ok := (*lsym.Extra).(*obj.ItabInfo); ok {
+ if ii := lsym.ItabInfo(); ii != nil {
return types.IsDirectIface(ii.Type.(*types.Type))
}
case OpConstNil:
@@ -2744,3 +2760,7 @@ func panicBoundsCToAux(p PanicBoundsC) Aux {
func panicBoundsCCToAux(p PanicBoundsCC) Aux {
return p
}
+
+func isDictArgSym(sym Sym) bool {
+ return sym.(*ir.Name).Sym().Name == typecheck.LocalDictName
+}
diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go
index a7ee632ae1af72..e702925f5f3bac 100644
--- a/src/cmd/compile/internal/ssa/rewriteAMD64.go
+++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go
@@ -5,6 +5,7 @@ package ssa
import "internal/buildcfg"
import "math"
import "cmd/internal/obj"
+import "cmd/compile/internal/base"
import "cmd/compile/internal/types"
func rewriteValueAMD64(v *Value) bool {
@@ -694,11 +695,9 @@ func rewriteValueAMD64(v *Value) bool {
case OpCtz8NonZero:
return rewriteValueAMD64_OpCtz8NonZero(v)
case OpCvt32Fto32:
- v.Op = OpAMD64CVTTSS2SL
- return true
+ return rewriteValueAMD64_OpCvt32Fto32(v)
case OpCvt32Fto64:
- v.Op = OpAMD64CVTTSS2SQ
- return true
+ return rewriteValueAMD64_OpCvt32Fto64(v)
case OpCvt32Fto64F:
v.Op = OpAMD64CVTSS2SD
return true
@@ -709,14 +708,12 @@ func rewriteValueAMD64(v *Value) bool {
v.Op = OpAMD64CVTSL2SD
return true
case OpCvt64Fto32:
- v.Op = OpAMD64CVTTSD2SL
- return true
+ return rewriteValueAMD64_OpCvt64Fto32(v)
case OpCvt64Fto32F:
v.Op = OpAMD64CVTSD2SS
return true
case OpCvt64Fto64:
- v.Op = OpAMD64CVTTSD2SQ
- return true
+ return rewriteValueAMD64_OpCvt64Fto64(v)
case OpCvt64to32F:
v.Op = OpAMD64CVTSQ2SS
return true
@@ -25511,6 +25508,190 @@ func rewriteValueAMD64_OpCtz8NonZero(v *Value) bool {
}
return false
}
+func rewriteValueAMD64_OpCvt32Fto32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Cvt32Fto32 x)
+ // cond: base.ConvertHash.MatchPos(v.Pos, nil)
+ // result: (XORL y (SARLconst [31] (ANDL y:(CVTTSS2SL x) (NOTL (MOVLf2i x)))))
+ for {
+ t := v.Type
+ x := v_0
+ if !(base.ConvertHash.MatchPos(v.Pos, nil)) {
+ break
+ }
+ v.reset(OpAMD64XORL)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64SARLconst, t)
+ v0.AuxInt = int8ToAuxInt(31)
+ v1 := b.NewValue0(v.Pos, OpAMD64ANDL, t)
+ y := b.NewValue0(v.Pos, OpAMD64CVTTSS2SL, t)
+ y.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpAMD64NOTL, typ.Int32)
+ v4 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32)
+ v4.AddArg(x)
+ v3.AddArg(v4)
+ v1.AddArg2(y, v3)
+ v0.AddArg(v1)
+ v.AddArg2(y, v0)
+ return true
+ }
+ // match: (Cvt32Fto32 x)
+ // cond: !base.ConvertHash.MatchPos(v.Pos, nil)
+ // result: (CVTTSS2SL x)
+ for {
+ t := v.Type
+ x := v_0
+ if !(!base.ConvertHash.MatchPos(v.Pos, nil)) {
+ break
+ }
+ v.reset(OpAMD64CVTTSS2SL)
+ v.Type = t
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpCvt32Fto64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Cvt32Fto64 x)
+ // cond: base.ConvertHash.MatchPos(v.Pos, nil)
+ // result: (XORQ y (SARQconst [63] (ANDQ y:(CVTTSS2SQ x) (NOTQ (MOVQf2i (CVTSS2SD x))) )))
+ for {
+ t := v.Type
+ x := v_0
+ if !(base.ConvertHash.MatchPos(v.Pos, nil)) {
+ break
+ }
+ v.reset(OpAMD64XORQ)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64SARQconst, t)
+ v0.AuxInt = int8ToAuxInt(63)
+ v1 := b.NewValue0(v.Pos, OpAMD64ANDQ, t)
+ y := b.NewValue0(v.Pos, OpAMD64CVTTSS2SQ, t)
+ y.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpAMD64NOTQ, typ.Int64)
+ v4 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64)
+ v5 := b.NewValue0(v.Pos, OpAMD64CVTSS2SD, typ.Float64)
+ v5.AddArg(x)
+ v4.AddArg(v5)
+ v3.AddArg(v4)
+ v1.AddArg2(y, v3)
+ v0.AddArg(v1)
+ v.AddArg2(y, v0)
+ return true
+ }
+ // match: (Cvt32Fto64 x)
+ // cond: !base.ConvertHash.MatchPos(v.Pos, nil)
+ // result: (CVTTSS2SQ x)
+ for {
+ t := v.Type
+ x := v_0
+ if !(!base.ConvertHash.MatchPos(v.Pos, nil)) {
+ break
+ }
+ v.reset(OpAMD64CVTTSS2SQ)
+ v.Type = t
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpCvt64Fto32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Cvt64Fto32 x)
+ // cond: base.ConvertHash.MatchPos(v.Pos, nil)
+ // result: (XORL y (SARLconst [31] (ANDL y:(CVTTSD2SL x) (NOTL (MOVLf2i (CVTSD2SS x))))))
+ for {
+ t := v.Type
+ x := v_0
+ if !(base.ConvertHash.MatchPos(v.Pos, nil)) {
+ break
+ }
+ v.reset(OpAMD64XORL)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64SARLconst, t)
+ v0.AuxInt = int8ToAuxInt(31)
+ v1 := b.NewValue0(v.Pos, OpAMD64ANDL, t)
+ y := b.NewValue0(v.Pos, OpAMD64CVTTSD2SL, t)
+ y.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpAMD64NOTL, typ.Int32)
+ v4 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32)
+ v5 := b.NewValue0(v.Pos, OpAMD64CVTSD2SS, typ.Float32)
+ v5.AddArg(x)
+ v4.AddArg(v5)
+ v3.AddArg(v4)
+ v1.AddArg2(y, v3)
+ v0.AddArg(v1)
+ v.AddArg2(y, v0)
+ return true
+ }
+ // match: (Cvt64Fto32 x)
+ // cond: !base.ConvertHash.MatchPos(v.Pos, nil)
+ // result: (CVTTSD2SL x)
+ for {
+ t := v.Type
+ x := v_0
+ if !(!base.ConvertHash.MatchPos(v.Pos, nil)) {
+ break
+ }
+ v.reset(OpAMD64CVTTSD2SL)
+ v.Type = t
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpCvt64Fto64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Cvt64Fto64 x)
+ // cond: base.ConvertHash.MatchPos(v.Pos, nil)
+ // result: (XORQ y (SARQconst [63] (ANDQ y:(CVTTSD2SQ x) (NOTQ (MOVQf2i x)))))
+ for {
+ t := v.Type
+ x := v_0
+ if !(base.ConvertHash.MatchPos(v.Pos, nil)) {
+ break
+ }
+ v.reset(OpAMD64XORQ)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64SARQconst, t)
+ v0.AuxInt = int8ToAuxInt(63)
+ v1 := b.NewValue0(v.Pos, OpAMD64ANDQ, t)
+ y := b.NewValue0(v.Pos, OpAMD64CVTTSD2SQ, t)
+ y.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpAMD64NOTQ, typ.Int64)
+ v4 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64)
+ v4.AddArg(x)
+ v3.AddArg(v4)
+ v1.AddArg2(y, v3)
+ v0.AddArg(v1)
+ v.AddArg2(y, v0)
+ return true
+ }
+ // match: (Cvt64Fto64 x)
+ // cond: !base.ConvertHash.MatchPos(v.Pos, nil)
+ // result: (CVTTSD2SQ x)
+ for {
+ t := v.Type
+ x := v_0
+ if !(!base.ConvertHash.MatchPos(v.Pos, nil)) {
+ break
+ }
+ v.reset(OpAMD64CVTTSD2SQ)
+ v.Type = t
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
func rewriteValueAMD64_OpDiv16(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
diff --git a/src/cmd/compile/internal/ssa/rewriteWasm.go b/src/cmd/compile/internal/ssa/rewriteWasm.go
index c3c5528aaa6ab4..a164a6eee555b9 100644
--- a/src/cmd/compile/internal/ssa/rewriteWasm.go
+++ b/src/cmd/compile/internal/ssa/rewriteWasm.go
@@ -2,7 +2,6 @@
package ssa
-import "internal/buildcfg"
import "math"
import "cmd/compile/internal/types"
@@ -3202,8 +3201,6 @@ func rewriteValueWasm_OpRsh8x8(v *Value) bool {
}
func rewriteValueWasm_OpSignExt16to32(v *Value) bool {
v_0 := v.Args[0]
- b := v.Block
- typ := &b.Func.Config.Types
// match: (SignExt16to32 x:(I64Load16S _ _))
// result: x
for {
@@ -3215,34 +3212,16 @@ func rewriteValueWasm_OpSignExt16to32(v *Value) bool {
return true
}
// match: (SignExt16to32 x)
- // cond: buildcfg.GOWASM.SignExt
// result: (I64Extend16S x)
for {
x := v_0
- if !(buildcfg.GOWASM.SignExt) {
- break
- }
v.reset(OpWasmI64Extend16S)
v.AddArg(x)
return true
}
- // match: (SignExt16to32 x)
- // result: (I64ShrS (I64Shl x (I64Const [48])) (I64Const [48]))
- for {
- x := v_0
- v.reset(OpWasmI64ShrS)
- v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64)
- v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
- v1.AuxInt = int64ToAuxInt(48)
- v0.AddArg2(x, v1)
- v.AddArg2(v0, v1)
- return true
- }
}
func rewriteValueWasm_OpSignExt16to64(v *Value) bool {
v_0 := v.Args[0]
- b := v.Block
- typ := &b.Func.Config.Types
// match: (SignExt16to64 x:(I64Load16S _ _))
// result: x
for {
@@ -3254,34 +3233,16 @@ func rewriteValueWasm_OpSignExt16to64(v *Value) bool {
return true
}
// match: (SignExt16to64 x)
- // cond: buildcfg.GOWASM.SignExt
// result: (I64Extend16S x)
for {
x := v_0
- if !(buildcfg.GOWASM.SignExt) {
- break
- }
v.reset(OpWasmI64Extend16S)
v.AddArg(x)
return true
}
- // match: (SignExt16to64 x)
- // result: (I64ShrS (I64Shl x (I64Const [48])) (I64Const [48]))
- for {
- x := v_0
- v.reset(OpWasmI64ShrS)
- v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64)
- v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
- v1.AuxInt = int64ToAuxInt(48)
- v0.AddArg2(x, v1)
- v.AddArg2(v0, v1)
- return true
- }
}
func rewriteValueWasm_OpSignExt32to64(v *Value) bool {
v_0 := v.Args[0]
- b := v.Block
- typ := &b.Func.Config.Types
// match: (SignExt32to64 x:(I64Load32S _ _))
// result: x
for {
@@ -3293,34 +3254,16 @@ func rewriteValueWasm_OpSignExt32to64(v *Value) bool {
return true
}
// match: (SignExt32to64 x)
- // cond: buildcfg.GOWASM.SignExt
// result: (I64Extend32S x)
for {
x := v_0
- if !(buildcfg.GOWASM.SignExt) {
- break
- }
v.reset(OpWasmI64Extend32S)
v.AddArg(x)
return true
}
- // match: (SignExt32to64 x)
- // result: (I64ShrS (I64Shl x (I64Const [32])) (I64Const [32]))
- for {
- x := v_0
- v.reset(OpWasmI64ShrS)
- v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64)
- v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
- v1.AuxInt = int64ToAuxInt(32)
- v0.AddArg2(x, v1)
- v.AddArg2(v0, v1)
- return true
- }
}
func rewriteValueWasm_OpSignExt8to16(v *Value) bool {
v_0 := v.Args[0]
- b := v.Block
- typ := &b.Func.Config.Types
// match: (SignExt8to16 x:(I64Load8S _ _))
// result: x
for {
@@ -3332,34 +3275,16 @@ func rewriteValueWasm_OpSignExt8to16(v *Value) bool {
return true
}
// match: (SignExt8to16 x)
- // cond: buildcfg.GOWASM.SignExt
// result: (I64Extend8S x)
for {
x := v_0
- if !(buildcfg.GOWASM.SignExt) {
- break
- }
v.reset(OpWasmI64Extend8S)
v.AddArg(x)
return true
}
- // match: (SignExt8to16 x)
- // result: (I64ShrS (I64Shl x (I64Const [56])) (I64Const [56]))
- for {
- x := v_0
- v.reset(OpWasmI64ShrS)
- v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64)
- v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
- v1.AuxInt = int64ToAuxInt(56)
- v0.AddArg2(x, v1)
- v.AddArg2(v0, v1)
- return true
- }
}
func rewriteValueWasm_OpSignExt8to32(v *Value) bool {
v_0 := v.Args[0]
- b := v.Block
- typ := &b.Func.Config.Types
// match: (SignExt8to32 x:(I64Load8S _ _))
// result: x
for {
@@ -3371,34 +3296,16 @@ func rewriteValueWasm_OpSignExt8to32(v *Value) bool {
return true
}
// match: (SignExt8to32 x)
- // cond: buildcfg.GOWASM.SignExt
// result: (I64Extend8S x)
for {
x := v_0
- if !(buildcfg.GOWASM.SignExt) {
- break
- }
v.reset(OpWasmI64Extend8S)
v.AddArg(x)
return true
}
- // match: (SignExt8to32 x)
- // result: (I64ShrS (I64Shl x (I64Const [56])) (I64Const [56]))
- for {
- x := v_0
- v.reset(OpWasmI64ShrS)
- v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64)
- v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
- v1.AuxInt = int64ToAuxInt(56)
- v0.AddArg2(x, v1)
- v.AddArg2(v0, v1)
- return true
- }
}
func rewriteValueWasm_OpSignExt8to64(v *Value) bool {
v_0 := v.Args[0]
- b := v.Block
- typ := &b.Func.Config.Types
// match: (SignExt8to64 x:(I64Load8S _ _))
// result: x
for {
@@ -3410,29 +3317,13 @@ func rewriteValueWasm_OpSignExt8to64(v *Value) bool {
return true
}
// match: (SignExt8to64 x)
- // cond: buildcfg.GOWASM.SignExt
// result: (I64Extend8S x)
for {
x := v_0
- if !(buildcfg.GOWASM.SignExt) {
- break
- }
v.reset(OpWasmI64Extend8S)
v.AddArg(x)
return true
}
- // match: (SignExt8to64 x)
- // result: (I64ShrS (I64Shl x (I64Const [56])) (I64Const [56]))
- for {
- x := v_0
- v.reset(OpWasmI64ShrS)
- v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64)
- v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
- v1.AuxInt = int64ToAuxInt(56)
- v0.AddArg2(x, v1)
- v.AddArg2(v0, v1)
- return true
- }
}
func rewriteValueWasm_OpSlicemask(v *Value) bool {
v_0 := v.Args[0]
diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go
index 7e23194e6aba12..79c444a86b29df 100644
--- a/src/cmd/compile/internal/ssa/rewritegeneric.go
+++ b/src/cmd/compile/internal/ssa/rewritegeneric.go
@@ -422,6 +422,8 @@ func rewriteValuegeneric(v *Value) bool {
return rewriteValuegeneric_OpSliceCap(v)
case OpSliceLen:
return rewriteValuegeneric_OpSliceLen(v)
+ case OpSliceMake:
+ return rewriteValuegeneric_OpSliceMake(v)
case OpSlicePtr:
return rewriteValuegeneric_OpSlicePtr(v)
case OpSlicemask:
@@ -6605,12 +6607,16 @@ func rewriteValuegeneric_OpCtz8(v *Value) bool {
func rewriteValuegeneric_OpCvt32Fto32(v *Value) bool {
v_0 := v.Args[0]
// match: (Cvt32Fto32 (Const32F [c]))
+ // cond: c >= -1<<31 && c < 1<<31
// result: (Const32 [int32(c)])
for {
if v_0.Op != OpConst32F {
break
}
c := auxIntToFloat32(v_0.AuxInt)
+ if !(c >= -1<<31 && c < 1<<31) {
+ break
+ }
v.reset(OpConst32)
v.AuxInt = int32ToAuxInt(int32(c))
return true
@@ -6620,12 +6626,16 @@ func rewriteValuegeneric_OpCvt32Fto32(v *Value) bool {
func rewriteValuegeneric_OpCvt32Fto64(v *Value) bool {
v_0 := v.Args[0]
// match: (Cvt32Fto64 (Const32F [c]))
+ // cond: c >= -1<<63 && c < 1<<63
// result: (Const64 [int64(c)])
for {
if v_0.Op != OpConst32F {
break
}
c := auxIntToFloat32(v_0.AuxInt)
+ if !(c >= -1<<63 && c < 1<<63) {
+ break
+ }
v.reset(OpConst64)
v.AuxInt = int64ToAuxInt(int64(c))
return true
@@ -6680,12 +6690,16 @@ func rewriteValuegeneric_OpCvt32to64F(v *Value) bool {
func rewriteValuegeneric_OpCvt64Fto32(v *Value) bool {
v_0 := v.Args[0]
// match: (Cvt64Fto32 (Const64F [c]))
+ // cond: c >= -1<<31 && c < 1<<31
// result: (Const32 [int32(c)])
for {
if v_0.Op != OpConst64F {
break
}
c := auxIntToFloat64(v_0.AuxInt)
+ if !(c >= -1<<31 && c < 1<<31) {
+ break
+ }
v.reset(OpConst32)
v.AuxInt = int32ToAuxInt(int32(c))
return true
@@ -6730,12 +6744,16 @@ func rewriteValuegeneric_OpCvt64Fto32F(v *Value) bool {
func rewriteValuegeneric_OpCvt64Fto64(v *Value) bool {
v_0 := v.Args[0]
// match: (Cvt64Fto64 (Const64F [c]))
+ // cond: c >= -1<<63 && c < 1<<63
// result: (Const64 [int64(c)])
for {
if v_0.Op != OpConst64F {
break
}
c := auxIntToFloat64(v_0.AuxInt)
+ if !(c >= -1<<63 && c < 1<<63) {
+ break
+ }
v.reset(OpConst64)
v.AuxInt = int64ToAuxInt(int64(c))
return true
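The conditions added above stop constant folding of float-to-integer conversions when the constant does not fit the target type. A minimal sketch (not part of the patch) of why the guard matters: for non-constant operands the result of an out-of-range float-to-integer conversion is implementation-dependent, so folding such a conversion to a fixed constant at compile time could change observable behavior.

package main

import "fmt"

func main() {
	small := 1234.5
	fmt.Println(int32(small)) // in range: always 1234, safe to fold

	big := 1e300 // far outside the int32 range
	// The result of this out-of-range conversion is implementation-dependent,
	// so the rewrite must not replace it with a fixed constant.
	fmt.Println(int32(big))
}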
@@ -21316,8 +21334,8 @@ func rewriteValuegeneric_OpNilCheck(v *Value) bool {
v.copyOf(ptr)
return true
}
- // match: (NilCheck ptr:(SelectN [0] call:(StaticLECall _ _)) _)
- // cond: isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check")
+ // match: (NilCheck ptr:(SelectN [0] call:(StaticLECall ___)) _)
+ // cond: isMalloc(call.Aux) && warnRule(fe.Debug_checknil(), v, "removed nil check")
// result: ptr
for {
ptr := v_0
@@ -21325,14 +21343,17 @@ func rewriteValuegeneric_OpNilCheck(v *Value) bool {
break
}
call := ptr.Args[0]
- if call.Op != OpStaticLECall || len(call.Args) != 2 || !(isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check")) {
+ if call.Op != OpStaticLECall {
+ break
+ }
+ if !(isMalloc(call.Aux) && warnRule(fe.Debug_checknil(), v, "removed nil check")) {
break
}
v.copyOf(ptr)
return true
}
- // match: (NilCheck ptr:(OffPtr (SelectN [0] call:(StaticLECall _ _))) _)
- // cond: isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check")
+ // match: (NilCheck ptr:(OffPtr (SelectN [0] call:(StaticLECall ___))) _)
+ // cond: isMalloc(call.Aux) && warnRule(fe.Debug_checknil(), v, "removed nil check")
// result: ptr
for {
ptr := v_0
@@ -21344,7 +21365,10 @@ func rewriteValuegeneric_OpNilCheck(v *Value) bool {
break
}
call := ptr_0.Args[0]
- if call.Op != OpStaticLECall || len(call.Args) != 2 || !(isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check")) {
+ if call.Op != OpStaticLECall {
+ break
+ }
+ if !(isMalloc(call.Aux) && warnRule(fe.Debug_checknil(), v, "removed nil check")) {
break
}
v.copyOf(ptr)
@@ -21393,6 +21417,21 @@ func rewriteValuegeneric_OpNilCheck(v *Value) bool {
v.copyOf(ptr)
return true
}
+ // match: (NilCheck ptr:(Arg {sym}) _)
+ // cond: isDictArgSym(sym)
+ // result: ptr
+ for {
+ ptr := v_0
+ if ptr.Op != OpArg {
+ break
+ }
+ sym := auxToSym(ptr.Aux)
+ if !(isDictArgSym(sym)) {
+ break
+ }
+ v.copyOf(ptr)
+ return true
+ }
// match: (NilCheck ptr:(NilCheck _ _) _ )
// result: ptr
for {
@@ -23940,6 +23979,7 @@ func rewriteValuegeneric_OpOrB(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
+ typ := &b.Func.Config.Types
// match: (OrB (Less64 (Const64 [c]) x) (Less64 x (Const64 [d])))
// cond: c >= d
// result: (Less64U (Const64 [c-d]) (Sub64 x (Const64 [d])))
@@ -25252,6 +25292,558 @@ func rewriteValuegeneric_OpOrB(v *Value) bool {
}
break
}
+ // match: (OrB (Neq64F x x) (Less64F x y:(Const64F [c])))
+ // result: (Not (Leq64F y x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpNeq64F {
+ continue
+ }
+ x := v_0.Args[1]
+ if x != v_0.Args[0] || v_1.Op != OpLess64F {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ y := v_1.Args[1]
+ if y.Op != OpConst64F {
+ continue
+ }
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLeq64F, typ.Bool)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Neq64F x x) (Leq64F x y:(Const64F [c])))
+ // result: (Not (Less64F y x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpNeq64F {
+ continue
+ }
+ x := v_0.Args[1]
+ if x != v_0.Args[0] || v_1.Op != OpLeq64F {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ y := v_1.Args[1]
+ if y.Op != OpConst64F {
+ continue
+ }
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLess64F, typ.Bool)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Neq64F x x) (Less64F y:(Const64F [c]) x))
+ // result: (Not (Leq64F x y))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpNeq64F {
+ continue
+ }
+ x := v_0.Args[1]
+ if x != v_0.Args[0] || v_1.Op != OpLess64F {
+ continue
+ }
+ _ = v_1.Args[1]
+ y := v_1.Args[0]
+ if y.Op != OpConst64F {
+ continue
+ }
+ if x != v_1.Args[1] {
+ continue
+ }
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLeq64F, typ.Bool)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Neq64F x x) (Leq64F y:(Const64F [c]) x))
+ // result: (Not (Less64F x y))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpNeq64F {
+ continue
+ }
+ x := v_0.Args[1]
+ if x != v_0.Args[0] || v_1.Op != OpLeq64F {
+ continue
+ }
+ _ = v_1.Args[1]
+ y := v_1.Args[0]
+ if y.Op != OpConst64F {
+ continue
+ }
+ if x != v_1.Args[1] {
+ continue
+ }
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLess64F, typ.Bool)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Neq32F x x) (Less32F x y:(Const32F [c])))
+ // result: (Not (Leq32F y x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpNeq32F {
+ continue
+ }
+ x := v_0.Args[1]
+ if x != v_0.Args[0] || v_1.Op != OpLess32F {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ y := v_1.Args[1]
+ if y.Op != OpConst32F {
+ continue
+ }
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLeq32F, typ.Bool)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Neq32F x x) (Leq32F x y:(Const32F [c])))
+ // result: (Not (Less32F y x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpNeq32F {
+ continue
+ }
+ x := v_0.Args[1]
+ if x != v_0.Args[0] || v_1.Op != OpLeq32F {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ y := v_1.Args[1]
+ if y.Op != OpConst32F {
+ continue
+ }
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLess32F, typ.Bool)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Neq32F x x) (Less32F y:(Const32F [c]) x))
+ // result: (Not (Leq32F x y))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpNeq32F {
+ continue
+ }
+ x := v_0.Args[1]
+ if x != v_0.Args[0] || v_1.Op != OpLess32F {
+ continue
+ }
+ _ = v_1.Args[1]
+ y := v_1.Args[0]
+ if y.Op != OpConst32F {
+ continue
+ }
+ if x != v_1.Args[1] {
+ continue
+ }
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLeq32F, typ.Bool)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Neq32F x x) (Leq32F y:(Const32F [c]) x))
+ // result: (Not (Less32F x y))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpNeq32F {
+ continue
+ }
+ x := v_0.Args[1]
+ if x != v_0.Args[0] || v_1.Op != OpLeq32F {
+ continue
+ }
+ _ = v_1.Args[1]
+ y := v_1.Args[0]
+ if y.Op != OpConst32F {
+ continue
+ }
+ if x != v_1.Args[1] {
+ continue
+ }
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLess32F, typ.Bool)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Neq64F x x) (Less64F abs:(Abs x) y:(Const64F [c])))
+ // result: (Not (Leq64F y abs))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpNeq64F {
+ continue
+ }
+ x := v_0.Args[1]
+ if x != v_0.Args[0] || v_1.Op != OpLess64F {
+ continue
+ }
+ _ = v_1.Args[1]
+ abs := v_1.Args[0]
+ if abs.Op != OpAbs || x != abs.Args[0] {
+ continue
+ }
+ y := v_1.Args[1]
+ if y.Op != OpConst64F {
+ continue
+ }
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLeq64F, typ.Bool)
+ v0.AddArg2(y, abs)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Neq64F x x) (Leq64F abs:(Abs x) y:(Const64F [c])))
+ // result: (Not (Less64F y abs))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpNeq64F {
+ continue
+ }
+ x := v_0.Args[1]
+ if x != v_0.Args[0] || v_1.Op != OpLeq64F {
+ continue
+ }
+ _ = v_1.Args[1]
+ abs := v_1.Args[0]
+ if abs.Op != OpAbs || x != abs.Args[0] {
+ continue
+ }
+ y := v_1.Args[1]
+ if y.Op != OpConst64F {
+ continue
+ }
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLess64F, typ.Bool)
+ v0.AddArg2(y, abs)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Neq64F x x) (Less64F y:(Const64F [c]) abs:(Abs x)))
+ // result: (Not (Leq64F abs y))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpNeq64F {
+ continue
+ }
+ x := v_0.Args[1]
+ if x != v_0.Args[0] || v_1.Op != OpLess64F {
+ continue
+ }
+ _ = v_1.Args[1]
+ y := v_1.Args[0]
+ if y.Op != OpConst64F {
+ continue
+ }
+ abs := v_1.Args[1]
+ if abs.Op != OpAbs || x != abs.Args[0] {
+ continue
+ }
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLeq64F, typ.Bool)
+ v0.AddArg2(abs, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Neq64F x x) (Leq64F y:(Const64F [c]) abs:(Abs x)))
+ // result: (Not (Less64F abs y))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpNeq64F {
+ continue
+ }
+ x := v_0.Args[1]
+ if x != v_0.Args[0] || v_1.Op != OpLeq64F {
+ continue
+ }
+ _ = v_1.Args[1]
+ y := v_1.Args[0]
+ if y.Op != OpConst64F {
+ continue
+ }
+ abs := v_1.Args[1]
+ if abs.Op != OpAbs || x != abs.Args[0] {
+ continue
+ }
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLess64F, typ.Bool)
+ v0.AddArg2(abs, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Neq64F x x) (Less64F neg:(Neg64F x) y:(Const64F [c])))
+ // result: (Not (Leq64F y neg))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpNeq64F {
+ continue
+ }
+ x := v_0.Args[1]
+ if x != v_0.Args[0] || v_1.Op != OpLess64F {
+ continue
+ }
+ _ = v_1.Args[1]
+ neg := v_1.Args[0]
+ if neg.Op != OpNeg64F || x != neg.Args[0] {
+ continue
+ }
+ y := v_1.Args[1]
+ if y.Op != OpConst64F {
+ continue
+ }
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLeq64F, typ.Bool)
+ v0.AddArg2(y, neg)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Neq64F x x) (Leq64F neg:(Neg64F x) y:(Const64F [c])))
+ // result: (Not (Less64F y neg))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpNeq64F {
+ continue
+ }
+ x := v_0.Args[1]
+ if x != v_0.Args[0] || v_1.Op != OpLeq64F {
+ continue
+ }
+ _ = v_1.Args[1]
+ neg := v_1.Args[0]
+ if neg.Op != OpNeg64F || x != neg.Args[0] {
+ continue
+ }
+ y := v_1.Args[1]
+ if y.Op != OpConst64F {
+ continue
+ }
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLess64F, typ.Bool)
+ v0.AddArg2(y, neg)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Neq64F x x) (Less64F y:(Const64F [c]) neg:(Neg64F x)))
+ // result: (Not (Leq64F neg y))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpNeq64F {
+ continue
+ }
+ x := v_0.Args[1]
+ if x != v_0.Args[0] || v_1.Op != OpLess64F {
+ continue
+ }
+ _ = v_1.Args[1]
+ y := v_1.Args[0]
+ if y.Op != OpConst64F {
+ continue
+ }
+ neg := v_1.Args[1]
+ if neg.Op != OpNeg64F || x != neg.Args[0] {
+ continue
+ }
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLeq64F, typ.Bool)
+ v0.AddArg2(neg, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Neq64F x x) (Leq64F y:(Const64F [c]) neg:(Neg64F x)))
+ // result: (Not (Less64F neg y))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpNeq64F {
+ continue
+ }
+ x := v_0.Args[1]
+ if x != v_0.Args[0] || v_1.Op != OpLeq64F {
+ continue
+ }
+ _ = v_1.Args[1]
+ y := v_1.Args[0]
+ if y.Op != OpConst64F {
+ continue
+ }
+ neg := v_1.Args[1]
+ if neg.Op != OpNeg64F || x != neg.Args[0] {
+ continue
+ }
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLess64F, typ.Bool)
+ v0.AddArg2(neg, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Neq32F x x) (Less32F neg:(Neg32F x) y:(Const32F [c])))
+ // result: (Not (Leq32F y neg))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpNeq32F {
+ continue
+ }
+ x := v_0.Args[1]
+ if x != v_0.Args[0] || v_1.Op != OpLess32F {
+ continue
+ }
+ _ = v_1.Args[1]
+ neg := v_1.Args[0]
+ if neg.Op != OpNeg32F || x != neg.Args[0] {
+ continue
+ }
+ y := v_1.Args[1]
+ if y.Op != OpConst32F {
+ continue
+ }
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLeq32F, typ.Bool)
+ v0.AddArg2(y, neg)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Neq32F x x) (Leq32F neg:(Neg32F x) y:(Const32F [c])))
+ // result: (Not (Less32F y neg))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpNeq32F {
+ continue
+ }
+ x := v_0.Args[1]
+ if x != v_0.Args[0] || v_1.Op != OpLeq32F {
+ continue
+ }
+ _ = v_1.Args[1]
+ neg := v_1.Args[0]
+ if neg.Op != OpNeg32F || x != neg.Args[0] {
+ continue
+ }
+ y := v_1.Args[1]
+ if y.Op != OpConst32F {
+ continue
+ }
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLess32F, typ.Bool)
+ v0.AddArg2(y, neg)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Neq32F x x) (Less32F y:(Const32F [c]) neg:(Neg32F x)))
+ // result: (Not (Leq32F neg y))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpNeq32F {
+ continue
+ }
+ x := v_0.Args[1]
+ if x != v_0.Args[0] || v_1.Op != OpLess32F {
+ continue
+ }
+ _ = v_1.Args[1]
+ y := v_1.Args[0]
+ if y.Op != OpConst32F {
+ continue
+ }
+ neg := v_1.Args[1]
+ if neg.Op != OpNeg32F || x != neg.Args[0] {
+ continue
+ }
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLeq32F, typ.Bool)
+ v0.AddArg2(neg, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Neq32F x x) (Leq32F y:(Const32F [c]) neg:(Neg32F x)))
+ // result: (Not (Less32F neg y))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpNeq32F {
+ continue
+ }
+ x := v_0.Args[1]
+ if x != v_0.Args[0] || v_1.Op != OpLeq32F {
+ continue
+ }
+ _ = v_1.Args[1]
+ y := v_1.Args[0]
+ if y.Op != OpConst32F {
+ continue
+ }
+ neg := v_1.Args[1]
+ if neg.Op != OpNeg32F || x != neg.Args[0] {
+ continue
+ }
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLess32F, typ.Bool)
+ v0.AddArg2(neg, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
return false
}
func rewriteValuegeneric_OpPhi(v *Value) bool {
@@ -30499,6 +31091,91 @@ func rewriteValuegeneric_OpSliceLen(v *Value) bool {
}
return false
}
+func rewriteValuegeneric_OpSliceMake(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SliceMake (AddPtr x (And64 y (Slicemask _))) w:(Const64 [c]) z)
+ // cond: c > 0
+ // result: (SliceMake (AddPtr x y) w z)
+ for {
+ if v_0.Op != OpAddPtr {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAnd64 {
+ break
+ }
+ _ = v_0_1.Args[1]
+ v_0_1_0 := v_0_1.Args[0]
+ v_0_1_1 := v_0_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_1_0, v_0_1_1 = _i0+1, v_0_1_1, v_0_1_0 {
+ y := v_0_1_0
+ if v_0_1_1.Op != OpSlicemask {
+ continue
+ }
+ w := v_1
+ if w.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(w.AuxInt)
+ z := v_2
+ if !(c > 0) {
+ continue
+ }
+ v.reset(OpSliceMake)
+ v0 := b.NewValue0(v.Pos, OpAddPtr, t)
+ v0.AddArg2(x, y)
+ v.AddArg3(v0, w, z)
+ return true
+ }
+ break
+ }
+ // match: (SliceMake (AddPtr x (And32 y (Slicemask _))) w:(Const32 [c]) z)
+ // cond: c > 0
+ // result: (SliceMake (AddPtr x y) w z)
+ for {
+ if v_0.Op != OpAddPtr {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAnd32 {
+ break
+ }
+ _ = v_0_1.Args[1]
+ v_0_1_0 := v_0_1.Args[0]
+ v_0_1_1 := v_0_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_1_0, v_0_1_1 = _i0+1, v_0_1_1, v_0_1_0 {
+ y := v_0_1_0
+ if v_0_1_1.Op != OpSlicemask {
+ continue
+ }
+ w := v_1
+ if w.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(w.AuxInt)
+ z := v_2
+ if !(c > 0) {
+ continue
+ }
+ v.reset(OpSliceMake)
+ v0 := b.NewValue0(v.Pos, OpAddPtr, t)
+ v0.AddArg2(x, y)
+ v.AddArg3(v0, w, z)
+ return true
+ }
+ break
+ }
+ return false
+}
func rewriteValuegeneric_OpSlicePtr(v *Value) bool {
v_0 := v.Args[0]
// match: (SlicePtr (SliceMake (SlicePtr x) _ _))
@@ -31808,27 +32485,27 @@ func rewriteValuegeneric_OpStore(v *Value) bool {
v.AddArg3(dst, e, mem)
return true
}
- // match: (Store (SelectN [0] call:(StaticLECall _ _)) x mem:(SelectN [1] call))
- // cond: isConstZero(x) && isSameCall(call.Aux, "runtime.newobject")
+ // match: (Store (SelectN [0] call:(StaticLECall ___)) x mem:(SelectN [1] call))
+ // cond: isConstZero(x) && isMalloc(call.Aux)
// result: mem
for {
if v_0.Op != OpSelectN || auxIntToInt64(v_0.AuxInt) != 0 {
break
}
call := v_0.Args[0]
- if call.Op != OpStaticLECall || len(call.Args) != 2 {
+ if call.Op != OpStaticLECall {
break
}
x := v_1
mem := v_2
- if mem.Op != OpSelectN || auxIntToInt64(mem.AuxInt) != 1 || call != mem.Args[0] || !(isConstZero(x) && isSameCall(call.Aux, "runtime.newobject")) {
+ if mem.Op != OpSelectN || auxIntToInt64(mem.AuxInt) != 1 || call != mem.Args[0] || !(isConstZero(x) && isMalloc(call.Aux)) {
break
}
v.copyOf(mem)
return true
}
- // match: (Store (OffPtr (SelectN [0] call:(StaticLECall _ _))) x mem:(SelectN [1] call))
- // cond: isConstZero(x) && isSameCall(call.Aux, "runtime.newobject")
+ // match: (Store (OffPtr (SelectN [0] call:(StaticLECall ___))) x mem:(SelectN [1] call))
+ // cond: isConstZero(x) && isMalloc(call.Aux)
// result: mem
for {
if v_0.Op != OpOffPtr {
@@ -31839,12 +32516,12 @@ func rewriteValuegeneric_OpStore(v *Value) bool {
break
}
call := v_0_0.Args[0]
- if call.Op != OpStaticLECall || len(call.Args) != 2 {
+ if call.Op != OpStaticLECall {
break
}
x := v_1
mem := v_2
- if mem.Op != OpSelectN || auxIntToInt64(mem.AuxInt) != 1 || call != mem.Args[0] || !(isConstZero(x) && isSameCall(call.Aux, "runtime.newobject")) {
+ if mem.Op != OpSelectN || auxIntToInt64(mem.AuxInt) != 1 || call != mem.Args[0] || !(isConstZero(x) && isMalloc(call.Aux)) {
break
}
v.copyOf(mem)
@@ -36187,19 +36864,19 @@ func rewriteValuegeneric_OpZero(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
- // match: (Zero (SelectN [0] call:(StaticLECall _ _)) mem:(SelectN [1] call))
- // cond: isSameCall(call.Aux, "runtime.newobject")
+ // match: (Zero (SelectN [0] call:(StaticLECall ___)) mem:(SelectN [1] call))
+ // cond: isMalloc(call.Aux)
// result: mem
for {
if v_0.Op != OpSelectN || auxIntToInt64(v_0.AuxInt) != 0 {
break
}
call := v_0.Args[0]
- if call.Op != OpStaticLECall || len(call.Args) != 2 {
+ if call.Op != OpStaticLECall {
break
}
mem := v_1
- if mem.Op != OpSelectN || auxIntToInt64(mem.AuxInt) != 1 || call != mem.Args[0] || !(isSameCall(call.Aux, "runtime.newobject")) {
+ if mem.Op != OpSelectN || auxIntToInt64(mem.AuxInt) != 1 || call != mem.Args[0] || !(isMalloc(call.Aux)) {
break
}
v.copyOf(mem)
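The OpSliceMake rules added earlier in this file drop the Slicemask AND from the pointer computation once the slice length is a known positive constant. A minimal sketch (not part of the patch) of the identity they rely on: Slicemask(n) is all ones for n > 0 and zero for n == 0, so with a positive constant length the mask is a no-op.

package main

import "fmt"

// slicemask mirrors the generic Slicemask op: -1 (all bits set) for a
// positive length, 0 for a zero length.
func slicemask(n int64) int64 {
	if n > 0 {
		return -1
	}
	return 0
}

func main() {
	const off = int64(128) // an offset to be added to the slice's base pointer
	for _, n := range []int64{0, 1, 5} {
		masked := off & slicemask(n)
		fmt.Println(n, masked, masked == off) // for n > 0 the AND changes nothing
	}
}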
diff --git a/src/cmd/compile/internal/ssa/stmtlines_test.go b/src/cmd/compile/internal/ssa/stmtlines_test.go
index 8cd11e9828e0f9..d0f09da86ffce2 100644
--- a/src/cmd/compile/internal/ssa/stmtlines_test.go
+++ b/src/cmd/compile/internal/ssa/stmtlines_test.go
@@ -137,17 +137,17 @@ func TestStmtLines(t *testing.T) {
}
}
- var m int
+ var m float64
if runtime.GOARCH == "amd64" {
- m = 1 // > 99% obtained on amd64, no backsliding
+ m = 0.011 // > 98.9% obtained on amd64, no backsliding
} else if runtime.GOARCH == "riscv64" {
- m = 3 // XXX temporary update threshold to 97% for regabi
+ m = 0.03 // XXX temporary update threshold to 97% for regabi
} else {
- m = 2 // expect 98% elsewhere.
+ m = 0.02 // expect 98% elsewhere.
}
- if len(nonStmtLines)*100 > m*len(lines) {
- t.Errorf("Saw too many (%s, > %d%%) lines without statement marks, total=%d, nostmt=%d ('-run TestStmtLines -v' lists failing lines)\n", runtime.GOARCH, m, len(lines), len(nonStmtLines))
+ if float64(len(nonStmtLines)) > m*float64(len(lines)) {
+ t.Errorf("Saw too many (%s, > %.1f%%) lines without statement marks, total=%d, nostmt=%d ('-run TestStmtLines -v' lists failing lines)\n", runtime.GOARCH, m*100, len(lines), len(nonStmtLines))
}
t.Logf("Saw %d out of %d lines without statement marks", len(nonStmtLines), len(lines))
if testing.Verbose() {
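The threshold rewrite above switches from an integer percentage to a float64 fraction because the amd64 budget moves to 1.1%, which whole-percent arithmetic cannot express. A minimal sketch (not part of the patch, with made-up counts) of the two checks side by side:

package main

import "fmt"

func main() {
	lines, nonStmt := 10000, 105 // 1.05% of lines lack statement marks

	// Old integer check with m = 1 (percent): trips even though 1.05% is
	// inside the intended 1.1% budget.
	fmt.Println(nonStmt*100 > 1*lines) // true => test failure

	// New float64 check with m = 0.011 (fraction): passes.
	fmt.Println(float64(nonStmt) > 0.011*float64(lines)) // false => ok
}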
diff --git a/src/cmd/compile/internal/ssa/value.go b/src/cmd/compile/internal/ssa/value.go
index 55ab23ce9a0bcd..51a70c7fd4fdcd 100644
--- a/src/cmd/compile/internal/ssa/value.go
+++ b/src/cmd/compile/internal/ssa/value.go
@@ -600,7 +600,7 @@ func (v *Value) removeable() bool {
func AutoVar(v *Value) (*ir.Name, int64) {
if loc, ok := v.Block.Func.RegAlloc[v.ID].(LocalSlot); ok {
if v.Type.Size() > loc.Type.Size() {
- v.Fatalf("spill/restore type %s doesn't fit in slot type %s", v.Type, loc.Type)
+ v.Fatalf("v%d: spill/restore type %v doesn't fit in slot type %v", v.ID, v.Type, loc.Type)
}
return loc.N, loc.Off
}
diff --git a/src/cmd/compile/internal/ssa/writebarrier.go b/src/cmd/compile/internal/ssa/writebarrier.go
index ec6901f13ec1d2..ec5a0fed29d791 100644
--- a/src/cmd/compile/internal/ssa/writebarrier.go
+++ b/src/cmd/compile/internal/ssa/writebarrier.go
@@ -798,7 +798,16 @@ func IsNewObject(v *Value, select1 []*Value) (mem *Value, ok bool) {
if call.Op != OpStaticCall {
return nil, false
}
- if !isSameCall(call.Aux, "runtime.newobject") {
+	// Check for newobject calls, or for newobject calls that have been transformed into size-specialized malloc calls.
+	// Calls whose return type is unsafe pointer may have been produced by flushPendingHeapAllocations
+	// in the ssa generator, so they may not originally have been newobject calls.
+ var numParameters int64
+ switch {
+ case isNewObject(call.Aux):
+ numParameters = 1
+ case isSpecializedMalloc(call.Aux) && !v.Type.IsUnsafePtr():
+ numParameters = 3
+ default:
return nil, false
}
if f.ABIDefault == f.ABI1 && len(c.intParamRegs) >= 1 {
@@ -813,7 +822,7 @@ func IsNewObject(v *Value, select1 []*Value) (mem *Value, ok bool) {
if v.Args[0].Args[0].Op != OpSP {
return nil, false
}
- if v.Args[0].AuxInt != c.ctxt.Arch.FixedFrameSize+c.RegSize { // offset of return value
+ if v.Args[0].AuxInt != c.ctxt.Arch.FixedFrameSize+numParameters*c.RegSize { // offset of return value
return nil, false
}
return mem, true
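IsNewObject above now recognizes both runtime.newobject and the size-specialized malloc helpers, and the expected stack offset of the returned pointer shifts with the callee's parameter count. A minimal sketch (not part of the patch, with made-up frame numbers) of the offset computation used in that check:

package main

import "fmt"

// retOffset mirrors the comparison in IsNewObject: the return value sits just
// past the fixed frame area and the stack-passed parameters.
func retOffset(fixedFrameSize, regSize, numParams int64) int64 {
	return fixedFrameSize + numParams*regSize
}

func main() {
	const fixedFrame, regSize = int64(0), int64(8) // e.g. a 64-bit target with no fixed frame area
	fmt.Println("newobject (1 parameter):", retOffset(fixedFrame, regSize, 1))           // 8
	fmt.Println("specialized malloc (3 parameters):", retOffset(fixedFrame, regSize, 3)) // 24
}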
diff --git a/src/cmd/compile/internal/ssagen/phi.go b/src/cmd/compile/internal/ssagen/phi.go
index 19b6920913d83c..0dcf353bf43089 100644
--- a/src/cmd/compile/internal/ssagen/phi.go
+++ b/src/cmd/compile/internal/ssagen/phi.go
@@ -253,7 +253,7 @@ func (s *phiState) insertVarPhis(n int, var_ ir.Node, defs []*ssa.Block, typ *ty
}
// Add a phi to block c for variable n.
hasPhi.add(c.ID)
- v := c.NewValue0I(currentRoot.Pos, ssa.OpPhi, typ, int64(n)) // TODO: line number right?
+ v := c.NewValue0I(s.s.blockStarts[b.ID], ssa.OpPhi, typ, int64(n))
// Note: we store the variable number in the phi's AuxInt field. Used temporarily by phi building.
if var_.Op() == ir.ONAME {
s.s.addNamedValue(var_.(*ir.Name), v)
@@ -513,6 +513,7 @@ loop:
v.Op = ssa.OpPhi
v.AddArgs(args...)
v.Aux = nil
+ v.Pos = s.s.blockStarts[b.ID]
continue loop
}
w = a // save witness
diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go
index 1e2159579dfbf2..ae7d57566f7e0d 100644
--- a/src/cmd/compile/internal/ssagen/ssa.go
+++ b/src/cmd/compile/internal/ssagen/ssa.go
@@ -12,6 +12,7 @@ import (
"go/constant"
"html"
"internal/buildcfg"
+ "internal/runtime/gc"
"os"
"path/filepath"
"slices"
@@ -124,6 +125,15 @@ func InitConfig() {
ir.Syms.Goschedguarded = typecheck.LookupRuntimeFunc("goschedguarded")
ir.Syms.Growslice = typecheck.LookupRuntimeFunc("growslice")
ir.Syms.InterfaceSwitch = typecheck.LookupRuntimeFunc("interfaceSwitch")
+ for i := 1; i < len(ir.Syms.MallocGCSmallNoScan); i++ {
+ ir.Syms.MallocGCSmallNoScan[i] = typecheck.LookupRuntimeFunc(fmt.Sprintf("mallocgcSmallNoScanSC%d", i))
+ }
+ for i := 1; i < len(ir.Syms.MallocGCSmallScanNoHeader); i++ {
+ ir.Syms.MallocGCSmallScanNoHeader[i] = typecheck.LookupRuntimeFunc(fmt.Sprintf("mallocgcSmallScanNoHeaderSC%d", i))
+ }
+ for i := 1; i < len(ir.Syms.MallocGCTiny); i++ {
+ ir.Syms.MallocGCTiny[i] = typecheck.LookupRuntimeFunc(fmt.Sprintf("mallocTiny%d", i))
+ }
ir.Syms.MallocGC = typecheck.LookupRuntimeFunc("mallocgc")
ir.Syms.Memmove = typecheck.LookupRuntimeFunc("memmove")
ir.Syms.Msanread = typecheck.LookupRuntimeFunc("msanread")
@@ -690,7 +700,7 @@ func allocAlign(t *types.Type) int64 {
func (s *state) newHeapaddr(n *ir.Name) {
size := allocSize(n.Type())
if n.Type().HasPointers() || size >= maxAggregatedHeapAllocation || size == 0 {
- s.setHeapaddr(n.Pos(), n, s.newObject(n.Type(), nil))
+ s.setHeapaddr(n.Pos(), n, s.newObject(n.Type()))
return
}
@@ -709,7 +719,7 @@ func (s *state) newHeapaddr(n *ir.Name) {
// Make an allocation, but the type being allocated is just
// the first pending object. We will come back and update it
// later if needed.
- allocCall = s.newObject(n.Type(), nil)
+ allocCall = s.newObjectNonSpecialized(n.Type(), nil)
} else {
allocCall = s.pendingHeapAllocations[0].Args[0]
}
@@ -762,7 +772,11 @@ func (s *state) flushPendingHeapAllocations() {
s.constBool(true), // needZero TODO: false is ok?
call.Args[1], // memory
}
- call.Aux = ssa.StaticAuxCall(ir.Syms.MallocGC, s.f.ABIDefault.ABIAnalyzeTypes(
+ mallocSym := ir.Syms.MallocGC
+ if specialMallocSym := s.specializedMallocSym(size, false); specialMallocSym != nil {
+ mallocSym = specialMallocSym
+ }
+ call.Aux = ssa.StaticAuxCall(mallocSym, s.f.ABIDefault.ABIAnalyzeTypes(
[]*types.Type{args[0].Type, args[1].Type, args[2].Type},
[]*types.Type{types.Types[types.TUNSAFEPTR]},
))
@@ -774,6 +788,43 @@ func (s *state) flushPendingHeapAllocations() {
ptr.Type = types.Types[types.TUNSAFEPTR]
}
+func (s *state) specializedMallocSym(size int64, hasPointers bool) *obj.LSym {
+ if !s.sizeSpecializedMallocEnabled() {
+ return nil
+ }
+ ptrSize := s.config.PtrSize
+ ptrBits := ptrSize * 8
+ minSizeForMallocHeader := ptrSize * ptrBits
+ heapBitsInSpan := size <= minSizeForMallocHeader
+ if !heapBitsInSpan {
+ return nil
+ }
+ divRoundUp := func(n, a uintptr) uintptr { return (n + a - 1) / a }
+ sizeClass := gc.SizeToSizeClass8[divRoundUp(uintptr(size), gc.SmallSizeDiv)]
+ if hasPointers {
+ return ir.Syms.MallocGCSmallScanNoHeader[sizeClass]
+ }
+ if size < gc.TinySize {
+ return ir.Syms.MallocGCTiny[size]
+ }
+ return ir.Syms.MallocGCSmallNoScan[sizeClass]
+}
+
+func (s *state) sizeSpecializedMallocEnabled() bool {
+ if base.Flag.CompilingRuntime {
+ // The compiler forces the values of the asan, msan, and race flags to false if
+ // we're compiling the runtime, so we lose the information about whether we're
+ // building in asan, msan, or race mode. Because the specialized functions don't
+		// work in those modes, just turn it off in that case.
+		// TODO(matloob): Save the information about whether the flags were passed in
+		// originally, so we can turn off size-specialized malloc in that case instead of
+		// using Instrumenting below. Then we can remove this condition.
+ return false
+ }
+
+ return buildcfg.Experiment.SizeSpecializedMalloc && !base.Flag.Cfg.Instrumenting
+}
+
// setHeapaddr allocates a new PAUTO variable to store ptr (which must be non-nil)
// and then sets it as n's heap address.
func (s *state) setHeapaddr(pos src.XPos, n *ir.Name, ptr *ssa.Value) {
@@ -796,7 +847,24 @@ func (s *state) setHeapaddr(pos src.XPos, n *ir.Name, ptr *ssa.Value) {
}
// newObject returns an SSA value denoting new(typ).
-func (s *state) newObject(typ *types.Type, rtype *ssa.Value) *ssa.Value {
+func (s *state) newObject(typ *types.Type) *ssa.Value {
+ if typ.Size() == 0 {
+ return s.newValue1A(ssa.OpAddr, types.NewPtr(typ), ir.Syms.Zerobase, s.sb)
+ }
+ rtype := s.reflectType(typ)
+ if specialMallocSym := s.specializedMallocSym(typ.Size(), typ.HasPointers()); specialMallocSym != nil {
+ return s.rtcall(specialMallocSym, true, []*types.Type{types.NewPtr(typ)},
+ s.constInt(types.Types[types.TUINTPTR], typ.Size()),
+ rtype,
+ s.constBool(true),
+ )[0]
+ }
+ return s.rtcall(ir.Syms.Newobject, true, []*types.Type{types.NewPtr(typ)}, rtype)[0]
+}
+
+// newObjectNonSpecialized returns an SSA value denoting new(typ). It does
+// not emit calls to the size-specialized malloc functions.
+func (s *state) newObjectNonSpecialized(typ *types.Type, rtype *ssa.Value) *ssa.Value {
if typ.Size() == 0 {
return s.newValue1A(ssa.OpAddr, types.NewPtr(typ), ir.Syms.Zerobase, s.sb)
}
@@ -1020,6 +1088,9 @@ type state struct {
// First argument of append calls that could be stack allocated.
appendTargets map[ir.Node]bool
+
+ // Block starting position, indexed by block id.
+ blockStarts []src.XPos
}
type funcLine struct {
@@ -1078,6 +1149,9 @@ func (s *state) startBlock(b *ssa.Block) {
s.curBlock = b
s.vars = map[ir.Node]*ssa.Value{}
clear(s.fwdVars)
+ for len(s.blockStarts) <= int(b.ID) {
+ s.blockStarts = append(s.blockStarts, src.NoXPos)
+ }
}
// endBlock marks the end of generating code for the current block.
@@ -1104,6 +1178,9 @@ func (s *state) endBlock() *ssa.Block {
b.Pos = src.NoXPos
} else {
b.Pos = s.lastPos
+ if s.blockStarts[b.ID] == src.NoXPos {
+ s.blockStarts[b.ID] = s.lastPos
+ }
}
return b
}
@@ -1120,6 +1197,11 @@ func (s *state) pushLine(line src.XPos) {
} else {
s.lastPos = line
}
+ // The first position we see for a new block is its starting position
+ // (the line number for its phis, if any).
+ if b := s.curBlock; b != nil && s.blockStarts[b.ID] == src.NoXPos {
+ s.blockStarts[b.ID] = line
+ }
s.line = append(s.line, line)
}
@@ -2574,13 +2656,13 @@ var fpConvOpToSSA = map[twoTypes]twoOpsAndType{
{types.TFLOAT32, types.TUINT8}: {ssa.OpCvt32Fto32, ssa.OpTrunc32to8, types.TINT32},
{types.TFLOAT32, types.TUINT16}: {ssa.OpCvt32Fto32, ssa.OpTrunc32to16, types.TINT32},
- {types.TFLOAT32, types.TUINT32}: {ssa.OpCvt32Fto64, ssa.OpTrunc64to32, types.TINT64}, // go wide to dodge unsigned
- {types.TFLOAT32, types.TUINT64}: {ssa.OpInvalid, ssa.OpCopy, types.TUINT64}, // Cvt32Fto64U, branchy code expansion instead
+	{types.TFLOAT32, types.TUINT32}: {ssa.OpInvalid, ssa.OpCopy, types.TINT64}, // Cvt32Fto32U, branchy code expansion instead
+ {types.TFLOAT32, types.TUINT64}: {ssa.OpInvalid, ssa.OpCopy, types.TUINT64}, // Cvt32Fto64U, branchy code expansion instead
{types.TFLOAT64, types.TUINT8}: {ssa.OpCvt64Fto32, ssa.OpTrunc32to8, types.TINT32},
{types.TFLOAT64, types.TUINT16}: {ssa.OpCvt64Fto32, ssa.OpTrunc32to16, types.TINT32},
- {types.TFLOAT64, types.TUINT32}: {ssa.OpCvt64Fto64, ssa.OpTrunc64to32, types.TINT64}, // go wide to dodge unsigned
- {types.TFLOAT64, types.TUINT64}: {ssa.OpInvalid, ssa.OpCopy, types.TUINT64}, // Cvt64Fto64U, branchy code expansion instead
+ {types.TFLOAT64, types.TUINT32}: {ssa.OpInvalid, ssa.OpCopy, types.TINT64}, // Cvt64Fto32U, branchy code expansion instead
+ {types.TFLOAT64, types.TUINT64}: {ssa.OpInvalid, ssa.OpCopy, types.TUINT64}, // Cvt64Fto64U, branchy code expansion instead
// float
{types.TFLOAT64, types.TFLOAT32}: {ssa.OpCvt64Fto32F, ssa.OpCopy, types.TFLOAT32},
@@ -2797,7 +2879,19 @@ func (s *state) conv(n ir.Node, v *ssa.Value, ft, tt *types.Type) *ssa.Value {
}
if ft.IsFloat() || tt.IsFloat() {
- conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]
+ cft, ctt := s.concreteEtype(ft), s.concreteEtype(tt)
+ conv, ok := fpConvOpToSSA[twoTypes{cft, ctt}]
+		// The conversion-op table changed; this restores the old behavior when ConvertHash does not match this position.
+ // use salted hash to distinguish unsigned convert at a Pos from signed convert at a Pos
+ if ctt == types.TUINT32 && ft.IsFloat() && !base.ConvertHash.MatchPosWithInfo(n.Pos(), "U", nil) {
+ // revert to old behavior
+ conv.op1 = ssa.OpCvt64Fto64
+ if cft == types.TFLOAT32 {
+ conv.op1 = ssa.OpCvt32Fto64
+ }
+ conv.op2 = ssa.OpTrunc64to32
+
+ }
if s.config.RegSize == 4 && Arch.LinkArch.Family != sys.MIPS && !s.softFloat {
if conv1, ok1 := fpConvOpToSSA32[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
conv = conv1
@@ -2860,10 +2954,23 @@ func (s *state) conv(n ir.Node, v *ssa.Value, ft, tt *types.Type) *ssa.Value {
}
// ft is float32 or float64, and tt is unsigned integer
if ft.Size() == 4 {
- return s.float32ToUint64(n, v, ft, tt)
+ switch tt.Size() {
+ case 8:
+ return s.float32ToUint64(n, v, ft, tt)
+ case 4, 2, 1:
+ // TODO should 2 and 1 saturate or truncate?
+ return s.float32ToUint32(n, v, ft, tt)
+ }
}
if ft.Size() == 8 {
- return s.float64ToUint64(n, v, ft, tt)
+ switch tt.Size() {
+ case 8:
+ return s.float64ToUint64(n, v, ft, tt)
+ case 4, 2, 1:
+ // TODO should 2 and 1 saturate or truncate?
+ return s.float64ToUint32(n, v, ft, tt)
+ }
+
}
s.Fatalf("weird float to unsigned integer conversion %v -> %v", ft, tt)
return nil
@@ -3581,11 +3688,10 @@ func (s *state) exprCheckPtr(n ir.Node, checkPtrOK bool) *ssa.Value {
case ir.ONEW:
n := n.(*ir.UnaryExpr)
- var rtype *ssa.Value
if x, ok := n.X.(*ir.DynamicType); ok && x.Op() == ir.ODYNAMICTYPE {
- rtype = s.expr(x.RType)
+ return s.newObjectNonSpecialized(n.Type().Elem(), s.expr(x.RType))
}
- return s.newObject(n.Type().Elem(), rtype)
+ return s.newObject(n.Type().Elem())
case ir.OUNSAFEADD:
n := n.(*ir.BinaryExpr)
@@ -5553,7 +5659,9 @@ func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n ir.Node, x *ssa.Value, ft,
// equal to 10000000001; that rounds up, and the 1 cannot
// be lost else it would round down if the LSB of the
// candidate mantissa is 0.
+
cmp := s.newValue2(cvttab.leq, types.Types[types.TBOOL], s.zeroVal(ft), x)
+
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
@@ -5779,34 +5887,63 @@ func (s *state) float64ToUint32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ss
func (s *state) floatToUint(cvttab *f2uCvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
// cutoff:=1<<(intY_Size-1)
// if x < floatX(cutoff) {
- // result = uintY(x)
+ // result = uintY(x) // bThen
+ // // gated by ConvertHash, clamp negative inputs to zero
+ // if x < 0 { // unlikely
+ // result = 0 // bZero
+ // }
// } else {
- // y = x - floatX(cutoff)
+ // y = x - floatX(cutoff) // bElse
// z = uintY(y)
// result = z | -(cutoff)
// }
+
cutoff := cvttab.floatValue(s, ft, float64(cvttab.cutoff))
- cmp := s.newValue2(cvttab.ltf, types.Types[types.TBOOL], x, cutoff)
+ cmp := s.newValueOrSfCall2(cvttab.ltf, types.Types[types.TBOOL], x, cutoff)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.SetControl(cmp)
b.Likely = ssa.BranchLikely
- bThen := s.f.NewBlock(ssa.BlockPlain)
+ var bThen, bZero *ssa.Block
+ // use salted hash to distinguish unsigned convert at a Pos from signed convert at a Pos
+ newConversion := base.ConvertHash.MatchPosWithInfo(n.Pos(), "U", nil)
+ if newConversion {
+ bZero = s.f.NewBlock(ssa.BlockPlain)
+ bThen = s.f.NewBlock(ssa.BlockIf)
+ } else {
+ bThen = s.f.NewBlock(ssa.BlockPlain)
+ }
+
bElse := s.f.NewBlock(ssa.BlockPlain)
bAfter := s.f.NewBlock(ssa.BlockPlain)
b.AddEdgeTo(bThen)
s.startBlock(bThen)
- a0 := s.newValue1(cvttab.cvt2U, tt, x)
+ a0 := s.newValueOrSfCall1(cvttab.cvt2U, tt, x)
s.vars[n] = a0
- s.endBlock()
- bThen.AddEdgeTo(bAfter)
+
+ if newConversion {
+ cmpz := s.newValueOrSfCall2(cvttab.ltf, types.Types[types.TBOOL], x, cvttab.floatValue(s, ft, 0.0))
+ s.endBlock()
+ bThen.SetControl(cmpz)
+ bThen.AddEdgeTo(bZero)
+ bThen.Likely = ssa.BranchUnlikely
+ bThen.AddEdgeTo(bAfter)
+
+ s.startBlock(bZero)
+ s.vars[n] = cvttab.intValue(s, tt, 0)
+ s.endBlock()
+ bZero.AddEdgeTo(bAfter)
+ } else {
+ s.endBlock()
+ bThen.AddEdgeTo(bAfter)
+ }
b.AddEdgeTo(bElse)
s.startBlock(bElse)
- y := s.newValue2(cvttab.subf, ft, x, cutoff)
- y = s.newValue1(cvttab.cvt2U, tt, y)
+ y := s.newValueOrSfCall2(cvttab.subf, ft, x, cutoff)
+ y = s.newValueOrSfCall1(cvttab.cvt2U, tt, y)
z := cvttab.intValue(s, tt, int64(-cvttab.cutoff))
a1 := s.newValue2(cvttab.or, tt, y, z)
s.vars[n] = a1
@@ -5827,6 +5964,25 @@ func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Val
if n.ITab != nil {
targetItab = s.expr(n.ITab)
}
+
+ if n.UseNilPanic {
+ if commaok {
+ base.Fatalf("unexpected *ir.TypeAssertExpr with UseNilPanic == true && commaok == true")
+ }
+ if n.Type().IsInterface() {
+			// Currently we do not expect the compiler to emit type asserts with UseNilPanic that assert to an interface type.
+ // If needed, this can be relaxed in the future, but for now we can assert that.
+ base.Fatalf("unexpected *ir.TypeAssertExpr with UseNilPanic == true && Type().IsInterface() == true")
+ }
+ typs := s.f.Config.Types
+ iface = s.newValue2(
+ ssa.OpIMake,
+ iface.Type,
+ s.nilCheck(s.newValue1(ssa.OpITab, typs.BytePtr, iface)),
+ s.newValue1(ssa.OpIData, typs.BytePtr, iface),
+ )
+ }
+
return s.dottype1(n.Pos(), n.X.Type(), n.Type(), iface, nil, target, targetItab, commaok, n.Descriptor)
}
diff --git a/src/cmd/compile/internal/test/float_test.go b/src/cmd/compile/internal/test/float_test.go
index 9e61148c5297e5..7a5e27870f97f0 100644
--- a/src/cmd/compile/internal/test/float_test.go
+++ b/src/cmd/compile/internal/test/float_test.go
@@ -623,6 +623,110 @@ func TestInf(t *testing.T) {
}
}
+//go:noinline
+func isNaNOrGtZero64(x float64) bool {
+ return math.IsNaN(x) || x > 0
+}
+
+//go:noinline
+func isNaNOrGteZero64(x float64) bool {
+ return x >= 0 || math.IsNaN(x)
+}
+
+//go:noinline
+func isNaNOrLtZero64(x float64) bool {
+ return x < 0 || math.IsNaN(x)
+}
+
+//go:noinline
+func isNaNOrLteZero64(x float64) bool {
+ return math.IsNaN(x) || x <= 0
+}
+
+func TestFusedNaNChecks64(t *testing.T) {
+ tests := []struct {
+ value float64
+ isZero bool
+ isGreaterThanZero bool
+ isLessThanZero bool
+ isNaN bool
+ }{
+ {value: 0.0, isZero: true},
+ {value: math.Copysign(0, -1), isZero: true},
+ {value: 1.0, isGreaterThanZero: true},
+ {value: -1.0, isLessThanZero: true},
+ {value: math.Inf(1), isGreaterThanZero: true},
+ {value: math.Inf(-1), isLessThanZero: true},
+ {value: math.NaN(), isNaN: true},
+ }
+
+ check := func(name string, f func(x float64) bool, value float64, want bool) {
+ got := f(value)
+ if got != want {
+ t.Errorf("%v(%g): want %v, got %v", name, value, want, got)
+ }
+ }
+
+ for _, test := range tests {
+ check("isNaNOrGtZero64", isNaNOrGtZero64, test.value, test.isNaN || test.isGreaterThanZero)
+ check("isNaNOrGteZero64", isNaNOrGteZero64, test.value, test.isNaN || test.isGreaterThanZero || test.isZero)
+ check("isNaNOrLtZero64", isNaNOrLtZero64, test.value, test.isNaN || test.isLessThanZero)
+ check("isNaNOrLteZero64", isNaNOrLteZero64, test.value, test.isNaN || test.isLessThanZero || test.isZero)
+ }
+}
+
+//go:noinline
+func isNaNOrGtZero32(x float32) bool {
+ return x > 0 || x != x
+}
+
+//go:noinline
+func isNaNOrGteZero32(x float32) bool {
+ return x != x || x >= 0
+}
+
+//go:noinline
+func isNaNOrLtZero32(x float32) bool {
+ return x != x || x < 0
+}
+
+//go:noinline
+func isNaNOrLteZero32(x float32) bool {
+ return x <= 0 || x != x
+}
+
+func TestFusedNaNChecks32(t *testing.T) {
+ tests := []struct {
+ value float32
+ isZero bool
+ isGreaterThanZero bool
+ isLessThanZero bool
+ isNaN bool
+ }{
+ {value: 0.0, isZero: true},
+ {value: float32(math.Copysign(0, -1)), isZero: true},
+ {value: 1.0, isGreaterThanZero: true},
+ {value: -1.0, isLessThanZero: true},
+ {value: float32(math.Inf(1)), isGreaterThanZero: true},
+ {value: float32(math.Inf(-1)), isLessThanZero: true},
+ {value: float32(math.NaN()), isNaN: true},
+ }
+
+ check := func(name string, f func(x float32) bool, value float32, want bool) {
+ got := f(value)
+ if got != want {
+ t.Errorf("%v(%g): want %v, got %v", name, value, want, got)
+ }
+ }
+
+ for _, test := range tests {
+ check("isNaNOrGtZero32", isNaNOrGtZero32, test.value, test.isNaN || test.isGreaterThanZero)
+ check("isNaNOrGteZero32", isNaNOrGteZero32, test.value, test.isNaN || test.isGreaterThanZero || test.isZero)
+ check("isNaNOrLtZero32", isNaNOrLtZero32, test.value, test.isNaN || test.isLessThanZero)
+ check("isNaNOrLteZero32", isNaNOrLteZero32, test.value, test.isNaN || test.isLessThanZero || test.isZero)
+ }
+}
+
var sinkFloat float64
func BenchmarkMul2(b *testing.B) {
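These tests exercise the new OrB rules in rewritegeneric.go, which fold an explicit NaN check into the comparison itself. A minimal sketch (not part of the patch) of the identity behind them: every ordered comparison involving a NaN is false, so x != x || x < c holds exactly when c <= x does not.

package main

import (
	"fmt"
	"math"
)

func fused(x, c float64) bool   { return !(c <= x) }       // what the rewrite emits
func unfused(x, c float64) bool { return x != x || x < c } // what the source says

func main() {
	for _, x := range []float64{math.NaN(), math.Inf(-1), -1, 0, 1, math.Inf(1)} {
		fmt.Println(x, fused(x, 0) == unfused(x, 0)) // always true
	}
}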
diff --git a/src/cmd/compile/internal/types2/alias.go b/src/cmd/compile/internal/types2/alias.go
index 90dda18cc88426..d306600ebd2710 100644
--- a/src/cmd/compile/internal/types2/alias.go
+++ b/src/cmd/compile/internal/types2/alias.go
@@ -113,7 +113,6 @@ func unalias(a0 *Alias) Type {
for a := a0; a != nil; a, _ = t.(*Alias) {
t = a.fromRHS
}
-
// It's fine to memoize nil types since it's the zero value for actual.
// It accomplishes nothing.
a0.actual = t
diff --git a/src/cmd/compile/internal/types2/api_test.go b/src/cmd/compile/internal/types2/api_test.go
index 0d3c8b8e3e5e0f..4b7012e6c45e9f 100644
--- a/src/cmd/compile/internal/types2/api_test.go
+++ b/src/cmd/compile/internal/types2/api_test.go
@@ -2468,8 +2468,8 @@ func TestInstantiateErrors(t *testing.T) {
t.Fatalf("Instantiate(%v, %v) returned nil error, want non-nil", T, test.targs)
}
- var argErr *ArgumentError
- if !errors.As(err, &argErr) {
+ argErr, ok := errors.AsType[*ArgumentError](err)
+ if !ok {
t.Fatalf("Instantiate(%v, %v): error is not an *ArgumentError", T, test.targs)
}
@@ -2484,8 +2484,8 @@ func TestArgumentErrorUnwrapping(t *testing.T) {
Index: 1,
Err: Error{Msg: "test"},
}
- var e Error
- if !errors.As(err, &e) {
+ e, ok := errors.AsType[Error](err)
+ if !ok {
t.Fatalf("error %v does not wrap types.Error", err)
}
if e.Msg != "test" {
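The test updates above switch from errors.As to the generic errors.AsType, which returns the matched value and a bool instead of filling in a pointer passed by address. A minimal sketch (not part of the patch; it assumes a toolchain that already ships errors.AsType) comparing the two styles:

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func main() {
	_, err := os.Open("no-such-file")

	// Old style: declare a target variable and pass its address.
	var pathErr *fs.PathError
	if errors.As(err, &pathErr) {
		fmt.Println("As:", pathErr.Path)
	}

	// New style: the type argument replaces the target variable.
	if pathErr, ok := errors.AsType[*fs.PathError](err); ok {
		fmt.Println("AsType:", pathErr.Path)
	}
}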
diff --git a/src/cmd/compile/internal/types2/assignments.go b/src/cmd/compile/internal/types2/assignments.go
index 8af5f4037a5e3e..87f5c8beeafcaf 100644
--- a/src/cmd/compile/internal/types2/assignments.go
+++ b/src/cmd/compile/internal/types2/assignments.go
@@ -91,7 +91,7 @@ func (check *Checker) assignment(x *operand, T Type, context string) {
// x.typ is typed
// A generic (non-instantiated) function value cannot be assigned to a variable.
- if sig, _ := under(x.typ).(*Signature); sig != nil && sig.TypeParams().Len() > 0 {
+ if sig, _ := x.typ.Underlying().(*Signature); sig != nil && sig.TypeParams().Len() > 0 {
check.errorf(x, WrongTypeArgCount, "cannot use generic function %s without instantiation in %s", x, context)
x.mode = invalid
return
@@ -261,7 +261,7 @@ func (check *Checker) assignVar(lhs, rhs syntax.Expr, x *operand, context string
var target *target
// avoid calling ExprString if not needed
if T != nil {
- if _, ok := under(T).(*Signature); ok {
+ if _, ok := T.Underlying().(*Signature); ok {
target = newTarget(T, ExprString(lhs))
}
}
diff --git a/src/cmd/compile/internal/types2/builtins.go b/src/cmd/compile/internal/types2/builtins.go
index df207a2746d627..84acb4ca484164 100644
--- a/src/cmd/compile/internal/types2/builtins.go
+++ b/src/cmd/compile/internal/types2/builtins.go
@@ -144,7 +144,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
// len(x)
mode := invalid
var val constant.Value
- switch t := arrayPtrDeref(under(x.typ)).(type) {
+ switch t := arrayPtrDeref(x.typ.Underlying()).(type) {
case *Basic:
if isString(t) && id == _Len {
if x.mode == constant_ {
@@ -203,7 +203,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
if mode == invalid {
// avoid error if underlying type is invalid
- if isValid(under(x.typ)) {
+ if isValid(x.typ.Underlying()) {
code := InvalidCap
if id == _Len {
code = InvalidLen
@@ -322,7 +322,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
// (applyTypeFunc never calls f with a type parameter)
f := func(typ Type) Type {
assert(!isTypeParam(typ))
- if t, _ := under(typ).(*Basic); t != nil {
+ if t, _ := typ.Underlying().(*Basic); t != nil {
switch t.kind {
case Float32:
return Typ[Complex64]
@@ -472,7 +472,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
// (applyTypeFunc never calls f with a type parameter)
f := func(typ Type) Type {
assert(!isTypeParam(typ))
- if t, _ := under(typ).(*Basic); t != nil {
+ if t, _ := typ.Underlying().(*Basic); t != nil {
switch t.kind {
case Complex64:
return Typ[Float32]
@@ -1020,7 +1020,7 @@ func hasVarSize(t Type, seen map[*Named]bool) (varSized bool) {
}()
}
- switch u := under(t).(type) {
+ switch u := t.Underlying().(type) {
case *Array:
return hasVarSize(u.elem, seen)
case *Struct:
@@ -1112,7 +1112,7 @@ func makeSig(res Type, args ...Type) *Signature {
// otherwise it returns typ.
func arrayPtrDeref(typ Type) Type {
if p, ok := Unalias(typ).(*Pointer); ok {
- if a, _ := under(p.base).(*Array); a != nil {
+ if a, _ := p.base.Underlying().(*Array); a != nil {
return a
}
}
diff --git a/src/cmd/compile/internal/types2/call.go b/src/cmd/compile/internal/types2/call.go
index b7a2ebb41ec976..aca205ad985013 100644
--- a/src/cmd/compile/internal/types2/call.go
+++ b/src/cmd/compile/internal/types2/call.go
@@ -205,7 +205,7 @@ func (check *Checker) callExpr(x *operand, call *syntax.CallExpr) exprKind {
case 1:
check.expr(nil, x, call.ArgList[0])
if x.mode != invalid {
- if t, _ := under(T).(*Interface); t != nil && !isTypeParam(T) {
+ if t, _ := T.Underlying().(*Interface); t != nil && !isTypeParam(T) {
if !t.IsMethodSet() {
check.errorf(call, MisplacedConstraintIface, "cannot use interface %s in conversion (contains specific type constraints or is comparable)", T)
break
@@ -812,7 +812,7 @@ func (check *Checker) selector(x *operand, e *syntax.SelectorExpr, def *TypeName
obj, index, indirect = lookupFieldOrMethod(x.typ, x.mode == variable, check.pkg, sel, false)
if obj == nil {
// Don't report another error if the underlying type was invalid (go.dev/issue/49541).
- if !isValid(under(x.typ)) {
+ if !isValid(x.typ.Underlying()) {
goto Error
}
diff --git a/src/cmd/compile/internal/types2/const.go b/src/cmd/compile/internal/types2/const.go
index 5e5bc74ba34ed6..b68d72de4d27f4 100644
--- a/src/cmd/compile/internal/types2/const.go
+++ b/src/cmd/compile/internal/types2/const.go
@@ -33,7 +33,7 @@ func (check *Checker) overflow(x *operand, opPos syntax.Pos) {
// x.typ cannot be a type parameter (type
// parameters cannot be constant types).
if isTyped(x.typ) {
- check.representable(x, under(x.typ).(*Basic))
+ check.representable(x, x.typ.Underlying().(*Basic))
return
}
diff --git a/src/cmd/compile/internal/types2/conversions.go b/src/cmd/compile/internal/types2/conversions.go
index 0ad79afe71c409..d0920d7ef1006d 100644
--- a/src/cmd/compile/internal/types2/conversions.go
+++ b/src/cmd/compile/internal/types2/conversions.go
@@ -18,7 +18,7 @@ func (check *Checker) conversion(x *operand, T Type) {
constArg := x.mode == constant_
constConvertibleTo := func(T Type, val *constant.Value) bool {
- switch t, _ := under(T).(*Basic); {
+ switch t, _ := T.Underlying().(*Basic); {
case t == nil:
// nothing to do
case representableConst(x.val, check, t, val):
@@ -142,8 +142,8 @@ func (x *operand) convertibleTo(check *Checker, T Type, cause *string) bool {
origT := T
V := Unalias(x.typ)
T = Unalias(T)
- Vu := under(V)
- Tu := under(T)
+ Vu := V.Underlying()
+ Tu := T.Underlying()
Vp, _ := V.(*TypeParam)
Tp, _ := T.(*TypeParam)
@@ -158,7 +158,7 @@ func (x *operand) convertibleTo(check *Checker, T Type, cause *string) bool {
// and their pointer base types are not type parameters"
if V, ok := V.(*Pointer); ok {
if T, ok := T.(*Pointer); ok {
- if IdenticalIgnoreTags(under(V.base), under(T.base)) && !isTypeParam(V.base) && !isTypeParam(T.base) {
+ if IdenticalIgnoreTags(V.base.Underlying(), T.base.Underlying()) && !isTypeParam(V.base) && !isTypeParam(T.base) {
return true
}
}
@@ -211,7 +211,7 @@ func (x *operand) convertibleTo(check *Checker, T Type, cause *string) bool {
return false
}
case *Pointer:
- if a, _ := under(a.Elem()).(*Array); a != nil {
+ if a, _ := a.Elem().Underlying().(*Array); a != nil {
if Identical(s.Elem(), a.Elem()) {
if check == nil || check.allowVersion(go1_17) {
return true
@@ -292,23 +292,23 @@ func (x *operand) convertibleTo(check *Checker, T Type, cause *string) bool {
}
func isUintptr(typ Type) bool {
- t, _ := under(typ).(*Basic)
+ t, _ := typ.Underlying().(*Basic)
return t != nil && t.kind == Uintptr
}
func isUnsafePointer(typ Type) bool {
- t, _ := under(typ).(*Basic)
+ t, _ := typ.Underlying().(*Basic)
return t != nil && t.kind == UnsafePointer
}
func isPointer(typ Type) bool {
- _, ok := under(typ).(*Pointer)
+ _, ok := typ.Underlying().(*Pointer)
return ok
}
func isBytesOrRunes(typ Type) bool {
- if s, _ := under(typ).(*Slice); s != nil {
- t, _ := under(s.elem).(*Basic)
+ if s, _ := typ.Underlying().(*Slice); s != nil {
+ t, _ := s.elem.Underlying().(*Basic)
return t != nil && (t.kind == Byte || t.kind == Rune)
}
return false
diff --git a/src/cmd/compile/internal/types2/decl.go b/src/cmd/compile/internal/types2/decl.go
index 34105816a65af3..b830cb6f4f3c1c 100644
--- a/src/cmd/compile/internal/types2/decl.go
+++ b/src/cmd/compile/internal/types2/decl.go
@@ -225,8 +225,8 @@ func (check *Checker) validCycle(obj Object) (valid bool) {
start := obj.color() - grey // index of obj in objPath
cycle := check.objPath[start:]
tparCycle := false // if set, the cycle is through a type parameter list
- nval := 0 // number of (constant or variable) values in the cycle; valid if !generic
- ndef := 0 // number of type definitions in the cycle; valid if !generic
+ nval := 0 // number of (constant or variable) values in the cycle
+ ndef := 0 // number of type definitions in the cycle
loop:
for _, obj := range cycle {
switch obj := obj.(type) {
@@ -235,7 +235,7 @@ loop:
case *TypeName:
// If we reach a generic type that is part of a cycle
// and we are in a type parameter list, we have a cycle
- // through a type parameter list, which is invalid.
+ // through a type parameter list.
if check.inTParamList && isGeneric(obj.typ) {
tparCycle = true
break loop
@@ -286,20 +286,23 @@ loop:
}()
}
- if !tparCycle {
- // A cycle involving only constants and variables is invalid but we
- // ignore them here because they are reported via the initialization
- // cycle check.
- if nval == len(cycle) {
- return true
- }
+ // Cycles through type parameter lists are ok (go.dev/issue/68162).
+ if tparCycle {
+ return true
+ }
- // A cycle involving only types (and possibly functions) must have at least
- // one type definition to be permitted: If there is no type definition, we
- // have a sequence of alias type names which will expand ad infinitum.
- if nval == 0 && ndef > 0 {
- return true
- }
+ // A cycle involving only constants and variables is invalid but we
+ // ignore them here because they are reported via the initialization
+ // cycle check.
+ if nval == len(cycle) {
+ return true
+ }
+
+ // A cycle involving only types (and possibly functions) must have at least
+ // one type definition to be permitted: If there is no type definition, we
+ // have a sequence of alias type names which will expand ad infinitum.
+ if nval == 0 && ndef > 0 {
+ return true
}
check.cycleError(cycle, firstInSrc(cycle))
@@ -388,7 +391,7 @@ func (check *Checker) constDecl(obj *Const, typ, init syntax.Expr, inherited boo
if !isConstType(t) {
// don't report an error if the type is an invalid C (defined) type
// (go.dev/issue/22090)
- if isValid(under(t)) {
+ if isValid(t.Underlying()) {
check.errorf(typ, InvalidConstType, "invalid constant type %s", t)
}
obj.typ = Typ[Invalid]
@@ -473,7 +476,7 @@ func (check *Checker) isImportedConstraint(typ Type) bool {
if named == nil || named.obj.pkg == check.pkg || named.obj.pkg == nil {
return false
}
- u, _ := named.under().(*Interface)
+ u, _ := named.Underlying().(*Interface)
return u != nil && !u.IsMethodSet()
}
@@ -555,31 +558,33 @@ func (check *Checker) typeDecl(obj *TypeName, tdecl *syntax.TypeDecl, def *TypeN
named := check.newNamed(obj, nil, nil)
setDefType(def, named)
+ // The RHS of a named N can be nil if, for example, N is defined as a cycle of aliases with
+ // gotypesalias=0. Consider:
+ //
+ // type D N // N.resolve() will panic
+ // type N A
+ // type A = N // N.fromRHS is not set before N.resolve(), since A does not call setDefType
+ //
+ // There is likely a better way to detect such cases, but it may not be worth the effort.
+ // Instead, we briefly permit a nil N.fromRHS while type-checking D.
+ named.allowNilRHS = true
+ defer (func() { named.allowNilRHS = false })()
+
if tdecl.TParamList != nil {
check.openScope(tdecl, "type parameters")
defer check.closeScope()
check.collectTypeParams(&named.tparams, tdecl.TParamList)
}
- // determine underlying type of named
rhs = check.definedType(tdecl.Type, obj)
assert(rhs != nil)
named.fromRHS = rhs
- // If the underlying type was not set while type-checking the right-hand
- // side, it is invalid and an error should have been reported elsewhere.
- if named.underlying == nil {
- named.underlying = Typ[Invalid]
- }
-
- // Disallow a lone type parameter as the RHS of a type declaration (go.dev/issue/45639).
- // We don't need this restriction anymore if we make the underlying type of a type
- // parameter its constraint interface: if the RHS is a lone type parameter, we will
- // use its underlying type (like we do for any RHS in a type declaration), and its
- // underlying type is an interface and the type declaration is well defined.
+ // spec: "In a type definition the given type cannot be a type parameter."
+ // (See also go.dev/issue/45639.)
if isTypeParam(rhs) {
check.error(tdecl.Type, MisplacedTypeParam, "cannot use a type parameter as RHS in type declaration")
- named.underlying = Typ[Invalid]
+ named.fromRHS = Typ[Invalid]
}
}
@@ -721,7 +726,7 @@ func (check *Checker) collectMethods(obj *TypeName) {
}
func (check *Checker) checkFieldUniqueness(base *Named) {
- if t, _ := base.under().(*Struct); t != nil {
+ if t, _ := base.Underlying().(*Struct); t != nil {
var mset objset
for i := 0; i < base.NumMethods(); i++ {
m := base.Method(i)
diff --git a/src/cmd/compile/internal/types2/expr.go b/src/cmd/compile/internal/types2/expr.go
index e5f9a1c6f7cb49..d62b0247578d6e 100644
--- a/src/cmd/compile/internal/types2/expr.go
+++ b/src/cmd/compile/internal/types2/expr.go
@@ -361,7 +361,7 @@ func (check *Checker) updateExprType(x syntax.Expr, typ Type, final bool) {
// If the new type is not final and still untyped, just
// update the recorded type.
if !final && isUntyped(typ) {
- old.typ = under(typ).(*Basic)
+ old.typ = typ.Underlying().(*Basic)
check.untyped[x] = old
return
}
@@ -431,7 +431,7 @@ func (check *Checker) implicitTypeAndValue(x *operand, target Type) (Type, const
return nil, nil, InvalidUntypedConversion
}
- switch u := under(target).(type) {
+ switch u := target.Underlying().(type) {
case *Basic:
if x.mode == constant_ {
v, code := check.representation(x, u)
@@ -616,7 +616,7 @@ Error:
// incomparableCause returns a more specific cause why typ is not comparable.
// If there is no more specific cause, the result is "".
func (check *Checker) incomparableCause(typ Type) string {
- switch under(typ).(type) {
+ switch typ.Underlying().(type) {
case *Slice, *Signature, *Map:
return compositeKind(typ) + " can only be compared to nil"
}
@@ -963,7 +963,7 @@ type target struct {
// The result is nil if typ is not a signature.
func newTarget(typ Type, desc string) *target {
if typ != nil {
- if sig, _ := under(typ).(*Signature); sig != nil {
+ if sig, _ := typ.Underlying().(*Signature); sig != nil {
return &target{sig, desc}
}
}
@@ -1112,7 +1112,7 @@ func (check *Checker) exprInternal(T *target, x *operand, e syntax.Expr, hint Ty
check.errorf(x, InvalidAssert, invalidOp+"cannot use type assertion on type parameter value %s", x)
goto Error
}
- if _, ok := under(x.typ).(*Interface); !ok {
+ if _, ok := x.typ.Underlying().(*Interface); !ok {
check.errorf(x, InvalidAssert, invalidOp+"%s is not an interface", x)
goto Error
}
diff --git a/src/cmd/compile/internal/types2/gcsizes.go b/src/cmd/compile/internal/types2/gcsizes.go
index 15f3e006425ade..54e8ea23c173c5 100644
--- a/src/cmd/compile/internal/types2/gcsizes.go
+++ b/src/cmd/compile/internal/types2/gcsizes.go
@@ -16,7 +16,7 @@ func (s *gcSizes) Alignof(T Type) (result int64) {
// For arrays and structs, alignment is defined in terms
// of alignment of the elements and fields, respectively.
- switch t := under(T).(type) {
+ switch t := T.Underlying().(type) {
case *Array:
// spec: "For a variable x of array type: unsafe.Alignof(x)
// is the same as unsafe.Alignof(x[0]), but at least 1."
@@ -96,7 +96,7 @@ func (s *gcSizes) Offsetsof(fields []*Var) []int64 {
}
func (s *gcSizes) Sizeof(T Type) int64 {
- switch t := under(T).(type) {
+ switch t := T.Underlying().(type) {
case *Basic:
assert(isTyped(T))
k := t.kind
diff --git a/src/cmd/compile/internal/types2/index.go b/src/cmd/compile/internal/types2/index.go
index 7e16a87332dcdf..ca84184d35afed 100644
--- a/src/cmd/compile/internal/types2/index.go
+++ b/src/cmd/compile/internal/types2/index.go
@@ -35,7 +35,7 @@ func (check *Checker) indexExpr(x *operand, e *syntax.IndexExpr) (isFuncInst boo
return false
case value:
- if sig, _ := under(x.typ).(*Signature); sig != nil && sig.TypeParams().Len() > 0 {
+ if sig, _ := x.typ.Underlying().(*Signature); sig != nil && sig.TypeParams().Len() > 0 {
// function instantiation
return true
}
@@ -50,7 +50,7 @@ func (check *Checker) indexExpr(x *operand, e *syntax.IndexExpr) (isFuncInst boo
// ordinary index expression
valid := false
length := int64(-1) // valid if >= 0
- switch typ := under(x.typ).(type) {
+ switch typ := x.typ.Underlying().(type) {
case *Basic:
if isString(typ) {
valid = true
@@ -73,7 +73,7 @@ func (check *Checker) indexExpr(x *operand, e *syntax.IndexExpr) (isFuncInst boo
x.typ = typ.elem
case *Pointer:
- if typ, _ := under(typ.base).(*Array); typ != nil {
+ if typ, _ := typ.base.Underlying().(*Array); typ != nil {
valid = true
length = typ.len
x.mode = variable
@@ -124,7 +124,7 @@ func (check *Checker) indexExpr(x *operand, e *syntax.IndexExpr) (isFuncInst boo
mode = value
}
case *Pointer:
- if t, _ := under(t.base).(*Array); t != nil {
+ if t, _ := t.base.Underlying().(*Array); t != nil {
l = t.len
e = t.elem
}
@@ -247,7 +247,7 @@ func (check *Checker) sliceExpr(x *operand, e *syntax.SliceExpr) {
// but don't go from untyped string to string.
cu = Typ[String]
if !isTypeParam(x.typ) {
- cu = under(x.typ) // untyped string remains untyped
+ cu = x.typ.Underlying() // untyped string remains untyped
}
}
@@ -292,7 +292,7 @@ func (check *Checker) sliceExpr(x *operand, e *syntax.SliceExpr) {
x.typ = &Slice{elem: u.elem}
case *Pointer:
- if u, _ := under(u.base).(*Array); u != nil {
+ if u, _ := u.base.Underlying().(*Array); u != nil {
valid = true
length = u.len
x.typ = &Slice{elem: u.elem}
diff --git a/src/cmd/compile/internal/types2/infer.go b/src/cmd/compile/internal/types2/infer.go
index 08d422969059f2..e7c59597372413 100644
--- a/src/cmd/compile/internal/types2/infer.go
+++ b/src/cmd/compile/internal/types2/infer.go
@@ -668,7 +668,7 @@ func coreTerm(tpar *TypeParam) (*term, bool) {
if n == 1 {
if debug {
u, _ := commonUnder(tpar, nil)
- assert(under(single.typ) == u)
+ assert(single.typ.Underlying() == u)
}
return single, true
}
diff --git a/src/cmd/compile/internal/types2/instantiate.go b/src/cmd/compile/internal/types2/instantiate.go
index 1c8c12d07cfafa..3c4044ed3c9f2c 100644
--- a/src/cmd/compile/internal/types2/instantiate.go
+++ b/src/cmd/compile/internal/types2/instantiate.go
@@ -83,7 +83,7 @@ func Instantiate(ctxt *Context, orig Type, targs []Type, validate bool) (Type, e
//
// For Named types the resulting instance may be unexpanded.
//
-// check may be nil (when not type-checking syntax); pos is used only only if check is non-nil.
+// check may be nil (when not type-checking syntax); pos is used only if check is non-nil.
func (check *Checker) instance(pos syntax.Pos, orig genericType, targs []Type, expanding *Named, ctxt *Context) (res Type) {
// The order of the contexts below matters: we always prefer instances in the
// expanding instance context in order to preserve reference cycles.
@@ -226,12 +226,12 @@ func (check *Checker) verify(pos syntax.Pos, tparams []*TypeParam, targs []Type,
// If the provided cause is non-nil, it may be set to an error string
// explaining why V does not implement (or satisfy, for constraints) T.
func (check *Checker) implements(V, T Type, constraint bool, cause *string) bool {
- Vu := under(V)
- Tu := under(T)
+ Vu := V.Underlying()
+ Tu := T.Underlying()
if !isValid(Vu) || !isValid(Tu) {
return true // avoid follow-on errors
}
- if p, _ := Vu.(*Pointer); p != nil && !isValid(under(p.base)) {
+ if p, _ := Vu.(*Pointer); p != nil && !isValid(p.base.Underlying()) {
return true // avoid follow-on errors (see go.dev/issue/49541 for an example)
}
@@ -339,7 +339,7 @@ func (check *Checker) implements(V, T Type, constraint bool, cause *string) bool
// If V ∉ t.typ but V ∈ ~t.typ then remember this type
// so we can suggest it as an alternative in the error
// message.
- if alt == nil && !t.tilde && Identical(t.typ, under(t.typ)) {
+ if alt == nil && !t.tilde && Identical(t.typ, t.typ.Underlying()) {
tt := *t
tt.tilde = true
if tt.includes(V) {
diff --git a/src/cmd/compile/internal/types2/lookup.go b/src/cmd/compile/internal/types2/lookup.go
index 624b510dc83dc3..0b9282e1a7cfc2 100644
--- a/src/cmd/compile/internal/types2/lookup.go
+++ b/src/cmd/compile/internal/types2/lookup.go
@@ -152,7 +152,7 @@ func lookupFieldOrMethodImpl(T Type, addressable bool, pkg *Package, name string
// *typ where typ is an interface (incl. a type parameter) has no methods.
if isPtr {
- if _, ok := under(typ).(*Interface); ok {
+ if _, ok := typ.Underlying().(*Interface); ok {
return
}
}
@@ -202,7 +202,7 @@ func lookupFieldOrMethodImpl(T Type, addressable bool, pkg *Package, name string
}
}
- switch t := under(typ).(type) {
+ switch t := typ.Underlying().(type) {
case *Struct:
// look for a matching field and collect embedded types
for i, f := range t.fields {
@@ -373,7 +373,7 @@ func MissingMethod(V Type, T *Interface, static bool) (method *Func, wrongType b
// The comparator is used to compare signatures.
// If a method is missing and cause is not nil, *cause describes the error.
func (check *Checker) missingMethod(V, T Type, static bool, equivalent func(x, y Type) bool, cause *string) (method *Func, wrongType bool) {
- methods := under(T).(*Interface).typeSet().methods // T must be an interface
+ methods := T.Underlying().(*Interface).typeSet().methods // T must be an interface
if len(methods) == 0 {
return nil, false
}
@@ -393,7 +393,7 @@ func (check *Checker) missingMethod(V, T Type, static bool, equivalent func(x, y
var m *Func // method on T we're trying to implement
var f *Func // method on V, if found (state is one of ok, wrongName, wrongSig)
- if u, _ := under(V).(*Interface); u != nil {
+ if u, _ := V.Underlying().(*Interface); u != nil {
tset := u.typeSet()
for _, m = range methods {
_, f = tset.LookupMethod(m.pkg, m.name, false)
@@ -534,7 +534,7 @@ func (check *Checker) hasAllMethods(V, T Type, static bool, equivalent func(x, y
// hasInvalidEmbeddedFields reports whether T is a struct (or a pointer to a struct) that contains
// (directly or indirectly) embedded fields with invalid types.
func hasInvalidEmbeddedFields(T Type, seen map[*Struct]bool) bool {
- if S, _ := under(derefStructPtr(T)).(*Struct); S != nil && !seen[S] {
+ if S, _ := derefStructPtr(T).Underlying().(*Struct); S != nil && !seen[S] {
if seen == nil {
seen = make(map[*Struct]bool)
}
@@ -549,14 +549,14 @@ func hasInvalidEmbeddedFields(T Type, seen map[*Struct]bool) bool {
}
func isInterfacePtr(T Type) bool {
- p, _ := under(T).(*Pointer)
+ p, _ := T.Underlying().(*Pointer)
return p != nil && IsInterface(p.base)
}
// check may be nil.
func (check *Checker) interfacePtrError(T Type) string {
assert(isInterfacePtr(T))
- if p, _ := under(T).(*Pointer); isTypeParam(p.base) {
+ if p, _ := T.Underlying().(*Pointer); isTypeParam(p.base) {
return check.sprintf("type %s is pointer to type parameter, not type parameter", T)
}
return check.sprintf("type %s is pointer to interface, not interface", T)
@@ -629,8 +629,8 @@ func deref(typ Type) (Type, bool) {
// derefStructPtr dereferences typ if it is a (named or unnamed) pointer to a
// (named or unnamed) struct and returns its base. Otherwise it returns typ.
func derefStructPtr(typ Type) Type {
- if p, _ := under(typ).(*Pointer); p != nil {
- if _, ok := under(p.base).(*Struct); ok {
+ if p, _ := typ.Underlying().(*Pointer); p != nil {
+ if _, ok := p.base.Underlying().(*Struct); ok {
return p.base
}
}
diff --git a/src/cmd/compile/internal/types2/named.go b/src/cmd/compile/internal/types2/named.go
index d02b95e874a470..4a28929c513a9b 100644
--- a/src/cmd/compile/internal/types2/named.go
+++ b/src/cmd/compile/internal/types2/named.go
@@ -62,14 +62,14 @@ import (
// - We say that a Named type is "resolved" if its RHS information has been
// loaded or fully type-checked. For Named types constructed from export
// data, this may involve invoking a loader function to extract information
-// from export data. For instantiated named types this involves reading
-// information from their origin.
+// from export data. For instantiated Named types this involves reading
+// information from their origin and substituting type arguments into a
+// "synthetic" RHS; this process is called "expanding" the RHS (see below).
// - We say that a Named type is "expanded" if it is an instantiated type and
-// type parameters in its underlying type and methods have been substituted
-// with the type arguments from the instantiation. A type may be partially
-// expanded if some but not all of these details have been substituted.
-// Similarly, we refer to these individual details (underlying type or
-// method) as being "expanded".
+// type parameters in its RHS and methods have been substituted with the type
+// arguments from the instantiation. A type may be partially expanded if some
+// but not all of these details have been substituted. Similarly, we refer to
+// these individual details (RHS or method) as being "expanded".
// - When all information is known for a named type, we say it is "complete".
//
// Some invariants to keep in mind: each declared Named type has a single
@@ -107,18 +107,17 @@ type Named struct {
check *Checker // non-nil during type-checking; nil otherwise
obj *TypeName // corresponding declared object for declared types; see above for instantiated types
- // fromRHS holds the type (on RHS of declaration) this *Named type is derived
- // from (for cycle reporting). Only used by validType, and therefore does not
- // require synchronization.
- fromRHS Type
+ // flags indicating temporary violations of the invariants for fromRHS and underlying
+ allowNilRHS bool // same as below, as well as briefly during checking of a type declaration
+ allowNilUnderlying bool // may be true from creation via [NewNamed] until [Named.SetUnderlying]
- // information for instantiated types; nil otherwise
- inst *instance
+ underlying Type // underlying type, or nil
+ inst *instance // information for instantiated types; nil otherwise
- mu sync.Mutex // guards all fields below
- state_ uint32 // the current state of this type; must only be accessed atomically
- underlying Type // possibly a *Named during setup; never a *Named once set up completely
- tparams *TypeParamList // type parameters, or nil
+ mu sync.Mutex // guards all fields below
+ state_ uint32 // the current state of this type; must only be accessed atomically
+ fromRHS Type // the declaration RHS this type is derived from
+ tparams *TypeParamList // type parameters, or nil
// methods declared for this type (not the method set of this type)
// Signatures are type-checked lazily.
@@ -145,7 +144,7 @@ type namedState uint32
// Note: the order of states is relevant
const (
- unresolved namedState = iota // tparams, underlying type and methods might be unavailable
+ unresolved namedState = iota // type parameters, RHS, underlying, and methods might be unavailable
resolved // resolve has run; methods might be unexpanded (for instances)
loaded // loader has run; constraints might be unexpanded (for generic types)
complete // all data is known
@@ -158,10 +157,18 @@ func NewNamed(obj *TypeName, underlying Type, methods []*Func) *Named {
if asNamed(underlying) != nil {
panic("underlying type must not be *Named")
}
- return (*Checker)(nil).newNamed(obj, underlying, methods)
+ n := (*Checker)(nil).newNamed(obj, underlying, methods)
+ if underlying == nil {
+ n.allowNilRHS = true
+ n.allowNilUnderlying = true
+ } else {
+ n.SetUnderlying(underlying)
+ }
+ return n
}
-// resolve resolves the type parameters, methods, and underlying type of n.
+// resolve resolves the type parameters, methods, and RHS of n.
//
// For the purposes of resolution, there are three categories of named types:
// 1. Instantiated Types
@@ -171,18 +178,17 @@ func NewNamed(obj *TypeName, underlying Type, methods []*Func) *Named {
// Note that the above form a partition.
//
// Instantiated types:
-// Type parameters, methods, and underlying type of n become accessible,
-// though methods are lazily populated as needed.
+// Type parameters, methods, and RHS of n become accessible, though methods
+// are lazily populated as needed.
//
// Lazy loaded types:
-// Type parameters, methods, and underlying type of n become accessible
-// and are fully expanded.
+// Type parameters, methods, and RHS of n become accessible and are fully
+// expanded.
//
// All others:
-// Effectively, nothing happens. The underlying type of n may still be
-// a named type.
+// Effectively, nothing happens.
func (n *Named) resolve() *Named {
- if n.state() > unresolved { // avoid locking below
+ if n.state() >= resolved { // avoid locking below
return n
}
@@ -191,21 +197,19 @@ func (n *Named) resolve() *Named {
n.mu.Lock()
defer n.mu.Unlock()
- if n.state() > unresolved {
+ if n.state() >= resolved {
return n
}
if n.inst != nil {
- assert(n.underlying == nil) // n is an unresolved instance
- assert(n.loader == nil) // instances are created by instantiation, in which case n.loader is nil
+ assert(n.fromRHS == nil) // instantiated types are not declared types
+ assert(n.loader == nil) // cannot import an instantiation
orig := n.inst.orig
orig.resolve()
- underlying := n.expandUnderlying()
+ n.fromRHS = n.expandRHS()
n.tparams = orig.tparams
- n.underlying = underlying
- n.fromRHS = orig.fromRHS // for cycle detection
if len(orig.methods) == 0 {
n.setState(complete) // nothing further to do
@@ -224,25 +228,25 @@ func (n *Named) resolve() *Named {
// methods would need to support reentrant calls though. It would
// also make the API more future-proof towards further extensions.
if n.loader != nil {
- assert(n.underlying == nil)
- assert(n.TypeArgs().Len() == 0) // instances are created by instantiation, in which case n.loader is nil
+ assert(n.fromRHS == nil) // not loaded yet
+ assert(n.inst == nil) // cannot import an instantiation
tparams, underlying, methods, delayed := n.loader(n)
n.loader = nil
n.tparams = bindTParams(tparams)
- n.underlying = underlying
n.fromRHS = underlying // for cycle detection
n.methods = methods
- // advance state to avoid deadlock calling delayed functions
- n.setState(loaded)
+ n.setState(loaded) // avoid deadlock calling delayed functions
for _, f := range delayed {
f()
}
}
+ assert(n.fromRHS != nil || n.allowNilRHS)
+ assert(n.underlying == nil) // underlying comes after resolving
n.setState(complete)
return n
}
@@ -259,8 +263,8 @@ func (n *Named) setState(state namedState) {
}
// newNamed is like NewNamed but with a *Checker receiver.
-func (check *Checker) newNamed(obj *TypeName, underlying Type, methods []*Func) *Named {
- typ := &Named{check: check, obj: obj, fromRHS: underlying, underlying: underlying, methods: methods}
+func (check *Checker) newNamed(obj *TypeName, fromRHS Type, methods []*Func) *Named {
+ typ := &Named{check: check, obj: obj, fromRHS: fromRHS, methods: methods}
if obj.typ == nil {
obj.typ = typ
}
@@ -300,25 +304,13 @@ func (check *Checker) newNamedInstance(pos syntax.Pos, orig *Named, targs []Type
return typ
}
-func (t *Named) cleanup() {
- assert(t.inst == nil || t.inst.orig.inst == nil)
- // Ensure that every defined type created in the course of type-checking has
- // either non-*Named underlying type, or is unexpanded.
- //
- // This guarantees that we don't leak any types whose underlying type is
- // *Named, because any unexpanded instances will lazily compute their
- // underlying type by substituting in the underlying type of their origin.
- // The origin must have either been imported or type-checked and expanded
- // here, and in either case its underlying type will be fully expanded.
- switch t.underlying.(type) {
- case nil:
- if t.TypeArgs().Len() == 0 {
- panic("nil underlying")
- }
- case *Named, *Alias:
- t.under() // t.under may add entries to check.cleaners
+func (n *Named) cleanup() {
+ // Instances can have a nil underlying at the end of type checking — they
+ // will lazily expand it as needed. All other types must have one.
+ if n.inst == nil {
+ n.resolve().under()
}
- t.check = nil
+ n.check = nil
}
// Obj returns the type name for the declaration defining the named type t. For
@@ -477,10 +469,12 @@ func (t *Named) SetUnderlying(underlying Type) {
if asNamed(underlying) != nil {
panic("underlying type must not be *Named")
}
- t.resolve().underlying = underlying
- if t.fromRHS == nil {
- t.fromRHS = underlying // for cycle detection
- }
+ // Invariant: Presence of underlying type implies it was resolved.
+ t.fromRHS = underlying
+ t.allowNilRHS = false
+ t.resolve()
+ t.underlying = underlying
+ t.allowNilUnderlying = false
}
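
The window that `allowNilRHS` and `allowNilUnderlying` describe corresponds to the two-step construction exposed by the exported go/types API (which mirrors types2): `NewNamed` with a nil underlying, then `SetUnderlying`. A minimal sketch of that importer-style pattern for a self-referential type; the package path `p` and type name `List` are made-up examples:

```go
package main

import (
	"fmt"
	"go/token"
	"go/types"
)

func main() {
	pkg := types.NewPackage("p", "p")

	// Create the Named first with a nil underlying so it can be referenced
	// from within its own structure, as importers do for recursive types.
	obj := types.NewTypeName(token.NoPos, pkg, "List", nil)
	list := types.NewNamed(obj, nil, nil)

	// type List struct { next *List }
	next := types.NewField(token.NoPos, pkg, "next", types.NewPointer(list), false)
	list.SetUnderlying(types.NewStruct([]*types.Var{next}, nil))

	fmt.Println(list.Underlying()) // struct{next *p.List}
}
```
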
// AddMethod adds method m unless it is already in the method list.
@@ -523,9 +517,20 @@ func (t *Named) methodIndex(name string, foldCase bool) int {
// Alias types.
//
// [underlying type]: https://go.dev/ref/spec#Underlying_types.
-func (t *Named) Underlying() Type {
- // TODO(gri) Investigate if Unalias can be moved to where underlying is set.
- return Unalias(t.resolve().underlying)
+func (n *Named) Underlying() Type {
+ n.resolve()
+
+ // The gccimporter depends on writing a nil underlying via NewNamed and
+ // immediately reading it back. Rather than putting that in under() and
+ // complicating things there, we just check for that special case here.
+ if n.fromRHS == nil {
+ assert(n.allowNilRHS)
+ if n.allowNilUnderlying {
+ return nil
+ }
+ }
+
+ return n.under()
}
func (t *Named) String() string { return TypeString(t, nil) }
@@ -536,89 +541,55 @@ func (t *Named) String() string { return TypeString(t, nil) }
// TODO(rfindley): reorganize the loading and expansion methods under this
// heading.
-// under returns the expanded underlying type of n0; possibly by following
-// forward chains of named types. If an underlying type is found, resolve
-// the chain by setting the underlying type for each defined type in the
-// chain before returning it. If no underlying type is found or a cycle
-// is detected, the result is Typ[Invalid]. If a cycle is detected and
-// n0.check != nil, the cycle is reported.
-//
-// This is necessary because the underlying type of named may be itself a
-// named type that is incomplete:
-//
-// type (
-// A B
-// B *C
-// C A
-// )
-//
-// The type of C is the (named) type of A which is incomplete,
-// and which has as its underlying type the named type B.
-func (n0 *Named) under() Type {
- u := n0.Underlying()
-
- // If the underlying type of a defined type is not a defined
- // (incl. instance) type, then that is the desired underlying
- // type.
- var n1 *Named
- switch u1 := u.(type) {
- case nil:
- // After expansion via Underlying(), we should never encounter a nil
- // underlying.
- panic("nil underlying")
- default:
- // common case
- return u
- case *Named:
- // handled below
- n1 = u1
- }
-
- if n0.check == nil {
- panic("Named.check == nil but type is incomplete")
- }
-
- // Invariant: after this point n0 as well as any named types in its
- // underlying chain should be set up when this function exits.
- check := n0.check
- n := n0
-
- seen := make(map[*Named]int) // types that need their underlying type resolved
- var path []Object // objects encountered, for cycle reporting
-
-loop:
- for {
- seen[n] = len(seen)
- path = append(path, n.obj)
- n = n1
- if i, ok := seen[n]; ok {
- // cycle
- check.cycleError(path[i:], firstInSrc(path[i:]))
- u = Typ[Invalid]
- break
- }
- u = n.Underlying()
- switch u1 := u.(type) {
+// under returns the (possibly expanded) underlying type of n.
+//
+// It does so by following RHS type chains. If a type literal is found, each
+// named type in the chain has its underlying set to that type. Aliases are
+// skipped because their underlying type is not memoized.
+//
+// This function also checks for instantiated layout cycles, which are
+// reachable only in the case where resolve() expanded an instantiated
+// type which became self-referencing without indirection. If such a
+// cycle is found, the result is Typ[Invalid]; if n.check != nil, the
+// cycle is also reported.
+func (n *Named) under() Type {
+ assert(n.state() >= resolved)
+
+ if n.underlying != nil {
+ return n.underlying
+ }
+
+ var rhs Type = n
+ var u Type
+
+ seen := make(map[*Named]int)
+ var path []Object
+
+ for u == nil {
+ switch t := rhs.(type) {
case nil:
u = Typ[Invalid]
- break loop
- default:
- break loop
+ case *Alias:
+ rhs = unalias(t)
case *Named:
- // Continue collecting *Named types in the chain.
- n1 = u1
+ if i, ok := seen[t]; ok {
+ n.check.cycleError(path[i:], firstInSrc(path[i:]))
+ u = Typ[Invalid]
+ break
+ }
+ seen[t] = len(seen)
+ path = append(path, t.obj)
+ t.resolve()
+ assert(t.fromRHS != nil || t.allowNilRHS)
+ rhs = t.fromRHS
+ default:
+ u = rhs // any type literal works
}
}
- for n := range seen {
- // We should never have to update the underlying type of an imported type;
- // those underlying types should have been resolved during the import.
- // Also, doing so would lead to a race condition (was go.dev/issue/31749).
- // Do this check always, not just in debug mode (it's cheap).
- if n.obj.pkg != check.pkg {
- panic("imported type with unresolved underlying type")
- }
- n.underlying = u
+ // go back up the chain
+ for t := range seen {
+ t.underlying = u
}
return u
@@ -646,78 +617,108 @@ func (check *Checker) context() *Context {
return check.ctxt
}
-// expandUnderlying substitutes type arguments in the underlying type n.orig,
-// returning the result. Returns Typ[Invalid] if there was an error.
-func (n *Named) expandUnderlying() Type {
+// expandRHS crafts a synthetic RHS for an instantiated type using the RHS of
+// its origin type (which must be a generic type).
+//
+// Suppose that we had:
+//
+// type T[P any] struct {
+// f P
+// }
+//
+// type U T[int]
+//
+// When we go to U, we observe T[int]. Since T[int] is an instantiation, it has no
+// declaration. Here, we craft a synthetic RHS for T[int] as if it were declared,
+// somewhat similar to:
+//
+// type T[int] struct {
+// f int
+// }
+//
+// And note that the synthetic RHS here is the same as the underlying for U. Now,
+// consider:
+//
+// type T[_ any] U
+// type U int
+// type V T[U]
+//
+// The synthetic RHS for T[U] becomes:
+//
+// type T[U] U
+//
+// Whereas the underlying of V is int, not U.
+func (n *Named) expandRHS() (rhs Type) {
check := n.check
if check != nil && check.conf.Trace {
- check.trace(n.obj.pos, "-- Named.expandUnderlying %s", n)
+ check.trace(n.obj.pos, "-- Named.expandRHS %s", n)
check.indent++
defer func() {
check.indent--
- check.trace(n.obj.pos, "=> %s (tparams = %s, under = %s)", n, n.tparams.list(), n.underlying)
+ check.trace(n.obj.pos, "=> %s (rhs = %s)", n, rhs)
}()
}
- assert(n.inst.orig.underlying != nil)
+ assert(n.state() == unresolved)
+
if n.inst.ctxt == nil {
n.inst.ctxt = NewContext()
}
+ ctxt := n.inst.ctxt
orig := n.inst.orig
- targs := n.inst.targs
- if asNamed(orig.underlying) != nil {
- // We should only get a Named underlying type here during type checking
- // (for example, in recursive type declarations).
- assert(check != nil)
- }
+ assert(orig.state() >= resolved)
+ assert(orig.fromRHS != nil)
+
+ targs := n.inst.targs
+ tpars := orig.tparams
- if orig.tparams.Len() != targs.Len() {
- // Mismatching arg and tparam length may be checked elsewhere.
+ if targs.Len() != tpars.Len() {
return Typ[Invalid]
}
- // Ensure that an instance is recorded before substituting, so that we
- // resolve n for any recursive references.
- h := n.inst.ctxt.instanceHash(orig, targs.list())
- n2 := n.inst.ctxt.update(h, orig, n.TypeArgs().list(), n)
- assert(n == n2)
+ h := ctxt.instanceHash(orig, targs.list())
+ u := ctxt.update(h, orig, targs.list(), n) // block fixed point infinite instantiation
+ assert(n == u)
- smap := makeSubstMap(orig.tparams.list(), targs.list())
- var ctxt *Context
+ m := makeSubstMap(tpars.list(), targs.list())
if check != nil {
ctxt = check.context()
}
- underlying := n.check.subst(n.obj.pos, orig.underlying, smap, n, ctxt)
- // If the underlying type of n is an interface, we need to set the receiver of
- // its methods accurately -- we set the receiver of interface methods on
- // the RHS of a type declaration to the defined type.
- if iface, _ := underlying.(*Interface); iface != nil {
+
+ rhs = check.subst(n.obj.pos, orig.fromRHS, m, n, ctxt)
+
+ // TODO(markfreeman): Can we handle this in substitution?
+ // If the RHS is an interface, we must set the receiver of interface methods
+ // to the named type.
+ if iface, _ := rhs.(*Interface); iface != nil {
if methods, copied := replaceRecvType(iface.methods, orig, n); copied {
- // If the underlying type doesn't actually use type parameters, it's
- // possible that it wasn't substituted. In this case we need to create
- // a new *Interface before modifying receivers.
- if iface == orig.underlying {
- old := iface
- iface = check.newInterface()
- iface.embeddeds = old.embeddeds
- assert(old.complete) // otherwise we are copying incomplete data
- iface.complete = old.complete
- iface.implicit = old.implicit // should be false but be conservative
- underlying = iface
+ // If the RHS doesn't use type parameters, it may not have been
+ // substituted; we need to craft a new interface first.
+ if iface == orig.fromRHS {
+ assert(iface.complete) // otherwise we are copying incomplete data
+
+ crafted := check.newInterface()
+ crafted.complete = true
+ crafted.implicit = false
+ crafted.embeddeds = iface.embeddeds
+
+ iface = crafted
}
iface.methods = methods
iface.tset = nil // recompute type set with new methods
- // If check != nil, check.newInterface will have saved the interface for later completion.
- if check == nil { // golang/go#61561: all newly created interfaces must be fully evaluated
+ // go.dev/issue/61561: We have to complete the interface even without a checker.
+ if check == nil {
iface.typeSet()
}
+
+ return iface
}
}
- return underlying
+ return rhs
}
// safeUnderlying returns the underlying type of typ without expanding
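
The first scenario in the expandRHS comment can be observed from ordinary client code through the exported go/types API, which shares this machinery. A small, self-contained sketch:

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
)

const src = `package p

type T[P any] struct{ f P }

type U T[int]
`

func main() {
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	pkg, err := new(types.Config).Check("p", fset, []*ast.File{file}, nil)
	if err != nil {
		panic(err)
	}
	u := pkg.Scope().Lookup("U").Type().(*types.Named)

	// The RHS of U is the instantiation T[int]; its expanded (synthetic) RHS
	// becomes U's underlying type.
	fmt.Println(u.Underlying()) // struct{f int}
}
```
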
diff --git a/src/cmd/compile/internal/types2/object.go b/src/cmd/compile/internal/types2/object.go
index 7096c556971077..463ed30308d0ee 100644
--- a/src/cmd/compile/internal/types2/object.go
+++ b/src/cmd/compile/internal/types2/object.go
@@ -295,7 +295,8 @@ func NewTypeName(pos syntax.Pos, pkg *Package, name string, typ Type) *TypeName
// lazily calls resolve to finish constructing the Named object.
func NewTypeNameLazy(pos syntax.Pos, pkg *Package, name string, load func(*Named) ([]*TypeParam, Type, []*Func, []func())) *TypeName {
obj := NewTypeName(pos, pkg, name, nil)
- NewNamed(obj, nil, nil).loader = load
+ n := (*Checker)(nil).newNamed(obj, nil, nil)
+ n.loader = load
return obj
}
@@ -638,7 +639,7 @@ func writeObject(buf *bytes.Buffer, obj Object, qf Qualifier) {
} else {
// TODO(gri) should this be fromRHS for *Named?
// (See discussion in #66559.)
- typ = under(typ)
+ typ = typ.Underlying()
}
}
diff --git a/src/cmd/compile/internal/types2/operand.go b/src/cmd/compile/internal/types2/operand.go
index 81f46af535152b..cd9e9f3575f71a 100644
--- a/src/cmd/compile/internal/types2/operand.go
+++ b/src/cmd/compile/internal/types2/operand.go
@@ -194,7 +194,7 @@ func operandString(x *operand, qf Qualifier) string {
what := compositeKind(x.typ)
if what == "" {
// x.typ must be basic type
- what = under(x.typ).(*Basic).name
+ what = x.typ.Underlying().(*Basic).name
}
desc += what + " "
}
@@ -229,7 +229,7 @@ func operandString(x *operand, qf Qualifier) string {
// ("array", "slice", etc.) or the empty string if typ is not
// composite but a basic type.
func compositeKind(typ Type) string {
- switch under(typ).(type) {
+ switch typ.Underlying().(type) {
case *Basic:
return ""
case *Array:
@@ -319,8 +319,8 @@ func (x *operand) assignableTo(check *Checker, T Type, cause *string) (bool, Cod
return true, 0
}
- Vu := under(V)
- Tu := under(T)
+ Vu := V.Underlying()
+ Tu := T.Underlying()
Vp, _ := V.(*TypeParam)
Tp, _ := T.(*TypeParam)
diff --git a/src/cmd/compile/internal/types2/predicates.go b/src/cmd/compile/internal/types2/predicates.go
index c157672ba58a4d..60147c5e21138c 100644
--- a/src/cmd/compile/internal/types2/predicates.go
+++ b/src/cmd/compile/internal/types2/predicates.go
@@ -28,11 +28,11 @@ func isString(t Type) bool { return isBasic(t, IsString) }
func isIntegerOrFloat(t Type) bool { return isBasic(t, IsInteger|IsFloat) }
func isConstType(t Type) bool { return isBasic(t, IsConstType) }
-// isBasic reports whether under(t) is a basic type with the specified info.
+// isBasic reports whether t.Underlying() is a basic type with the specified info.
// If t is a type parameter the result is false; i.e.,
// isBasic does not look inside a type parameter.
func isBasic(t Type, info BasicInfo) bool {
- u, _ := under(t).(*Basic)
+ u, _ := t.Underlying().(*Basic)
return u != nil && u.info&info != 0
}
@@ -48,7 +48,7 @@ func allString(t Type) bool { return allBasic(t, IsString) }
func allOrdered(t Type) bool { return allBasic(t, IsOrdered) }
func allNumericOrString(t Type) bool { return allBasic(t, IsNumeric|IsString) }
-// allBasic reports whether under(t) is a basic type with the specified info.
+// allBasic reports whether t.Underlying() is a basic type with the specified info.
// If t is a type parameter, the result is true if isBasic(t, info) is true
// for all specific types of the type parameter's type set.
func allBasic(t Type, info BasicInfo) bool {
@@ -107,7 +107,7 @@ func isUntypedNumeric(t Type) bool {
// IsInterface reports whether t is an interface type.
func IsInterface(t Type) bool {
- _, ok := under(t).(*Interface)
+ _, ok := t.Underlying().(*Interface)
return ok
}
@@ -163,7 +163,7 @@ func comparableType(T Type, dynamic bool, seen map[Type]bool) *typeError {
}
seen[T] = true
- switch t := under(T).(type) {
+ switch t := T.Underlying().(type) {
case *Basic:
// assume invalid types to be comparable to avoid follow-up errors
if t.kind == UntypedNil {
@@ -206,7 +206,7 @@ func comparableType(T Type, dynamic bool, seen map[Type]bool) *typeError {
// hasNil reports whether type t includes the nil value.
func hasNil(t Type) bool {
- switch u := under(t).(type) {
+ switch u := t.Underlying().(type) {
case *Basic:
return u.kind == UnsafePointer
case *Slice, *Pointer, *Signature, *Map, *Chan:
diff --git a/src/cmd/compile/internal/types2/range.go b/src/cmd/compile/internal/types2/range.go
index b654601eafc73e..899f5c09911a5e 100644
--- a/src/cmd/compile/internal/types2/range.go
+++ b/src/cmd/compile/internal/types2/range.go
@@ -35,7 +35,7 @@ func (check *Checker) rangeStmt(inner stmtContext, rangeStmt *syntax.ForStmt, no
check.expr(nil, &x, rangeVar)
if isTypes2 && x.mode != invalid && sValue == nil && !check.hasCallOrRecv {
- if t, ok := arrayPtrDeref(under(x.typ)).(*Array); ok {
+ if t, ok := arrayPtrDeref(x.typ.Underlying()).(*Array); ok {
for {
// Put constant info on the thing inside parentheses.
// That's where (*../noder/writer).expr expects it.
diff --git a/src/cmd/compile/internal/types2/signature.go b/src/cmd/compile/internal/types2/signature.go
index ea1cfd88cc60b4..ea60254fa6832e 100644
--- a/src/cmd/compile/internal/types2/signature.go
+++ b/src/cmd/compile/internal/types2/signature.go
@@ -203,7 +203,7 @@ func (check *Checker) collectRecv(rparam *syntax.Field, scopePos syntax.Pos) (*V
case *Alias:
// Methods on generic aliases are not permitted.
// Only report an error if the alias type is valid.
- if isValid(unalias(t)) {
+ if isValid(t) {
check.errorf(rbase, InvalidRecv, "cannot define new methods on generic alias type %s", t)
}
// Ok to continue but do not set basetype in this case so that
@@ -439,7 +439,7 @@ func (check *Checker) validRecv(pos poser, recv *Var) {
break
}
var cause string
- switch u := T.under().(type) {
+ switch u := T.Underlying().(type) {
case *Basic:
// unsafe.Pointer is treated like a regular pointer
if u.kind == UnsafePointer {
diff --git a/src/cmd/compile/internal/types2/sizeof_test.go b/src/cmd/compile/internal/types2/sizeof_test.go
index d435c049c5b87d..13b96209114de1 100644
--- a/src/cmd/compile/internal/types2/sizeof_test.go
+++ b/src/cmd/compile/internal/types2/sizeof_test.go
@@ -31,7 +31,7 @@ func TestSizeof(t *testing.T) {
{Interface{}, 40, 80},
{Map{}, 16, 32},
{Chan{}, 12, 24},
- {Named{}, 60, 112},
+ {Named{}, 64, 120},
{TypeParam{}, 28, 48},
{term{}, 12, 24},
diff --git a/src/cmd/compile/internal/types2/sizes.go b/src/cmd/compile/internal/types2/sizes.go
index 7b1c00b40ab9a9..534ecfba35ced7 100644
--- a/src/cmd/compile/internal/types2/sizes.go
+++ b/src/cmd/compile/internal/types2/sizes.go
@@ -54,7 +54,7 @@ func (s *StdSizes) Alignof(T Type) (result int64) {
// For arrays and structs, alignment is defined in terms
// of alignment of the elements and fields, respectively.
- switch t := under(T).(type) {
+ switch t := T.Underlying().(type) {
case *Array:
// spec: "For a variable x of array type: unsafe.Alignof(x)
// is the same as unsafe.Alignof(x[0]), but at least 1."
@@ -162,7 +162,7 @@ var basicSizes = [...]byte{
}
func (s *StdSizes) Sizeof(T Type) int64 {
- switch t := under(T).(type) {
+ switch t := T.Underlying().(type) {
case *Basic:
assert(isTyped(T))
k := t.kind
@@ -307,7 +307,7 @@ func (conf *Config) offsetsof(T *Struct) []int64 {
func (conf *Config) offsetof(T Type, index []int) int64 {
var offs int64
for _, i := range index {
- s := under(T).(*Struct)
+ s := T.Underlying().(*Struct)
d := conf.offsetsof(s)[i]
if d < 0 {
return -1
diff --git a/src/cmd/compile/internal/types2/struct.go b/src/cmd/compile/internal/types2/struct.go
index f5cdc472f77de2..99b75332e8ffec 100644
--- a/src/cmd/compile/internal/types2/struct.go
+++ b/src/cmd/compile/internal/types2/struct.go
@@ -141,12 +141,12 @@ func (check *Checker) structType(styp *Struct, e *syntax.StructType) {
// Because we have a name, typ must be of the form T or *T, where T is the name
// of a (named or alias) type, and t (= deref(typ)) must be the type of T.
// We must delay this check to the end because we don't want to instantiate
- // (via under(t)) a possibly incomplete type.
+ // (via t.Underlying()) a possibly incomplete type.
embeddedTyp := typ // for closure below
embeddedPos := pos
check.later(func() {
t, isPtr := deref(embeddedTyp)
- switch u := under(t).(type) {
+ switch u := t.Underlying().(type) {
case *Basic:
if !isValid(t) {
// error was reported before
diff --git a/src/cmd/compile/internal/types2/typeparam.go b/src/cmd/compile/internal/types2/typeparam.go
index c60b5eb41722e4..2ffa00c32c7ec4 100644
--- a/src/cmd/compile/internal/types2/typeparam.go
+++ b/src/cmd/compile/internal/types2/typeparam.go
@@ -113,7 +113,7 @@ func (t *TypeParam) iface() *Interface {
// determine constraint interface
var ityp *Interface
- switch u := under(bound).(type) {
+ switch u := bound.Underlying().(type) {
case *Basic:
if !isValid(u) {
// error is reported elsewhere
diff --git a/src/cmd/compile/internal/types2/typeset.go b/src/cmd/compile/internal/types2/typeset.go
index ce487e74f7232a..fafe6f368ba432 100644
--- a/src/cmd/compile/internal/types2/typeset.go
+++ b/src/cmd/compile/internal/types2/typeset.go
@@ -114,13 +114,13 @@ func (s *_TypeSet) all(f func(t, u Type) bool) bool {
for _, t := range s.terms {
assert(t.typ != nil)
- // Unalias(x) == under(x) for ~x terms
+ // Unalias(x) == x.Underlying() for ~x terms
u := Unalias(t.typ)
if !t.tilde {
- u = under(u)
+ u = u.Underlying()
}
if debug {
- assert(Identical(u, under(u)))
+ assert(Identical(u, u.Underlying()))
}
if !f(t.typ, u) {
return false
@@ -264,7 +264,7 @@ func computeInterfaceTypeSet(check *Checker, pos syntax.Pos, ityp *Interface) *_
}
var comparable bool
var terms termlist
- switch u := under(typ).(type) {
+ switch u := typ.Underlying().(type) {
case *Interface:
// For now we don't permit type parameters as constraints.
assert(!isTypeParam(typ))
@@ -380,7 +380,7 @@ func computeUnionTypeSet(check *Checker, unionSets map[*Union]*_TypeSet, pos syn
var allTerms termlist
for _, t := range utyp.terms {
var terms termlist
- u := under(t.typ)
+ u := t.typ.Underlying()
if ui, _ := u.(*Interface); ui != nil {
// For now we don't permit type parameters as constraints.
assert(!isTypeParam(t.typ))
diff --git a/src/cmd/compile/internal/types2/typeset_test.go b/src/cmd/compile/internal/types2/typeset_test.go
index 40ca28e525feb5..bcff2489306828 100644
--- a/src/cmd/compile/internal/types2/typeset_test.go
+++ b/src/cmd/compile/internal/types2/typeset_test.go
@@ -64,7 +64,7 @@ func TestTypeSetString(t *testing.T) {
if obj == nil {
t.Fatalf("%s: T not found (invalid test case)", body)
}
- T, ok := under(obj.Type()).(*Interface)
+ T, ok := obj.Type().Underlying().(*Interface)
if !ok {
t.Fatalf("%s: %v is not an interface (invalid test case)", body, obj)
}
diff --git a/src/cmd/compile/internal/types2/typestring.go b/src/cmd/compile/internal/types2/typestring.go
index 47f53bc12d77b5..b1f0d0929bae33 100644
--- a/src/cmd/compile/internal/types2/typestring.go
+++ b/src/cmd/compile/internal/types2/typestring.go
@@ -455,7 +455,7 @@ func (w *typeWriter) tuple(tup *Tuple, variadic bool) {
} else {
// special case:
// append(s, "foo"...) leads to signature func([]byte, string...)
- if t, _ := under(typ).(*Basic); t == nil || t.kind != String {
+ if t, _ := typ.Underlying().(*Basic); t == nil || t.kind != String {
w.error("expected string type")
continue
}
diff --git a/src/cmd/compile/internal/types2/typeterm.go b/src/cmd/compile/internal/types2/typeterm.go
index 97791324e1e75c..cb11811d458c5a 100644
--- a/src/cmd/compile/internal/types2/typeterm.go
+++ b/src/cmd/compile/internal/types2/typeterm.go
@@ -115,7 +115,7 @@ func (x *term) includes(t Type) bool {
u := t
if x.tilde {
- u = under(u)
+ u = u.Underlying()
}
return Identical(x.typ, u)
}
@@ -155,11 +155,11 @@ func (x *term) disjoint(y *term) bool {
}
ux := x.typ
if y.tilde {
- ux = under(ux)
+ ux = ux.Underlying()
}
uy := y.typ
if x.tilde {
- uy = under(uy)
+ uy = uy.Underlying()
}
return !Identical(ux, uy)
}
diff --git a/src/cmd/compile/internal/types2/typexpr.go b/src/cmd/compile/internal/types2/typexpr.go
index 8accc46751fdd0..8601ce627768e4 100644
--- a/src/cmd/compile/internal/types2/typexpr.go
+++ b/src/cmd/compile/internal/types2/typexpr.go
@@ -169,11 +169,11 @@ func (check *Checker) validVarType(e syntax.Expr, typ Type) {
return
}
- // We don't want to call under() or complete interfaces while we are in
+ // We don't want to call typ.Underlying() or complete interfaces while we are in
// the middle of type-checking parameter declarations that might belong
// to interface methods. Delay this check to the end of type-checking.
check.later(func() {
- if t, _ := under(typ).(*Interface); t != nil {
+ if t, _ := typ.Underlying().(*Interface); t != nil {
pos := syntax.StartPos(e)
tset := computeInterfaceTypeSet(check, pos, t) // TODO(gri) is this the correct position?
if !tset.IsMethodSet() {
@@ -239,7 +239,7 @@ func (check *Checker) typInternal(e0 syntax.Expr, def *TypeName) (T Type) {
check.indent--
var under Type
if T != nil {
- // Calling under() here may lead to endless instantiations.
+ // Calling T.Underlying() here may lead to endless instantiations.
// Test case: type T[P any] *T[P]
under = safeUnderlying(T)
}
@@ -425,7 +425,7 @@ func setDefType(def *TypeName, typ Type) {
case *Basic:
assert(t == Typ[Invalid])
case *Named:
- t.underlying = typ
+ t.fromRHS = typ
default:
panic(fmt.Sprintf("unexpected type %T", t))
}
diff --git a/src/cmd/compile/internal/types2/under.go b/src/cmd/compile/internal/types2/under.go
index 078ba9ab172453..98c62733c7cd99 100644
--- a/src/cmd/compile/internal/types2/under.go
+++ b/src/cmd/compile/internal/types2/under.go
@@ -6,19 +6,8 @@ package types2
import "iter"
-// under returns the true expanded underlying type.
-// If it doesn't exist, the result is Typ[Invalid].
-// under must only be called when a type is known
-// to be fully set up.
-func under(t Type) Type {
- if t := asNamed(t); t != nil {
- return t.under()
- }
- return t.Underlying()
-}
-
// If typ is a type parameter, underIs returns the result of typ.underIs(f).
-// Otherwise, underIs returns the result of f(under(typ)).
+// Otherwise, underIs returns the result of f(typ.Underlying()).
func underIs(typ Type, f func(Type) bool) bool {
return all(typ, func(_, u Type) bool {
return f(u)
@@ -31,7 +20,7 @@ func all(t Type, f func(t, u Type) bool) bool {
if p, _ := Unalias(t).(*TypeParam); p != nil {
return p.typeset(f)
}
- return f(t, under(t))
+ return f(t, t.Underlying())
}
// typeset is an iterator over the (type/underlying type) pairs of the
diff --git a/src/cmd/compile/internal/types2/unify.go b/src/cmd/compile/internal/types2/unify.go
index 9cd3af8607181e..eecef455ac577c 100644
--- a/src/cmd/compile/internal/types2/unify.go
+++ b/src/cmd/compile/internal/types2/unify.go
@@ -270,7 +270,7 @@ func (u *unifier) inferred(tparams []*TypeParam) []Type {
// it is a non-type parameter interface. Otherwise it returns nil.
func asInterface(x Type) (i *Interface) {
if _, ok := Unalias(x).(*TypeParam); !ok {
- i, _ = under(x).(*Interface)
+ i, _ = x.Underlying().(*Interface)
}
return i
}
@@ -339,7 +339,7 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
if traceInference {
u.tracef("%s ≡ under %s", x, ny)
}
- y = ny.under()
+ y = ny.Underlying()
// Per the spec, a defined type cannot have an underlying type
// that is a type parameter.
assert(!isTypeParam(y))
@@ -430,7 +430,7 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
u.set(px, y)
default:
// Neither x nor y are defined types.
- if yc, _ := under(y).(*Chan); yc != nil && yc.dir != SendRecv {
+ if yc, _ := y.Underlying().(*Chan); yc != nil && yc.dir != SendRecv {
// y is a directed channel type: select y.
u.set(px, y)
}
diff --git a/src/cmd/compile/internal/types2/union.go b/src/cmd/compile/internal/types2/union.go
index 1bf4353f264e34..ab0bd43cd350ac 100644
--- a/src/cmd/compile/internal/types2/union.go
+++ b/src/cmd/compile/internal/types2/union.go
@@ -93,7 +93,7 @@ func parseUnion(check *Checker, uexpr syntax.Expr) Type {
continue
}
- u := under(t.typ)
+ u := t.typ.Underlying()
f, _ := u.(*Interface)
if t.tilde {
if f != nil {
diff --git a/src/cmd/compile/internal/types2/universe.go b/src/cmd/compile/internal/types2/universe.go
index c66caebd10cef7..332cd174f97466 100644
--- a/src/cmd/compile/internal/types2/universe.go
+++ b/src/cmd/compile/internal/types2/universe.go
@@ -116,7 +116,7 @@ func defPredeclaredTypes() {
{
obj := NewTypeName(nopos, nil, "error", nil)
obj.setColor(black)
- typ := NewNamed(obj, nil, nil)
+ typ := (*Checker)(nil).newNamed(obj, nil, nil)
// error.Error() string
recv := newVar(RecvVar, nopos, nil, "", typ)
@@ -128,7 +128,8 @@ func defPredeclaredTypes() {
ityp := &Interface{methods: []*Func{err}, complete: true}
computeInterfaceTypeSet(nil, nopos, ityp) // prevent races due to lazy computation of tset
- typ.SetUnderlying(ityp)
+ typ.fromRHS = ityp
+ typ.Underlying()
def(obj)
}
@@ -136,12 +137,13 @@ func defPredeclaredTypes() {
{
obj := NewTypeName(nopos, nil, "comparable", nil)
obj.setColor(black)
- typ := NewNamed(obj, nil, nil)
+ typ := (*Checker)(nil).newNamed(obj, nil, nil)
// interface{} // marked as comparable
ityp := &Interface{complete: true, tset: &_TypeSet{nil, allTermlist, true}}
- typ.SetUnderlying(ityp)
+ typ.fromRHS = ityp
+ typ.Underlying()
def(obj)
}
}
diff --git a/src/cmd/compile/internal/types2/validtype.go b/src/cmd/compile/internal/types2/validtype.go
index 32e389a6562445..528f5121e392ee 100644
--- a/src/cmd/compile/internal/types2/validtype.go
+++ b/src/cmd/compile/internal/types2/validtype.go
@@ -91,13 +91,6 @@ func (check *Checker) validType0(pos syntax.Pos, typ Type, nest, path []*Named)
// break
// }
- // Don't report a 2nd error if we already know the type is invalid
- // (e.g., if a cycle was detected earlier, via under).
- // Note: ensure that t.orig is fully resolved by calling Underlying().
- if !isValid(t.Underlying()) {
- return false
- }
-
// If the current type t is also found in nest, (the memory of) t is
// embedded in itself, indicating an invalid recursive type.
for _, e := range nest {
@@ -125,8 +118,9 @@ func (check *Checker) validType0(pos syntax.Pos, typ Type, nest, path []*Named)
// are not yet available to other goroutines).
assert(t.obj.pkg == check.pkg)
assert(t.Origin().obj.pkg == check.pkg)
- t.underlying = Typ[Invalid]
- t.Origin().underlying = Typ[Invalid]
+
+ // let t become invalid when it resolves
+ t.Origin().fromRHS = Typ[Invalid]
// Find the starting point of the cycle and report it.
// Because each type in nest must also appear in path (see invariant below),
diff --git a/src/cmd/compile/internal/walk/walk.go b/src/cmd/compile/internal/walk/walk.go
index a7d8182a747ce7..25add3d8043905 100644
--- a/src/cmd/compile/internal/walk/walk.go
+++ b/src/cmd/compile/internal/walk/walk.go
@@ -275,6 +275,15 @@ func backingArrayPtrLen(n ir.Node) (ptr, length ir.Node) {
// function calls, which could clobber function call arguments/results
// currently on the stack.
func mayCall(n ir.Node) bool {
+ // This is intended to avoid putting constants
+ // into temporaries with the race detector (or other
+ // instrumentation) which interferes with simple
+ // "this is a constant" tests in ssagen.
+ // Also, it will generally lead to better code.
+ if n.Op() == ir.OLITERAL {
+ return false
+ }
+
// When instrumenting, any expression might require function calls.
if base.Flag.Cfg.Instrumenting {
return true
diff --git a/src/cmd/compile/internal/wasm/ssa.go b/src/cmd/compile/internal/wasm/ssa.go
index daee82f1fd7366..1e3b318e8c9fe0 100644
--- a/src/cmd/compile/internal/wasm/ssa.go
+++ b/src/cmd/compile/internal/wasm/ssa.go
@@ -14,7 +14,6 @@ import (
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/wasm"
- "internal/buildcfg"
)
/*
@@ -425,27 +424,11 @@ func ssaGenValueOnStack(s *ssagen.State, v *ssa.Value, extend bool) {
case ssa.OpWasmI64TruncSatF32S, ssa.OpWasmI64TruncSatF64S:
getValue64(s, v.Args[0])
- if buildcfg.GOWASM.SatConv {
- s.Prog(v.Op.Asm())
- } else {
- if v.Op == ssa.OpWasmI64TruncSatF32S {
- s.Prog(wasm.AF64PromoteF32)
- }
- p := s.Prog(wasm.ACall)
- p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.WasmTruncS}
- }
+ s.Prog(v.Op.Asm())
case ssa.OpWasmI64TruncSatF32U, ssa.OpWasmI64TruncSatF64U:
getValue64(s, v.Args[0])
- if buildcfg.GOWASM.SatConv {
- s.Prog(v.Op.Asm())
- } else {
- if v.Op == ssa.OpWasmI64TruncSatF32U {
- s.Prog(wasm.AF64PromoteF32)
- }
- p := s.Prog(wasm.ACall)
- p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.WasmTruncU}
- }
+ s.Prog(v.Op.Asm())
case ssa.OpWasmF32DemoteF64:
getValue64(s, v.Args[0])
diff --git a/src/cmd/dist/build.go b/src/cmd/dist/build.go
index 9a7951726f6f04..2fcdb2d3915b9b 100644
--- a/src/cmd/dist/build.go
+++ b/src/cmd/dist/build.go
@@ -1397,7 +1397,7 @@ var (
binExesIncludedInDistpack = []string{"cmd/go", "cmd/gofmt"}
// Keep in sync with the filter in cmd/distpack/pack.go.
- toolsIncludedInDistpack = []string{"cmd/asm", "cmd/cgo", "cmd/compile", "cmd/cover", "cmd/link", "cmd/preprofile", "cmd/vet"}
+ toolsIncludedInDistpack = []string{"cmd/asm", "cmd/cgo", "cmd/compile", "cmd/cover", "cmd/fix", "cmd/link", "cmd/preprofile", "cmd/vet"}
// We could install all tools in "cmd", but that is unnecessary because we will
// remove them in distpack, so instead install the tools that will actually
diff --git a/src/cmd/dist/buildtool.go b/src/cmd/dist/buildtool.go
index 080de832b2ad96..62cd9376927e0d 100644
--- a/src/cmd/dist/buildtool.go
+++ b/src/cmd/dist/buildtool.go
@@ -90,6 +90,7 @@ var bootstrapDirs = []string{
"internal/platform",
"internal/profile",
"internal/race",
+ "internal/runtime/gc",
"internal/saferio",
"internal/syscall/unix",
"internal/types/errors",
diff --git a/src/cmd/distpack/pack.go b/src/cmd/distpack/pack.go
index 6bab45f1d3d642..09c3a331195f58 100644
--- a/src/cmd/distpack/pack.go
+++ b/src/cmd/distpack/pack.go
@@ -172,7 +172,7 @@ func main() {
default:
return false
// Keep in sync with toolsIncludedInDistpack in cmd/dist/build.go.
- case "asm", "cgo", "compile", "cover", "link", "preprofile", "vet":
+ case "asm", "cgo", "compile", "cover", "fix", "link", "preprofile", "vet":
}
}
return true
diff --git a/src/cmd/fix/doc.go b/src/cmd/fix/doc.go
deleted file mode 100644
index b3d69144717172..00000000000000
--- a/src/cmd/fix/doc.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Fix finds Go programs that use old APIs and rewrites them to use
-newer ones. After you update to a new Go release, fix helps make
-the necessary changes to your programs.
-
-Usage:
-
- go tool fix [ignored...]
-
-This tool is currently in transition. All its historical fixers were
-long obsolete and have been removed, so it is currently a no-op. In
-due course the tool will integrate with the Go analysis framework
-(golang.org/x/tools/go/analysis) and run a modern suite of fix
-algorithms; see https://go.dev/issue/71859.
-*/
-package main
diff --git a/src/cmd/fix/main.go b/src/cmd/fix/main.go
index 87cc0d6414601b..422fa827459900 100644
--- a/src/cmd/fix/main.go
+++ b/src/cmd/fix/main.go
@@ -1,31 +1,59 @@
-// Copyright 2011 The Go Authors. All rights reserved.
+// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+/*
+Fix is a tool executed by "go fix" to update Go programs that use old
+features of the language and library and rewrite them to use newer
+ones. After you update to a new Go release, fix helps make the
+necessary changes to your programs.
+
+See the documentation for "go fix" for how to run this command.
+You can provide an alternative tool using "go fix -fixtool=...".
+
+Run "go tool fix help" to see the list of analyzers supported by this
+program.
+
+See [golang.org/x/tools/go/analysis] for information on how to write
+an analyzer that can suggest fixes.
+*/
package main
import (
- "flag"
- "fmt"
- "os"
-)
+ "cmd/internal/objabi"
+ "cmd/internal/telemetry/counter"
-var (
- _ = flag.Bool("diff", false, "obsolete, no effect")
- _ = flag.String("go", "", "obsolete, no effect")
- _ = flag.String("r", "", "obsolete, no effect")
- _ = flag.String("force", "", "obsolete, no effect")
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/analysis/passes/buildtag"
+ "golang.org/x/tools/go/analysis/passes/hostport"
+ "golang.org/x/tools/go/analysis/unitchecker"
)
-func usage() {
- fmt.Fprintf(os.Stderr, "usage: go tool fix [-diff] [-r ignored] [-force ignored] ...\n")
- flag.PrintDefaults()
- os.Exit(2)
-}
-
func main() {
- flag.Usage = usage
- flag.Parse()
+ // Keep consistent with cmd/vet/main.go!
+ counter.Open()
+ objabi.AddVersionFlag()
+ counter.Inc("fix/invocations")
+
+ unitchecker.Main(suite...) // (never returns)
+}
- os.Exit(0)
+// The fix suite analyzers produce fixes that are safe to apply.
+// (Diagnostics may not describe actual problems,
+// but their fixes must be unambiguously safe to apply.)
+var suite = []*analysis.Analyzer{
+ buildtag.Analyzer,
+ hostport.Analyzer,
+ // TODO(adonovan): now that the modernize (proposal #75266) and
+ // inline (proposal #75267) analyzers are published, revendor
+ // x/tools and add them here.
+ //
+ // TODO(adonovan): add any other vet analyzers whose fixes are always safe.
+ // Candidates to audit: sigchanyzer, printf, assign, unreachable.
+ // Rejected:
+ // - composites: some types (e.g. PointXY{1,2}) don't want field names.
+ // - timeformat: flipping MM/DD is a behavior change, but the code
+ // could potentially be a workaround for another bug.
+ // - stringintconv: offers two fixes, user input required to choose.
+ // - fieldalignment: poor signal/noise; fix could be a regression.
}
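
Because `go fix -fixtool=...` accepts any unitchecker-based binary, a project can assemble its own fix suite in a few lines. A hypothetical sketch; the command name and the choice of a single analyzer are placeholders:

```go
// Command myfixtool is a minimal alternative tool for use via
// go fix's -fixtool flag.
package main

import (
	"golang.org/x/tools/go/analysis/passes/hostport"
	"golang.org/x/tools/go/analysis/unitchecker"
)

func main() {
	// unitchecker.Main drives the analyzers under the go command's
	// unit-at-a-time protocol, as cmd/fix does above. It never returns.
	unitchecker.Main(hostport.Analyzer)
}
```
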
diff --git a/src/cmd/go.mod b/src/cmd/go.mod
index 017883a7870cd2..1fc256ae6f2ce7 100644
--- a/src/cmd/go.mod
+++ b/src/cmd/go.mod
@@ -6,16 +6,16 @@ require (
github.com/google/pprof v0.0.0-20250630185457-6e76a2b096b5
golang.org/x/arch v0.20.1-0.20250808194827-46ba08e3ae58
golang.org/x/build v0.0.0-20250806225920-b7c66c047964
- golang.org/x/mod v0.28.0
+ golang.org/x/mod v0.29.0
golang.org/x/sync v0.17.0
- golang.org/x/sys v0.36.0
- golang.org/x/telemetry v0.0.0-20250908211612-aef8a434d053
+ golang.org/x/sys v0.37.0
+ golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8
golang.org/x/term v0.34.0
- golang.org/x/tools v0.37.1-0.20250924232827-4df13e317ce4
+ golang.org/x/tools v0.38.1-0.20251015192825-7d9453ccc0f5
)
require (
github.com/ianlancetaylor/demangle v0.0.0-20250417193237-f615e6bd150b // indirect
- golang.org/x/text v0.29.0 // indirect
+ golang.org/x/text v0.30.0 // indirect
rsc.io/markdown v0.0.0-20240306144322-0bf8f97ee8ef // indirect
)
diff --git a/src/cmd/go.sum b/src/cmd/go.sum
index 0906ffcc605854..eb7af161552020 100644
--- a/src/cmd/go.sum
+++ b/src/cmd/go.sum
@@ -10,19 +10,19 @@ golang.org/x/arch v0.20.1-0.20250808194827-46ba08e3ae58 h1:uxPa6+/WsUfzikIAPMqpT
golang.org/x/arch v0.20.1-0.20250808194827-46ba08e3ae58/go.mod h1:bdwinDaKcfZUGpH09BB7ZmOfhalA8lQdzl62l8gGWsk=
golang.org/x/build v0.0.0-20250806225920-b7c66c047964 h1:yRs1K51GKq7hsIO+YHJ8LsslrvwFceNPIv0tYjpcBd0=
golang.org/x/build v0.0.0-20250806225920-b7c66c047964/go.mod h1:i9Vx7+aOQUpYJRxSO+OpRStVBCVL/9ccI51xblWm5WY=
-golang.org/x/mod v0.28.0 h1:gQBtGhjxykdjY9YhZpSlZIsbnaE2+PgjfLWUQTnoZ1U=
-golang.org/x/mod v0.28.0/go.mod h1:yfB/L0NOf/kmEbXjzCPOx1iK1fRutOydrCMsqRhEBxI=
+golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
+golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
-golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
-golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
-golang.org/x/telemetry v0.0.0-20250908211612-aef8a434d053 h1:dHQOQddU4YHS5gY33/6klKjq7Gp3WwMyOXGNp5nzRj8=
-golang.org/x/telemetry v0.0.0-20250908211612-aef8a434d053/go.mod h1:+nZKN+XVh4LCiA9DV3ywrzN4gumyCnKjau3NGb9SGoE=
+golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
+golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
+golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8 h1:LvzTn0GQhWuvKH/kVRS3R3bVAsdQWI7hvfLHGgh9+lU=
+golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8/go.mod h1:Pi4ztBfryZoJEkyFTI5/Ocsu2jXyDr6iSdgJiYE/uwE=
golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4=
golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw=
-golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk=
-golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4=
-golang.org/x/tools v0.37.1-0.20250924232827-4df13e317ce4 h1:IcXDtHggZZo+GzNzvVRPyNFLnOc2/Z1gg3ZVIWF2uCU=
-golang.org/x/tools v0.37.1-0.20250924232827-4df13e317ce4/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w=
+golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k=
+golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM=
+golang.org/x/tools v0.38.1-0.20251015192825-7d9453ccc0f5 h1:cz7f45KGWAtyIrz6bm45Gc+lw8beIxBSW3EQh4Bwbg4=
+golang.org/x/tools v0.38.1-0.20251015192825-7d9453ccc0f5/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
rsc.io/markdown v0.0.0-20240306144322-0bf8f97ee8ef h1:mqLYrXCXYEZOop9/Dbo6RPX11539nwiCNBb1icVPmw8=
rsc.io/markdown v0.0.0-20240306144322-0bf8f97ee8ef/go.mod h1:8xcPgWmwlZONN1D9bjxtHEjrUtSEa3fakVF8iaewYKQ=
diff --git a/src/cmd/go/alldocs.go b/src/cmd/go/alldocs.go
index f1e1b1c333542c..67c0ecbe8b2fcf 100644
--- a/src/cmd/go/alldocs.go
+++ b/src/cmd/go/alldocs.go
@@ -18,7 +18,7 @@
// clean remove object files and cached files
// doc show documentation for package or symbol
// env print Go environment information
-// fix update packages to use new APIs
+// fix apply fixes suggested by static checkers
// fmt gofmt (reformat) package sources
// generate generate Go files by processing source
// get add dependencies to current module and install them
@@ -495,22 +495,34 @@
//
// For more about environment variables, see 'go help environment'.
//
-// # Update packages to use new APIs
+// # Apply fixes suggested by static checkers
//
// Usage:
//
-// go fix [-fix list] [packages]
+// go fix [build flags] [-fixtool prog] [fix flags] [packages]
//
-// Fix runs the Go fix command on the packages named by the import paths.
+// Fix runs the Go fix tool (cmd/fix) on the named packages
+// and applies suggested fixes.
//
-// The -fix flag sets a comma-separated list of fixes to run.
-// The default is all known fixes.
-// (Its value is passed to 'go tool fix -r'.)
+// It supports these flags:
+//
+// -diff
+// instead of applying each fix, print the patch as a unified diff
+//
+// The -fixtool=prog flag selects a different analysis tool with
+// alternative or additional fixes; see the documentation for go vet's
+// -vettool flag for details.
//
-// For more about fix, see 'go doc cmd/fix'.
// For more about specifying packages, see 'go help packages'.
//
-// To run fix with other options, run 'go tool fix'.
+// For a list of fixers and their flags, see 'go tool fix help'.
+//
+// For details of a specific fixer such as 'hostport',
+// see 'go tool fix help hostport'.
+//
+// The build flags supported by go fix are those that control package resolution
+// and execution, such as -C, -n, -x, -v, -tags, and -toolexec.
+// For more about these flags, see 'go help build'.
//
// See also: go fmt, go vet.
//
@@ -1280,11 +1292,6 @@
// The -json flag prints the final go.mod file in JSON format instead of
// writing it back to go.mod. The JSON output corresponds to these Go types:
//
-// type Module struct {
-// Path string
-// Version string
-// }
-//
// type GoMod struct {
// Module ModPath
// Go string
@@ -1294,6 +1301,13 @@
// Exclude []Module
// Replace []Replace
// Retract []Retract
+// Tool []Tool
+// Ignore []Ignore
+// }
+//
+// type Module struct {
+// Path string
+// Version string
// }
//
// type ModPath struct {
@@ -2012,20 +2026,34 @@
//
// go vet [build flags] [-vettool prog] [vet flags] [packages]
//
-// Vet runs the Go vet command on the packages named by the import paths.
+// Vet runs the Go vet tool (cmd/vet) on the named packages
+// and reports diagnostics.
//
-// For more about vet and its flags, see 'go doc cmd/vet'.
-// For more about specifying packages, see 'go help packages'.
-// For a list of checkers and their flags, see 'go tool vet help'.
-// For details of a specific checker such as 'printf', see 'go tool vet help printf'.
+// It supports these flags:
//
-// The -vettool=prog flag selects a different analysis tool with alternative
-// or additional checks.
-// For example, the 'shadow' analyzer can be built and run using these commands:
+// -c int
+// display offending line with this many lines of context (default -1)
+// -json
+// emit JSON output
+// -fix
+// instead of printing each diagnostic, apply its first fix (if any)
+// -diff
+// instead of applying each fix, print the patch as a unified diff
+//
+// The -vettool=prog flag selects a different analysis tool with
+// alternative or additional checks. For example, the 'shadow' analyzer
+// can be built and run using these commands:
//
// go install golang.org/x/tools/go/analysis/passes/shadow/cmd/shadow@latest
// go vet -vettool=$(which shadow)
//
+// Alternative vet tools should be built atop golang.org/x/tools/go/analysis/unitchecker,
+// which handles the interaction with go vet.
+//
+// For more about specifying packages, see 'go help packages'.
+// For a list of checkers and their flags, see 'go tool vet help'.
+// For details of a specific checker such as 'printf', see 'go tool vet help printf'.
+//
// The build flags supported by go vet are those that control package resolution
// and execution, such as -C, -n, -x, -v, -tags, and -toolexec.
// For more about these flags, see 'go help build'.
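Both `-vettool` (above) and the new `-fixtool` flag in the `go fix` documentation expect an analysis tool built atop `golang.org/x/tools/go/analysis/unitchecker`. The following sketch is illustrative only and not part of this patch: a hypothetical `customvet` command that bundles the existing printf and shadow analyzers behind the unitchecker driver.

```go
// Command customvet is a hypothetical alternative analysis tool built on
// unitchecker, as the documentation above recommends.
package main

import (
	"golang.org/x/tools/go/analysis/passes/printf"
	"golang.org/x/tools/go/analysis/passes/shadow"
	"golang.org/x/tools/go/analysis/unitchecker"
)

func main() {
	// unitchecker.Main implements the driver protocol that 'go vet' and
	// 'go fix' use when they invoke an external tool; it never returns.
	unitchecker.Main(
		printf.Analyzer,
		shadow.Analyzer,
	)
}
```

Once installed, such a tool would be selected with `go vet -vettool=$(which customvet)`, mirroring the shadow example in the documentation above.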
@@ -3242,6 +3270,10 @@
// The following flags are recognized by the 'go test' command and
// control the execution of any test:
//
+// -artifacts
+// Save test artifacts in the directory specified by -outputdir.
+// See 'go doc testing.T.ArtifactDir'.
+//
// -bench regexp
// Run only those benchmarks matching a regular expression.
// By default, no benchmarks are run.
@@ -3336,6 +3368,10 @@
// This will only list top-level tests. No subtest or subbenchmarks will be
// shown.
//
+// -outputdir directory
+// Place output files from profiling and test artifacts in the
+// specified directory, by default the directory in which "go test" is running.
+//
// -parallel n
// Allow parallel execution of test functions that call t.Parallel, and
// fuzz targets that call t.Parallel when running the seed corpus.
@@ -3447,10 +3483,6 @@
// Sample 1 in n stack traces of goroutines holding a
// contended mutex.
//
-// -outputdir directory
-// Place output files from profiling in the specified directory,
-// by default the directory in which "go test" is running.
-//
// -trace trace.out
// Write an execution trace to the specified file before exiting.
//
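As a rough illustration of the new `-artifacts` flag documented above, the test below writes a file into its artifact directory; it assumes that `testing.T.ArtifactDir` (referenced in the flag description) returns a writable directory path, and that the directory is kept under `-outputdir` when the test is run with `go test -artifacts`.

```go
package example_test

import (
	"os"
	"path/filepath"
	"testing"
)

// TestSavesArtifact writes a small report into the test's artifact directory.
// Without -artifacts the file is discarded with the rest of the temporary
// test state; with -artifacts it is preserved under -outputdir.
func TestSavesArtifact(t *testing.T) {
	dir := t.ArtifactDir() // assumed to return the artifact directory path
	report := []byte("example output\n")
	if err := os.WriteFile(filepath.Join(dir, "report.txt"), report, 0o644); err != nil {
		t.Fatal(err)
	}
}
```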
diff --git a/src/cmd/go/internal/base/path.go b/src/cmd/go/internal/base/path.go
index 5bb7bc3bde63e2..a7577f62e76898 100644
--- a/src/cmd/go/internal/base/path.go
+++ b/src/cmd/go/internal/base/path.go
@@ -55,8 +55,7 @@ func sameFile(path1, path2 string) bool {
// ShortPathError rewrites the path in err using base.ShortPath, if err is a wrapped PathError.
func ShortPathError(err error) error {
- var pe *fs.PathError
- if errors.As(err, &pe) {
+ if pe, ok := errors.AsType[*fs.PathError](err); ok {
pe.Path = ShortPath(pe.Path)
}
return err
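This is the first of many hunks in this patch that replace the `errors.As` target-pointer pattern with the new generic `errors.AsType`. A minimal, self-contained sketch of the two patterns side by side, assuming only the `errors.AsType[T](err) (T, bool)` shape used throughout this diff:

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func main() {
	_, err := os.Open("no-such-file")

	// Old pattern: declare a typed target and pass its address to errors.As.
	var pe *fs.PathError
	if errors.As(err, &pe) {
		fmt.Println("errors.As:", pe.Path)
	}

	// New pattern: errors.AsType returns the typed error and an ok flag.
	if pe, ok := errors.AsType[*fs.PathError](err); ok {
		fmt.Println("errors.AsType:", pe.Path)
	}
}
```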
diff --git a/src/cmd/go/internal/bug/bug.go b/src/cmd/go/internal/bug/bug.go
index 4ff45d2d888c96..4e9ae1e9b499ca 100644
--- a/src/cmd/go/internal/bug/bug.go
+++ b/src/cmd/go/internal/bug/bug.go
@@ -21,6 +21,7 @@ import (
"cmd/go/internal/base"
"cmd/go/internal/cfg"
"cmd/go/internal/envcmd"
+ "cmd/go/internal/modload"
"cmd/go/internal/web"
"cmd/go/internal/work"
)
@@ -44,7 +45,7 @@ func runBug(ctx context.Context, cmd *base.Command, args []string) {
if len(args) > 0 {
base.Fatalf("go: bug takes no arguments")
}
- work.BuildInit()
+ work.BuildInit(modload.LoaderState)
var buf strings.Builder
buf.WriteString(bugHeader)
diff --git a/src/cmd/go/internal/clean/clean.go b/src/cmd/go/internal/clean/clean.go
index c6f311e0263af8..1c05977de554f8 100644
--- a/src/cmd/go/internal/clean/clean.go
+++ b/src/cmd/go/internal/clean/clean.go
@@ -120,7 +120,7 @@ func init() {
}
func runClean(ctx context.Context, cmd *base.Command, args []string) {
- modload.InitWorkfile()
+ modload.InitWorkfile(modload.LoaderState)
if len(args) > 0 {
cacheFlag := ""
switch {
@@ -142,13 +142,13 @@ func runClean(ctx context.Context, cmd *base.Command, args []string) {
// either the flags and arguments explicitly imply a package,
// or no other target (such as a cache) was requested to be cleaned.
cleanPkg := len(args) > 0 || cleanI || cleanR
- if (!modload.Enabled() || modload.HasModRoot()) &&
+ if (!modload.Enabled(modload.LoaderState) || modload.HasModRoot(modload.LoaderState)) &&
!cleanCache && !cleanModcache && !cleanTestcache && !cleanFuzzcache {
cleanPkg = true
}
if cleanPkg {
- for _, pkg := range load.PackagesAndErrors(ctx, load.PackageOpts{}, args) {
+ for _, pkg := range load.PackagesAndErrors(modload.LoaderState, ctx, load.PackageOpts{}, args) {
clean(pkg)
}
}
diff --git a/src/cmd/go/internal/doc/pkgsite.go b/src/cmd/go/internal/doc/pkgsite.go
index 06289ac4fc9a8a..c173167b6329a4 100644
--- a/src/cmd/go/internal/doc/pkgsite.go
+++ b/src/cmd/go/internal/doc/pkgsite.go
@@ -81,8 +81,7 @@ func doPkgsite(urlPath, fragment string) error {
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
- var ee *exec.ExitError
- if errors.As(err, &ee) {
+ if ee, ok := errors.AsType[*exec.ExitError](err); ok {
// Exit with the same exit status as pkgsite to avoid
// printing of "exit status" error messages.
// Any relevant messages have already been printed
diff --git a/src/cmd/go/internal/envcmd/env.go b/src/cmd/go/internal/envcmd/env.go
index 6ad6954dd52125..13708ae170c1d8 100644
--- a/src/cmd/go/internal/envcmd/env.go
+++ b/src/cmd/go/internal/envcmd/env.go
@@ -191,14 +191,14 @@ func findEnv(env []cfg.EnvVar, name string) string {
// ExtraEnvVars returns environment variables that should not leak into child processes.
func ExtraEnvVars() []cfg.EnvVar {
gomod := ""
- modload.Init()
- if modload.HasModRoot() {
+ modload.Init(modload.LoaderState)
+ if modload.HasModRoot(modload.LoaderState) {
gomod = modload.ModFilePath()
- } else if modload.Enabled() {
+ } else if modload.Enabled(modload.LoaderState) {
gomod = os.DevNull
}
- modload.InitWorkfile()
- gowork := modload.WorkFilePath()
+ modload.InitWorkfile(modload.LoaderState)
+ gowork := modload.WorkFilePath(modload.LoaderState)
// As a special case, if a user set off explicitly, report that in GOWORK.
if cfg.Getenv("GOWORK") == "off" {
gowork = "off"
@@ -336,7 +336,7 @@ func runEnv(ctx context.Context, cmd *base.Command, args []string) {
}
}
if needCostly {
- work.BuildInit()
+ work.BuildInit(modload.LoaderState)
env = append(env, ExtraEnvVarsCostly()...)
}
diff --git a/src/cmd/go/internal/fix/fix.go b/src/cmd/go/internal/fix/fix.go
deleted file mode 100644
index 8947da05c3ee63..00000000000000
--- a/src/cmd/go/internal/fix/fix.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package fix implements the “go fix” command.
-package fix
-
-import (
- "cmd/go/internal/base"
- "cmd/go/internal/cfg"
- "cmd/go/internal/load"
- "cmd/go/internal/modload"
- "cmd/go/internal/str"
- "cmd/go/internal/work"
- "context"
- "fmt"
- "go/build"
- "os"
- "path/filepath"
-)
-
-var CmdFix = &base.Command{
- UsageLine: "go fix [-fix list] [packages]",
- Short: "update packages to use new APIs",
- Long: `
-Fix runs the Go fix command on the packages named by the import paths.
-
-The -fix flag sets a comma-separated list of fixes to run.
-The default is all known fixes.
-(Its value is passed to 'go tool fix -r'.)
-
-For more about fix, see 'go doc cmd/fix'.
-For more about specifying packages, see 'go help packages'.
-
-To run fix with other options, run 'go tool fix'.
-
-See also: go fmt, go vet.
- `,
-}
-
-var fixes = CmdFix.Flag.String("fix", "", "comma-separated list of fixes to apply")
-
-func init() {
- work.AddBuildFlags(CmdFix, work.OmitBuildOnlyFlags)
- CmdFix.Run = runFix // fix cycle
-}
-
-func runFix(ctx context.Context, cmd *base.Command, args []string) {
- pkgs := load.PackagesAndErrors(ctx, load.PackageOpts{}, args)
- w := 0
- for _, pkg := range pkgs {
- if pkg.Error != nil {
- base.Errorf("%v", pkg.Error)
- continue
- }
- pkgs[w] = pkg
- w++
- }
- pkgs = pkgs[:w]
-
- printed := false
- for _, pkg := range pkgs {
- if modload.Enabled() && pkg.Module != nil && !pkg.Module.Main {
- if !printed {
- fmt.Fprintf(os.Stderr, "go: not fixing packages in dependency modules\n")
- printed = true
- }
- continue
- }
- // Use pkg.gofiles instead of pkg.Dir so that
- // the command only applies to this package,
- // not to packages in subdirectories.
- files := base.RelPaths(pkg.InternalAllGoFiles())
- goVersion := ""
- if pkg.Module != nil {
- goVersion = "go" + pkg.Module.GoVersion
- } else if pkg.Standard {
- goVersion = build.Default.ReleaseTags[len(build.Default.ReleaseTags)-1]
- }
- var fixArg []string
- if *fixes != "" {
- fixArg = []string{"-r=" + *fixes}
- }
- base.Run(str.StringList(cfg.BuildToolexec, filepath.Join(cfg.GOROOTbin, "go"), "tool", "fix", "-go="+goVersion, fixArg, files))
- }
-}
diff --git a/src/cmd/go/internal/fmtcmd/fmt.go b/src/cmd/go/internal/fmtcmd/fmt.go
index 62b22f6bcfa407..a42e7753050356 100644
--- a/src/cmd/go/internal/fmtcmd/fmt.go
+++ b/src/cmd/go/internal/fmtcmd/fmt.go
@@ -59,8 +59,8 @@ func runFmt(ctx context.Context, cmd *base.Command, args []string) {
baseGofmtArgs := len(gofmtArgs)
baseGofmtArgLen := gofmtArgLen
- for _, pkg := range load.PackagesAndErrors(ctx, load.PackageOpts{}, args) {
- if modload.Enabled() && pkg.Module != nil && !pkg.Module.Main {
+ for _, pkg := range load.PackagesAndErrors(modload.LoaderState, ctx, load.PackageOpts{}, args) {
+ if modload.Enabled(modload.LoaderState) && pkg.Module != nil && !pkg.Module.Main {
if !printed {
fmt.Fprintf(os.Stderr, "go: not formatting packages in dependency modules\n")
printed = true
@@ -68,11 +68,10 @@ func runFmt(ctx context.Context, cmd *base.Command, args []string) {
continue
}
if pkg.Error != nil {
- var nogo *load.NoGoError
- var embed *load.EmbedError
- if (errors.As(pkg.Error, &nogo) || errors.As(pkg.Error, &embed)) && len(pkg.InternalAllGoFiles()) > 0 {
- // Skip this error, as we will format
- // all files regardless.
+ if _, ok := errors.AsType[*load.NoGoError](pkg.Error); ok {
+ // Skip this error, as we will format all files regardless.
+ } else if _, ok := errors.AsType[*load.EmbedError](pkg.Error); ok && len(pkg.InternalAllGoFiles()) > 0 {
+ // Skip this error, as we will format all files regardless.
} else {
base.Errorf("%v", pkg.Error)
continue
diff --git a/src/cmd/go/internal/generate/generate.go b/src/cmd/go/internal/generate/generate.go
index 0f4b4a972e9107..4250916b8d09d6 100644
--- a/src/cmd/go/internal/generate/generate.go
+++ b/src/cmd/go/internal/generate/generate.go
@@ -182,7 +182,7 @@ func init() {
}
func runGenerate(ctx context.Context, cmd *base.Command, args []string) {
- modload.InitWorkfile()
+ modload.InitWorkfile(modload.LoaderState)
if generateRunFlag != "" {
var err error
@@ -204,8 +204,8 @@ func runGenerate(ctx context.Context, cmd *base.Command, args []string) {
// Even if the arguments are .go files, this loop suffices.
printed := false
pkgOpts := load.PackageOpts{IgnoreImports: true}
- for _, pkg := range load.PackagesAndErrors(ctx, pkgOpts, args) {
- if modload.Enabled() && pkg.Module != nil && !pkg.Module.Main {
+ for _, pkg := range load.PackagesAndErrors(modload.LoaderState, ctx, pkgOpts, args) {
+ if modload.Enabled(modload.LoaderState) && pkg.Module != nil && !pkg.Module.Main {
if !printed {
fmt.Fprintf(os.Stderr, "go: not generating in packages in dependency modules\n")
printed = true
diff --git a/src/cmd/go/internal/list/list.go b/src/cmd/go/internal/list/list.go
index bee7dc8053ee6f..0bf86ae004a26e 100644
--- a/src/cmd/go/internal/list/list.go
+++ b/src/cmd/go/internal/list/list.go
@@ -419,7 +419,7 @@ func (v *jsonFlag) needAny(fields ...string) bool {
var nl = []byte{'\n'}
func runList(ctx context.Context, cmd *base.Command, args []string) {
- modload.InitWorkfile()
+ modload.InitWorkfile(modload.LoaderState)
if *listFmt != "" && listJson {
base.Fatalf("go list -f cannot be used with -json")
@@ -427,11 +427,11 @@ func runList(ctx context.Context, cmd *base.Command, args []string) {
if *listReuse != "" && !*listM {
base.Fatalf("go list -reuse cannot be used without -m")
}
- if *listReuse != "" && modload.HasModRoot() {
+ if *listReuse != "" && modload.HasModRoot(modload.LoaderState) {
base.Fatalf("go list -reuse cannot be used inside a module")
}
- work.BuildInit()
+ work.BuildInit(modload.LoaderState)
out := newTrackingWriter(os.Stdout)
defer out.w.Flush()
@@ -496,12 +496,12 @@ func runList(ctx context.Context, cmd *base.Command, args []string) {
}
}
- modload.Init()
+ modload.Init(modload.LoaderState)
if *listRetracted {
if cfg.BuildMod == "vendor" {
base.Fatalf("go list -retracted cannot be used when vendoring is enabled")
}
- if !modload.Enabled() {
+ if !modload.Enabled(modload.LoaderState) {
base.Fatalf("go list -retracted can only be used in module-aware mode")
}
}
@@ -525,11 +525,11 @@ func runList(ctx context.Context, cmd *base.Command, args []string) {
base.Fatalf("go list -test cannot be used with -m")
}
- if modload.Init(); !modload.Enabled() {
+ if modload.Init(modload.LoaderState); !modload.Enabled(modload.LoaderState) {
base.Fatalf("go: list -m cannot be used with GO111MODULE=off")
}
- modload.LoadModFile(ctx) // Sets cfg.BuildMod as a side-effect.
+ modload.LoadModFile(modload.LoaderState, ctx) // Sets cfg.BuildMod as a side-effect.
if cfg.BuildMod == "vendor" {
const actionDisabledFormat = "go: can't %s using the vendor directory\n\t(Use -mod=mod or -mod=readonly to bypass.)"
@@ -613,7 +613,7 @@ func runList(ctx context.Context, cmd *base.Command, args []string) {
SuppressBuildInfo: !*listExport && !listJsonFields.needAny("Stale", "StaleReason"),
SuppressEmbedFiles: !*listExport && !listJsonFields.needAny("EmbedFiles", "TestEmbedFiles", "XTestEmbedFiles"),
}
- pkgs := load.PackagesAndErrors(ctx, pkgOpts, args)
+ pkgs := load.PackagesAndErrors(modload.LoaderState, ctx, pkgOpts, args)
if !*listE {
w := 0
for _, pkg := range pkgs {
@@ -727,7 +727,7 @@ func runList(ctx context.Context, cmd *base.Command, args []string) {
b.NeedExport = *listExport
b.NeedCompiledGoFiles = *listCompiled
if cfg.BuildCover {
- load.PrepareForCoverageBuild(pkgs)
+ load.PrepareForCoverageBuild(modload.LoaderState, pkgs)
}
a := &work.Action{}
// TODO: Use pkgsFilter?
diff --git a/src/cmd/go/internal/load/flag.go b/src/cmd/go/internal/load/flag.go
index 55bdab013505ab..86a922bc103a82 100644
--- a/src/cmd/go/internal/load/flag.go
+++ b/src/cmd/go/internal/load/flag.go
@@ -6,6 +6,7 @@ package load
import (
"cmd/go/internal/base"
+ "cmd/go/internal/modload"
"cmd/internal/quoted"
"fmt"
"strings"
@@ -63,7 +64,7 @@ func (f *PerPackageFlag) set(v, cwd string) error {
return fmt.Errorf("parameter may not start with quote character %c", v[0])
}
pattern := strings.TrimSpace(v[:i])
- match = MatchPackage(pattern, cwd)
+ match = MatchPackage(modload.LoaderState, pattern, cwd)
v = v[i+1:]
}
flags, err := quoted.Split(v)
diff --git a/src/cmd/go/internal/load/godebug.go b/src/cmd/go/internal/load/godebug.go
index 8ea8ffab1aea1f..817cc4faebf7b4 100644
--- a/src/cmd/go/internal/load/godebug.go
+++ b/src/cmd/go/internal/load/godebug.go
@@ -45,12 +45,12 @@ func ParseGoDebug(text string) (key, value string, err error) {
// defaultGODEBUG returns the default GODEBUG setting for the main package p.
// When building a test binary, directives, testDirectives, and xtestDirectives
// list additional directives from the package under test.
-func defaultGODEBUG(p *Package, directives, testDirectives, xtestDirectives []build.Directive) string {
+func defaultGODEBUG(loaderstate *modload.State, p *Package, directives, testDirectives, xtestDirectives []build.Directive) string {
if p.Name != "main" {
return ""
}
- goVersion := modload.MainModules.GoVersion()
- if modload.RootMode == modload.NoRoot && p.Module != nil {
+ goVersion := loaderstate.MainModules.GoVersion(loaderstate)
+ if loaderstate.RootMode == modload.NoRoot && p.Module != nil {
// This is go install pkg@version or go run pkg@version.
// Use the Go version from the package.
// If there isn't one, then assume Go 1.20,
@@ -73,7 +73,7 @@ func defaultGODEBUG(p *Package, directives, testDirectives, xtestDirectives []bu
}
// Add directives from main module go.mod.
- for _, g := range modload.MainModules.Godebugs() {
+ for _, g := range loaderstate.MainModules.Godebugs(loaderstate) {
if m == nil {
m = make(map[string]string)
}
diff --git a/src/cmd/go/internal/load/pkg.go b/src/cmd/go/internal/load/pkg.go
index 1f791546f90088..cfaece2072dbb9 100644
--- a/src/cmd/go/internal/load/pkg.go
+++ b/src/cmd/go/internal/load/pkg.go
@@ -290,8 +290,8 @@ func (p *Package) setLoadPackageDataError(err error, path string, stk *ImportSta
// Replace (possibly wrapped) *build.NoGoError with *load.NoGoError.
// The latter is more specific about the cause.
- var nogoErr *build.NoGoError
- if errors.As(err, &nogoErr) {
+ nogoErr, ok := errors.AsType[*build.NoGoError](err)
+ if ok {
if p.Dir == "" && nogoErr.Dir != "" {
p.Dir = nogoErr.Dir
}
@@ -686,8 +686,8 @@ const (
)
// LoadPackage does Load import, but without a parent package load context
-func LoadPackage(ctx context.Context, opts PackageOpts, path, srcDir string, stk *ImportStack, importPos []token.Position, mode int) *Package {
- p, err := loadImport(ctx, opts, nil, path, srcDir, nil, stk, importPos, mode)
+func LoadPackage(loaderstate *modload.State, ctx context.Context, opts PackageOpts, path, srcDir string, stk *ImportStack, importPos []token.Position, mode int) *Package {
+ p, err := loadImport(loaderstate, ctx, opts, nil, path, srcDir, nil, stk, importPos, mode)
if err != nil {
base.Fatalf("internal error: loadImport of %q with nil parent returned an error", path)
}
@@ -703,7 +703,7 @@ func LoadPackage(ctx context.Context, opts PackageOpts, path, srcDir string, stk
// The returned PackageError, if any, describes why parent is not allowed
// to import the named package, with the error referring to importPos.
// The PackageError can only be non-nil when parent is not nil.
-func loadImport(ctx context.Context, opts PackageOpts, pre *preload, path, srcDir string, parent *Package, stk *ImportStack, importPos []token.Position, mode int) (*Package, *PackageError) {
+func loadImport(loaderstate *modload.State, ctx context.Context, opts PackageOpts, pre *preload, path, srcDir string, parent *Package, stk *ImportStack, importPos []token.Position, mode int) (*Package, *PackageError) {
ctx, span := trace.StartSpan(ctx, "modload.loadImport "+path)
defer span.Done()
@@ -718,9 +718,9 @@ func loadImport(ctx context.Context, opts PackageOpts, pre *preload, path, srcDi
parentRoot = parent.Root
parentIsStd = parent.Standard
}
- bp, loaded, err := loadPackageData(ctx, path, parentPath, srcDir, parentRoot, parentIsStd, mode)
+ bp, loaded, err := loadPackageData(loaderstate, ctx, path, parentPath, srcDir, parentRoot, parentIsStd, mode)
if loaded && pre != nil && !opts.IgnoreImports {
- pre.preloadImports(ctx, opts, bp.Imports, bp)
+ pre.preloadImports(loaderstate, ctx, opts, bp.Imports, bp)
}
if bp == nil {
p := &Package{
@@ -771,7 +771,7 @@ func loadImport(ctx context.Context, opts PackageOpts, pre *preload, path, srcDi
// Load package.
// loadPackageData may return bp != nil even if an error occurs,
// in order to return partial information.
- p.load(ctx, opts, path, stk, importPos, bp, err)
+ p.load(loaderstate, ctx, opts, path, stk, importPos, bp, err)
if !cfg.ModulesEnabled && path != cleanImport(path) {
p.Error = &PackageError{
@@ -784,7 +784,7 @@ func loadImport(ctx context.Context, opts PackageOpts, pre *preload, path, srcDi
}
// Checked on every import because the rules depend on the code doing the importing.
- if perr := disallowInternal(ctx, srcDir, parent, parentPath, p, stk); perr != nil {
+ if perr := disallowInternal(loaderstate, ctx, srcDir, parent, parentPath, p, stk); perr != nil {
perr.setPos(importPos)
return p, perr
}
@@ -838,7 +838,7 @@ func extractFirstImport(importPos []token.Position) *token.Position {
//
// loadPackageData returns a boolean, loaded, which is true if this is the
// first time the package was loaded. Callers may preload imports in this case.
-func loadPackageData(ctx context.Context, path, parentPath, parentDir, parentRoot string, parentIsStd bool, mode int) (bp *build.Package, loaded bool, err error) {
+func loadPackageData(loaderstate *modload.State, ctx context.Context, path, parentPath, parentDir, parentRoot string, parentIsStd bool, mode int) (bp *build.Package, loaded bool, err error) {
ctx, span := trace.StartSpan(ctx, "load.loadPackageData "+path)
defer span.Done()
@@ -883,7 +883,7 @@ func loadPackageData(ctx context.Context, path, parentPath, parentDir, parentRoo
r.path = newPath
r.dir = dir
} else if cfg.ModulesEnabled {
- r.dir, r.path, r.err = modload.Lookup(parentPath, parentIsStd, path)
+ r.dir, r.path, r.err = modload.Lookup(loaderstate, parentPath, parentIsStd, path)
} else if build.IsLocalImport(path) {
r.dir = filepath.Join(parentDir, path)
r.path = dirToImportPath(r.dir)
@@ -892,7 +892,7 @@ func loadPackageData(ctx context.Context, path, parentPath, parentDir, parentRoo
// find out the key to use in packageCache without the
// overhead of repeated calls to buildContext.Import.
// The code is also needed in a few other places anyway.
- r.path = resolveImportPath(path, parentPath, parentDir, parentRoot, parentIsStd)
+ r.path = resolveImportPath(loaderstate, path, parentPath, parentDir, parentRoot, parentIsStd)
} else if mode&ResolveModule != 0 {
r.path = moduleImportPath(path, parentPath, parentDir, parentRoot)
}
@@ -921,7 +921,7 @@ func loadPackageData(ctx context.Context, path, parentPath, parentDir, parentRoo
} else {
buildContext.GOPATH = "" // Clear GOPATH so packages are imported as pure module packages
}
- modroot := modload.PackageModRoot(ctx, r.path)
+ modroot := modload.PackageModRoot(loaderstate, ctx, r.path)
if modroot == "" && str.HasPathPrefix(r.dir, cfg.GOROOTsrc) {
modroot = cfg.GOROOTsrc
gorootSrcCmd := filepath.Join(cfg.GOROOTsrc, "cmd")
@@ -942,7 +942,7 @@ func loadPackageData(ctx context.Context, path, parentPath, parentDir, parentRoo
if cfg.ModulesEnabled {
// Override data.p.Root, since ImportDir sets it to $GOPATH, if
// the module is inside $GOPATH/src.
- if info := modload.PackageModuleInfo(ctx, path); info != nil {
+ if info := modload.PackageModuleInfo(loaderstate, ctx, path); info != nil {
data.p.Root = info.Dir
}
}
@@ -989,7 +989,7 @@ func loadPackageData(ctx context.Context, path, parentPath, parentDir, parentRoo
if cfg.GOBIN != "" {
data.p.BinDir = cfg.GOBIN
} else if cfg.ModulesEnabled {
- data.p.BinDir = modload.BinDir()
+ data.p.BinDir = modload.BinDir(loaderstate)
}
}
@@ -1068,7 +1068,7 @@ func newPreload() *preload {
// preloadMatches loads data for package paths matched by patterns.
// When preloadMatches returns, some packages may not be loaded yet, but
// loadPackageData and loadImport are always safe to call.
-func (pre *preload) preloadMatches(ctx context.Context, opts PackageOpts, matches []*search.Match) {
+func (pre *preload) preloadMatches(loaderstate *modload.State, ctx context.Context, opts PackageOpts, matches []*search.Match) {
for _, m := range matches {
for _, pkg := range m.Pkgs {
select {
@@ -1077,10 +1077,10 @@ func (pre *preload) preloadMatches(ctx context.Context, opts PackageOpts, matche
case pre.sema <- struct{}{}:
go func(pkg string) {
mode := 0 // don't use vendoring or module import resolution
- bp, loaded, err := loadPackageData(ctx, pkg, "", base.Cwd(), "", false, mode)
+ bp, loaded, err := loadPackageData(loaderstate, ctx, pkg, "", base.Cwd(), "", false, mode)
<-pre.sema
if bp != nil && loaded && err == nil && !opts.IgnoreImports {
- pre.preloadImports(ctx, opts, bp.Imports, bp)
+ pre.preloadImports(loaderstate, ctx, opts, bp.Imports, bp)
}
}(pkg)
}
@@ -1091,7 +1091,7 @@ func (pre *preload) preloadMatches(ctx context.Context, opts PackageOpts, matche
// preloadImports queues a list of imports for preloading.
// When preloadImports returns, some packages may not be loaded yet,
// but loadPackageData and loadImport are always safe to call.
-func (pre *preload) preloadImports(ctx context.Context, opts PackageOpts, imports []string, parent *build.Package) {
+func (pre *preload) preloadImports(loaderstate *modload.State, ctx context.Context, opts PackageOpts, imports []string, parent *build.Package) {
parentIsStd := parent.Goroot && parent.ImportPath != "" && search.IsStandardImportPath(parent.ImportPath)
for _, path := range imports {
if path == "C" || path == "unsafe" {
@@ -1102,10 +1102,10 @@ func (pre *preload) preloadImports(ctx context.Context, opts PackageOpts, import
return
case pre.sema <- struct{}{}:
go func(path string) {
- bp, loaded, err := loadPackageData(ctx, path, parent.ImportPath, parent.Dir, parent.Root, parentIsStd, ResolveImport)
+ bp, loaded, err := loadPackageData(loaderstate, ctx, path, parent.ImportPath, parent.Dir, parent.Root, parentIsStd, ResolveImport)
<-pre.sema
if bp != nil && loaded && err == nil && !opts.IgnoreImports {
- pre.preloadImports(ctx, opts, bp.Imports, bp)
+ pre.preloadImports(loaderstate, ctx, opts, bp.Imports, bp)
}
}(path)
}
@@ -1160,12 +1160,12 @@ func ResolveImportPath(parent *Package, path string) (found string) {
parentRoot = parent.Root
parentIsStd = parent.Standard
}
- return resolveImportPath(path, parentPath, parentDir, parentRoot, parentIsStd)
+ return resolveImportPath(modload.LoaderState, path, parentPath, parentDir, parentRoot, parentIsStd)
}
-func resolveImportPath(path, parentPath, parentDir, parentRoot string, parentIsStd bool) (found string) {
+func resolveImportPath(loaderstate *modload.State, path, parentPath, parentDir, parentRoot string, parentIsStd bool) (found string) {
if cfg.ModulesEnabled {
- if _, p, e := modload.Lookup(parentPath, parentIsStd, path); e == nil {
+ if _, p, e := modload.Lookup(loaderstate, parentPath, parentIsStd, path); e == nil {
return p
}
return path
@@ -1463,7 +1463,7 @@ func reusePackage(p *Package, stk *ImportStack) *Package {
// is allowed to import p.
// If the import is allowed, disallowInternal returns the original package p.
// If not, it returns a new package containing just an appropriate error.
-func disallowInternal(ctx context.Context, srcDir string, importer *Package, importerPath string, p *Package, stk *ImportStack) *PackageError {
+func disallowInternal(loaderstate *modload.State, ctx context.Context, srcDir string, importer *Package, importerPath string, p *Package, stk *ImportStack) *PackageError {
// golang.org/s/go14internal:
// An import of a path containing the element “internal”
// is disallowed if the importing code is outside the tree
@@ -1552,7 +1552,7 @@ func disallowInternal(ctx context.Context, srcDir string, importer *Package, imp
// directory containing them.
// If the directory is outside the main modules, this will resolve to ".",
// which is not a prefix of any valid module.
- importerPath, _ = modload.MainModules.DirImportPath(ctx, importer.Dir)
+ importerPath, _ = loaderstate.MainModules.DirImportPath(loaderstate, ctx, importer.Dir)
}
parentOfInternal := p.ImportPath[:i]
if str.HasPathPrefix(importerPath, parentOfInternal) {
@@ -1771,7 +1771,7 @@ func (p *Package) DefaultExecName() string {
// load populates p using information from bp, err, which should
// be the result of calling build.Context.Import.
// stk contains the import stack, not including path itself.
-func (p *Package) load(ctx context.Context, opts PackageOpts, path string, stk *ImportStack, importPos []token.Position, bp *build.Package, err error) {
+func (p *Package) load(loaderstate *modload.State, ctx context.Context, opts PackageOpts, path string, stk *ImportStack, importPos []token.Position, bp *build.Package, err error) {
p.copyBuild(opts, bp)
// The localPrefix is the path we interpret ./ imports relative to,
@@ -1835,7 +1835,7 @@ func (p *Package) load(ctx context.Context, opts PackageOpts, path string, stk *
elem = full
}
if p.Internal.Build.BinDir == "" && cfg.ModulesEnabled {
- p.Internal.Build.BinDir = modload.BinDir()
+ p.Internal.Build.BinDir = modload.BinDir(loaderstate)
}
if p.Internal.Build.BinDir != "" {
// Install to GOBIN or bin of GOPATH entry.
@@ -1973,9 +1973,9 @@ func (p *Package) load(ctx context.Context, opts PackageOpts, path string, stk *
pkgPath = "command-line-arguments"
}
if cfg.ModulesEnabled {
- p.Module = modload.PackageModuleInfo(ctx, pkgPath)
+ p.Module = modload.PackageModuleInfo(loaderstate, ctx, pkgPath)
}
- p.DefaultGODEBUG = defaultGODEBUG(p, nil, nil, nil)
+ p.DefaultGODEBUG = defaultGODEBUG(loaderstate, p, nil, nil, nil)
if !opts.SuppressEmbedFiles {
p.EmbedFiles, p.Internal.Embed, err = resolveEmbed(p.Dir, p.EmbedPatterns)
@@ -2026,7 +2026,7 @@ func (p *Package) load(ctx context.Context, opts PackageOpts, path string, stk *
if path == "C" {
continue
}
- p1, err := loadImport(ctx, opts, nil, path, p.Dir, p, stk, p.Internal.Build.ImportPos[path], ResolveImport)
+ p1, err := loadImport(loaderstate, ctx, opts, nil, path, p.Dir, p, stk, p.Internal.Build.ImportPos[path], ResolveImport)
if err != nil && p.Error == nil {
p.Error = err
p.Incomplete = true
@@ -2813,7 +2813,7 @@ func TestPackageList(ctx context.Context, opts PackageOpts, roots []*Package) []
}
walkTest := func(root *Package, path string) {
var stk ImportStack
- p1, err := loadImport(ctx, opts, nil, path, root.Dir, root, &stk, root.Internal.Build.TestImportPos[path], ResolveImport)
+ p1, err := loadImport(modload.LoaderState, ctx, opts, nil, path, root.Dir, root, &stk, root.Internal.Build.TestImportPos[path], ResolveImport)
if err != nil && root.Error == nil {
// Assign error importing the package to the importer.
root.Error = err
@@ -2840,16 +2840,16 @@ func TestPackageList(ctx context.Context, opts PackageOpts, roots []*Package) []
// dependencies (like sync/atomic for coverage).
// TODO(jayconrod): delete this function and set flags automatically
// in LoadImport instead.
-func LoadImportWithFlags(path, srcDir string, parent *Package, stk *ImportStack, importPos []token.Position, mode int) (*Package, *PackageError) {
- p, err := loadImport(context.TODO(), PackageOpts{}, nil, path, srcDir, parent, stk, importPos, mode)
+func LoadImportWithFlags(loaderstate *modload.State, path, srcDir string, parent *Package, stk *ImportStack, importPos []token.Position, mode int) (*Package, *PackageError) {
+ p, err := loadImport(loaderstate, context.TODO(), PackageOpts{}, nil, path, srcDir, parent, stk, importPos, mode)
setToolFlags(p)
return p, err
}
// LoadPackageWithFlags is the same as LoadImportWithFlags but without a parent.
// It's then guaranteed to not return an error
-func LoadPackageWithFlags(path, srcDir string, stk *ImportStack, importPos []token.Position, mode int) *Package {
- p := LoadPackage(context.TODO(), PackageOpts{}, path, srcDir, stk, importPos, mode)
+func LoadPackageWithFlags(loaderstate *modload.State, path, srcDir string, stk *ImportStack, importPos []token.Position, mode int) *Package {
+ p := LoadPackage(loaderstate, context.TODO(), PackageOpts{}, path, srcDir, stk, importPos, mode)
setToolFlags(p)
return p
}
@@ -2899,7 +2899,7 @@ type PackageOpts struct {
//
// To obtain a flat list of packages, use PackageList.
// To report errors loading packages, use ReportPackageErrors.
-func PackagesAndErrors(ctx context.Context, opts PackageOpts, patterns []string) []*Package {
+func PackagesAndErrors(loaderstate *modload.State, ctx context.Context, opts PackageOpts, patterns []string) []*Package {
ctx, span := trace.StartSpan(ctx, "load.PackagesAndErrors")
defer span.Done()
@@ -2911,7 +2911,7 @@ func PackagesAndErrors(ctx context.Context, opts PackageOpts, patterns []string)
// We need to test whether the path is an actual Go file and not a
// package path or pattern ending in '.go' (see golang.org/issue/34653).
if fi, err := fsys.Stat(p); err == nil && !fi.IsDir() {
- pkgs := []*Package{GoFilesPackage(ctx, opts, patterns)}
+ pkgs := []*Package{GoFilesPackage(loaderstate, ctx, opts, patterns)}
setPGOProfilePath(pkgs)
return pkgs
}
@@ -2919,13 +2919,13 @@ func PackagesAndErrors(ctx context.Context, opts PackageOpts, patterns []string)
}
var matches []*search.Match
- if modload.Init(); cfg.ModulesEnabled {
+ if modload.Init(loaderstate); cfg.ModulesEnabled {
modOpts := modload.PackageOpts{
ResolveMissingImports: true,
LoadTests: opts.ModResolveTests,
SilencePackageErrors: true,
}
- matches, _ = modload.LoadPackages(ctx, modOpts, patterns...)
+ matches, _ = modload.LoadPackages(loaderstate, ctx, modOpts, patterns...)
} else {
matches = search.ImportPaths(patterns)
}
@@ -2938,7 +2938,7 @@ func PackagesAndErrors(ctx context.Context, opts PackageOpts, patterns []string)
pre := newPreload()
defer pre.flush()
- pre.preloadMatches(ctx, opts, matches)
+ pre.preloadMatches(loaderstate, ctx, opts, matches)
for _, m := range matches {
for _, pkg := range m.Pkgs {
@@ -2952,7 +2952,7 @@ func PackagesAndErrors(ctx context.Context, opts PackageOpts, patterns []string)
// a literal and also a non-literal pattern.
mode |= cmdlinePkgLiteral
}
- p, perr := loadImport(ctx, opts, pre, pkg, base.Cwd(), nil, &stk, nil, mode)
+ p, perr := loadImport(loaderstate, ctx, opts, pre, pkg, base.Cwd(), nil, &stk, nil, mode)
if perr != nil {
base.Fatalf("internal error: loadImport of %q with nil parent returned an error", pkg)
}
@@ -3243,8 +3243,8 @@ func setToolFlags(pkgs ...*Package) {
// GoFilesPackage creates a package for building a collection of Go files
// (typically named on the command line). The target is named p.a for
// package p or named after the first Go file for package main.
-func GoFilesPackage(ctx context.Context, opts PackageOpts, gofiles []string) *Package {
- modload.Init()
+func GoFilesPackage(loaderstate *modload.State, ctx context.Context, opts PackageOpts, gofiles []string) *Package {
+ modload.Init(loaderstate)
for _, f := range gofiles {
if !strings.HasSuffix(f, ".go") {
@@ -3289,7 +3289,7 @@ func GoFilesPackage(ctx context.Context, opts PackageOpts, gofiles []string) *Pa
ctxt.ReadDir = func(string) ([]fs.FileInfo, error) { return dirent, nil }
if cfg.ModulesEnabled {
- modload.ImportFromFiles(ctx, gofiles)
+ modload.ImportFromFiles(loaderstate, ctx, gofiles)
}
var err error
@@ -3305,7 +3305,7 @@ func GoFilesPackage(ctx context.Context, opts PackageOpts, gofiles []string) *Pa
pkg := new(Package)
pkg.Internal.Local = true
pkg.Internal.CmdlineFiles = true
- pkg.load(ctx, opts, "command-line-arguments", &stk, nil, bp, err)
+ pkg.load(loaderstate, ctx, opts, "command-line-arguments", &stk, nil, bp, err)
if !cfg.ModulesEnabled {
pkg.Internal.LocalPrefix = dirToImportPath(dir)
}
@@ -3319,7 +3319,7 @@ func GoFilesPackage(ctx context.Context, opts PackageOpts, gofiles []string) *Pa
if cfg.GOBIN != "" {
pkg.Target = filepath.Join(cfg.GOBIN, exe)
} else if cfg.ModulesEnabled {
- pkg.Target = filepath.Join(modload.BinDir(), exe)
+ pkg.Target = filepath.Join(modload.BinDir(loaderstate), exe)
}
}
@@ -3347,11 +3347,11 @@ func GoFilesPackage(ctx context.Context, opts PackageOpts, gofiles []string) *Pa
// module, but its go.mod file (if it has one) must not contain directives that
// would cause it to be interpreted differently if it were the main module
// (replace, exclude).
-func PackagesAndErrorsOutsideModule(ctx context.Context, opts PackageOpts, args []string) ([]*Package, error) {
- if !modload.ForceUseModules {
+func PackagesAndErrorsOutsideModule(loaderstate *modload.State, ctx context.Context, opts PackageOpts, args []string) ([]*Package, error) {
+ if !loaderstate.ForceUseModules {
panic("modload.ForceUseModules must be true")
}
- if modload.RootMode != modload.NoRoot {
+ if loaderstate.RootMode != modload.NoRoot {
panic("modload.RootMode must be NoRoot")
}
@@ -3404,12 +3404,12 @@ func PackagesAndErrorsOutsideModule(ctx context.Context, opts PackageOpts, args
allowed = nil
}
noneSelected := func(path string) (version string) { return "none" }
- qrs, err := modload.QueryPackages(ctx, patterns[0], version, noneSelected, allowed)
+ qrs, err := modload.QueryPackages(loaderstate, ctx, patterns[0], version, noneSelected, allowed)
if err != nil {
return nil, fmt.Errorf("%s: %w", args[0], err)
}
rootMod := qrs[0].Mod
- deprecation, err := modload.CheckDeprecation(ctx, rootMod)
+ deprecation, err := modload.CheckDeprecation(loaderstate, ctx, rootMod)
if err != nil {
return nil, fmt.Errorf("%s: %w", args[0], err)
}
@@ -3438,12 +3438,12 @@ func PackagesAndErrorsOutsideModule(ctx context.Context, opts PackageOpts, args
// Since we are in NoRoot mode, the build list initially contains only
// the dummy command-line-arguments module. Add a requirement on the
// module that provides the packages named on the command line.
- if _, err := modload.EditBuildList(ctx, nil, []module.Version{rootMod}); err != nil {
+ if _, err := modload.EditBuildList(loaderstate, ctx, nil, []module.Version{rootMod}); err != nil {
return nil, fmt.Errorf("%s: %w", args[0], err)
}
// Load packages for all arguments.
- pkgs := PackagesAndErrors(ctx, opts, patterns)
+ pkgs := PackagesAndErrors(loaderstate, ctx, opts, patterns)
// Check that named packages are all provided by the same module.
for _, pkg := range pkgs {
@@ -3471,14 +3471,14 @@ func PackagesAndErrorsOutsideModule(ctx context.Context, opts PackageOpts, args
}
// EnsureImport ensures that package p imports the named package.
-func EnsureImport(p *Package, pkg string) {
+func EnsureImport(loaderstate *modload.State, p *Package, pkg string) {
for _, d := range p.Internal.Imports {
if d.Name == pkg {
return
}
}
- p1, err := LoadImportWithFlags(pkg, p.Dir, p, &ImportStack{}, nil, 0)
+ p1, err := LoadImportWithFlags(loaderstate, pkg, p.Dir, p, &ImportStack{}, nil, 0)
if err != nil {
base.Fatalf("load %s: %v", pkg, err)
}
@@ -3494,7 +3494,7 @@ func EnsureImport(p *Package, pkg string) {
// "go test -cover"). It walks through the packages being built (and
// dependencies) and marks them for coverage instrumentation when
// appropriate, and possibly adding additional deps where needed.
-func PrepareForCoverageBuild(pkgs []*Package) {
+func PrepareForCoverageBuild(loaderstate *modload.State, pkgs []*Package) {
var match []func(*Package) bool
matchMainModAndCommandLine := func(p *Package) bool {
@@ -3507,7 +3507,7 @@ func PrepareForCoverageBuild(pkgs []*Package) {
// the specific packages selected by the user-specified pattern(s).
match = make([]func(*Package) bool, len(cfg.BuildCoverPkg))
for i := range cfg.BuildCoverPkg {
- match[i] = MatchPackage(cfg.BuildCoverPkg[i], base.Cwd())
+ match[i] = MatchPackage(loaderstate, cfg.BuildCoverPkg[i], base.Cwd())
}
} else {
// Without -coverpkg, instrument only packages in the main module
@@ -3519,10 +3519,10 @@ func PrepareForCoverageBuild(pkgs []*Package) {
// Visit the packages being built or installed, along with all of
// their dependencies, and mark them to be instrumented, taking
// into account the matchers we've set up in the sequence above.
- SelectCoverPackages(PackageList(pkgs), match, "build")
+ SelectCoverPackages(loaderstate, PackageList(pkgs), match, "build")
}
-func SelectCoverPackages(roots []*Package, match []func(*Package) bool, op string) []*Package {
+func SelectCoverPackages(loaderstate *modload.State, roots []*Package, match []func(*Package) bool, op string) []*Package {
var warntag string
var includeMain bool
switch op {
@@ -3602,7 +3602,7 @@ func SelectCoverPackages(roots []*Package, match []func(*Package) bool, op strin
// Force import of sync/atomic into package if atomic mode.
if cfg.BuildCoverMode == "atomic" {
- EnsureImport(p, "sync/atomic")
+ EnsureImport(loaderstate, p, "sync/atomic")
}
}
diff --git a/src/cmd/go/internal/load/search.go b/src/cmd/go/internal/load/search.go
index 941cfb77a2ec08..09e32a4f46a69b 100644
--- a/src/cmd/go/internal/load/search.go
+++ b/src/cmd/go/internal/load/search.go
@@ -14,7 +14,7 @@ import (
)
// MatchPackage(pattern, cwd)(p) reports whether package p matches pattern in the working directory cwd.
-func MatchPackage(pattern, cwd string) func(*Package) bool {
+func MatchPackage(loaderstate *modload.State, pattern, cwd string) func(*Package) bool {
switch {
case search.IsRelativePath(pattern):
// Split pattern into leading pattern-free directory path
@@ -54,13 +54,13 @@ func MatchPackage(pattern, cwd string) func(*Package) bool {
return func(p *Package) bool { return p.Standard }
case pattern == "cmd":
return func(p *Package) bool { return p.Standard && strings.HasPrefix(p.ImportPath, "cmd/") }
- case pattern == "tool" && modload.Enabled():
+ case pattern == "tool" && modload.Enabled(loaderstate):
return func(p *Package) bool {
- return modload.MainModules.Tools()[p.ImportPath]
+ return loaderstate.MainModules.Tools()[p.ImportPath]
}
- case pattern == "work" && modload.Enabled():
+ case pattern == "work" && modload.Enabled(loaderstate):
return func(p *Package) bool {
- return p.Module != nil && modload.MainModules.Contains(p.Module.Path)
+ return p.Module != nil && loaderstate.MainModules.Contains(p.Module.Path)
}
default:
diff --git a/src/cmd/go/internal/load/test.go b/src/cmd/go/internal/load/test.go
index f895e3a2461d9e..9019545b4b8d82 100644
--- a/src/cmd/go/internal/load/test.go
+++ b/src/cmd/go/internal/load/test.go
@@ -24,6 +24,7 @@ import (
"unicode/utf8"
"cmd/go/internal/fsys"
+ "cmd/go/internal/modload"
"cmd/go/internal/str"
"cmd/go/internal/trace"
)
@@ -106,7 +107,7 @@ func TestPackagesAndErrors(ctx context.Context, done func(), opts PackageOpts, p
defer pre.flush()
allImports := append([]string{}, p.TestImports...)
allImports = append(allImports, p.XTestImports...)
- pre.preloadImports(ctx, opts, allImports, p.Internal.Build)
+ pre.preloadImports(modload.LoaderState, ctx, opts, allImports, p.Internal.Build)
var ptestErr, pxtestErr *PackageError
var imports, ximports []*Package
@@ -116,7 +117,7 @@ func TestPackagesAndErrors(ctx context.Context, done func(), opts PackageOpts, p
stk.Push(ImportInfo{Pkg: p.ImportPath + " (test)"})
rawTestImports := str.StringList(p.TestImports)
for i, path := range p.TestImports {
- p1, err := loadImport(ctx, opts, pre, path, p.Dir, p, &stk, p.Internal.Build.TestImportPos[path], ResolveImport)
+ p1, err := loadImport(modload.LoaderState, ctx, opts, pre, path, p.Dir, p, &stk, p.Internal.Build.TestImportPos[path], ResolveImport)
if err != nil && ptestErr == nil {
ptestErr = err
incomplete = true
@@ -145,7 +146,7 @@ func TestPackagesAndErrors(ctx context.Context, done func(), opts PackageOpts, p
var pxtestIncomplete bool
rawXTestImports := str.StringList(p.XTestImports)
for i, path := range p.XTestImports {
- p1, err := loadImport(ctx, opts, pre, path, p.Dir, p, &stk, p.Internal.Build.XTestImportPos[path], ResolveImport)
+ p1, err := loadImport(modload.LoaderState, ctx, opts, pre, path, p.Dir, p, &stk, p.Internal.Build.XTestImportPos[path], ResolveImport)
if err != nil && pxtestErr == nil {
pxtestErr = err
}
@@ -292,7 +293,7 @@ func TestPackagesAndErrors(ctx context.Context, done func(), opts PackageOpts, p
}
pb := p.Internal.Build
- pmain.DefaultGODEBUG = defaultGODEBUG(pmain, pb.Directives, pb.TestDirectives, pb.XTestDirectives)
+ pmain.DefaultGODEBUG = defaultGODEBUG(modload.LoaderState, pmain, pb.Directives, pb.TestDirectives, pb.XTestDirectives)
if pmain.Internal.BuildInfo == nil || pmain.DefaultGODEBUG != p.DefaultGODEBUG {
// Either we didn't generate build info for the package under test (because it wasn't package main), or
// the DefaultGODEBUG used to build the test main package is different from the DefaultGODEBUG
@@ -321,7 +322,7 @@ func TestPackagesAndErrors(ctx context.Context, done func(), opts PackageOpts, p
if dep == ptest.ImportPath {
pmain.Internal.Imports = append(pmain.Internal.Imports, ptest)
} else {
- p1, err := loadImport(ctx, opts, pre, dep, "", nil, &stk, nil, 0)
+ p1, err := loadImport(modload.LoaderState, ctx, opts, pre, dep, "", nil, &stk, nil, 0)
if err != nil && pmain.Error == nil {
pmain.Error = err
pmain.Incomplete = true
@@ -649,6 +650,14 @@ func (t *testFuncs) ImportPath() string {
return pkg
}
+func (t *testFuncs) ModulePath() string {
+ m := t.Package.Module
+ if m == nil {
+ return ""
+ }
+ return m.Path
+}
+
// Covered returns a string describing which packages are being tested for coverage.
// If the covered package is the same as the tested package, it returns the empty string.
// Otherwise it is a comma-separated human-readable list of packages beginning with
@@ -836,6 +845,7 @@ func init() {
testdeps.CoverMarkProfileEmittedFunc = cfile.MarkProfileEmitted
{{end}}
+ testdeps.ModulePath = {{.ModulePath | printf "%q"}}
testdeps.ImportPath = {{.ImportPath | printf "%q"}}
}
diff --git a/src/cmd/go/internal/modcmd/download.go b/src/cmd/go/internal/modcmd/download.go
index 2f4feae8f254b2..f4ed4b45834e89 100644
--- a/src/cmd/go/internal/modcmd/download.go
+++ b/src/cmd/go/internal/modcmd/download.go
@@ -109,18 +109,18 @@ type ModuleJSON struct {
}
func runDownload(ctx context.Context, cmd *base.Command, args []string) {
- modload.InitWorkfile()
+ modload.InitWorkfile(modload.LoaderState)
// Check whether modules are enabled and whether we're in a module.
- modload.ForceUseModules = true
+ modload.LoaderState.ForceUseModules = true
modload.ExplicitWriteGoMod = true
haveExplicitArgs := len(args) > 0
- if modload.HasModRoot() || modload.WorkFilePath() != "" {
- modload.LoadModFile(ctx) // to fill MainModules
+ if modload.HasModRoot(modload.LoaderState) || modload.WorkFilePath(modload.LoaderState) != "" {
+ modload.LoadModFile(modload.LoaderState, ctx) // to fill MainModules
if haveExplicitArgs {
- for _, mainModule := range modload.MainModules.Versions() {
+ for _, mainModule := range modload.LoaderState.MainModules.Versions() {
targetAtUpgrade := mainModule.Path + "@upgrade"
targetAtPatch := mainModule.Path + "@patch"
for _, arg := range args {
@@ -130,14 +130,14 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) {
}
}
}
- } else if modload.WorkFilePath() != "" {
+ } else if modload.WorkFilePath(modload.LoaderState) != "" {
// TODO(#44435): Think about what the correct query is to download the
// right set of modules. Also see code review comment at
// https://go-review.googlesource.com/c/go/+/359794/comments/ce946a80_6cf53992.
args = []string{"all"}
} else {
- mainModule := modload.MainModules.Versions()[0]
- modFile := modload.MainModules.ModFile(mainModule)
+ mainModule := modload.LoaderState.MainModules.Versions()[0]
+ modFile := modload.LoaderState.MainModules.ModFile(mainModule)
if modFile.Go == nil || gover.Compare(modFile.Go.Version, gover.ExplicitIndirectVersion) < 0 {
if len(modFile.Require) > 0 {
args = []string{"all"}
@@ -169,7 +169,7 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) {
}
if len(args) == 0 {
- if modload.HasModRoot() {
+ if modload.HasModRoot(modload.LoaderState) {
os.Stderr.WriteString("go: no module dependencies to download\n")
} else {
base.Errorf("go: no modules specified (see 'go help mod download')")
@@ -177,7 +177,7 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) {
base.Exit()
}
- if *downloadReuse != "" && modload.HasModRoot() {
+ if *downloadReuse != "" && modload.HasModRoot(modload.LoaderState) {
base.Fatalf("go mod download -reuse cannot be used inside a module")
}
@@ -220,7 +220,7 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) {
// when we can.
}
- if !haveExplicitArgs && modload.WorkFilePath() == "" {
+ if !haveExplicitArgs && modload.WorkFilePath(modload.LoaderState) == "" {
// 'go mod download' is sometimes run without arguments to pre-populate the
// module cache. In modules that aren't at go 1.17 or higher, it may fetch
// modules that aren't needed to build packages in the main module. This is
@@ -291,7 +291,7 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) {
// with no arguments we download the module pattern "all",
// which may include dependencies that are normally pruned out
// of the individual modules in the workspace.
- if haveExplicitArgs || modload.WorkFilePath() != "" {
+ if haveExplicitArgs || modload.WorkFilePath(modload.LoaderState) != "" {
var sw toolchain.Switcher
// Add errors to the Switcher in deterministic order so that they will be
// logged deterministically.
@@ -347,7 +347,7 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) {
//
// Don't save sums for 'go mod download' without arguments unless we're in
// workspace mode; see comment above.
- if haveExplicitArgs || modload.WorkFilePath() != "" {
+ if haveExplicitArgs || modload.WorkFilePath(modload.LoaderState) != "" {
if err := modload.WriteGoMod(ctx, modload.WriteOpts{}); err != nil {
base.Error(err)
}
diff --git a/src/cmd/go/internal/modcmd/edit.go b/src/cmd/go/internal/modcmd/edit.go
index aafd9752a8e487..041b4432bfd4f0 100644
--- a/src/cmd/go/internal/modcmd/edit.go
+++ b/src/cmd/go/internal/modcmd/edit.go
@@ -104,11 +104,6 @@ writing it back to go.mod.
The -json flag prints the final go.mod file in JSON format instead of
writing it back to go.mod. The JSON output corresponds to these Go types:
- type Module struct {
- Path string
- Version string
- }
-
type GoMod struct {
Module ModPath
Go string
@@ -118,6 +113,13 @@ writing it back to go.mod. The JSON output corresponds to these Go types:
Exclude []Module
Replace []Replace
Retract []Retract
+ Tool []Tool
+ Ignore []Ignore
+ }
+
+ type Module struct {
+ Path string
+ Version string
}
type ModPath struct {
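The reordered type declarations above describe the JSON printed by `go mod edit -json`. As a rough sketch of consuming that output, the program below decodes a small subset of the documented fields; the inline `Path` field standing in for `ModPath` is an assumption, since the full `ModPath` definition is not shown in this hunk, and fields such as `Tool` and `Ignore` are deliberately omitted.

```go
package main

import (
	"encoding/json"
	"fmt"
	"os/exec"
)

// Module mirrors the Module type documented above.
type Module struct {
	Path    string
	Version string
}

// GoMod mirrors a subset of the documented GoMod type; JSON fields not
// listed here are simply ignored by encoding/json.
type GoMod struct {
	Module  struct{ Path string } // assumed minimal shape of ModPath
	Go      string
	Exclude []Module
}

func main() {
	out, err := exec.Command("go", "mod", "edit", "-json").Output()
	if err != nil {
		panic(err)
	}
	var m GoMod
	if err := json.Unmarshal(out, &m); err != nil {
		panic(err)
	}
	fmt.Printf("module %s (go %s), %d excluded\n", m.Module.Path, m.Go, len(m.Exclude))
}
```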
diff --git a/src/cmd/go/internal/modcmd/graph.go b/src/cmd/go/internal/modcmd/graph.go
index 172c1dda5ce8fb..3bc6009b57b595 100644
--- a/src/cmd/go/internal/modcmd/graph.go
+++ b/src/cmd/go/internal/modcmd/graph.go
@@ -52,13 +52,13 @@ func init() {
}
func runGraph(ctx context.Context, cmd *base.Command, args []string) {
- modload.InitWorkfile()
+ modload.InitWorkfile(modload.LoaderState)
if len(args) > 0 {
base.Fatalf("go: 'go mod graph' accepts no arguments")
}
- modload.ForceUseModules = true
- modload.RootMode = modload.NeedRoot
+ modload.LoaderState.ForceUseModules = true
+ modload.LoaderState.RootMode = modload.NeedRoot
goVersion := graphGo.String()
if goVersion != "" && gover.Compare(gover.Local(), goVersion) < 0 {
diff --git a/src/cmd/go/internal/modcmd/init.go b/src/cmd/go/internal/modcmd/init.go
index 356a0569913edf..618c673bf86f24 100644
--- a/src/cmd/go/internal/modcmd/init.go
+++ b/src/cmd/go/internal/modcmd/init.go
@@ -43,6 +43,6 @@ func runInit(ctx context.Context, cmd *base.Command, args []string) {
modPath = args[0]
}
- modload.ForceUseModules = true
+ modload.LoaderState.ForceUseModules = true
modload.CreateModFile(ctx, modPath) // does all the hard work
}
diff --git a/src/cmd/go/internal/modcmd/tidy.go b/src/cmd/go/internal/modcmd/tidy.go
index 2efa33a7c343dd..c693bd52a38af9 100644
--- a/src/cmd/go/internal/modcmd/tidy.go
+++ b/src/cmd/go/internal/modcmd/tidy.go
@@ -119,8 +119,8 @@ func runTidy(ctx context.Context, cmd *base.Command, args []string) {
// those packages. In order to make 'go test' reproducible for the packages
// that are in 'all' but outside of the main module, we must explicitly
// request that their test dependencies be included.
- modload.ForceUseModules = true
- modload.RootMode = modload.NeedRoot
+ modload.LoaderState.ForceUseModules = true
+ modload.LoaderState.RootMode = modload.NeedRoot
goVersion := tidyGo.String()
if goVersion != "" && gover.Compare(gover.Local(), goVersion) < 0 {
@@ -130,7 +130,7 @@ func runTidy(ctx context.Context, cmd *base.Command, args []string) {
})
}
- modload.LoadPackages(ctx, modload.PackageOpts{
+ modload.LoadPackages(modload.LoaderState, ctx, modload.PackageOpts{
TidyGoVersion: tidyGo.String(),
Tags: imports.AnyTags(),
Tidy: true,
diff --git a/src/cmd/go/internal/modcmd/vendor.go b/src/cmd/go/internal/modcmd/vendor.go
index e1a9081a95ff80..8d9672d5365523 100644
--- a/src/cmd/go/internal/modcmd/vendor.go
+++ b/src/cmd/go/internal/modcmd/vendor.go
@@ -66,8 +66,8 @@ func init() {
}
func runVendor(ctx context.Context, cmd *base.Command, args []string) {
- modload.InitWorkfile()
- if modload.WorkFilePath() != "" {
+ modload.InitWorkfile(modload.LoaderState)
+ if modload.WorkFilePath(modload.LoaderState) != "" {
base.Fatalf("go: 'go mod vendor' cannot be run in workspace mode. Run 'go work vendor' to vendor the workspace or set 'GOWORK=off' to exit workspace mode.")
}
RunVendor(ctx, vendorE, vendorO, args)
@@ -77,8 +77,8 @@ func RunVendor(ctx context.Context, vendorE bool, vendorO string, args []string)
if len(args) != 0 {
base.Fatalf("go: 'go mod vendor' accepts no arguments")
}
- modload.ForceUseModules = true
- modload.RootMode = modload.NeedRoot
+ modload.LoaderState.ForceUseModules = true
+ modload.LoaderState.RootMode = modload.NeedRoot
loadOpts := modload.PackageOpts{
Tags: imports.AnyTags(),
@@ -88,7 +88,7 @@ func RunVendor(ctx context.Context, vendorE bool, vendorO string, args []string)
AllowErrors: vendorE,
SilenceMissingStdImports: true,
}
- _, pkgs := modload.LoadPackages(ctx, loadOpts, "all")
+ _, pkgs := modload.LoadPackages(modload.LoaderState, ctx, loadOpts, "all")
var vdir string
switch {
@@ -97,7 +97,7 @@ func RunVendor(ctx context.Context, vendorE bool, vendorO string, args []string)
case vendorO != "":
vdir = filepath.Join(base.Cwd(), vendorO)
default:
- vdir = filepath.Join(modload.VendorDir())
+ vdir = filepath.Join(modload.VendorDir(modload.LoaderState))
}
if err := os.RemoveAll(vdir); err != nil {
base.Fatal(err)
@@ -106,7 +106,7 @@ func RunVendor(ctx context.Context, vendorE bool, vendorO string, args []string)
modpkgs := make(map[module.Version][]string)
for _, pkg := range pkgs {
m := modload.PackageModule(pkg)
- if m.Path == "" || modload.MainModules.Contains(m.Path) {
+ if m.Path == "" || modload.LoaderState.MainModules.Contains(m.Path) {
continue
}
modpkgs[m] = append(modpkgs[m], pkg)
@@ -116,13 +116,13 @@ func RunVendor(ctx context.Context, vendorE bool, vendorO string, args []string)
includeAllReplacements := false
includeGoVersions := false
isExplicit := map[module.Version]bool{}
- gv := modload.MainModules.GoVersion()
- if gover.Compare(gv, "1.14") >= 0 && (modload.FindGoWork(base.Cwd()) != "" || modload.ModFile().Go != nil) {
+ gv := modload.LoaderState.MainModules.GoVersion(modload.LoaderState)
+ if gover.Compare(gv, "1.14") >= 0 && (modload.FindGoWork(modload.LoaderState, base.Cwd()) != "" || modload.ModFile().Go != nil) {
// If the Go version is at least 1.14, annotate all explicit 'require' and
// 'replace' targets found in the go.mod file so that we can perform a
// stronger consistency check when -mod=vendor is set.
- for _, m := range modload.MainModules.Versions() {
- if modFile := modload.MainModules.ModFile(m); modFile != nil {
+ for _, m := range modload.LoaderState.MainModules.Versions() {
+ if modFile := modload.LoaderState.MainModules.ModFile(m); modFile != nil {
for _, r := range modFile.Require {
isExplicit[r.Mod] = true
}
@@ -156,13 +156,13 @@ func RunVendor(ctx context.Context, vendorE bool, vendorO string, args []string)
w = io.MultiWriter(&buf, os.Stderr)
}
- if modload.MainModules.WorkFile() != nil {
+ if modload.LoaderState.MainModules.WorkFile() != nil {
fmt.Fprintf(w, "## workspace\n")
}
replacementWritten := make(map[module.Version]bool)
for _, m := range vendorMods {
- replacement := modload.Replacement(m)
+ replacement := modload.Replacement(modload.LoaderState, m)
line := moduleLine(m, replacement)
replacementWritten[m] = true
io.WriteString(w, line)
@@ -192,8 +192,8 @@ func RunVendor(ctx context.Context, vendorE bool, vendorO string, args []string)
// Record unused and wildcard replacements at the end of the modules.txt file:
// without access to the complete build list, the consumer of the vendor
// directory can't otherwise determine that those replacements had no effect.
- for _, m := range modload.MainModules.Versions() {
- if workFile := modload.MainModules.WorkFile(); workFile != nil {
+ for _, m := range modload.LoaderState.MainModules.Versions() {
+ if workFile := modload.LoaderState.MainModules.WorkFile(); workFile != nil {
for _, r := range workFile.Replace {
if replacementWritten[r.Old] {
// We already recorded this replacement.
@@ -208,14 +208,14 @@ func RunVendor(ctx context.Context, vendorE bool, vendorO string, args []string)
}
}
}
- if modFile := modload.MainModules.ModFile(m); modFile != nil {
+ if modFile := modload.LoaderState.MainModules.ModFile(m); modFile != nil {
for _, r := range modFile.Replace {
if replacementWritten[r.Old] {
// We already recorded this replacement.
continue
}
replacementWritten[r.Old] = true
- rNew := modload.Replacement(r.Old)
+ rNew := modload.Replacement(modload.LoaderState, r.Old)
if rNew == (module.Version{}) {
// There is no replacement. Don't try to write it.
continue
@@ -269,7 +269,7 @@ func moduleLine(m, r module.Version) string {
}
func vendorPkg(vdir, pkg string) {
- src, realPath, _ := modload.Lookup("", false, pkg)
+ src, realPath, _ := modload.Lookup(modload.LoaderState, "", false, pkg)
if src == "" {
base.Errorf("internal error: no pkg for %s\n", pkg)
return
@@ -315,7 +315,7 @@ func vendorPkg(vdir, pkg string) {
}
}
var embedPatterns []string
- if gover.Compare(modload.MainModules.GoVersion(), "1.22") >= 0 {
+ if gover.Compare(modload.LoaderState.MainModules.GoVersion(modload.LoaderState), "1.22") >= 0 {
embedPatterns = bp.EmbedPatterns
} else {
// Maintain the behavior of https://github.com/golang/go/issues/63473
@@ -431,7 +431,7 @@ func matchPotentialSourceFile(dir string, info fs.DirEntry) bool {
return false
}
if info.Name() == "go.mod" || info.Name() == "go.sum" {
- if gv := modload.MainModules.GoVersion(); gover.Compare(gv, "1.17") >= 0 {
+ if gv := modload.LoaderState.MainModules.GoVersion(modload.LoaderState); gover.Compare(gv, "1.17") >= 0 {
// As of Go 1.17, we strip go.mod and go.sum files from dependency modules.
// Otherwise, 'go' commands invoked within the vendor subtree may misidentify
// an arbitrary directory within the vendor tree as a module root.
diff --git a/src/cmd/go/internal/modcmd/verify.go b/src/cmd/go/internal/modcmd/verify.go
index d07f730c5d0dcf..d8227bcd5455a3 100644
--- a/src/cmd/go/internal/modcmd/verify.go
+++ b/src/cmd/go/internal/modcmd/verify.go
@@ -44,14 +44,14 @@ func init() {
}
func runVerify(ctx context.Context, cmd *base.Command, args []string) {
- modload.InitWorkfile()
+ modload.InitWorkfile(modload.LoaderState)
if len(args) != 0 {
// NOTE(rsc): Could take a module pattern.
base.Fatalf("go: verify takes no arguments")
}
- modload.ForceUseModules = true
- modload.RootMode = modload.NeedRoot
+ modload.LoaderState.ForceUseModules = true
+ modload.LoaderState.RootMode = modload.NeedRoot
// Only verify up to GOMAXPROCS zips at once.
type token struct{}
@@ -94,7 +94,7 @@ func verifyMod(ctx context.Context, mod module.Version) []error {
// "go" and "toolchain" have no disk footprint; nothing to verify.
return nil
}
- if modload.MainModules.Contains(mod.Path) {
+ if modload.LoaderState.MainModules.Contains(mod.Path) {
return nil
}
var errs []error
diff --git a/src/cmd/go/internal/modcmd/why.go b/src/cmd/go/internal/modcmd/why.go
index 198672d8064113..b37d9fded0f47c 100644
--- a/src/cmd/go/internal/modcmd/why.go
+++ b/src/cmd/go/internal/modcmd/why.go
@@ -63,9 +63,9 @@ func init() {
}
func runWhy(ctx context.Context, cmd *base.Command, args []string) {
- modload.InitWorkfile()
- modload.ForceUseModules = true
- modload.RootMode = modload.NeedRoot
+ modload.InitWorkfile(modload.LoaderState)
+ modload.LoaderState.ForceUseModules = true
+ modload.LoaderState.RootMode = modload.NeedRoot
modload.ExplicitWriteGoMod = true // don't write go.mod in ListModules
loadOpts := modload.PackageOpts{
@@ -89,7 +89,7 @@ func runWhy(ctx context.Context, cmd *base.Command, args []string) {
}
byModule := make(map[string][]string)
- _, pkgs := modload.LoadPackages(ctx, loadOpts, "all")
+ _, pkgs := modload.LoadPackages(modload.LoaderState, ctx, loadOpts, "all")
for _, path := range pkgs {
m := modload.PackageModule(path)
if m.Path != "" {
@@ -120,9 +120,9 @@ func runWhy(ctx context.Context, cmd *base.Command, args []string) {
}
} else {
// Resolve to packages.
- matches, _ := modload.LoadPackages(ctx, loadOpts, args...)
+ matches, _ := modload.LoadPackages(modload.LoaderState, ctx, loadOpts, args...)
- modload.LoadPackages(ctx, loadOpts, "all") // rebuild graph, from main module (not from named packages)
+ modload.LoadPackages(modload.LoaderState, ctx, loadOpts, "all") // rebuild graph, from main module (not from named packages)
sep := ""
for _, m := range matches {
diff --git a/src/cmd/go/internal/modfetch/zip_sum_test/zip_sum_test.go b/src/cmd/go/internal/modfetch/zip_sum_test/zip_sum_test.go
index 16cc1457058933..9d59d1a8ea8218 100644
--- a/src/cmd/go/internal/modfetch/zip_sum_test/zip_sum_test.go
+++ b/src/cmd/go/internal/modfetch/zip_sum_test/zip_sum_test.go
@@ -94,7 +94,7 @@ func TestZipSums(t *testing.T) {
cfg.GOPROXY = "direct"
cfg.GOSUMDB = "off"
- modload.Init()
+ modload.Init(modload.LoaderState)
// Shard tests by downloading only every nth module when shard flags are set.
// This makes it easier to test small groups of modules quickly. We avoid
diff --git a/src/cmd/go/internal/modget/get.go b/src/cmd/go/internal/modget/get.go
index 25dbf3972fd465..141e1708fa6ded 100644
--- a/src/cmd/go/internal/modget/get.go
+++ b/src/cmd/go/internal/modget/get.go
@@ -298,7 +298,7 @@ func runGet(ctx context.Context, cmd *base.Command, args []string) {
base.Fatalf("go: -insecure flag is no longer supported; use GOINSECURE instead")
}
- modload.ForceUseModules = true
+ modload.LoaderState.ForceUseModules = true
// Do not allow any updating of go.mod until we've applied
// all the requested changes and checked that the result matches
@@ -307,14 +307,14 @@ func runGet(ctx context.Context, cmd *base.Command, args []string) {
// Allow looking up modules for import paths when outside of a module.
// 'go get' is expected to do this, unlike other commands.
- modload.AllowMissingModuleImports()
+ modload.AllowMissingModuleImports(modload.LoaderState)
// 'go get' no longer builds or installs packages, so there's nothing to do
// if there's no go.mod file.
// TODO(#40775): make modload.Init return ErrNoModRoot instead of exiting.
// We could handle that here by printing a different message.
- modload.Init()
- if !modload.HasModRoot() {
+ modload.Init(modload.LoaderState)
+ if !modload.HasModRoot(modload.LoaderState) {
base.Fatalf("go: go.mod file not found in current directory or any parent directory.\n" +
"\t'go get' is no longer supported outside a module.\n" +
"\tTo build and install a command, use 'go install' with a version,\n" +
@@ -424,9 +424,9 @@ func runGet(ctx context.Context, cmd *base.Command, args []string) {
newReqs := reqsFromGoMod(modload.ModFile())
r.reportChanges(oldReqs, newReqs)
- if gowork := modload.FindGoWork(base.Cwd()); gowork != "" {
+ if gowork := modload.FindGoWork(modload.LoaderState, base.Cwd()); gowork != "" {
wf, err := modload.ReadWorkFile(gowork)
- if err == nil && modload.UpdateWorkGoVersion(wf, modload.MainModules.GoVersion()) {
+ if err == nil && modload.UpdateWorkGoVersion(wf, modload.LoaderState.MainModules.GoVersion(modload.LoaderState)) {
modload.WriteWorkFile(gowork, wf)
}
}
@@ -448,7 +448,7 @@ func updateTools(ctx context.Context, queries []*query, opts *modload.WriteOpts)
patterns = append(patterns, q.pattern)
}
- matches, _ := modload.LoadPackages(ctx, pkgOpts, patterns...)
+ matches, _ := modload.LoadPackages(modload.LoaderState, ctx, pkgOpts, patterns...)
for i, m := range matches {
if queries[i].version == "none" {
opts.DropTools = append(opts.DropTools, m.Pkgs...)
@@ -574,7 +574,7 @@ func newResolver(ctx context.Context, queries []*query) *resolver {
buildListVersion: initialVersion,
initialVersion: initialVersion,
nonesByPath: map[string]*query{},
- workspace: loadWorkspace(modload.FindGoWork(base.Cwd())),
+ workspace: loadWorkspace(modload.FindGoWork(modload.LoaderState, base.Cwd())),
}
for _, q := range queries {
@@ -645,7 +645,7 @@ func (r *resolver) noneForPath(mPath string) (nq *query, found bool) {
// allowed versions.
func (r *resolver) queryModule(ctx context.Context, mPath, query string, selected func(string) string) (module.Version, error) {
current := r.initialSelected(mPath)
- rev, err := modload.Query(ctx, mPath, query, current, r.checkAllowedOr(query, selected))
+ rev, err := modload.Query(modload.LoaderState, ctx, mPath, query, current, r.checkAllowedOr(query, selected))
if err != nil {
return module.Version{}, err
}
@@ -655,7 +655,7 @@ func (r *resolver) queryModule(ctx context.Context, mPath, query string, selecte
// queryPackages wraps modload.QueryPackage, substituting r.checkAllowedOr to
// decide allowed versions.
func (r *resolver) queryPackages(ctx context.Context, pattern, query string, selected func(string) string) (pkgMods []module.Version, err error) {
- results, err := modload.QueryPackages(ctx, pattern, query, selected, r.checkAllowedOr(query, selected))
+ results, err := modload.QueryPackages(modload.LoaderState, ctx, pattern, query, selected, r.checkAllowedOr(query, selected))
if len(results) > 0 {
pkgMods = make([]module.Version, 0, len(results))
for _, qr := range results {
@@ -668,7 +668,7 @@ func (r *resolver) queryPackages(ctx context.Context, pattern, query string, sel
// queryPattern wraps modload.QueryPattern, substituting r.checkAllowedOr to
// decide allowed versions.
func (r *resolver) queryPattern(ctx context.Context, pattern, query string, selected func(string) string) (pkgMods []module.Version, mod module.Version, err error) {
- results, modOnly, err := modload.QueryPattern(ctx, pattern, query, selected, r.checkAllowedOr(query, selected))
+ results, modOnly, err := modload.QueryPattern(modload.LoaderState, ctx, pattern, query, selected, r.checkAllowedOr(query, selected))
if len(results) > 0 {
pkgMods = make([]module.Version, 0, len(results))
for _, qr := range results {
@@ -721,8 +721,8 @@ func (r *resolver) queryNone(ctx context.Context, q *query) {
if !q.isWildcard() {
q.pathOnce(q.pattern, func() pathSet {
- hasModRoot := modload.HasModRoot()
- if hasModRoot && modload.MainModules.Contains(q.pattern) {
+ hasModRoot := modload.HasModRoot(modload.LoaderState)
+ if hasModRoot && modload.LoaderState.MainModules.Contains(q.pattern) {
v := module.Version{Path: q.pattern}
// The user has explicitly requested to downgrade their own module to
// version "none". This is not an entirely unreasonable request: it
@@ -746,7 +746,7 @@ func (r *resolver) queryNone(ctx context.Context, q *query) {
continue
}
q.pathOnce(curM.Path, func() pathSet {
- if modload.HasModRoot() && curM.Version == "" && modload.MainModules.Contains(curM.Path) {
+ if modload.HasModRoot(modload.LoaderState) && curM.Version == "" && modload.LoaderState.MainModules.Contains(curM.Path) {
return errSet(&modload.QueryMatchesMainModulesError{MainModules: []module.Version{curM}, Pattern: q.pattern, Query: q.version})
}
return pathSet{mod: module.Version{Path: curM.Path, Version: "none"}}
@@ -766,13 +766,13 @@ func (r *resolver) performLocalQueries(ctx context.Context) {
// Absolute paths like C:\foo and relative paths like ../foo... are
// restricted to matching packages in the main module.
- pkgPattern, mainModule := modload.MainModules.DirImportPath(ctx, q.pattern)
+ pkgPattern, mainModule := modload.LoaderState.MainModules.DirImportPath(modload.LoaderState, ctx, q.pattern)
if pkgPattern == "." {
modload.MustHaveModRoot()
- versions := modload.MainModules.Versions()
+ versions := modload.LoaderState.MainModules.Versions()
modRoots := make([]string, 0, len(versions))
for _, m := range versions {
- modRoots = append(modRoots, modload.MainModules.ModRoot(m))
+ modRoots = append(modRoots, modload.LoaderState.MainModules.ModRoot(m))
}
var plural string
if len(modRoots) != 1 {
@@ -792,7 +792,7 @@ func (r *resolver) performLocalQueries(ctx context.Context) {
}
if !q.isWildcard() {
modload.MustHaveModRoot()
- return errSet(fmt.Errorf("%s%s is not a package in module rooted at %s", q.pattern, absDetail, modload.MainModules.ModRoot(mainModule)))
+ return errSet(fmt.Errorf("%s%s is not a package in module rooted at %s", q.pattern, absDetail, modload.LoaderState.MainModules.ModRoot(mainModule)))
}
search.WarnUnmatched([]*search.Match{match})
return pathSet{}
@@ -848,7 +848,7 @@ func (r *resolver) queryWildcard(ctx context.Context, q *query) {
return pathSet{}
}
- if modload.MainModules.Contains(curM.Path) && !versionOkForMainModule(q.version) {
+ if modload.LoaderState.MainModules.Contains(curM.Path) && !versionOkForMainModule(q.version) {
if q.matchesPath(curM.Path) {
return errSet(&modload.QueryMatchesMainModulesError{
MainModules: []module.Version{curM},
@@ -1065,7 +1065,7 @@ func (r *resolver) queryPath(ctx context.Context, q *query) {
// pattern is "tool".
func (r *resolver) performToolQueries(ctx context.Context) {
for _, q := range r.toolQueries {
- for tool := range modload.MainModules.Tools() {
+ for tool := range modload.LoaderState.MainModules.Tools() {
q.pathOnce(tool, func() pathSet {
pkgMods, err := r.queryPackages(ctx, tool, q.version, r.initialSelected)
return pathSet{pkgMods: pkgMods, err: err}
@@ -1082,10 +1082,10 @@ func (r *resolver) performWorkQueries(ctx context.Context) {
// TODO(matloob): Maybe export MainModules.mustGetSingleMainModule and call that.
// There are a few other places outside the modload package where we expect
// a single main module.
- if len(modload.MainModules.Versions()) != 1 {
+ if len(modload.LoaderState.MainModules.Versions()) != 1 {
panic("internal error: number of main modules is not exactly one in resolution phase of go get")
}
- mainModule := modload.MainModules.Versions()[0]
+ mainModule := modload.LoaderState.MainModules.Versions()[0]
// We know what the result is going to be, assuming the main module is not
// empty, (it's the main module itself) but first check to see that there
@@ -1275,13 +1275,13 @@ func (r *resolver) loadPackages(ctx context.Context, patterns []string, findPack
return nil
}
- _, pkgs := modload.LoadPackages(ctx, opts, patterns...)
+ _, pkgs := modload.LoadPackages(modload.LoaderState, ctx, opts, patterns...)
for _, pkgPath := range pkgs {
const (
parentPath = ""
parentIsStd = false
)
- _, _, err := modload.Lookup(parentPath, parentIsStd, pkgPath)
+ _, _, err := modload.Lookup(modload.LoaderState, parentPath, parentIsStd, pkgPath)
if err == nil {
continue
}
@@ -1294,15 +1294,13 @@ func (r *resolver) loadPackages(ctx context.Context, patterns []string, findPack
continue
}
- var (
- importMissing *modload.ImportMissingError
- ambiguous *modload.AmbiguousImportError
- )
- if !errors.As(err, &importMissing) && !errors.As(err, &ambiguous) {
- // The package, which is a dependency of something we care about, has some
- // problem that we can't resolve with a version change.
- // Leave the error for the final LoadPackages call.
- continue
+ if _, ok := errors.AsType[*modload.ImportMissingError](err); !ok {
+ if _, ok := errors.AsType[*modload.AmbiguousImportError](err); !ok {
+ // The package, which is a dependency of something we care about, has some
+ // problem that we can't resolve with a version change.
+ // Leave the error for the final LoadPackages call.
+ continue
+ }
}
path := pkgPath
@@ -1496,7 +1494,7 @@ func (r *resolver) disambiguate(cs pathSet) (filtered pathSet, isPackage bool, m
continue
}
- if modload.MainModules.Contains(m.Path) {
+ if modload.LoaderState.MainModules.Contains(m.Path) {
if m.Version == "" {
return pathSet{}, true, m, true
}
@@ -1612,7 +1610,7 @@ func (r *resolver) checkPackageProblems(ctx context.Context, pkgPatterns []strin
// info, but switch back to single module mode when fetching sums so that we update
// the single module's go.sum file.
var exitWorkspace func()
- if r.workspace != nil && r.workspace.hasModule(modload.MainModules.Versions()[0].Path) {
+ if r.workspace != nil && r.workspace.hasModule(modload.LoaderState.MainModules.Versions()[0].Path) {
var err error
exitWorkspace, err = modload.EnterWorkspace(ctx)
if err != nil {
@@ -1653,7 +1651,7 @@ func (r *resolver) checkPackageProblems(ctx context.Context, pkgPatterns []strin
AllowErrors: true,
SilenceNoGoErrors: true,
}
- matches, pkgs := modload.LoadPackages(ctx, pkgOpts, pkgPatterns...)
+ matches, pkgs := modload.LoadPackages(modload.LoaderState, ctx, pkgOpts, pkgPatterns...)
for _, m := range matches {
if len(m.Errs) > 0 {
base.SetExitStatus(1)
@@ -1661,7 +1659,7 @@ func (r *resolver) checkPackageProblems(ctx context.Context, pkgPatterns []strin
}
}
for _, pkg := range pkgs {
- if dir, _, err := modload.Lookup("", false, pkg); err != nil {
+ if dir, _, err := modload.Lookup(modload.LoaderState, "", false, pkg); err != nil {
if dir != "" && errors.Is(err, imports.ErrNoGo) {
// Since dir is non-empty, we must have located source files
// associated with either the package or its test — ErrNoGo must
@@ -1674,7 +1672,7 @@ func (r *resolver) checkPackageProblems(ctx context.Context, pkgPatterns []strin
}
base.SetExitStatus(1)
- if ambiguousErr := (*modload.AmbiguousImportError)(nil); errors.As(err, &ambiguousErr) {
+ if ambiguousErr, ok := errors.AsType[*modload.AmbiguousImportError](err); ok {
for _, m := range ambiguousErr.Modules {
relevantMods[m] |= hasPkg
}
@@ -1692,7 +1690,7 @@ func (r *resolver) checkPackageProblems(ctx context.Context, pkgPatterns []strin
}
}
- reqs := modload.LoadModFile(ctx)
+ reqs := modload.LoadModFile(modload.LoaderState, ctx)
for m := range relevantMods {
if reqs.IsDirect(m.Path) {
relevantMods[m] |= direct
@@ -1716,8 +1714,8 @@ func (r *resolver) checkPackageProblems(ctx context.Context, pkgPatterns []strin
for i := range retractions {
i := i
r.work.Add(func() {
- err := modload.CheckRetractions(ctx, retractions[i].m)
- if retractErr := (*modload.ModuleRetractedError)(nil); errors.As(err, &retractErr) {
+ err := modload.CheckRetractions(modload.LoaderState, ctx, retractions[i].m)
+ if _, ok := errors.AsType[*modload.ModuleRetractedError](err); ok {
retractions[i].message = err.Error()
}
})
@@ -1737,7 +1735,7 @@ func (r *resolver) checkPackageProblems(ctx context.Context, pkgPatterns []strin
for i := range deprecations {
i := i
r.work.Add(func() {
- deprecation, err := modload.CheckDeprecation(ctx, deprecations[i].m)
+ deprecation, err := modload.CheckDeprecation(modload.LoaderState, ctx, deprecations[i].m)
if err != nil || deprecation == "" {
return
}
@@ -1767,7 +1765,7 @@ func (r *resolver) checkPackageProblems(ctx context.Context, pkgPatterns []strin
i := i
m := r.buildList[i]
mActual := m
- if mRepl := modload.Replacement(m); mRepl.Path != "" {
+ if mRepl := modload.Replacement(modload.LoaderState, m); mRepl.Path != "" {
mActual = mRepl
}
old := module.Version{Path: m.Path, Version: r.initialVersion[m.Path]}
@@ -1775,7 +1773,7 @@ func (r *resolver) checkPackageProblems(ctx context.Context, pkgPatterns []strin
continue
}
oldActual := old
- if oldRepl := modload.Replacement(old); oldRepl.Path != "" {
+ if oldRepl := modload.Replacement(modload.LoaderState, old); oldRepl.Path != "" {
oldActual = oldRepl
}
if mActual == oldActual || mActual.Version == "" || !modfetch.HaveSum(oldActual) {
@@ -1951,7 +1949,7 @@ func (r *resolver) resolve(q *query, m module.Version) {
panic("internal error: resolving a module.Version with an empty path")
}
- if modload.MainModules.Contains(m.Path) && m.Version != "" {
+ if modload.LoaderState.MainModules.Contains(m.Path) && m.Version != "" {
reportError(q, &modload.QueryMatchesMainModulesError{
MainModules: []module.Version{{Path: m.Path}},
Pattern: q.pattern,
@@ -1983,19 +1981,19 @@ func (r *resolver) updateBuildList(ctx context.Context, additions []module.Versi
resolved := make([]module.Version, 0, len(r.resolvedVersion))
for mPath, rv := range r.resolvedVersion {
- if !modload.MainModules.Contains(mPath) {
+ if !modload.LoaderState.MainModules.Contains(mPath) {
resolved = append(resolved, module.Version{Path: mPath, Version: rv.version})
}
}
- changed, err := modload.EditBuildList(ctx, additions, resolved)
+ changed, err := modload.EditBuildList(modload.LoaderState, ctx, additions, resolved)
if err != nil {
if errors.Is(err, gover.ErrTooNew) {
toolchain.SwitchOrFatal(ctx, err)
}
- var constraint *modload.ConstraintError
- if !errors.As(err, &constraint) {
+ constraint, ok := errors.AsType[*modload.ConstraintError](err)
+ if !ok {
base.Fatal(err)
}
@@ -2066,8 +2064,11 @@ func reqsFromGoMod(f *modfile.File) []module.Version {
// does not exist at the requested version, either because the module does not
// exist at all or because it does not include that specific version.
func isNoSuchModuleVersion(err error) bool {
- var noMatch *modload.NoMatchingVersionError
- return errors.Is(err, os.ErrNotExist) || errors.As(err, &noMatch)
+ if errors.Is(err, os.ErrNotExist) {
+ return true
+ }
+ _, ok := errors.AsType[*modload.NoMatchingVersionError](err)
+ return ok
}
// isNoSuchPackageVersion reports whether err indicates that the requested
@@ -2075,8 +2076,11 @@ func isNoSuchModuleVersion(err error) bool {
// that could contain it exists at that version, or because every such module
// that does exist does not actually contain the package.
func isNoSuchPackageVersion(err error) bool {
- var noPackage *modload.PackageNotInModuleError
- return isNoSuchModuleVersion(err) || errors.As(err, &noPackage)
+ if isNoSuchModuleVersion(err) {
+ return true
+ }
+ _, ok := errors.AsType[*modload.PackageNotInModuleError](err)
+ return ok
}
// workspace represents the set of modules in a workspace.
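
Many of the get.go hunks above replace the two-step `errors.As` idiom (declare a typed pointer, pass its address) with the generic `errors.AsType` introduced in this release. A small self-contained sketch of the two idioms side by side, assuming the `AsType[T](err) (T, bool)` shape implied by these call sites; `notFoundError` here is a hypothetical type standing in for errors like `*modload.NoMatchingVersionError`:

```go
package main

import (
	"errors"
	"fmt"
)

// notFoundError is a hypothetical error type used only for illustration.
type notFoundError struct{ path string }

func (e *notFoundError) Error() string { return e.path + ": not found" }

func main() {
	err := fmt.Errorf("resolving: %w", &notFoundError{path: "example.com/m"})

	// Old idiom: declare a target variable and pass its address to errors.As.
	var nf *notFoundError
	if errors.As(err, &nf) {
		fmt.Println("as:", nf.path)
	}

	// New idiom: errors.AsType names the target type as a type argument and
	// returns the unwrapped value along with an ok bool. This requires a
	// toolchain that includes the AsType addition described earlier.
	if nf, ok := errors.AsType[*notFoundError](err); ok {
		fmt.Println("astype:", nf.path)
	}
}
```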
diff --git a/src/cmd/go/internal/modget/query.go b/src/cmd/go/internal/modget/query.go
index 05872d52ec4e04..7076bbadce898b 100644
--- a/src/cmd/go/internal/modget/query.go
+++ b/src/cmd/go/internal/modget/query.go
@@ -184,7 +184,7 @@ func (q *query) validate() error {
if q.pattern == "all" {
// If there is no main module, "all" is not meaningful.
- if !modload.HasModRoot() {
+ if !modload.HasModRoot(modload.LoaderState) {
return fmt.Errorf(`cannot match "all": %v`, modload.ErrNoModRoot)
}
if !versionOkForMainModule(q.version) {
@@ -192,7 +192,7 @@ func (q *query) validate() error {
// request that we remove all module requirements, leaving only the main
// module and standard library. Perhaps we should implement that someday.
return &modload.QueryUpgradesAllError{
- MainModules: modload.MainModules.Versions(),
+ MainModules: modload.LoaderState.MainModules.Versions(),
Query: q.version,
}
}
@@ -283,7 +283,7 @@ func reportError(q *query, err error) {
// If err already mentions all of the relevant parts of q, just log err to
// reduce stutter. Otherwise, log both q and err.
//
- // TODO(bcmills): Use errors.As to unpack these errors instead of parsing
+ // TODO(bcmills): Use errors.AsType to unpack these errors instead of parsing
// strings with regular expressions.
if !utf8.ValidString(q.pattern) || !utf8.ValidString(q.version) {
diff --git a/src/cmd/go/internal/modload/build.go b/src/cmd/go/internal/modload/build.go
index 6e30afd5247b36..5b1b643d272dfd 100644
--- a/src/cmd/go/internal/modload/build.go
+++ b/src/cmd/go/internal/modload/build.go
@@ -51,8 +51,8 @@ func findStandardImportPath(path string) string {
// a given package. If modules are not enabled or if the package is in the
// standard library or if the package was not successfully loaded with
// LoadPackages or ImportFromFiles, nil is returned.
-func PackageModuleInfo(ctx context.Context, pkgpath string) *modinfo.ModulePublic {
- if isStandardImportPath(pkgpath) || !Enabled() {
+func PackageModuleInfo(loaderstate *State, ctx context.Context, pkgpath string) *modinfo.ModulePublic {
+ if isStandardImportPath(pkgpath) || !Enabled(loaderstate) {
return nil
}
m, ok := findModule(loaded, pkgpath)
@@ -60,23 +60,23 @@ func PackageModuleInfo(ctx context.Context, pkgpath string) *modinfo.ModulePubli
return nil
}
- rs := LoadModFile(ctx)
- return moduleInfo(ctx, rs, m, 0, nil)
+ rs := LoadModFile(loaderstate, ctx)
+ return moduleInfo(loaderstate, ctx, rs, m, 0, nil)
}
// PackageModRoot returns the module root directory for the module that provides
// a given package. If modules are not enabled or if the package is in the
// standard library or if the package was not successfully loaded with
// LoadPackages or ImportFromFiles, the empty string is returned.
-func PackageModRoot(ctx context.Context, pkgpath string) string {
- if isStandardImportPath(pkgpath) || !Enabled() || cfg.BuildMod == "vendor" {
+func PackageModRoot(loaderstate *State, ctx context.Context, pkgpath string) string {
+ if isStandardImportPath(pkgpath) || !Enabled(loaderstate) || cfg.BuildMod == "vendor" {
return ""
}
m, ok := findModule(loaded, pkgpath)
if !ok {
return ""
}
- root, _, err := fetch(ctx, m)
+ root, _, err := fetch(loaderstate, ctx, m)
if err != nil {
return ""
}
@@ -84,26 +84,26 @@ func PackageModRoot(ctx context.Context, pkgpath string) string {
}
func ModuleInfo(ctx context.Context, path string) *modinfo.ModulePublic {
- if !Enabled() {
+ if !Enabled(LoaderState) {
return nil
}
if path, vers, found := strings.Cut(path, "@"); found {
m := module.Version{Path: path, Version: vers}
- return moduleInfo(ctx, nil, m, 0, nil)
+ return moduleInfo(LoaderState, ctx, nil, m, 0, nil)
}
- rs := LoadModFile(ctx)
+ rs := LoadModFile(LoaderState, ctx)
var (
v string
ok bool
)
if rs.pruning == pruned {
- v, ok = rs.rootSelected(path)
+ v, ok = rs.rootSelected(LoaderState, path)
}
if !ok {
- mg, err := rs.Graph(ctx)
+ mg, err := rs.Graph(LoaderState, ctx)
if err != nil {
base.Fatal(err)
}
@@ -119,7 +119,7 @@ func ModuleInfo(ctx context.Context, path string) *modinfo.ModulePublic {
}
}
- return moduleInfo(ctx, rs, module.Version{Path: path, Version: v}, 0, nil)
+ return moduleInfo(LoaderState, ctx, rs, module.Version{Path: path, Version: v}, 0, nil)
}
// addUpdate fills in m.Update if an updated version is available.
@@ -128,12 +128,11 @@ func addUpdate(ctx context.Context, m *modinfo.ModulePublic) {
return
}
- info, err := Query(ctx, m.Path, "upgrade", m.Version, CheckAllowed)
- var noVersionErr *NoMatchingVersionError
- if errors.Is(err, ErrDisallowed) ||
+ info, err := Query(LoaderState, ctx, m.Path, "upgrade", m.Version, CheckAllowed)
+ if _, ok := errors.AsType[*NoMatchingVersionError](err); ok ||
errors.Is(err, fs.ErrNotExist) ||
- errors.As(err, &noVersionErr) {
- // Ignore "not found" and "no matching version" errors.
+ errors.Is(err, ErrDisallowed) {
+ // Ignore "no matching version" and "not found" errors.
// This means the proxy has no matching version or no versions at all.
//
// Ignore "disallowed" errors. This means the current version is
@@ -222,7 +221,7 @@ func addVersions(ctx context.Context, m *modinfo.ModulePublic, listRetracted boo
if listRetracted {
allowed = CheckExclusions
}
- v, origin, err := versions(ctx, m.Path, allowed)
+ v, origin, err := versions(LoaderState, ctx, m.Path, allowed)
if err != nil && m.Error == nil {
m.Error = &modinfo.ModuleError{Err: err.Error()}
}
@@ -232,16 +231,16 @@ func addVersions(ctx context.Context, m *modinfo.ModulePublic, listRetracted boo
// addRetraction fills in m.Retracted if the module was retracted by its author.
// m.Error is set if there's an error loading retraction information.
-func addRetraction(ctx context.Context, m *modinfo.ModulePublic) {
+func addRetraction(loaderstate *State, ctx context.Context, m *modinfo.ModulePublic) {
if m.Version == "" {
return
}
- err := CheckRetractions(ctx, module.Version{Path: m.Path, Version: m.Version})
- var noVersionErr *NoMatchingVersionError
- var retractErr *ModuleRetractedError
- if err == nil || errors.Is(err, fs.ErrNotExist) || errors.As(err, &noVersionErr) {
- // Ignore "not found" and "no matching version" errors.
+ err := CheckRetractions(loaderstate, ctx, module.Version{Path: m.Path, Version: m.Version})
+ if err == nil {
+ return
+ } else if _, ok := errors.AsType[*NoMatchingVersionError](err); ok || errors.Is(err, fs.ErrNotExist) {
+ // Ignore "no matching version" and "not found" errors.
// This means the proxy has no matching version or no versions at all.
//
// We should report other errors though. An attacker that controls the
@@ -250,7 +249,7 @@ func addRetraction(ctx context.Context, m *modinfo.ModulePublic) {
// hide versions, since the "list" and "latest" endpoints are not
// authenticated.
return
- } else if errors.As(err, &retractErr) {
+ } else if retractErr, ok := errors.AsType[*ModuleRetractedError](err); ok {
if len(retractErr.Rationale) == 0 {
m.Retracted = []string{"retracted by module author"}
} else {
@@ -264,10 +263,9 @@ func addRetraction(ctx context.Context, m *modinfo.ModulePublic) {
// addDeprecation fills in m.Deprecated if the module was deprecated by its
// author. m.Error is set if there's an error loading deprecation information.
func addDeprecation(ctx context.Context, m *modinfo.ModulePublic) {
- deprecation, err := CheckDeprecation(ctx, module.Version{Path: m.Path, Version: m.Version})
- var noVersionErr *NoMatchingVersionError
- if errors.Is(err, fs.ErrNotExist) || errors.As(err, &noVersionErr) {
- // Ignore "not found" and "no matching version" errors.
+ deprecation, err := CheckDeprecation(LoaderState, ctx, module.Version{Path: m.Path, Version: m.Version})
+ if _, ok := errors.AsType[*NoMatchingVersionError](err); ok || errors.Is(err, fs.ErrNotExist) {
+ // Ignore "no matching version" and "not found" errors.
// This means the proxy has no matching version or no versions at all.
//
// We should report other errors though. An attacker that controls the
@@ -289,8 +287,8 @@ func addDeprecation(ctx context.Context, m *modinfo.ModulePublic) {
// moduleInfo returns information about module m, loaded from the requirements
// in rs (which may be nil to indicate that m was not loaded from a requirement
// graph).
-func moduleInfo(ctx context.Context, rs *Requirements, m module.Version, mode ListMode, reuse map[module.Version]*modinfo.ModulePublic) *modinfo.ModulePublic {
- if m.Version == "" && MainModules.Contains(m.Path) {
+func moduleInfo(loaderstate *State, ctx context.Context, rs *Requirements, m module.Version, mode ListMode, reuse map[module.Version]*modinfo.ModulePublic) *modinfo.ModulePublic {
+ if m.Version == "" && loaderstate.MainModules.Contains(m.Path) {
info := &modinfo.ModulePublic{
Path: m.Path,
Version: m.Version,
@@ -301,7 +299,7 @@ func moduleInfo(ctx context.Context, rs *Requirements, m module.Version, mode Li
} else {
panic("internal error: GoVersion not set for main module")
}
- if modRoot := MainModules.ModRoot(m); modRoot != "" {
+ if modRoot := loaderstate.MainModules.ModRoot(m); modRoot != "" {
info.Dir = modRoot
info.GoMod = modFilePath(modRoot)
}
@@ -324,7 +322,7 @@ func moduleInfo(ctx context.Context, rs *Requirements, m module.Version, mode Li
}
checksumOk := func(suffix string) bool {
- return rs == nil || m.Version == "" || !mustHaveSums() ||
+ return rs == nil || m.Version == "" || !mustHaveSums(loaderstate) ||
modfetch.HaveSum(module.Version{Path: m.Path, Version: m.Version + suffix})
}
@@ -332,7 +330,7 @@ func moduleInfo(ctx context.Context, rs *Requirements, m module.Version, mode Li
if m.Version != "" {
if old := reuse[mod]; old != nil {
- if err := checkReuse(ctx, mod, old.Origin); err == nil {
+ if err := checkReuse(loaderstate, ctx, mod, old.Origin); err == nil {
*m = *old
m.Query = ""
m.Dir = ""
@@ -340,7 +338,7 @@ func moduleInfo(ctx context.Context, rs *Requirements, m module.Version, mode Li
}
}
- if q, err := Query(ctx, m.Path, m.Version, "", nil); err != nil {
+ if q, err := Query(loaderstate, ctx, m.Path, m.Version, "", nil); err != nil {
m.Error = &modinfo.ModuleError{Err: err.Error()}
} else {
m.Version = q.Version
@@ -351,7 +349,7 @@ func moduleInfo(ctx context.Context, rs *Requirements, m module.Version, mode Li
if m.GoVersion == "" && checksumOk("/go.mod") {
// Load the go.mod file to determine the Go version, since it hasn't
// already been populated from rawGoVersion.
- if summary, err := rawGoModSummary(mod); err == nil && summary.goVersion != "" {
+ if summary, err := rawGoModSummary(loaderstate, mod); err == nil && summary.goVersion != "" {
m.GoVersion = summary.goVersion
}
}
@@ -379,7 +377,7 @@ func moduleInfo(ctx context.Context, rs *Requirements, m module.Version, mode Li
}
if mode&ListRetracted != 0 {
- addRetraction(ctx, m)
+ addRetraction(loaderstate, ctx, m)
}
}
}
@@ -391,7 +389,7 @@ func moduleInfo(ctx context.Context, rs *Requirements, m module.Version, mode Li
return info
}
- r := Replacement(m)
+ r := Replacement(loaderstate, m)
if r.Path == "" {
if cfg.BuildMod == "vendor" {
// It's tempting to fill in the "Dir" field to point within the vendor
@@ -420,7 +418,7 @@ func moduleInfo(ctx context.Context, rs *Requirements, m module.Version, mode Li
if filepath.IsAbs(r.Path) {
info.Replace.Dir = r.Path
} else {
- info.Replace.Dir = filepath.Join(replaceRelativeTo(), r.Path)
+ info.Replace.Dir = filepath.Join(replaceRelativeTo(loaderstate), r.Path)
}
info.Replace.GoMod = filepath.Join(info.Replace.Dir, "go.mod")
}
diff --git a/src/cmd/go/internal/modload/buildlist.go b/src/cmd/go/internal/modload/buildlist.go
index 2ba04f707b5472..54ec4d23ebe08e 100644
--- a/src/cmd/go/internal/modload/buildlist.go
+++ b/src/cmd/go/internal/modload/buildlist.go
@@ -85,16 +85,6 @@ type cachedGraph struct {
err error // If err is non-nil, mg may be incomplete (but must still be non-nil).
}
-// requirements is the requirement graph for the main module.
-//
-// It is always non-nil if the main module's go.mod file has been loaded.
-//
-// This variable should only be read from the loadModFile function, and should
-// only be written in the loadModFile and commitRequirements functions.
-// All other functions that need or produce a *Requirements should
-// accept and/or return an explicit parameter.
-var requirements *Requirements
-
func mustHaveGoRoot(roots []module.Version) {
for _, m := range roots {
if m.Path == "go" {
@@ -114,21 +104,21 @@ func mustHaveGoRoot(roots []module.Version) {
//
// If vendoring is in effect, the caller must invoke initVendor on the returned
// *Requirements before any other method.
-func newRequirements(pruning modPruning, rootModules []module.Version, direct map[string]bool) *Requirements {
+func newRequirements(loaderstate *State, pruning modPruning, rootModules []module.Version, direct map[string]bool) *Requirements {
mustHaveGoRoot(rootModules)
if pruning != workspace {
- if workFilePath != "" {
+ if loaderstate.workFilePath != "" {
panic("in workspace mode, but pruning is not workspace in newRequirements")
}
}
if pruning != workspace {
- if workFilePath != "" {
+ if loaderstate.workFilePath != "" {
panic("in workspace mode, but pruning is not workspace in newRequirements")
}
for i, m := range rootModules {
- if m.Version == "" && MainModules.Contains(m.Path) {
+ if m.Version == "" && loaderstate.MainModules.Contains(m.Path) {
panic(fmt.Sprintf("newRequirements called with untrimmed build list: rootModules[%v] is a main module", i))
}
if m.Path == "" || m.Version == "" {
@@ -172,10 +162,10 @@ func (rs *Requirements) String() string {
// initVendor initializes rs.graph from the given list of vendored module
// dependencies, overriding the graph that would normally be loaded from module
// requirements.
-func (rs *Requirements) initVendor(vendorList []module.Version) {
+func (rs *Requirements) initVendor(loaderstate *State, vendorList []module.Version) {
rs.graphOnce.Do(func() {
- roots := MainModules.Versions()
- if inWorkspaceMode() {
+ roots := loaderstate.MainModules.Versions()
+ if inWorkspaceMode(loaderstate) {
// Use rs.rootModules to pull in the go and toolchain roots
// from the go.work file and preserve the invariant that all
// of rs.rootModules are in mg.g.
@@ -186,7 +176,7 @@ func (rs *Requirements) initVendor(vendorList []module.Version) {
}
if rs.pruning == pruned {
- mainModule := MainModules.mustGetSingleMainModule()
+ mainModule := loaderstate.MainModules.mustGetSingleMainModule(loaderstate)
// The roots of a single pruned module should already include every module in the
// vendor list, because the vendored modules are the same as those needed
// for graph pruning.
@@ -194,7 +184,7 @@ func (rs *Requirements) initVendor(vendorList []module.Version) {
// Just to be sure, we'll double-check that here.
inconsistent := false
for _, m := range vendorList {
- if v, ok := rs.rootSelected(m.Path); !ok || v != m.Version {
+ if v, ok := rs.rootSelected(loaderstate, m.Path); !ok || v != m.Version {
base.Errorf("go: vendored module %v should be required explicitly in go.mod", m)
inconsistent = true
}
@@ -218,15 +208,15 @@ func (rs *Requirements) initVendor(vendorList []module.Version) {
// graph, but still distinguishes between direct and indirect
// dependencies.
vendorMod := module.Version{Path: "vendor/modules.txt", Version: ""}
- if inWorkspaceMode() {
- for _, m := range MainModules.Versions() {
- reqs, _ := rootsFromModFile(m, MainModules.ModFile(m), omitToolchainRoot)
+ if inWorkspaceMode(loaderstate) {
+ for _, m := range loaderstate.MainModules.Versions() {
+ reqs, _ := rootsFromModFile(loaderstate, m, loaderstate.MainModules.ModFile(m), omitToolchainRoot)
mg.g.Require(m, append(reqs, vendorMod))
}
mg.g.Require(vendorMod, vendorList)
} else {
- mainModule := MainModules.mustGetSingleMainModule()
+ mainModule := loaderstate.MainModules.mustGetSingleMainModule(loaderstate)
mg.g.Require(mainModule, append(rs.rootModules, vendorMod))
mg.g.Require(vendorMod, vendorList)
}
@@ -237,8 +227,8 @@ func (rs *Requirements) initVendor(vendorList []module.Version) {
}
// GoVersion returns the Go language version for the Requirements.
-func (rs *Requirements) GoVersion() string {
- v, _ := rs.rootSelected("go")
+func (rs *Requirements) GoVersion(loaderstate *State) string {
+ v, _ := rs.rootSelected(loaderstate, "go")
if v == "" {
panic("internal error: missing go version in modload.Requirements")
}
@@ -248,8 +238,8 @@ func (rs *Requirements) GoVersion() string {
// rootSelected returns the version of the root dependency with the given module
// path, or the zero module.Version and ok=false if the module is not a root
// dependency.
-func (rs *Requirements) rootSelected(path string) (version string, ok bool) {
- if MainModules.Contains(path) {
+func (rs *Requirements) rootSelected(loaderstate *State, path string) (version string, ok bool) {
+ if loaderstate.MainModules.Contains(path) {
return "", true
}
if v, ok := rs.maxRootVersion[path]; ok {
@@ -262,9 +252,9 @@ func (rs *Requirements) rootSelected(path string) (version string, ok bool) {
// of the same module or a requirement on any version of the main module.
// Redundant requirements should be pruned, but they may influence version
// selection.
-func (rs *Requirements) hasRedundantRoot() bool {
+func (rs *Requirements) hasRedundantRoot(loaderstate *State) bool {
for i, m := range rs.rootModules {
- if MainModules.Contains(m.Path) || (i > 0 && m.Path == rs.rootModules[i-1].Path) {
+ if loaderstate.MainModules.Contains(m.Path) || (i > 0 && m.Path == rs.rootModules[i-1].Path) {
return true
}
}
@@ -279,9 +269,9 @@ func (rs *Requirements) hasRedundantRoot() bool {
//
// If the requirements of any relevant module fail to load, Graph also
// returns a non-nil error of type *mvs.BuildListError.
-func (rs *Requirements) Graph(ctx context.Context) (*ModuleGraph, error) {
+func (rs *Requirements) Graph(loaderstate *State, ctx context.Context) (*ModuleGraph, error) {
rs.graphOnce.Do(func() {
- mg, mgErr := readModGraph(ctx, rs.pruning, rs.rootModules, nil)
+ mg, mgErr := readModGraph(loaderstate, ctx, rs.pruning, rs.rootModules, nil)
rs.graph.Store(&cachedGraph{mg, mgErr})
})
cached := rs.graph.Load()
@@ -317,7 +307,7 @@ var readModGraphDebugOnce sync.Once
//
// Unlike LoadModGraph, readModGraph does not attempt to diagnose or update
// inconsistent roots.
-func readModGraph(ctx context.Context, pruning modPruning, roots []module.Version, unprune map[module.Version]bool) (*ModuleGraph, error) {
+func readModGraph(loaderstate *State, ctx context.Context, pruning modPruning, roots []module.Version, unprune map[module.Version]bool) (*ModuleGraph, error) {
mustHaveGoRoot(roots)
if pruning == pruned {
// Enable diagnostics for lazy module loading
@@ -343,10 +333,10 @@ func readModGraph(ctx context.Context, pruning modPruning, roots []module.Versio
}
var graphRoots []module.Version
- if inWorkspaceMode() {
+ if inWorkspaceMode(loaderstate) {
graphRoots = roots
} else {
- graphRoots = MainModules.Versions()
+ graphRoots = loaderstate.MainModules.Versions()
}
var (
mu sync.Mutex // guards mg.g and hasError during loading
@@ -357,10 +347,10 @@ func readModGraph(ctx context.Context, pruning modPruning, roots []module.Versio
)
if pruning != workspace {
- if inWorkspaceMode() {
+ if inWorkspaceMode(loaderstate) {
panic("pruning is not workspace in workspace mode")
}
- mg.g.Require(MainModules.mustGetSingleMainModule(), roots)
+ mg.g.Require(loaderstate.MainModules.mustGetSingleMainModule(loaderstate), roots)
}
type dedupKey struct {
@@ -377,7 +367,7 @@ func readModGraph(ctx context.Context, pruning modPruning, roots []module.Versio
// m's go.mod file indicates that it supports graph pruning.
loadOne := func(m module.Version) (*modFileSummary, error) {
return mg.loadCache.Do(m, func() (*modFileSummary, error) {
- summary, err := goModSummary(m)
+ summary, err := goModSummary(loaderstate, m)
mu.Lock()
if err == nil {
@@ -537,12 +527,12 @@ func (mg *ModuleGraph) findError() error {
return nil
}
-func (mg *ModuleGraph) allRootsSelected() bool {
+func (mg *ModuleGraph) allRootsSelected(loaderstate *State) bool {
var roots []module.Version
- if inWorkspaceMode() {
- roots = MainModules.Versions()
+ if inWorkspaceMode(loaderstate) {
+ roots = loaderstate.MainModules.Versions()
} else {
- roots, _ = mg.g.RequiredBy(MainModules.mustGetSingleMainModule())
+ roots, _ = mg.g.RequiredBy(loaderstate.MainModules.mustGetSingleMainModule(loaderstate))
}
for _, m := range roots {
if mg.Selected(m.Path) != m.Version {
@@ -563,13 +553,13 @@ func (mg *ModuleGraph) allRootsSelected() bool {
// LoadModGraph need only be called if LoadPackages is not,
// typically in commands that care about modules but no particular package.
func LoadModGraph(ctx context.Context, goVersion string) (*ModuleGraph, error) {
- rs, err := loadModFile(ctx, nil)
+ rs, err := loadModFile(LoaderState, ctx, nil)
if err != nil {
return nil, err
}
if goVersion != "" {
- v, _ := rs.rootSelected("go")
+ v, _ := rs.rootSelected(LoaderState, "go")
if gover.Compare(v, gover.GoStrictVersion) >= 0 && gover.Compare(goVersion, v) < 0 {
return nil, fmt.Errorf("requested Go version %s cannot load module graph (requires Go >= %s)", goVersion, v)
}
@@ -579,17 +569,17 @@ func LoadModGraph(ctx context.Context, goVersion string) (*ModuleGraph, error) {
// Use newRequirements instead of convertDepth because convertDepth
// also updates roots; here, we want to report the unmodified roots
// even though they may seem inconsistent.
- rs = newRequirements(unpruned, rs.rootModules, rs.direct)
+ rs = newRequirements(LoaderState, unpruned, rs.rootModules, rs.direct)
}
- return rs.Graph(ctx)
+ return rs.Graph(LoaderState, ctx)
}
- rs, mg, err := expandGraph(ctx, rs)
+ rs, mg, err := expandGraph(LoaderState, ctx, rs)
if err != nil {
return nil, err
}
- requirements = rs
+ LoaderState.requirements = rs
return mg, nil
}
@@ -604,22 +594,22 @@ func LoadModGraph(ctx context.Context, goVersion string) (*ModuleGraph, error) {
// from those roots and any error encountered while loading that graph.
// expandGraph returns non-nil requirements and a non-nil graph regardless of
// errors. On error, the roots might not be updated to be consistent.
-func expandGraph(ctx context.Context, rs *Requirements) (*Requirements, *ModuleGraph, error) {
- mg, mgErr := rs.Graph(ctx)
+func expandGraph(loaderstate *State, ctx context.Context, rs *Requirements) (*Requirements, *ModuleGraph, error) {
+ mg, mgErr := rs.Graph(loaderstate, ctx)
if mgErr != nil {
// Without the graph, we can't update the roots: we don't know which
// versions of transitive dependencies would be selected.
return rs, mg, mgErr
}
- if !mg.allRootsSelected() {
+ if !mg.allRootsSelected(loaderstate) {
// The roots of rs are not consistent with the rest of the graph. Update
// them. In an unpruned module this is a no-op for the build list as a whole —
// it just promotes what were previously transitive requirements to be
// roots — but in a pruned module it may pull in previously-irrelevant
// transitive dependencies.
- newRS, rsErr := updateRoots(ctx, rs.direct, rs, nil, nil, false)
+ newRS, rsErr := updateRoots(loaderstate, ctx, rs.direct, rs, nil, nil, false)
if rsErr != nil {
// Failed to update roots, perhaps because of an error in a transitive
// dependency needed for the update. Return the original Requirements
@@ -627,7 +617,7 @@ func expandGraph(ctx context.Context, rs *Requirements) (*Requirements, *ModuleG
return rs, mg, rsErr
}
rs = newRS
- mg, mgErr = rs.Graph(ctx)
+ mg, mgErr = rs.Graph(loaderstate, ctx)
}
return rs, mg, mgErr
@@ -649,16 +639,16 @@ func expandGraph(ctx context.Context, rs *Requirements) (*Requirements, *ModuleG
// On success, EditBuildList reports whether the selected version of any module
// in the build list may have been changed (possibly to or from "none") as a
// result.
-func EditBuildList(ctx context.Context, add, mustSelect []module.Version) (changed bool, err error) {
- rs, changed, err := editRequirements(ctx, LoadModFile(ctx), add, mustSelect)
+func EditBuildList(loaderstate *State, ctx context.Context, add, mustSelect []module.Version) (changed bool, err error) {
+ rs, changed, err := editRequirements(loaderstate, ctx, LoadModFile(loaderstate, ctx), add, mustSelect)
if err != nil {
return false, err
}
- requirements = rs
+ loaderstate.requirements = rs
return changed, nil
}
-func overrideRoots(ctx context.Context, rs *Requirements, replace []module.Version) *Requirements {
+func overrideRoots(loaderstate *State, ctx context.Context, rs *Requirements, replace []module.Version) *Requirements {
drop := make(map[string]bool)
for _, m := range replace {
drop[m.Path] = true
@@ -671,7 +661,7 @@ func overrideRoots(ctx context.Context, rs *Requirements, replace []module.Versi
}
roots = append(roots, replace...)
gover.ModSort(roots)
- return newRequirements(rs.pruning, roots, rs.direct)
+ return newRequirements(loaderstate, rs.pruning, roots, rs.direct)
}
// A ConstraintError describes inconsistent constraints in EditBuildList
@@ -775,28 +765,28 @@ func (c Conflict) String() string {
// tidyRoots trims the root dependencies to the minimal requirements needed to
// both retain the same versions of all packages in pkgs and satisfy the
// graph-pruning invariants (if applicable).
-func tidyRoots(ctx context.Context, rs *Requirements, pkgs []*loadPkg) (*Requirements, error) {
- mainModule := MainModules.mustGetSingleMainModule()
+func tidyRoots(loaderstate *State, ctx context.Context, rs *Requirements, pkgs []*loadPkg) (*Requirements, error) {
+ mainModule := loaderstate.MainModules.mustGetSingleMainModule(loaderstate)
if rs.pruning == unpruned {
- return tidyUnprunedRoots(ctx, mainModule, rs, pkgs)
+ return tidyUnprunedRoots(loaderstate, ctx, mainModule, rs, pkgs)
}
- return tidyPrunedRoots(ctx, mainModule, rs, pkgs)
+ return tidyPrunedRoots(loaderstate, ctx, mainModule, rs, pkgs)
}
-func updateRoots(ctx context.Context, direct map[string]bool, rs *Requirements, pkgs []*loadPkg, add []module.Version, rootsImported bool) (*Requirements, error) {
+func updateRoots(loaderstate *State, ctx context.Context, direct map[string]bool, rs *Requirements, pkgs []*loadPkg, add []module.Version, rootsImported bool) (*Requirements, error) {
switch rs.pruning {
case unpruned:
- return updateUnprunedRoots(ctx, direct, rs, add)
+ return updateUnprunedRoots(loaderstate, ctx, direct, rs, add)
case pruned:
- return updatePrunedRoots(ctx, direct, rs, pkgs, add, rootsImported)
+ return updatePrunedRoots(loaderstate, ctx, direct, rs, pkgs, add, rootsImported)
case workspace:
- return updateWorkspaceRoots(ctx, direct, rs, add)
+ return updateWorkspaceRoots(loaderstate, ctx, direct, rs, add)
default:
panic(fmt.Sprintf("unsupported pruning mode: %v", rs.pruning))
}
}
-func updateWorkspaceRoots(ctx context.Context, direct map[string]bool, rs *Requirements, add []module.Version) (*Requirements, error) {
+func updateWorkspaceRoots(loaderstate *State, ctx context.Context, direct map[string]bool, rs *Requirements, add []module.Version) (*Requirements, error) {
if len(add) != 0 {
// add should be empty in workspace mode because workspace mode implies
// -mod=readonly, which in turn implies no new requirements. The code path
@@ -807,7 +797,7 @@ func updateWorkspaceRoots(ctx context.Context, direct map[string]bool, rs *Requi
// return an error.
panic("add is not empty")
}
- return newRequirements(workspace, rs.rootModules, direct), nil
+ return newRequirements(loaderstate, workspace, rs.rootModules, direct), nil
}
// tidyPrunedRoots returns a minimal set of root requirements that maintains the
@@ -826,16 +816,16 @@ func updateWorkspaceRoots(ctx context.Context, direct map[string]bool, rs *Requi
// To ensure that the loading process eventually converges, the caller should
// add any needed roots from the tidy root set (without removing existing untidy
// roots) until the set of roots has converged.
-func tidyPrunedRoots(ctx context.Context, mainModule module.Version, old *Requirements, pkgs []*loadPkg) (*Requirements, error) {
+func tidyPrunedRoots(loaderstate *State, ctx context.Context, mainModule module.Version, old *Requirements, pkgs []*loadPkg) (*Requirements, error) {
var (
roots []module.Version
pathIsRoot = map[string]bool{mainModule.Path: true}
)
- if v, ok := old.rootSelected("go"); ok {
+ if v, ok := old.rootSelected(loaderstate, "go"); ok {
roots = append(roots, module.Version{Path: "go", Version: v})
pathIsRoot["go"] = true
}
- if v, ok := old.rootSelected("toolchain"); ok {
+ if v, ok := old.rootSelected(loaderstate, "toolchain"); ok {
roots = append(roots, module.Version{Path: "toolchain", Version: v})
pathIsRoot["toolchain"] = true
}
@@ -857,7 +847,7 @@ func tidyPrunedRoots(ctx context.Context, mainModule module.Version, old *Requir
if !pkg.flags.has(pkgInAll) {
continue
}
- if pkg.fromExternalModule() && !pathIsRoot[pkg.mod.Path] {
+ if pkg.fromExternalModule(loaderstate) && !pathIsRoot[pkg.mod.Path] {
roots = append(roots, pkg.mod)
pathIsRoot[pkg.mod.Path] = true
}
@@ -865,11 +855,11 @@ func tidyPrunedRoots(ctx context.Context, mainModule module.Version, old *Requir
queued[pkg] = true
}
gover.ModSort(roots)
- tidy := newRequirements(pruned, roots, old.direct)
+ tidy := newRequirements(loaderstate, pruned, roots, old.direct)
for len(queue) > 0 {
roots = tidy.rootModules
- mg, err := tidy.Graph(ctx)
+ mg, err := tidy.Graph(loaderstate, ctx)
if err != nil {
return nil, err
}
@@ -902,12 +892,12 @@ func tidyPrunedRoots(ctx context.Context, mainModule module.Version, old *Requir
if len(roots) > len(tidy.rootModules) {
gover.ModSort(roots)
- tidy = newRequirements(pruned, roots, tidy.direct)
+ tidy = newRequirements(loaderstate, pruned, roots, tidy.direct)
}
}
roots = tidy.rootModules
- _, err := tidy.Graph(ctx)
+ _, err := tidy.Graph(loaderstate, ctx)
if err != nil {
return nil, err
}
@@ -931,8 +921,8 @@ func tidyPrunedRoots(ctx context.Context, mainModule module.Version, old *Requir
pkg := pkg
q.Add(func() {
skipModFile := true
- _, _, _, _, err := importFromModules(ctx, pkg.path, tidy, nil, skipModFile)
- if aie := (*AmbiguousImportError)(nil); errors.As(err, &aie) {
+ _, _, _, _, err := importFromModules(loaderstate, ctx, pkg.path, tidy, nil, skipModFile)
+ if _, ok := errors.AsType[*AmbiguousImportError](err); ok {
disambiguateRoot.Store(pkg.mod, true)
}
})
@@ -948,8 +938,8 @@ func tidyPrunedRoots(ctx context.Context, mainModule module.Version, old *Requir
if len(roots) > len(tidy.rootModules) {
module.Sort(roots)
- tidy = newRequirements(pruned, roots, tidy.direct)
- _, err = tidy.Graph(ctx)
+ tidy = newRequirements(loaderstate, pruned, roots, tidy.direct)
+ _, err = tidy.Graph(loaderstate, ctx)
if err != nil {
return nil, err
}
@@ -1009,7 +999,7 @@ func tidyPrunedRoots(ctx context.Context, mainModule module.Version, old *Requir
//
// (See https://golang.org/design/36460-lazy-module-loading#invariants for more
// detail.)
-func updatePrunedRoots(ctx context.Context, direct map[string]bool, rs *Requirements, pkgs []*loadPkg, add []module.Version, rootsImported bool) (*Requirements, error) {
+func updatePrunedRoots(loaderstate *State, ctx context.Context, direct map[string]bool, rs *Requirements, pkgs []*loadPkg, add []module.Version, rootsImported bool) (*Requirements, error) {
roots := rs.rootModules
rootsUpgraded := false
@@ -1019,7 +1009,7 @@ func updatePrunedRoots(ctx context.Context, direct map[string]bool, rs *Requirem
// either pkgInAll or pkgIsRoot is included as a root.”
needSort := false
for _, pkg := range pkgs {
- if !pkg.fromExternalModule() {
+ if !pkg.fromExternalModule(loaderstate) {
// pkg was not loaded from a module dependency, so we don't need
// to do anything special to maintain that dependency.
continue
@@ -1068,7 +1058,7 @@ func updatePrunedRoots(ctx context.Context, direct map[string]bool, rs *Requirem
continue
}
- if _, ok := rs.rootSelected(pkg.mod.Path); ok {
+ if _, ok := rs.rootSelected(loaderstate, pkg.mod.Path); ok {
// It is possible that the main module's go.mod file is incomplete or
// otherwise erroneous — for example, perhaps the author forgot to 'git
// add' their updated go.mod file after adding a new package import, or
@@ -1104,7 +1094,7 @@ func updatePrunedRoots(ctx context.Context, direct map[string]bool, rs *Requirem
}
for _, m := range add {
- if v, ok := rs.rootSelected(m.Path); !ok || gover.ModCompare(m.Path, v, m.Version) < 0 {
+ if v, ok := rs.rootSelected(loaderstate, m.Path); !ok || gover.ModCompare(m.Path, v, m.Version) < 0 {
roots = append(roots, m)
rootsUpgraded = true
needSort = true
@@ -1121,7 +1111,7 @@ func updatePrunedRoots(ctx context.Context, direct map[string]bool, rs *Requirem
// We've added or upgraded one or more roots, so load the full module
// graph so that we can update those roots to be consistent with other
// requirements.
- if mustHaveCompleteRequirements() {
+ if mustHaveCompleteRequirements(loaderstate) {
// Our changes to the roots may have moved dependencies into or out of
// the graph-pruning horizon, which could in turn change the selected
// versions of other modules. (For pruned modules adding or removing an
@@ -1129,9 +1119,9 @@ func updatePrunedRoots(ctx context.Context, direct map[string]bool, rs *Requirem
return rs, errGoModDirty
}
- rs = newRequirements(pruned, roots, direct)
+ rs = newRequirements(loaderstate, pruned, roots, direct)
var err error
- mg, err = rs.Graph(ctx)
+ mg, err = rs.Graph(loaderstate, ctx)
if err != nil {
return rs, err
}
@@ -1145,20 +1135,20 @@ func updatePrunedRoots(ctx context.Context, direct map[string]bool, rs *Requirem
// We've already loaded the full module graph, which includes the
// requirements of all of the root modules — even the transitive
// requirements, if they are unpruned!
- mg, _ = rs.Graph(ctx)
+ mg, _ = rs.Graph(loaderstate, ctx)
} else if cfg.BuildMod == "vendor" {
// We can't spot-check the requirements of other modules because we
// don't in general have their go.mod files available in the vendor
// directory. (Fortunately this case is impossible, because mg.graph is
// always non-nil in vendor mode!)
panic("internal error: rs.graph is unexpectedly nil with -mod=vendor")
- } else if !spotCheckRoots(ctx, rs, spotCheckRoot) {
+ } else if !spotCheckRoots(loaderstate, ctx, rs, spotCheckRoot) {
// We spot-checked the explicit requirements of the roots that are
// relevant to the packages we've loaded. Unfortunately, they're
// inconsistent in some way; we need to load the full module graph
// so that we can fix the roots properly.
var err error
- mg, err = rs.Graph(ctx)
+ mg, err = rs.Graph(loaderstate, ctx)
if err != nil {
return rs, err
}
@@ -1168,7 +1158,7 @@ func updatePrunedRoots(ctx context.Context, direct map[string]bool, rs *Requirem
roots = make([]module.Version, 0, len(rs.rootModules))
rootsUpgraded = false
inRootPaths := make(map[string]bool, len(rs.rootModules)+1)
- for _, mm := range MainModules.Versions() {
+ for _, mm := range loaderstate.MainModules.Versions() {
inRootPaths[mm.Path] = true
}
for _, m := range rs.rootModules {
@@ -1194,7 +1184,7 @@ func updatePrunedRoots(ctx context.Context, direct map[string]bool, rs *Requirem
var v string
if mg == nil {
- v, _ = rs.rootSelected(m.Path)
+ v, _ = rs.rootSelected(loaderstate, m.Path)
} else {
v = mg.Selected(m.Path)
}
@@ -1228,12 +1218,12 @@ func updatePrunedRoots(ctx context.Context, direct map[string]bool, rs *Requirem
// preserve its cached ModuleGraph (if any).
return rs, nil
}
- return newRequirements(pruned, roots, direct), nil
+ return newRequirements(loaderstate, pruned, roots, direct), nil
}
// spotCheckRoots reports whether the versions of the roots in rs satisfy the
// explicit requirements of the modules in mods.
-func spotCheckRoots(ctx context.Context, rs *Requirements, mods map[module.Version]bool) bool {
+func spotCheckRoots(loaderstate *State, ctx context.Context, rs *Requirements, mods map[module.Version]bool) bool {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
@@ -1245,14 +1235,14 @@ func spotCheckRoots(ctx context.Context, rs *Requirements, mods map[module.Versi
return
}
- summary, err := goModSummary(m)
+ summary, err := goModSummary(loaderstate, m)
if err != nil {
cancel()
return
}
for _, r := range summary.require {
- if v, ok := rs.rootSelected(r.Path); ok && gover.ModCompare(r.Path, v, r.Version) < 0 {
+ if v, ok := rs.rootSelected(loaderstate, r.Path); ok && gover.ModCompare(r.Path, v, r.Version) < 0 {
cancel()
return
}
@@ -1274,7 +1264,7 @@ func spotCheckRoots(ctx context.Context, rs *Requirements, mods map[module.Versi
// the selected version of every module that provided or lexically could have
// provided a package in pkgs, and includes the selected version of every such
// module in direct as a root.
-func tidyUnprunedRoots(ctx context.Context, mainModule module.Version, old *Requirements, pkgs []*loadPkg) (*Requirements, error) {
+func tidyUnprunedRoots(loaderstate *State, ctx context.Context, mainModule module.Version, old *Requirements, pkgs []*loadPkg) (*Requirements, error) {
var (
// keep is a set of modules that provide packages or are needed to
// disambiguate imports.
@@ -1302,16 +1292,16 @@ func tidyUnprunedRoots(ctx context.Context, mainModule module.Version, old *Requ
// without its sum. See #47738.
altMods = map[string]string{}
)
- if v, ok := old.rootSelected("go"); ok {
+ if v, ok := old.rootSelected(loaderstate, "go"); ok {
keep = append(keep, module.Version{Path: "go", Version: v})
keptPath["go"] = true
}
- if v, ok := old.rootSelected("toolchain"); ok {
+ if v, ok := old.rootSelected(loaderstate, "toolchain"); ok {
keep = append(keep, module.Version{Path: "toolchain", Version: v})
keptPath["toolchain"] = true
}
for _, pkg := range pkgs {
- if !pkg.fromExternalModule() {
+ if !pkg.fromExternalModule(loaderstate) {
continue
}
if m := pkg.mod; !keptPath[m.Path] {
@@ -1360,7 +1350,7 @@ func tidyUnprunedRoots(ctx context.Context, mainModule module.Version, old *Requ
}
}
- return newRequirements(unpruned, min, old.direct), nil
+ return newRequirements(loaderstate, unpruned, min, old.direct), nil
}
// updateUnprunedRoots returns a set of root requirements that includes the selected
@@ -1377,8 +1367,8 @@ func tidyUnprunedRoots(ctx context.Context, mainModule module.Version, old *Requ
// by a dependency in add.
// 4. Every version in add is selected at its given version unless upgraded by
// (the dependencies of) an existing root or another module in add.
-func updateUnprunedRoots(ctx context.Context, direct map[string]bool, rs *Requirements, add []module.Version) (*Requirements, error) {
- mg, err := rs.Graph(ctx)
+func updateUnprunedRoots(loaderstate *State, ctx context.Context, direct map[string]bool, rs *Requirements, add []module.Version) (*Requirements, error) {
+ mg, err := rs.Graph(loaderstate, ctx)
if err != nil {
// We can't ignore errors in the module graph even if the user passed the -e
// flag to try to push past them. If we can't load the complete module
@@ -1386,7 +1376,7 @@ func updateUnprunedRoots(ctx context.Context, direct map[string]bool, rs *Requir
return rs, err
}
- if mustHaveCompleteRequirements() {
+ if mustHaveCompleteRequirements(loaderstate) {
// Instead of actually updating the requirements, just check that no updates
// are needed.
if rs == nil {
@@ -1406,7 +1396,7 @@ func updateUnprunedRoots(ctx context.Context, direct map[string]bool, rs *Requir
}
}
for mPath := range direct {
- if _, ok := rs.rootSelected(mPath); !ok {
+ if _, ok := rs.rootSelected(loaderstate, mPath); !ok {
// Module m is supposed to be listed explicitly, but isn't.
//
// Note that this condition is also detected (and logged with more
@@ -1445,7 +1435,7 @@ func updateUnprunedRoots(ctx context.Context, direct map[string]bool, rs *Requir
// This is only for convenience and clarity for end users: in an unpruned module,
// the choice of explicit vs. implicit dependency has no impact on MVS
// selection (for itself or any other module).
- keep := append(mg.BuildList()[MainModules.Len():], add...)
+ keep := append(mg.BuildList()[loaderstate.MainModules.Len():], add...)
for _, m := range keep {
if direct[m.Path] && !inRootPaths[m.Path] {
rootPaths = append(rootPaths, m.Path)
@@ -1454,14 +1444,14 @@ func updateUnprunedRoots(ctx context.Context, direct map[string]bool, rs *Requir
}
var roots []module.Version
- for _, mainModule := range MainModules.Versions() {
+ for _, mainModule := range loaderstate.MainModules.Versions() {
min, err := mvs.Req(mainModule, rootPaths, &mvsReqs{roots: keep})
if err != nil {
return rs, err
}
roots = append(roots, min...)
}
- if MainModules.Len() > 1 {
+ if loaderstate.MainModules.Len() > 1 {
gover.ModSort(roots)
}
if rs.pruning == unpruned && slices.Equal(roots, rs.rootModules) && maps.Equal(direct, rs.direct) {
@@ -1470,12 +1460,12 @@ func updateUnprunedRoots(ctx context.Context, direct map[string]bool, rs *Requir
return rs, nil
}
- return newRequirements(unpruned, roots, direct), nil
+ return newRequirements(loaderstate, unpruned, roots, direct), nil
}
// convertPruning returns a version of rs with the given pruning behavior.
// If rs already has the given pruning, convertPruning returns rs unmodified.
-func convertPruning(ctx context.Context, rs *Requirements, pruning modPruning) (*Requirements, error) {
+func convertPruning(loaderstate *State, ctx context.Context, rs *Requirements, pruning modPruning) (*Requirements, error) {
if rs.pruning == pruning {
return rs, nil
} else if rs.pruning == workspace || pruning == workspace {
@@ -1487,7 +1477,7 @@ func convertPruning(ctx context.Context, rs *Requirements, pruning modPruning) (
// pruned module graph are a superset of the roots of an unpruned one, so
// we don't need to add any new roots — we just need to drop the ones that
// are redundant, which is exactly what updateUnprunedRoots does.
- return updateUnprunedRoots(ctx, rs.direct, rs, nil)
+ return updateUnprunedRoots(loaderstate, ctx, rs.direct, rs, nil)
}
// We are converting an unpruned module to a pruned one.
@@ -1497,9 +1487,9 @@ func convertPruning(ctx context.Context, rs *Requirements, pruning modPruning) (
// root set! “Include the transitive dependencies of every module in the build
// list” is exactly what happens in a pruned module if we promote every module
// in the build list to a root.
- mg, err := rs.Graph(ctx)
+ mg, err := rs.Graph(loaderstate, ctx)
if err != nil {
return rs, err
}
- return newRequirements(pruned, mg.BuildList()[MainModules.Len():], rs.direct), nil
+ return newRequirements(loaderstate, pruned, mg.BuildList()[loaderstate.MainModules.Len():], rs.direct), nil
}
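
The buildlist.go hunks above are a mechanical threading change: helpers such as `newRequirements`, `(*Requirements).Graph`, and `rootSelected` now receive an explicit `loaderstate *State` instead of reading the package-level `MainModules` and related globals. A minimal sketch of that pattern, using made-up names rather than the real cmd/go internals:

```go
// Sketch only: package-level state is gathered into a struct and passed
// explicitly, with one retained package variable acting as the default
// instance for call sites that have not been converted yet.
package main

import "fmt"

type state struct {
	// rootModules stands in for what used to be package-level variables.
	rootModules []string
}

type requirements struct {
	roots []string
}

// newRequirements now receives the state explicitly instead of reading globals.
func newRequirements(s *state, extra []string) *requirements {
	roots := append(append([]string{}, s.rootModules...), extra...)
	return &requirements{roots: roots}
}

// defaultState plays the role of the retained global instance.
var defaultState = &state{rootModules: []string{"example.com/main"}}

func main() {
	rs := newRequirements(defaultState, []string{"example.com/a", "example.com/b"})
	fmt.Println(rs.roots)
}
```

The apparent goal, per the TODO(#40775) comment removed in init.go below, is to plumb this state through as explicit parameters rather than mutable globals.
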
diff --git a/src/cmd/go/internal/modload/edit.go b/src/cmd/go/internal/modload/edit.go
index b406193dc5a673..72d0f754224456 100644
--- a/src/cmd/go/internal/modload/edit.go
+++ b/src/cmd/go/internal/modload/edit.go
@@ -42,7 +42,7 @@ import (
// If pruning is enabled, the roots of the edited requirements include an
// explicit entry for each module path in tryUpgrade, mustSelect, and the roots
// of rs, unless the selected version for the module path is "none".
-func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSelect []module.Version) (edited *Requirements, changed bool, err error) {
+func editRequirements(loaderstate *State, ctx context.Context, rs *Requirements, tryUpgrade, mustSelect []module.Version) (edited *Requirements, changed bool, err error) {
if rs.pruning == workspace {
panic("editRequirements cannot edit workspace requirements")
}
@@ -82,7 +82,7 @@ func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSel
}
if rootPruning != rs.pruning {
- rs, err = convertPruning(ctx, rs, rootPruning)
+ rs, err = convertPruning(loaderstate, ctx, rs, rootPruning)
if err != nil {
return orig, false, err
}
@@ -100,13 +100,13 @@ func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSel
// dependencies, so we need to treat everything in the build list as
// potentially relevant — that is, as what would be a “root” in a module
// with graph pruning enabled.
- mg, err := rs.Graph(ctx)
+ mg, err := rs.Graph(loaderstate, ctx)
if err != nil {
// If we couldn't load the graph, we don't know what its requirements were
// to begin with, so we can't edit those requirements in a coherent way.
return orig, false, err
}
- bl := mg.BuildList()[MainModules.Len():]
+ bl := mg.BuildList()[loaderstate.MainModules.Len():]
selectedRoot = make(map[string]string, len(bl))
for _, m := range bl {
selectedRoot[m.Path] = m.Version
@@ -224,10 +224,12 @@ func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSel
// of every root. The upgraded roots are in addition to the original
// roots, so we will have enough information to trace a path to each
// conflict we discover from one or more of the original roots.
- mg, upgradedRoots, err := extendGraph(ctx, rootPruning, roots, selectedRoot)
+ mg, upgradedRoots, err := extendGraph(loaderstate, ctx, rootPruning, roots, selectedRoot)
if err != nil {
- var tooNew *gover.TooNewError
- if mg == nil || errors.As(err, &tooNew) {
+ if mg == nil {
+ return orig, false, err
+ }
+ if _, ok := errors.AsType[*gover.TooNewError](err); ok {
return orig, false, err
}
// We're about to walk the entire extended module graph, so we will find
@@ -389,7 +391,7 @@ func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSel
// the edit. We want to make sure we consider keeping it as-is,
// even if it wouldn't normally be included. (For example, it might
// be a pseudo-version or pre-release.)
- origMG, _ := orig.Graph(ctx)
+ origMG, _ := orig.Graph(loaderstate, ctx)
origV := origMG.Selected(m.Path)
if conflict.Err != nil && origV == m.Version {
@@ -413,7 +415,7 @@ func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSel
rejectedRoot[m] = true
prev := m
for {
- prev, err = previousVersion(ctx, prev)
+ prev, err = previousVersion(loaderstate, ctx, prev)
if gover.ModCompare(m.Path, m.Version, origV) > 0 && (gover.ModCompare(m.Path, prev.Version, origV) < 0 || err != nil) {
// previousVersion skipped over origV. Insert it into the order.
prev.Version = origV
@@ -513,13 +515,13 @@ func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSel
// The modules in mustSelect are always promoted to be explicit.
for _, m := range mustSelect {
- if m.Version != "none" && !MainModules.Contains(m.Path) {
+ if m.Version != "none" && !loaderstate.MainModules.Contains(m.Path) {
rootPaths = append(rootPaths, m.Path)
}
}
for _, m := range roots {
- if v, ok := rs.rootSelected(m.Path); ok && (v == m.Version || rs.direct[m.Path]) {
+ if v, ok := rs.rootSelected(loaderstate, m.Path); ok && (v == m.Version || rs.direct[m.Path]) {
// m.Path was formerly a root, and either its version hasn't changed or
// we believe that it provides a package directly imported by a package
// or test in the main module. For now we'll assume that it is still
@@ -530,7 +532,7 @@ func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSel
}
}
- roots, err = mvs.Req(MainModules.mustGetSingleMainModule(), rootPaths, &mvsReqs{roots: roots})
+ roots, err = mvs.Req(loaderstate.MainModules.mustGetSingleMainModule(loaderstate), rootPaths, &mvsReqs{roots: roots})
if err != nil {
return nil, false, err
}
@@ -561,7 +563,7 @@ func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSel
direct[m.Path] = true
}
}
- edited = newRequirements(rootPruning, roots, direct)
+ edited = newRequirements(loaderstate, rootPruning, roots, direct)
// If we ended up adding a dependency that upgrades our go version far enough
// to activate pruning, we must convert the edited Requirements in order to
@@ -576,7 +578,7 @@ func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSel
// those two modules will never be downgraded due to a conflict with any other
// constraint.
if rootPruning == unpruned {
- if v, ok := edited.rootSelected("go"); ok && pruningForGoVersion(v) == pruned {
+ if v, ok := edited.rootSelected(loaderstate, "go"); ok && pruningForGoVersion(v) == pruned {
// Since we computed the edit with the unpruned graph, and the pruned
// graph is a strict subset of the unpruned graph, this conversion
// preserves the exact (edited) build list that we already computed.
@@ -585,7 +587,7 @@ func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSel
// the graph. 'go get' will check for that sort of transition and log a
// message reminding the user how to clean up this mess we're about to
// make. 😅
- edited, err = convertPruning(ctx, edited, pruned)
+ edited, err = convertPruning(loaderstate, ctx, edited, pruned)
if err != nil {
return orig, false, err
}
@@ -605,9 +607,9 @@ func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSel
// The extended graph is useful for diagnosing version conflicts: for each
// selected module version, it can provide a complete path of requirements from
// some root to that version.
-func extendGraph(ctx context.Context, rootPruning modPruning, roots []module.Version, selectedRoot map[string]string) (mg *ModuleGraph, upgradedRoot map[module.Version]bool, err error) {
+func extendGraph(loaderstate *State, ctx context.Context, rootPruning modPruning, roots []module.Version, selectedRoot map[string]string) (mg *ModuleGraph, upgradedRoot map[module.Version]bool, err error) {
for {
- mg, err = readModGraph(ctx, rootPruning, roots, upgradedRoot)
+ mg, err = readModGraph(loaderstate, ctx, rootPruning, roots, upgradedRoot)
// We keep on going even if err is non-nil until we reach a steady state.
// (Note that readModGraph returns a non-nil *ModuleGraph even in case of
// errors.) The caller may be able to fix the errors by adjusting versions,
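
Besides the same `loaderstate` threading, the edit.go hunk above (and the import.go hunks that follow) convert error-type checks from `errors.As` with a declared target variable to the generic `errors.AsType` described in the release notes earlier in this document. A before-and-after sketch, assuming a toolchain that already provides `errors.AsType` with the `(value, ok)` signature these hunks rely on, and using a stand-in error type rather than the real `*gover.TooNewError`:

```go
// Sketch only: the wrapped error type here is invented for illustration.
package main

import (
	"errors"
	"fmt"
)

type tooNewError struct{ Version string }

func (e *tooNewError) Error() string { return "module requires go >= " + e.Version }

func main() {
	err := fmt.Errorf("loading module graph: %w", &tooNewError{Version: "1.99"})

	// Old pattern: declare a typed target and pass its address to errors.As.
	var tooNew *tooNewError
	if errors.As(err, &tooNew) {
		fmt.Println("errors.As:", tooNew.Version)
	}

	// New pattern: the type parameter replaces the target variable, and the
	// matched value (if any) is returned directly.
	if e, ok := errors.AsType[*tooNewError](err); ok {
		fmt.Println("errors.AsType:", e.Version)
	}
}
```

The generic form removes the separate target declaration, so the type being matched appears directly at the call site.
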
diff --git a/src/cmd/go/internal/modload/import.go b/src/cmd/go/internal/modload/import.go
index 171d9d692fbb82..a2a98289b0ed77 100644
--- a/src/cmd/go/internal/modload/import.go
+++ b/src/cmd/go/internal/modload/import.go
@@ -82,8 +82,8 @@ func (e *ImportMissingError) Error() string {
if e.QueryErr != nil {
return fmt.Sprintf("%s: %v", message, e.QueryErr)
}
- if e.ImportingMainModule.Path != "" && e.ImportingMainModule != MainModules.ModContainingCWD() {
- return fmt.Sprintf("%s; to add it:\n\tcd %s\n\tgo get %s", message, MainModules.ModRoot(e.ImportingMainModule), e.Path)
+ if e.ImportingMainModule.Path != "" && e.ImportingMainModule != LoaderState.MainModules.ModContainingCWD() {
+ return fmt.Sprintf("%s; to add it:\n\tcd %s\n\tgo get %s", message, LoaderState.MainModules.ModRoot(e.ImportingMainModule), e.Path)
}
return fmt.Sprintf("%s; to add it:\n\tgo get %s", message, e.Path)
}
@@ -262,7 +262,7 @@ func (e *invalidImportError) Unwrap() error {
// (https://go.dev/issue/56222) for modules with 'go' versions between 1.17 and
// 1.20, preventing unnecessary go.sum churn and network access in those
// modules.
-func importFromModules(ctx context.Context, path string, rs *Requirements, mg *ModuleGraph, skipModFile bool) (m module.Version, modroot, dir string, altMods []module.Version, err error) {
+func importFromModules(loaderstate *State, ctx context.Context, path string, rs *Requirements, mg *ModuleGraph, skipModFile bool) (m module.Version, modroot, dir string, altMods []module.Version, err error) {
invalidf := func(format string, args ...interface{}) (module.Version, string, string, []module.Version, error) {
return module.Version{}, "", "", nil, &invalidImportError{
importPath: path,
@@ -299,12 +299,12 @@ func importFromModules(ctx context.Context, path string, rs *Requirements, mg *M
// Is the package in the standard library?
pathIsStd := search.IsStandardImportPath(path)
if pathIsStd && modindex.IsStandardPackage(cfg.GOROOT, cfg.BuildContext.Compiler, path) {
- for _, mainModule := range MainModules.Versions() {
- if MainModules.InGorootSrc(mainModule) {
- if dir, ok, err := dirInModule(path, MainModules.PathPrefix(mainModule), MainModules.ModRoot(mainModule), true); err != nil {
- return module.Version{}, MainModules.ModRoot(mainModule), dir, nil, err
+ for _, mainModule := range loaderstate.MainModules.Versions() {
+ if loaderstate.MainModules.InGorootSrc(mainModule) {
+ if dir, ok, err := dirInModule(path, loaderstate.MainModules.PathPrefix(mainModule), loaderstate.MainModules.ModRoot(mainModule), true); err != nil {
+ return module.Version{}, loaderstate.MainModules.ModRoot(mainModule), dir, nil, err
} else if ok {
- return mainModule, MainModules.ModRoot(mainModule), dir, nil, nil
+ return mainModule, loaderstate.MainModules.ModRoot(mainModule), dir, nil, nil
}
}
}
@@ -321,10 +321,10 @@ func importFromModules(ctx context.Context, path string, rs *Requirements, mg *M
// Everything must be in the main modules or the main module's or workspace's vendor directory.
if cfg.BuildMod == "vendor" {
var mainErr error
- for _, mainModule := range MainModules.Versions() {
- modRoot := MainModules.ModRoot(mainModule)
+ for _, mainModule := range loaderstate.MainModules.Versions() {
+ modRoot := loaderstate.MainModules.ModRoot(mainModule)
if modRoot != "" {
- dir, mainOK, err := dirInModule(path, MainModules.PathPrefix(mainModule), modRoot, true)
+ dir, mainOK, err := dirInModule(path, loaderstate.MainModules.PathPrefix(mainModule), modRoot, true)
if mainErr == nil {
mainErr = err
}
@@ -336,8 +336,8 @@ func importFromModules(ctx context.Context, path string, rs *Requirements, mg *M
}
}
- if HasModRoot() {
- vendorDir := VendorDir()
+ if HasModRoot(loaderstate) {
+ vendorDir := VendorDir(loaderstate)
dir, inVendorDir, _ := dirInModule(path, "", vendorDir, false)
if inVendorDir {
readVendorList(vendorDir)
@@ -345,13 +345,13 @@ func importFromModules(ctx context.Context, path string, rs *Requirements, mg *M
// vendor/modules.txt does not exist or the user manually added directories to the vendor directory.
// Go 1.23 and later require vendored packages to be present in modules.txt to be imported.
_, ok := vendorPkgModule[path]
- if ok || (gover.Compare(MainModules.GoVersion(), gover.ExplicitModulesTxtImportVersion) < 0) {
+ if ok || (gover.Compare(loaderstate.MainModules.GoVersion(loaderstate), gover.ExplicitModulesTxtImportVersion) < 0) {
mods = append(mods, vendorPkgModule[path])
dirs = append(dirs, dir)
roots = append(roots, vendorDir)
} else {
subCommand := "mod"
- if inWorkspaceMode() {
+ if inWorkspaceMode(loaderstate) {
subCommand = "work"
}
fmt.Fprintf(os.Stderr, "go: ignoring package %s which exists in the vendor directory but is missing from vendor/modules.txt. To sync the vendor directory run go %s vendor.\n", path, subCommand)
@@ -399,7 +399,7 @@ func importFromModules(ctx context.Context, path string, rs *Requirements, mg *M
ok bool
)
if mg == nil {
- v, ok = rs.rootSelected(prefix)
+ v, ok = rs.rootSelected(loaderstate, prefix)
} else {
v, ok = mg.Selected(prefix), true
}
@@ -408,9 +408,9 @@ func importFromModules(ctx context.Context, path string, rs *Requirements, mg *M
}
m := module.Version{Path: prefix, Version: v}
- root, isLocal, err := fetch(ctx, m)
+ root, isLocal, err := fetch(loaderstate, ctx, m)
if err != nil {
- if sumErr := (*sumMissingError)(nil); errors.As(err, &sumErr) {
+ if _, ok := errors.AsType[*sumMissingError](err); ok {
// We are missing a sum needed to fetch a module in the build list.
// We can't verify that the package is unique, and we may not find
// the package at all. Keep checking other modules to decide which
@@ -471,8 +471,8 @@ func importFromModules(ctx context.Context, path string, rs *Requirements, mg *M
// If the module graph is pruned and this is a test-only dependency
// of a package in "all", we didn't necessarily load that file
// when we read the module graph, so do it now to be sure.
- if !skipModFile && cfg.BuildMod != "vendor" && mods[0].Path != "" && !MainModules.Contains(mods[0].Path) {
- if _, err := goModSummary(mods[0]); err != nil {
+ if !skipModFile && cfg.BuildMod != "vendor" && mods[0].Path != "" && !loaderstate.MainModules.Contains(mods[0].Path) {
+ if _, err := goModSummary(loaderstate, mods[0]); err != nil {
return module.Version{}, "", "", nil, err
}
}
@@ -483,7 +483,7 @@ func importFromModules(ctx context.Context, path string, rs *Requirements, mg *M
// We checked the full module graph and still didn't find the
// requested package.
var queryErr error
- if !HasModRoot() {
+ if !HasModRoot(loaderstate) {
queryErr = ErrNoModRoot
}
return module.Version{}, "", "", nil, &ImportMissingError{Path: path, QueryErr: queryErr, isStd: pathIsStd}
@@ -491,7 +491,7 @@ func importFromModules(ctx context.Context, path string, rs *Requirements, mg *M
// So far we've checked the root dependencies.
// Load the full module graph and try again.
- mg, err = rs.Graph(ctx)
+ mg, err = rs.Graph(loaderstate, ctx)
if err != nil {
// We might be missing one or more transitive (implicit) dependencies from
// the module graph, so we can't return an ImportMissingError here — one
@@ -507,12 +507,12 @@ func importFromModules(ctx context.Context, path string, rs *Requirements, mg *M
//
// Unlike QueryPattern, queryImport prefers to add a replaced version of a
// module *before* checking the proxies for a version to add.
-func queryImport(ctx context.Context, path string, rs *Requirements) (module.Version, error) {
+func queryImport(loaderstate *State, ctx context.Context, path string, rs *Requirements) (module.Version, error) {
// To avoid spurious remote fetches, try the latest replacement for each
// module (golang.org/issue/26241).
var mods []module.Version
- if MainModules != nil { // TODO(#48912): Ensure MainModules exists at this point, and remove the check.
- for mp, mv := range MainModules.HighestReplaced() {
+ if loaderstate.MainModules != nil { // TODO(#48912): Ensure MainModules exists at this point, and remove the check.
+ for mp, mv := range loaderstate.MainModules.HighestReplaced() {
if !maybeInModule(path, mp) {
continue
}
@@ -528,7 +528,7 @@ func queryImport(ctx context.Context, path string, rs *Requirements) (module.Ver
mv = module.ZeroPseudoVersion("v0")
}
}
- mg, err := rs.Graph(ctx)
+ mg, err := rs.Graph(loaderstate, ctx)
if err != nil {
return module.Version{}, err
}
@@ -547,9 +547,9 @@ func queryImport(ctx context.Context, path string, rs *Requirements) (module.Ver
return len(mods[i].Path) > len(mods[j].Path)
})
for _, m := range mods {
- root, isLocal, err := fetch(ctx, m)
+ root, isLocal, err := fetch(loaderstate, ctx, m)
if err != nil {
- if sumErr := (*sumMissingError)(nil); errors.As(err, &sumErr) {
+ if _, ok := errors.AsType[*sumMissingError](err); ok {
return module.Version{}, &ImportMissingSumError{importPath: path}
}
return module.Version{}, err
@@ -567,7 +567,7 @@ func queryImport(ctx context.Context, path string, rs *Requirements) (module.Ver
// The package path is not valid to fetch remotely,
// so it can only exist in a replaced module,
// and we know from the above loop that it is not.
- replacement := Replacement(mods[0])
+ replacement := Replacement(loaderstate, mods[0])
return module.Version{}, &PackageNotInModuleError{
Mod: mods[0],
Query: "latest",
@@ -607,12 +607,12 @@ func queryImport(ctx context.Context, path string, rs *Requirements) (module.Ver
// and return m, dir, ImportMissingError.
fmt.Fprintf(os.Stderr, "go: finding module for package %s\n", path)
- mg, err := rs.Graph(ctx)
+ mg, err := rs.Graph(loaderstate, ctx)
if err != nil {
return module.Version{}, err
}
- candidates, err := QueryPackages(ctx, path, "latest", mg.Selected, CheckAllowed)
+ candidates, err := QueryPackages(loaderstate, ctx, path, "latest", mg.Selected, CheckAllowed)
if err != nil {
if errors.Is(err, fs.ErrNotExist) {
// Return "cannot find module providing package […]" instead of whatever
@@ -747,15 +747,15 @@ func dirInModule(path, mpath, mdir string, isLocal bool) (dir string, haveGoFile
//
// The isLocal return value reports whether the replacement,
// if any, is local to the filesystem.
-func fetch(ctx context.Context, mod module.Version) (dir string, isLocal bool, err error) {
- if modRoot := MainModules.ModRoot(mod); modRoot != "" {
+func fetch(loaderstate *State, ctx context.Context, mod module.Version) (dir string, isLocal bool, err error) {
+ if modRoot := loaderstate.MainModules.ModRoot(mod); modRoot != "" {
return modRoot, true, nil
}
- if r := Replacement(mod); r.Path != "" {
+ if r := Replacement(loaderstate, mod); r.Path != "" {
if r.Version == "" {
dir = r.Path
if !filepath.IsAbs(dir) {
- dir = filepath.Join(replaceRelativeTo(), dir)
+ dir = filepath.Join(replaceRelativeTo(loaderstate), dir)
}
// Ensure that the replacement directory actually exists:
// dirInModule does not report errors for missing modules,
@@ -780,7 +780,7 @@ func fetch(ctx context.Context, mod module.Version) (dir string, isLocal bool, e
mod = r
}
- if mustHaveSums() && !modfetch.HaveSum(mod) {
+ if mustHaveSums(loaderstate) && !modfetch.HaveSum(mod) {
return "", false, module.VersionError(mod, &sumMissingError{})
}
@@ -790,8 +790,8 @@ func fetch(ctx context.Context, mod module.Version) (dir string, isLocal bool, e
// mustHaveSums reports whether we require that all checksums
// needed to load or build packages are already present in the go.sum file.
-func mustHaveSums() bool {
- return HasModRoot() && cfg.BuildMod == "readonly" && !inWorkspaceMode()
+func mustHaveSums(loaderstate *State) bool {
+ return HasModRoot(loaderstate) && cfg.BuildMod == "readonly" && !inWorkspaceMode(loaderstate)
}
type sumMissingError struct {
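
The import.go hunks show how the threading propagates down a call chain: once leaf helpers such as `fetch` and `mustHaveSums` need the loader state, every function on the path to them (`importFromModules`, `queryImport`) gains the parameter as well. A compressed sketch of that shape, with invented names and the checksum check reduced to a stub:

```go
// Sketch only: in the real code the error is raised only when the checksum
// is actually absent (modfetch.HaveSum); here the sketch fails whenever
// sums are required, to keep it short.
package main

import "fmt"

type state struct {
	modRoots []string
	buildMod string
}

func hasModRoot(s *state) bool { return len(s.modRoots) > 0 }

func mustHaveSums(s *state) bool {
	// Combines several pieces of loader state instead of package globals.
	return hasModRoot(s) && s.buildMod == "readonly"
}

func fetch(s *state, mod string) (dir string, err error) {
	if mustHaveSums(s) {
		return "", fmt.Errorf("missing go.sum entry for %s", mod)
	}
	return "/fake/module/cache/" + mod, nil
}

func main() {
	s := &state{modRoots: []string{"/src/work"}, buildMod: "mod"}
	dir, err := fetch(s, "example.com/m@v1.0.0")
	fmt.Println(dir, err)
}
```
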
diff --git a/src/cmd/go/internal/modload/import_test.go b/src/cmd/go/internal/modload/import_test.go
index eb4f5d64d3a3c7..a5c4b837a0be12 100644
--- a/src/cmd/go/internal/modload/import_test.go
+++ b/src/cmd/go/internal/modload/import_test.go
@@ -60,21 +60,21 @@ func TestQueryImport(t *testing.T) {
testenv.MustHaveExecPath(t, "git")
oldAllowMissingModuleImports := allowMissingModuleImports
- oldRootMode := RootMode
+ oldRootMode := LoaderState.RootMode
defer func() {
allowMissingModuleImports = oldAllowMissingModuleImports
- RootMode = oldRootMode
+ LoaderState.RootMode = oldRootMode
}()
allowMissingModuleImports = true
- RootMode = NoRoot
+ LoaderState.RootMode = NoRoot
ctx := context.Background()
- rs := LoadModFile(ctx)
+ rs := LoadModFile(LoaderState, ctx)
for _, tt := range importTests {
t.Run(strings.ReplaceAll(tt.path, "/", "_"), func(t *testing.T) {
// Note that there is no build list, so Import should always fail.
- m, err := queryImport(ctx, tt.path, rs)
+ m, err := queryImport(LoaderState, ctx, tt.path, rs)
if tt.err == "" {
if err != nil {
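
The test above saves and restores `LoaderState.RootMode` around the query, and init.go below generalizes that idiom: the old package variables move into a `State` struct, `LoaderState` becomes the default instance, and `setState` swaps entire states in and out, as `EnterWorkspace` and `Reset` do. A minimal sketch of the snapshot-and-restore pattern, with illustrative field names only:

```go
// Sketch only: a shared default state, swapped and restored around a scope.
package main

import "fmt"

type state struct {
	rootMode    string
	initialized bool
}

var loaderState = &state{}

// setState installs s as the current state and returns the previous one so
// callers can defer the restore.
func setState(s state) state {
	old := *loaderState
	*loaderState = s
	return old
}

func main() {
	old := setState(state{rootMode: "NoRoot", initialized: true})
	defer setState(old)

	fmt.Println(loaderState.rootMode) // "NoRoot" while the temporary state is installed
}
```

Deferring the restore leaves the shared default unchanged for whatever runs next, which is exactly what the test relies on.
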
diff --git a/src/cmd/go/internal/modload/init.go b/src/cmd/go/internal/modload/init.go
index 498ff7433ea6fe..20751528862ce2 100644
--- a/src/cmd/go/internal/modload/init.go
+++ b/src/cmd/go/internal/modload/init.go
@@ -38,13 +38,6 @@ import (
//
// TODO(#40775): See if these can be plumbed as explicit parameters.
var (
- // RootMode determines whether a module root is needed.
- RootMode Root
-
- // ForceUseModules may be set to force modules to be enabled when
- // GO111MODULE=auto or to report an error when GO111MODULE=off.
- ForceUseModules bool
-
allowMissingModuleImports bool
// ExplicitWriteGoMod prevents LoadPackages, ListModules, and other functions
@@ -60,26 +53,18 @@ var (
// Variables set in Init.
var (
- initialized bool
-
- // These are primarily used to initialize the MainModules, and should be
- // eventually superseded by them but are still used in cases where the module
- // roots are required but MainModules hasn't been initialized yet. Set to
- // the modRoots of the main modules.
- // modRoots != nil implies len(modRoots) > 0
- modRoots []string
- gopath string
+ gopath string
)
// EnterModule resets MainModules and requirements to refer to just this one module.
func EnterModule(ctx context.Context, enterModroot string) {
- MainModules = nil // reset MainModules
- requirements = nil
- workFilePath = "" // Force module mode
+ LoaderState.MainModules = nil // reset MainModules
+ LoaderState.requirements = nil
+ LoaderState.workFilePath = "" // Force module mode
modfetch.Reset()
- modRoots = []string{enterModroot}
- LoadModFile(ctx)
+ LoaderState.modRoots = []string{enterModroot}
+ LoadModFile(LoaderState, ctx)
}
// EnterWorkspace enters workspace mode from module mode, applying the updated requirements to the main
@@ -88,36 +73,30 @@ func EnterModule(ctx context.Context, enterModroot string) {
// EnterWorkspace will modify the global state they depend on in a non-thread-safe way.
func EnterWorkspace(ctx context.Context) (exit func(), err error) {
// Find the identity of the main module that will be updated before we reset modload state.
- mm := MainModules.mustGetSingleMainModule()
+ mm := LoaderState.MainModules.mustGetSingleMainModule(LoaderState)
// Get the updated modfile we will use for that module.
- _, _, updatedmodfile, err := UpdateGoModFromReqs(ctx, WriteOpts{})
+ _, _, updatedmodfile, err := UpdateGoModFromReqs(LoaderState, ctx, WriteOpts{})
if err != nil {
return nil, err
}
// Reset the state to a clean state.
- oldstate := setState(state{})
- ForceUseModules = true
+ oldstate := setState(State{})
+ LoaderState.ForceUseModules = true
// Load in workspace mode.
- InitWorkfile()
- LoadModFile(ctx)
+ InitWorkfile(LoaderState)
+ LoadModFile(LoaderState, ctx)
// Update the content of the previous main module, and recompute the requirements.
- *MainModules.ModFile(mm) = *updatedmodfile
- requirements = requirementsFromModFiles(ctx, MainModules.workFile, slices.Collect(maps.Values(MainModules.modFiles)), nil)
+ *LoaderState.MainModules.ModFile(mm) = *updatedmodfile
+ LoaderState.requirements = requirementsFromModFiles(LoaderState, ctx, LoaderState.MainModules.workFile, slices.Collect(maps.Values(LoaderState.MainModules.modFiles)), nil)
return func() {
setState(oldstate)
}, nil
}
-// Variable set in InitWorkfile
-var (
- // Set to the path to the go.work file, or "" if workspace mode is disabled.
- workFilePath string
-)
-
type MainModuleSet struct {
// versions are the module.Version values of each of the main modules.
// For each of them, the Path fields are ordinary module paths and the Version
@@ -203,12 +182,12 @@ func (mms *MainModuleSet) InGorootSrc(m module.Version) bool {
return mms.inGorootSrc[m]
}
-func (mms *MainModuleSet) mustGetSingleMainModule() module.Version {
+func (mms *MainModuleSet) mustGetSingleMainModule(loaderstate *State) module.Version {
if mms == nil || len(mms.versions) == 0 {
panic("internal error: mustGetSingleMainModule called in context with no main modules")
}
if len(mms.versions) != 1 {
- if inWorkspaceMode() {
+ if inWorkspaceMode(loaderstate) {
panic("internal error: mustGetSingleMainModule called in workspace mode")
} else {
panic("internal error: multiple main modules present outside of workspace mode")
@@ -217,14 +196,14 @@ func (mms *MainModuleSet) mustGetSingleMainModule() module.Version {
return mms.versions[0]
}
-func (mms *MainModuleSet) GetSingleIndexOrNil() *modFileIndex {
+func (mms *MainModuleSet) GetSingleIndexOrNil(loaderstate *State) *modFileIndex {
if mms == nil {
return nil
}
if len(mms.versions) == 0 {
return nil
}
- return mms.indices[mms.mustGetSingleMainModule()]
+ return mms.indices[mms.mustGetSingleMainModule(loaderstate)]
}
func (mms *MainModuleSet) Index(m module.Version) *modFileIndex {
@@ -267,12 +246,12 @@ func (mms *MainModuleSet) HighestReplaced() map[string]string {
// GoVersion returns the go version set on the single module, in module mode,
// or the go.work file in workspace mode.
-func (mms *MainModuleSet) GoVersion() string {
- if inWorkspaceMode() {
+func (mms *MainModuleSet) GoVersion(loaderstate *State) string {
+ if inWorkspaceMode(loaderstate) {
return gover.FromGoWork(mms.workFile)
}
if mms != nil && len(mms.versions) == 1 {
- f := mms.ModFile(mms.mustGetSingleMainModule())
+ f := mms.ModFile(mms.mustGetSingleMainModule(loaderstate))
if f == nil {
// Special case: we are outside a module, like 'go run x.go'.
// Assume the local Go version.
@@ -287,15 +266,15 @@ func (mms *MainModuleSet) GoVersion() string {
// Godebugs returns the godebug lines set on the single module, in module mode,
// or on the go.work file in workspace mode.
// The caller must not modify the result.
-func (mms *MainModuleSet) Godebugs() []*modfile.Godebug {
- if inWorkspaceMode() {
+func (mms *MainModuleSet) Godebugs(loaderstate *State) []*modfile.Godebug {
+ if inWorkspaceMode(loaderstate) {
if mms.workFile != nil {
return mms.workFile.Godebug
}
return nil
}
if mms != nil && len(mms.versions) == 1 {
- f := mms.ModFile(mms.mustGetSingleMainModule())
+ f := mms.ModFile(mms.mustGetSingleMainModule(loaderstate))
if f == nil {
// Special case: we are outside a module, like 'go run x.go'.
return nil
@@ -309,8 +288,6 @@ func (mms *MainModuleSet) WorkFileReplaceMap() map[module.Version]module.Version
return mms.workFileReplaceMap
}
-var MainModules *MainModuleSet
-
type Root int
const (
@@ -338,16 +315,16 @@ const (
// To make permanent changes to the require statements
// in go.mod, edit it before loading.
func ModFile() *modfile.File {
- Init()
- modFile := MainModules.ModFile(MainModules.mustGetSingleMainModule())
+ Init(LoaderState)
+ modFile := LoaderState.MainModules.ModFile(LoaderState.MainModules.mustGetSingleMainModule(LoaderState))
if modFile == nil {
- die()
+ die(LoaderState)
}
return modFile
}
-func BinDir() string {
- Init()
+func BinDir(loaderstate *State) string {
+ Init(loaderstate)
if cfg.GOBIN != "" {
return cfg.GOBIN
}
@@ -360,13 +337,13 @@ func BinDir() string {
// InitWorkfile initializes the workFilePath variable for commands that
// operate in workspace mode. It should not be called by other commands,
// for example 'go mod tidy', that don't operate in workspace mode.
-func InitWorkfile() {
+func InitWorkfile(loaderstate *State) {
// Initialize fsys early because we need overlay to read go.work file.
fips140.Init()
if err := fsys.Init(); err != nil {
base.Fatal(err)
}
- workFilePath = FindGoWork(base.Cwd())
+ loaderstate.workFilePath = FindGoWork(loaderstate, base.Cwd())
}
// FindGoWork returns the name of the go.work file for this command,
@@ -374,8 +351,8 @@ func InitWorkfile() {
// Most code should use Init and Enabled rather than use this directly.
// It is exported mainly for Go toolchain switching, which must process
// the go.work very early at startup.
-func FindGoWork(wd string) string {
- if RootMode == NoRoot {
+func FindGoWork(loaderstate *State, wd string) string {
+ if loaderstate.RootMode == NoRoot {
return ""
}
@@ -394,34 +371,34 @@ func FindGoWork(wd string) string {
// WorkFilePath returns the absolute path of the go.work file, or "" if not in
// workspace mode. WorkFilePath must be called after InitWorkfile.
-func WorkFilePath() string {
- return workFilePath
+func WorkFilePath(loaderstate *State) string {
+ return loaderstate.workFilePath
}
// Reset clears all the initialized, cached state about the use of modules,
// so that we can start over.
func Reset() {
- setState(state{})
+ setState(State{})
}
-func setState(s state) state {
- oldState := state{
- initialized: initialized,
- forceUseModules: ForceUseModules,
- rootMode: RootMode,
- modRoots: modRoots,
+func setState(s State) State {
+ oldState := State{
+ initialized: LoaderState.initialized,
+ ForceUseModules: LoaderState.ForceUseModules,
+ RootMode: LoaderState.RootMode,
+ modRoots: LoaderState.modRoots,
modulesEnabled: cfg.ModulesEnabled,
- mainModules: MainModules,
- requirements: requirements,
+ MainModules: LoaderState.MainModules,
+ requirements: LoaderState.requirements,
}
- initialized = s.initialized
- ForceUseModules = s.forceUseModules
- RootMode = s.rootMode
- modRoots = s.modRoots
+ LoaderState.initialized = s.initialized
+ LoaderState.ForceUseModules = s.ForceUseModules
+ LoaderState.RootMode = s.RootMode
+ LoaderState.modRoots = s.modRoots
cfg.ModulesEnabled = s.modulesEnabled
- MainModules = s.mainModules
- requirements = s.requirements
- workFilePath = s.workFilePath
+ LoaderState.MainModules = s.MainModules
+ LoaderState.requirements = s.requirements
+ LoaderState.workFilePath = s.workFilePath
// The modfetch package's global state is used to compute
// the go.sum file, so save and restore it along with the
// modload state.
@@ -429,27 +406,56 @@ func setState(s state) state {
return oldState
}
-type state struct {
- initialized bool
- forceUseModules bool
- rootMode Root
- modRoots []string
- modulesEnabled bool
- mainModules *MainModuleSet
- requirements *Requirements
- workFilePath string
- modfetchState modfetch.State
+type State struct {
+ initialized bool
+
+ // ForceUseModules may be set to force modules to be enabled when
+ // GO111MODULE=auto or to report an error when GO111MODULE=off.
+ ForceUseModules bool
+
+ // RootMode determines whether a module root is needed.
+ RootMode Root
+
+ // These are primarily used to initialize the MainModules, and should
+ // be eventually superseded by them but are still used in cases where
+ // the module roots are required but MainModules has not been
+ // initialized yet. Set to the modRoots of the main modules.
+ // modRoots != nil implies len(modRoots) > 0
+ modRoots []string
+ modulesEnabled bool
+ MainModules *MainModuleSet
+
+ // requirements is the requirement graph for the main module.
+ //
+ // It is always non-nil if the main module's go.mod file has been
+ // loaded.
+ //
+ // This variable should only be read from the loadModFile
+ // function, and should only be written in the loadModFile and
+ // commitRequirements functions. All other functions that need or
+ // produce a *Requirements should accept and/or return an explicit
+ // parameter.
+ requirements *Requirements
+
+ // Set to the path to the go.work file, or "" if workspace mode is
+ // disabled
+ workFilePath string
+ modfetchState modfetch.State
}
+// NewState returns a new, empty module loader State.
+func NewState() *State { return &State{} }
+
+// LoaderState is the default, shared module loader State. It is used by
+// entry points and callers that have not yet been converted to carry an
+// explicit *State parameter.
+var LoaderState = NewState()
+
// Init determines whether module mode is enabled, locates the root of the
// current module (if any), sets environment variables for Git subprocesses, and
// configures the cfg, codehost, load, modfetch, and search packages for use
// with modules.
-func Init() {
- if initialized {
+func Init(loaderstate *State) {
+ if loaderstate.initialized {
return
}
- initialized = true
+ loaderstate.initialized = true
fips140.Init()
@@ -462,11 +468,11 @@ func Init() {
default:
base.Fatalf("go: unknown environment setting GO111MODULE=%s", env)
case "auto":
- mustUseModules = ForceUseModules
+ mustUseModules = loaderstate.ForceUseModules
case "on", "":
mustUseModules = true
case "off":
- if ForceUseModules {
+ if loaderstate.ForceUseModules {
base.Fatalf("go: modules disabled by GO111MODULE=off; see 'go help modules'")
}
mustUseModules = false
@@ -490,15 +496,15 @@ func Init() {
if os.Getenv("GCM_INTERACTIVE") == "" {
os.Setenv("GCM_INTERACTIVE", "never")
}
- if modRoots != nil {
+ if loaderstate.modRoots != nil {
// modRoot set before Init was called ("go mod init" does this).
// No need to search for go.mod.
- } else if RootMode == NoRoot {
+ } else if loaderstate.RootMode == NoRoot {
if cfg.ModFile != "" && !base.InGOFLAGS("-modfile") {
base.Fatalf("go: -modfile cannot be used with commands that ignore the current module")
}
- modRoots = nil
- } else if workFilePath != "" {
+ loaderstate.modRoots = nil
+ } else if loaderstate.workFilePath != "" {
// We're in workspace mode, which implies module mode.
if cfg.ModFile != "" {
base.Fatalf("go: -modfile cannot be used in workspace mode")
@@ -508,7 +514,7 @@ func Init() {
if cfg.ModFile != "" {
base.Fatalf("go: cannot find main module, but -modfile was set.\n\t-modfile cannot be used to set the module root directory.")
}
- if RootMode == NeedRoot {
+ if loaderstate.RootMode == NeedRoot {
base.Fatal(ErrNoModRoot)
}
if !mustUseModules {
@@ -523,14 +529,14 @@ func Init() {
// It's a bit of a peculiar thing to disallow but quite mysterious
// when it happens. See golang.org/issue/26708.
fmt.Fprintf(os.Stderr, "go: warning: ignoring go.mod in system temp root %v\n", os.TempDir())
- if RootMode == NeedRoot {
+ if loaderstate.RootMode == NeedRoot {
base.Fatal(ErrNoModRoot)
}
if !mustUseModules {
return
}
} else {
- modRoots = []string{modRoot}
+ loaderstate.modRoots = []string{modRoot}
}
}
if cfg.ModFile != "" && !strings.HasSuffix(cfg.ModFile, ".mod") {
@@ -539,13 +545,13 @@ func Init() {
// We're in module mode. Set any global variables that need to be set.
cfg.ModulesEnabled = true
- setDefaultBuildMod()
+ setDefaultBuildMod(loaderstate)
list := filepath.SplitList(cfg.BuildContext.GOPATH)
if len(list) > 0 && list[0] != "" {
gopath = list[0]
if _, err := fsys.Stat(filepath.Join(gopath, "go.mod")); err == nil {
fmt.Fprintf(os.Stderr, "go: warning: ignoring go.mod in $GOPATH %v\n", gopath)
- if RootMode == NeedRoot {
+ if loaderstate.RootMode == NeedRoot {
base.Fatal(ErrNoModRoot)
}
if !mustUseModules {
@@ -565,11 +571,11 @@ func Init() {
// be called until the command is installed and flags are parsed. Instead of
// calling Init and Enabled, the main package can call this function.
func WillBeEnabled() bool {
- if modRoots != nil || cfg.ModulesEnabled {
+ if LoaderState.modRoots != nil || cfg.ModulesEnabled {
// Already enabled.
return true
}
- if initialized {
+ if LoaderState.initialized {
// Initialized, not enabled.
return false
}
@@ -616,49 +622,49 @@ func FindGoMod(wd string) string {
// If modules are enabled but there is no main module, Enabled returns true
// and then the first use of module information will call die
// (usually through MustModRoot).
-func Enabled() bool {
- Init()
- return modRoots != nil || cfg.ModulesEnabled
+func Enabled(loaderstate *State) bool {
+ Init(loaderstate)
+ return loaderstate.modRoots != nil || cfg.ModulesEnabled
}
-func VendorDir() string {
- if inWorkspaceMode() {
- return filepath.Join(filepath.Dir(WorkFilePath()), "vendor")
+func VendorDir(loaderstate *State) string {
+ if inWorkspaceMode(loaderstate) {
+ return filepath.Join(filepath.Dir(WorkFilePath(loaderstate)), "vendor")
}
// Even if -mod=vendor, we could be operating with no mod root (and thus no
// vendor directory). As long as there are no dependencies that is expected
// to work. See script/vendor_outside_module.txt.
- modRoot := MainModules.ModRoot(MainModules.mustGetSingleMainModule())
+ modRoot := loaderstate.MainModules.ModRoot(loaderstate.MainModules.mustGetSingleMainModule(loaderstate))
if modRoot == "" {
panic("vendor directory does not exist when in single module mode outside of a module")
}
return filepath.Join(modRoot, "vendor")
}
-func inWorkspaceMode() bool {
- if !initialized {
+func inWorkspaceMode(loaderstate *State) bool {
+ if !loaderstate.initialized {
panic("inWorkspaceMode called before modload.Init called")
}
- if !Enabled() {
+ if !Enabled(loaderstate) {
return false
}
- return workFilePath != ""
+ return loaderstate.workFilePath != ""
}
// HasModRoot reports whether a main module or main modules are present.
// HasModRoot may return false even if Enabled returns true: for example, 'get'
// does not require a main module.
-func HasModRoot() bool {
- Init()
- return modRoots != nil
+func HasModRoot(loaderstate *State) bool {
+ Init(loaderstate)
+ return loaderstate.modRoots != nil
}
// MustHaveModRoot checks that a main module or main modules are present,
// and calls base.Fatalf if there are no main modules.
func MustHaveModRoot() {
- Init()
- if !HasModRoot() {
- die()
+ Init(LoaderState)
+ if !HasModRoot(LoaderState) {
+ die(LoaderState)
}
}
@@ -680,11 +686,11 @@ func modFilePath(modRoot string) string {
return filepath.Join(modRoot, "go.mod")
}
-func die() {
+func die(loaderstate *State) {
if cfg.Getenv("GO111MODULE") == "off" {
base.Fatalf("go: modules disabled by GO111MODULE=off; see 'go help modules'")
}
- if !inWorkspaceMode() {
+ if !inWorkspaceMode(loaderstate) {
if dir, name := findAltConfig(base.Cwd()); dir != "" {
rel, err := filepath.Rel(base.Cwd(), dir)
if err != nil {
@@ -705,7 +711,7 @@ func die() {
type noMainModulesError struct{}
func (e noMainModulesError) Error() string {
- if inWorkspaceMode() {
+ if inWorkspaceMode(LoaderState) {
return "no modules were found in the current workspace; see 'go help work'"
}
return "go.mod file not found in current directory or any parent directory; see 'go help modules'"
@@ -862,33 +868,33 @@ func UpdateWorkFile(wf *modfile.WorkFile) {
// other, but unlike LoadModGraph does not load the full module graph or check
// it for global consistency. Most callers outside of the modload package should
// use LoadModGraph instead.
-func LoadModFile(ctx context.Context) *Requirements {
- rs, err := loadModFile(ctx, nil)
+func LoadModFile(loaderstate *State, ctx context.Context) *Requirements {
+ rs, err := loadModFile(loaderstate, ctx, nil)
if err != nil {
base.Fatal(err)
}
return rs
}
-func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error) {
- if requirements != nil {
- return requirements, nil
+func loadModFile(loaderstate *State, ctx context.Context, opts *PackageOpts) (*Requirements, error) {
+ if loaderstate.requirements != nil {
+ return loaderstate.requirements, nil
}
- Init()
+ Init(loaderstate)
var workFile *modfile.WorkFile
- if inWorkspaceMode() {
+ if inWorkspaceMode(loaderstate) {
var err error
- workFile, modRoots, err = LoadWorkFile(workFilePath)
+ workFile, loaderstate.modRoots, err = LoadWorkFile(loaderstate.workFilePath)
if err != nil {
return nil, err
}
- for _, modRoot := range modRoots {
+ for _, modRoot := range loaderstate.modRoots {
sumFile := strings.TrimSuffix(modFilePath(modRoot), ".mod") + ".sum"
modfetch.WorkspaceGoSumFiles = append(modfetch.WorkspaceGoSumFiles, sumFile)
}
- modfetch.GoSumFile = workFilePath + ".sum"
- } else if len(modRoots) == 0 {
+ modfetch.GoSumFile = loaderstate.workFilePath + ".sum"
+ } else if len(loaderstate.modRoots) == 0 {
// We're in module mode, but not inside a module.
//
// Commands like 'go build', 'go run', 'go list' have no go.mod file to
@@ -907,25 +913,25 @@ func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error)
//
// See golang.org/issue/32027.
} else {
- modfetch.GoSumFile = strings.TrimSuffix(modFilePath(modRoots[0]), ".mod") + ".sum"
+ modfetch.GoSumFile = strings.TrimSuffix(modFilePath(loaderstate.modRoots[0]), ".mod") + ".sum"
}
- if len(modRoots) == 0 {
+ if len(loaderstate.modRoots) == 0 {
// TODO(#49228): Instead of creating a fake module with an empty modroot,
// make MainModules.Len() == 0 mean that we're in module mode but not inside
// any module.
mainModule := module.Version{Path: "command-line-arguments"}
- MainModules = makeMainModules([]module.Version{mainModule}, []string{""}, []*modfile.File{nil}, []*modFileIndex{nil}, nil)
+ loaderstate.MainModules = makeMainModules(loaderstate, []module.Version{mainModule}, []string{""}, []*modfile.File{nil}, []*modFileIndex{nil}, nil)
var (
goVersion string
pruning modPruning
roots []module.Version
direct = map[string]bool{"go": true}
)
- if inWorkspaceMode() {
+ if inWorkspaceMode(loaderstate) {
// Since we are in a workspace, the Go version for the synthetic
// "command-line-arguments" module must not exceed the Go version
// for the workspace.
- goVersion = MainModules.GoVersion()
+ goVersion = loaderstate.MainModules.GoVersion(loaderstate)
pruning = workspace
roots = []module.Version{
mainModule,
@@ -941,26 +947,26 @@ func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error)
}
}
rawGoVersion.Store(mainModule, goVersion)
- requirements = newRequirements(pruning, roots, direct)
+ loaderstate.requirements = newRequirements(loaderstate, pruning, roots, direct)
if cfg.BuildMod == "vendor" {
// For issue 56536: Some users may have GOFLAGS=-mod=vendor set.
// Make sure it behaves as though the fake module is vendored
// with no dependencies.
- requirements.initVendor(nil)
+ loaderstate.requirements.initVendor(loaderstate, nil)
}
- return requirements, nil
+ return loaderstate.requirements, nil
}
var modFiles []*modfile.File
var mainModules []module.Version
var indices []*modFileIndex
var errs []error
- for _, modroot := range modRoots {
+ for _, modroot := range loaderstate.modRoots {
gomod := modFilePath(modroot)
var fixed bool
- data, f, err := ReadModFile(gomod, fixVersion(ctx, &fixed))
+ data, f, err := ReadModFile(gomod, fixVersion(loaderstate, ctx, &fixed))
if err != nil {
- if inWorkspaceMode() {
+ if inWorkspaceMode(loaderstate) {
if tooNew, ok := err.(*gover.TooNewError); ok && !strings.HasPrefix(cfg.CmdName, "work ") {
// Switching to a newer toolchain won't help - the go.work has the wrong version.
// Report this more specific error, unless we are a command like 'go work use'
@@ -975,7 +981,7 @@ func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error)
errs = append(errs, err)
continue
}
- if inWorkspaceMode() && !strings.HasPrefix(cfg.CmdName, "work ") {
+ if inWorkspaceMode(loaderstate) && !strings.HasPrefix(cfg.CmdName, "work ") {
// Refuse to use workspace if its go version is too old.
// Disable this check if we are a workspace command like work use or work sync,
// which will fix the problem.
@@ -987,7 +993,7 @@ func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error)
}
}
- if !inWorkspaceMode() {
+ if !inWorkspaceMode(loaderstate) {
ok := true
for _, g := range f.Godebug {
if err := CheckGodebug("godebug", g.Key, g.Value); err != nil {
@@ -1016,45 +1022,45 @@ func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error)
return nil, errors.Join(errs...)
}
- MainModules = makeMainModules(mainModules, modRoots, modFiles, indices, workFile)
- setDefaultBuildMod() // possibly enable automatic vendoring
- rs := requirementsFromModFiles(ctx, workFile, modFiles, opts)
+ loaderstate.MainModules = makeMainModules(loaderstate, mainModules, loaderstate.modRoots, modFiles, indices, workFile)
+ setDefaultBuildMod(loaderstate) // possibly enable automatic vendoring
+ rs := requirementsFromModFiles(loaderstate, ctx, workFile, modFiles, opts)
if cfg.BuildMod == "vendor" {
- readVendorList(VendorDir())
- versions := MainModules.Versions()
+ readVendorList(VendorDir(loaderstate))
+ versions := loaderstate.MainModules.Versions()
indexes := make([]*modFileIndex, 0, len(versions))
modFiles := make([]*modfile.File, 0, len(versions))
modRoots := make([]string, 0, len(versions))
for _, m := range versions {
- indexes = append(indexes, MainModules.Index(m))
- modFiles = append(modFiles, MainModules.ModFile(m))
- modRoots = append(modRoots, MainModules.ModRoot(m))
+ indexes = append(indexes, loaderstate.MainModules.Index(m))
+ modFiles = append(modFiles, loaderstate.MainModules.ModFile(m))
+ modRoots = append(modRoots, loaderstate.MainModules.ModRoot(m))
}
- checkVendorConsistency(indexes, modFiles, modRoots)
- rs.initVendor(vendorList)
+ checkVendorConsistency(loaderstate, indexes, modFiles, modRoots)
+ rs.initVendor(loaderstate, vendorList)
}
- if inWorkspaceMode() {
+ if inWorkspaceMode(loaderstate) {
// We don't need to update the mod file so return early.
- requirements = rs
+ loaderstate.requirements = rs
return rs, nil
}
- mainModule := MainModules.mustGetSingleMainModule()
+ mainModule := loaderstate.MainModules.mustGetSingleMainModule(loaderstate)
- if rs.hasRedundantRoot() {
+ if rs.hasRedundantRoot(loaderstate) {
// If any module path appears more than once in the roots, we know that the
// go.mod file needs to be updated even though we have not yet loaded any
// transitive dependencies.
var err error
- rs, err = updateRoots(ctx, rs.direct, rs, nil, nil, false)
+ rs, err = updateRoots(loaderstate, ctx, rs.direct, rs, nil, nil, false)
if err != nil {
return nil, err
}
}
- if MainModules.Index(mainModule).goVersion == "" && rs.pruning != workspace {
+ if loaderstate.MainModules.Index(mainModule).goVersion == "" && rs.pruning != workspace {
// TODO(#45551): Do something more principled instead of checking
// cfg.CmdName directly here.
if cfg.BuildMod == "mod" && cfg.CmdName != "mod graph" && cfg.CmdName != "mod why" {
@@ -1063,8 +1069,8 @@ func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error)
if opts != nil && opts.TidyGoVersion != "" {
v = opts.TidyGoVersion
}
- addGoStmt(MainModules.ModFile(mainModule), mainModule, v)
- rs = overrideRoots(ctx, rs, []module.Version{{Path: "go", Version: v}})
+ addGoStmt(loaderstate.MainModules.ModFile(mainModule), mainModule, v)
+ rs = overrideRoots(loaderstate, ctx, rs, []module.Version{{Path: "go", Version: v}})
// We need to add a 'go' version to the go.mod file, but we must assume
// that its existing contents match something between Go 1.11 and 1.16.
@@ -1073,7 +1079,7 @@ func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error)
// requirements to support pruning.
if gover.Compare(v, gover.ExplicitIndirectVersion) >= 0 {
var err error
- rs, err = convertPruning(ctx, rs, pruned)
+ rs, err = convertPruning(loaderstate, ctx, rs, pruned)
if err != nil {
return nil, err
}
@@ -1083,8 +1089,8 @@ func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error)
}
}
- requirements = rs
- return requirements, nil
+ loaderstate.requirements = rs
+ return loaderstate.requirements, nil
}
func errWorkTooOld(gomod string, wf *modfile.WorkFile, goVers string) error {
@@ -1119,8 +1125,8 @@ func CheckReservedModulePath(path string) error {
// packages at multiple versions from the same module).
func CreateModFile(ctx context.Context, modPath string) {
modRoot := base.Cwd()
- modRoots = []string{modRoot}
- Init()
+ LoaderState.modRoots = []string{modRoot}
+ Init(LoaderState)
modFilePath := modFilePath(modRoot)
if _, err := fsys.Stat(modFilePath); err == nil {
base.Fatalf("go: %s already exists", modFilePath)
@@ -1156,16 +1162,16 @@ func CreateModFile(ctx context.Context, modPath string) {
fmt.Fprintf(os.Stderr, "go: creating new go.mod: module %s\n", modPath)
modFile := new(modfile.File)
modFile.AddModuleStmt(modPath)
- MainModules = makeMainModules([]module.Version{modFile.Module.Mod}, []string{modRoot}, []*modfile.File{modFile}, []*modFileIndex{nil}, nil)
+ LoaderState.MainModules = makeMainModules(LoaderState, []module.Version{modFile.Module.Mod}, []string{modRoot}, []*modfile.File{modFile}, []*modFileIndex{nil}, nil)
addGoStmt(modFile, modFile.Module.Mod, gover.Local()) // Add the go directive before converted module requirements.
- rs := requirementsFromModFiles(ctx, nil, []*modfile.File{modFile}, nil)
- rs, err := updateRoots(ctx, rs.direct, rs, nil, nil, false)
+ rs := requirementsFromModFiles(LoaderState, ctx, nil, []*modfile.File{modFile}, nil)
+ rs, err := updateRoots(LoaderState, ctx, rs.direct, rs, nil, nil, false)
if err != nil {
base.Fatal(err)
}
- requirements = rs
- if err := commitRequirements(ctx, WriteOpts{}); err != nil {
+ LoaderState.requirements = rs
+ if err := commitRequirements(LoaderState, ctx, WriteOpts{}); err != nil {
base.Fatal(err)
}
@@ -1200,7 +1206,7 @@ func CreateModFile(ctx context.Context, modPath string) {
// and does nothing for versions that already appear to be canonical.
//
// The VersionFixer sets 'fixed' if it ever returns a non-canonical version.
-func fixVersion(ctx context.Context, fixed *bool) modfile.VersionFixer {
+func fixVersion(loaderstate *State, ctx context.Context, fixed *bool) modfile.VersionFixer {
return func(path, vers string) (resolved string, err error) {
defer func() {
if err == nil && resolved != vers {
@@ -1233,7 +1239,7 @@ func fixVersion(ctx context.Context, fixed *bool) modfile.VersionFixer {
return vers, nil
}
- info, err := Query(ctx, path, vers, "", nil)
+ info, err := Query(loaderstate, ctx, path, vers, "", nil)
if err != nil {
return "", err
}
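
For reference, the closure returned by fixVersion above has the modfile.VersionFixer shape from golang.org/x/mod/modfile (func(path, version string) (string, error)) and is ultimately handed to the go.mod parser. The following is a minimal, self-contained sketch of how such a fixer is consumed; the no-op fixer and the "go.mod" path are illustrative only, not the real implementation, which resolves versions through Query as shown above.

package main

import (
    "fmt"
    "os"

    "golang.org/x/mod/modfile"
)

func main() {
    data, err := os.ReadFile("go.mod") // illustrative path
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }

    // A modfile.VersionFixer canonicalizes non-canonical versions (branch
    // names, short hashes) while the file is parsed. This stand-in simply
    // accepts whatever version it is given.
    fix := func(path, vers string) (string, error) {
        return vers, nil
    }

    f, err := modfile.Parse("go.mod", data, fix)
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
    if f.Module != nil {
        fmt.Println("module", f.Module.Mod.Path)
    }
}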
@@ -1248,8 +1254,8 @@ func fixVersion(ctx context.Context, fixed *bool) modfile.VersionFixer {
//
// This function affects the default cfg.BuildMod when outside of a module,
// so it can only be called prior to Init.
-func AllowMissingModuleImports() {
- if initialized {
+func AllowMissingModuleImports(loaderstate *State) {
+ if loaderstate.initialized {
panic("AllowMissingModuleImports after Init")
}
allowMissingModuleImports = true
@@ -1257,7 +1263,7 @@ func AllowMissingModuleImports() {
// makeMainModules creates a MainModuleSet and associated variables according to
// the given main modules.
-func makeMainModules(ms []module.Version, rootDirs []string, modFiles []*modfile.File, indices []*modFileIndex, workFile *modfile.WorkFile) *MainModuleSet {
+func makeMainModules(loaderstate *State, ms []module.Version, rootDirs []string, modFiles []*modfile.File, indices []*modFileIndex, workFile *modfile.WorkFile) *MainModuleSet {
for _, m := range ms {
if m.Version != "" {
panic("mainModulesCalled with module.Version with non empty Version field: " + fmt.Sprintf("%#v", m))
@@ -1332,7 +1338,7 @@ func makeMainModules(ms []module.Version, rootDirs []string, modFiles []*modfile
continue
}
var newV module.Version = r.New
- if WorkFilePath() != "" && newV.Version == "" && !filepath.IsAbs(newV.Path) {
+ if WorkFilePath(loaderstate) != "" && newV.Version == "" && !filepath.IsAbs(newV.Path) {
// Since we are in a workspace, we may be loading replacements from
// multiple go.mod files. Relative paths in those replacements are
// relative to the go.mod file, not the workspace, so the same string
@@ -1374,14 +1380,14 @@ func makeMainModules(ms []module.Version, rootDirs []string, modFiles []*modfile
// requirementsFromModFiles returns the set of non-excluded requirements from
// the global modFile.
-func requirementsFromModFiles(ctx context.Context, workFile *modfile.WorkFile, modFiles []*modfile.File, opts *PackageOpts) *Requirements {
+func requirementsFromModFiles(loaderstate *State, ctx context.Context, workFile *modfile.WorkFile, modFiles []*modfile.File, opts *PackageOpts) *Requirements {
var roots []module.Version
direct := map[string]bool{}
var pruning modPruning
- if inWorkspaceMode() {
+ if inWorkspaceMode(loaderstate) {
pruning = workspace
- roots = make([]module.Version, len(MainModules.Versions()), 2+len(MainModules.Versions()))
- copy(roots, MainModules.Versions())
+ roots = make([]module.Version, len(loaderstate.MainModules.Versions()), 2+len(loaderstate.MainModules.Versions()))
+ copy(roots, loaderstate.MainModules.Versions())
goVersion := gover.FromGoWork(workFile)
var toolchain string
if workFile.Toolchain != nil {
@@ -1390,16 +1396,16 @@ func requirementsFromModFiles(ctx context.Context, workFile *modfile.WorkFile, m
roots = appendGoAndToolchainRoots(roots, goVersion, toolchain, direct)
direct = directRequirements(modFiles)
} else {
- pruning = pruningForGoVersion(MainModules.GoVersion())
+ pruning = pruningForGoVersion(loaderstate.MainModules.GoVersion(loaderstate))
if len(modFiles) != 1 {
panic(fmt.Errorf("requirementsFromModFiles called with %v modfiles outside workspace mode", len(modFiles)))
}
modFile := modFiles[0]
- roots, direct = rootsFromModFile(MainModules.mustGetSingleMainModule(), modFile, withToolchainRoot)
+ roots, direct = rootsFromModFile(loaderstate, loaderstate.MainModules.mustGetSingleMainModule(loaderstate), modFile, withToolchainRoot)
}
gover.ModSort(roots)
- rs := newRequirements(pruning, roots, direct)
+ rs := newRequirements(loaderstate, pruning, roots, direct)
return rs
}
@@ -1422,7 +1428,7 @@ func directRequirements(modFiles []*modfile.File) map[string]bool {
return direct
}
-func rootsFromModFile(m module.Version, modFile *modfile.File, addToolchainRoot addToolchainRoot) (roots []module.Version, direct map[string]bool) {
+func rootsFromModFile(loaderstate *State, m module.Version, modFile *modfile.File, addToolchainRoot addToolchainRoot) (roots []module.Version, direct map[string]bool) {
direct = make(map[string]bool)
padding := 2 // Add padding for the toolchain and go version, added upon return.
if !addToolchainRoot {
@@ -1430,7 +1436,7 @@ func rootsFromModFile(m module.Version, modFile *modfile.File, addToolchainRoot
}
roots = make([]module.Version, 0, padding+len(modFile.Require))
for _, r := range modFile.Require {
- if index := MainModules.Index(m); index != nil && index.exclude[r.Mod] {
+ if index := loaderstate.MainModules.Index(m); index != nil && index.exclude[r.Mod] {
if cfg.BuildMod == "mod" {
fmt.Fprintf(os.Stderr, "go: dropping requirement on excluded version %s %s\n", r.Mod.Path, r.Mod.Version)
} else {
@@ -1471,9 +1477,9 @@ func appendGoAndToolchainRoots(roots []module.Version, goVersion, toolchain stri
// setDefaultBuildMod sets a default value for cfg.BuildMod if the -mod flag
// wasn't provided. setDefaultBuildMod may be called multiple times.
-func setDefaultBuildMod() {
+func setDefaultBuildMod(loaderstate *State) {
if cfg.BuildModExplicit {
- if inWorkspaceMode() && cfg.BuildMod != "readonly" && cfg.BuildMod != "vendor" {
+ if inWorkspaceMode(loaderstate) && cfg.BuildMod != "readonly" && cfg.BuildMod != "vendor" {
switch cfg.CmdName {
case "work sync", "mod graph", "mod verify", "mod why":
// These commands run with BuildMod set to mod, but they don't take the
@@ -1508,7 +1514,7 @@ func setDefaultBuildMod() {
cfg.BuildMod = "readonly"
return
}
- if modRoots == nil {
+ if loaderstate.modRoots == nil {
if allowMissingModuleImports {
cfg.BuildMod = "mod"
} else {
@@ -1517,29 +1523,29 @@ func setDefaultBuildMod() {
return
}
- if len(modRoots) >= 1 {
+ if len(loaderstate.modRoots) >= 1 {
var goVersion string
var versionSource string
- if inWorkspaceMode() {
+ if inWorkspaceMode(loaderstate) {
versionSource = "go.work"
- if wfg := MainModules.WorkFile().Go; wfg != nil {
+ if wfg := loaderstate.MainModules.WorkFile().Go; wfg != nil {
goVersion = wfg.Version
}
} else {
versionSource = "go.mod"
- index := MainModules.GetSingleIndexOrNil()
+ index := loaderstate.MainModules.GetSingleIndexOrNil(loaderstate)
if index != nil {
goVersion = index.goVersion
}
}
vendorDir := ""
- if workFilePath != "" {
- vendorDir = filepath.Join(filepath.Dir(workFilePath), "vendor")
+ if loaderstate.workFilePath != "" {
+ vendorDir = filepath.Join(filepath.Dir(loaderstate.workFilePath), "vendor")
} else {
- if len(modRoots) != 1 {
- panic(fmt.Errorf("outside workspace mode, but have %v modRoots", modRoots))
+ if len(loaderstate.modRoots) != 1 {
+ panic(fmt.Errorf("outside workspace mode, but have %v modRoots", loaderstate.modRoots))
}
- vendorDir = filepath.Join(modRoots[0], "vendor")
+ vendorDir = filepath.Join(loaderstate.modRoots[0], "vendor")
}
if fi, err := fsys.Stat(vendorDir); err == nil && fi.IsDir() {
if goVersion != "" {
@@ -1607,8 +1613,8 @@ func modulesTextIsForWorkspace(vendorDir string) (bool, error) {
return false, nil
}
-func mustHaveCompleteRequirements() bool {
- return cfg.BuildMod != "mod" && !inWorkspaceMode()
+func mustHaveCompleteRequirements(loaderstate *State) bool {
+ return cfg.BuildMod != "mod" && !inWorkspaceMode(loaderstate)
}
// addGoStmt adds a go directive to the go.mod file if it does not already
@@ -1803,21 +1809,21 @@ type WriteOpts struct {
// WriteGoMod writes the current build list back to go.mod.
func WriteGoMod(ctx context.Context, opts WriteOpts) error {
- requirements = LoadModFile(ctx)
- return commitRequirements(ctx, opts)
+ LoaderState.requirements = LoadModFile(LoaderState, ctx)
+ return commitRequirements(LoaderState, ctx, opts)
}
var errNoChange = errors.New("no update needed")
// UpdateGoModFromReqs returns a modified go.mod file using the current
// requirements. It does not commit these changes to disk.
-func UpdateGoModFromReqs(ctx context.Context, opts WriteOpts) (before, after []byte, modFile *modfile.File, err error) {
- if MainModules.Len() != 1 || MainModules.ModRoot(MainModules.Versions()[0]) == "" {
+func UpdateGoModFromReqs(loaderstate *State, ctx context.Context, opts WriteOpts) (before, after []byte, modFile *modfile.File, err error) {
+ if loaderstate.MainModules.Len() != 1 || loaderstate.MainModules.ModRoot(loaderstate.MainModules.Versions()[0]) == "" {
// We aren't in a module, so we don't have anywhere to write a go.mod file.
return nil, nil, nil, errNoChange
}
- mainModule := MainModules.mustGetSingleMainModule()
- modFile = MainModules.ModFile(mainModule)
+ mainModule := loaderstate.MainModules.mustGetSingleMainModule(loaderstate)
+ modFile = loaderstate.MainModules.ModFile(mainModule)
if modFile == nil {
// command-line-arguments has no .mod file to write.
return nil, nil, nil, errNoChange
@@ -1830,7 +1836,7 @@ func UpdateGoModFromReqs(ctx context.Context, opts WriteOpts) (before, after []b
var list []*modfile.Require
toolchain := ""
goVersion := ""
- for _, m := range requirements.rootModules {
+ for _, m := range loaderstate.requirements.rootModules {
if m.Path == "go" {
goVersion = m.Version
continue
@@ -1841,7 +1847,7 @@ func UpdateGoModFromReqs(ctx context.Context, opts WriteOpts) (before, after []b
}
list = append(list, &modfile.Require{
Mod: m,
- Indirect: !requirements.direct[m.Path],
+ Indirect: !loaderstate.requirements.direct[m.Path],
})
}
@@ -1911,13 +1917,13 @@ func UpdateGoModFromReqs(ctx context.Context, opts WriteOpts) (before, after []b
// go.mod or go.sum are out of date in a semantically significant way.
//
// In workspace mode, commitRequirements only writes changes to go.work.sum.
-func commitRequirements(ctx context.Context, opts WriteOpts) (err error) {
- if inWorkspaceMode() {
+func commitRequirements(loaderstate *State, ctx context.Context, opts WriteOpts) (err error) {
+ if inWorkspaceMode(loaderstate) {
// go.mod files aren't updated in workspace mode, but we still want to
// update the go.work.sum file.
- return modfetch.WriteGoSum(ctx, keepSums(ctx, loaded, requirements, addBuildListZipSums), mustHaveCompleteRequirements())
+ return modfetch.WriteGoSum(ctx, keepSums(loaderstate, ctx, loaded, loaderstate.requirements, addBuildListZipSums), mustHaveCompleteRequirements(loaderstate))
}
- _, updatedGoMod, modFile, err := UpdateGoModFromReqs(ctx, opts)
+ _, updatedGoMod, modFile, err := UpdateGoModFromReqs(loaderstate, ctx, opts)
if err != nil {
if errors.Is(err, errNoChange) {
return nil
@@ -1925,7 +1931,7 @@ func commitRequirements(ctx context.Context, opts WriteOpts) (err error) {
return err
}
- index := MainModules.GetSingleIndexOrNil()
+ index := loaderstate.MainModules.GetSingleIndexOrNil(loaderstate)
dirty := index.modFileIsDirty(modFile) || len(opts.DropTools) > 0 || len(opts.AddTools) > 0
if dirty && cfg.BuildMod != "mod" {
// If we're about to fail due to -mod=readonly,
@@ -1939,15 +1945,15 @@ func commitRequirements(ctx context.Context, opts WriteOpts) (err error) {
// Don't write go.mod, but write go.sum in case we added or trimmed sums.
// 'go mod init' shouldn't write go.sum, since it will be incomplete.
if cfg.CmdName != "mod init" {
- if err := modfetch.WriteGoSum(ctx, keepSums(ctx, loaded, requirements, addBuildListZipSums), mustHaveCompleteRequirements()); err != nil {
+ if err := modfetch.WriteGoSum(ctx, keepSums(loaderstate, ctx, loaded, loaderstate.requirements, addBuildListZipSums), mustHaveCompleteRequirements(loaderstate)); err != nil {
return err
}
}
return nil
}
- mainModule := MainModules.mustGetSingleMainModule()
- modFilePath := modFilePath(MainModules.ModRoot(mainModule))
+ mainModule := loaderstate.MainModules.mustGetSingleMainModule(loaderstate)
+ modFilePath := modFilePath(loaderstate.MainModules.ModRoot(mainModule))
if fsys.Replaced(modFilePath) {
if dirty {
return errors.New("updates to go.mod needed, but go.mod is part of the overlay specified with -overlay")
@@ -1956,13 +1962,13 @@ func commitRequirements(ctx context.Context, opts WriteOpts) (err error) {
}
defer func() {
// At this point we have determined to make the go.mod file on disk equal to new.
- MainModules.SetIndex(mainModule, indexModFile(updatedGoMod, modFile, mainModule, false))
+ loaderstate.MainModules.SetIndex(mainModule, indexModFile(updatedGoMod, modFile, mainModule, false))
// Update go.sum after releasing the side lock and refreshing the index.
// 'go mod init' shouldn't write go.sum, since it will be incomplete.
if cfg.CmdName != "mod init" {
if err == nil {
- err = modfetch.WriteGoSum(ctx, keepSums(ctx, loaded, requirements, addBuildListZipSums), mustHaveCompleteRequirements())
+ err = modfetch.WriteGoSum(ctx, keepSums(loaderstate, ctx, loaded, loaderstate.requirements, addBuildListZipSums), mustHaveCompleteRequirements(loaderstate))
}
}
}()
@@ -2005,7 +2011,7 @@ func commitRequirements(ctx context.Context, opts WriteOpts) (err error) {
// including any go.mod files needed to reconstruct the MVS result
// or identify go versions,
// in addition to the checksums for every module in keepMods.
-func keepSums(ctx context.Context, ld *loader, rs *Requirements, which whichSums) map[module.Version]bool {
+func keepSums(loaderstate *State, ctx context.Context, ld *loader, rs *Requirements, which whichSums) map[module.Version]bool {
// Every module in the full module graph contributes its requirements,
// so in order to ensure that the build list itself is reproducible,
// we need sums for every go.mod in the graph (regardless of whether
@@ -2018,12 +2024,12 @@ func keepSums(ctx context.Context, ld *loader, rs *Requirements, which whichSums
// ambiguous import errors the next time we load the package.
keepModSumsForZipSums := true
if ld == nil {
- if gover.Compare(MainModules.GoVersion(), gover.TidyGoModSumVersion) < 0 && cfg.BuildMod != "mod" {
+ if gover.Compare(loaderstate.MainModules.GoVersion(loaderstate), gover.TidyGoModSumVersion) < 0 && cfg.BuildMod != "mod" {
keepModSumsForZipSums = false
}
} else {
keepPkgGoModSums := true
- if gover.Compare(ld.requirements.GoVersion(), gover.TidyGoModSumVersion) < 0 && (ld.Tidy || cfg.BuildMod != "mod") {
+ if gover.Compare(ld.requirements.GoVersion(loaderstate), gover.TidyGoModSumVersion) < 0 && (ld.Tidy || cfg.BuildMod != "mod") {
keepPkgGoModSums = false
keepModSumsForZipSums = false
}
@@ -2041,21 +2047,21 @@ func keepSums(ctx context.Context, ld *loader, rs *Requirements, which whichSums
// minor, so we maintain the previous (buggy) behavior in 'go mod tidy' to
// avoid introducing unnecessary churn.
if keepPkgGoModSums {
- r := resolveReplacement(pkg.mod)
+ r := resolveReplacement(loaderstate, pkg.mod)
keep[modkey(r)] = true
}
if rs.pruning == pruned && pkg.mod.Path != "" {
- if v, ok := rs.rootSelected(pkg.mod.Path); ok && v == pkg.mod.Version {
+ if v, ok := rs.rootSelected(loaderstate, pkg.mod.Path); ok && v == pkg.mod.Version {
// pkg was loaded from a root module, and because the main module has
// a pruned module graph we do not check non-root modules for
// conflicts for packages that can be found in roots. So we only need
// the checksums for the root modules that may contain pkg, not all
// possible modules.
for prefix := pkg.path; prefix != "."; prefix = path.Dir(prefix) {
- if v, ok := rs.rootSelected(prefix); ok && v != "none" {
+ if v, ok := rs.rootSelected(loaderstate, prefix); ok && v != "none" {
m := module.Version{Path: prefix, Version: v}
- r := resolveReplacement(m)
+ r := resolveReplacement(loaderstate, m)
keep[r] = true
}
}
@@ -2063,11 +2069,11 @@ func keepSums(ctx context.Context, ld *loader, rs *Requirements, which whichSums
}
}
- mg, _ := rs.Graph(ctx)
+ mg, _ := rs.Graph(loaderstate, ctx)
for prefix := pkg.path; prefix != "."; prefix = path.Dir(prefix) {
if v := mg.Selected(prefix); v != "none" {
m := module.Version{Path: prefix, Version: v}
- r := resolveReplacement(m)
+ r := resolveReplacement(loaderstate, m)
keep[r] = true
}
}
@@ -2079,27 +2085,27 @@ func keepSums(ctx context.Context, ld *loader, rs *Requirements, which whichSums
// Save sums for the root modules (or their replacements), but don't
// incur the cost of loading the graph just to find and retain the sums.
for _, m := range rs.rootModules {
- r := resolveReplacement(m)
+ r := resolveReplacement(loaderstate, m)
keep[modkey(r)] = true
if which == addBuildListZipSums {
keep[r] = true
}
}
} else {
- mg, _ := rs.Graph(ctx)
+ mg, _ := rs.Graph(loaderstate, ctx)
mg.WalkBreadthFirst(func(m module.Version) {
if _, ok := mg.RequiredBy(m); ok {
// The requirements from m's go.mod file are present in the module graph,
// so they are relevant to the MVS result regardless of whether m was
// actually selected.
- r := resolveReplacement(m)
+ r := resolveReplacement(loaderstate, m)
keep[modkey(r)] = true
}
})
if which == addBuildListZipSums {
for _, m := range mg.BuildList() {
- r := resolveReplacement(m)
+ r := resolveReplacement(loaderstate, m)
if keepModSumsForZipSums {
keep[modkey(r)] = true // we need the go version from the go.mod file to do anything useful with the zipfile
}
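
The changes in this file follow one mechanical pattern: package-level loader globals (requirements, MainModules, modRoots, workFilePath, initialized) become fields of an explicit State value that is passed to every function that previously read the globals, with LoaderState serving as the package-wide instance at command entry points. The sketch below shows only the shape of that refactor; the type and field names stand in for the real ones, which carry many more fields.

package main

import "fmt"

// Requirements stands in for the loader's computed requirement graph.
type Requirements struct{ roots []string }

// Before (sketch): helpers read and write a package-level variable.
var requirements *Requirements

func commitGlobal() { fmt.Println("write", requirements.roots) }

// After (sketch): the same data is a field of an explicit State that every
// helper receives as a parameter; a package-level LoaderState is the single
// instance used by entry points.
type State struct {
    requirements *Requirements
    initialized  bool
}

var LoaderState = &State{}

func commitState(s *State) { fmt.Println("write", s.requirements.roots) }

func main() {
    requirements = &Requirements{roots: []string{"example.com/a v1.0.0"}}
    commitGlobal()

    LoaderState.requirements = &Requirements{roots: []string{"example.com/a v1.0.0"}}
    LoaderState.initialized = true
    commitState(LoaderState)
}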
diff --git a/src/cmd/go/internal/modload/list.go b/src/cmd/go/internal/modload/list.go
index 53cb6c2ffe1406..bd28d7596e160a 100644
--- a/src/cmd/go/internal/modload/list.go
+++ b/src/cmd/go/internal/modload/list.go
@@ -69,7 +69,7 @@ func ListModules(ctx context.Context, args []string, mode ListMode, reuseFile st
}
}
- rs, mods, err := listModules(ctx, LoadModFile(ctx), args, mode, reuse)
+ rs, mods, err := listModules(ctx, LoadModFile(LoaderState, ctx), args, mode, reuse)
type token struct{}
sem := make(chan token, runtime.GOMAXPROCS(0))
@@ -88,7 +88,7 @@ func ListModules(ctx context.Context, args []string, mode ListMode, reuseFile st
addVersions(ctx, m, mode&ListRetractedVersions != 0)
}
if mode&ListRetracted != 0 {
- addRetraction(ctx, m)
+ addRetraction(LoaderState, ctx, m)
}
if mode&ListDeprecated != 0 {
addDeprecation(ctx, m)
@@ -109,7 +109,7 @@ func ListModules(ctx context.Context, args []string, mode ListMode, reuseFile st
}
if err == nil {
- requirements = rs
+ LoaderState.requirements = rs
// TODO(#61605): The extra ListU clause fixes a problem with Go 1.21rc3
// where "go mod tidy" and "go list -m -u all" fight over whether the go.sum
// should be considered up-to-date. The fix for now is to always treat the
@@ -117,7 +117,7 @@ func ListModules(ctx context.Context, args []string, mode ListMode, reuseFile st
// but in general list -u is looking up other checksums in the checksum database
// that won't be necessary later, so it makes sense not to write the go.sum back out.
if !ExplicitWriteGoMod && mode&ListU == 0 {
- err = commitRequirements(ctx, WriteOpts{})
+ err = commitRequirements(LoaderState, ctx, WriteOpts{})
}
}
return mods, err
@@ -126,11 +126,11 @@ func ListModules(ctx context.Context, args []string, mode ListMode, reuseFile st
func listModules(ctx context.Context, rs *Requirements, args []string, mode ListMode, reuse map[module.Version]*modinfo.ModulePublic) (_ *Requirements, mods []*modinfo.ModulePublic, mgErr error) {
if len(args) == 0 {
var ms []*modinfo.ModulePublic
- for _, m := range MainModules.Versions() {
+ for _, m := range LoaderState.MainModules.Versions() {
if gover.IsToolchain(m.Path) {
continue
}
- ms = append(ms, moduleInfo(ctx, rs, m, mode, reuse))
+ ms = append(ms, moduleInfo(LoaderState, ctx, rs, m, mode, reuse))
}
return rs, ms, nil
}
@@ -145,25 +145,25 @@ func listModules(ctx context.Context, rs *Requirements, args []string, mode List
}
if arg == "all" || strings.Contains(arg, "...") {
needFullGraph = true
- if !HasModRoot() {
+ if !HasModRoot(LoaderState) {
base.Fatalf("go: cannot match %q: %v", arg, ErrNoModRoot)
}
continue
}
if path, vers, found := strings.Cut(arg, "@"); found {
if vers == "upgrade" || vers == "patch" {
- if _, ok := rs.rootSelected(path); !ok || rs.pruning == unpruned {
+ if _, ok := rs.rootSelected(LoaderState, path); !ok || rs.pruning == unpruned {
needFullGraph = true
- if !HasModRoot() {
+ if !HasModRoot(LoaderState) {
base.Fatalf("go: cannot match %q: %v", arg, ErrNoModRoot)
}
}
}
continue
}
- if _, ok := rs.rootSelected(arg); !ok || rs.pruning == unpruned {
+ if _, ok := rs.rootSelected(LoaderState, arg); !ok || rs.pruning == unpruned {
needFullGraph = true
- if mode&ListVersions == 0 && !HasModRoot() {
+ if mode&ListVersions == 0 && !HasModRoot(LoaderState) {
base.Fatalf("go: cannot match %q without -versions or an explicit version: %v", arg, ErrNoModRoot)
}
}
@@ -171,7 +171,7 @@ func listModules(ctx context.Context, rs *Requirements, args []string, mode List
var mg *ModuleGraph
if needFullGraph {
- rs, mg, mgErr = expandGraph(ctx, rs)
+ rs, mg, mgErr = expandGraph(LoaderState, ctx, rs)
}
matchedModule := map[module.Version]bool{}
@@ -179,7 +179,7 @@ func listModules(ctx context.Context, rs *Requirements, args []string, mode List
if path, vers, found := strings.Cut(arg, "@"); found {
var current string
if mg == nil {
- current, _ = rs.rootSelected(path)
+ current, _ = rs.rootSelected(LoaderState, path)
} else {
current = mg.Selected(path)
}
@@ -198,7 +198,7 @@ func listModules(ctx context.Context, rs *Requirements, args []string, mode List
// specific revision or used 'go list -retracted'.
allowed = nil
}
- info, err := queryReuse(ctx, path, vers, current, allowed, reuse)
+ info, err := queryReuse(LoaderState, ctx, path, vers, current, allowed, reuse)
if err != nil {
var origin *codehost.Origin
if info != nil {
@@ -217,7 +217,7 @@ func listModules(ctx context.Context, rs *Requirements, args []string, mode List
// *Requirements instead.
var noRS *Requirements
- mod := moduleInfo(ctx, noRS, module.Version{Path: path, Version: info.Version}, mode, reuse)
+ mod := moduleInfo(LoaderState, ctx, noRS, module.Version{Path: path, Version: info.Version}, mode, reuse)
if vers != mod.Version {
mod.Query = vers
}
@@ -237,7 +237,7 @@ func listModules(ctx context.Context, rs *Requirements, args []string, mode List
var v string
if mg == nil {
var ok bool
- v, ok = rs.rootSelected(arg)
+ v, ok = rs.rootSelected(LoaderState, arg)
if !ok {
// We checked rootSelected(arg) in the earlier args loop, so if there
// is no such root we should have loaded a non-nil mg.
@@ -251,7 +251,7 @@ func listModules(ctx context.Context, rs *Requirements, args []string, mode List
continue
}
if v != "none" {
- mods = append(mods, moduleInfo(ctx, rs, module.Version{Path: arg, Version: v}, mode, reuse))
+ mods = append(mods, moduleInfo(LoaderState, ctx, rs, module.Version{Path: arg, Version: v}, mode, reuse))
} else if cfg.BuildMod == "vendor" {
// In vendor mode, we can't determine whether a missing module is “a
// known dependency” because the module graph is incomplete.
@@ -292,7 +292,7 @@ func listModules(ctx context.Context, rs *Requirements, args []string, mode List
fetchedMods := make([]*modinfo.ModulePublic, len(matches))
for i, m := range matches {
q.Add(func() {
- fetchedMods[i] = moduleInfo(ctx, rs, m, mode, reuse)
+ fetchedMods[i] = moduleInfo(LoaderState, ctx, rs, m, mode, reuse)
})
}
<-q.Idle()
@@ -305,13 +305,11 @@ func listModules(ctx context.Context, rs *Requirements, args []string, mode List
// modinfoError wraps an error to create an error message in
// modinfo.ModuleError with minimal redundancy.
func modinfoError(path, vers string, err error) *modinfo.ModuleError {
- var nerr *NoMatchingVersionError
- var merr *module.ModuleError
- if errors.As(err, &nerr) {
+ if _, ok := errors.AsType[*NoMatchingVersionError](err); ok {
// NoMatchingVersionError contains the query, so we don't mention the
// query again in ModuleError.
err = &module.ModuleError{Path: path, Err: err}
- } else if !errors.As(err, &merr) {
+ } else if _, ok := errors.AsType[*module.ModuleError](err); !ok {
// If the error does not contain path and version, wrap it in a
// module.ModuleError.
err = &module.ModuleError{Path: path, Version: vers, Err: err}
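
The modinfoError change above, like the similar ones in load.go below, replaces the pointer-target form of errors.As with the generic errors.AsType, which returns the matched value and a boolean instead of filling a target variable. A small self-contained comparison of the two forms follows; MyError is an invented type for illustration, and the AsType call assumes a toolchain that includes the generic errors.AsType whose (value, ok) shape is inferred from the call sites in this patch.

package main

import (
    "errors"
    "fmt"
)

type MyError struct{ Code int }

func (e *MyError) Error() string { return fmt.Sprintf("code %d", e.Code) }

func main() {
    err := fmt.Errorf("wrapped: %w", &MyError{Code: 7})

    // Pointer-target form: declare a variable and pass its address.
    var me *MyError
    if errors.As(err, &me) {
        fmt.Println("As:", me.Code)
    }

    // Generic form: the type parameter names the target type and the
    // matched value comes back directly along with an ok bool.
    if me, ok := errors.AsType[*MyError](err); ok {
        fmt.Println("AsType:", me.Code)
    }
}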
diff --git a/src/cmd/go/internal/modload/load.go b/src/cmd/go/internal/modload/load.go
index 8b2be3b300e9e1..ad3b80bfd954aa 100644
--- a/src/cmd/go/internal/modload/load.go
+++ b/src/cmd/go/internal/modload/load.go
@@ -250,7 +250,7 @@ type PackageOpts struct {
// LoadPackages identifies the set of packages matching the given patterns and
// loads the packages in the import graph rooted at that set.
-func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (matches []*search.Match, loadedPackages []string) {
+func LoadPackages(loaderstate *State, ctx context.Context, opts PackageOpts, patterns ...string) (matches []*search.Match, loadedPackages []string) {
if opts.Tags == nil {
opts.Tags = imports.Tags()
}
@@ -271,11 +271,11 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma
case m.IsLocal():
// Evaluate list of file system directories on first iteration.
if m.Dirs == nil {
- matchModRoots := modRoots
+ matchModRoots := loaderstate.modRoots
if opts.MainModule != (module.Version{}) {
- matchModRoots = []string{MainModules.ModRoot(opts.MainModule)}
+ matchModRoots = []string{loaderstate.MainModules.ModRoot(opts.MainModule)}
}
- matchLocalDirs(ctx, matchModRoots, m, rs)
+ matchLocalDirs(loaderstate, ctx, matchModRoots, m, rs)
}
// Make a copy of the directory list and translate to import paths.
@@ -286,7 +286,7 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma
// the loader iterations.
m.Pkgs = m.Pkgs[:0]
for _, dir := range m.Dirs {
- pkg, err := resolveLocalPackage(ctx, dir, rs)
+ pkg, err := resolveLocalPackage(loaderstate, ctx, dir, rs)
if err != nil {
if !m.IsLiteral() && (err == errPkgIsBuiltin || err == errPkgIsGorootSrc) {
continue // Don't include "builtin" or GOROOT/src in wildcard patterns.
@@ -294,8 +294,8 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma
// If we're outside of a module, ensure that the failure mode
// indicates that.
- if !HasModRoot() {
- die()
+ if !HasModRoot(loaderstate) {
+ die(loaderstate)
}
if ld != nil {
@@ -311,7 +311,7 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma
case strings.Contains(m.Pattern(), "..."):
m.Errs = m.Errs[:0]
- mg, err := rs.Graph(ctx)
+ mg, err := rs.Graph(loaderstate, ctx)
if err != nil {
// The module graph is (or may be) incomplete — perhaps we failed to
// load the requirements of some module. This is an error in matching
@@ -321,26 +321,26 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma
// necessarily prevent us from loading the packages we could find.
m.Errs = append(m.Errs, err)
}
- matchPackages(ctx, m, opts.Tags, includeStd, mg.BuildList())
+ matchPackages(loaderstate, ctx, m, opts.Tags, includeStd, mg.BuildList())
case m.Pattern() == "work":
- matchModules := MainModules.Versions()
+ matchModules := loaderstate.MainModules.Versions()
if opts.MainModule != (module.Version{}) {
matchModules = []module.Version{opts.MainModule}
}
- matchPackages(ctx, m, opts.Tags, omitStd, matchModules)
+ matchPackages(loaderstate, ctx, m, opts.Tags, omitStd, matchModules)
case m.Pattern() == "all":
if ld == nil {
// The initial roots are the packages and tools in the main module.
// loadFromRoots will expand that to "all".
m.Errs = m.Errs[:0]
- matchModules := MainModules.Versions()
+ matchModules := loaderstate.MainModules.Versions()
if opts.MainModule != (module.Version{}) {
matchModules = []module.Version{opts.MainModule}
}
- matchPackages(ctx, m, opts.Tags, omitStd, matchModules)
- for tool := range MainModules.Tools() {
+ matchPackages(loaderstate, ctx, m, opts.Tags, omitStd, matchModules)
+ for tool := range loaderstate.MainModules.Tools() {
m.Pkgs = append(m.Pkgs, tool)
}
} else {
@@ -355,7 +355,7 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma
}
case m.Pattern() == "tool":
- for tool := range MainModules.Tools() {
+ for tool := range loaderstate.MainModules.Tools() {
m.Pkgs = append(m.Pkgs, tool)
}
default:
@@ -364,12 +364,12 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma
}
}
- initialRS, err := loadModFile(ctx, &opts)
+ initialRS, err := loadModFile(loaderstate, ctx, &opts)
if err != nil {
base.Fatal(err)
}
- ld := loadFromRoots(ctx, loaderParams{
+ ld := loadFromRoots(loaderstate, ctx, loaderParams{
PackageOpts: opts,
requirements: initialRS,
@@ -404,7 +404,7 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma
if opts.Tidy {
if cfg.BuildV {
- mg, _ := ld.requirements.Graph(ctx)
+ mg, _ := ld.requirements.Graph(loaderstate, ctx)
for _, m := range initialRS.rootModules {
var unused bool
if ld.requirements.pruning == unpruned {
@@ -416,7 +416,7 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma
// m is unused if it was dropped from the roots. If it is still present
// as a transitive dependency, that transitive dependency is not needed
// by any package or test in the main module.
- _, ok := ld.requirements.rootSelected(m.Path)
+ _, ok := ld.requirements.rootSelected(loaderstate, m.Path)
unused = !ok
}
if unused {
@@ -425,9 +425,9 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma
}
}
- keep := keepSums(ctx, ld, ld.requirements, loadedZipSumsOnly)
+ keep := keepSums(loaderstate, ctx, ld, ld.requirements, loadedZipSumsOnly)
compatVersion := ld.TidyCompatibleVersion
- goVersion := ld.requirements.GoVersion()
+ goVersion := ld.requirements.GoVersion(loaderstate)
if compatVersion == "" {
if gover.Compare(goVersion, gover.GoStrictVersion) < 0 {
compatVersion = gover.Prev(goVersion)
@@ -444,10 +444,10 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma
compatVersion = goVersion
}
if compatPruning := pruningForGoVersion(compatVersion); compatPruning != ld.requirements.pruning {
- compatRS := newRequirements(compatPruning, ld.requirements.rootModules, ld.requirements.direct)
- ld.checkTidyCompatibility(ctx, compatRS, compatVersion)
+ compatRS := newRequirements(loaderstate, compatPruning, ld.requirements.rootModules, ld.requirements.direct)
+ ld.checkTidyCompatibility(loaderstate, ctx, compatRS, compatVersion)
- for m := range keepSums(ctx, ld, compatRS, loadedZipSumsOnly) {
+ for m := range keepSums(loaderstate, ctx, ld, compatRS, loadedZipSumsOnly) {
keep[m] = true
}
}
@@ -455,8 +455,8 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma
if opts.TidyDiff {
cfg.BuildMod = "readonly"
loaded = ld
- requirements = loaded.requirements
- currentGoMod, updatedGoMod, _, err := UpdateGoModFromReqs(ctx, WriteOpts{})
+ loaderstate.requirements = loaded.requirements
+ currentGoMod, updatedGoMod, _, err := UpdateGoModFromReqs(loaderstate, ctx, WriteOpts{})
if err != nil {
base.Fatal(err)
}
@@ -466,7 +466,7 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma
// Dropping compatibility for 1.16 may result in a strictly smaller go.sum.
// Update the keep map with only the loaded.requirements.
if gover.Compare(compatVersion, "1.16") > 0 {
- keep = keepSums(ctx, loaded, requirements, addBuildListZipSums)
+ keep = keepSums(loaderstate, ctx, loaded, loaderstate.requirements, addBuildListZipSums)
}
currentGoSum, tidyGoSum := modfetch.TidyGoSum(keep)
goSumDiff := diff.Diff("current/go.sum", currentGoSum, "tidy/go.sum", tidyGoSum)
@@ -490,7 +490,7 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma
// loaded.requirements, but here we may have also loaded (and want to
// preserve checksums for) additional entities from compatRS, which are
// only needed for compatibility with ld.TidyCompatibleVersion.
- if err := modfetch.WriteGoSum(ctx, keep, mustHaveCompleteRequirements()); err != nil {
+ if err := modfetch.WriteGoSum(ctx, keep, mustHaveCompleteRequirements(loaderstate)); err != nil {
base.Fatal(err)
}
}
@@ -505,7 +505,7 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma
// to call WriteGoMod itself) or if ResolveMissingImports is false (the
// command wants to examine the package graph as-is).
loaded = ld
- requirements = loaded.requirements
+ loaderstate.requirements = loaded.requirements
for _, pkg := range ld.pkgs {
if !pkg.isTest() {
@@ -515,7 +515,7 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma
sort.Strings(loadedPackages)
if !ExplicitWriteGoMod && opts.ResolveMissingImports {
- if err := commitRequirements(ctx, WriteOpts{}); err != nil {
+ if err := commitRequirements(loaderstate, ctx, WriteOpts{}); err != nil {
base.Fatal(err)
}
}
@@ -525,7 +525,7 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma
// matchLocalDirs is like m.MatchDirs, but tries to avoid scanning directories
// outside of the standard library and active modules.
-func matchLocalDirs(ctx context.Context, modRoots []string, m *search.Match, rs *Requirements) {
+func matchLocalDirs(loaderstate *State, ctx context.Context, modRoots []string, m *search.Match, rs *Requirements) {
if !m.IsLocal() {
panic(fmt.Sprintf("internal error: resolveLocalDirs on non-local pattern %s", m.Pattern()))
}
@@ -543,10 +543,10 @@ func matchLocalDirs(ctx context.Context, modRoots []string, m *search.Match, rs
}
modRoot := findModuleRoot(absDir)
- if !slices.Contains(modRoots, modRoot) && search.InDir(absDir, cfg.GOROOTsrc) == "" && pathInModuleCache(ctx, absDir, rs) == "" {
+ if !slices.Contains(modRoots, modRoot) && search.InDir(absDir, cfg.GOROOTsrc) == "" && pathInModuleCache(loaderstate, ctx, absDir, rs) == "" {
m.Dirs = []string{}
scope := "main module or its selected dependencies"
- if inWorkspaceMode() {
+ if inWorkspaceMode(loaderstate) {
scope = "modules listed in go.work or their selected dependencies"
}
m.AddError(fmt.Errorf("directory prefix %s does not contain %s", base.ShortPath(absDir), scope))
@@ -558,7 +558,7 @@ func matchLocalDirs(ctx context.Context, modRoots []string, m *search.Match, rs
}
// resolveLocalPackage resolves a filesystem path to a package path.
-func resolveLocalPackage(ctx context.Context, dir string, rs *Requirements) (string, error) {
+func resolveLocalPackage(loaderstate *State, ctx context.Context, dir string, rs *Requirements) (string, error) {
var absDir string
if filepath.IsAbs(dir) {
absDir = filepath.Clean(dir)
@@ -596,13 +596,13 @@ func resolveLocalPackage(ctx context.Context, dir string, rs *Requirements) (str
}
}
- for _, mod := range MainModules.Versions() {
- modRoot := MainModules.ModRoot(mod)
+ for _, mod := range loaderstate.MainModules.Versions() {
+ modRoot := loaderstate.MainModules.ModRoot(mod)
if modRoot != "" && absDir == modRoot {
if absDir == cfg.GOROOTsrc {
return "", errPkgIsGorootSrc
}
- return MainModules.PathPrefix(mod), nil
+ return loaderstate.MainModules.PathPrefix(mod), nil
}
}
@@ -611,8 +611,8 @@ func resolveLocalPackage(ctx context.Context, dir string, rs *Requirements) (str
// It's not strictly necessary but helpful to keep the checks.
var pkgNotFoundErr error
pkgNotFoundLongestPrefix := ""
- for _, mainModule := range MainModules.Versions() {
- modRoot := MainModules.ModRoot(mainModule)
+ for _, mainModule := range loaderstate.MainModules.Versions() {
+ modRoot := loaderstate.MainModules.ModRoot(mainModule)
if modRoot != "" && str.HasFilePathPrefix(absDir, modRoot) && !strings.Contains(absDir[len(modRoot):], "@") {
suffix := filepath.ToSlash(str.TrimFilePathPrefix(absDir, modRoot))
if pkg, found := strings.CutPrefix(suffix, "vendor/"); found {
@@ -620,14 +620,14 @@ func resolveLocalPackage(ctx context.Context, dir string, rs *Requirements) (str
return "", fmt.Errorf("without -mod=vendor, directory %s has no package path", absDir)
}
- readVendorList(VendorDir())
+ readVendorList(VendorDir(loaderstate))
if _, ok := vendorPkgModule[pkg]; !ok {
return "", fmt.Errorf("directory %s is not a package listed in vendor/modules.txt", absDir)
}
return pkg, nil
}
- mainModulePrefix := MainModules.PathPrefix(mainModule)
+ mainModulePrefix := loaderstate.MainModules.PathPrefix(mainModule)
if mainModulePrefix == "" {
pkg := suffix
if pkg == "builtin" {
@@ -668,13 +668,13 @@ func resolveLocalPackage(ctx context.Context, dir string, rs *Requirements) (str
return pkg, nil
}
- pkg := pathInModuleCache(ctx, absDir, rs)
+ pkg := pathInModuleCache(loaderstate, ctx, absDir, rs)
if pkg == "" {
dirstr := fmt.Sprintf("directory %s", base.ShortPath(absDir))
if dirstr == "directory ." {
dirstr = "current directory"
}
- if inWorkspaceMode() {
+ if inWorkspaceMode(loaderstate) {
if mr := findModuleRoot(absDir); mr != "" {
return "", fmt.Errorf("%s is contained in a module that is not one of the workspace modules listed in go.work. You can add the module to the workspace using:\n\tgo work use %s", dirstr, base.ShortPath(mr))
}
@@ -693,17 +693,17 @@ var (
// pathInModuleCache returns the import path of the directory dir,
// if dir is in the module cache copy of a module in our build list.
-func pathInModuleCache(ctx context.Context, dir string, rs *Requirements) string {
+func pathInModuleCache(loaderstate *State, ctx context.Context, dir string, rs *Requirements) string {
tryMod := func(m module.Version) (string, bool) {
if gover.IsToolchain(m.Path) {
return "", false
}
var root string
var err error
- if repl := Replacement(m); repl.Path != "" && repl.Version == "" {
+ if repl := Replacement(loaderstate, m); repl.Path != "" && repl.Version == "" {
root = repl.Path
if !filepath.IsAbs(root) {
- root = filepath.Join(replaceRelativeTo(), root)
+ root = filepath.Join(replaceRelativeTo(loaderstate), root)
}
} else if repl.Path != "" {
root, err = modfetch.DownloadDir(ctx, repl)
@@ -728,7 +728,7 @@ func pathInModuleCache(ctx context.Context, dir string, rs *Requirements) string
if rs.pruning == pruned {
for _, m := range rs.rootModules {
- if v, _ := rs.rootSelected(m.Path); v != m.Version {
+ if v, _ := rs.rootSelected(loaderstate, m.Path); v != m.Version {
continue // m is a root, but we have a higher root for the same path.
}
if importPath, ok := tryMod(m); ok {
@@ -747,7 +747,7 @@ func pathInModuleCache(ctx context.Context, dir string, rs *Requirements) string
// versions of root modules may differ from what we already checked above.
// Re-check those paths too.
- mg, _ := rs.Graph(ctx)
+ mg, _ := rs.Graph(loaderstate, ctx)
var importPath string
for _, m := range mg.BuildList() {
var found bool
@@ -766,8 +766,8 @@ func pathInModuleCache(ctx context.Context, dir string, rs *Requirements) string
//
// TODO(bcmills): Silencing errors seems off. Take a closer look at this and
// figure out what the error-reporting actually ought to be.
-func ImportFromFiles(ctx context.Context, gofiles []string) {
- rs := LoadModFile(ctx)
+func ImportFromFiles(loaderstate *State, ctx context.Context, gofiles []string) {
+ rs := LoadModFile(loaderstate, ctx)
tags := imports.Tags()
imports, testImports, err := imports.ScanFiles(gofiles, tags)
@@ -775,7 +775,7 @@ func ImportFromFiles(ctx context.Context, gofiles []string) {
base.Fatal(err)
}
- loaded = loadFromRoots(ctx, loaderParams{
+ loaded = loadFromRoots(loaderstate, ctx, loaderParams{
PackageOpts: PackageOpts{
Tags: tags,
ResolveMissingImports: true,
@@ -788,10 +788,10 @@ func ImportFromFiles(ctx context.Context, gofiles []string) {
return roots
},
})
- requirements = loaded.requirements
+ loaderstate.requirements = loaded.requirements
if !ExplicitWriteGoMod {
- if err := commitRequirements(ctx, WriteOpts{}); err != nil {
+ if err := commitRequirements(loaderstate, ctx, WriteOpts{}); err != nil {
base.Fatal(err)
}
}
@@ -799,11 +799,11 @@ func ImportFromFiles(ctx context.Context, gofiles []string) {
// DirImportPath returns the effective import path for dir,
// provided it is within a main module, or else returns ".".
-func (mms *MainModuleSet) DirImportPath(ctx context.Context, dir string) (path string, m module.Version) {
- if !HasModRoot() {
+func (mms *MainModuleSet) DirImportPath(loaderstate *State, ctx context.Context, dir string) (path string, m module.Version) {
+ if !HasModRoot(loaderstate) {
return ".", module.Version{}
}
- LoadModFile(ctx) // Sets targetPrefix.
+ LoadModFile(loaderstate, ctx) // Sets targetPrefix.
if !filepath.IsAbs(dir) {
dir = filepath.Join(base.Cwd(), dir)
@@ -820,7 +820,7 @@ func (mms *MainModuleSet) DirImportPath(ctx context.Context, dir string) (path s
return mms.PathPrefix(v), v
}
if str.HasFilePathPrefix(dir, modRoot) {
- pathPrefix := MainModules.PathPrefix(v)
+ pathPrefix := loaderstate.MainModules.PathPrefix(v)
if pathPrefix > longestPrefix {
longestPrefix = pathPrefix
longestPrefixVersion = v
@@ -853,13 +853,13 @@ func PackageModule(path string) module.Version {
// the package at path as imported from the package in parentDir.
// Lookup requires that one of the Load functions in this package has already
// been called.
-func Lookup(parentPath string, parentIsStd bool, path string) (dir, realPath string, err error) {
+func Lookup(loaderstate *State, parentPath string, parentIsStd bool, path string) (dir, realPath string, err error) {
if path == "" {
panic("Lookup called with empty package path")
}
if parentIsStd {
- path = loaded.stdVendor(parentPath, path)
+ path = loaded.stdVendor(loaderstate, parentPath, path)
}
pkg, ok := loaded.pkgCache.Get(path)
if !ok {
@@ -957,11 +957,11 @@ func (ld *loader) exitIfErrors(ctx context.Context) {
// goVersion reports the Go version that should be used for the loader's
// requirements: ld.TidyGoVersion if set, or ld.requirements.GoVersion()
// otherwise.
-func (ld *loader) goVersion() string {
+func (ld *loader) goVersion(loaderstate *State) string {
if ld.TidyGoVersion != "" {
return ld.TidyGoVersion
}
- return ld.requirements.GoVersion()
+ return ld.requirements.GoVersion(loaderstate)
}
// A loadPkg records information about a single loaded package.
@@ -1064,11 +1064,11 @@ func (pkg *loadPkg) isTest() bool {
// fromExternalModule reports whether pkg was loaded from a module other than
// the main module.
-func (pkg *loadPkg) fromExternalModule() bool {
+func (pkg *loadPkg) fromExternalModule(loaderstate *State) bool {
if pkg.mod.Path == "" {
return false // loaded from the standard library, not a module
}
- return !MainModules.Contains(pkg.mod.Path)
+ return !loaderstate.MainModules.Contains(pkg.mod.Path)
}
var errMissing = errors.New("cannot find package")
@@ -1079,7 +1079,7 @@ var errMissing = errors.New("cannot find package")
// The set of root packages is returned by the params.listRoots function, and
// expanded to the full set of packages by tracing imports (and possibly tests)
// as needed.
-func loadFromRoots(ctx context.Context, params loaderParams) *loader {
+func loadFromRoots(loaderstate *State, ctx context.Context, params loaderParams) *loader {
ld := &loader{
loaderParams: params,
work: par.NewQueue(runtime.GOMAXPROCS(0)),
@@ -1095,7 +1095,7 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader {
// spot-checks in modules that do not maintain the expanded go.mod
// requirements needed for graph pruning.
var err error
- ld.requirements, _, err = expandGraph(ctx, ld.requirements)
+ ld.requirements, _, err = expandGraph(loaderstate, ctx, ld.requirements)
if err != nil {
ld.error(err)
}
@@ -1103,11 +1103,11 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader {
ld.exitIfErrors(ctx)
updateGoVersion := func() {
- goVersion := ld.goVersion()
+ goVersion := ld.goVersion(loaderstate)
if ld.requirements.pruning != workspace {
var err error
- ld.requirements, err = convertPruning(ctx, ld.requirements, pruningForGoVersion(goVersion))
+ ld.requirements, err = convertPruning(loaderstate, ctx, ld.requirements, pruningForGoVersion(goVersion))
if err != nil {
ld.error(err)
ld.exitIfErrors(ctx)
@@ -1141,7 +1141,7 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader {
// set of root packages does not change then we can select the correct
// versions of all transitive imports on the first try and complete
// loading in a single iteration.
- changedBuildList := ld.preloadRootModules(ctx, rootPkgs)
+ changedBuildList := ld.preloadRootModules(loaderstate, ctx, rootPkgs)
if changedBuildList {
// The build list has changed, so the set of root packages may have also
// changed. Start over to pick up the changes. (Preloading roots is much
@@ -1154,7 +1154,7 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader {
inRoots := map[*loadPkg]bool{}
for _, path := range rootPkgs {
- root := ld.pkg(ctx, path, pkgIsRoot)
+ root := ld.pkg(loaderstate, ctx, path, pkgIsRoot)
if !inRoots[root] {
ld.roots = append(ld.roots, root)
inRoots[root] = true
@@ -1170,7 +1170,7 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader {
ld.buildStacks()
- changed, err := ld.updateRequirements(ctx)
+ changed, err := ld.updateRequirements(loaderstate, ctx)
if err != nil {
ld.error(err)
break
@@ -1184,12 +1184,12 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader {
continue
}
- if !ld.ResolveMissingImports || (!HasModRoot() && !allowMissingModuleImports) {
+ if !ld.ResolveMissingImports || (!HasModRoot(loaderstate) && !allowMissingModuleImports) {
// We've loaded as much as we can without resolving missing imports.
break
}
- modAddedBy, err := ld.resolveMissingImports(ctx)
+ modAddedBy, err := ld.resolveMissingImports(loaderstate, ctx)
if err != nil {
ld.error(err)
break
@@ -1216,7 +1216,7 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader {
// iteration so we don't need to also update it here. (That would waste time
// computing a "direct" map that we'll have to recompute later anyway.)
direct := ld.requirements.direct
- rs, err := updateRoots(ctx, direct, ld.requirements, noPkgs, toAdd, ld.AssumeRootsImported)
+ rs, err := updateRoots(loaderstate, ctx, direct, ld.requirements, noPkgs, toAdd, ld.AssumeRootsImported)
if err != nil {
// If an error was found in a newly added module, report the package
// import stack instead of the module requirement stack. Packages
@@ -1244,7 +1244,7 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader {
// Tidy the build list, if applicable, before we report errors.
// (The process of tidying may remove errors from irrelevant dependencies.)
if ld.Tidy {
- rs, err := tidyRoots(ctx, ld.requirements, ld.pkgs)
+ rs, err := tidyRoots(loaderstate, ctx, ld.requirements, ld.pkgs)
if err != nil {
ld.error(err)
} else {
@@ -1252,8 +1252,8 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader {
// Attempt to switch to the requested Go version. We have been using its
// pruning and semantics all along, but there may have been — and may
// still be — requirements on higher versions in the graph.
- tidy := overrideRoots(ctx, rs, []module.Version{{Path: "go", Version: ld.TidyGoVersion}})
- mg, err := tidy.Graph(ctx)
+ tidy := overrideRoots(loaderstate, ctx, rs, []module.Version{{Path: "go", Version: ld.TidyGoVersion}})
+ mg, err := tidy.Graph(loaderstate, ctx)
if err != nil {
ld.error(err)
}
@@ -1285,7 +1285,7 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader {
if m.Path == "go" && ld.TidyGoVersion != "" {
continue
}
- if v, ok := ld.requirements.rootSelected(m.Path); !ok || v != m.Version {
+ if v, ok := ld.requirements.rootSelected(loaderstate, m.Path); !ok || v != m.Version {
ld.error(fmt.Errorf("internal error: a requirement on %v is needed but was not added during package loading (selected %s)", m, v))
}
}
@@ -1304,7 +1304,7 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader {
}
// Add importer information to checksum errors.
- if sumErr := (*ImportMissingSumError)(nil); errors.As(pkg.err, &sumErr) {
+ if sumErr, ok := errors.AsType[*ImportMissingSumError](pkg.err); ok {
if importer := pkg.stack; importer != nil {
sumErr.importer = importer.path
sumErr.importerVersion = importer.mod.Version
@@ -1312,7 +1312,7 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader {
}
}
- if stdErr := (*ImportMissingError)(nil); errors.As(pkg.err, &stdErr) && stdErr.isStd {
+ if stdErr, ok := errors.AsType[*ImportMissingError](pkg.err); ok && stdErr.isStd {
// Add importer go version information to import errors of standard
// library packages arising from newer releases.
if importer := pkg.stack; importer != nil {
@@ -1334,7 +1334,7 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader {
ld.error(fmt.Errorf("%s: %w", pkg.stackText(), pkg.err))
}
- ld.checkMultiplePaths()
+ ld.checkMultiplePaths(loaderstate)
return ld
}
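
As the hunks above show, loadFromRoots keeps reloading until updateRequirements reports no change: each pass may add root modules, which can change selected versions, which can change how packages resolve, so the loop repeats until it reaches a fixed point (bounded because roots only grow and version sets are finite). The toy sketch below illustrates only that fixed-point structure; the dependency map and the expansion step are hypothetical stand-ins for the real package loading and requirement updates.

package main

import "fmt"

func main() {
    // Hypothetical import edges "discovered" while loading packages.
    deps := map[string][]string{"main": {"a", "b"}, "a": {"c"}}

    // Start from the root packages and keep expanding until a pass adds
    // nothing new, mirroring how the loader iterates until the build list
    // and the set of loaded packages stop changing.
    loaded := map[string]bool{"main": true}
    for pass := 1; ; pass++ {
        changed := false
        var frontier []string
        for p := range loaded {
            frontier = append(frontier, p)
        }
        for _, p := range frontier {
            for _, d := range deps[p] {
                if !loaded[d] {
                    loaded[d] = true
                    changed = true
                }
            }
        }
        fmt.Printf("pass %d: %d packages loaded\n", pass, len(loaded))
        if !changed {
            break
        }
    }
}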
@@ -1357,7 +1357,7 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader {
// The "changed" return value reports whether the update changed the selected
// version of any module that either provided a loaded package or may now
// provide a package that was previously unresolved.
-func (ld *loader) updateRequirements(ctx context.Context) (changed bool, err error) {
+func (ld *loader) updateRequirements(loaderstate *State, ctx context.Context) (changed bool, err error) {
rs := ld.requirements
// direct contains the set of modules believed to provide packages directly
@@ -1384,22 +1384,22 @@ func (ld *loader) updateRequirements(ctx context.Context) (changed bool, err err
var maxTooNew *gover.TooNewError
for _, pkg := range ld.pkgs {
if pkg.err != nil {
- if tooNew := (*gover.TooNewError)(nil); errors.As(pkg.err, &tooNew) {
+ if tooNew, ok := errors.AsType[*gover.TooNewError](pkg.err); ok {
if maxTooNew == nil || gover.Compare(tooNew.GoVersion, maxTooNew.GoVersion) > 0 {
maxTooNew = tooNew
}
}
}
- if pkg.mod.Version != "" || !MainModules.Contains(pkg.mod.Path) {
+ if pkg.mod.Version != "" || !loaderstate.MainModules.Contains(pkg.mod.Path) {
continue
}
for _, dep := range pkg.imports {
- if !dep.fromExternalModule() {
+ if !dep.fromExternalModule(loaderstate) {
continue
}
- if inWorkspaceMode() {
+ if inWorkspaceMode(loaderstate) {
// In workspace mode / workspace pruning mode, the roots are the main modules
// rather than the main module's direct dependencies. The check below on the selected
// roots does not apply.
@@ -1412,7 +1412,7 @@ func (ld *loader) updateRequirements(ctx context.Context) (changed bool, err err
// of the vendor directory anyway.
continue
}
- if mg, err := rs.Graph(ctx); err != nil {
+ if mg, err := rs.Graph(loaderstate, ctx); err != nil {
return false, err
} else if _, ok := mg.RequiredBy(dep.mod); !ok {
// dep.mod is not an explicit dependency, but needs to be.
@@ -1424,7 +1424,7 @@ func (ld *loader) updateRequirements(ctx context.Context) (changed bool, err err
}
}
} else if pkg.err == nil && cfg.BuildMod != "mod" {
- if v, ok := rs.rootSelected(dep.mod.Path); !ok || v != dep.mod.Version {
+ if v, ok := rs.rootSelected(loaderstate, dep.mod.Path); !ok || v != dep.mod.Version {
// dep.mod is not an explicit dependency, but needs to be.
// Because we are not in "mod" mode, we will not be able to update it.
// Instead, mark the importing package with an error.
@@ -1490,21 +1490,21 @@ func (ld *loader) updateRequirements(ctx context.Context) (changed bool, err err
// roots can only increase and the set of roots can only expand. The set
// of extant root paths is finite and the set of versions of each path is
// finite, so the iteration *must* reach a stable fixed-point.
- tidy, err := tidyRoots(ctx, rs, ld.pkgs)
+ tidy, err := tidyRoots(loaderstate, ctx, rs, ld.pkgs)
if err != nil {
return false, err
}
addRoots = tidy.rootModules
}
- rs, err = updateRoots(ctx, direct, rs, ld.pkgs, addRoots, ld.AssumeRootsImported)
+ rs, err = updateRoots(loaderstate, ctx, direct, rs, ld.pkgs, addRoots, ld.AssumeRootsImported)
if err != nil {
// We don't actually know what even the root requirements are supposed to be,
// so we can't proceed with loading. Return the error to the caller
return false, err
}
- if rs.GoVersion() != ld.requirements.GoVersion() {
+ if rs.GoVersion(loaderstate) != ld.requirements.GoVersion(loaderstate) {
// A change in the selected Go version may or may not affect the set of
// loaded packages, but in some cases it can change the meaning of the "all"
// pattern, the level of pruning in the module graph, and even the set of
@@ -1515,12 +1515,12 @@ func (ld *loader) updateRequirements(ctx context.Context) (changed bool, err err
// The roots of the module graph have changed in some way (not just the
// "direct" markings). Check whether the changes affected any of the loaded
// packages.
- mg, err := rs.Graph(ctx)
+ mg, err := rs.Graph(loaderstate, ctx)
if err != nil {
return false, err
}
for _, pkg := range ld.pkgs {
- if pkg.fromExternalModule() && mg.Selected(pkg.mod.Path) != pkg.mod.Version {
+ if pkg.fromExternalModule(loaderstate) && mg.Selected(pkg.mod.Path) != pkg.mod.Version {
changed = true
break
}
@@ -1540,7 +1540,7 @@ func (ld *loader) updateRequirements(ctx context.Context) (changed bool, err err
//
// In some sense, we can think of this as ‘upgraded the module providing
// pkg.path from "none" to a version higher than "none"’.
- if _, _, _, _, err = importFromModules(ctx, pkg.path, rs, nil, ld.skipImportModFiles); err == nil {
+ if _, _, _, _, err = importFromModules(loaderstate, ctx, pkg.path, rs, nil, ld.skipImportModFiles); err == nil {
changed = true
break
}
@@ -1558,7 +1558,7 @@ func (ld *loader) updateRequirements(ctx context.Context) (changed bool, err err
// The newly-resolved packages are added to the addedModuleFor map, and
// resolveMissingImports returns a map from each new module version to
// the first missing package that module would resolve.
-func (ld *loader) resolveMissingImports(ctx context.Context) (modAddedBy map[module.Version]*loadPkg, err error) {
+func (ld *loader) resolveMissingImports(loaderstate *State, ctx context.Context) (modAddedBy map[module.Version]*loadPkg, err error) {
type pkgMod struct {
pkg *loadPkg
mod *module.Version
@@ -1573,7 +1573,7 @@ func (ld *loader) resolveMissingImports(ctx context.Context) (modAddedBy map[mod
// we should only add the missing import once.
continue
}
- if !errors.As(pkg.err, new(*ImportMissingError)) {
+ if _, ok := errors.AsType[*ImportMissingError](pkg.err); !ok {
// Leave other errors for Import or load.Packages to report.
continue
}
@@ -1582,12 +1582,11 @@ func (ld *loader) resolveMissingImports(ctx context.Context) (modAddedBy map[mod
var mod module.Version
ld.work.Add(func() {
var err error
- mod, err = queryImport(ctx, pkg.path, ld.requirements)
+ mod, err = queryImport(loaderstate, ctx, pkg.path, ld.requirements)
if err != nil {
- var ime *ImportMissingError
- if errors.As(err, &ime) {
+ if ime, ok := errors.AsType[*ImportMissingError](err); ok {
for curstack := pkg.stack; curstack != nil; curstack = curstack.stack {
- if MainModules.Contains(curstack.mod.Path) {
+ if loaderstate.MainModules.Contains(curstack.mod.Path) {
ime.ImportingMainModule = curstack.mod
break
}
@@ -1625,7 +1624,7 @@ func (ld *loader) resolveMissingImports(ctx context.Context) (modAddedBy map[mod
maxTooNewPkg *loadPkg
)
for _, pm := range pkgMods {
- if tooNew := (*gover.TooNewError)(nil); errors.As(pm.pkg.err, &tooNew) {
+ if tooNew, ok := errors.AsType[*gover.TooNewError](pm.pkg.err); ok {
if maxTooNew == nil || gover.Compare(tooNew.GoVersion, maxTooNew.GoVersion) > 0 {
maxTooNew = tooNew
maxTooNewPkg = pm.pkg
@@ -1659,7 +1658,7 @@ func (ld *loader) resolveMissingImports(ctx context.Context) (modAddedBy map[mod
// ld.work queue, and its test (if requested) will also be populated once
// imports have been resolved. When ld.work goes idle, all transitive imports of
// the requested package (and its test, if requested) will have been loaded.
-func (ld *loader) pkg(ctx context.Context, path string, flags loadPkgFlags) *loadPkg {
+func (ld *loader) pkg(loaderstate *State, ctx context.Context, path string, flags loadPkgFlags) *loadPkg {
if flags.has(pkgImportsLoaded) {
panic("internal error: (*loader).pkg called with pkgImportsLoaded flag set")
}
@@ -1668,20 +1667,20 @@ func (ld *loader) pkg(ctx context.Context, path string, flags loadPkgFlags) *loa
pkg := &loadPkg{
path: path,
}
- ld.applyPkgFlags(ctx, pkg, flags)
+ ld.applyPkgFlags(loaderstate, ctx, pkg, flags)
- ld.work.Add(func() { ld.load(ctx, pkg) })
+ ld.work.Add(func() { ld.load(loaderstate, ctx, pkg) })
return pkg
})
- ld.applyPkgFlags(ctx, pkg, flags)
+ ld.applyPkgFlags(loaderstate, ctx, pkg, flags)
return pkg
}
// applyPkgFlags updates pkg.flags to set the given flags and propagate the
// (transitive) effects of those flags, possibly loading or enqueueing further
// packages as a result.
-func (ld *loader) applyPkgFlags(ctx context.Context, pkg *loadPkg, flags loadPkgFlags) {
+func (ld *loader) applyPkgFlags(loaderstate *State, ctx context.Context, pkg *loadPkg, flags loadPkgFlags) {
if flags == 0 {
return
}
@@ -1709,7 +1708,7 @@ func (ld *loader) applyPkgFlags(ctx context.Context, pkg *loadPkg, flags loadPkg
// so it's ok if we call it more than is strictly necessary.
wantTest := false
switch {
- case ld.allPatternIsRoot && MainModules.Contains(pkg.mod.Path):
+ case ld.allPatternIsRoot && loaderstate.MainModules.Contains(pkg.mod.Path):
// We are loading the "all" pattern, which includes packages imported by
// tests in the main module. This package is in the main module, so we
// need to identify the imports of its test even if LoadTests is not set.
@@ -1730,13 +1729,13 @@ func (ld *loader) applyPkgFlags(ctx context.Context, pkg *loadPkg, flags loadPkg
if wantTest {
var testFlags loadPkgFlags
- if MainModules.Contains(pkg.mod.Path) || (ld.allClosesOverTests && new.has(pkgInAll)) {
+ if loaderstate.MainModules.Contains(pkg.mod.Path) || (ld.allClosesOverTests && new.has(pkgInAll)) {
// Tests of packages in the main module are in "all", in the sense that
// they cause the packages they import to also be in "all". So are tests
// of packages in "all" if "all" closes over test dependencies.
testFlags |= pkgInAll
}
- ld.pkgTest(ctx, pkg, testFlags)
+ ld.pkgTest(loaderstate, ctx, pkg, testFlags)
}
}
@@ -1744,13 +1743,13 @@ func (ld *loader) applyPkgFlags(ctx context.Context, pkg *loadPkg, flags loadPkg
// We have just marked pkg with pkgInAll, or we have just loaded its
// imports, or both. Now is the time to propagate pkgInAll to the imports.
for _, dep := range pkg.imports {
- ld.applyPkgFlags(ctx, dep, pkgInAll)
+ ld.applyPkgFlags(loaderstate, ctx, dep, pkgInAll)
}
}
if new.has(pkgFromRoot) && !old.has(pkgFromRoot|pkgImportsLoaded) {
for _, dep := range pkg.imports {
- ld.applyPkgFlags(ctx, dep, pkgFromRoot)
+ ld.applyPkgFlags(loaderstate, ctx, dep, pkgFromRoot)
}
}
}
@@ -1758,7 +1757,7 @@ func (ld *loader) applyPkgFlags(ctx context.Context, pkg *loadPkg, flags loadPkg
// preloadRootModules loads the module requirements needed to identify the
// selected version of each module providing a package in rootPkgs,
// adding new root modules to the module graph if needed.
-func (ld *loader) preloadRootModules(ctx context.Context, rootPkgs []string) (changedBuildList bool) {
+func (ld *loader) preloadRootModules(loaderstate *State, ctx context.Context, rootPkgs []string) (changedBuildList bool) {
needc := make(chan map[module.Version]bool, 1)
needc <- map[module.Version]bool{}
for _, path := range rootPkgs {
@@ -1769,13 +1768,12 @@ func (ld *loader) preloadRootModules(ctx context.Context, rootPkgs []string) (ch
// If the main module is tidy and the package is in "all" — or if we're
// lucky — we can identify all of its imports without actually loading the
// full module graph.
- m, _, _, _, err := importFromModules(ctx, path, ld.requirements, nil, ld.skipImportModFiles)
+ m, _, _, _, err := importFromModules(loaderstate, ctx, path, ld.requirements, nil, ld.skipImportModFiles)
if err != nil {
- var missing *ImportMissingError
- if errors.As(err, &missing) && ld.ResolveMissingImports {
+ if _, ok := errors.AsType[*ImportMissingError](err); ok && ld.ResolveMissingImports {
// This package isn't provided by any selected module.
// If we can find it, it will be a new root dependency.
- m, err = queryImport(ctx, path, ld.requirements)
+ m, err = queryImport(loaderstate, ctx, path, ld.requirements)
}
if err != nil {
// We couldn't identify the root module containing this package.
@@ -1788,7 +1786,7 @@ func (ld *loader) preloadRootModules(ctx context.Context, rootPkgs []string) (ch
return
}
- v, ok := ld.requirements.rootSelected(m.Path)
+ v, ok := ld.requirements.rootSelected(loaderstate, m.Path)
if !ok || v != m.Version {
// We found the requested package in m, but m is not a root, so
// loadModGraph will not load its requirements. We need to promote the
@@ -1816,7 +1814,7 @@ func (ld *loader) preloadRootModules(ctx context.Context, rootPkgs []string) (ch
}
gover.ModSort(toAdd)
- rs, err := updateRoots(ctx, ld.requirements.direct, ld.requirements, nil, toAdd, ld.AssumeRootsImported)
+ rs, err := updateRoots(loaderstate, ctx, ld.requirements.direct, ld.requirements, nil, toAdd, ld.AssumeRootsImported)
if err != nil {
// We are missing some root dependency, and for some reason we can't load
// enough of the module dependency graph to add the missing root. Package
@@ -1838,11 +1836,11 @@ func (ld *loader) preloadRootModules(ctx context.Context, rootPkgs []string) (ch
}
// load loads an individual package.
-func (ld *loader) load(ctx context.Context, pkg *loadPkg) {
+func (ld *loader) load(loaderstate *State, ctx context.Context, pkg *loadPkg) {
var mg *ModuleGraph
if ld.requirements.pruning == unpruned {
var err error
- mg, err = ld.requirements.Graph(ctx)
+ mg, err = ld.requirements.Graph(loaderstate, ctx)
if err != nil {
// We already checked the error from Graph in loadFromRoots and/or
// updateRequirements, so we ignored the error on purpose and we should
@@ -1857,17 +1855,17 @@ func (ld *loader) load(ctx context.Context, pkg *loadPkg) {
}
var modroot string
- pkg.mod, modroot, pkg.dir, pkg.altMods, pkg.err = importFromModules(ctx, pkg.path, ld.requirements, mg, ld.skipImportModFiles)
- if MainModules.Tools()[pkg.path] {
+ pkg.mod, modroot, pkg.dir, pkg.altMods, pkg.err = importFromModules(loaderstate, ctx, pkg.path, ld.requirements, mg, ld.skipImportModFiles)
+ if loaderstate.MainModules.Tools()[pkg.path] {
// Tools declared by main modules are always in "all".
// We apply the package flags before returning so that missing
// tool dependencies report an error https://go.dev/issue/70582
- ld.applyPkgFlags(ctx, pkg, pkgInAll)
+ ld.applyPkgFlags(loaderstate, ctx, pkg, pkgInAll)
}
if pkg.dir == "" {
return
}
- if MainModules.Contains(pkg.mod.Path) {
+ if loaderstate.MainModules.Contains(pkg.mod.Path) {
// Go ahead and mark pkg as in "all". This provides the invariant that a
// package that is *only* imported by other packages in "all" is always
// marked as such before loading its imports.
@@ -1877,7 +1875,7 @@ func (ld *loader) load(ctx context.Context, pkg *loadPkg) {
// about (by reducing churn on the flag bits of dependencies), and costs
// essentially nothing (these atomic flag ops are essentially free compared
// to scanning source code for imports).
- ld.applyPkgFlags(ctx, pkg, pkgInAll)
+ ld.applyPkgFlags(loaderstate, ctx, pkg, pkgInAll)
}
if ld.AllowPackage != nil {
if err := ld.AllowPackage(ctx, pkg.path, pkg.mod); err != nil {
@@ -1909,13 +1907,13 @@ func (ld *loader) load(ctx context.Context, pkg *loadPkg) {
if pkg.inStd {
// Imports from packages in "std" and "cmd" should resolve using
// GOROOT/src/vendor even when "std" is not the main module.
- path = ld.stdVendor(pkg.path, path)
+ path = ld.stdVendor(loaderstate, pkg.path, path)
}
- pkg.imports = append(pkg.imports, ld.pkg(ctx, path, importFlags))
+ pkg.imports = append(pkg.imports, ld.pkg(loaderstate, ctx, path, importFlags))
}
pkg.testImports = testImports
- ld.applyPkgFlags(ctx, pkg, pkgImportsLoaded)
+ ld.applyPkgFlags(loaderstate, ctx, pkg, pkgImportsLoaded)
}
// pkgTest locates the test of pkg, creating it if needed, and updates its state
@@ -1923,7 +1921,7 @@ func (ld *loader) load(ctx context.Context, pkg *loadPkg) {
//
// pkgTest requires that the imports of pkg have already been loaded (flagged
// with pkgImportsLoaded).
-func (ld *loader) pkgTest(ctx context.Context, pkg *loadPkg, testFlags loadPkgFlags) *loadPkg {
+func (ld *loader) pkgTest(loaderstate *State, ctx context.Context, pkg *loadPkg, testFlags loadPkgFlags) *loadPkg {
if pkg.isTest() {
panic("pkgTest called on a test package")
}
@@ -1938,7 +1936,7 @@ func (ld *loader) pkgTest(ctx context.Context, pkg *loadPkg, testFlags loadPkgFl
err: pkg.err,
inStd: pkg.inStd,
}
- ld.applyPkgFlags(ctx, pkg.test, testFlags)
+ ld.applyPkgFlags(loaderstate, ctx, pkg.test, testFlags)
createdTest = true
})
@@ -1951,14 +1949,14 @@ func (ld *loader) pkgTest(ctx context.Context, pkg *loadPkg, testFlags loadPkgFl
}
for _, path := range pkg.testImports {
if pkg.inStd {
- path = ld.stdVendor(test.path, path)
+ path = ld.stdVendor(loaderstate, test.path, path)
}
- test.imports = append(test.imports, ld.pkg(ctx, path, importFlags))
+ test.imports = append(test.imports, ld.pkg(loaderstate, ctx, path, importFlags))
}
pkg.testImports = nil
- ld.applyPkgFlags(ctx, test, pkgImportsLoaded)
+ ld.applyPkgFlags(loaderstate, ctx, test, pkgImportsLoaded)
} else {
- ld.applyPkgFlags(ctx, test, testFlags)
+ ld.applyPkgFlags(loaderstate, ctx, test, testFlags)
}
return test
@@ -1966,7 +1964,7 @@ func (ld *loader) pkgTest(ctx context.Context, pkg *loadPkg, testFlags loadPkgFl
// stdVendor returns the canonical import path for the package with the given
// path when imported from the standard-library package at parentPath.
-func (ld *loader) stdVendor(parentPath, path string) string {
+func (ld *loader) stdVendor(loaderstate *State, parentPath, path string) string {
if p, _, ok := fips140.ResolveImport(path); ok {
return p
}
@@ -1975,14 +1973,14 @@ func (ld *loader) stdVendor(parentPath, path string) string {
}
if str.HasPathPrefix(parentPath, "cmd") {
- if !ld.VendorModulesInGOROOTSrc || !MainModules.Contains("cmd") {
+ if !ld.VendorModulesInGOROOTSrc || !loaderstate.MainModules.Contains("cmd") {
vendorPath := pathpkg.Join("cmd", "vendor", path)
if _, err := os.Stat(filepath.Join(cfg.GOROOTsrc, filepath.FromSlash(vendorPath))); err == nil {
return vendorPath
}
}
- } else if !ld.VendorModulesInGOROOTSrc || !MainModules.Contains("std") || str.HasPathPrefix(parentPath, "vendor") {
+ } else if !ld.VendorModulesInGOROOTSrc || !loaderstate.MainModules.Contains("std") || str.HasPathPrefix(parentPath, "vendor") {
// If we are outside of the 'std' module, resolve imports from within 'std'
// to the vendor directory.
//
@@ -2028,7 +2026,7 @@ func (ld *loader) computePatternAll() (all []string) {
// or as a replacement for another module, but not both at the same time.
//
// (See https://golang.org/issue/26607 and https://golang.org/issue/34650.)
-func (ld *loader) checkMultiplePaths() {
+func (ld *loader) checkMultiplePaths(loaderstate *State) {
mods := ld.requirements.rootModules
if cached := ld.requirements.graph.Load(); cached != nil {
if mg := cached.mg; mg != nil {
@@ -2038,7 +2036,7 @@ func (ld *loader) checkMultiplePaths() {
firstPath := map[module.Version]string{}
for _, mod := range mods {
- src := resolveReplacement(mod)
+ src := resolveReplacement(loaderstate, mod)
if prev, ok := firstPath[src]; !ok {
firstPath[src] = mod.Path
} else if prev != mod.Path {
@@ -2049,8 +2047,8 @@ func (ld *loader) checkMultiplePaths() {
// checkTidyCompatibility emits an error if any package would be loaded from a
// different module under rs than under ld.requirements.
-func (ld *loader) checkTidyCompatibility(ctx context.Context, rs *Requirements, compatVersion string) {
- goVersion := rs.GoVersion()
+func (ld *loader) checkTidyCompatibility(loaderstate *State, ctx context.Context, rs *Requirements, compatVersion string) {
+ goVersion := rs.GoVersion(loaderstate)
suggestUpgrade := false
suggestEFlag := false
suggestFixes := func() {
@@ -2067,7 +2065,7 @@ func (ld *loader) checkTidyCompatibility(ctx context.Context, rs *Requirements,
fmt.Fprintln(os.Stderr)
goFlag := ""
- if goVersion != MainModules.GoVersion() {
+ if goVersion != loaderstate.MainModules.GoVersion(loaderstate) {
goFlag = " -go=" + goVersion
}
@@ -2096,7 +2094,7 @@ func (ld *loader) checkTidyCompatibility(ctx context.Context, rs *Requirements,
fmt.Fprintf(os.Stderr, "For information about 'go mod tidy' compatibility, see:\n\thttps://go.dev/ref/mod#graph-pruning\n")
}
- mg, err := rs.Graph(ctx)
+ mg, err := rs.Graph(loaderstate, ctx)
if err != nil {
ld.error(fmt.Errorf("error loading go %s module graph: %w", compatVersion, err))
ld.switchIfErrors(ctx)
@@ -2134,7 +2132,7 @@ func (ld *loader) checkTidyCompatibility(ctx context.Context, rs *Requirements,
pkg := pkg
ld.work.Add(func() {
- mod, _, _, _, err := importFromModules(ctx, pkg.path, rs, mg, ld.skipImportModFiles)
+ mod, _, _, _, err := importFromModules(loaderstate, ctx, pkg.path, rs, mg, ld.skipImportModFiles)
if mod != pkg.mod {
mismatches := <-mismatchMu
mismatches[pkg] = mismatch{mod: mod, err: err}
@@ -2196,14 +2194,14 @@ func (ld *loader) checkTidyCompatibility(ctx context.Context, rs *Requirements,
// module that previously provided the package to a version that no
// longer does, or to a version for which the module source code (but
// not the go.mod file in isolation) has a checksum error.
- if missing := (*ImportMissingError)(nil); errors.As(mismatch.err, &missing) {
+ if _, ok := errors.AsType[*ImportMissingError](mismatch.err); ok {
selected := module.Version{
Path: pkg.mod.Path,
Version: mg.Selected(pkg.mod.Path),
}
ld.error(fmt.Errorf("%s loaded from %v,\n\tbut go %s would fail to locate it in %s", pkg.stackText(), pkg.mod, compatVersion, selected))
} else {
- if ambiguous := (*AmbiguousImportError)(nil); errors.As(mismatch.err, &ambiguous) {
+ if _, ok := errors.AsType[*AmbiguousImportError](mismatch.err); ok {
// TODO: Is this check needed?
}
ld.error(fmt.Errorf("%s loaded from %v,\n\tbut go %s would fail to locate it:\n\t%v", pkg.stackText(), pkg.mod, compatVersion, mismatch.err))
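The hunks above repeatedly replace the two-step errors.As idiom (declare a typed variable, pass its address) with the generic errors.AsType, which returns the matched error value together with an ok flag. A minimal sketch of the two styles, assuming the errors.AsType[T](err) (T, bool) signature used throughout this diff, with a hypothetical importMissingError type standing in for modload's *ImportMissingError:

	package main

	import (
		"errors"
		"fmt"
	)

	// importMissingError is a stand-in for modload's *ImportMissingError.
	type importMissingError struct{ path string }

	func (e *importMissingError) Error() string { return "import missing: " + e.path }

	func main() {
		err := fmt.Errorf("loading: %w", &importMissingError{path: "example.com/m"})

		// Old style: declare a target variable and pass its address.
		var ime *importMissingError
		if errors.As(err, &ime) {
			fmt.Println("As:", ime.path)
		}

		// New style: AsType returns the typed value and an ok flag in one step.
		if ime, ok := errors.AsType[*importMissingError](err); ok {
			fmt.Println("AsType:", ime.path)
		}
	}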
diff --git a/src/cmd/go/internal/modload/modfile.go b/src/cmd/go/internal/modload/modfile.go
index 04e204cc984c59..20feb8fcacc784 100644
--- a/src/cmd/go/internal/modload/modfile.go
+++ b/src/cmd/go/internal/modload/modfile.go
@@ -76,8 +76,7 @@ func ReadModFile(gomod string, fix modfile.VersionFixer) (data []byte, f *modfil
}
func shortPathErrorList(err error) error {
- var el modfile.ErrorList
- if errors.As(err, &el) {
+ if el, ok := errors.AsType[modfile.ErrorList](err); ok {
for i := range el {
el[i].Filename = base.ShortPath(el[i].Filename)
}
@@ -143,7 +142,7 @@ func CheckAllowed(ctx context.Context, m module.Version) error {
if err := CheckExclusions(ctx, m); err != nil {
return err
}
- if err := CheckRetractions(ctx, m); err != nil {
+ if err := CheckRetractions(LoaderState, ctx, m); err != nil {
return err
}
return nil
@@ -156,8 +155,8 @@ var ErrDisallowed = errors.New("disallowed module version")
// CheckExclusions returns an error equivalent to ErrDisallowed if module m is
// excluded by the main module's go.mod file.
func CheckExclusions(ctx context.Context, m module.Version) error {
- for _, mainModule := range MainModules.Versions() {
- if index := MainModules.Index(mainModule); index != nil && index.exclude[m] {
+ for _, mainModule := range LoaderState.MainModules.Versions() {
+ if index := LoaderState.MainModules.Index(mainModule); index != nil && index.exclude[m] {
return module.VersionError(m, errExcluded)
}
}
@@ -173,14 +172,17 @@ func (e *excludedError) Is(err error) bool { return err == ErrDisallowed }
// CheckRetractions returns an error if module m has been retracted by
// its author.
-func CheckRetractions(ctx context.Context, m module.Version) (err error) {
+func CheckRetractions(loaderstate *State, ctx context.Context, m module.Version) (err error) {
defer func() {
- if retractErr := (*ModuleRetractedError)(nil); err == nil || errors.As(err, &retractErr) {
+ if err == nil {
+ return
+ }
+ if _, ok := errors.AsType[*ModuleRetractedError](err); ok {
return
}
// Attribute the error to the version being checked, not the version from
// which the retractions were to be loaded.
- if mErr := (*module.ModuleError)(nil); errors.As(err, &mErr) {
+ if mErr, ok := errors.AsType[*module.ModuleError](err); ok {
err = mErr.Err
}
err = &retractionLoadingError{m: m, err: err}
@@ -191,7 +193,7 @@ func CheckRetractions(ctx context.Context, m module.Version) (err error) {
// Cannot be retracted.
return nil
}
- if repl := Replacement(module.Version{Path: m.Path}); repl.Path != "" {
+ if repl := Replacement(loaderstate, module.Version{Path: m.Path}); repl.Path != "" {
// All versions of the module were replaced.
// Don't load retractions, since we'd just load the replacement.
return nil
@@ -208,11 +210,11 @@ func CheckRetractions(ctx context.Context, m module.Version) (err error) {
// We load the raw file here: the go.mod file may have a different module
// path that we expect if the module or its repository was renamed.
// We still want to apply retractions to other aliases of the module.
- rm, err := queryLatestVersionIgnoringRetractions(ctx, m.Path)
+ rm, err := queryLatestVersionIgnoringRetractions(loaderstate, ctx, m.Path)
if err != nil {
return err
}
- summary, err := rawGoModSummary(rm)
+ summary, err := rawGoModSummary(loaderstate, rm)
if err != nil && !errors.Is(err, gover.ErrTooNew) {
return err
}
@@ -298,7 +300,7 @@ func ShortMessage(message, emptyDefault string) string {
//
// CheckDeprecation returns an error if the message can't be loaded.
// CheckDeprecation returns "", nil if there is no deprecation message.
-func CheckDeprecation(ctx context.Context, m module.Version) (deprecation string, err error) {
+func CheckDeprecation(loaderstate *State, ctx context.Context, m module.Version) (deprecation string, err error) {
defer func() {
if err != nil {
err = fmt.Errorf("loading deprecation for %s: %w", m.Path, err)
@@ -310,17 +312,17 @@ func CheckDeprecation(ctx context.Context, m module.Version) (deprecation string
// Don't look up deprecation.
return "", nil
}
- if repl := Replacement(module.Version{Path: m.Path}); repl.Path != "" {
+ if repl := Replacement(loaderstate, module.Version{Path: m.Path}); repl.Path != "" {
// All versions of the module were replaced.
// We'll look up deprecation separately for the replacement.
return "", nil
}
- latest, err := queryLatestVersionIgnoringRetractions(ctx, m.Path)
+ latest, err := queryLatestVersionIgnoringRetractions(loaderstate, ctx, m.Path)
if err != nil {
return "", err
}
- summary, err := rawGoModSummary(latest)
+ summary, err := rawGoModSummary(loaderstate, latest)
if err != nil && !errors.Is(err, gover.ErrTooNew) {
return "", err
}
@@ -340,28 +342,28 @@ func replacement(mod module.Version, replace map[module.Version]module.Version)
// Replacement returns the replacement for mod, if any. If the path in the
// module.Version is relative it's relative to the single main module outside
// workspace mode, or the workspace's directory in workspace mode.
-func Replacement(mod module.Version) module.Version {
- r, foundModRoot, _ := replacementFrom(mod)
- return canonicalizeReplacePath(r, foundModRoot)
+func Replacement(loaderstate *State, mod module.Version) module.Version {
+ r, foundModRoot, _ := replacementFrom(loaderstate, mod)
+ return canonicalizeReplacePath(loaderstate, r, foundModRoot)
}
// replacementFrom returns the replacement for mod, if any, the modroot of the replacement if it appeared in a go.mod,
// and the source of the replacement. The replacement is relative to the go.work or go.mod file it appears in.
-func replacementFrom(mod module.Version) (r module.Version, modroot string, fromFile string) {
+func replacementFrom(loaderstate *State, mod module.Version) (r module.Version, modroot string, fromFile string) {
foundFrom, found, foundModRoot := "", module.Version{}, ""
- if MainModules == nil {
+ if loaderstate.MainModules == nil {
return module.Version{}, "", ""
- } else if MainModules.Contains(mod.Path) && mod.Version == "" {
+ } else if loaderstate.MainModules.Contains(mod.Path) && mod.Version == "" {
// Don't replace the workspace version of the main module.
return module.Version{}, "", ""
}
- if _, r, ok := replacement(mod, MainModules.WorkFileReplaceMap()); ok {
- return r, "", workFilePath
+ if _, r, ok := replacement(mod, loaderstate.MainModules.WorkFileReplaceMap()); ok {
+ return r, "", loaderstate.workFilePath
}
- for _, v := range MainModules.Versions() {
- if index := MainModules.Index(v); index != nil {
+ for _, v := range loaderstate.MainModules.Versions() {
+ if index := loaderstate.MainModules.Index(v); index != nil {
if from, r, ok := replacement(mod, index.replace); ok {
- modRoot := MainModules.ModRoot(v)
+ modRoot := loaderstate.MainModules.ModRoot(v)
if foundModRoot != "" && foundFrom != from && found != r {
base.Errorf("conflicting replacements found for %v in workspace modules defined by %v and %v",
mod, modFilePath(foundModRoot), modFilePath(modRoot))
@@ -374,21 +376,21 @@ func replacementFrom(mod module.Version) (r module.Version, modroot string, from
return found, foundModRoot, modFilePath(foundModRoot)
}
-func replaceRelativeTo() string {
- if workFilePath := WorkFilePath(); workFilePath != "" {
+func replaceRelativeTo(loaderstate *State) string {
+ if workFilePath := WorkFilePath(loaderstate); workFilePath != "" {
return filepath.Dir(workFilePath)
}
- return MainModules.ModRoot(MainModules.mustGetSingleMainModule())
+ return loaderstate.MainModules.ModRoot(loaderstate.MainModules.mustGetSingleMainModule(loaderstate))
}
// canonicalizeReplacePath ensures that relative, on-disk, replaced module paths
// are relative to the workspace directory (in workspace mode) or to the module's
// directory (in module mode, as they already are).
-func canonicalizeReplacePath(r module.Version, modRoot string) module.Version {
+func canonicalizeReplacePath(loaderstate *State, r module.Version, modRoot string) module.Version {
if filepath.IsAbs(r.Path) || r.Version != "" || modRoot == "" {
return r
}
- workFilePath := WorkFilePath()
+ workFilePath := WorkFilePath(loaderstate)
if workFilePath == "" {
return r
}
@@ -405,8 +407,8 @@ func canonicalizeReplacePath(r module.Version, modRoot string) module.Version {
// for m: either m itself, or the replacement for m (iff m is replaced).
// It also returns the modroot of the module providing the replacement if
// one was found.
-func resolveReplacement(m module.Version) module.Version {
- if r := Replacement(m); r.Path != "" {
+func resolveReplacement(loaderstate *State, m module.Version) module.Version {
+ if r := Replacement(loaderstate, m); r.Path != "" {
return r
}
return m
@@ -571,12 +573,12 @@ type retraction struct {
// module versions.
//
// The caller must not modify the returned summary.
-func goModSummary(m module.Version) (*modFileSummary, error) {
- if m.Version == "" && !inWorkspaceMode() && MainModules.Contains(m.Path) {
+func goModSummary(loaderstate *State, m module.Version) (*modFileSummary, error) {
+ if m.Version == "" && !inWorkspaceMode(loaderstate) && loaderstate.MainModules.Contains(m.Path) {
panic("internal error: goModSummary called on a main module")
}
if gover.IsToolchain(m.Path) {
- return rawGoModSummary(m)
+ return rawGoModSummary(loaderstate, m)
}
if cfg.BuildMod == "vendor" {
@@ -584,7 +586,7 @@ func goModSummary(m module.Version) (*modFileSummary, error) {
module: module.Version{Path: m.Path},
}
- readVendorList(VendorDir())
+ readVendorList(VendorDir(loaderstate))
if vendorVersion[m.Path] != m.Version {
// This module is not vendored, so packages cannot be loaded from it and
// it cannot be relevant to the build.
@@ -599,15 +601,15 @@ func goModSummary(m module.Version) (*modFileSummary, error) {
return summary, nil
}
- actual := resolveReplacement(m)
- if mustHaveSums() && actual.Version != "" {
+ actual := resolveReplacement(loaderstate, m)
+ if mustHaveSums(loaderstate) && actual.Version != "" {
key := module.Version{Path: actual.Path, Version: actual.Version + "/go.mod"}
if !modfetch.HaveSum(key) {
suggestion := fmt.Sprintf(" for go.mod file; to add it:\n\tgo mod download %s", m.Path)
return nil, module.VersionError(actual, &sumMissingError{suggestion: suggestion})
}
}
- summary, err := rawGoModSummary(actual)
+ summary, err := rawGoModSummary(loaderstate, actual)
if err != nil {
return nil, err
}
@@ -639,8 +641,8 @@ func goModSummary(m module.Version) (*modFileSummary, error) {
}
}
- for _, mainModule := range MainModules.Versions() {
- if index := MainModules.Index(mainModule); index != nil && len(index.exclude) > 0 {
+ for _, mainModule := range loaderstate.MainModules.Versions() {
+ if index := loaderstate.MainModules.Index(mainModule); index != nil && len(index.exclude) > 0 {
// Drop any requirements on excluded versions.
// Don't modify the cached summary though, since we might need the raw
// summary separately.
@@ -674,7 +676,7 @@ func goModSummary(m module.Version) (*modFileSummary, error) {
// rawGoModSummary cannot be used on the main module outside of workspace mode.
// The modFileSummary can still be used for retractions and deprecations
// even if a TooNewError is returned.
-func rawGoModSummary(m module.Version) (*modFileSummary, error) {
+func rawGoModSummary(loaderstate *State, m module.Version) (*modFileSummary, error) {
if gover.IsToolchain(m.Path) {
if m.Path == "go" && gover.Compare(m.Version, gover.GoStrictVersion) >= 0 {
// Declare that go 1.21.3 requires toolchain 1.21.3,
@@ -684,7 +686,7 @@ func rawGoModSummary(m module.Version) (*modFileSummary, error) {
}
return &modFileSummary{module: m}, nil
}
- if m.Version == "" && !inWorkspaceMode() && MainModules.Contains(m.Path) {
+ if m.Version == "" && !inWorkspaceMode(loaderstate) && loaderstate.MainModules.Contains(m.Path) {
// Calling rawGoModSummary implies that we are treating m as a module whose
// requirements aren't the roots of the module graph and can't be modified.
//
@@ -692,22 +694,22 @@ func rawGoModSummary(m module.Version) (*modFileSummary, error) {
// are the roots of the module graph and we expect them to be kept consistent.
panic("internal error: rawGoModSummary called on a main module")
}
- if m.Version == "" && inWorkspaceMode() && m.Path == "command-line-arguments" {
+ if m.Version == "" && inWorkspaceMode(loaderstate) && m.Path == "command-line-arguments" {
// "go work sync" calls LoadModGraph to make sure the module graph is valid.
// If there are no modules in the workspace, we synthesize an empty
// command-line-arguments module, which rawGoModData cannot read a go.mod for.
return &modFileSummary{module: m}, nil
- } else if m.Version == "" && inWorkspaceMode() && MainModules.Contains(m.Path) {
+ } else if m.Version == "" && inWorkspaceMode(loaderstate) && loaderstate.MainModules.Contains(m.Path) {
// When go get uses EnterWorkspace to check that the workspace loads properly,
// it will update the contents of the workspace module's modfile in memory. To use the updated
// contents of the modfile when doing the load, don't read from disk and instead
// recompute a summary using the updated contents of the modfile.
- if mf := MainModules.ModFile(m); mf != nil {
- return summaryFromModFile(m, MainModules.modFiles[m])
+ if mf := loaderstate.MainModules.ModFile(m); mf != nil {
+ return summaryFromModFile(m, loaderstate.MainModules.modFiles[m])
}
}
return rawGoModSummaryCache.Do(m, func() (*modFileSummary, error) {
- name, data, err := rawGoModData(m)
+ name, data, err := rawGoModData(loaderstate, m)
if err != nil {
return nil, err
}
@@ -779,15 +781,15 @@ var rawGoModSummaryCache par.ErrCache[module.Version, *modFileSummary]
//
// Unlike rawGoModSummary, rawGoModData does not cache its results in memory.
// Use rawGoModSummary instead unless you specifically need these bytes.
-func rawGoModData(m module.Version) (name string, data []byte, err error) {
+func rawGoModData(loaderstate *State, m module.Version) (name string, data []byte, err error) {
if m.Version == "" {
dir := m.Path
if !filepath.IsAbs(dir) {
- if inWorkspaceMode() && MainModules.Contains(m.Path) {
- dir = MainModules.ModRoot(m)
+ if inWorkspaceMode(loaderstate) && loaderstate.MainModules.Contains(m.Path) {
+ dir = loaderstate.MainModules.ModRoot(m)
} else {
// m is a replacement module with only a file path.
- dir = filepath.Join(replaceRelativeTo(), dir)
+ dir = filepath.Join(replaceRelativeTo(loaderstate), dir)
}
}
name = filepath.Join(dir, "go.mod")
@@ -823,12 +825,12 @@ func rawGoModData(m module.Version) (name string, data []byte, err error) {
//
// If the queried latest version is replaced,
// queryLatestVersionIgnoringRetractions returns the replacement.
-func queryLatestVersionIgnoringRetractions(ctx context.Context, path string) (latest module.Version, err error) {
+func queryLatestVersionIgnoringRetractions(loaderstate *State, ctx context.Context, path string) (latest module.Version, err error) {
return latestVersionIgnoringRetractionsCache.Do(path, func() (module.Version, error) {
ctx, span := trace.StartSpan(ctx, "queryLatestVersionIgnoringRetractions "+path)
defer span.Done()
- if repl := Replacement(module.Version{Path: path}); repl.Path != "" {
+ if repl := Replacement(loaderstate, module.Version{Path: path}); repl.Path != "" {
// All versions of the module were replaced.
// No need to query.
return repl, nil
@@ -838,12 +840,12 @@ func queryLatestVersionIgnoringRetractions(ctx context.Context, path string) (la
// Ignore exclusions from the main module's go.mod.
const ignoreSelected = ""
var allowAll AllowedFunc
- rev, err := Query(ctx, path, "latest", ignoreSelected, allowAll)
+ rev, err := Query(loaderstate, ctx, path, "latest", ignoreSelected, allowAll)
if err != nil {
return module.Version{}, err
}
latest := module.Version{Path: path, Version: rev.Version}
- if repl := resolveReplacement(latest); repl.Path != "" {
+ if repl := resolveReplacement(loaderstate, latest); repl.Path != "" {
latest = repl
}
return latest, nil
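As in loader.go above, the helpers in modfile.go stop reading the package-level MainModules and workFilePath globals and instead receive an explicit *State, with the package-level LoaderState serving as the default instance at entry points that have not yet been migrated. A minimal, self-contained sketch of that shape (the types, fields, and function names here are illustrative, not the real modload definitions):

	package main

	import "fmt"

	// mainModuleSet is a stand-in for modload's MainModuleSet.
	type mainModuleSet struct{ paths map[string]bool }

	func (s *mainModuleSet) Contains(path string) bool { return s != nil && s.paths[path] }

	// State sketches the explicit loader state threaded through the module
	// loader in this change; the real State carries much more.
	type State struct {
		MainModules  *mainModuleSet
		workFilePath string
	}

	// LoaderState plays the role of the package-level default instance that
	// exported entry points and not-yet-migrated callers still use.
	var LoaderState = &State{
		MainModules: &mainModuleSet{paths: map[string]bool{"example.com/m": true}},
	}

	// Internal helpers take the state explicitly instead of reading globals.
	func isMainModule(loaderstate *State, path string) bool {
		return loaderstate.MainModules.Contains(path)
	}

	func main() {
		// Call sites pass the state as the first argument, as in the hunks above.
		fmt.Println(isMainModule(LoaderState, "example.com/m"))
	}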
diff --git a/src/cmd/go/internal/modload/mvs.go b/src/cmd/go/internal/modload/mvs.go
index 8ae2dbff1e8887..32afc866fbcc14 100644
--- a/src/cmd/go/internal/modload/mvs.go
+++ b/src/cmd/go/internal/modload/mvs.go
@@ -43,7 +43,7 @@ type mvsReqs struct {
}
func (r *mvsReqs) Required(mod module.Version) ([]module.Version, error) {
- if mod.Version == "" && MainModules.Contains(mod.Path) {
+ if mod.Version == "" && LoaderState.MainModules.Contains(mod.Path) {
// Use the build list as it existed when r was constructed, not the current
// global build list.
return r.roots, nil
@@ -53,7 +53,7 @@ func (r *mvsReqs) Required(mod module.Version) ([]module.Version, error) {
return nil, nil
}
- summary, err := goModSummary(mod)
+ summary, err := goModSummary(LoaderState, mod)
if err != nil {
return nil, err
}
@@ -79,11 +79,11 @@ func (*mvsReqs) Upgrade(m module.Version) (module.Version, error) {
return m, nil
}
-func versions(ctx context.Context, path string, allowed AllowedFunc) (versions []string, origin *codehost.Origin, err error) {
+func versions(loaderstate *State, ctx context.Context, path string, allowed AllowedFunc) (versions []string, origin *codehost.Origin, err error) {
// Note: modfetch.Lookup and repo.Versions are cached,
// so there's no need for us to add extra caching here.
err = modfetch.TryProxies(func(proxy string) error {
- repo, err := lookupRepo(ctx, proxy, path)
+ repo, err := lookupRepo(loaderstate, ctx, proxy, path)
if err != nil {
return err
}
@@ -111,12 +111,12 @@ func versions(ctx context.Context, path string, allowed AllowedFunc) (versions [
//
// Since the version of a main module is not found in the version list,
// it has no previous version.
-func previousVersion(ctx context.Context, m module.Version) (module.Version, error) {
- if m.Version == "" && MainModules.Contains(m.Path) {
+func previousVersion(loaderstate *State, ctx context.Context, m module.Version) (module.Version, error) {
+ if m.Version == "" && loaderstate.MainModules.Contains(m.Path) {
return module.Version{Path: m.Path, Version: "none"}, nil
}
- list, _, err := versions(ctx, m.Path, CheckAllowed)
+ list, _, err := versions(loaderstate, ctx, m.Path, CheckAllowed)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
return module.Version{Path: m.Path, Version: "none"}, nil
@@ -132,5 +132,5 @@ func previousVersion(ctx context.Context, m module.Version) (module.Version, err
func (*mvsReqs) Previous(m module.Version) (module.Version, error) {
// TODO(golang.org/issue/38714): thread tracing context through MVS.
- return previousVersion(context.TODO(), m)
+ return previousVersion(LoaderState, context.TODO(), m)
}
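Required and Previous cannot take a loaderstate parameter because their signatures are fixed by the mvs.Reqs interface, so the hunks above have them fall back to the package-level LoaderState. A small sketch of that pattern, with hypothetical names:

	package main

	import "fmt"

	// State and LoaderState are stand-ins for modload's loader state and
	// its package-level default.
	type State struct{ label string }

	var LoaderState = &State{label: "default"}

	// previousVersion is the migrated helper that takes the state explicitly.
	func previousVersion(loaderstate *State, m string) string {
		return loaderstate.label + ": previous of " + m
	}

	// reqs mimics a type whose method set is fixed by an interface (here a
	// stand-in for mvs.Reqs), so Previous cannot grow a *State parameter and
	// forwards the package default instead.
	type reqs struct{}

	func (*reqs) Previous(m string) string {
		return previousVersion(LoaderState, m)
	}

	func main() {
		fmt.Println((&reqs{}).Previous("example.com/m@v1.2.3"))
	}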
diff --git a/src/cmd/go/internal/modload/query.go b/src/cmd/go/internal/modload/query.go
index c4cf55442ba69b..17a0aef21ab2e9 100644
--- a/src/cmd/go/internal/modload/query.go
+++ b/src/cmd/go/internal/modload/query.go
@@ -80,19 +80,19 @@ import (
//
// Query often returns a non-nil *RevInfo with a non-nil error,
// to provide an info.Origin that can allow the error to be cached.
-func Query(ctx context.Context, path, query, current string, allowed AllowedFunc) (*modfetch.RevInfo, error) {
+func Query(loaderstate *State, ctx context.Context, path, query, current string, allowed AllowedFunc) (*modfetch.RevInfo, error) {
ctx, span := trace.StartSpan(ctx, "modload.Query "+path)
defer span.Done()
- return queryReuse(ctx, path, query, current, allowed, nil)
+ return queryReuse(loaderstate, ctx, path, query, current, allowed, nil)
}
// queryReuse is like Query but also takes a map of module info that can be reused
// if the validation criteria in Origin are met.
-func queryReuse(ctx context.Context, path, query, current string, allowed AllowedFunc, reuse map[module.Version]*modinfo.ModulePublic) (*modfetch.RevInfo, error) {
+func queryReuse(loaderstate *State, ctx context.Context, path, query, current string, allowed AllowedFunc, reuse map[module.Version]*modinfo.ModulePublic) (*modfetch.RevInfo, error) {
var info *modfetch.RevInfo
err := modfetch.TryProxies(func(proxy string) (err error) {
- info, err = queryProxy(ctx, proxy, path, query, current, allowed, reuse)
+ info, err = queryProxy(loaderstate, ctx, proxy, path, query, current, allowed, reuse)
return err
})
return info, err
@@ -100,9 +100,9 @@ func queryReuse(ctx context.Context, path, query, current string, allowed Allowe
// checkReuse checks whether a revision of a given module
// may be reused, according to the information in origin.
-func checkReuse(ctx context.Context, m module.Version, old *codehost.Origin) error {
+func checkReuse(loaderstate *State, ctx context.Context, m module.Version, old *codehost.Origin) error {
return modfetch.TryProxies(func(proxy string) error {
- repo, err := lookupRepo(ctx, proxy, m.Path)
+ repo, err := lookupRepo(loaderstate, ctx, proxy, m.Path)
if err != nil {
return err
}
@@ -197,7 +197,7 @@ func (queryDisabledError) Error() string {
return fmt.Sprintf("cannot query module due to -mod=%s\n\t(%s)", cfg.BuildMod, cfg.BuildModReason)
}
-func queryProxy(ctx context.Context, proxy, path, query, current string, allowed AllowedFunc, reuse map[module.Version]*modinfo.ModulePublic) (*modfetch.RevInfo, error) {
+func queryProxy(loaderstate *State, ctx context.Context, proxy, path, query, current string, allowed AllowedFunc, reuse map[module.Version]*modinfo.ModulePublic) (*modfetch.RevInfo, error) {
ctx, span := trace.StartSpan(ctx, "modload.queryProxy "+path+" "+query)
defer span.Done()
@@ -211,7 +211,7 @@ func queryProxy(ctx context.Context, proxy, path, query, current string, allowed
allowed = func(context.Context, module.Version) error { return nil }
}
- if MainModules.Contains(path) && (query == "upgrade" || query == "patch") {
+ if loaderstate.MainModules.Contains(path) && (query == "upgrade" || query == "patch") {
m := module.Version{Path: path}
if err := allowed(ctx, m); err != nil {
return nil, fmt.Errorf("internal error: main module version is not allowed: %w", err)
@@ -223,7 +223,7 @@ func queryProxy(ctx context.Context, proxy, path, query, current string, allowed
return nil, fmt.Errorf("can't query specific version (%q) of standard-library module %q", query, path)
}
- repo, err := lookupRepo(ctx, proxy, path)
+ repo, err := lookupRepo(loaderstate, ctx, proxy, path)
if err != nil {
return nil, err
}
@@ -296,7 +296,7 @@ func queryProxy(ctx context.Context, proxy, path, query, current string, allowed
return &clone
}
- releases, prereleases, err := qm.filterVersions(ctx, versions.List)
+ releases, prereleases, err := qm.filterVersions(loaderstate, ctx, versions.List)
if err != nil {
return revWithOrigin(nil), err
}
@@ -569,7 +569,7 @@ func (qm *queryMatcher) allowsVersion(ctx context.Context, v string) bool {
//
// If the allowed predicate returns an error not equivalent to ErrDisallowed,
// filterVersions returns that error.
-func (qm *queryMatcher) filterVersions(ctx context.Context, versions []string) (releases, prereleases []string, err error) {
+func (qm *queryMatcher) filterVersions(loaderstate *State, ctx context.Context, versions []string) (releases, prereleases []string, err error) {
needIncompatible := qm.preferIncompatible
var lastCompatible string
@@ -602,7 +602,7 @@ func (qm *queryMatcher) filterVersions(ctx context.Context, versions []string) (
// ignore any version with a higher (+incompatible) major version. (See
// https://golang.org/issue/34165.) Note that we even prefer a
// compatible pre-release over an incompatible release.
- ok, err := versionHasGoMod(ctx, module.Version{Path: qm.path, Version: lastCompatible})
+ ok, err := versionHasGoMod(loaderstate, ctx, module.Version{Path: qm.path, Version: lastCompatible})
if err != nil {
return nil, nil, err
}
@@ -639,11 +639,11 @@ type QueryResult struct {
// QueryPackages is like QueryPattern, but requires that the pattern match at
// least one package and omits the non-package result (if any).
-func QueryPackages(ctx context.Context, pattern, query string, current func(string) string, allowed AllowedFunc) ([]QueryResult, error) {
- pkgMods, modOnly, err := QueryPattern(ctx, pattern, query, current, allowed)
+func QueryPackages(loaderstate *State, ctx context.Context, pattern, query string, current func(string) string, allowed AllowedFunc) ([]QueryResult, error) {
+ pkgMods, modOnly, err := QueryPattern(loaderstate, ctx, pattern, query, current, allowed)
if len(pkgMods) == 0 && err == nil {
- replacement := Replacement(modOnly.Mod)
+ replacement := Replacement(loaderstate, modOnly.Mod)
return nil, &PackageNotInModuleError{
Mod: modOnly.Mod,
Replacement: replacement,
@@ -670,7 +670,7 @@ func QueryPackages(ctx context.Context, pattern, query string, current func(stri
//
// QueryPattern always returns at least one QueryResult (which may be only
// modOnly) or a non-nil error.
-func QueryPattern(ctx context.Context, pattern, query string, current func(string) string, allowed AllowedFunc) (pkgMods []QueryResult, modOnly *QueryResult, err error) {
+func QueryPattern(loaderstate *State, ctx context.Context, pattern, query string, current func(string) string, allowed AllowedFunc) (pkgMods []QueryResult, modOnly *QueryResult, err error) {
ctx, span := trace.StartSpan(ctx, "modload.QueryPattern "+pattern+" "+query)
defer span.Done()
@@ -693,15 +693,15 @@ func QueryPattern(ctx context.Context, pattern, query string, current func(strin
}
match = func(mod module.Version, roots []string, isLocal bool) *search.Match {
m := search.NewMatch(pattern)
- matchPackages(ctx, m, imports.AnyTags(), omitStd, []module.Version{mod})
+ matchPackages(loaderstate, ctx, m, imports.AnyTags(), omitStd, []module.Version{mod})
return m
}
} else {
match = func(mod module.Version, roots []string, isLocal bool) *search.Match {
m := search.NewMatch(pattern)
prefix := mod.Path
- if MainModules.Contains(mod.Path) {
- prefix = MainModules.PathPrefix(module.Version{Path: mod.Path})
+ if loaderstate.MainModules.Contains(mod.Path) {
+ prefix = loaderstate.MainModules.PathPrefix(module.Version{Path: mod.Path})
}
for _, root := range roots {
if _, ok, err := dirInModule(pattern, prefix, root, isLocal); err != nil {
@@ -715,8 +715,8 @@ func QueryPattern(ctx context.Context, pattern, query string, current func(strin
}
var mainModuleMatches []module.Version
- for _, mainModule := range MainModules.Versions() {
- m := match(mainModule, modRoots, true)
+ for _, mainModule := range loaderstate.MainModules.Versions() {
+ m := match(mainModule, loaderstate.modRoots, true)
if len(m.Pkgs) > 0 {
if query != "upgrade" && query != "patch" {
return nil, nil, &QueryMatchesPackagesInMainModuleError{
@@ -756,7 +756,7 @@ func QueryPattern(ctx context.Context, pattern, query string, current func(strin
var (
results []QueryResult
- candidateModules = modulePrefixesExcludingTarget(base)
+ candidateModules = modulePrefixesExcludingTarget(loaderstate, base)
)
if len(candidateModules) == 0 {
if modOnly != nil {
@@ -783,7 +783,7 @@ func QueryPattern(ctx context.Context, pattern, query string, current func(strin
pathCurrent := current(path)
r.Mod.Path = path
- r.Rev, err = queryProxy(ctx, proxy, path, query, pathCurrent, allowed, nil)
+ r.Rev, err = queryProxy(loaderstate, ctx, proxy, path, query, pathCurrent, allowed, nil)
if err != nil {
return r, err
}
@@ -791,7 +791,7 @@ func QueryPattern(ctx context.Context, pattern, query string, current func(strin
if gover.IsToolchain(r.Mod.Path) {
return r, nil
}
- root, isLocal, err := fetch(ctx, r.Mod)
+ root, isLocal, err := fetch(loaderstate, ctx, r.Mod)
if err != nil {
return r, err
}
@@ -801,7 +801,7 @@ func QueryPattern(ctx context.Context, pattern, query string, current func(strin
if err := firstError(m); err != nil {
return r, err
}
- replacement := Replacement(r.Mod)
+ replacement := Replacement(loaderstate, r.Mod)
return r, &PackageNotInModuleError{
Mod: r.Mod,
Replacement: replacement,
@@ -812,7 +812,7 @@ func QueryPattern(ctx context.Context, pattern, query string, current func(strin
return r, nil
}
- allResults, err := queryPrefixModules(ctx, candidateModules, queryModule)
+ allResults, err := queryPrefixModules(loaderstate, ctx, candidateModules, queryModule)
results = allResults[:0]
for _, r := range allResults {
if len(r.Packages) == 0 {
@@ -838,11 +838,11 @@ func QueryPattern(ctx context.Context, pattern, query string, current func(strin
// itself, sorted by descending length. Prefixes that are not valid module paths
// but are valid package paths (like "m" or "example.com/.gen") are included,
// since they might be replaced.
-func modulePrefixesExcludingTarget(path string) []string {
+func modulePrefixesExcludingTarget(loaderstate *State, path string) []string {
prefixes := make([]string, 0, strings.Count(path, "/")+1)
mainModulePrefixes := make(map[string]bool)
- for _, m := range MainModules.Versions() {
+ for _, m := range loaderstate.MainModules.Versions() {
mainModulePrefixes[m.Path] = true
}
@@ -863,7 +863,7 @@ func modulePrefixesExcludingTarget(path string) []string {
return prefixes
}
-func queryPrefixModules(ctx context.Context, candidateModules []string, queryModule func(ctx context.Context, path string) (QueryResult, error)) (found []QueryResult, err error) {
+func queryPrefixModules(loaderstate *State, ctx context.Context, candidateModules []string, queryModule func(ctx context.Context, path string) (QueryResult, error)) (found []QueryResult, err error) {
ctx, span := trace.StartSpan(ctx, "modload.queryPrefixModules")
defer span.Done()
@@ -905,7 +905,7 @@ func queryPrefixModules(ctx context.Context, candidateModules []string, queryMod
case *PackageNotInModuleError:
// Given the option, prefer to attribute “package not in module”
// to modules other than the main one.
- if noPackage == nil || MainModules.Contains(noPackage.Mod.Path) {
+ if noPackage == nil || loaderstate.MainModules.Contains(noPackage.Mod.Path) {
noPackage = rErr
}
case *NoMatchingVersionError:
@@ -932,7 +932,7 @@ func queryPrefixModules(ctx context.Context, candidateModules []string, queryMod
if notExistErr == nil {
notExistErr = rErr
}
- } else if iv := (*module.InvalidVersionError)(nil); errors.As(rErr, &iv) {
+ } else if _, ok := errors.AsType[*module.InvalidVersionError](rErr); ok {
if invalidVersion == nil {
invalidVersion = rErr
}
@@ -1096,8 +1096,8 @@ func (e *PackageNotInModuleError) ImportPath() string {
// go.mod with different content. Second, if we don't fetch the .zip, then
// we don't need to verify it in go.sum. This makes 'go list -m -u' faster
// and simpler.
-func versionHasGoMod(_ context.Context, m module.Version) (bool, error) {
- _, data, err := rawGoModData(m)
+func versionHasGoMod(loaderstate *State, _ context.Context, m module.Version) (bool, error) {
+ _, data, err := rawGoModData(loaderstate, m)
if err != nil {
return false, err
}
@@ -1117,7 +1117,7 @@ type versionRepo interface {
var _ versionRepo = modfetch.Repo(nil)
-func lookupRepo(ctx context.Context, proxy, path string) (repo versionRepo, err error) {
+func lookupRepo(loaderstate *State, ctx context.Context, proxy, path string) (repo versionRepo, err error) {
if path != "go" && path != "toolchain" {
err = module.CheckPath(path)
}
@@ -1127,9 +1127,9 @@ func lookupRepo(ctx context.Context, proxy, path string) (repo versionRepo, err
repo = emptyRepo{path: path, err: err}
}
- if MainModules == nil {
+ if loaderstate.MainModules == nil {
return repo, err
- } else if _, ok := MainModules.HighestReplaced()[path]; ok {
+ } else if _, ok := loaderstate.MainModules.HighestReplaced()[path]; ok {
return &replacementRepo{repo: repo}, nil
}
@@ -1186,8 +1186,8 @@ func (rr *replacementRepo) Versions(ctx context.Context, prefix string) (*modfet
}
versions := repoVersions.List
- for _, mm := range MainModules.Versions() {
- if index := MainModules.Index(mm); index != nil && len(index.replace) > 0 {
+ for _, mm := range LoaderState.MainModules.Versions() {
+ if index := LoaderState.MainModules.Index(mm); index != nil && len(index.replace) > 0 {
path := rr.ModulePath()
for m := range index.replace {
if m.Path == path && strings.HasPrefix(m.Version, prefix) && m.Version != "" && !module.IsPseudoVersion(m.Version) {
@@ -1215,8 +1215,8 @@ func (rr *replacementRepo) Stat(ctx context.Context, rev string) (*modfetch.RevI
return info, err
}
var hasReplacements bool
- for _, v := range MainModules.Versions() {
- if index := MainModules.Index(v); index != nil && len(index.replace) > 0 {
+ for _, v := range LoaderState.MainModules.Versions() {
+ if index := LoaderState.MainModules.Index(v); index != nil && len(index.replace) > 0 {
hasReplacements = true
}
}
@@ -1239,7 +1239,7 @@ func (rr *replacementRepo) Stat(ctx context.Context, rev string) (*modfetch.RevI
}
}
- if r := Replacement(module.Version{Path: path, Version: v}); r.Path == "" {
+ if r := Replacement(LoaderState, module.Version{Path: path, Version: v}); r.Path == "" {
return info, err
}
return rr.replacementStat(v)
@@ -1249,7 +1249,7 @@ func (rr *replacementRepo) Latest(ctx context.Context) (*modfetch.RevInfo, error
info, err := rr.repo.Latest(ctx)
path := rr.ModulePath()
- if v, ok := MainModules.HighestReplaced()[path]; ok {
+ if v, ok := LoaderState.MainModules.HighestReplaced()[path]; ok {
if v == "" {
// The only replacement is a wildcard that doesn't specify a version, so
// synthesize a pseudo-version with an appropriate major version and a
@@ -1290,7 +1290,7 @@ type QueryMatchesMainModulesError struct {
}
func (e *QueryMatchesMainModulesError) Error() string {
- if MainModules.Contains(e.Pattern) {
+ if LoaderState.MainModules.Contains(e.Pattern) {
return fmt.Sprintf("can't request version %q of the main module (%s)", e.Query, e.Pattern)
}
diff --git a/src/cmd/go/internal/modload/query_test.go b/src/cmd/go/internal/modload/query_test.go
index 93f8f0d00d1c8d..b4487eebb0d9e9 100644
--- a/src/cmd/go/internal/modload/query_test.go
+++ b/src/cmd/go/internal/modload/query_test.go
@@ -182,7 +182,7 @@ func TestQuery(t *testing.T) {
t.Run(strings.ReplaceAll(tt.path, "/", "_")+"/"+tt.query+"/"+tt.current+"/"+allow, func(t *testing.T) {
t.Parallel()
- info, err := Query(ctx, tt.path, tt.query, tt.current, allowed)
+ info, err := Query(LoaderState, ctx, tt.path, tt.query, tt.current, allowed)
if tt.err != "" {
if err == nil {
t.Errorf("Query(_, %q, %q, %q, %v) = %v, want error %q", tt.path, tt.query, tt.current, allow, info.Version, tt.err)
diff --git a/src/cmd/go/internal/modload/search.go b/src/cmd/go/internal/modload/search.go
index 9ff9738e281118..9951e68ee8e6c9 100644
--- a/src/cmd/go/internal/modload/search.go
+++ b/src/cmd/go/internal/modload/search.go
@@ -41,7 +41,7 @@ const (
// matchPackages is like m.MatchPackages, but uses a local variable (rather than
// a global) for tags, can include or exclude packages in the standard library,
// and is restricted to the given list of modules.
-func matchPackages(ctx context.Context, m *search.Match, tags map[string]bool, filter stdFilter, modules []module.Version) {
+func matchPackages(loaderstate *State, ctx context.Context, m *search.Match, tags map[string]bool, filter stdFilter, modules []module.Version) {
ctx, span := trace.StartSpan(ctx, "modload.matchPackages")
defer span.Done()
@@ -74,7 +74,7 @@ func matchPackages(ctx context.Context, m *search.Match, tags map[string]bool, f
)
q := par.NewQueue(runtime.GOMAXPROCS(0))
- ignorePatternsMap := parseIgnorePatterns(ctx, treeCanMatch, modules)
+ ignorePatternsMap := parseIgnorePatterns(loaderstate, ctx, treeCanMatch, modules)
walkPkgs := func(root, importPathRoot string, prune pruning) {
_, span := trace.StartSpan(ctx, "walkPkgs "+root)
defer span.Done()
@@ -171,13 +171,13 @@ func matchPackages(ctx context.Context, m *search.Match, tags map[string]bool, f
}
if cfg.BuildMod == "vendor" {
- for _, mod := range MainModules.Versions() {
- if modRoot := MainModules.ModRoot(mod); modRoot != "" {
- walkPkgs(modRoot, MainModules.PathPrefix(mod), pruneGoMod|pruneVendor)
+ for _, mod := range loaderstate.MainModules.Versions() {
+ if modRoot := loaderstate.MainModules.ModRoot(mod); modRoot != "" {
+ walkPkgs(modRoot, loaderstate.MainModules.PathPrefix(mod), pruneGoMod|pruneVendor)
}
}
- if HasModRoot() {
- walkPkgs(VendorDir(), "", pruneVendor)
+ if HasModRoot(loaderstate) {
+ walkPkgs(VendorDir(loaderstate), "", pruneVendor)
}
return
}
@@ -191,16 +191,16 @@ func matchPackages(ctx context.Context, m *search.Match, tags map[string]bool, f
root, modPrefix string
isLocal bool
)
- if MainModules.Contains(mod.Path) {
- if MainModules.ModRoot(mod) == "" {
+ if loaderstate.MainModules.Contains(mod.Path) {
+ if loaderstate.MainModules.ModRoot(mod) == "" {
continue // If there is no main module, we can't search in it.
}
- root = MainModules.ModRoot(mod)
- modPrefix = MainModules.PathPrefix(mod)
+ root = loaderstate.MainModules.ModRoot(mod)
+ modPrefix = loaderstate.MainModules.PathPrefix(mod)
isLocal = true
} else {
var err error
- root, isLocal, err = fetch(ctx, mod)
+ root, isLocal, err = fetch(loaderstate, ctx, mod)
if err != nil {
m.AddError(err)
continue
@@ -286,17 +286,17 @@ func walkFromIndex(index *modindex.Module, importPathRoot string, isMatch, treeC
func MatchInModule(ctx context.Context, pattern string, m module.Version, tags map[string]bool) *search.Match {
match := search.NewMatch(pattern)
if m == (module.Version{}) {
- matchPackages(ctx, match, tags, includeStd, nil)
+ matchPackages(LoaderState, ctx, match, tags, includeStd, nil)
}
- LoadModFile(ctx) // Sets Target, needed by fetch and matchPackages.
+ LoadModFile(LoaderState, ctx) // Sets Target, needed by fetch and matchPackages.
if !match.IsLiteral() {
- matchPackages(ctx, match, tags, omitStd, []module.Version{m})
+ matchPackages(LoaderState, ctx, match, tags, omitStd, []module.Version{m})
return match
}
- root, isLocal, err := fetch(ctx, m)
+ root, isLocal, err := fetch(LoaderState, ctx, m)
if err != nil {
match.Errs = []error{err}
return match
@@ -322,7 +322,7 @@ func MatchInModule(ctx context.Context, pattern string, m module.Version, tags m
// parseIgnorePatterns collects all ignore patterns associated with the
// provided list of modules.
// It returns a map of module root -> *search.IgnorePatterns.
-func parseIgnorePatterns(ctx context.Context, treeCanMatch func(string) bool, modules []module.Version) map[string]*search.IgnorePatterns {
+func parseIgnorePatterns(loaderstate *State, ctx context.Context, treeCanMatch func(string) bool, modules []module.Version) map[string]*search.IgnorePatterns {
ignorePatternsMap := make(map[string]*search.IgnorePatterns)
for _, mod := range modules {
if gover.IsToolchain(mod.Path) || !treeCanMatch(mod.Path) {
@@ -330,12 +330,12 @@ func parseIgnorePatterns(ctx context.Context, treeCanMatch func(string) bool, mo
}
var modRoot string
var ignorePatterns []string
- if MainModules.Contains(mod.Path) {
- modRoot = MainModules.ModRoot(mod)
+ if loaderstate.MainModules.Contains(mod.Path) {
+ modRoot = loaderstate.MainModules.ModRoot(mod)
if modRoot == "" {
continue
}
- modIndex := MainModules.Index(mod)
+ modIndex := loaderstate.MainModules.Index(mod)
if modIndex == nil {
continue
}
@@ -344,11 +344,11 @@ func parseIgnorePatterns(ctx context.Context, treeCanMatch func(string) bool, mo
// Skip getting ignore patterns for vendored modules because they
// do not have go.mod files.
var err error
- modRoot, _, err = fetch(ctx, mod)
+ modRoot, _, err = fetch(loaderstate, ctx, mod)
if err != nil {
continue
}
- summary, err := goModSummary(mod)
+ summary, err := goModSummary(loaderstate, mod)
if err != nil {
continue
}
diff --git a/src/cmd/go/internal/modload/vendor.go b/src/cmd/go/internal/modload/vendor.go
index c7fe73193582ab..1fc20ad398b363 100644
--- a/src/cmd/go/internal/modload/vendor.go
+++ b/src/cmd/go/internal/modload/vendor.go
@@ -140,10 +140,10 @@ func readVendorList(vendorDir string) {
// checkVendorConsistency verifies that the vendor/modules.txt file matches (if
// go 1.14) or at least does not contradict (go 1.13 or earlier) the
// requirements and replacements listed in the main module's go.mod file.
-func checkVendorConsistency(indexes []*modFileIndex, modFiles []*modfile.File, modRoots []string) {
+func checkVendorConsistency(loaderstate *State, indexes []*modFileIndex, modFiles []*modfile.File, modRoots []string) {
// readVendorList only needs the main module to get the directory
// the vendor directory is in.
- readVendorList(VendorDir())
+ readVendorList(VendorDir(loaderstate))
if len(modFiles) < 1 {
// We should never get here if there are zero modfiles. Either
@@ -154,7 +154,7 @@ func checkVendorConsistency(indexes []*modFileIndex, modFiles []*modfile.File, m
}
pre114 := false
- if !inWorkspaceMode() { // workspace mode was added after Go 1.14
+ if !inWorkspaceMode(loaderstate) { // workspace mode was added after Go 1.14
if len(indexes) != 1 {
panic(fmt.Errorf("not in workspace mode but number of indexes is %v, not 1", len(indexes)))
}
@@ -188,7 +188,7 @@ func checkVendorConsistency(indexes []*modFileIndex, modFiles []*modfile.File, m
// However, we can at least detect a version mismatch if packages were
// vendored from a non-matching version.
if vv, ok := vendorVersion[r.Mod.Path]; ok && vv != r.Mod.Version {
- vendErrorf(r.Mod, fmt.Sprintf("is explicitly required in go.mod, but vendor/modules.txt indicates %s@%s", r.Mod.Path, vv))
+ vendErrorf(r.Mod, "is explicitly required in go.mod, but vendor/modules.txt indicates %s@%s", r.Mod.Path, vv)
}
} else {
vendErrorf(r.Mod, "is explicitly required in go.mod, but not marked as explicit in vendor/modules.txt")
@@ -215,8 +215,8 @@ func checkVendorConsistency(indexes []*modFileIndex, modFiles []*modfile.File, m
continue // Don't print the same error more than once
}
seenrep[r.Old] = true
- rNew, modRoot, replacementSource := replacementFrom(r.Old)
- rNewCanonical := canonicalizeReplacePath(rNew, modRoot)
+ rNew, modRoot, replacementSource := replacementFrom(loaderstate, r.Old)
+ rNewCanonical := canonicalizeReplacePath(loaderstate, rNew, modRoot)
vr := vendorMeta[r.Old].Replacement
if vr == (module.Version{}) {
if rNewCanonical == (module.Version{}) {
@@ -236,8 +236,8 @@ func checkVendorConsistency(indexes []*modFileIndex, modFiles []*modfile.File, m
for _, modFile := range modFiles {
checkReplace(modFile.Replace)
}
- if MainModules.workFile != nil {
- checkReplace(MainModules.workFile.Replace)
+ if loaderstate.MainModules.workFile != nil {
+ checkReplace(loaderstate.MainModules.workFile.Replace)
}
for _, mod := range vendorList {
@@ -252,7 +252,7 @@ func checkVendorConsistency(indexes []*modFileIndex, modFiles []*modfile.File, m
}
if !foundRequire {
article := ""
- if inWorkspaceMode() {
+ if inWorkspaceMode(loaderstate) {
article = "a "
}
vendErrorf(mod, "is marked as explicit in vendor/modules.txt, but not explicitly required in %vgo.mod", article)
@@ -262,9 +262,9 @@ func checkVendorConsistency(indexes []*modFileIndex, modFiles []*modfile.File, m
}
for _, mod := range vendorReplaced {
- r := Replacement(mod)
+ r := Replacement(loaderstate, mod)
replacementSource := "go.mod"
- if inWorkspaceMode() {
+ if inWorkspaceMode(loaderstate) {
replacementSource = "the workspace"
}
if r == (module.Version{}) {
@@ -276,9 +276,9 @@ func checkVendorConsistency(indexes []*modFileIndex, modFiles []*modfile.File, m
if vendErrors.Len() > 0 {
subcmd := "mod"
- if inWorkspaceMode() {
+ if inWorkspaceMode(loaderstate) {
subcmd = "work"
}
- base.Fatalf("go: inconsistent vendoring in %s:%s\n\n\tTo ignore the vendor directory, use -mod=readonly or -mod=mod.\n\tTo sync the vendor directory, run:\n\t\tgo %s vendor", filepath.Dir(VendorDir()), vendErrors, subcmd)
+ base.Fatalf("go: inconsistent vendoring in %s:%s\n\n\tTo ignore the vendor directory, use -mod=readonly or -mod=mod.\n\tTo sync the vendor directory, run:\n\t\tgo %s vendor", filepath.Dir(VendorDir(loaderstate)), vendErrors, subcmd)
}
}
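The vendor.go hunks above show the pattern applied throughout this CL: module-loader globals become fields of an explicit state value that each helper receives as its first parameter. The sketch below is illustrative only (the real modload.State is not reproduced here); it just shows the before/after shape of the refactor, with `LoaderState` standing in for the shared instance that existing call sites pass.

```go
package main

import "fmt"

// A minimal sketch (not the real modload.State) of the refactoring pattern this
// CL applies: package-level loader globals move into a State struct that every
// helper receives explicitly, so the dependency is visible in each signature.
type State struct {
	ForceUseModules bool   // mirrors the former modload.ForceUseModules global
	workFilePath    string // illustrative stand-in for the loaded go.work file
}

// LoaderState plays the role of modload.LoaderState: one shared instance that
// existing call sites pass until they can carry their own state.
var LoaderState = &State{}

// Before: func inWorkspaceMode() bool, reading package-level globals.
// After: the same query with the state passed in, as in the hunks above.
func inWorkspaceMode(loaderstate *State) bool {
	return loaderstate.workFilePath != ""
}

func main() {
	LoaderState.ForceUseModules = true
	LoaderState.workFilePath = "go.work"
	fmt.Println(inWorkspaceMode(LoaderState)) // true
}
```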
diff --git a/src/cmd/go/internal/rf-cleanup.zsh b/src/cmd/go/internal/rf-cleanup.zsh
new file mode 100755
index 00000000000000..c805db56e3611e
--- /dev/null
+++ b/src/cmd/go/internal/rf-cleanup.zsh
@@ -0,0 +1,43 @@
+#!/usr/bin/env zsh
+
+set -eu -o pipefail
+
+# This is a large series of sed commands to clean up after successful use of the
+# `rf inject` command. This script will be used to refactor the codebase to
+# eliminate global state within the module loader. Once that effort is
+# complete, this script will be removed.
+
+find . -name '*.go' -exec \
+ sed -i '
+ #
+ # CompileAction does not use loaderstate.
+ #
+ s/CompileAction(loaderstate[^ ]* \*modload.State, /CompileAction(/g
+ s/CompileAction(modload.LoaderState[^,]*, /CompileAction(/g
+ s/CompileAction(loaderstate[^,]*, /CompileAction(/g
+ #
+ # cgoAction does not use loaderstate.
+ #
+ s/cgoAction(loaderstate \*modload\.State, /cgoAction(/g
+ s/cgoAction(loaderstate, /cgoAction(/g
+ s/cgoAction(loaderstate_, /cgoAction(/g
+ #
+ # Remove redundant mentions of LoaderState from function call sites.
+ #
+ s/(modload\.LoaderState_*, loaderstate,/(loaderstate,/g
+ s/(modload\.LoaderState_*, moduleLoaderState,/(moduleLoaderState,/g
+ s/(modload\.LoaderState_*, modload\.LoaderState/(modload.LoaderState/g
+ s/(modload\.LoaderState_*, loaderstate,/(loaderstate,/g
+ s/(modload\.LoaderState_*, moduleLoaderState,/(moduleLoaderState,/g
+ s/(modload\.LoaderState_*, modload\.LoaderState,/(modload.LoaderState,/g
+ s/(loaderstate_* \*modload.State, loaderstate \*modload.State/(loaderstate *modload.State/g
+ s/(loaderstate_* \*State, loaderstate \*State/(loaderstate *State/g
+ s/(loaderstate_*, loaderstate,/(loaderstate,/g
+ s/(LoaderState_*, loaderstate,/(loaderstate,/g
+ s/(LoaderState_*, loaderState,/(loaderState,/g
+ s/(LoaderState_*, LoaderState,/(LoaderState,/g
+ s/(LoaderState_*, LoaderState,/(LoaderState,/g
+ s/(moduleLoaderState_*, loaderstate,/(loaderstate,/g
+ s/(moduleLoaderState_*, moduleLoaderState,/(moduleLoaderState,/g
+ ' {} \;
+
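To make the cleanup rules concrete: a mechanical `rf inject` pass can leave call sites that receive the loader state twice, and the sed substitutions above collapse them to a single argument. The snippet below is only an approximation of one rule, expressed as a Go regexp so it can be run directly; the doubled-argument call it rewrites is a hypothetical example of the shape the script targets, not a line from this CL.

```go
package main

import (
	"fmt"
	"regexp"
)

// Approximate one cleanup rule from the script above: strip a redundant
// leading modload.LoaderState argument when an explicit loaderstate is
// also passed. (The real script uses sed; this is just to show the rewrite.)
func main() {
	re := regexp.MustCompile(`\(modload\.LoaderState_*, loaderstate,`)

	before := `summary, err := goModSummary(modload.LoaderState, loaderstate, mod)`
	after := re.ReplaceAllString(before, `(loaderstate,`)

	fmt.Println(before)
	fmt.Println(after) // summary, err := goModSummary(loaderstate, mod)
}
```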
diff --git a/src/cmd/go/internal/run/run.go b/src/cmd/go/internal/run/run.go
index b81b1a007bd79d..b6d76514b032d8 100644
--- a/src/cmd/go/internal/run/run.go
+++ b/src/cmd/go/internal/run/run.go
@@ -76,15 +76,15 @@ func runRun(ctx context.Context, cmd *base.Command, args []string) {
// This must be done before modload.Init, but we need to call work.BuildInit
// before loading packages, since it affects package locations, e.g.,
// for -race and -msan.
- modload.ForceUseModules = true
- modload.RootMode = modload.NoRoot
- modload.AllowMissingModuleImports()
- modload.Init()
+ modload.LoaderState.ForceUseModules = true
+ modload.LoaderState.RootMode = modload.NoRoot
+ modload.AllowMissingModuleImports(modload.LoaderState)
+ modload.Init(modload.LoaderState)
} else {
- modload.InitWorkfile()
+ modload.InitWorkfile(modload.LoaderState)
}
- work.BuildInit()
+ work.BuildInit(modload.LoaderState)
b := work.NewBuilder("")
defer func() {
if err := b.Close(); err != nil {
@@ -107,18 +107,18 @@ func runRun(ctx context.Context, cmd *base.Command, args []string) {
base.Fatalf("go: cannot run *_test.go files (%s)", file)
}
}
- p = load.GoFilesPackage(ctx, pkgOpts, files)
+ p = load.GoFilesPackage(modload.LoaderState, ctx, pkgOpts, files)
} else if len(args) > 0 && !strings.HasPrefix(args[0], "-") {
arg := args[0]
var pkgs []*load.Package
if strings.Contains(arg, "@") && !build.IsLocalImport(arg) && !filepath.IsAbs(arg) {
var err error
- pkgs, err = load.PackagesAndErrorsOutsideModule(ctx, pkgOpts, args[:1])
+ pkgs, err = load.PackagesAndErrorsOutsideModule(modload.LoaderState, ctx, pkgOpts, args[:1])
if err != nil {
base.Fatal(err)
}
} else {
- pkgs = load.PackagesAndErrors(ctx, pkgOpts, args[:1])
+ pkgs = load.PackagesAndErrors(modload.LoaderState, ctx, pkgOpts, args[:1])
}
if len(pkgs) == 0 {
@@ -140,7 +140,7 @@ func runRun(ctx context.Context, cmd *base.Command, args []string) {
load.CheckPackageErrors([]*load.Package{p})
if cfg.BuildCover {
- load.PrepareForCoverageBuild([]*load.Package{p})
+ load.PrepareForCoverageBuild(modload.LoaderState, []*load.Package{p})
}
p.Internal.OmitDebug = true
@@ -166,7 +166,7 @@ func runRun(ctx context.Context, cmd *base.Command, args []string) {
p.Internal.ExeName = p.DefaultExecName()
}
- a1 := b.LinkAction(work.ModeBuild, work.ModeBuild, p)
+ a1 := b.LinkAction(modload.LoaderState, work.ModeBuild, work.ModeBuild, p)
a1.CacheExecutable = true
a := &work.Action{Mode: "go run", Actor: work.ActorFunc(buildRunProgram), Args: cmdArgs, Deps: []*work.Action{a1}}
b.Do(ctx, a)
diff --git a/src/cmd/go/internal/telemetrystats/telemetrystats.go b/src/cmd/go/internal/telemetrystats/telemetrystats.go
index d5b642240f16b7..9586324551dfc9 100644
--- a/src/cmd/go/internal/telemetrystats/telemetrystats.go
+++ b/src/cmd/go/internal/telemetrystats/telemetrystats.go
@@ -24,7 +24,7 @@ func Increment() {
func incrementConfig() {
if !modload.WillBeEnabled() {
counter.Inc("go/mode:gopath")
- } else if workfile := modload.FindGoWork(base.Cwd()); workfile != "" {
+ } else if workfile := modload.FindGoWork(modload.LoaderState, base.Cwd()); workfile != "" {
counter.Inc("go/mode:workspace")
} else {
counter.Inc("go/mode:module")
diff --git a/src/cmd/go/internal/test/flagdefs.go b/src/cmd/go/internal/test/flagdefs.go
index 8aa0bfc2bf3120..b8b4bf649e42e7 100644
--- a/src/cmd/go/internal/test/flagdefs.go
+++ b/src/cmd/go/internal/test/flagdefs.go
@@ -9,6 +9,7 @@ package test
// passFlagToTest contains the flags that should be forwarded to
// the test binary with the prefix "test.".
var passFlagToTest = map[string]bool{
+ "artifacts": true,
"bench": true,
"benchmem": true,
"benchtime": true,
diff --git a/src/cmd/go/internal/test/test.go b/src/cmd/go/internal/test/test.go
index 8bfb3c149b6c69..5e5d79a39f001f 100644
--- a/src/cmd/go/internal/test/test.go
+++ b/src/cmd/go/internal/test/test.go
@@ -192,6 +192,10 @@ and -show_bytes options of pprof control how the information is presented.
The following flags are recognized by the 'go test' command and
control the execution of any test:
+ -artifacts
+ Save test artifacts in the directory specified by -outputdir.
+ See 'go doc testing.T.ArtifactDir'.
+
-bench regexp
Run only those benchmarks matching a regular expression.
By default, no benchmarks are run.
@@ -286,6 +290,10 @@ control the execution of any test:
This will only list top-level tests. No subtest or subbenchmarks will be
shown.
+ -outputdir directory
+ Place output files from profiling and test artifacts in the
+ specified directory, by default the directory in which "go test" is running.
+
-parallel n
Allow parallel execution of test functions that call t.Parallel, and
fuzz targets that call t.Parallel when running the seed corpus.
@@ -397,10 +405,6 @@ profile the tests during execution:
Sample 1 in n stack traces of goroutines holding a
contended mutex.
- -outputdir directory
- Place output files from profiling in the specified directory,
- by default the directory in which "go test" is running.
-
-trace trace.out
Write an execution trace to the specified file before exiting.
@@ -540,6 +544,7 @@ See the documentation of the testing package for more information.
}
var (
+ testArtifacts bool // -artifacts flag
testBench string // -bench flag
testC bool // -c flag
testCoverPkgs []*load.Package // -coverpkg flag
@@ -678,7 +683,7 @@ var defaultVetFlags = []string{
func runTest(ctx context.Context, cmd *base.Command, args []string) {
pkgArgs, testArgs = testFlags(args)
- modload.InitWorkfile() // The test command does custom flag processing; initialize workspaces after that.
+ modload.InitWorkfile(modload.LoaderState) // The test command does custom flag processing; initialize workspaces after that.
if cfg.DebugTrace != "" {
var close func() error
@@ -699,12 +704,13 @@ func runTest(ctx context.Context, cmd *base.Command, args []string) {
work.FindExecCmd() // initialize cached result
- work.BuildInit()
+ work.BuildInit(modload.LoaderState)
work.VetFlags = testVet.flags
work.VetExplicit = testVet.explicit
+ work.VetTool = base.Tool("vet")
pkgOpts := load.PackageOpts{ModResolveTests: true}
- pkgs = load.PackagesAndErrors(ctx, pkgOpts, pkgArgs)
+ pkgs = load.PackagesAndErrors(modload.LoaderState, ctx, pkgOpts, pkgArgs)
// We *don't* call load.CheckPackageErrors here because we want to report
// loading errors as per-package test setup errors later.
if len(pkgs) == 0 {
@@ -730,12 +736,12 @@ func runTest(ctx context.Context, cmd *base.Command, args []string) {
// the module cache (or permanently alter the behavior of std tests for all
// users) by writing the failing input to the package's testdata directory.
// (See https://golang.org/issue/48495 and test_fuzz_modcache.txt.)
- mainMods := modload.MainModules
+ mainMods := modload.LoaderState.MainModules
if m := pkgs[0].Module; m != nil && m.Path != "" {
if !mainMods.Contains(m.Path) {
base.Fatalf("cannot use -fuzz flag on package outside the main module")
}
- } else if pkgs[0].Standard && modload.Enabled() {
+ } else if pkgs[0].Standard && modload.Enabled(modload.LoaderState) {
// Because packages in 'std' and 'cmd' are part of the standard library,
// they are only treated as part of a module in 'go mod' subcommands and
// 'go get'. However, we still don't want to accidentally corrupt their
@@ -861,13 +867,13 @@ func runTest(ctx context.Context, cmd *base.Command, args []string) {
if cfg.BuildCoverPkg != nil {
match := make([]func(*load.Package) bool, len(cfg.BuildCoverPkg))
for i := range cfg.BuildCoverPkg {
- match[i] = load.MatchPackage(cfg.BuildCoverPkg[i], base.Cwd())
+ match[i] = load.MatchPackage(modload.LoaderState, cfg.BuildCoverPkg[i], base.Cwd())
}
// Select for coverage all dependencies matching the -coverpkg
// patterns.
plist := load.TestPackageList(ctx, pkgOpts, pkgs)
- testCoverPkgs = load.SelectCoverPackages(plist, match, "test")
+ testCoverPkgs = load.SelectCoverPackages(modload.LoaderState, plist, match, "test")
if len(testCoverPkgs) > 0 {
// create a new singleton action that will collect up the
// meta-data files from all of the packages mentioned in
@@ -975,7 +981,7 @@ func runTest(ctx context.Context, cmd *base.Command, args []string) {
// happens we'll wind up building the Q compile action
// before updating its deps to include sync/atomic).
if cfg.BuildCoverMode == "atomic" && p.ImportPath != "sync/atomic" {
- load.EnsureImport(p, "sync/atomic")
+ load.EnsureImport(modload.LoaderState, p, "sync/atomic")
}
// Tag the package for static meta-data generation if no
// test files (this works only with the new coverage
@@ -1215,7 +1221,7 @@ func builderTest(b *work.Builder, ctx context.Context, pkgOpts load.PackageOpts,
}
}
- a := b.LinkAction(work.ModeBuild, work.ModeBuild, pmain)
+ a := b.LinkAction(modload.LoaderState, work.ModeBuild, work.ModeBuild, pmain)
a.Target = testDir + testBinary + cfg.ExeSuffix
if cfg.Goos == "windows" {
// There are many reserved words on Windows that,
@@ -1735,8 +1741,7 @@ func (r *runTestActor) Act(b *work.Builder, ctx context.Context, a *work.Action)
} else if errors.Is(err, exec.ErrWaitDelay) {
fmt.Fprintf(cmd.Stdout, "*** Test I/O incomplete %v after exiting.\n", cmd.WaitDelay)
}
- var ee *exec.ExitError
- if len(out) == 0 || !errors.As(err, &ee) || !ee.Exited() {
+ if ee, ok := errors.AsType[*exec.ExitError](err); !ok || !ee.Exited() || len(out) == 0 {
// If there was no test output, print the exit status so that the reason
// for failure is clear.
fmt.Fprintf(cmd.Stdout, "%s\n", err)
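The test.go hunk above is one of many call sites in this CL that replace the two-step errors.As pattern with the generic errors.AsType. A minimal sketch of the rewrite, assuming the generic form `errors.AsType[T](err) (T, bool)` that the new call sites rely on:

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func main() {
	_, err := os.Open("no-such-file")

	// Old style: declare a typed target and pass its address.
	var pathErr *fs.PathError
	if errors.As(err, &pathErr) {
		fmt.Println("errors.As:", pathErr.Path)
	}

	// New style: the type parameter replaces the target variable and the
	// boolean result replaces the separate declaration.
	if pathErr, ok := errors.AsType[*fs.PathError](err); ok {
		fmt.Println("errors.AsType:", pathErr.Path)
	}
}
```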
diff --git a/src/cmd/go/internal/test/testflag.go b/src/cmd/go/internal/test/testflag.go
index 983e8f56e9af09..d6891a1d0b955b 100644
--- a/src/cmd/go/internal/test/testflag.go
+++ b/src/cmd/go/internal/test/testflag.go
@@ -44,6 +44,7 @@ func init() {
// some of them so that cmd/go knows what to do with the test output, or knows
// to build the test in a way that supports the use of the flag.
+ cf.BoolVar(&testArtifacts, "artifacts", false, "")
cf.StringVar(&testBench, "bench", "", "")
cf.Bool("benchmem", false, "")
cf.String("benchtime", "", "")
@@ -260,7 +261,7 @@ func testFlags(args []string) (packageNames, passToTest []string) {
break
}
- if nf := (cmdflag.NonFlagError{}); errors.As(err, &nf) {
+ if nf, ok := errors.AsType[cmdflag.NonFlagError](err); ok {
if !inPkgList && packageNames != nil {
// We already saw the package list previously, and this argument is not
// a flag, so it — and everything after it — must be either a value for
@@ -295,7 +296,7 @@ func testFlags(args []string) (packageNames, passToTest []string) {
inPkgList = false
}
- if nd := (cmdflag.FlagNotDefinedError{}); errors.As(err, &nd) {
+ if nd, ok := errors.AsType[cmdflag.FlagNotDefinedError](err); ok {
// This is a flag we do not know. We must assume that any args we see
// after this might be flag arguments, not package names, so make
// packageNames non-nil to indicate that the package list is complete.
@@ -392,7 +393,8 @@ func testFlags(args []string) (packageNames, passToTest []string) {
// directory, but 'go test' defaults it to the working directory of the 'go'
// command. Set it explicitly if it is needed due to some other flag that
// requests output.
- if testProfile() != "" && !outputDirSet {
+ needOutputDir := testProfile() != "" || testArtifacts
+ if needOutputDir && !outputDirSet {
injectedFlags = append(injectedFlags, "-test.outputdir="+testOutputDir.getAbs())
}
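The testflag.go change above injects `-test.outputdir` whenever `-artifacts` is set, so artifact-producing tests get a destination directory even without profiling flags. A hedged sketch of how a test might use this, assuming `testing.T.ArtifactDir()` returns a per-test directory path (per the `go doc testing.T.ArtifactDir` reference in the help text above):

```go
package example_test

import (
	"os"
	"path/filepath"
	"testing"
)

// Run with: go test -artifacts -outputdir=/tmp/artifacts
func TestWritesArtifact(t *testing.T) {
	dir := t.ArtifactDir() // assumption: new method referenced by the docs above
	out := filepath.Join(dir, "report.txt")
	if err := os.WriteFile(out, []byte("example output\n"), 0o600); err != nil {
		t.Fatal(err)
	}
	t.Logf("wrote artifact to %s", out)
}
```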
diff --git a/src/cmd/go/internal/tool/tool.go b/src/cmd/go/internal/tool/tool.go
index 120ef5339bede0..e95b07d8c813aa 100644
--- a/src/cmd/go/internal/tool/tool.go
+++ b/src/cmd/go/internal/tool/tool.go
@@ -161,9 +161,9 @@ func listTools(ctx context.Context) {
fmt.Println(name)
}
- modload.InitWorkfile()
- modload.LoadModFile(ctx)
- modTools := slices.Sorted(maps.Keys(modload.MainModules.Tools()))
+ modload.InitWorkfile(modload.LoaderState)
+ modload.LoadModFile(modload.LoaderState, ctx)
+ modTools := slices.Sorted(maps.Keys(modload.LoaderState.MainModules.Tools()))
for _, tool := range modTools {
fmt.Println(tool)
}
@@ -252,11 +252,11 @@ func loadBuiltinTool(toolName string) string {
}
func loadModTool(ctx context.Context, name string) string {
- modload.InitWorkfile()
- modload.LoadModFile(ctx)
+ modload.InitWorkfile(modload.LoaderState)
+ modload.LoadModFile(modload.LoaderState, ctx)
matches := []string{}
- for tool := range modload.MainModules.Tools() {
+ for tool := range modload.LoaderState.MainModules.Tools() {
if tool == name || defaultExecName(tool) == name {
matches = append(matches, tool)
}
@@ -308,7 +308,7 @@ func buildAndRunBuiltinTool(ctx context.Context, toolName, tool string, args []s
// Ignore go.mod and go.work: we don't need them, and we want to be able
// to run the tool even if there's an issue with the module or workspace the
// user happens to be in.
- modload.RootMode = modload.NoRoot
+ modload.LoaderState.RootMode = modload.NoRoot
runFunc := func(b *work.Builder, ctx context.Context, a *work.Action) error {
cmdline := str.StringList(builtTool(a), a.Args)
@@ -336,7 +336,7 @@ func buildAndRunModtool(ctx context.Context, toolName, tool string, args []strin
}
func buildAndRunTool(ctx context.Context, tool string, args []string, runTool work.ActorFunc) {
- work.BuildInit()
+ work.BuildInit(modload.LoaderState)
b := work.NewBuilder("")
defer func() {
if err := b.Close(); err != nil {
@@ -345,11 +345,11 @@ func buildAndRunTool(ctx context.Context, tool string, args []string, runTool wo
}()
pkgOpts := load.PackageOpts{MainOnly: true}
- p := load.PackagesAndErrors(ctx, pkgOpts, []string{tool})[0]
+ p := load.PackagesAndErrors(modload.LoaderState, ctx, pkgOpts, []string{tool})[0]
p.Internal.OmitDebug = true
p.Internal.ExeName = p.DefaultExecName()
- a1 := b.LinkAction(work.ModeBuild, work.ModeBuild, p)
+ a1 := b.LinkAction(modload.LoaderState, work.ModeBuild, work.ModeBuild, p)
a1.CacheExecutable = true
a := &work.Action{Mode: "go tool", Actor: runTool, Args: args, Deps: []*work.Action{a1}}
b.Do(ctx, a)
diff --git a/src/cmd/go/internal/toolchain/select.go b/src/cmd/go/internal/toolchain/select.go
index e8712613366e8c..d54277ed1b4bea 100644
--- a/src/cmd/go/internal/toolchain/select.go
+++ b/src/cmd/go/internal/toolchain/select.go
@@ -353,9 +353,9 @@ func Exec(gotoolchain string) {
// Set up modules without an explicit go.mod, to download distribution.
modload.Reset()
- modload.ForceUseModules = true
- modload.RootMode = modload.NoRoot
- modload.Init()
+ modload.LoaderState.ForceUseModules = true
+ modload.LoaderState.RootMode = modload.NoRoot
+ modload.Init(modload.LoaderState)
// Download and unpack toolchain module into module cache.
// Note that multiple go commands might be doing this at the same time,
@@ -529,7 +529,7 @@ func raceSafeCopy(old, new string) error {
// The toolchain line overrides the version line
func modGoToolchain() (file, goVers, toolchain string) {
wd := base.UncachedCwd()
- file = modload.FindGoWork(wd)
+ file = modload.FindGoWork(modload.LoaderState, wd)
// $GOWORK can be set to a file that does not yet exist, if we are running 'go work init'.
// Do not try to load the file in that case
if _, err := os.Stat(file); err != nil {
@@ -692,9 +692,9 @@ func maybeSwitchForGoInstallVersion(minVers string) {
// command lines if we add new flags in the future.
// Set up modules without an explicit go.mod, to download go.mod.
- modload.ForceUseModules = true
- modload.RootMode = modload.NoRoot
- modload.Init()
+ modload.LoaderState.ForceUseModules = true
+ modload.LoaderState.RootMode = modload.NoRoot
+ modload.Init(modload.LoaderState)
defer modload.Reset()
// See internal/load.PackagesAndErrorsOutsideModule
@@ -705,7 +705,7 @@ func maybeSwitchForGoInstallVersion(minVers string) {
allowed = nil
}
noneSelected := func(path string) (version string) { return "none" }
- _, err := modload.QueryPackages(ctx, path, version, noneSelected, allowed)
+ _, err := modload.QueryPackages(modload.LoaderState, ctx, path, version, noneSelected, allowed)
if errors.Is(err, gover.ErrTooNew) {
// Run early switch, same one go install or go run would eventually do,
// if it understood all the command-line flags.
diff --git a/src/cmd/go/internal/vcweb/vcstest/vcstest_test.go b/src/cmd/go/internal/vcweb/vcstest/vcstest_test.go
index 67234ac20d4628..6a6a0eee57ce85 100644
--- a/src/cmd/go/internal/vcweb/vcstest/vcstest_test.go
+++ b/src/cmd/go/internal/vcweb/vcstest/vcstest_test.go
@@ -155,10 +155,10 @@ func TestScripts(t *testing.T) {
t.Log(buf)
}
if err != nil {
- if notInstalled := (vcweb.ServerNotInstalledError{}); errors.As(err, ¬Installed) || errors.Is(err, exec.ErrNotFound) {
+ if _, ok := errors.AsType[vcweb.ServerNotInstalledError](err); ok || errors.Is(err, exec.ErrNotFound) {
t.Skip(err)
}
- if skip := (vcweb.SkipError{}); errors.As(err, &skip) {
+ if skip, ok := errors.AsType[vcweb.SkipError](err); ok {
if skip.Msg == "" {
t.Skip("SKIP")
} else {
diff --git a/src/cmd/go/internal/vcweb/vcweb.go b/src/cmd/go/internal/vcweb/vcweb.go
index b81ff5e63de72a..4b4e127bb042e0 100644
--- a/src/cmd/go/internal/vcweb/vcweb.go
+++ b/src/cmd/go/internal/vcweb/vcweb.go
@@ -244,9 +244,9 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) {
})
if err != nil {
s.logger.Print(err)
- if notFound := (ScriptNotFoundError{}); errors.As(err, ¬Found) {
+ if _, ok := errors.AsType[ScriptNotFoundError](err); ok {
http.NotFound(w, req)
- } else if notInstalled := (ServerNotInstalledError{}); errors.As(err, ¬Installed) || errors.Is(err, exec.ErrNotFound) {
+ } else if _, ok := errors.AsType[ServerNotInstalledError](err); ok || errors.Is(err, exec.ErrNotFound) {
http.Error(w, err.Error(), http.StatusNotImplemented)
} else {
http.Error(w, err.Error(), http.StatusInternalServerError)
diff --git a/src/cmd/go/internal/version/version.go b/src/cmd/go/internal/version/version.go
index c26dd42b4e1a08..781bc080e89fe4 100644
--- a/src/cmd/go/internal/version/version.go
+++ b/src/cmd/go/internal/version/version.go
@@ -168,7 +168,7 @@ func scanFile(file string, info fs.FileInfo, mustPrint bool) bool {
bi, err := buildinfo.ReadFile(file)
if err != nil {
if mustPrint {
- if pathErr := (*os.PathError)(nil); errors.As(err, &pathErr) && filepath.Clean(pathErr.Path) == filepath.Clean(file) {
+ if pathErr, ok := errors.AsType[*os.PathError](err); ok && filepath.Clean(pathErr.Path) == filepath.Clean(file) {
fmt.Fprintf(os.Stderr, "%v\n", file)
} else {
// Skip errors for non-Go binaries.
diff --git a/src/cmd/go/internal/vet/vet.go b/src/cmd/go/internal/vet/vet.go
index 3514be80feb8cc..e274348bd6a38e 100644
--- a/src/cmd/go/internal/vet/vet.go
+++ b/src/cmd/go/internal/vet/vet.go
@@ -2,13 +2,20 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Package vet implements the “go vet” command.
+// Package vet implements the “go vet” and “go fix” commands.
package vet
import (
"context"
+ "encoding/json"
+ "errors"
"fmt"
- "path/filepath"
+ "io"
+ "os"
+ "slices"
+ "strconv"
+ "strings"
+ "sync"
"cmd/go/internal/base"
"cmd/go/internal/cfg"
@@ -18,30 +25,39 @@ import (
"cmd/go/internal/work"
)
-// Break init loop.
-func init() {
- CmdVet.Run = runVet
-}
-
var CmdVet = &base.Command{
CustomFlags: true,
UsageLine: "go vet [build flags] [-vettool prog] [vet flags] [packages]",
Short: "report likely mistakes in packages",
Long: `
-Vet runs the Go vet command on the packages named by the import paths.
+Vet runs the Go vet tool (cmd/vet) on the named packages
+and reports diagnostics.
-For more about vet and its flags, see 'go doc cmd/vet'.
-For more about specifying packages, see 'go help packages'.
-For a list of checkers and their flags, see 'go tool vet help'.
-For details of a specific checker such as 'printf', see 'go tool vet help printf'.
+It supports these flags:
-The -vettool=prog flag selects a different analysis tool with alternative
-or additional checks.
-For example, the 'shadow' analyzer can be built and run using these commands:
+ -c int
+ display offending line with this many lines of context (default -1)
+ -json
+ emit JSON output
+ -fix
+ instead of printing each diagnostic, apply its first fix (if any)
+ -diff
+ instead of applying each fix, print the patch as a unified diff
+
+The -vettool=prog flag selects a different analysis tool with
+alternative or additional checks. For example, the 'shadow' analyzer
+can be built and run using these commands:
go install golang.org/x/tools/go/analysis/passes/shadow/cmd/shadow@latest
go vet -vettool=$(which shadow)
+Alternative vet tools should be built atop golang.org/x/tools/go/analysis/unitchecker,
+which handles the interaction with go vet.
+
+For more about specifying packages, see 'go help packages'.
+For a list of checkers and their flags, see 'go tool vet help'.
+For details of a specific checker such as 'printf', see 'go tool vet help printf'.
+
The build flags supported by go vet are those that control package resolution
and execution, such as -C, -n, -x, -v, -tags, and -toolexec.
For more about these flags, see 'go help build'.
@@ -50,9 +66,64 @@ See also: go fmt, go fix.
`,
}
-func runVet(ctx context.Context, cmd *base.Command, args []string) {
- vetFlags, pkgArgs := vetFlags(args)
- modload.InitWorkfile() // The vet command does custom flag processing; initialize workspaces after that.
+var CmdFix = &base.Command{
+ CustomFlags: true,
+ UsageLine: "go fix [build flags] [-fixtool prog] [fix flags] [packages]",
+ Short: "apply fixes suggested by static checkers",
+ Long: `
+Fix runs the Go fix tool (cmd/fix) on the named packages
+and applies suggested fixes.
+
+It supports these flags:
+
+ -diff
+ instead of applying each fix, print the patch as a unified diff
+
+The -fixtool=prog flag selects a different analysis tool with
+alternative or additional fixes; see the documentation for go vet's
+-vettool flag for details.
+
+For more about specifying packages, see 'go help packages'.
+
+For a list of fixers and their flags, see 'go tool fix help'.
+
+For details of a specific fixer such as 'hostport',
+see 'go tool fix help hostport'.
+
+The build flags supported by go fix are those that control package resolution
+and execution, such as -C, -n, -x, -v, -tags, and -toolexec.
+For more about these flags, see 'go help build'.
+
+See also: go fmt, go vet.
+ `,
+}
+
+func init() {
+ // avoid initialization cycle
+ CmdVet.Run = run
+ CmdFix.Run = run
+
+ addFlags(CmdVet)
+ addFlags(CmdFix)
+}
+
+var (
+ // "go vet -fix" causes fixes to be applied.
+ vetFixFlag = CmdVet.Flag.Bool("fix", false, "apply the first fix (if any) for each diagnostic")
+
+ // The "go fix -fix=name,..." flag is an obsolete flag formerly
+ // used to pass a list of names to the old "cmd/fix -r".
+ fixFixFlag = CmdFix.Flag.String("fix", "", "obsolete; no effect")
+)
+
+// run implements both "go vet" and "go fix".
+func run(ctx context.Context, cmd *base.Command, args []string) {
+ // Compute flags for the vet/fix tool (e.g. cmd/{vet,fix}).
+ toolFlags, pkgArgs := toolFlags(cmd, args)
+
+ // The vet/fix commands do custom flag processing;
+ // initialize workspaces after that.
+ modload.InitWorkfile(modload.LoaderState)
if cfg.DebugTrace != "" {
var close func() error
@@ -71,24 +142,85 @@ func runVet(ctx context.Context, cmd *base.Command, args []string) {
ctx, span := trace.StartSpan(ctx, fmt.Sprint("Running ", cmd.Name(), " command"))
defer span.Done()
- work.BuildInit()
- work.VetFlags = vetFlags
- if len(vetFlags) > 0 {
- work.VetExplicit = true
+ work.BuildInit(modload.LoaderState)
+
+ // Flag theory:
+ //
+ // All flags supported by unitchecker are accepted by go {vet,fix}.
+ // Some arise from each analyzer in the tool (both to enable it
+ // and to configure it), whereas others [-V -c -diff -fix -flags -json]
+ // are core to unitchecker itself.
+ //
+ // Most are passed through to toolFlags, but not all:
+ // * -V and -flags are used by the handshake in the [toolFlags] function;
+ // * these old flags have no effect: [-all -source -tags -v]; and
+ // * the [-c -fix -diff -json] flags are handled specially
+ // as described below:
+ //
+ // command args tool args
+ // go vet => cmd/vet -json Parse stdout, print diagnostics to stderr.
+ // go vet -json => cmd/vet -json Pass stdout through.
+ // go vet -fix [-diff] => cmd/vet -fix [-diff] Pass stdout through.
+ // go fix [-diff] => cmd/fix -fix [-diff] Pass stdout through.
+ // go fix -json => cmd/fix -json Pass stdout through.
+ //
+ // Notes:
+ // * -diff requires "go vet -fix" or "go fix", and no -json.
+ // * -json output is the same in "vet" and "fix" modes,
+ // and describes both diagnostics and fixes (but does not apply them).
+ // * -c=n is supported by the unitchecker, but we reimplement it
+ // here (see printDiagnostics), and do not pass the flag through.
+
+ work.VetExplicit = len(toolFlags) > 0
+
+ if cmd.Name() == "fix" || *vetFixFlag {
+ // fix mode: 'go fix' or 'go vet -fix'
+ if jsonFlag {
+ if diffFlag {
+ base.Fatalf("-json and -diff cannot be used together")
+ }
+ } else {
+ toolFlags = append(toolFlags, "-fix")
+ if diffFlag {
+ toolFlags = append(toolFlags, "-diff")
+ }
+ }
+ if contextFlag != -1 {
+ base.Fatalf("-c flag cannot be used when applying fixes")
+ }
+ } else {
+ // vet mode: 'go vet' without -fix
+ if !jsonFlag {
+ // Post-process the JSON diagnostics on stdout and format
+ // it as "file:line: message" diagnostics on stderr.
+ // (JSON reliably frames diagnostics, fixes, and errors so
+ // that we don't have to parse stderr or interpret non-zero
+ // exit codes, and interacts better with the action cache.)
+ toolFlags = append(toolFlags, "-json")
+ work.VetHandleStdout = printJSONDiagnostics
+ }
+ if diffFlag {
+ base.Fatalf("go vet -diff flag requires -fix")
+ }
}
- if vetTool != "" {
- var err error
- work.VetTool, err = filepath.Abs(vetTool)
- if err != nil {
- base.Fatalf("%v", err)
+
+ // Implement legacy "go fix -fix=name,..." flag.
+ if *fixFixFlag != "" {
+ fmt.Fprintf(os.Stderr, "go %s: the -fix=%s flag is obsolete and has no effect", cmd.Name(), *fixFixFlag)
+
+ // The buildtag fixer is now implemented by cmd/fix.
+ if slices.Contains(strings.Split(*fixFixFlag, ","), "buildtag") {
+ fmt.Fprintf(os.Stderr, "go %s: to enable the buildtag check, use -buildtag", cmd.Name())
}
}
+ work.VetFlags = toolFlags
+
pkgOpts := load.PackageOpts{ModResolveTests: true}
- pkgs := load.PackagesAndErrors(ctx, pkgOpts, pkgArgs)
+ pkgs := load.PackagesAndErrors(modload.LoaderState, ctx, pkgOpts, pkgArgs)
load.CheckPackageErrors(pkgs)
if len(pkgs) == 0 {
- base.Fatalf("no packages to vet")
+ base.Fatalf("no packages to %s", cmd.Name())
}
b := work.NewBuilder("")
@@ -98,7 +230,23 @@ func runVet(ctx context.Context, cmd *base.Command, args []string) {
}
}()
- root := &work.Action{Mode: "go vet"}
+ // To avoid file corruption from duplicate application of
+ // fixes (in fix mode), and duplicate reporting of diagnostics
+ // (in vet mode), we must run the tool only once for each
+ // source file. We achieve that by running on ptest (below)
+ // instead of p.
+ //
+ // As a side benefit, this also allows analyzers to make
+ // "closed world" assumptions and report diagnostics (such as
+ // "this symbol is unused") that might be false if computed
+ // from just the primary package p, falsified by the
+ // additional declarations in test files.
+ //
+ // We needn't worry about intermediate test variants, as they
+ // will only be executed in VetxOnly mode, for facts but not
+ // diagnostics.
+
+ root := &work.Action{Mode: "go " + cmd.Name()}
for _, p := range pkgs {
_, ptest, pxtest, perr := load.TestPackagesFor(ctx, pkgOpts, p, nil)
if perr != nil {
@@ -106,10 +254,11 @@ func runVet(ctx context.Context, cmd *base.Command, args []string) {
continue
}
if len(ptest.GoFiles) == 0 && len(ptest.CgoFiles) == 0 && pxtest == nil {
- base.Errorf("go: can't vet %s: no Go files in %s", p.ImportPath, p.Dir)
+ base.Errorf("go: can't %s %s: no Go files in %s", cmd.Name(), p.ImportPath, p.Dir)
continue
}
if len(ptest.GoFiles) > 0 || len(ptest.CgoFiles) > 0 {
+ // The test package includes all the files of the primary package.
root.Deps = append(root.Deps, b.VetAction(work.ModeBuild, work.ModeBuild, ptest))
}
if pxtest != nil {
@@ -118,3 +267,167 @@ func runVet(ctx context.Context, cmd *base.Command, args []string) {
}
b.Do(ctx, root)
}
+
+// printJSONDiagnostics parses JSON (from the tool's stdout) and
+// prints it (to stderr) in "file:line: message" form.
+// It also ensures that we exit nonzero if there were diagnostics.
+func printJSONDiagnostics(r io.Reader) error {
+ stdout, err := io.ReadAll(r)
+ if err != nil {
+ return err
+ }
+ if len(stdout) > 0 {
+ // unitchecker emits a JSON map of the form:
+ // output maps Package ID -> Analyzer.Name -> (error | []Diagnostic);
+ var tree jsonTree
+ if err := json.Unmarshal([]byte(stdout), &tree); err != nil {
+ return fmt.Errorf("parsing JSON: %v", err)
+ }
+ for _, units := range tree {
+ for analyzer, msg := range units {
+ if msg[0] == '[' {
+ // []Diagnostic
+ var diags []jsonDiagnostic
+ if err := json.Unmarshal([]byte(msg), &diags); err != nil {
+ return fmt.Errorf("parsing JSON diagnostics: %v", err)
+ }
+ for _, diag := range diags {
+ base.SetExitStatus(1)
+ printJSONDiagnostic(analyzer, diag)
+ }
+ } else {
+ // error
+ var e jsonError
+ if err := json.Unmarshal([]byte(msg), &e); err != nil {
+ return fmt.Errorf("parsing JSON error: %v", err)
+ }
+
+ base.SetExitStatus(1)
+ return errors.New(e.Err)
+ }
+ }
+ }
+ }
+ return nil
+}
+
+var stderrMu sync.Mutex // serializes concurrent writes to stderr
+
+func printJSONDiagnostic(analyzer string, diag jsonDiagnostic) {
+ stderrMu.Lock()
+ defer stderrMu.Unlock()
+
+ type posn struct {
+ file string
+ line, col int
+ }
+ parsePosn := func(s string) (_ posn, _ bool) {
+ colon2 := strings.LastIndexByte(s, ':')
+ if colon2 < 0 {
+ return
+ }
+ colon1 := strings.LastIndexByte(s[:colon2], ':')
+ if colon1 < 0 {
+ return
+ }
+ line, err := strconv.Atoi(s[colon1+len(":") : colon2])
+ if err != nil {
+ return
+ }
+ col, err := strconv.Atoi(s[colon2+len(":"):])
+ if err != nil {
+ return
+ }
+ return posn{s[:colon1], line, col}, true
+ }
+
+ print := func(start, end, message string) {
+ if posn, ok := parsePosn(start); ok {
+ // The (*work.Shell).reportCmd method relativizes the
+ // prefix of each line of the subprocess's stdout;
+ // but filenames in JSON aren't at the start of the line,
+ // so we need to apply ShortPath here too.
+ fmt.Fprintf(os.Stderr, "%s:%d:%d: %v\n", base.ShortPath(posn.file), posn.line, posn.col, message)
+ } else {
+ fmt.Fprintf(os.Stderr, "%s: %v\n", start, message)
+ }
+
+ // -c=n: show offending line plus N lines of context.
+ // (Duplicates logic in unitchecker; see analysisflags.PrintPlain.)
+ if contextFlag >= 0 {
+ if end == "" {
+ end = start
+ }
+ var (
+ startPosn, ok1 = parsePosn(start)
+ endPosn, ok2 = parsePosn(end)
+ )
+ if ok1 && ok2 {
+ // TODO(adonovan): respect overlays (like unitchecker does).
+ data, _ := os.ReadFile(startPosn.file)
+ lines := strings.Split(string(data), "\n")
+ for i := startPosn.line - contextFlag; i <= endPosn.line+contextFlag; i++ {
+ if 1 <= i && i <= len(lines) {
+ fmt.Fprintf(os.Stderr, "%d\t%s\n", i, lines[i-1])
+ }
+ }
+ }
+ }
+ }
+
+ // TODO(adonovan): append " [analyzer]" to message. But we must first relax
+ // x/tools/go/analysis/internal/versiontest.TestVettool and revendor; sigh.
+ _ = analyzer
+ print(diag.Posn, diag.End, diag.Message)
+ for _, rel := range diag.Related {
+ print(rel.Posn, rel.End, "\t"+rel.Message)
+ }
+}
+
+// -- JSON schema --
+
+// (populated by golang.org/x/tools/go/analysis/internal/analysisflags/flags.go)
+
+// A jsonTree is a mapping from package ID to analysis name to result.
+// Each result is either a jsonError or a list of jsonDiagnostic.
+type jsonTree map[string]map[string]json.RawMessage
+
+type jsonError struct {
+ Err string `json:"error"`
+}
+
+// A jsonTextEdit describes the replacement of a portion of a file.
+// Start and End are zero-based half-open indices into the original byte
+// sequence of the file, and New is the new text.
+type jsonTextEdit struct {
+ Filename string `json:"filename"`
+ Start int `json:"start"`
+ End int `json:"end"`
+ New string `json:"new"`
+}
+
+// A jsonSuggestedFix describes an edit that should be applied as a whole or not
+// at all. It might contain multiple TextEdits/text_edits if the SuggestedFix
+// consists of multiple non-contiguous edits.
+type jsonSuggestedFix struct {
+ Message string `json:"message"`
+ Edits []jsonTextEdit `json:"edits"`
+}
+
+// A jsonDiagnostic describes the json schema of an analysis.Diagnostic.
+type jsonDiagnostic struct {
+ Category string `json:"category,omitempty"`
+ Posn string `json:"posn"` // e.g. "file.go:line:column"
+ End string `json:"end"`
+ Message string `json:"message"`
+ SuggestedFixes []jsonSuggestedFix `json:"suggested_fixes,omitempty"`
+ Related []jsonRelatedInformation `json:"related,omitempty"`
+}
+
+// A jsonRelatedInformation describes a secondary position and message related to
+// a primary diagnostic.
+type jsonRelatedInformation struct {
+ Posn string `json:"posn"` // e.g. "file.go:line:column"
+ End string `json:"end"`
+ Message string `json:"message"`
+}
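For reference, here is a small, self-contained illustration of the unitchecker JSON that printJSONDiagnostics consumes: package ID -> analyzer name -> list of diagnostics. The payload below is invented for illustration; only the field names mirror the jsonTree/jsonDiagnostic schema declared above.

```go
package main

import (
	"encoding/json"
	"fmt"
)

type tree map[string]map[string]json.RawMessage

type diagnostic struct {
	Posn    string `json:"posn"`
	End     string `json:"end"`
	Message string `json:"message"`
}

func main() {
	const sample = `{
	  "example.com/p": {
	    "printf": [
	      {"posn": "p.go:10:2", "end": "p.go:10:20", "message": "Sprintf call has arguments but no formatting directives"}
	    ]
	  }
	}`

	var t tree
	if err := json.Unmarshal([]byte(sample), &t); err != nil {
		panic(err)
	}
	for pkg, analyzers := range t {
		for analyzer, msg := range analyzers {
			var diags []diagnostic
			if err := json.Unmarshal(msg, &diags); err != nil {
				continue // a JSON object here would be a tool error, not diagnostics
			}
			for _, d := range diags {
				fmt.Printf("%s: %s (%s, from %s)\n", d.Posn, d.Message, analyzer, pkg)
			}
		}
	}
}
```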
diff --git a/src/cmd/go/internal/vet/vetflag.go b/src/cmd/go/internal/vet/vetflag.go
index d0bdb58a504ae7..7342b99d6e36bb 100644
--- a/src/cmd/go/internal/vet/vetflag.go
+++ b/src/cmd/go/internal/vet/vetflag.go
@@ -21,70 +21,83 @@ import (
"cmd/go/internal/work"
)
-// go vet flag processing
-//
-// We query the flags of the tool specified by -vettool and accept any
-// of those flags plus any flag valid for 'go build'. The tool must
-// support -flags, which prints a description of its flags in JSON to
-// stdout.
-
-// vetTool specifies the vet command to run.
-// Any tool that supports the (still unpublished) vet
-// command-line protocol may be supplied; see
-// golang.org/x/tools/go/analysis/unitchecker for one
-// implementation. It is also used by tests.
-//
-// The default behavior (vetTool=="") runs 'go tool vet'.
-var vetTool string // -vettool
-
-func init() {
- // For now, we omit the -json flag for vet because we could plausibly
- // support -json specific to the vet command in the future (perhaps using
- // the same format as build -json).
- work.AddBuildFlags(CmdVet, work.OmitJSONFlag)
- CmdVet.Flag.StringVar(&vetTool, "vettool", "", "")
+// go vet/fix flag processing
+var (
+ // We query the flags of the tool specified by -{vet,fix}tool
+ // and accept any of those flags plus any flag valid for 'go
+ // build'. The tool must support -flags, which prints a
+ // description of its flags in JSON to stdout.
+
+ // toolFlag specifies the vet/fix command to run.
+ // Any tool that supports the (unpublished) vet
+ // command-line protocol may be supplied; see
+ // golang.org/x/tools/go/analysis/unitchecker for the
+ // sole implementation. It is also used by tests.
+ //
+ // The default behavior ("") runs 'go tool {vet,fix}'.
+ //
+ // Do not access this flag directly; use [parseToolFlag].
+ toolFlag string // -{vet,fix}tool
+ diffFlag bool // -diff
+ jsonFlag bool // -json
+ contextFlag = -1 // -c=n
+)
+
+func addFlags(cmd *base.Command) {
+ // We run the compiler for export data.
+ // Suppress the build -json flag; we define our own.
+ work.AddBuildFlags(cmd, work.OmitJSONFlag)
+
+ cmd.Flag.StringVar(&toolFlag, cmd.Name()+"tool", "", "") // -vettool or -fixtool
+ cmd.Flag.BoolVar(&diffFlag, "diff", false, "print diff instead of applying it")
+ cmd.Flag.BoolVar(&jsonFlag, "json", false, "print diagnostics and fixes as JSON")
+ cmd.Flag.IntVar(&contextFlag, "c", -1, "display offending line with this many lines of context")
}
-func parseVettoolFlag(args []string) {
- // Extract -vettool by ad hoc flag processing:
+// parseToolFlag scans args for -{vet,fix}tool and returns the effective tool filename.
+func parseToolFlag(cmd *base.Command, args []string) string {
+ toolFlagName := cmd.Name() + "tool" // vettool or fixtool
+
+ // Extract -{vet,fix}tool by ad hoc flag processing:
// its value is needed even before we can declare
// the flags available during main flag processing.
for i, arg := range args {
- if arg == "-vettool" || arg == "--vettool" {
+ if arg == "-"+toolFlagName || arg == "--"+toolFlagName {
if i+1 >= len(args) {
log.Fatalf("%s requires a filename", arg)
}
- vetTool = args[i+1]
- return
- } else if strings.HasPrefix(arg, "-vettool=") ||
- strings.HasPrefix(arg, "--vettool=") {
- vetTool = arg[strings.IndexByte(arg, '=')+1:]
- return
+ toolFlag = args[i+1]
+ break
+ } else if strings.HasPrefix(arg, "-"+toolFlagName+"=") ||
+ strings.HasPrefix(arg, "--"+toolFlagName+"=") {
+ toolFlag = arg[strings.IndexByte(arg, '=')+1:]
+ break
}
}
-}
-// vetFlags processes the command line, splitting it at the first non-flag
-// into the list of flags and list of packages.
-func vetFlags(args []string) (passToVet, packageNames []string) {
- parseVettoolFlag(args)
-
- // Query the vet command for its flags.
- var tool string
- if vetTool == "" {
- tool = base.Tool("vet")
- } else {
- var err error
- tool, err = filepath.Abs(vetTool)
+ if toolFlag != "" {
+ tool, err := filepath.Abs(toolFlag)
if err != nil {
log.Fatal(err)
}
+ return tool
}
+
+ return base.Tool(cmd.Name()) // default to 'go tool vet|fix'
+}
+
+// toolFlags processes the command line, splitting it at the first non-flag
+// into the list of flags and list of packages.
+func toolFlags(cmd *base.Command, args []string) (passToTool, packageNames []string) {
+ tool := parseToolFlag(cmd, args)
+ work.VetTool = tool
+
+ // Query the tool for its flags.
out := new(bytes.Buffer)
- vetcmd := exec.Command(tool, "-flags")
- vetcmd.Stdout = out
- if err := vetcmd.Run(); err != nil {
- fmt.Fprintf(os.Stderr, "go: can't execute %s -flags: %v\n", tool, err)
+ toolcmd := exec.Command(tool, "-flags")
+ toolcmd.Stdout = out
+ if err := toolcmd.Run(); err != nil {
+ fmt.Fprintf(os.Stderr, "go: %s -flags failed: %v\n", tool, err)
base.SetExitStatus(2)
base.Exit()
}
@@ -99,15 +112,20 @@ func vetFlags(args []string) (passToVet, packageNames []string) {
base.Exit()
}
- // Add vet's flags to CmdVet.Flag.
+ // Add tool's flags to cmd.Flag.
//
- // Some flags, in particular -tags and -v, are known to vet but
+ // Some flags, in particular -tags and -v, are known to the tool but
// also defined as build flags. This works fine, so we omit duplicates here.
- // However some, like -x, are known to the build but not to vet.
- isVetFlag := make(map[string]bool, len(analysisFlags))
- cf := CmdVet.Flag
+ // However some, like -x, are known to the build but not to the tool.
+ isToolFlag := make(map[string]bool, len(analysisFlags))
+ cf := cmd.Flag
for _, f := range analysisFlags {
- isVetFlag[f.Name] = true
+ // We reimplement the unitchecker's -c=n flag.
+ // Don't allow it to be passed through.
+ if f.Name == "c" {
+ continue
+ }
+ isToolFlag[f.Name] = true
if cf.Lookup(f.Name) == nil {
if f.Bool {
cf.Bool(f.Name, false, "")
@@ -117,22 +135,22 @@ func vetFlags(args []string) (passToVet, packageNames []string) {
}
}
- // Record the set of vet tool flags set by GOFLAGS. We want to pass them to
- // the vet tool, but only if they aren't overridden by an explicit argument.
- base.SetFromGOFLAGS(&CmdVet.Flag)
+ // Record the set of tool flags set by GOFLAGS. We want to pass them to
+ // the tool, but only if they aren't overridden by an explicit argument.
+ base.SetFromGOFLAGS(&cmd.Flag)
addFromGOFLAGS := map[string]bool{}
- CmdVet.Flag.Visit(func(f *flag.Flag) {
- if isVetFlag[f.Name] {
+ cmd.Flag.Visit(func(f *flag.Flag) {
+ if isToolFlag[f.Name] {
addFromGOFLAGS[f.Name] = true
}
})
explicitFlags := make([]string, 0, len(args))
for len(args) > 0 {
- f, remainingArgs, err := cmdflag.ParseOne(&CmdVet.Flag, args)
+ f, remainingArgs, err := cmdflag.ParseOne(&cmd.Flag, args)
if errors.Is(err, flag.ErrHelp) {
- exitWithUsage()
+ exitWithUsage(cmd)
}
if errors.Is(err, cmdflag.ErrFlagTerminator) {
@@ -142,7 +160,7 @@ func vetFlags(args []string) (passToVet, packageNames []string) {
break
}
- if nf := (cmdflag.NonFlagError{}); errors.As(err, &nf) {
+ if _, ok := errors.AsType[cmdflag.NonFlagError](err); ok {
// Everything from here on out — including the argument we just consumed —
// must be a package name.
packageNames = args
@@ -151,12 +169,12 @@ func vetFlags(args []string) (passToVet, packageNames []string) {
if err != nil {
fmt.Fprintln(os.Stderr, err)
- exitWithUsage()
+ exitWithUsage(cmd)
}
- if isVetFlag[f.Name] {
+ if isToolFlag[f.Name] {
// Forward the raw arguments rather than cleaned equivalents, just in
- // case the vet tool parses them idiosyncratically.
+ // case the tool parses them idiosyncratically.
explicitFlags = append(explicitFlags, args[:len(args)-len(remainingArgs)]...)
// This flag has been overridden explicitly, so don't forward its implicit
@@ -168,26 +186,26 @@ func vetFlags(args []string) (passToVet, packageNames []string) {
}
// Prepend arguments from GOFLAGS before other arguments.
- CmdVet.Flag.Visit(func(f *flag.Flag) {
+ cmd.Flag.Visit(func(f *flag.Flag) {
if addFromGOFLAGS[f.Name] {
- passToVet = append(passToVet, fmt.Sprintf("-%s=%s", f.Name, f.Value))
+ passToTool = append(passToTool, fmt.Sprintf("-%s=%s", f.Name, f.Value))
}
})
- passToVet = append(passToVet, explicitFlags...)
- return passToVet, packageNames
+ passToTool = append(passToTool, explicitFlags...)
+ return passToTool, packageNames
}
-func exitWithUsage() {
- fmt.Fprintf(os.Stderr, "usage: %s\n", CmdVet.UsageLine)
- fmt.Fprintf(os.Stderr, "Run 'go help %s' for details.\n", CmdVet.LongName())
+func exitWithUsage(cmd *base.Command) {
+ fmt.Fprintf(os.Stderr, "usage: %s\n", cmd.UsageLine)
+ fmt.Fprintf(os.Stderr, "Run 'go help %s' for details.\n", cmd.LongName())
// This part is additional to what (*Command).Usage does:
- cmd := "go tool vet"
- if vetTool != "" {
- cmd = vetTool
+ tool := toolFlag
+ if tool == "" {
+ tool = "go tool " + cmd.Name()
}
- fmt.Fprintf(os.Stderr, "Run '%s help' for a full list of flags and analyzers.\n", cmd)
- fmt.Fprintf(os.Stderr, "Run '%s -help' for an overview.\n", cmd)
+ fmt.Fprintf(os.Stderr, "Run '%s help' for a full list of flags and analyzers.\n", tool)
+ fmt.Fprintf(os.Stderr, "Run '%s -help' for an overview.\n", tool)
base.SetExitStatus(2)
base.Exit()
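The vetflag.go changes keep the same handshake for both commands: the go command runs the selected tool with -flags and reads a JSON description of its flags from stdout. A hedged sketch of that handshake follows; only the Name and Bool fields are assumed, since those are the ones the flag-registration code above consults, and the real schema may carry more fields.

```go
package main

import (
	"encoding/json"
	"fmt"
	"os/exec"
)

type toolFlag struct {
	Name string `json:"Name"`
	Bool bool   `json:"Bool"`
}

func main() {
	// Ask the default vet tool to describe its flags, as toolFlags does above.
	out, err := exec.Command("go", "tool", "vet", "-flags").Output()
	if err != nil {
		fmt.Println("handshake failed:", err)
		return
	}
	var flags []toolFlag
	if err := json.Unmarshal(out, &flags); err != nil {
		fmt.Println("unexpected -flags output:", err)
		return
	}
	for _, f := range flags {
		fmt.Printf("%s (bool=%v)\n", f.Name, f.Bool)
	}
}
```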
diff --git a/src/cmd/go/internal/work/action.go b/src/cmd/go/internal/work/action.go
index 3636f642e26401..f1f3bcea38d9d5 100644
--- a/src/cmd/go/internal/work/action.go
+++ b/src/cmd/go/internal/work/action.go
@@ -28,6 +28,7 @@ import (
"cmd/go/internal/cache"
"cmd/go/internal/cfg"
"cmd/go/internal/load"
+ "cmd/go/internal/modload"
"cmd/go/internal/str"
"cmd/go/internal/trace"
"cmd/internal/buildid"
@@ -392,7 +393,7 @@ func (b *Builder) NewObjdir() string {
// at shlibpath. For the native toolchain this list is stored, newline separated, in
// an ELF note with name "Go\x00\x00" and type 1. For GCCGO it is extracted from the
// .go_export section.
-func readpkglist(shlibpath string) (pkgs []*load.Package) {
+func readpkglist(loaderstate *modload.State, shlibpath string) (pkgs []*load.Package) {
var stk load.ImportStack
if cfg.BuildToolchainName == "gccgo" {
f, err := elf.Open(shlibpath)
@@ -412,7 +413,7 @@ func readpkglist(shlibpath string) (pkgs []*load.Package) {
for _, line := range bytes.Split(data, []byte{'\n'}) {
if path, found := bytes.CutPrefix(line, pkgpath); found {
path = bytes.TrimSuffix(path, []byte{';'})
- pkgs = append(pkgs, load.LoadPackageWithFlags(string(path), base.Cwd(), &stk, nil, 0))
+ pkgs = append(pkgs, load.LoadPackageWithFlags(loaderstate, string(path), base.Cwd(), &stk, nil, 0))
}
}
} else {
@@ -423,7 +424,7 @@ func readpkglist(shlibpath string) (pkgs []*load.Package) {
scanner := bufio.NewScanner(bytes.NewBuffer(pkglistbytes))
for scanner.Scan() {
t := scanner.Text()
- pkgs = append(pkgs, load.LoadPackageWithFlags(t, base.Cwd(), &stk, nil, 0))
+ pkgs = append(pkgs, load.LoadPackageWithFlags(loaderstate, t, base.Cwd(), &stk, nil, 0))
}
}
return
@@ -445,7 +446,7 @@ func (b *Builder) cacheAction(mode string, p *load.Package, f func() *Action) *A
// AutoAction returns the "right" action for go build or go install of p.
func (b *Builder) AutoAction(mode, depMode BuildMode, p *load.Package) *Action {
if p.Name == "main" {
- return b.LinkAction(mode, depMode, p)
+ return b.LinkAction(modload.LoaderState, mode, depMode, p)
}
return b.CompileAction(mode, depMode, p)
}
@@ -913,7 +914,7 @@ func (b *Builder) vetAction(mode, depMode BuildMode, p *load.Package) *Action {
// LinkAction returns the action for linking p into an executable
// and possibly installing the result (according to mode).
// depMode is the action (build or install) to use when compiling dependencies.
-func (b *Builder) LinkAction(mode, depMode BuildMode, p *load.Package) *Action {
+func (b *Builder) LinkAction(loaderstate *modload.State, mode, depMode BuildMode, p *load.Package) *Action {
// Construct link action.
a := b.cacheAction("link", p, func() *Action {
a := &Action{
@@ -948,7 +949,7 @@ func (b *Builder) LinkAction(mode, depMode BuildMode, p *load.Package) *Action {
}
a.Target = a.Objdir + filepath.Join("exe", name) + cfg.ExeSuffix
a.built = a.Target
- b.addTransitiveLinkDeps(a, a1, "")
+ b.addTransitiveLinkDeps(loaderstate, a, a1, "")
// Sequence the build of the main package (a1) strictly after the build
// of all other dependencies that go into the link. It is likely to be after
@@ -1034,7 +1035,7 @@ func (b *Builder) installAction(a1 *Action, mode BuildMode) *Action {
// makes sure those are present in a.Deps.
// If shlib is non-empty, then a corresponds to the build and installation of shlib,
// so any rebuild of shlib should not be added as a dependency.
-func (b *Builder) addTransitiveLinkDeps(a, a1 *Action, shlib string) {
+func (b *Builder) addTransitiveLinkDeps(loaderstate *modload.State, a, a1 *Action, shlib string) {
// Expand Deps to include all built packages, for the linker.
// Use breadth-first search to find rebuilt-for-test packages
// before the standard ones.
@@ -1075,7 +1076,7 @@ func (b *Builder) addTransitiveLinkDeps(a, a1 *Action, shlib string) {
// we'll end up building an overall library or executable that depends at runtime
// on other libraries that are out-of-date, which is clearly not good either.
// We call it ModeBuggyInstall to make clear that this is not right.
- a.Deps = append(a.Deps, b.linkSharedAction(ModeBuggyInstall, ModeBuggyInstall, p1.Shlib, nil))
+ a.Deps = append(a.Deps, b.linkSharedAction(loaderstate, ModeBuggyInstall, ModeBuggyInstall, p1.Shlib, nil))
}
}
}
@@ -1116,21 +1117,21 @@ func (b *Builder) buildmodeShared(mode, depMode BuildMode, args []string, pkgs [
if err != nil {
base.Fatalf("%v", err)
}
- return b.linkSharedAction(mode, depMode, name, a1)
+ return b.linkSharedAction(modload.LoaderState, mode, depMode, name, a1)
}
// linkSharedAction takes a grouping action a1 corresponding to a list of built packages
// and returns an action that links them together into a shared library with the name shlib.
// If a1 is nil, shlib should be an absolute path to an existing shared library,
// and then linkSharedAction reads that library to find out the package list.
-func (b *Builder) linkSharedAction(mode, depMode BuildMode, shlib string, a1 *Action) *Action {
+func (b *Builder) linkSharedAction(loaderstate *modload.State, mode, depMode BuildMode, shlib string, a1 *Action) *Action {
fullShlib := shlib
shlib = filepath.Base(shlib)
a := b.cacheAction("build-shlib "+shlib, nil, func() *Action {
if a1 == nil {
// TODO(rsc): Need to find some other place to store config,
// not in pkg directory. See golang.org/issue/22196.
- pkgs := readpkglist(fullShlib)
+ pkgs := readpkglist(loaderstate, fullShlib)
a1 = &Action{
Mode: "shlib packages",
}
@@ -1173,7 +1174,7 @@ func (b *Builder) linkSharedAction(mode, depMode BuildMode, shlib string, a1 *Ac
}
}
var stk load.ImportStack
- p := load.LoadPackageWithFlags(pkg, base.Cwd(), &stk, nil, 0)
+ p := load.LoadPackageWithFlags(loaderstate, pkg, base.Cwd(), &stk, nil, 0)
if p.Error != nil {
base.Fatalf("load %s: %v", pkg, p.Error)
}
@@ -1201,7 +1202,7 @@ func (b *Builder) linkSharedAction(mode, depMode BuildMode, shlib string, a1 *Ac
add(a, dep, true)
}
}
- b.addTransitiveLinkDeps(a, a1, shlib)
+ b.addTransitiveLinkDeps(loaderstate, a, a1, shlib)
return a
})
diff --git a/src/cmd/go/internal/work/build.go b/src/cmd/go/internal/work/build.go
index 6741b39f051cd6..45acbe85c2a6d0 100644
--- a/src/cmd/go/internal/work/build.go
+++ b/src/cmd/go/internal/work/build.go
@@ -459,8 +459,8 @@ func oneMainPkg(pkgs []*load.Package) []*load.Package {
var pkgsFilter = func(pkgs []*load.Package) []*load.Package { return pkgs }
func runBuild(ctx context.Context, cmd *base.Command, args []string) {
- modload.InitWorkfile()
- BuildInit()
+ modload.InitWorkfile(modload.LoaderState)
+ BuildInit(modload.LoaderState)
b := NewBuilder("")
defer func() {
if err := b.Close(); err != nil {
@@ -468,7 +468,7 @@ func runBuild(ctx context.Context, cmd *base.Command, args []string) {
}
}()
- pkgs := load.PackagesAndErrors(ctx, load.PackageOpts{AutoVCS: true}, args)
+ pkgs := load.PackagesAndErrors(modload.LoaderState, ctx, load.PackageOpts{AutoVCS: true}, args)
load.CheckPackageErrors(pkgs)
explicitO := len(cfg.BuildO) > 0
@@ -503,7 +503,7 @@ func runBuild(ctx context.Context, cmd *base.Command, args []string) {
}
if cfg.BuildCover {
- load.PrepareForCoverageBuild(pkgs)
+ load.PrepareForCoverageBuild(modload.LoaderState, pkgs)
}
if cfg.BuildO != "" {
@@ -694,10 +694,10 @@ func runInstall(ctx context.Context, cmd *base.Command, args []string) {
}
}
- modload.InitWorkfile()
- BuildInit()
- pkgs := load.PackagesAndErrors(ctx, load.PackageOpts{AutoVCS: true}, args)
- if cfg.ModulesEnabled && !modload.HasModRoot() {
+ modload.InitWorkfile(modload.LoaderState)
+ BuildInit(modload.LoaderState)
+ pkgs := load.PackagesAndErrors(modload.LoaderState, ctx, load.PackageOpts{AutoVCS: true}, args)
+ if cfg.ModulesEnabled && !modload.HasModRoot(modload.LoaderState) {
haveErrors := false
allMissingErrors := true
for _, pkg := range pkgs {
@@ -705,7 +705,7 @@ func runInstall(ctx context.Context, cmd *base.Command, args []string) {
continue
}
haveErrors = true
- if missingErr := (*modload.ImportMissingError)(nil); !errors.As(pkg.Error, &missingErr) {
+ if _, ok := errors.AsType[*modload.ImportMissingError](pkg.Error); !ok {
allMissingErrors = false
break
}
@@ -722,7 +722,7 @@ func runInstall(ctx context.Context, cmd *base.Command, args []string) {
load.CheckPackageErrors(pkgs)
if cfg.BuildCover {
- load.PrepareForCoverageBuild(pkgs)
+ load.PrepareForCoverageBuild(modload.LoaderState, pkgs)
}
InstallPackages(ctx, args, pkgs)
@@ -859,11 +859,11 @@ func InstallPackages(ctx context.Context, patterns []string, pkgs []*load.Packag
//
// See golang.org/issue/40276 for details and rationale.
func installOutsideModule(ctx context.Context, args []string) {
- modload.ForceUseModules = true
- modload.RootMode = modload.NoRoot
- modload.AllowMissingModuleImports()
- modload.Init()
- BuildInit()
+ modload.LoaderState.ForceUseModules = true
+ modload.LoaderState.RootMode = modload.NoRoot
+ modload.AllowMissingModuleImports(modload.LoaderState)
+ modload.Init(modload.LoaderState)
+ BuildInit(modload.LoaderState)
// Load packages. Ignore non-main packages.
// Print a warning if an argument contains "..." and matches no main packages.
@@ -872,7 +872,7 @@ func installOutsideModule(ctx context.Context, args []string) {
// TODO(golang.org/issue/40276): don't report errors loading non-main packages
// matched by a pattern.
pkgOpts := load.PackageOpts{MainOnly: true}
- pkgs, err := load.PackagesAndErrorsOutsideModule(ctx, pkgOpts, args)
+ pkgs, err := load.PackagesAndErrorsOutsideModule(modload.LoaderState, ctx, pkgOpts, args)
if err != nil {
base.Fatal(err)
}
diff --git a/src/cmd/go/internal/work/buildid.go b/src/cmd/go/internal/work/buildid.go
index 88c24b11acc172..584c1ac6f41d23 100644
--- a/src/cmd/go/internal/work/buildid.go
+++ b/src/cmd/go/internal/work/buildid.go
@@ -148,9 +148,10 @@ func (b *Builder) toolID(name string) string {
path := base.Tool(name)
desc := "go tool " + name
- // Special case: undocumented -vettool overrides usual vet,
- // for testing vet or supplying an alternative analysis tool.
- if name == "vet" && VetTool != "" {
+ // Special case: -{vet,fix}tool overrides usual cmd/{vet,fix}
+ // for testing or supplying an alternative analysis tool.
+ // (We use only "vet" terminology in the action graph.)
+ if name == "vet" {
path = VetTool
desc = VetTool
}
diff --git a/src/cmd/go/internal/work/exec.go b/src/cmd/go/internal/work/exec.go
index 72b9177c9dbbeb..eb012d26109f57 100644
--- a/src/cmd/go/internal/work/exec.go
+++ b/src/cmd/go/internal/work/exec.go
@@ -169,9 +169,10 @@ func (b *Builder) Do(ctx context.Context, root *Action) {
a.Package.Incomplete = true
}
} else {
- var ipe load.ImportPathError
- if a.Package != nil && (!errors.As(err, &ipe) || ipe.ImportPath() != a.Package.ImportPath) {
- err = fmt.Errorf("%s: %v", a.Package.ImportPath, err)
+ if a.Package != nil {
+ if ipe, ok := errors.AsType[load.ImportPathError](err); !ok || ipe.ImportPath() != a.Package.ImportPath {
+ err = fmt.Errorf("%s: %v", a.Package.ImportPath, err)
+ }
}
sh := b.Shell(a)
sh.Errorf("%s", err)
@@ -1265,7 +1266,8 @@ func buildVetConfig(a *Action, srcfiles []string) {
}
}
-// VetTool is the path to an alternate vet tool binary.
+// VetTool is the path to the effective vet or fix tool binary.
+// The user may specify a non-default value using -{vet,fix}tool.
// The caller is expected to set it (if needed) before executing any vet actions.
var VetTool string
@@ -1273,7 +1275,13 @@ var VetTool string
// The caller is expected to set them before executing any vet actions.
var VetFlags []string
-// VetExplicit records whether the vet flags were set explicitly on the command line.
+// VetHandleStdout determines how the stdout output of each vet tool
+// invocation should be handled. The default behavior is to copy it to
+// the go command's stdout, atomically.
+var VetHandleStdout = copyToStdout
+
+// VetExplicit records whether the vet flags (which may include
+// -{vet,fix}tool) were set explicitly on the command line.
var VetExplicit bool
func (b *Builder) vet(ctx context.Context, a *Action) error {
@@ -1296,6 +1304,7 @@ func (b *Builder) vet(ctx context.Context, a *Action) error {
sh := b.Shell(a)
+ // We use "vet" terminology even when building action graphs for go fix.
vcfg.VetxOnly = a.VetxOnly
vcfg.VetxOutput = a.Objdir + "vet.out"
vcfg.Stdout = a.Objdir + "vet.stdout"
@@ -1322,7 +1331,7 @@ func (b *Builder) vet(ctx context.Context, a *Action) error {
// dependency tree turn on *more* analysis, as here.
// (The unsafeptr check does not write any facts for use by
// later vet runs, nor does unreachable.)
- if a.Package.Goroot && !VetExplicit && VetTool == "" {
+ if a.Package.Goroot && !VetExplicit && VetTool == base.Tool("vet") {
// Turn off -unsafeptr checks.
// There's too much unsafe.Pointer code
// that vet doesn't like in low-level packages
@@ -1359,13 +1368,29 @@ func (b *Builder) vet(ctx context.Context, a *Action) error {
vcfg.PackageVetx[a1.Package.ImportPath] = a1.built
}
}
- key := cache.ActionID(h.Sum())
+ vetxKey := cache.ActionID(h.Sum()) // for .vetx file
+
+ fmt.Fprintf(h, "stdout\n")
+ stdoutKey := cache.ActionID(h.Sum()) // for .stdout file
- if vcfg.VetxOnly && !cfg.BuildA {
+ // Check the cache; -a forces a rebuild.
+ if !cfg.BuildA {
c := cache.Default()
- if file, _, err := cache.GetFile(c, key); err == nil {
- a.built = file
- return nil
+ if vcfg.VetxOnly {
+ if file, _, err := cache.GetFile(c, vetxKey); err == nil {
+ a.built = file
+ return nil
+ }
+ } else {
+			// Copy the cached vet.stdout file to stdout.
+ if file, _, err := cache.GetFile(c, stdoutKey); err == nil {
+ f, err := os.Open(file)
+ if err != nil {
+ return err
+ }
+ defer f.Close() // ignore error (can't fail)
+ return VetHandleStdout(f)
+ }
}
}
@@ -1387,31 +1412,46 @@ func (b *Builder) vet(ctx context.Context, a *Action) error {
p := a.Package
tool := VetTool
if tool == "" {
- tool = base.Tool("vet")
+ panic("VetTool unset")
}
- runErr := sh.run(p.Dir, p.ImportPath, env, cfg.BuildToolexec, tool, vetFlags, a.Objdir+"vet.cfg")
- // If vet wrote export data, save it for input to future vets.
+ if err := sh.run(p.Dir, p.ImportPath, env, cfg.BuildToolexec, tool, vetFlags, a.Objdir+"vet.cfg"); err != nil {
+ return err
+ }
+
+ // Vet tool succeeded, possibly with facts and JSON stdout. Save both in cache.
+
+	// Save facts.
if f, err := os.Open(vcfg.VetxOutput); err == nil {
+ defer f.Close() // ignore error
a.built = vcfg.VetxOutput
- cache.Default().Put(key, f) // ignore error
- f.Close() // ignore error
+ cache.Default().Put(vetxKey, f) // ignore error
}
- // If vet wrote to stdout, copy it to go's stdout, atomically.
+ // Save stdout.
if f, err := os.Open(vcfg.Stdout); err == nil {
- stdoutMu.Lock()
- if _, err := io.Copy(os.Stdout, f); err != nil && runErr == nil {
- runErr = fmt.Errorf("copying vet tool stdout: %w", err)
+ defer f.Close() // ignore error
+ if err := VetHandleStdout(f); err != nil {
+ return err
}
- f.Close() // ignore error
- stdoutMu.Unlock()
+ f.Seek(0, io.SeekStart) // ignore error
+ cache.Default().Put(stdoutKey, f) // ignore error
}
- return runErr
+ return nil
}
-var stdoutMu sync.Mutex // serializes concurrent writes (e.g. JSON values) to stdout
+var stdoutMu sync.Mutex // serializes concurrent writes (of e.g. JSON values) to stdout
+
+// copyToStdout copies the stream to stdout while holding the lock.
+func copyToStdout(r io.Reader) error {
+ stdoutMu.Lock()
+ defer stdoutMu.Unlock()
+ if _, err := io.Copy(os.Stdout, r); err != nil {
+ return fmt.Errorf("copying vet tool stdout: %w", err)
+ }
+ return nil
+}
// linkActionID computes the action ID for a link action.
func (b *Builder) linkActionID(a *Action) cache.ActionID {
@@ -2153,7 +2193,7 @@ func (noToolchain) cc(b *Builder, a *Action, ofile, cfile string) error {
// gcc runs the gcc C compiler to create an object from a single C file.
func (b *Builder) gcc(a *Action, workdir, out string, flags []string, cfile string) error {
p := a.Package
- return b.ccompile(a, out, flags, cfile, b.GccCmd(p.Dir, workdir))
+ return b.ccompile(modload.LoaderState, a, out, flags, cfile, b.GccCmd(p.Dir, workdir))
}
// gas runs the gcc c compiler to create an object file from a single C assembly file.
@@ -2167,23 +2207,23 @@ func (b *Builder) gas(a *Action, workdir, out string, flags []string, sfile stri
return fmt.Errorf("package using cgo has Go assembly file %s", sfile)
}
}
- return b.ccompile(a, out, flags, sfile, b.GccCmd(p.Dir, workdir))
+ return b.ccompile(modload.LoaderState, a, out, flags, sfile, b.GccCmd(p.Dir, workdir))
}
// gxx runs the g++ C++ compiler to create an object from a single C++ file.
func (b *Builder) gxx(a *Action, workdir, out string, flags []string, cxxfile string) error {
p := a.Package
- return b.ccompile(a, out, flags, cxxfile, b.GxxCmd(p.Dir, workdir))
+ return b.ccompile(modload.LoaderState, a, out, flags, cxxfile, b.GxxCmd(p.Dir, workdir))
}
// gfortran runs the gfortran Fortran compiler to create an object from a single Fortran file.
func (b *Builder) gfortran(a *Action, workdir, out string, flags []string, ffile string) error {
p := a.Package
- return b.ccompile(a, out, flags, ffile, b.gfortranCmd(p.Dir, workdir))
+ return b.ccompile(modload.LoaderState, a, out, flags, ffile, b.gfortranCmd(p.Dir, workdir))
}
// ccompile runs the given C or C++ compiler and creates an object from a single source file.
-func (b *Builder) ccompile(a *Action, outfile string, flags []string, file string, compiler []string) error {
+func (b *Builder) ccompile(loaderstate *modload.State, a *Action, outfile string, flags []string, file string, compiler []string) error {
p := a.Package
sh := b.Shell(a)
file = mkAbs(p.Dir, file)
@@ -2220,7 +2260,7 @@ func (b *Builder) ccompile(a *Action, outfile string, flags []string, file strin
} else if m.Dir == "" {
// The module is in the vendor directory. Replace the entire vendor
// directory path, because the module's Dir is not filled in.
- from = modload.VendorDir()
+ from = modload.VendorDir(loaderstate)
toPath = "vendor"
} else {
from = m.Dir
@@ -2270,7 +2310,7 @@ func (b *Builder) ccompile(a *Action, outfile string, flags []string, file strin
}
}
if len(newFlags) < len(flags) {
- return b.ccompile(a, outfile, newFlags, file, compiler)
+ return b.ccompile(loaderstate, a, outfile, newFlags, file, compiler)
}
}
@@ -3343,7 +3383,7 @@ func (b *Builder) swigDoIntSize(objdir string) (intsize string, err error) {
}
srcs := []string{src}
- p := load.GoFilesPackage(context.TODO(), load.PackageOpts{}, srcs)
+ p := load.GoFilesPackage(modload.LoaderState, context.TODO(), load.PackageOpts{}, srcs)
if _, _, e := BuildToolchain.gc(b, &Action{Mode: "swigDoIntSize", Package: p, Objdir: objdir}, "", nil, nil, "", false, "", srcs); e != nil {
return "32", nil
diff --git a/src/cmd/go/internal/work/init.go b/src/cmd/go/internal/work/init.go
index e4e83dc8f9853e..a2954ab91ab60a 100644
--- a/src/cmd/go/internal/work/init.go
+++ b/src/cmd/go/internal/work/init.go
@@ -50,14 +50,14 @@ func makeCfgChangedEnv() []string {
return slices.Clip(env)
}
-func BuildInit() {
+func BuildInit(loaderstate *modload.State) {
if buildInitStarted {
base.Fatalf("go: internal error: work.BuildInit called more than once")
}
buildInitStarted = true
base.AtExit(closeBuilders)
- modload.Init()
+ modload.Init(loaderstate)
instrumentInit()
buildModeInit()
cfgChangedEnv = makeCfgChangedEnv()
diff --git a/src/cmd/go/internal/workcmd/edit.go b/src/cmd/go/internal/workcmd/edit.go
index 18730436ca8217..3778e70b687aec 100644
--- a/src/cmd/go/internal/workcmd/edit.go
+++ b/src/cmd/go/internal/workcmd/edit.go
@@ -143,8 +143,8 @@ func runEditwork(ctx context.Context, cmd *base.Command, args []string) {
if len(args) == 1 {
gowork = args[0]
} else {
- modload.InitWorkfile()
- gowork = modload.WorkFilePath()
+ modload.InitWorkfile(modload.LoaderState)
+ gowork = modload.WorkFilePath(modload.LoaderState)
}
if gowork == "" {
base.Fatalf("go: no go.work file found\n\t(run 'go work init' first or specify path using GOWORK environment variable)")
diff --git a/src/cmd/go/internal/workcmd/init.go b/src/cmd/go/internal/workcmd/init.go
index 02240b8189fab5..20fef91d5e953c 100644
--- a/src/cmd/go/internal/workcmd/init.go
+++ b/src/cmd/go/internal/workcmd/init.go
@@ -44,11 +44,11 @@ func init() {
}
func runInit(ctx context.Context, cmd *base.Command, args []string) {
- modload.InitWorkfile()
+ modload.InitWorkfile(modload.LoaderState)
- modload.ForceUseModules = true
+ modload.LoaderState.ForceUseModules = true
- gowork := modload.WorkFilePath()
+ gowork := modload.WorkFilePath(modload.LoaderState)
if gowork == "" {
gowork = filepath.Join(base.Cwd(), "go.work")
}
diff --git a/src/cmd/go/internal/workcmd/sync.go b/src/cmd/go/internal/workcmd/sync.go
index 719cf76c9bf12d..c58cd55ceee392 100644
--- a/src/cmd/go/internal/workcmd/sync.go
+++ b/src/cmd/go/internal/workcmd/sync.go
@@ -48,9 +48,9 @@ func init() {
}
func runSync(ctx context.Context, cmd *base.Command, args []string) {
- modload.ForceUseModules = true
- modload.InitWorkfile()
- if modload.WorkFilePath() == "" {
+ modload.LoaderState.ForceUseModules = true
+ modload.InitWorkfile(modload.LoaderState)
+ if modload.WorkFilePath(modload.LoaderState) == "" {
base.Fatalf("go: no go.work file found\n\t(run 'go work init' first or specify path using GOWORK environment variable)")
}
@@ -60,7 +60,7 @@ func runSync(ctx context.Context, cmd *base.Command, args []string) {
}
mustSelectFor := map[module.Version][]module.Version{}
- mms := modload.MainModules
+ mms := modload.LoaderState.MainModules
opts := modload.PackageOpts{
Tags: imports.AnyTags(),
@@ -73,7 +73,7 @@ func runSync(ctx context.Context, cmd *base.Command, args []string) {
}
for _, m := range mms.Versions() {
opts.MainModule = m
- _, pkgs := modload.LoadPackages(ctx, opts, "all")
+ _, pkgs := modload.LoadPackages(modload.LoaderState, ctx, opts, "all")
opts.MainModule = module.Version{} // reset
var (
@@ -91,7 +91,7 @@ func runSync(ctx context.Context, cmd *base.Command, args []string) {
mustSelectFor[m] = mustSelect
}
- workFilePath := modload.WorkFilePath() // save go.work path because EnterModule clobbers it.
+ workFilePath := modload.WorkFilePath(modload.LoaderState) // save go.work path because EnterModule clobbers it.
var goV string
for _, m := range mms.Versions() {
@@ -114,12 +114,12 @@ func runSync(ctx context.Context, cmd *base.Command, args []string) {
// so we don't write some go.mods with the "before" toolchain
// and others with the "after" toolchain. If nothing else, that
// discrepancy could show up in auto-recorded toolchain lines.
- changed, err := modload.EditBuildList(ctx, nil, mustSelectFor[m])
+ changed, err := modload.EditBuildList(modload.LoaderState, ctx, nil, mustSelectFor[m])
if err != nil {
continue
}
if changed {
- modload.LoadPackages(ctx, modload.PackageOpts{
+ modload.LoadPackages(modload.LoaderState, ctx, modload.PackageOpts{
Tags: imports.AnyTags(),
Tidy: true,
VendorModulesInGOROOTSrc: true,
@@ -131,7 +131,7 @@ func runSync(ctx context.Context, cmd *base.Command, args []string) {
}, "all")
modload.WriteGoMod(ctx, modload.WriteOpts{})
}
- goV = gover.Max(goV, modload.MainModules.GoVersion())
+ goV = gover.Max(goV, modload.LoaderState.MainModules.GoVersion(modload.LoaderState))
}
wf, err := modload.ReadWorkFile(workFilePath)
diff --git a/src/cmd/go/internal/workcmd/use.go b/src/cmd/go/internal/workcmd/use.go
index afbe99d3a480db..ca8de22cca884f 100644
--- a/src/cmd/go/internal/workcmd/use.go
+++ b/src/cmd/go/internal/workcmd/use.go
@@ -61,9 +61,9 @@ func init() {
}
func runUse(ctx context.Context, cmd *base.Command, args []string) {
- modload.ForceUseModules = true
- modload.InitWorkfile()
- gowork := modload.WorkFilePath()
+ modload.LoaderState.ForceUseModules = true
+ modload.InitWorkfile(modload.LoaderState)
+ gowork := modload.WorkFilePath(modload.LoaderState)
if gowork == "" {
base.Fatalf("go: no go.work file found\n\t(run 'go work init' first or specify path using GOWORK environment variable)")
}
diff --git a/src/cmd/go/internal/workcmd/vendor.go b/src/cmd/go/internal/workcmd/vendor.go
index f9f0cc0898836f..36c1f7b522f9e7 100644
--- a/src/cmd/go/internal/workcmd/vendor.go
+++ b/src/cmd/go/internal/workcmd/vendor.go
@@ -46,8 +46,8 @@ func init() {
}
func runVendor(ctx context.Context, cmd *base.Command, args []string) {
- modload.InitWorkfile()
- if modload.WorkFilePath() == "" {
+ modload.InitWorkfile(modload.LoaderState)
+ if modload.WorkFilePath(modload.LoaderState) == "" {
base.Fatalf("go: no go.work file found\n\t(run 'go work init' first or specify path using GOWORK environment variable)")
}
diff --git a/src/cmd/go/main.go b/src/cmd/go/main.go
index e81969ca4a3144..8cdfd9196e4cb1 100644
--- a/src/cmd/go/main.go
+++ b/src/cmd/go/main.go
@@ -24,7 +24,6 @@ import (
"cmd/go/internal/clean"
"cmd/go/internal/doc"
"cmd/go/internal/envcmd"
- "cmd/go/internal/fix"
"cmd/go/internal/fmtcmd"
"cmd/go/internal/generate"
"cmd/go/internal/help"
@@ -55,7 +54,7 @@ func init() {
clean.CmdClean,
doc.CmdDoc,
envcmd.CmdEnv,
- fix.CmdFix,
+ vet.CmdFix,
fmtcmd.CmdFmt,
generate.CmdGenerate,
modget.CmdGet,
diff --git a/src/cmd/go/testdata/script/chdir.txt b/src/cmd/go/testdata/script/chdir.txt
index a6feed6b45fce0..41def410d5fa37 100644
--- a/src/cmd/go/testdata/script/chdir.txt
+++ b/src/cmd/go/testdata/script/chdir.txt
@@ -27,6 +27,10 @@ stderr 'strings\.test'
go vet -C ../strings -n
stderr strings_test
+# go fix
+go fix -C ../strings -n
+stderr strings_test
+
# -C must be first on command line (as of Go 1.21)
! go test -n -C ../strings
stderr '^invalid value "../strings" for flag -C: -C flag must be first flag on command line$'
diff --git a/src/cmd/go/testdata/script/test_fuzz_fuzztime.txt b/src/cmd/go/testdata/script/test_fuzz_fuzztime.txt
index 027c434a322ec1..3cc23985a3934a 100644
--- a/src/cmd/go/testdata/script/test_fuzz_fuzztime.txt
+++ b/src/cmd/go/testdata/script/test_fuzz_fuzztime.txt
@@ -1,3 +1,5 @@
+skip # a 5s timeout is never going to be reliable (go.dev/issue/72140)
+
[!fuzz] skip
[short] skip
env GOCACHE=$WORK/cache
diff --git a/src/cmd/go/testdata/script/vet_asm.txt b/src/cmd/go/testdata/script/vet_asm.txt
index 8aa69ce1a3c999..c046773a06c0dc 100644
--- a/src/cmd/go/testdata/script/vet_asm.txt
+++ b/src/cmd/go/testdata/script/vet_asm.txt
@@ -1,12 +1,12 @@
-env GO111MODULE=off
-
# Issue 27665. Verify that "go vet" analyzes non-Go files.
-[!GOARCH:amd64] skip
+env GO111MODULE=off
+env GOARCH=amd64
+
! go vet -asmdecl a
stderr 'f: invalid MOVW of x'
-# -c flag shows context
+# -c=n flag shows n lines of context
! go vet -c=2 -asmdecl a
stderr '...invalid MOVW...'
stderr '1 .*TEXT'
diff --git a/src/cmd/go/testdata/script/vet_basic.txt b/src/cmd/go/testdata/script/vet_basic.txt
new file mode 100644
index 00000000000000..5ae66438ea3d81
--- /dev/null
+++ b/src/cmd/go/testdata/script/vet_basic.txt
@@ -0,0 +1,125 @@
+# Test basic features of "go vet"/"go fix" CLI.
+#
+# The example relies on two analyzers:
+# - hostport (which is included in both the fix and vet suites), and
+# - printf (which is only in the vet suite).
+# Each reports one diagnostic with a fix.
+
+# With default flags, vet prints diagnostics to stderr. Diagnostic => nonzero exit.
+! go vet example.com/x
+stderr 'does not work with IPv6'
+stderr 'non-constant format string in call to fmt.Sprintf'
+
+# -hostport runs only one analyzer. Diagnostic => failure.
+! go vet -hostport example.com/x
+stderr 'does not work with IPv6'
+! stderr 'non-constant format string'
+
+# -timeformat runs only one analyzer. No diagnostics => success.
+go vet -timeformat example.com/x
+! stderr .
+
+# JSON output includes diagnostics and fixes. Always success.
+go vet -json example.com/x
+! stderr .
+stdout '"example.com/x": {'
+stdout '"hostport":'
+stdout '"message": "address format .* does not work with IPv6",'
+stdout '"suggested_fixes":'
+stdout '"message": "Replace fmt.Sprintf with net.JoinHostPort",'
+
+# vet -fix -diff displays a diff.
+go vet -fix -diff example.com/x
+stdout '\-var _ = fmt.Sprintf\(s\)'
+stdout '\+var _ = fmt.Sprintf\("%s", s\)'
+stdout '\-var _, _ = net.Dial\("tcp", fmt.Sprintf\("%s:%d", s, 80\)\)'
+stdout '\+var _, _ = net.Dial\("tcp", net.JoinHostPort\(s, "80"\)\)'
+
+# vet -fix quietly applies the vet suite fixes.
+cp x.go x.go.bak
+go vet -fix example.com/x
+grep 'fmt.Sprintf\("%s", s\)' x.go
+grep 'net.JoinHostPort' x.go
+! stderr .
+cp x.go.bak x.go
+
+! go vet -diff example.com/x
+stderr 'go vet -diff flag requires -fix'
+
+# go fix applies the fix suite fixes.
+go fix example.com/x
+grep 'net.JoinHostPort' x.go
+! grep 'fmt.Sprintf\("%s", s\)' x.go
+! stderr .
+cp x.go.bak x.go
+
+# Show diff of fixes from the fix suite.
+go fix -diff example.com/x
+! stdout '\-var _ = fmt.Sprintf\(s\)'
+stdout '\-var _, _ = net.Dial\("tcp", fmt.Sprintf\("%s:%d", s, 80\)\)'
+stdout '\+var _, _ = net.Dial\("tcp", net.JoinHostPort\(s, "80"\)\)'
+
+# Show fix-suite fixes in JSON form.
+go fix -json example.com/x
+! stderr .
+stdout '"example.com/x": {'
+stdout '"hostport":'
+stdout '"message": "address format .* does not work with IPv6",'
+stdout '"suggested_fixes":'
+stdout '"message": "Replace fmt.Sprintf with net.JoinHostPort",'
+! stdout '"printf":'
+! stdout '"message": "non-constant format string.*",'
+! stdout '"message": "Insert.*%s.*format.string",'
+
+# Show vet-suite fixes in JSON form.
+go vet -fix -json example.com/x
+! stderr .
+stdout '"example.com/x": {'
+stdout '"hostport":'
+stdout '"message": "address format .* does not work with IPv6",'
+stdout '"suggested_fixes":'
+stdout '"message": "Replace fmt.Sprintf with net.JoinHostPort",'
+stdout '"printf":'
+stdout '"message": "non-constant format string.*",'
+stdout '"suggested_fixes":'
+stdout '"message": "Insert.*%s.*format.string",'
+
+# Reject -diff + -json.
+! go fix -diff -json example.com/x
+stderr '-json and -diff cannot be used together'
+
+# Legacy way of selecting fixers is a no-op.
+go fix -fix=old1,old2 example.com/x
+stderr 'go fix: the -fix=old1,old2 flag is obsolete and has no effect'
+
+# -c=n flag shows n lines of context.
+! go vet -c=2 -printf example.com/x
+stderr 'x.go:12:21: non-constant format string in call to fmt.Sprintf'
+! stderr '9 '
+stderr '10 '
+stderr '11 // This call...'
+stderr '12 var _ = fmt.Sprintf\(s\)'
+stderr '13 '
+stderr '14 '
+! stderr '15 '
+
+-- go.mod --
+module example.com/x
+go 1.25
+
+-- x.go --
+package x
+
+
+import (
+ "fmt"
+ "net"
+)
+
+var s string
+
+// This call yields a "non-constant format string" diagnostic, with a fix (go vet only).
+var _ = fmt.Sprintf(s)
+
+// This call yields a hostport diagnostic, with a fix (go vet and go fix).
+var _, _ = net.Dial("tcp", fmt.Sprintf("%s:%d", s, 80))
diff --git a/src/cmd/go/testdata/script/vet_cache.txt b/src/cmd/go/testdata/script/vet_cache.txt
new file mode 100644
index 00000000000000..c84844000a43d0
--- /dev/null
+++ b/src/cmd/go/testdata/script/vet_cache.txt
@@ -0,0 +1,24 @@
+# Test that go vet's caching of vet tool actions replays
+# the recorded stderr output even after a cache hit.
+
+# Set up fresh GOCACHE.
+env GOCACHE=$WORK/gocache
+
+# First time is a cache miss.
+! go vet example.com/a
+stderr 'fmt.Sprint call has possible Printf formatting directive'
+
+# Second time is assumed to be a cache hit for the stdout JSON,
+# but we don't bother to assert it. Same diagnostics again.
+! go vet example.com/a
+stderr 'fmt.Sprint call has possible Printf formatting directive'
+
+-- go.mod --
+module example.com
+
+-- a/a.go --
+package a
+
+import "fmt"
+
+var _ = fmt.Sprint("%s") // oops!
diff --git a/src/cmd/gofmt/gofmt.go b/src/cmd/gofmt/gofmt.go
index bbb8b4fd15c2f7..ad6ad636524479 100644
--- a/src/cmd/gofmt/gofmt.go
+++ b/src/cmd/gofmt/gofmt.go
@@ -41,6 +41,9 @@ var (
// debugging
cpuprofile = flag.String("cpuprofile", "", "write cpu profile to this file")
+
+ // errors
+ errFormattingDiffers = fmt.Errorf("formatting differs from gofmt's")
)
// Keep these in sync with go/format/format.go.
@@ -218,8 +221,12 @@ func (r *reporter) Report(err error) {
panic("Report with nil error")
}
st := r.getState()
- scanner.PrintError(st.err, err)
- st.exitCode = 2
+ if err == errFormattingDiffers {
+ st.exitCode = 1
+ } else {
+ scanner.PrintError(st.err, err)
+ st.exitCode = 2
+ }
}
func (r *reporter) ExitCode() int {
@@ -281,6 +288,7 @@ func processFile(filename string, info fs.FileInfo, in io.Reader, r *reporter) e
newName := filepath.ToSlash(filename)
oldName := newName + ".orig"
r.Write(diff.Diff(oldName, src, newName, res))
+ return errFormattingDiffers
}
}
diff --git a/src/cmd/gofmt/gofmt_test.go b/src/cmd/gofmt/gofmt_test.go
index 6b80673af148f5..2aba0f03ff09e9 100644
--- a/src/cmd/gofmt/gofmt_test.go
+++ b/src/cmd/gofmt/gofmt_test.go
@@ -53,10 +53,19 @@ func gofmtFlags(filename string, maxLines int) string {
return ""
}
-func runTest(t *testing.T, in, out string) {
- // process flags
- *simplifyAST = false
+// resetFlags resets the global variables for all flags to their default values.
+func resetFlags() {
+ *list = false
+ *write = false
*rewriteRule = ""
+ *simplifyAST = false
+ *doDiff = false
+ *allErrors = false
+ *cpuprofile = ""
+}
+
+func runTest(t *testing.T, in, out string) {
+ resetFlags()
info, err := os.Lstat(in)
if err != nil {
t.Error(err)
@@ -159,6 +168,46 @@ func TestRewrite(t *testing.T) {
}
}
+// TestDiff runs gofmt with the -d flag on the input files and checks that the
+// expected exit code is set.
+func TestDiff(t *testing.T) {
+ tests := []struct {
+ in string
+ exitCode int
+ }{
+ {in: "testdata/exitcode.input", exitCode: 1},
+ {in: "testdata/exitcode.golden", exitCode: 0},
+ }
+
+ for _, tt := range tests {
+ resetFlags()
+ *doDiff = true
+
+ initParserMode()
+ initRewrite()
+
+ info, err := os.Lstat(tt.in)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ const maxWeight = 2 << 20
+ var buf, errBuf bytes.Buffer
+ s := newSequencer(maxWeight, &buf, &errBuf)
+ s.Add(fileWeight(tt.in, info), func(r *reporter) error {
+ return processFile(tt.in, info, nil, r)
+ })
+ if errBuf.Len() > 0 {
+ t.Logf("%q", errBuf.Bytes())
+ }
+
+ if s.GetExitCode() != tt.exitCode {
+ t.Errorf("%s: expected exit code %d, got %d", tt.in, tt.exitCode, s.GetExitCode())
+ }
+ }
+}
+
// Test case for issue 3961.
func TestCRLF(t *testing.T) {
const input = "testdata/crlf.input" // must contain CR/LF's
diff --git a/src/cmd/gofmt/testdata/exitcode.golden b/src/cmd/gofmt/testdata/exitcode.golden
new file mode 100644
index 00000000000000..06ab7d0f9a35a7
--- /dev/null
+++ b/src/cmd/gofmt/testdata/exitcode.golden
@@ -0,0 +1 @@
+package main
diff --git a/src/cmd/gofmt/testdata/exitcode.input b/src/cmd/gofmt/testdata/exitcode.input
new file mode 100644
index 00000000000000..4f2f092ce508de
--- /dev/null
+++ b/src/cmd/gofmt/testdata/exitcode.input
@@ -0,0 +1 @@
+ package main
diff --git a/src/cmd/internal/bootstrap_test/experiment_toolid_test.go b/src/cmd/internal/bootstrap_test/experiment_toolid_test.go
index ff2379c8998c76..ca292b700861a9 100644
--- a/src/cmd/internal/bootstrap_test/experiment_toolid_test.go
+++ b/src/cmd/internal/bootstrap_test/experiment_toolid_test.go
@@ -97,7 +97,7 @@ func runCmd(t *testing.T, dir string, env []string, path string, args ...string)
cmd.Env = env
out, err := cmd.Output()
if err != nil {
- if ee := (*exec.ExitError)(nil); errors.As(err, &ee) {
+ if ee, ok := errors.AsType[*exec.ExitError](err); ok {
out = append(out, ee.Stderr...)
}
t.Fatalf("%s failed:\n%s\n%s", cmd, out, err)
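
This hunk, like several others in this patch (work/exec.go, the robustio files, the script engine, link_test.go), mechanically replaces the two-step errors.As pattern with the generic errors.AsType, which returns the typed error and an ok flag in one call. A minimal sketch of the two forms, assuming the AsType[E error](err error) (E, bool) signature these hunks rely on, so it compiles only with a toolchain that includes that function:

	package main

	import (
		"errors"
		"fmt"
		"io/fs"
		"os"
	)

	func main() {
		_, err := os.Open("no-such-file")

		// Old pattern: declare a target variable and pass its address.
		var pe *fs.PathError
		if errors.As(err, &pe) {
			fmt.Println("As:", pe.Path)
		}

		// New pattern: one call returns the typed error and an ok flag.
		if pe, ok := errors.AsType[*fs.PathError](err); ok {
			fmt.Println("AsType:", pe.Path)
		}
	}
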
diff --git a/src/cmd/internal/obj/arm/asm5.go b/src/cmd/internal/obj/arm/asm5.go
index 0ef13b81f6f3f1..1e2891de0a7ddc 100644
--- a/src/cmd/internal/obj/arm/asm5.go
+++ b/src/cmd/internal/obj/arm/asm5.go
@@ -579,7 +579,7 @@ func span5(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
}
if int64(pc) > p.Pc {
- ctxt.Diag("PC padding invalid: want %#d, has %#d: %v", p.Pc, pc, p)
+ ctxt.Diag("PC padding invalid: want %d, has %d: %v", p.Pc, pc, p)
}
for int64(pc) != p.Pc {
// emit 0xe1a00000 (MOVW R0, R0)
diff --git a/src/cmd/internal/obj/arm64/a.out.go b/src/cmd/internal/obj/arm64/a.out.go
index 710dd64b304c12..814dba2c100b30 100644
--- a/src/cmd/internal/obj/arm64/a.out.go
+++ b/src/cmd/internal/obj/arm64/a.out.go
@@ -1020,6 +1020,12 @@ const (
AWORD
AYIELD
ABTI
+ APACIASP
+ AAUTIASP
+ APACIBSP
+ AAUTIBSP
+ AAUTIA1716
+ AAUTIB1716
ALAST
AB = obj.AJMP
ABL = obj.ACALL
diff --git a/src/cmd/internal/obj/arm64/anames.go b/src/cmd/internal/obj/arm64/anames.go
index 379f53bab37cdb..497429d9985922 100644
--- a/src/cmd/internal/obj/arm64/anames.go
+++ b/src/cmd/internal/obj/arm64/anames.go
@@ -537,5 +537,11 @@ var Anames = []string{
"WORD",
"YIELD",
"BTI",
+ "PACIASP",
+ "AUTIASP",
+ "PACIBSP",
+ "AUTIBSP",
+ "AUTIA1716",
+ "AUTIB1716",
"LAST",
}
diff --git a/src/cmd/internal/obj/arm64/asm7.go b/src/cmd/internal/obj/arm64/asm7.go
index 743d09a319087d..3cb4be436b9b7f 100644
--- a/src/cmd/internal/obj/arm64/asm7.go
+++ b/src/cmd/internal/obj/arm64/asm7.go
@@ -34,6 +34,7 @@ import (
"cmd/internal/obj"
"cmd/internal/objabi"
"encoding/binary"
+ "errors"
"fmt"
"log"
"math"
@@ -3017,6 +3018,13 @@ func buildop(ctxt *obj.Link) {
oprangeset(ANOOP, t)
oprangeset(ADRPS, t)
+ oprangeset(APACIASP, t)
+ oprangeset(AAUTIASP, t)
+ oprangeset(APACIBSP, t)
+ oprangeset(AAUTIBSP, t)
+ oprangeset(AAUTIA1716, t)
+ oprangeset(AAUTIB1716, t)
+
case ACBZ:
oprangeset(ACBZW, t)
oprangeset(ACBNZ, t)
@@ -4354,7 +4362,7 @@ func (c *ctxt7) asmout(p *obj.Prog, out []uint32) (count int) {
// remove the NOTUSETMP flag in optab.
op := c.opirr(p, p.As)
if op&Sbit != 0 {
- c.ctxt.Diag("can not break addition/subtraction when S bit is set", p)
+ c.ctxt.Diag("can not break addition/subtraction when S bit is set (%v)", p)
}
rt, r := p.To.Reg, p.Reg
if r == obj.REG_NONE {
@@ -7016,6 +7024,24 @@ func (c *ctxt7) op0(p *obj.Prog, a obj.As) uint32 {
case ASEVL:
return SYSHINT(5)
+
+ case APACIASP:
+ return SYSHINT(25)
+
+ case AAUTIASP:
+ return SYSHINT(29)
+
+ case APACIBSP:
+ return SYSHINT(27)
+
+ case AAUTIBSP:
+ return SYSHINT(31)
+
+ case AAUTIA1716:
+ return SYSHINT(12)
+
+ case AAUTIB1716:
+ return SYSHINT(14)
}
c.ctxt.Diag("%v: bad op0 %v", p, a)
@@ -7830,3 +7856,146 @@ func (c *ctxt7) encRegShiftOrExt(p *obj.Prog, a *obj.Addr, r int16) uint32 {
func pack(q uint32, arngA, arngB uint8) uint32 {
return uint32(q)<<16 | uint32(arngA)<<8 | uint32(arngB)
}
+
+// ARM64RegisterExtension constructs an ARM64 register with extension or arrangement.
+func ARM64RegisterExtension(a *obj.Addr, ext string, reg, num int16, isAmount, isIndex bool) error {
+ Rnum := (reg & 31) + int16(num<<5)
+ if isAmount {
+ if num < 0 || num > 7 {
+ return errors.New("index shift amount is out of range")
+ }
+ }
+ if reg <= REG_R31 && reg >= REG_R0 {
+ if !isAmount {
+ return errors.New("invalid register extension")
+ }
+ switch ext {
+ case "UXTB":
+ if a.Type == obj.TYPE_MEM {
+ return errors.New("invalid shift for the register offset addressing mode")
+ }
+ a.Reg = REG_UXTB + Rnum
+ case "UXTH":
+ if a.Type == obj.TYPE_MEM {
+ return errors.New("invalid shift for the register offset addressing mode")
+ }
+ a.Reg = REG_UXTH + Rnum
+ case "UXTW":
+ // effective address of memory is a base register value and an offset register value.
+ if a.Type == obj.TYPE_MEM {
+ a.Index = REG_UXTW + Rnum
+ } else {
+ a.Reg = REG_UXTW + Rnum
+ }
+ case "UXTX":
+ if a.Type == obj.TYPE_MEM {
+ return errors.New("invalid shift for the register offset addressing mode")
+ }
+ a.Reg = REG_UXTX + Rnum
+ case "SXTB":
+ if a.Type == obj.TYPE_MEM {
+ return errors.New("invalid shift for the register offset addressing mode")
+ }
+ a.Reg = REG_SXTB + Rnum
+ case "SXTH":
+ if a.Type == obj.TYPE_MEM {
+ return errors.New("invalid shift for the register offset addressing mode")
+ }
+ a.Reg = REG_SXTH + Rnum
+ case "SXTW":
+ if a.Type == obj.TYPE_MEM {
+ a.Index = REG_SXTW + Rnum
+ } else {
+ a.Reg = REG_SXTW + Rnum
+ }
+ case "SXTX":
+ if a.Type == obj.TYPE_MEM {
+ a.Index = REG_SXTX + Rnum
+ } else {
+ a.Reg = REG_SXTX + Rnum
+ }
+ case "LSL":
+ a.Index = REG_LSL + Rnum
+ default:
+ return errors.New("unsupported general register extension type: " + ext)
+
+ }
+ } else if reg <= REG_V31 && reg >= REG_V0 {
+ switch ext {
+ case "B8":
+ if isIndex {
+ return errors.New("invalid register extension")
+ }
+ a.Reg = REG_ARNG + (reg & 31) + ((ARNG_8B & 15) << 5)
+ case "B16":
+ if isIndex {
+ return errors.New("invalid register extension")
+ }
+ a.Reg = REG_ARNG + (reg & 31) + ((ARNG_16B & 15) << 5)
+ case "H4":
+ if isIndex {
+ return errors.New("invalid register extension")
+ }
+ a.Reg = REG_ARNG + (reg & 31) + ((ARNG_4H & 15) << 5)
+ case "H8":
+ if isIndex {
+ return errors.New("invalid register extension")
+ }
+ a.Reg = REG_ARNG + (reg & 31) + ((ARNG_8H & 15) << 5)
+ case "S2":
+ if isIndex {
+ return errors.New("invalid register extension")
+ }
+ a.Reg = REG_ARNG + (reg & 31) + ((ARNG_2S & 15) << 5)
+ case "S4":
+ if isIndex {
+ return errors.New("invalid register extension")
+ }
+ a.Reg = REG_ARNG + (reg & 31) + ((ARNG_4S & 15) << 5)
+ case "D1":
+ if isIndex {
+ return errors.New("invalid register extension")
+ }
+ a.Reg = REG_ARNG + (reg & 31) + ((ARNG_1D & 15) << 5)
+ case "D2":
+ if isIndex {
+ return errors.New("invalid register extension")
+ }
+ a.Reg = REG_ARNG + (reg & 31) + ((ARNG_2D & 15) << 5)
+ case "Q1":
+ if isIndex {
+ return errors.New("invalid register extension")
+ }
+ a.Reg = REG_ARNG + (reg & 31) + ((ARNG_1Q & 15) << 5)
+ case "B":
+ if !isIndex {
+ return nil
+ }
+ a.Reg = REG_ELEM + (reg & 31) + ((ARNG_B & 15) << 5)
+ a.Index = num
+ case "H":
+ if !isIndex {
+ return nil
+ }
+ a.Reg = REG_ELEM + (reg & 31) + ((ARNG_H & 15) << 5)
+ a.Index = num
+ case "S":
+ if !isIndex {
+ return nil
+ }
+ a.Reg = REG_ELEM + (reg & 31) + ((ARNG_S & 15) << 5)
+ a.Index = num
+ case "D":
+ if !isIndex {
+ return nil
+ }
+ a.Reg = REG_ELEM + (reg & 31) + ((ARNG_D & 15) << 5)
+ a.Index = num
+ default:
+ return errors.New("unsupported simd register extension type: " + ext)
+ }
+ } else {
+ return errors.New("invalid register and extension combination")
+ }
+ return nil
+}
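
ARM64RegisterExtension is exported here so that an assembler front end can attach register extensions and arrangements to operands. A rough sketch of a call site follows; the operand is chosen purely for illustration, and the code builds only inside the Go tree because cmd/internal packages are not importable elsewhere:

	package sketch

	import (
		"cmd/internal/obj"
		"cmd/internal/obj/arm64"
	)

	// indexUXTW attaches an "R2.UXTW<<2" extension to a memory operand's
	// index register, as for a hypothetical operand like (R1)(R2.UXTW<<2).
	func indexUXTW() (*obj.Addr, error) {
		a := &obj.Addr{Type: obj.TYPE_MEM, Reg: arm64.REG_R1}
		// ext "UXTW", register R2, shift amount 2, isAmount=true, isIndex=false.
		err := arm64.ARM64RegisterExtension(a, "UXTW", arm64.REG_R2, 2, true, false)
		return a, err
	}
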
diff --git a/src/cmd/internal/obj/link.go b/src/cmd/internal/obj/link.go
index 6513e116872a0a..b7e116bae39dc4 100644
--- a/src/cmd/internal/obj/link.go
+++ b/src/cmd/internal/obj/link.go
@@ -464,7 +464,7 @@ type LSym struct {
P []byte
R []Reloc
- Extra *interface{} // *FuncInfo, *VarInfo, *FileInfo, or *TypeInfo, if present
+ Extra *interface{} // *FuncInfo, *VarInfo, *FileInfo, *TypeInfo, or *ItabInfo, if present
Pkg string
PkgIdx int32
@@ -604,6 +604,15 @@ func (s *LSym) NewTypeInfo() *TypeInfo {
return t
}
+// TypeInfo returns the *TypeInfo associated with s, or else nil.
+func (s *LSym) TypeInfo() *TypeInfo {
+ if s.Extra == nil {
+ return nil
+ }
+ t, _ := (*s.Extra).(*TypeInfo)
+ return t
+}
+
// An ItabInfo contains information for a symbol
// that contains a runtime.itab.
type ItabInfo struct {
@@ -620,6 +629,15 @@ func (s *LSym) NewItabInfo() *ItabInfo {
return t
}
+// ItabInfo returns the *ItabInfo associated with s, or else nil.
+func (s *LSym) ItabInfo() *ItabInfo {
+ if s.Extra == nil {
+ return nil
+ }
+ i, _ := (*s.Extra).(*ItabInfo)
+ return i
+}
+
// WasmImport represents a WebAssembly (WASM) imported function with
// parameters and results translated into WASM types based on the Go function
// declaration.
@@ -1198,6 +1216,13 @@ type Link struct {
Fingerprint goobj.FingerprintType // fingerprint of symbol indices, to catch index mismatch
}
+// Assert to vet's printf checker that Link.DiagFunc is printf-like.
+func _(ctxt *Link) {
+ ctxt.DiagFunc = func(format string, args ...any) {
+ _ = fmt.Sprintf(format, args...)
+ }
+}
+
func (ctxt *Link) Diag(format string, args ...interface{}) {
ctxt.Errors++
ctxt.DiagFunc(format, args...)
diff --git a/src/cmd/internal/obj/loong64/asm.go b/src/cmd/internal/obj/loong64/asm.go
index ca6e2be4aa9eb6..5a61acac8741d6 100644
--- a/src/cmd/internal/obj/loong64/asm.go
+++ b/src/cmd/internal/obj/loong64/asm.go
@@ -2057,7 +2057,7 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) {
switch o.type_ {
default:
- c.ctxt.Diag("unknown type %d %v", o.type_)
+ c.ctxt.Diag("unknown type %d", o.type_)
prasm(p)
case 0: // pseudo ops
@@ -4438,7 +4438,7 @@ func (c *ctxt0) specialFpMovInst(a obj.As, fclass int, tclass int) uint32 {
}
}
- c.ctxt.Diag("bad class combination: %s %s,%s\n", a, fclass, tclass)
+ c.ctxt.Diag("bad class combination: %s %d,%d\n", a, fclass, tclass)
return 0
}
diff --git a/src/cmd/internal/obj/mips/asm0.go b/src/cmd/internal/obj/mips/asm0.go
index 2de5a4d6c0b8fc..a55953e741432a 100644
--- a/src/cmd/internal/obj/mips/asm0.go
+++ b/src/cmd/internal/obj/mips/asm0.go
@@ -1172,7 +1172,7 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) {
}
switch o.type_ {
default:
- c.ctxt.Diag("unknown type %d %v", o.type_)
+ c.ctxt.Diag("unknown type %d", o.type_)
prasm(p)
case 0: /* pseudo ops */
diff --git a/src/cmd/internal/obj/plist.go b/src/cmd/internal/obj/plist.go
index 698e5ace9ccca8..69914b1c1f36d5 100644
--- a/src/cmd/internal/obj/plist.go
+++ b/src/cmd/internal/obj/plist.go
@@ -63,12 +63,12 @@ func Flushplist(ctxt *Link, plist *Plist, newprog ProgAlloc) {
switch p.To.Sym.Name {
case "go_args_stackmap":
if p.From.Type != TYPE_CONST || p.From.Offset != abi.FUNCDATA_ArgsPointerMaps {
- ctxt.Diag("%s: FUNCDATA use of go_args_stackmap(SB) without FUNCDATA_ArgsPointerMaps", p.Pos)
+ ctxt.Diag("%v: FUNCDATA use of go_args_stackmap(SB) without FUNCDATA_ArgsPointerMaps", p)
}
p.To.Sym = ctxt.LookupDerived(curtext, curtext.Name+".args_stackmap")
case "no_pointers_stackmap":
if p.From.Type != TYPE_CONST || p.From.Offset != abi.FUNCDATA_LocalsPointerMaps {
- ctxt.Diag("%s: FUNCDATA use of no_pointers_stackmap(SB) without FUNCDATA_LocalsPointerMaps", p.Pos)
+ ctxt.Diag("%v: FUNCDATA use of no_pointers_stackmap(SB) without FUNCDATA_LocalsPointerMaps", p)
}
// funcdata for functions with no local variables in frame.
// Define two zero-length bitmaps, because the same index is used
diff --git a/src/cmd/internal/obj/riscv/obj.go b/src/cmd/internal/obj/riscv/obj.go
index 91642ffbcb0e50..74699cc398534e 100644
--- a/src/cmd/internal/obj/riscv/obj.go
+++ b/src/cmd/internal/obj/riscv/obj.go
@@ -2991,7 +2991,7 @@ func (ins *instruction) length() int {
func (ins *instruction) validate(ctxt *obj.Link) {
enc, err := encodingForAs(ins.as)
if err != nil {
- ctxt.Diag(err.Error())
+ ctxt.Diag("%v", err)
return
}
enc.validate(ctxt, ins)
@@ -3026,7 +3026,7 @@ func instructionsForOpImmediate(p *obj.Prog, as obj.As, rs int16) []*instruction
low, high, err := Split32BitImmediate(ins.imm)
if err != nil {
- p.Ctxt.Diag("%v: constant %d too large", p, ins.imm, err)
+ p.Ctxt.Diag("%v: constant %d too large: %v", p, ins.imm, err)
return nil
}
if high == 0 {
diff --git a/src/cmd/internal/robustio/robustio_darwin.go b/src/cmd/internal/robustio/robustio_darwin.go
index 99fd8ebc2fff18..69ea2479308dea 100644
--- a/src/cmd/internal/robustio/robustio_darwin.go
+++ b/src/cmd/internal/robustio/robustio_darwin.go
@@ -13,9 +13,6 @@ const errFileNotFound = syscall.ENOENT
// isEphemeralError returns true if err may be resolved by waiting.
func isEphemeralError(err error) bool {
- var errno syscall.Errno
- if errors.As(err, &errno) {
- return errno == errFileNotFound
- }
- return false
+ errno, ok := errors.AsType[syscall.Errno](err)
+ return ok && errno == errFileNotFound
}
diff --git a/src/cmd/internal/robustio/robustio_flaky.go b/src/cmd/internal/robustio/robustio_flaky.go
index c56e36ca62412a..ec1a2daea65852 100644
--- a/src/cmd/internal/robustio/robustio_flaky.go
+++ b/src/cmd/internal/robustio/robustio_flaky.go
@@ -31,8 +31,7 @@ func retry(f func() (err error, mayRetry bool)) error {
return err
}
- var errno syscall.Errno
- if errors.As(err, &errno) && (lowestErrno == 0 || errno < lowestErrno) {
+ if errno, ok := errors.AsType[syscall.Errno](err); ok && (lowestErrno == 0 || errno < lowestErrno) {
bestErr = err
lowestErrno = errno
} else if bestErr == nil {
diff --git a/src/cmd/internal/robustio/robustio_windows.go b/src/cmd/internal/robustio/robustio_windows.go
index 687dcb66f83d15..ad46ec5cfeb601 100644
--- a/src/cmd/internal/robustio/robustio_windows.go
+++ b/src/cmd/internal/robustio/robustio_windows.go
@@ -14,8 +14,7 @@ const errFileNotFound = syscall.ERROR_FILE_NOT_FOUND
// isEphemeralError returns true if err may be resolved by waiting.
func isEphemeralError(err error) bool {
- var errno syscall.Errno
- if errors.As(err, &errno) {
+ if errno, ok := errors.AsType[syscall.Errno](err); ok {
switch errno {
case syscall.ERROR_ACCESS_DENIED,
syscall.ERROR_FILE_NOT_FOUND,
diff --git a/src/cmd/internal/script/engine.go b/src/cmd/internal/script/engine.go
index eb9344f6e2a1eb..4607868379488a 100644
--- a/src/cmd/internal/script/engine.go
+++ b/src/cmd/internal/script/engine.go
@@ -185,7 +185,7 @@ func (e *Engine) Execute(s *State, file string, script *bufio.Reader, log io.Wri
var lineno int
lineErr := func(err error) error {
- if errors.As(err, new(*CommandError)) {
+ if _, ok := errors.AsType[*CommandError](err); ok {
return err
}
return fmt.Errorf("%s:%d: %w", file, lineno, err)
@@ -283,7 +283,7 @@ func (e *Engine) Execute(s *State, file string, script *bufio.Reader, log io.Wri
// Run the command.
err = e.runCommand(s, cmd, impl)
if err != nil {
- if stop := (stopError{}); errors.As(err, &stop) {
+ if stop, ok := errors.AsType[stopError](err); ok {
// Since the 'stop' command halts execution of the entire script,
// log its message separately from the section in which it appears.
err = endSection(true)
@@ -607,13 +607,13 @@ func checkStatus(cmd *command, err error) error {
return nil
}
- if s := (stopError{}); errors.As(err, &s) {
+ if _, ok := errors.AsType[stopError](err); ok {
// This error originated in the Stop command.
// Propagate it as-is.
return cmdError(cmd, err)
}
- if w := (waitError{}); errors.As(err, &w) {
+ if _, ok := errors.AsType[waitError](err); ok {
// This error was surfaced from a background process by a call to Wait.
// Add a call frame for Wait itself, but ignore its "want" field.
// (Wait itself cannot fail to wait on commands or else it would leak
diff --git a/src/cmd/internal/script/scripttest/scripttest.go b/src/cmd/internal/script/scripttest/scripttest.go
index bace662a6722fd..349201fd188c1b 100644
--- a/src/cmd/internal/script/scripttest/scripttest.go
+++ b/src/cmd/internal/script/scripttest/scripttest.go
@@ -89,7 +89,7 @@ func Run(t testing.TB, e *script.Engine, s *script.State, filename string, testS
return e.Execute(s, filename, bufio.NewReader(testScript), log)
}()
- if skip := (skipError{}); errors.As(err, &skip) {
+ if skip, ok := errors.AsType[skipError](err); ok {
if skip.msg == "" {
t.Skip("SKIP")
} else {
diff --git a/src/cmd/internal/test2json/test2json.go b/src/cmd/internal/test2json/test2json.go
index d08ef389f82a21..f28051e1771db8 100644
--- a/src/cmd/internal/test2json/test2json.go
+++ b/src/cmd/internal/test2json/test2json.go
@@ -38,6 +38,7 @@ type event struct {
FailedBuild string `json:",omitempty"`
Key string `json:",omitempty"`
Value string `json:",omitempty"`
+ Path string `json:",omitempty"`
}
// textBytes is a hack to get JSON to emit a []byte as a string
@@ -180,6 +181,7 @@ var (
[]byte("=== FAIL "),
[]byte("=== SKIP "),
[]byte("=== ATTR "),
+ []byte("=== ARTIFACTS "),
}
reports = [][]byte{
@@ -251,7 +253,6 @@ func (c *Converter) handleInputLine(line []byte) {
// "=== RUN "
// "=== PAUSE "
// "=== CONT "
- actionColon := false
origLine := line
ok := false
indent := 0
@@ -273,7 +274,6 @@ func (c *Converter) handleInputLine(line []byte) {
}
for _, magic := range reports {
if bytes.HasPrefix(line, magic) {
- actionColon = true
ok = true
break
}
@@ -296,16 +296,11 @@ func (c *Converter) handleInputLine(line []byte) {
return
}
- // Parse out action and test name.
- i := 0
- if actionColon {
- i = bytes.IndexByte(line, ':') + 1
- }
- if i == 0 {
- i = len(updates[0])
- }
- action := strings.ToLower(strings.TrimSuffix(strings.TrimSpace(string(line[4:i])), ":"))
- name := strings.TrimSpace(string(line[i:]))
+ // Parse out action and test name from "=== ACTION: Name".
+ action, name, _ := strings.Cut(string(line[len("=== "):]), " ")
+ action = strings.TrimSuffix(action, ":")
+ action = strings.ToLower(action)
+ name = strings.TrimSpace(name)
e := &event{Action: action}
if line[0] == '-' { // PASS or FAIL report
@@ -336,7 +331,10 @@ func (c *Converter) handleInputLine(line []byte) {
c.output.write(origLine)
return
}
- if action == "attr" {
+ switch action {
+ case "artifacts":
+ name, e.Path, _ = strings.Cut(name, " ")
+ case "attr":
var rest string
name, rest, _ = strings.Cut(name, " ")
e.Key, e.Value, _ = strings.Cut(rest, " ")
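
For illustration, the new "artifacts" case splits the remainder of the line into the test name and a path. Assuming a hypothetical test output line of the form below (the exact text the testing package emits is not shown in this patch), the converter would produce an event whose Path field carries the artifact location (Time and Package fields omitted):

	input:  === ARTIFACTS: TestFoo /tmp/artifacts/TestFoo
	event:  {"Action":"artifacts","Test":"TestFoo","Path":"/tmp/artifacts/TestFoo"}
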
diff --git a/src/cmd/link/dwarf_test.go b/src/cmd/link/dwarf_test.go
index 6a60a746a5b48b..56a076002a92d4 100644
--- a/src/cmd/link/dwarf_test.go
+++ b/src/cmd/link/dwarf_test.go
@@ -364,6 +364,10 @@ func TestFlagW(t *testing.T) {
if runtime.GOOS == "aix" {
t.Skip("internal/xcoff cannot parse file without symbol table")
}
+ if !platform.ExecutableHasDWARF(runtime.GOOS, runtime.GOARCH) {
+ t.Skipf("skipping on %s/%s: no DWARF symbol table in executables", runtime.GOOS, runtime.GOARCH)
+ }
+
t.Parallel()
tmpdir := t.TempDir()
@@ -382,7 +386,7 @@ func TestFlagW(t *testing.T) {
{"-s", false}, // -s implies -w
{"-s -w=0", true}, // -w=0 negates the implied -w
}
- if testenv.HasCGO() {
+ if testenv.HasCGO() && runtime.GOOS != "solaris" { // Solaris linker doesn't support the -S flag
tests = append(tests,
testCase{"-w -linkmode=external", false},
testCase{"-s -linkmode=external", false},
diff --git a/src/cmd/link/internal/arm64/asm.go b/src/cmd/link/internal/arm64/asm.go
index 68474b4484f1af..8d8ea8ac542c50 100644
--- a/src/cmd/link/internal/arm64/asm.go
+++ b/src/cmd/link/internal/arm64/asm.go
@@ -224,6 +224,28 @@ func adddynrel(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, s loade
}
return true
+ case objabi.MachoRelocOffset + ld.MACHO_ARM64_RELOC_SUBTRACTOR*2:
+ // ARM64_RELOC_SUBTRACTOR must be followed by ARM64_RELOC_UNSIGNED.
+ // The pair of relocations resolves to the difference between two
+ // symbol addresses (each relocation specifies a symbol).
+ outer, off := ld.FoldSubSymbolOffset(ldr, targ)
+ if outer != s {
+ // TODO: support subtracted symbol in different section.
+ ldr.Errorf(s, "unsupported ARM64_RELOC_SUBTRACTOR reloc: target %s, outer %s", ldr.SymName(targ), ldr.SymName(outer))
+ break
+ }
+ su := ldr.MakeSymbolUpdater(s)
+ relocs := su.Relocs()
+ if rIdx+1 >= relocs.Count() || relocs.At(rIdx+1).Type() != objabi.MachoRelocOffset+ld.MACHO_ARM64_RELOC_UNSIGNED*2 || relocs.At(rIdx+1).Off() != r.Off() {
+ ldr.Errorf(s, "unexpected ARM64_RELOC_SUBTRACTOR reloc, must be followed by ARM64_RELOC_UNSIGNED at same offset")
+ break
+ }
+ su.SetRelocType(rIdx+1, objabi.R_PCREL)
+ su.SetRelocAdd(rIdx+1, r.Add()+int64(r.Off())+int64(r.Siz())-off)
+ // Remove the other relocation
+ su.SetRelocSiz(rIdx, 0)
+ return true
+
case objabi.MachoRelocOffset + ld.MACHO_ARM64_RELOC_BRANCH26*2 + pcrel:
su := ldr.MakeSymbolUpdater(s)
su.SetRelocType(rIdx, objabi.R_CALLARM64)
@@ -277,6 +299,17 @@ func adddynrel(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, s loade
su.SetRelocSym(rIdx, syms.GOT)
su.SetRelocAdd(rIdx, int64(ldr.SymGot(targ)))
return true
+
+ case objabi.MachoRelocOffset + ld.MACHO_ARM64_RELOC_POINTER_TO_GOT*2 + pcrel:
+ if targType != sym.SDYNIMPORT {
+ ldr.Errorf(s, "unexpected GOT reloc for non-dynamic symbol %s", ldr.SymName(targ))
+ }
+ ld.AddGotSym(target, ldr, syms, targ, 0)
+ su := ldr.MakeSymbolUpdater(s)
+ su.SetRelocType(rIdx, objabi.R_PCREL)
+ su.SetRelocSym(rIdx, syms.GOT)
+ su.SetRelocAdd(rIdx, r.Add()+int64(r.Siz())+int64(ldr.SymGot(targ)))
+ return true
}
// Reread the reloc to incorporate any changes in type above.
diff --git a/src/cmd/link/internal/ld/fallocate_test.go b/src/cmd/link/internal/ld/fallocate_test.go
index 3c6b7ef752edd3..f463b5b63b3b69 100644
--- a/src/cmd/link/internal/ld/fallocate_test.go
+++ b/src/cmd/link/internal/ld/fallocate_test.go
@@ -10,6 +10,7 @@ import (
"errors"
"os"
"path/filepath"
+ "runtime"
"syscall"
"testing"
)
@@ -53,12 +54,24 @@ func TestFallocate(t *testing.T) {
if got := stat.Size(); got != sz {
t.Errorf("unexpected file size: got %d, want %d", got, sz)
}
- // The number of blocks must be enough for the requested size.
- // We used to require an exact match, but it appears that
- // some file systems allocate a few extra blocks in some cases.
- // See issue #41127.
- if got, want := stat.Sys().(*syscall.Stat_t).Blocks, (sz+511)/512; got < want {
- t.Errorf("unexpected disk usage: got %d blocks, want at least %d", got, want)
+ if runtime.GOOS == "darwin" {
+ // Check the number of allocated blocks on Darwin. On Linux (and
+ // perhaps BSDs), stat's Blocks field may not be portable as it
+ // is an implementation detail of the file system. On Darwin, it
+ // is documented as "the actual number of blocks allocated for
+ // the file in 512-byte units".
+		// This check was introduced while fixing a Darwin-specific bug. On
+		// Darwin, the file allocation syscall is a bit tricky; on Linux
+		// and BSDs it is more straightforward and unlikely to go wrong.
+ // Given these two reasons, only check it on Darwin.
+ //
+ // The number of blocks must be enough for the requested size.
+ // We used to require an exact match, but it appears that
+ // some file systems allocate a few extra blocks in some cases.
+ // See issue #41127.
+ if got, want := stat.Sys().(*syscall.Stat_t).Blocks, (sz+511)/512; got < want {
+ t.Errorf("unexpected disk usage: got %d blocks, want at least %d", got, want)
+ }
}
out.munmap()
}
diff --git a/src/cmd/link/internal/ld/lib.go b/src/cmd/link/internal/ld/lib.go
index 8d2763bb57f31a..2c861129b52f9a 100644
--- a/src/cmd/link/internal/ld/lib.go
+++ b/src/cmd/link/internal/ld/lib.go
@@ -1452,7 +1452,7 @@ func (ctxt *Link) hostlink() {
argv = append(argv, "-s")
}
} else if *FlagW {
- if !ctxt.IsAIX() { // The AIX linker's -S has different meaning
+		if !ctxt.IsAIX() && !ctxt.IsSolaris() { // The AIX and Solaris linkers' -S has a different meaning
argv = append(argv, "-Wl,-S") // suppress debugging symbols
}
}
@@ -1772,7 +1772,8 @@ func (ctxt *Link) hostlink() {
}
// Force global symbols to be exported for dlopen, etc.
- if ctxt.IsELF {
+ switch {
+ case ctxt.IsELF:
if ctxt.DynlinkingGo() || ctxt.BuildMode == BuildModeCShared || !linkerFlagSupported(ctxt.Arch, argv[0], altLinker, "-Wl,--export-dynamic-symbol=main") {
argv = append(argv, "-rdynamic")
} else {
@@ -1783,10 +1784,16 @@ func (ctxt *Link) hostlink() {
sort.Strings(exports)
argv = append(argv, exports...)
}
- }
- if ctxt.HeadType == objabi.Haix {
+ case ctxt.IsAIX():
fileName := xcoffCreateExportFile(ctxt)
argv = append(argv, "-Wl,-bE:"+fileName)
+ case ctxt.IsWindows() && !slices.Contains(flagExtldflags, wlPrefix+"export-all-symbols"):
+ fileName := peCreateExportFile(ctxt, filepath.Base(outopt))
+ prefix := ""
+ if isMSVC {
+ prefix = "-Wl,-def:"
+ }
+ argv = append(argv, prefix+fileName)
}
const unusedArguments = "-Qunused-arguments"
diff --git a/src/cmd/link/internal/ld/macho.go b/src/cmd/link/internal/ld/macho.go
index 431dad9d6bcbaa..c26263466616f5 100644
--- a/src/cmd/link/internal/ld/macho.go
+++ b/src/cmd/link/internal/ld/macho.go
@@ -106,11 +106,13 @@ const (
MACHO_ARM_RELOC_SECTDIFF = 2
MACHO_ARM_RELOC_BR24 = 5
MACHO_ARM64_RELOC_UNSIGNED = 0
+ MACHO_ARM64_RELOC_SUBTRACTOR = 1
MACHO_ARM64_RELOC_BRANCH26 = 2
MACHO_ARM64_RELOC_PAGE21 = 3
MACHO_ARM64_RELOC_PAGEOFF12 = 4
MACHO_ARM64_RELOC_GOT_LOAD_PAGE21 = 5
MACHO_ARM64_RELOC_GOT_LOAD_PAGEOFF12 = 6
+ MACHO_ARM64_RELOC_POINTER_TO_GOT = 7
MACHO_ARM64_RELOC_ADDEND = 10
MACHO_GENERIC_RELOC_VANILLA = 0
MACHO_FAKE_GOTPCREL = 100
diff --git a/src/cmd/link/internal/ld/pe.go b/src/cmd/link/internal/ld/pe.go
index 5219a98dd47cf4..e0186f46b035d9 100644
--- a/src/cmd/link/internal/ld/pe.go
+++ b/src/cmd/link/internal/ld/pe.go
@@ -8,6 +8,7 @@
package ld
import (
+ "bytes"
"cmd/internal/objabi"
"cmd/internal/sys"
"cmd/link/internal/loader"
@@ -17,6 +18,8 @@ import (
"fmt"
"internal/buildcfg"
"math"
+ "os"
+ "path/filepath"
"slices"
"sort"
"strconv"
@@ -1748,3 +1751,46 @@ func asmbPe(ctxt *Link) {
pewrite(ctxt)
}
+
+// peCreateExportFile creates a Windows .def file listing the symbols to export.
+// Unless a .def file is provided, ld exports all symbols, even those not marked for export.
+func peCreateExportFile(ctxt *Link, libName string) (fname string) {
+ fname = filepath.Join(*flagTmpdir, "export_file.def")
+ var buf bytes.Buffer
+
+ if ctxt.BuildMode == BuildModeCShared {
+ fmt.Fprintf(&buf, "LIBRARY %s\n", libName)
+ }
+ buf.WriteString("EXPORTS\n")
+
+ ldr := ctxt.loader
+ var exports []string
+ for s := range ldr.ForAllCgoExportStatic() {
+ extname := ldr.SymExtname(s)
+ if !strings.HasPrefix(extname, "_cgoexp_") {
+ continue
+ }
+ if ldr.IsFileLocal(s) {
+ continue // Only export non-static symbols
+ }
+ // Retrieve the name of the initial symbol
+ // exported by cgo.
+ // The corresponding Go symbol is:
+ // _cgoexp_hashcode_symname.
+ name := strings.SplitN(extname, "_", 4)[3]
+ exports = append(exports, name)
+ }
+ if len(exports) == 0 {
+ // See runtime/cgo/windows.go for details.
+ exports = append(exports, "_cgo_stub_export")
+ }
+ sort.Strings(exports)
+ buf.WriteString(strings.Join(exports, "\n"))
+
+ err := os.WriteFile(fname, buf.Bytes(), 0666)
+ if err != nil {
+ Errorf("WriteFile %s failed: %v", fname, err)
+ }
+
+ return fname
+}
diff --git a/src/cmd/link/internal/ld/symtab.go b/src/cmd/link/internal/ld/symtab.go
index 759262286d39d8..2c999ccc4e3a19 100644
--- a/src/cmd/link/internal/ld/symtab.go
+++ b/src/cmd/link/internal/ld/symtab.go
@@ -645,7 +645,7 @@ func (ctxt *Link) symtab(pcln *pclntab) []sym.SymKind {
sliceSym(pcln.funcnametab)
// The cutab slice
- sliceSym(pcln.cutab)
+ slice(pcln.cutab, uint64(ldr.SymSize(pcln.cutab))/4)
// The filetab slice
sliceSym(pcln.filetab)
@@ -654,7 +654,7 @@ func (ctxt *Link) symtab(pcln *pclntab) []sym.SymKind {
sliceSym(pcln.pctab)
// The pclntab slice
- slice(pcln.pclntab, uint64(ldr.SymSize(pcln.pclntab)))
+ sliceSym(pcln.pclntab)
// The ftab slice
slice(pcln.pclntab, uint64(pcln.nfunc+1))
diff --git a/src/cmd/link/internal/ld/xcoff.go b/src/cmd/link/internal/ld/xcoff.go
index 1bce2cf9b6124d..da728e25455618 100644
--- a/src/cmd/link/internal/ld/xcoff.go
+++ b/src/cmd/link/internal/ld/xcoff.go
@@ -1779,10 +1779,7 @@ func xcoffCreateExportFile(ctxt *Link) (fname string) {
var buf bytes.Buffer
ldr := ctxt.loader
- for s, nsym := loader.Sym(1), loader.Sym(ldr.NSym()); s < nsym; s++ {
- if !ldr.AttrCgoExport(s) {
- continue
- }
+ for s := range ldr.ForAllCgoExportStatic() {
extname := ldr.SymExtname(s)
if !strings.HasPrefix(extname, "._cgoexp_") {
continue
diff --git a/src/cmd/link/internal/loader/loader.go b/src/cmd/link/internal/loader/loader.go
index 9f3ea3e553dd7c..0ed20d1becbb03 100644
--- a/src/cmd/link/internal/loader/loader.go
+++ b/src/cmd/link/internal/loader/loader.go
@@ -16,6 +16,7 @@ import (
"fmt"
"internal/abi"
"io"
+ "iter"
"log"
"math/bits"
"os"
@@ -1109,6 +1110,18 @@ func (l *Loader) SetAttrCgoExportStatic(i Sym, v bool) {
}
}
+// ForAllCgoExportStatic returns an iterator over all symbols
+// marked with the "cgo_export_static" compiler directive.
+func (l *Loader) ForAllCgoExportStatic() iter.Seq[Sym] {
+ return func(yield func(Sym) bool) {
+ for s := range l.attrCgoExportStatic {
+ if !yield(s) {
+ break
+ }
+ }
+ }
+}
+
// IsGeneratedSym returns true if a symbol's been previously marked as a
// generator symbol through the SetIsGeneratedSym. The functions for generator
// symbols are kept in the Link context.
@@ -2437,6 +2450,9 @@ var blockedLinknames = map[string][]string{
"sync_test.runtime_blockUntilEmptyCleanupQueue": {"sync_test"},
"time.runtimeIsBubbled": {"time"},
"unique.runtime_blockUntilEmptyCleanupQueue": {"unique"},
+ // Experimental features
+ "runtime.goroutineLeakGC": {"runtime/pprof"},
+ "runtime.goroutineleakcount": {"runtime/pprof"},
// Others
"net.newWindowsFile": {"net"}, // pushed from os
"testing/synctest.testingSynctestTest": {"testing/synctest"}, // pushed from testing
diff --git a/src/cmd/link/link_test.go b/src/cmd/link/link_test.go
index 0125ba8e0f56be..31822d21f39d31 100644
--- a/src/cmd/link/link_test.go
+++ b/src/cmd/link/link_test.go
@@ -1532,11 +1532,13 @@ func TestFlagS(t *testing.T) {
}
cmd = testenv.Command(t, testenv.GoToolPath(t), "tool", "nm", exe)
out, err = cmd.CombinedOutput()
- if err != nil && !errors.As(err, new(*exec.ExitError)) {
- // Error exit is fine as it may have no symbols.
- // On darwin we need to emit dynamic symbol references so it
- // actually has some symbols, and nm succeeds.
- t.Errorf("(mode=%s) go tool nm failed: %v\n%s", mode, err, out)
+ if err != nil {
+ if _, ok := errors.AsType[*exec.ExitError](err); !ok {
+ // Error exit is fine as it may have no symbols.
+ // On darwin we need to emit dynamic symbol references so it
+ // actually has some symbols, and nm succeeds.
+ t.Errorf("(mode=%s) go tool nm failed: %v\n%s", mode, err, out)
+ }
}
for _, s := range syms {
if bytes.Contains(out, []byte(s)) {
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/affinity_linux.go b/src/cmd/vendor/golang.org/x/sys/unix/affinity_linux.go
index 3c7a6d6e2f1d2f..3ea470387bcf08 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/affinity_linux.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/affinity_linux.go
@@ -41,6 +41,15 @@ func (s *CPUSet) Zero() {
clear(s[:])
}
+// Fill adds all possible CPU bits to the set s. On Linux, [SchedSetaffinity]
+// will silently ignore any invalid CPU bits in [CPUSet], so this is an
+// efficient way of resetting the CPU affinity of a process.
+func (s *CPUSet) Fill() {
+ for i := range s {
+ s[i] = ^cpuMask(0)
+ }
+}
+
func cpuBitsIndex(cpu int) int {
return cpu / _NCPUBITS
}
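
A short sketch of the use case the new doc comment describes: reset a process's CPU affinity by filling the set and handing it to the existing SchedSetaffinity wrapper (Linux only; pid 0 means the calling process):

	package main

	import "golang.org/x/sys/unix"

	func main() {
		var set unix.CPUSet
		set.Fill() // every possible CPU bit; the kernel ignores invalid ones
		if err := unix.SchedSetaffinity(0, &set); err != nil {
			panic(err)
		}
	}
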
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/fdset.go b/src/cmd/vendor/golang.org/x/sys/unix/fdset.go
index 9e83d18cd04215..62ed12645f48dd 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/fdset.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/fdset.go
@@ -23,7 +23,5 @@ func (fds *FdSet) IsSet(fd int) bool {
// Zero clears the set fds.
func (fds *FdSet) Zero() {
- for i := range fds.Bits {
- fds.Bits[i] = 0
- }
+ clear(fds.Bits[:])
}
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ifreq_linux.go b/src/cmd/vendor/golang.org/x/sys/unix/ifreq_linux.go
index 848840ae4c7586..309f5a2b0c76ae 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ifreq_linux.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ifreq_linux.go
@@ -111,9 +111,7 @@ func (ifr *Ifreq) SetUint32(v uint32) {
// clear zeroes the ifreq's union field to prevent trailing garbage data from
// being sent to the kernel if an ifreq is reused.
func (ifr *Ifreq) clear() {
- for i := range ifr.raw.Ifru {
- ifr.raw.Ifru[i] = 0
- }
+ clear(ifr.raw.Ifru[:])
}
// TODO(mdlayher): export as IfreqData? For now we can provide helpers such as
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/mkall.sh b/src/cmd/vendor/golang.org/x/sys/unix/mkall.sh
index e6f31d374df52c..d0ed6119129296 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/mkall.sh
+++ b/src/cmd/vendor/golang.org/x/sys/unix/mkall.sh
@@ -49,6 +49,7 @@ esac
if [[ "$GOOS" = "linux" ]]; then
# Use the Docker-based build system
# Files generated through docker (use $cmd so you can Ctl-C the build or run)
+ set -e
$cmd docker build --tag generate:$GOOS $GOOS
$cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")/.." && pwd):/build generate:$GOOS
exit
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux.go
index 4958a657085bcd..9439af961d98b6 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux.go
@@ -801,9 +801,7 @@ func (sa *SockaddrPPPoE) sockaddr() (unsafe.Pointer, _Socklen, error) {
// one. The kernel expects SID to be in network byte order.
binary.BigEndian.PutUint16(sa.raw[6:8], sa.SID)
copy(sa.raw[8:14], sa.Remote)
- for i := 14; i < 14+IFNAMSIZ; i++ {
- sa.raw[i] = 0
- }
+ clear(sa.raw[14 : 14+IFNAMSIZ])
copy(sa.raw[14:], sa.Dev)
return unsafe.Pointer(&sa.raw), SizeofSockaddrPPPoX, nil
}
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_netbsd.go
index 88162099af5444..34a4676973042f 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_netbsd.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_netbsd.go
@@ -248,6 +248,23 @@ func Statvfs(path string, buf *Statvfs_t) (err error) {
return Statvfs1(path, buf, ST_WAIT)
}
+func Getvfsstat(buf []Statvfs_t, flags int) (n int, err error) {
+ var (
+ _p0 unsafe.Pointer
+ bufsize uintptr
+ )
+ if len(buf) > 0 {
+ _p0 = unsafe.Pointer(&buf[0])
+ bufsize = unsafe.Sizeof(Statvfs_t{}) * uintptr(len(buf))
+ }
+ r0, _, e1 := Syscall(SYS_GETVFSSTAT, uintptr(_p0), bufsize, uintptr(flags))
+ n = int(r0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
/*
* Exposed directly
*/
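A hedged sketch of using the new NetBSD Getvfsstat wrapper with the conventional two-call sizing pattern of getvfsstat(2), where an empty first call reports the number of mounted filesystems; reusing ST_WAIT here mirrors the Statvfs code above and is an assumption, not part of this change:

	package main

	import (
		"fmt"
		"log"

		"golang.org/x/sys/unix"
	)

	func main() {
		// With a nil buffer the syscall only reports how many
		// filesystems are mounted.
		n, err := unix.Getvfsstat(nil, unix.ST_WAIT)
		if err != nil {
			log.Fatal(err)
		}
		bufs := make([]unix.Statvfs_t, n)
		if _, err := unix.Getvfsstat(bufs, unix.ST_WAIT); err != nil {
			log.Fatal(err)
		}
		fmt.Println("mounted filesystems:", len(bufs))
	}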
diff --git a/src/cmd/vendor/golang.org/x/sys/windows/syscall_windows.go b/src/cmd/vendor/golang.org/x/sys/windows/syscall_windows.go
index 640f6b153f0045..bd51337306019a 100644
--- a/src/cmd/vendor/golang.org/x/sys/windows/syscall_windows.go
+++ b/src/cmd/vendor/golang.org/x/sys/windows/syscall_windows.go
@@ -321,6 +321,8 @@ func NewCallbackCDecl(fn interface{}) uintptr {
//sys SetConsoleOutputCP(cp uint32) (err error) = kernel32.SetConsoleOutputCP
//sys WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW
//sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW
+//sys GetNumberOfConsoleInputEvents(console Handle, numevents *uint32) (err error) = kernel32.GetNumberOfConsoleInputEvents
+//sys FlushConsoleInputBuffer(console Handle) (err error) = kernel32.FlushConsoleInputBuffer
//sys resizePseudoConsole(pconsole Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole
//sys CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) [failretval==InvalidHandle] = kernel32.CreateToolhelp32Snapshot
//sys Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) = kernel32.Module32FirstW
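A minimal sketch combining the two new console wrappers to drain pending Windows console input; obtaining the handle via GetStdHandle is an illustrative assumption and not part of this change:

	package main

	import (
		"fmt"
		"log"

		"golang.org/x/sys/windows"
	)

	func main() {
		stdin, err := windows.GetStdHandle(windows.STD_INPUT_HANDLE)
		if err != nil {
			log.Fatal(err)
		}
		var pending uint32
		if err := windows.GetNumberOfConsoleInputEvents(stdin, &pending); err != nil {
			log.Fatal(err)
		}
		if pending > 0 {
			// Discard queued key/mouse events, e.g. before prompting.
			if err := windows.FlushConsoleInputBuffer(stdin); err != nil {
				log.Fatal(err)
			}
		}
		fmt.Println("discarded", pending, "pending console events")
	}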
diff --git a/src/cmd/vendor/golang.org/x/sys/windows/types_windows.go b/src/cmd/vendor/golang.org/x/sys/windows/types_windows.go
index 993a2297dbe1a5..358be3c7f5eece 100644
--- a/src/cmd/vendor/golang.org/x/sys/windows/types_windows.go
+++ b/src/cmd/vendor/golang.org/x/sys/windows/types_windows.go
@@ -65,6 +65,22 @@ var signals = [...]string{
15: "terminated",
}
+// File flags for [os.OpenFile]. The O_ prefix is used to indicate
+// that these flags are specific to the OpenFile function.
+const (
+ O_FILE_FLAG_OPEN_NO_RECALL = FILE_FLAG_OPEN_NO_RECALL
+ O_FILE_FLAG_OPEN_REPARSE_POINT = FILE_FLAG_OPEN_REPARSE_POINT
+ O_FILE_FLAG_SESSION_AWARE = FILE_FLAG_SESSION_AWARE
+ O_FILE_FLAG_POSIX_SEMANTICS = FILE_FLAG_POSIX_SEMANTICS
+ O_FILE_FLAG_BACKUP_SEMANTICS = FILE_FLAG_BACKUP_SEMANTICS
+ O_FILE_FLAG_DELETE_ON_CLOSE = FILE_FLAG_DELETE_ON_CLOSE
+ O_FILE_FLAG_SEQUENTIAL_SCAN = FILE_FLAG_SEQUENTIAL_SCAN
+ O_FILE_FLAG_RANDOM_ACCESS = FILE_FLAG_RANDOM_ACCESS
+ O_FILE_FLAG_NO_BUFFERING = FILE_FLAG_NO_BUFFERING
+ O_FILE_FLAG_OVERLAPPED = FILE_FLAG_OVERLAPPED
+ O_FILE_FLAG_WRITE_THROUGH = FILE_FLAG_WRITE_THROUGH
+)
+
const (
FILE_READ_DATA = 0x00000001
FILE_READ_ATTRIBUTES = 0x00000080
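Going by the new comment, these O_-prefixed constants are meant to be OR-ed into the flag argument of os.OpenFile on Windows; the following sketch treats that pass-through behavior as an assumption rather than a documented guarantee:

	package main

	import (
		"log"
		"os"

		"golang.org/x/sys/windows"
	)

	func main() {
		// FILE_FLAG_BACKUP_SEMANTICS is what CreateFile requires in
		// order to open a directory handle.
		f, err := os.OpenFile(`C:\Windows`, os.O_RDONLY|windows.O_FILE_FLAG_BACKUP_SEMANTICS, 0)
		if err != nil {
			log.Fatal(err)
		}
		defer f.Close()
		log.Println("opened", f.Name())
	}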
diff --git a/src/cmd/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/src/cmd/vendor/golang.org/x/sys/windows/zsyscall_windows.go
index 641a5f4b775aa1..426151a0193d35 100644
--- a/src/cmd/vendor/golang.org/x/sys/windows/zsyscall_windows.go
+++ b/src/cmd/vendor/golang.org/x/sys/windows/zsyscall_windows.go
@@ -238,6 +238,7 @@ var (
procFindResourceW = modkernel32.NewProc("FindResourceW")
procFindVolumeClose = modkernel32.NewProc("FindVolumeClose")
procFindVolumeMountPointClose = modkernel32.NewProc("FindVolumeMountPointClose")
+ procFlushConsoleInputBuffer = modkernel32.NewProc("FlushConsoleInputBuffer")
procFlushFileBuffers = modkernel32.NewProc("FlushFileBuffers")
procFlushViewOfFile = modkernel32.NewProc("FlushViewOfFile")
procFormatMessageW = modkernel32.NewProc("FormatMessageW")
@@ -284,6 +285,7 @@ var (
procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW")
procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo")
procGetNamedPipeServerProcessId = modkernel32.NewProc("GetNamedPipeServerProcessId")
+ procGetNumberOfConsoleInputEvents = modkernel32.NewProc("GetNumberOfConsoleInputEvents")
procGetOverlappedResult = modkernel32.NewProc("GetOverlappedResult")
procGetPriorityClass = modkernel32.NewProc("GetPriorityClass")
procGetProcAddress = modkernel32.NewProc("GetProcAddress")
@@ -2111,6 +2113,14 @@ func FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) {
return
}
+func FlushConsoleInputBuffer(console Handle) (err error) {
+ r1, _, e1 := syscall.SyscallN(procFlushConsoleInputBuffer.Addr(), uintptr(console))
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
func FlushFileBuffers(handle Handle) (err error) {
r1, _, e1 := syscall.SyscallN(procFlushFileBuffers.Addr(), uintptr(handle))
if r1 == 0 {
@@ -2481,6 +2491,14 @@ func GetNamedPipeServerProcessId(pipe Handle, serverProcessID *uint32) (err erro
return
}
+func GetNumberOfConsoleInputEvents(console Handle, numevents *uint32) (err error) {
+ r1, _, e1 := syscall.SyscallN(procGetNumberOfConsoleInputEvents.Addr(), uintptr(console), uintptr(unsafe.Pointer(numevents)))
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wait bool) (err error) {
var _p0 uint32
if wait {
diff --git a/src/cmd/vendor/golang.org/x/telemetry/internal/crashmonitor/monitor.go b/src/cmd/vendor/golang.org/x/telemetry/internal/crashmonitor/monitor.go
index 53966ad2bcb362..ac22f68c34ccf1 100644
--- a/src/cmd/vendor/golang.org/x/telemetry/internal/crashmonitor/monitor.go
+++ b/src/cmd/vendor/golang.org/x/telemetry/internal/crashmonitor/monitor.go
@@ -326,11 +326,3 @@ func parseStackPCs(crash string) ([]uintptr, error) {
}
return pcs, nil
}
-
-func min(x, y int) int {
- if x < y {
- return x
- } else {
- return y
- }
-}
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/help.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/help.go
index ce92892c817690..0ca27316e6236b 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/help.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/help.go
@@ -17,10 +17,26 @@ import (
const help = `PROGNAME is a tool for static analysis of Go programs.
-PROGNAME examines Go source code and reports suspicious constructs,
-such as Printf calls whose arguments do not align with the format
-string. It uses heuristics that do not guarantee all reports are
-genuine problems, but it can find errors not caught by the compilers.
+PROGNAME examines Go source code and reports diagnostics for
+suspicious constructs or opportunities for improvement.
+Diagnostics may include suggested fixes.
+
+An example of a suspicious construct is a Printf call whose arguments
+do not align with the format string. Analyzers may use heuristics that
+do not guarantee all reports are genuine problems, but can find
+mistakes not caught by the compiler.
+
+An example of an opportunity for improvement is a loop over
+strings.Split(doc, "\n"), which may be replaced by a loop over the
+strings.SplitSeq iterator, avoiding an array allocation.
+Diagnostics in such cases may report non-problems,
+but should carry fixes that may be safely applied.
+
+For analyzers of the first kind, use "go vet -vettool=PROGRAM"
+to run the tool and report diagnostics.
+
+For analyzers of the second kind, use "go fix -fixtool=PROGRAM"
+to run the tool and apply the fixes it suggests.
`
// Help implements the help subcommand for a multichecker or unitchecker
@@ -29,7 +45,7 @@ genuine problems, but it can find errors not caught by the compilers.
func Help(progname string, analyzers []*analysis.Analyzer, args []string) {
// No args: show summary of all analyzers.
if len(args) == 0 {
- fmt.Println(strings.Replace(help, "PROGNAME", progname, -1))
+ fmt.Println(strings.ReplaceAll(help, "PROGNAME", progname))
fmt.Println("Registered analyzers:")
fmt.Println()
sort.Slice(analyzers, func(i, j int) bool {
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/appends/appends.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/appends/appends.go
index e554c3cc90301e..b4e91edce3b88a 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/appends/appends.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/appends/appends.go
@@ -13,9 +13,9 @@ import (
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/inspect"
- "golang.org/x/tools/go/analysis/passes/internal/analysisutil"
"golang.org/x/tools/go/ast/inspector"
"golang.org/x/tools/go/types/typeutil"
+ "golang.org/x/tools/internal/analysisinternal"
)
//go:embed doc.go
@@ -23,7 +23,7 @@ var doc string
var Analyzer = &analysis.Analyzer{
Name: "appends",
- Doc: analysisutil.MustExtractDoc(doc, "appends"),
+ Doc: analysisinternal.MustExtractDoc(doc, "appends"),
URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/appends",
Requires: []*analysis.Analyzer{inspect.Analyzer},
Run: run,
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go
index efbf05d596a6d2..e9c08798449386 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go
@@ -19,7 +19,7 @@ import (
"strings"
"golang.org/x/tools/go/analysis"
- "golang.org/x/tools/go/analysis/passes/internal/analysisutil"
+ "golang.org/x/tools/internal/analysisinternal"
)
const Doc = "report mismatches between assembly files and Go declarations"
@@ -175,7 +175,7 @@ func run(pass *analysis.Pass) (any, error) {
Files:
for _, fname := range sfiles {
- content, tf, err := analysisutil.ReadFile(pass, fname)
+ content, tf, err := analysisinternal.ReadFile(pass, fname)
if err != nil {
return nil, err
}
@@ -211,7 +211,7 @@ Files:
resultStr = "result register"
}
for _, line := range retLine {
- pass.Reportf(analysisutil.LineStart(tf, line), "[%s] %s: RET without writing to %s", arch, fnName, resultStr)
+ pass.Reportf(tf.LineStart(line), "[%s] %s: RET without writing to %s", arch, fnName, resultStr)
}
}
retLine = nil
@@ -227,7 +227,7 @@ Files:
lineno++
badf := func(format string, args ...any) {
- pass.Reportf(analysisutil.LineStart(tf, lineno), "[%s] %s: %s", arch, fnName, fmt.Sprintf(format, args...))
+ pass.Reportf(tf.LineStart(lineno), "[%s] %s: %s", arch, fnName, fmt.Sprintf(format, args...))
}
if arch == "" {
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go
index 1914bb476168ba..8080aed020e5f9 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go
@@ -17,9 +17,11 @@ import (
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/inspect"
- "golang.org/x/tools/go/analysis/passes/internal/analysisutil"
"golang.org/x/tools/go/ast/inspector"
"golang.org/x/tools/internal/analysisinternal"
+ "golang.org/x/tools/internal/astutil"
+ "golang.org/x/tools/internal/refactor"
+ "golang.org/x/tools/internal/typesinternal"
)
//go:embed doc.go
@@ -27,26 +29,26 @@ var doc string
var Analyzer = &analysis.Analyzer{
Name: "assign",
- Doc: analysisutil.MustExtractDoc(doc, "assign"),
+ Doc: analysisinternal.MustExtractDoc(doc, "assign"),
URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/assign",
Requires: []*analysis.Analyzer{inspect.Analyzer},
Run: run,
}
func run(pass *analysis.Pass) (any, error) {
- inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+ var (
+ inspect = pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+ info = pass.TypesInfo
+ )
- nodeFilter := []ast.Node{
- (*ast.AssignStmt)(nil),
- }
- inspect.Preorder(nodeFilter, func(n ast.Node) {
- stmt := n.(*ast.AssignStmt)
+ for curAssign := range inspect.Root().Preorder((*ast.AssignStmt)(nil)) {
+ stmt := curAssign.Node().(*ast.AssignStmt)
if stmt.Tok != token.ASSIGN {
- return // ignore :=
+ continue // ignore :=
}
if len(stmt.Lhs) != len(stmt.Rhs) {
// If LHS and RHS have different cardinality, they can't be the same.
- return
+ continue
}
// Delete redundant LHS, RHS pairs, taking care
@@ -61,13 +63,13 @@ func run(pass *analysis.Pass) (any, error) {
isSelfAssign := false
var le string
- if !analysisutil.HasSideEffects(pass.TypesInfo, lhs) &&
- !analysisutil.HasSideEffects(pass.TypesInfo, rhs) &&
- !isMapIndex(pass.TypesInfo, lhs) &&
+ if typesinternal.NoEffects(info, lhs) &&
+ typesinternal.NoEffects(info, rhs) &&
+ !isMapIndex(info, lhs) &&
reflect.TypeOf(lhs) == reflect.TypeOf(rhs) { // short-circuit the heavy-weight gofmt check
- le = analysisinternal.Format(pass.Fset, lhs)
- re := analysisinternal.Format(pass.Fset, rhs)
+ le = astutil.Format(pass.Fset, lhs)
+ re := astutil.Format(pass.Fset, rhs)
if le == re {
isSelfAssign = true
}
@@ -109,13 +111,14 @@ func run(pass *analysis.Pass) (any, error) {
}
if len(exprs) == 0 {
- return
+ continue
}
if len(exprs) == len(stmt.Lhs) {
// If every part of the statement is a self-assignment,
// remove the whole statement.
- edits = []analysis.TextEdit{{Pos: stmt.Pos(), End: stmt.End()}}
+ tokFile := pass.Fset.File(stmt.Pos())
+ edits = refactor.DeleteStmt(tokFile, curAssign)
}
pass.Report(analysis.Diagnostic{
@@ -126,7 +129,7 @@ func run(pass *analysis.Pass) (any, error) {
TextEdits: edits,
}},
})
- })
+ }
return nil, nil
}
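For context, a tiny illustration of what the assign analyzer reports; with the change above, when every LHS/RHS pair is a self-assignment the suggested fix deletes the whole statement via refactor.DeleteStmt. Names are illustrative only:

	package demo

	type point struct{ x, y int }

	func update(p *point, x int) {
		p.x = x
		p.y = p.y // assign: self-assignment of p.y to p.y; the fix removes the statement
		x = x     // assign: self-assignment of x to x; the fix removes the statement
	}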
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/atomic/atomic.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/atomic/atomic.go
index 82d5439ce571e9..9faa3f67c1deae 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/atomic/atomic.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/atomic/atomic.go
@@ -11,10 +11,11 @@ import (
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/inspect"
- "golang.org/x/tools/go/analysis/passes/internal/analysisutil"
"golang.org/x/tools/go/ast/inspector"
"golang.org/x/tools/go/types/typeutil"
"golang.org/x/tools/internal/analysisinternal"
+ "golang.org/x/tools/internal/astutil"
+ "golang.org/x/tools/internal/typesinternal"
)
//go:embed doc.go
@@ -22,7 +23,7 @@ var doc string
var Analyzer = &analysis.Analyzer{
Name: "atomic",
- Doc: analysisutil.MustExtractDoc(doc, "atomic"),
+ Doc: analysisinternal.MustExtractDoc(doc, "atomic"),
URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/atomic",
Requires: []*analysis.Analyzer{inspect.Analyzer},
RunDespiteErrors: true,
@@ -30,7 +31,7 @@ var Analyzer = &analysis.Analyzer{
}
func run(pass *analysis.Pass) (any, error) {
- if !analysisinternal.Imports(pass.Pkg, "sync/atomic") {
+ if !typesinternal.Imports(pass.Pkg, "sync/atomic") {
return nil, nil // doesn't directly import sync/atomic
}
@@ -54,7 +55,7 @@ func run(pass *analysis.Pass) (any, error) {
continue
}
obj := typeutil.Callee(pass.TypesInfo, call)
- if analysisinternal.IsFunctionNamed(obj, "sync/atomic", "AddInt32", "AddInt64", "AddUint32", "AddUint64", "AddUintptr") {
+ if typesinternal.IsFunctionNamed(obj, "sync/atomic", "AddInt32", "AddInt64", "AddUint32", "AddUint64", "AddUintptr") {
checkAtomicAddAssignment(pass, n.Lhs[i], call)
}
}
@@ -72,7 +73,7 @@ func checkAtomicAddAssignment(pass *analysis.Pass, left ast.Expr, call *ast.Call
arg := call.Args[0]
broken := false
- gofmt := func(e ast.Expr) string { return analysisinternal.Format(pass.Fset, e) }
+ gofmt := func(e ast.Expr) string { return astutil.Format(pass.Fset, e) }
if uarg, ok := arg.(*ast.UnaryExpr); ok && uarg.Op == token.AND {
broken = gofmt(left) == gofmt(uarg.X)
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/bools/bools.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/bools/bools.go
index e1cf9f9b7ade10..574fafaa95dc98 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/bools/bools.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/bools/bools.go
@@ -13,9 +13,9 @@ import (
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/inspect"
- "golang.org/x/tools/go/analysis/passes/internal/analysisutil"
"golang.org/x/tools/go/ast/inspector"
- "golang.org/x/tools/internal/analysisinternal"
+ "golang.org/x/tools/internal/astutil"
+ "golang.org/x/tools/internal/typesinternal"
)
const Doc = "check for common mistakes involving boolean operators"
@@ -84,7 +84,7 @@ func (op boolOp) commutativeSets(info *types.Info, e *ast.BinaryExpr, seen map[*
i := 0
var sets [][]ast.Expr
for j := 0; j <= len(exprs); j++ {
- if j == len(exprs) || analysisutil.HasSideEffects(info, exprs[j]) {
+ if j == len(exprs) || !typesinternal.NoEffects(info, exprs[j]) {
if i < j {
sets = append(sets, exprs[i:j])
}
@@ -104,7 +104,7 @@ func (op boolOp) commutativeSets(info *types.Info, e *ast.BinaryExpr, seen map[*
func (op boolOp) checkRedundant(pass *analysis.Pass, exprs []ast.Expr) {
seen := make(map[string]bool)
for _, e := range exprs {
- efmt := analysisinternal.Format(pass.Fset, e)
+ efmt := astutil.Format(pass.Fset, e)
if seen[efmt] {
pass.ReportRangef(e, "redundant %s: %s %s %s", op.name, efmt, op.tok, efmt)
} else {
@@ -150,8 +150,8 @@ func (op boolOp) checkSuspect(pass *analysis.Pass, exprs []ast.Expr) {
}
// e is of the form 'x != c' or 'x == c'.
- xfmt := analysisinternal.Format(pass.Fset, x)
- efmt := analysisinternal.Format(pass.Fset, e)
+ xfmt := astutil.Format(pass.Fset, x)
+ efmt := astutil.Format(pass.Fset, e)
if prev, found := seen[xfmt]; found {
// checkRedundant handles the case in which efmt == prev.
if efmt != prev {
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go
index 6e32f298dc25e3..7dd4f249e258cf 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go
@@ -14,7 +14,7 @@ import (
"unicode"
"golang.org/x/tools/go/analysis"
- "golang.org/x/tools/go/analysis/passes/internal/analysisutil"
+ "golang.org/x/tools/internal/analysisinternal"
)
const Doc = "check //go:build and // +build directives"
@@ -86,7 +86,7 @@ func checkOtherFile(pass *analysis.Pass, filename string) error {
// We cannot use the Go parser, since this may not be a Go source file.
// Read the raw bytes instead.
- content, tf, err := analysisutil.ReadFile(pass, filename)
+ content, tf, err := analysisinternal.ReadFile(pass, filename)
if err != nil {
return err
}
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall.go
index d9189b5b69608f..bf1202b92b73d4 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall.go
@@ -18,7 +18,7 @@ import (
"strconv"
"golang.org/x/tools/go/analysis"
- "golang.org/x/tools/internal/analysisinternal"
+ "golang.org/x/tools/internal/typesinternal"
)
const debug = false
@@ -41,7 +41,7 @@ var Analyzer = &analysis.Analyzer{
}
func run(pass *analysis.Pass) (any, error) {
- if !analysisinternal.Imports(pass.Pkg, "runtime/cgo") {
+ if !typesinternal.Imports(pass.Pkg, "runtime/cgo") {
return nil, nil // doesn't use cgo
}
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go
index d35b85f03a721b..4190cc5900f682 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go
@@ -16,8 +16,9 @@ import (
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/inspect"
"golang.org/x/tools/go/ast/inspector"
- "golang.org/x/tools/internal/analysisinternal"
+ "golang.org/x/tools/internal/astutil"
"golang.org/x/tools/internal/typeparams"
+ "golang.org/x/tools/internal/typesinternal"
"golang.org/x/tools/internal/versions"
)
@@ -86,7 +87,7 @@ func checkCopyLocksAssign(pass *analysis.Pass, assign *ast.AssignStmt, goversion
lhs := assign.Lhs
for i, x := range assign.Rhs {
if path := lockPathRhs(pass, x); path != nil {
- pass.ReportRangef(x, "assignment copies lock value to %v: %v", analysisinternal.Format(pass.Fset, assign.Lhs[i]), path)
+ pass.ReportRangef(x, "assignment copies lock value to %v: %v", astutil.Format(pass.Fset, assign.Lhs[i]), path)
lhs = nil // An lhs has been reported. We prefer the assignment warning and do not report twice.
}
}
@@ -100,7 +101,7 @@ func checkCopyLocksAssign(pass *analysis.Pass, assign *ast.AssignStmt, goversion
if id, ok := l.(*ast.Ident); ok && id.Name != "_" {
if obj := pass.TypesInfo.Defs[id]; obj != nil && obj.Type() != nil {
if path := lockPath(pass.Pkg, obj.Type(), nil); path != nil {
- pass.ReportRangef(l, "for loop iteration copies lock value to %v: %v", analysisinternal.Format(pass.Fset, l), path)
+ pass.ReportRangef(l, "for loop iteration copies lock value to %v: %v", astutil.Format(pass.Fset, l), path)
}
}
}
@@ -132,7 +133,7 @@ func checkCopyLocksCompositeLit(pass *analysis.Pass, cl *ast.CompositeLit) {
x = node.Value
}
if path := lockPathRhs(pass, x); path != nil {
- pass.ReportRangef(x, "literal copies lock value from %v: %v", analysisinternal.Format(pass.Fset, x), path)
+ pass.ReportRangef(x, "literal copies lock value from %v: %v", astutil.Format(pass.Fset, x), path)
}
}
}
@@ -166,7 +167,7 @@ func checkCopyLocksCallExpr(pass *analysis.Pass, ce *ast.CallExpr) {
}
for _, x := range ce.Args {
if path := lockPathRhs(pass, x); path != nil {
- pass.ReportRangef(x, "call of %s copies lock value: %v", analysisinternal.Format(pass.Fset, ce.Fun), path)
+ pass.ReportRangef(x, "call of %s copies lock value: %v", astutil.Format(pass.Fset, ce.Fun), path)
}
}
}
@@ -233,7 +234,7 @@ func checkCopyLocksRangeVar(pass *analysis.Pass, rtok token.Token, e ast.Expr) {
return
}
if path := lockPath(pass.Pkg, typ, nil); path != nil {
- pass.Reportf(e.Pos(), "range var %s copies lock: %v", analysisinternal.Format(pass.Fset, e), path)
+ pass.Reportf(e.Pos(), "range var %s copies lock: %v", astutil.Format(pass.Fset, e), path)
}
}
@@ -353,7 +354,7 @@ func lockPath(tpkg *types.Package, typ types.Type, seen map[types.Type]bool) typ
// In go1.10, sync.noCopy did not implement Locker.
// (The Unlock method was added only in CL 121876.)
// TODO(adonovan): remove workaround when we drop go1.10.
- if analysisinternal.IsTypeNamed(typ, "sync", "noCopy") {
+ if typesinternal.IsTypeNamed(typ, "sync", "noCopy") {
return []string{typ.String()}
}
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/defers/defers.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/defers/defers.go
index e11957f2d099d1..3069ee9fecd847 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/defers/defers.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/defers/defers.go
@@ -10,10 +10,10 @@ import (
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/inspect"
- "golang.org/x/tools/go/analysis/passes/internal/analysisutil"
"golang.org/x/tools/go/ast/inspector"
"golang.org/x/tools/go/types/typeutil"
"golang.org/x/tools/internal/analysisinternal"
+ "golang.org/x/tools/internal/typesinternal"
)
//go:embed doc.go
@@ -23,20 +23,20 @@ var doc string
var Analyzer = &analysis.Analyzer{
Name: "defers",
Requires: []*analysis.Analyzer{inspect.Analyzer},
+ Doc: analysisinternal.MustExtractDoc(doc, "defers"),
URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/defers",
- Doc: analysisutil.MustExtractDoc(doc, "defers"),
Run: run,
}
func run(pass *analysis.Pass) (any, error) {
- if !analysisinternal.Imports(pass.Pkg, "time") {
+ if !typesinternal.Imports(pass.Pkg, "time") {
return nil, nil
}
checkDeferCall := func(node ast.Node) bool {
switch v := node.(type) {
case *ast.CallExpr:
- if analysisinternal.IsFunctionNamed(typeutil.Callee(pass.TypesInfo, v), "time", "Since") {
+ if typesinternal.IsFunctionNamed(typeutil.Callee(pass.TypesInfo, v), "time", "Since") {
pass.Reportf(v.Pos(), "call to time.Since is not deferred")
}
case *ast.FuncLit:
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/directive/directive.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/directive/directive.go
index bebec891408fef..c84d25842e3018 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/directive/directive.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/directive/directive.go
@@ -14,7 +14,7 @@ import (
"unicode/utf8"
"golang.org/x/tools/go/analysis"
- "golang.org/x/tools/go/analysis/passes/internal/analysisutil"
+ "golang.org/x/tools/internal/analysisinternal"
)
const Doc = `check Go toolchain directives such as //go:debug
@@ -86,7 +86,7 @@ func checkGoFile(pass *analysis.Pass, f *ast.File) {
func checkOtherFile(pass *analysis.Pass, filename string) error {
	// We cannot use the Go parser, since this is not a Go source file.
// Read the raw bytes instead.
- content, tf, err := analysisutil.ReadFile(pass, filename)
+ content, tf, err := analysisinternal.ReadFile(pass, filename)
if err != nil {
return err
}
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/errorsas/errorsas.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/errorsas/errorsas.go
index b8d29d019db0b0..b3df99929dc6fc 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/errorsas/errorsas.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/errorsas/errorsas.go
@@ -12,22 +12,20 @@ import (
"go/types"
"golang.org/x/tools/go/analysis"
- "golang.org/x/tools/go/analysis/passes/inspect"
- "golang.org/x/tools/go/ast/inspector"
- "golang.org/x/tools/go/types/typeutil"
- "golang.org/x/tools/internal/analysisinternal"
+ typeindexanalyzer "golang.org/x/tools/internal/analysisinternal/typeindex"
+ "golang.org/x/tools/internal/typesinternal/typeindex"
)
const Doc = `report passing non-pointer or non-error values to errors.As
-The errorsas analysis reports calls to errors.As where the type
+The errorsas analyzer reports calls to errors.As where the type
of the second argument is not a pointer to a type implementing error.`
var Analyzer = &analysis.Analyzer{
Name: "errorsas",
Doc: Doc,
URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/errorsas",
- Requires: []*analysis.Analyzer{inspect.Analyzer},
+ Requires: []*analysis.Analyzer{typeindexanalyzer.Analyzer},
Run: run,
}
@@ -39,38 +37,31 @@ func run(pass *analysis.Pass) (any, error) {
return nil, nil
}
- if !analysisinternal.Imports(pass.Pkg, "errors") {
- return nil, nil // doesn't directly import errors
- }
-
- inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+ var (
+ index = pass.ResultOf[typeindexanalyzer.Analyzer].(*typeindex.Index)
+ info = pass.TypesInfo
+ )
- nodeFilter := []ast.Node{
- (*ast.CallExpr)(nil),
- }
- inspect.Preorder(nodeFilter, func(n ast.Node) {
- call := n.(*ast.CallExpr)
- obj := typeutil.Callee(pass.TypesInfo, call)
- if !analysisinternal.IsFunctionNamed(obj, "errors", "As") {
- return
- }
+ for curCall := range index.Calls(index.Object("errors", "As")) {
+ call := curCall.Node().(*ast.CallExpr)
if len(call.Args) < 2 {
- return // not enough arguments, e.g. called with return values of another function
+ continue // spread call: errors.As(pair())
}
- if err := checkAsTarget(pass, call.Args[1]); err != nil {
+
+ // Check for incorrect arguments.
+ if err := checkAsTarget(info, call.Args[1]); err != nil {
pass.ReportRangef(call, "%v", err)
+ continue
}
- })
+ }
return nil, nil
}
-var errorType = types.Universe.Lookup("error").Type()
-
// checkAsTarget reports an error if the second argument to errors.As is invalid.
-func checkAsTarget(pass *analysis.Pass, e ast.Expr) error {
- t := pass.TypesInfo.Types[e].Type
- if it, ok := t.Underlying().(*types.Interface); ok && it.NumMethods() == 0 {
- // A target of interface{} is always allowed, since it often indicates
+func checkAsTarget(info *types.Info, e ast.Expr) error {
+ t := info.Types[e].Type
+ if types.Identical(t.Underlying(), anyType) {
+ // A target of any is always allowed, since it often indicates
// a value forwarded from another source.
return nil
}
@@ -78,12 +69,16 @@ func checkAsTarget(pass *analysis.Pass, e ast.Expr) error {
if !ok {
return errors.New("second argument to errors.As must be a non-nil pointer to either a type that implements error, or to any interface type")
}
- if pt.Elem() == errorType {
+ if types.Identical(pt.Elem(), errorType) {
return errors.New("second argument to errors.As should not be *error")
}
- _, ok = pt.Elem().Underlying().(*types.Interface)
- if ok || types.Implements(pt.Elem(), errorType.Underlying().(*types.Interface)) {
- return nil
+ if !types.IsInterface(pt.Elem()) && !types.AssignableTo(pt.Elem(), errorType) {
+ return errors.New("second argument to errors.As must be a non-nil pointer to either a type that implements error, or to any interface type")
}
- return errors.New("second argument to errors.As must be a non-nil pointer to either a type that implements error, or to any interface type")
+ return nil
}
+
+var (
+ anyType = types.Universe.Lookup("any").Type()
+ errorType = types.Universe.Lookup("error").Type()
+)
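To make the accepted and rejected shapes concrete, a small hedged sketch of calls the rewritten check should allow or flag, based on the error messages above (all names are illustrative):

	package demo

	import (
		"errors"
		"fmt"
	)

	type parseError struct{ msg string }

	func (e *parseError) Error() string { return e.msg }

	func classify(err error) {
		var pe *parseError
		var target error
		var n int

		_ = errors.As(err, &pe)     // ok: pointer to a type implementing error
		_ = errors.As(err, &target) // flagged: second argument should not be *error
		_ = errors.As(err, pe)      // flagged: parseError neither implements error nor is an interface
		_ = errors.As(err, &n)      // flagged: int neither implements error nor is an interface
		fmt.Println(pe, target, n)
	}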
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/framepointer/framepointer.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/framepointer/framepointer.go
index ff9c8b4f818b10..809095d40a5456 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/framepointer/framepointer.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/framepointer/framepointer.go
@@ -13,7 +13,7 @@ import (
"unicode"
"golang.org/x/tools/go/analysis"
- "golang.org/x/tools/go/analysis/passes/internal/analysisutil"
+ "golang.org/x/tools/internal/analysisinternal"
)
const Doc = "report assembly that clobbers the frame pointer before saving it"
@@ -98,7 +98,7 @@ func run(pass *analysis.Pass) (any, error) {
}
for _, fname := range sfiles {
- content, tf, err := analysisutil.ReadFile(pass, fname)
+ content, tf, err := analysisinternal.ReadFile(pass, fname)
if err != nil {
return nil, err
}
@@ -127,7 +127,7 @@ func run(pass *analysis.Pass) (any, error) {
}
if arch.isFPWrite(line) {
- pass.Reportf(analysisutil.LineStart(tf, lineno), "frame pointer is clobbered before saving")
+ pass.Reportf(tf.LineStart(lineno), "frame pointer is clobbered before saving")
active = false
continue
}
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/httpresponse/httpresponse.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/httpresponse/httpresponse.go
index e9acd96547e1a7..37ecb6523bd1ca 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/httpresponse/httpresponse.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/httpresponse/httpresponse.go
@@ -13,7 +13,6 @@ import (
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/inspect"
"golang.org/x/tools/go/ast/inspector"
- "golang.org/x/tools/internal/analysisinternal"
"golang.org/x/tools/internal/typesinternal"
)
@@ -46,7 +45,7 @@ func run(pass *analysis.Pass) (any, error) {
// Fast path: if the package doesn't import net/http,
// skip the traversal.
- if !analysisinternal.Imports(pass.Pkg, "net/http") {
+ if !typesinternal.Imports(pass.Pkg, "net/http") {
return nil, nil
}
@@ -118,7 +117,7 @@ func isHTTPFuncOrMethodOnClient(info *types.Info, expr *ast.CallExpr) bool {
return false // the function called does not return two values.
}
isPtr, named := typesinternal.ReceiverNamed(res.At(0))
- if !isPtr || named == nil || !analysisinternal.IsTypeNamed(named, "net/http", "Response") {
+ if !isPtr || named == nil || !typesinternal.IsTypeNamed(named, "net/http", "Response") {
return false // the first return type is not *http.Response.
}
@@ -133,11 +132,11 @@ func isHTTPFuncOrMethodOnClient(info *types.Info, expr *ast.CallExpr) bool {
return ok && id.Name == "http" // function in net/http package.
}
- if analysisinternal.IsTypeNamed(typ, "net/http", "Client") {
+ if typesinternal.IsTypeNamed(typ, "net/http", "Client") {
return true // method on http.Client.
}
ptr, ok := types.Unalias(typ).(*types.Pointer)
- return ok && analysisinternal.IsTypeNamed(ptr.Elem(), "net/http", "Client") // method on *http.Client.
+ return ok && typesinternal.IsTypeNamed(ptr.Elem(), "net/http", "Client") // method on *http.Client.
}
// restOfBlock, given a traversal stack, finds the innermost containing
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/ifaceassert.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/ifaceassert.go
index 4022dbe7c22cce..a6dcf1cf8e8e1d 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/ifaceassert.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/ifaceassert.go
@@ -11,8 +11,8 @@ import (
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/inspect"
- "golang.org/x/tools/go/analysis/passes/internal/analysisutil"
"golang.org/x/tools/go/ast/inspector"
+ "golang.org/x/tools/internal/analysisinternal"
"golang.org/x/tools/internal/typeparams"
)
@@ -21,7 +21,7 @@ var doc string
var Analyzer = &analysis.Analyzer{
Name: "ifaceassert",
- Doc: analysisutil.MustExtractDoc(doc, "ifaceassert"),
+ Doc: analysisinternal.MustExtractDoc(doc, "ifaceassert"),
URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/ifaceassert",
Requires: []*analysis.Analyzer{inspect.Analyzer},
Run: run,
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/internal/analysisutil/util.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/internal/analysisutil/util.go
deleted file mode 100644
index d3df898d3011cd..00000000000000
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/internal/analysisutil/util.go
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package analysisutil defines various helper functions
-// used by two or more packages beneath go/analysis.
-package analysisutil
-
-import (
- "go/ast"
- "go/token"
- "go/types"
- "os"
-
- "golang.org/x/tools/go/analysis"
- "golang.org/x/tools/internal/analysisinternal"
-)
-
-// HasSideEffects reports whether evaluation of e has side effects.
-func HasSideEffects(info *types.Info, e ast.Expr) bool {
- safe := true
- ast.Inspect(e, func(node ast.Node) bool {
- switch n := node.(type) {
- case *ast.CallExpr:
- typVal := info.Types[n.Fun]
- switch {
- case typVal.IsType():
- // Type conversion, which is safe.
- case typVal.IsBuiltin():
- // Builtin func, conservatively assumed to not
- // be safe for now.
- safe = false
- return false
- default:
- // A non-builtin func or method call.
- // Conservatively assume that all of them have
- // side effects for now.
- safe = false
- return false
- }
- case *ast.UnaryExpr:
- if n.Op == token.ARROW {
- safe = false
- return false
- }
- }
- return true
- })
- return !safe
-}
-
-// ReadFile reads a file and adds it to the FileSet
-// so that we can report errors against it using lineStart.
-func ReadFile(pass *analysis.Pass, filename string) ([]byte, *token.File, error) {
- readFile := pass.ReadFile
- if readFile == nil {
- readFile = os.ReadFile
- }
- content, err := readFile(filename)
- if err != nil {
- return nil, nil, err
- }
- tf := pass.Fset.AddFile(filename, -1, len(content))
- tf.SetLinesForContent(content)
- return content, tf, nil
-}
-
-// LineStart returns the position of the start of the specified line
-// within file f, or NoPos if there is no line of that number.
-func LineStart(f *token.File, line int) token.Pos {
- // Use binary search to find the start offset of this line.
- //
- // TODO(adonovan): eventually replace this function with the
- // simpler and more efficient (*go/token.File).LineStart, added
- // in go1.12.
-
- min := 0 // inclusive
- max := f.Size() // exclusive
- for {
- offset := (min + max) / 2
- pos := f.Pos(offset)
- posn := f.Position(pos)
- if posn.Line == line {
- return pos - (token.Pos(posn.Column) - 1)
- }
-
- if min+1 >= max {
- return token.NoPos
- }
-
- if posn.Line < line {
- min = offset
- } else {
- max = offset
- }
- }
-}
-
-var MustExtractDoc = analysisinternal.MustExtractDoc
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/loopclosure/loopclosure.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/loopclosure/loopclosure.go
index 2580a0ac21f1f9..868226328fc5e1 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/loopclosure/loopclosure.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/loopclosure/loopclosure.go
@@ -11,7 +11,6 @@ import (
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/inspect"
- "golang.org/x/tools/go/analysis/passes/internal/analysisutil"
"golang.org/x/tools/go/ast/inspector"
"golang.org/x/tools/go/types/typeutil"
"golang.org/x/tools/internal/analysisinternal"
@@ -24,7 +23,7 @@ var doc string
var Analyzer = &analysis.Analyzer{
Name: "loopclosure",
- Doc: analysisutil.MustExtractDoc(doc, "loopclosure"),
+ Doc: analysisinternal.MustExtractDoc(doc, "loopclosure"),
URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/loopclosure",
Requires: []*analysis.Analyzer{inspect.Analyzer},
Run: run,
@@ -369,5 +368,5 @@ func isMethodCall(info *types.Info, expr ast.Expr, pkgPath, typeName, method str
	// Check that the receiver is a <pkgPath>.<typeName> or
	// *<pkgPath>.<typeName>.
_, named := typesinternal.ReceiverNamed(recv)
- return analysisinternal.IsTypeNamed(named, pkgPath, typeName)
+ return typesinternal.IsTypeNamed(named, pkgPath, typeName)
}
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/lostcancel/lostcancel.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/lostcancel/lostcancel.go
index c0746789e9cb2a..dfaecf51e25a2b 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/lostcancel/lostcancel.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/lostcancel/lostcancel.go
@@ -13,11 +13,11 @@ import (
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/ctrlflow"
"golang.org/x/tools/go/analysis/passes/inspect"
- "golang.org/x/tools/go/analysis/passes/internal/analysisutil"
"golang.org/x/tools/go/ast/inspector"
"golang.org/x/tools/go/cfg"
"golang.org/x/tools/internal/analysisinternal"
"golang.org/x/tools/internal/astutil"
+ "golang.org/x/tools/internal/typesinternal"
)
//go:embed doc.go
@@ -25,7 +25,7 @@ var doc string
var Analyzer = &analysis.Analyzer{
Name: "lostcancel",
- Doc: analysisutil.MustExtractDoc(doc, "lostcancel"),
+ Doc: analysisinternal.MustExtractDoc(doc, "lostcancel"),
URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/lostcancel",
Run: run,
Requires: []*analysis.Analyzer{
@@ -50,7 +50,7 @@ var contextPackage = "context"
// checkLostCancel analyzes a single named or literal function.
func run(pass *analysis.Pass) (any, error) {
// Fast path: bypass check if file doesn't use context.WithCancel.
- if !analysisinternal.Imports(pass.Pkg, contextPackage) {
+ if !typesinternal.Imports(pass.Pkg, contextPackage) {
return nil, nil
}
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/nilfunc/nilfunc.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/nilfunc/nilfunc.go
index fa1883b0c3402c..2b5a7c80378828 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/nilfunc/nilfunc.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/nilfunc/nilfunc.go
@@ -14,8 +14,8 @@ import (
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/inspect"
- "golang.org/x/tools/go/analysis/passes/internal/analysisutil"
"golang.org/x/tools/go/ast/inspector"
+ "golang.org/x/tools/internal/analysisinternal"
"golang.org/x/tools/internal/typesinternal"
)
@@ -24,7 +24,7 @@ var doc string
var Analyzer = &analysis.Analyzer{
Name: "nilfunc",
- Doc: analysisutil.MustExtractDoc(doc, "nilfunc"),
+ Doc: analysisinternal.MustExtractDoc(doc, "nilfunc"),
URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/nilfunc",
Requires: []*analysis.Analyzer{inspect.Analyzer},
Run: run,
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/printf/doc.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/printf/doc.go
index eebf40208d1816..f04e44143412a4 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/printf/doc.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/printf/doc.go
@@ -82,6 +82,16 @@
// ...
// }
//
+// A local function may also be inferred as a printf wrapper. If it
+// is assigned to a variable, each call made through that variable will
+// be checked just like a call to a function:
+//
+// logf := func(format string, args ...any) {
+// message := fmt.Sprintf(format, args...)
+// log.Printf("%s: %s", prefix, message)
+// }
+// logf("%s", 123) // logf format %s has arg 123 of wrong type int
+//
// # Specifying printf wrappers by flag
//
// The -funcs flag specifies a comma-separated list of names of
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go
index f008eca36fe3c1..910ffe70d7e33a 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go
@@ -18,13 +18,14 @@ import (
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/inspect"
- "golang.org/x/tools/go/analysis/passes/internal/analysisutil"
+ "golang.org/x/tools/go/ast/edge"
"golang.org/x/tools/go/ast/inspector"
"golang.org/x/tools/go/types/typeutil"
"golang.org/x/tools/internal/analysisinternal"
"golang.org/x/tools/internal/astutil"
"golang.org/x/tools/internal/fmtstr"
"golang.org/x/tools/internal/typeparams"
+ "golang.org/x/tools/internal/typesinternal"
"golang.org/x/tools/internal/versions"
)
@@ -37,11 +38,11 @@ var doc string
var Analyzer = &analysis.Analyzer{
Name: "printf",
- Doc: analysisutil.MustExtractDoc(doc, "printf"),
+ Doc: analysisinternal.MustExtractDoc(doc, "printf"),
URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/printf",
Requires: []*analysis.Analyzer{inspect.Analyzer},
Run: run,
- ResultType: reflect.TypeOf((*Result)(nil)),
+ ResultType: reflect.TypeFor[*Result](),
FactTypes: []analysis.Fact{new(isWrapper)},
}
@@ -70,7 +71,7 @@ func (kind Kind) String() string {
// Result is the printf analyzer's result type. Clients may query the result
// to learn whether a function behaves like fmt.Print or fmt.Printf.
type Result struct {
- funcs map[*types.Func]Kind
+ funcs map[types.Object]Kind
}
// Kind reports whether fn behaves like fmt.Print or fmt.Printf.
@@ -111,149 +112,210 @@ func (f *isWrapper) String() string {
func run(pass *analysis.Pass) (any, error) {
res := &Result{
- funcs: make(map[*types.Func]Kind),
+ funcs: make(map[types.Object]Kind),
}
- findPrintfLike(pass, res)
- checkCalls(pass)
+ findPrintLike(pass, res)
+ checkCalls(pass, res)
return res, nil
}
-type printfWrapper struct {
- obj *types.Func
- fdecl *ast.FuncDecl
- format *types.Var
- args *types.Var
+// A wrapper is a candidate print/printf wrapper function.
+//
+// We represent functions generally as types.Object, not *Func, so
+// that we can analyze anonymous functions such as
+//
+// printf := func(format string, args ...any) {...},
+//
+// representing them by the *types.Var symbol for the local variable
+// 'printf'.
+type wrapper struct {
+ obj types.Object // *Func or *Var
+ curBody inspector.Cursor // for *ast.BlockStmt
+ format *types.Var // optional "format string" parameter in the Func{Decl,Lit}
+ args *types.Var // "args ...any" parameter in the Func{Decl,Lit}
callers []printfCaller
- failed bool // if true, not a printf wrapper
}
type printfCaller struct {
- w *printfWrapper
+ w *wrapper
call *ast.CallExpr
}
-// maybePrintfWrapper decides whether decl (a declared function) may be a wrapper
-// around a fmt.Printf or fmt.Print function. If so it returns a printfWrapper
-// function describing the declaration. Later processing will analyze the
-// graph of potential printf wrappers to pick out the ones that are true wrappers.
-// A function may be a Printf or Print wrapper if its last argument is ...interface{}.
-// If the next-to-last argument is a string, then this may be a Printf wrapper.
-// Otherwise it may be a Print wrapper.
-func maybePrintfWrapper(info *types.Info, decl ast.Decl) *printfWrapper {
- // Look for functions with final argument type ...interface{}.
- fdecl, ok := decl.(*ast.FuncDecl)
- if !ok || fdecl.Body == nil {
- return nil
- }
- fn, ok := info.Defs[fdecl.Name].(*types.Func)
- // Type information may be incomplete.
- if !ok {
- return nil
- }
-
- sig := fn.Type().(*types.Signature)
+// formatArgsParams returns the "format string" and "args ...any"
+// parameters of a potential print or printf wrapper function.
+// (The format is nil in the print-like case.)
+func formatArgsParams(sig *types.Signature) (format, args *types.Var) {
if !sig.Variadic() {
- return nil // not variadic
+ return nil, nil // not variadic
}
params := sig.Params()
nparams := params.Len() // variadic => nonzero
- // Check final parameter is "args ...interface{}".
- args := params.At(nparams - 1)
- iface, ok := types.Unalias(args.Type().(*types.Slice).Elem()).(*types.Interface)
- if !ok || !iface.Empty() {
- return nil
- }
-
// Is second last param 'format string'?
- var format *types.Var
if nparams >= 2 {
if p := params.At(nparams - 2); p.Type() == types.Typ[types.String] {
format = p
}
}
- return &printfWrapper{
- obj: fn,
- fdecl: fdecl,
- format: format,
- args: args,
+ // Check final parameter is "args ...any".
+ // (variadic => slice)
+ args = params.At(nparams - 1)
+ iface, ok := types.Unalias(args.Type().(*types.Slice).Elem()).(*types.Interface)
+ if !ok || !iface.Empty() {
+ return nil, nil
}
+
+ return format, args
}
-// findPrintfLike scans the entire package to find printf-like functions.
-func findPrintfLike(pass *analysis.Pass, res *Result) (any, error) {
- // Gather potential wrappers and call graph between them.
- byObj := make(map[*types.Func]*printfWrapper)
- var wrappers []*printfWrapper
- for _, file := range pass.Files {
- for _, decl := range file.Decls {
- w := maybePrintfWrapper(pass.TypesInfo, decl)
- if w == nil {
- continue
+// findPrintLike scans the entire package to find print or printf-like functions.
+// When it returns, all such functions have been identified.
+func findPrintLike(pass *analysis.Pass, res *Result) {
+ var (
+ inspect = pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+ info = pass.TypesInfo
+ )
+
+ // Pass 1: gather candidate wrapper functions (and populate wrappers).
+ var (
+ wrappers []*wrapper
+ byObj = make(map[types.Object]*wrapper)
+ )
+ for cur := range inspect.Root().Preorder((*ast.FuncDecl)(nil), (*ast.FuncLit)(nil)) {
+ var (
+ curBody inspector.Cursor // for *ast.BlockStmt
+ sig *types.Signature
+ obj types.Object
+ )
+ switch f := cur.Node().(type) {
+ case *ast.FuncDecl:
+ // named function or method:
+ //
+ // func wrapf(format string, args ...any) {...}
+ if f.Body != nil {
+ curBody = cur.ChildAt(edge.FuncDecl_Body, -1)
+ obj = info.Defs[f.Name]
+ sig = obj.Type().(*types.Signature)
}
- byObj[w.obj] = w
- wrappers = append(wrappers, w)
- }
- }
- // Walk the graph to figure out which are really printf wrappers.
- for _, w := range wrappers {
- // Scan function for calls that could be to other printf-like functions.
- ast.Inspect(w.fdecl.Body, func(n ast.Node) bool {
- if w.failed {
- return false
+ case *ast.FuncLit:
+ // anonymous function directly assigned to a variable:
+ //
+ // var wrapf = func(format string, args ...any) {...}
+ // wrapf := func(format string, args ...any) {...}
+ // wrapf = func(format string, args ...any) {...}
+ //
+ // The LHS may also be a struct field x.wrapf or
+ // an imported var pkg.Wrapf.
+ //
+ sig = info.TypeOf(f).(*types.Signature)
+ curBody = cur.ChildAt(edge.FuncLit_Body, -1)
+ var lhs ast.Expr
+ switch ek, idx := cur.ParentEdge(); ek {
+ case edge.ValueSpec_Values:
+ curName := cur.Parent().ChildAt(edge.ValueSpec_Names, idx)
+ lhs = curName.Node().(*ast.Ident)
+ case edge.AssignStmt_Rhs:
+ curLhs := cur.Parent().ChildAt(edge.AssignStmt_Lhs, idx)
+ lhs = curLhs.Node().(ast.Expr)
}
- // TODO: Relax these checks; issue 26555.
- if assign, ok := n.(*ast.AssignStmt); ok {
- for _, lhs := range assign.Lhs {
- if match(pass.TypesInfo, lhs, w.format) ||
- match(pass.TypesInfo, lhs, w.args) {
- // Modifies the format
- // string or args in
- // some way, so not a
- // simple wrapper.
- w.failed = true
- return false
- }
+ switch lhs := lhs.(type) {
+ case *ast.Ident:
+ // variable: wrapf = func(...)
+ obj = info.ObjectOf(lhs).(*types.Var)
+ case *ast.SelectorExpr:
+ if sel, ok := info.Selections[lhs]; ok {
+ // struct field: x.wrapf = func(...)
+ obj = sel.Obj().(*types.Var)
+ } else {
+ // imported var: pkg.Wrapf = func(...)
+ obj = info.Uses[lhs.Sel].(*types.Var)
}
}
- if un, ok := n.(*ast.UnaryExpr); ok && un.Op == token.AND {
- if match(pass.TypesInfo, un.X, w.format) ||
- match(pass.TypesInfo, un.X, w.args) {
- // Taking the address of the
- // format string or args,
- // so not a simple wrapper.
- w.failed = true
- return false
+ }
+ if obj != nil {
+ format, args := formatArgsParams(sig)
+ if args != nil {
+ // obj (the symbol for a function/method, or variable
+ // assigned to an anonymous function) is a potential
+ // print or printf wrapper.
+ //
+ // Later processing will analyze the graph of potential
+ // wrappers and their function bodies to pick out the
+ // ones that are true wrappers.
+ w := &wrapper{
+ obj: obj,
+ curBody: curBody,
+ format: format, // non-nil => printf
+ args: args,
}
+ byObj[w.obj] = w
+ wrappers = append(wrappers, w)
}
+ }
+ }
- call, ok := n.(*ast.CallExpr)
- if !ok || len(call.Args) == 0 || !match(pass.TypesInfo, call.Args[len(call.Args)-1], w.args) {
- return true
- }
+ // Pass 2: scan the body of each wrapper function
+ // for calls to other printf-like functions.
+ //
+ // Also, reject tricky cases where the parameters
+ // are potentially mutated by AssignStmt or UnaryExpr.
+ // TODO: Relax these checks; issue 26555.
+ for _, w := range wrappers {
+ scan:
+ for cur := range w.curBody.Preorder(
+ (*ast.AssignStmt)(nil),
+ (*ast.UnaryExpr)(nil),
+ (*ast.CallExpr)(nil),
+ ) {
+ switch n := cur.Node().(type) {
+ case *ast.AssignStmt:
+ // If the wrapper updates format or args
+ // it is not a simple wrapper.
+ for _, lhs := range n.Lhs {
+ if w.format != nil && match(info, lhs, w.format) ||
+ match(info, lhs, w.args) {
+ break scan
+ }
+ }
- fn, kind := printfNameAndKind(pass, call)
- if kind != 0 {
- checkPrintfFwd(pass, w, call, kind, res)
- return true
- }
+ case *ast.UnaryExpr:
+ // If the wrapper computes &format or &args,
+ // it is not a simple wrapper.
+ if n.Op == token.AND &&
+ (w.format != nil && match(info, n.X, w.format) ||
+ match(info, n.X, w.args)) {
+ break scan
+ }
- // If the call is to another function in this package,
- // maybe we will find out it is printf-like later.
- // Remember this call for later checking.
- if fn != nil && fn.Pkg() == pass.Pkg && byObj[fn] != nil {
- callee := byObj[fn]
- callee.callers = append(callee.callers, printfCaller{w, call})
+ case *ast.CallExpr:
+ if len(n.Args) > 0 && match(info, n.Args[len(n.Args)-1], w.args) {
+ if callee := typeutil.Callee(pass.TypesInfo, n); callee != nil {
+
+ // Call from one wrapper candidate to another?
+ // Record the edge so that if callee is found to be
+ // a true wrapper, w will be too.
+ if w2, ok := byObj[callee]; ok {
+ w2.callers = append(w2.callers, printfCaller{w, n})
+ }
+
+ // Is the candidate a true wrapper, because it calls
+ // a known print{,f}-like function from the allowlist
+ // or an imported fact, or another wrapper found
+ // to be a true wrapper?
+ // If so, convert all w's callers to kind.
+ kind := callKind(pass, callee, res)
+ if kind != KindNone {
+ checkForward(pass, w, n, kind, res)
+ }
+ }
+ }
}
-
- return true
- })
+ }
}
- return nil, nil
}
func match(info *types.Info, arg ast.Expr, param *types.Var) bool {
@@ -261,9 +323,9 @@ func match(info *types.Info, arg ast.Expr, param *types.Var) bool {
return ok && info.ObjectOf(id) == param
}
-// checkPrintfFwd checks that a printf-forwarding wrapper is forwarding correctly.
+// checkForward checks that a forwarding wrapper is forwarding correctly.
// It diagnoses writing fmt.Printf(format, args) instead of fmt.Printf(format, args...).
-func checkPrintfFwd(pass *analysis.Pass, w *printfWrapper, call *ast.CallExpr, kind Kind, res *Result) {
+func checkForward(pass *analysis.Pass, w *wrapper, call *ast.CallExpr, kind Kind, res *Result) {
matched := kind == KindPrint ||
kind != KindNone && len(call.Args) >= 2 && match(pass.TypesInfo, call.Args[len(call.Args)-2], w.format)
if !matched {
@@ -292,18 +354,39 @@ func checkPrintfFwd(pass *analysis.Pass, w *printfWrapper, call *ast.CallExpr, k
pass.ReportRangef(call, "missing ... in args forwarded to %s-like function", desc)
return
}
- fn := w.obj
- var fact isWrapper
- if !pass.ImportObjectFact(fn, &fact) {
- fact.Kind = kind
- pass.ExportObjectFact(fn, &fact)
- res.funcs[fn] = kind
+
+ // If the candidate's print{,f} status becomes known,
+ // propagate it back to all its so-far known callers.
+ if res.funcs[w.obj] != kind {
+ res.funcs[w.obj] = kind
+
+ // Export a fact.
+ // (This is a no-op for local symbols.)
+ // We can't export facts on a symbol of another package,
+ // but we can treat the symbol as a wrapper within
+ // the current analysis unit.
+ if w.obj.Pkg() == pass.Pkg {
+ // Facts are associated with origins.
+ pass.ExportObjectFact(origin(w.obj), &isWrapper{Kind: kind})
+ }
+
+ // Propagate kind back to known callers.
for _, caller := range w.callers {
- checkPrintfFwd(pass, caller.w, caller.call, kind, res)
+ checkForward(pass, caller.w, caller.call, kind, res)
}
}
}
+func origin(obj types.Object) types.Object {
+ switch obj := obj.(type) {
+ case *types.Func:
+ return obj.Origin()
+ case *types.Var:
+ return obj.Origin()
+ }
+ return obj
+}
+
// isPrint records the print functions.
// If a key ends in 'f' then it is assumed to be a formatted print.
//
@@ -412,7 +495,7 @@ func stringConstantExpr(pass *analysis.Pass, expr ast.Expr) (string, bool) {
// checkCalls triggers the print-specific checks for calls that invoke a print
// function.
-func checkCalls(pass *analysis.Pass) {
+func checkCalls(pass *analysis.Pass, res *Result) {
inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
nodeFilter := []ast.Node{
(*ast.File)(nil),
@@ -426,48 +509,60 @@ func checkCalls(pass *analysis.Pass) {
fileVersion = versions.Lang(versions.FileVersion(pass.TypesInfo, n))
case *ast.CallExpr:
- fn, kind := printfNameAndKind(pass, n)
- switch kind {
- case KindPrintf, KindErrorf:
- checkPrintf(pass, fileVersion, kind, n, fn.FullName())
- case KindPrint:
- checkPrint(pass, n, fn.FullName())
+ if callee := typeutil.Callee(pass.TypesInfo, n); callee != nil {
+ kind := callKind(pass, callee, res)
+ switch kind {
+ case KindPrintf, KindErrorf:
+ checkPrintf(pass, fileVersion, kind, n, fullname(callee))
+ case KindPrint:
+ checkPrint(pass, n, fullname(callee))
+ }
}
}
})
}
-func printfNameAndKind(pass *analysis.Pass, call *ast.CallExpr) (fn *types.Func, kind Kind) {
- fn, _ = typeutil.Callee(pass.TypesInfo, call).(*types.Func)
- if fn == nil {
- return nil, 0
+func fullname(obj types.Object) string {
+ if fn, ok := obj.(*types.Func); ok {
+ return fn.FullName()
}
+ return obj.Name()
+}
- // Facts are associated with generic declarations, not instantiations.
- fn = fn.Origin()
-
- _, ok := isPrint[fn.FullName()]
+// callKind returns the print/printf kind, if any,
+// of the called function symbol obj.
+// (The symbol may be a var for an anonymous function.)
+// The result is memoized in res.funcs.
+func callKind(pass *analysis.Pass, obj types.Object, res *Result) Kind {
+ kind, ok := res.funcs[obj]
if !ok {
- // Next look up just "printf", for use with -printf.funcs.
- _, ok = isPrint[strings.ToLower(fn.Name())]
- }
- if ok {
- if fn.FullName() == "fmt.Errorf" {
- kind = KindErrorf
- } else if strings.HasSuffix(fn.Name(), "f") {
- kind = KindPrintf
+ // cache miss
+ _, ok := isPrint[fullname(obj)]
+ if !ok {
+ // Next look up just "printf", for use with -printf.funcs.
+ _, ok = isPrint[strings.ToLower(obj.Name())]
+ }
+ if ok {
+ // well-known printf functions
+ if fullname(obj) == "fmt.Errorf" {
+ kind = KindErrorf
+ } else if strings.HasSuffix(obj.Name(), "f") {
+ kind = KindPrintf
+ } else {
+ kind = KindPrint
+ }
} else {
- kind = KindPrint
+ // imported wrappers
+ // Facts are associated with generic declarations, not instantiations.
+ obj = origin(obj)
+ var fact isWrapper
+ if pass.ImportObjectFact(obj, &fact) {
+ kind = fact.Kind
+ }
}
- return fn, kind
+ res.funcs[obj] = kind // cache
}
-
- var fact isWrapper
- if pass.ImportObjectFact(fn, &fact) {
- return fn, fact.Kind
- }
-
- return fn, KindNone
+ return kind
}
// isFormatter reports whether t could satisfy fmt.Formatter.
@@ -490,7 +585,7 @@ func isFormatter(typ types.Type) bool {
sig := fn.Type().(*types.Signature)
return sig.Params().Len() == 2 &&
sig.Results().Len() == 0 &&
- analysisinternal.IsTypeNamed(sig.Params().At(0).Type(), "fmt", "State") &&
+ typesinternal.IsTypeNamed(sig.Params().At(0).Type(), "fmt", "State") &&
types.Identical(sig.Params().At(1).Type(), types.Typ[types.Rune])
}
@@ -729,7 +824,7 @@ func okPrintfArg(pass *analysis.Pass, call *ast.CallExpr, rng analysis.Range, ma
if reason != "" {
details = " (" + reason + ")"
}
- pass.ReportRangef(rng, "%s format %s uses non-int %s%s as argument of *", name, operation.Text, analysisinternal.Format(pass.Fset, arg), details)
+ pass.ReportRangef(rng, "%s format %s uses non-int %s%s as argument of *", name, operation.Text, astutil.Format(pass.Fset, arg), details)
return false
}
}
@@ -756,7 +851,7 @@ func okPrintfArg(pass *analysis.Pass, call *ast.CallExpr, rng analysis.Range, ma
}
arg := call.Args[verbArgIndex]
if isFunctionValue(pass, arg) && verb != 'p' && verb != 'T' {
- pass.ReportRangef(rng, "%s format %s arg %s is a func value, not called", name, operation.Text, analysisinternal.Format(pass.Fset, arg))
+ pass.ReportRangef(rng, "%s format %s arg %s is a func value, not called", name, operation.Text, astutil.Format(pass.Fset, arg))
return false
}
if reason, ok := matchArgType(pass, v.typ, arg); !ok {
@@ -768,14 +863,14 @@ func okPrintfArg(pass *analysis.Pass, call *ast.CallExpr, rng analysis.Range, ma
if reason != "" {
details = " (" + reason + ")"
}
- pass.ReportRangef(rng, "%s format %s has arg %s of wrong type %s%s", name, operation.Text, analysisinternal.Format(pass.Fset, arg), typeString, details)
+ pass.ReportRangef(rng, "%s format %s has arg %s of wrong type %s%s", name, operation.Text, astutil.Format(pass.Fset, arg), typeString, details)
return false
}
// Detect recursive formatting via value's String/Error methods.
// The '#' flag suppresses the methods, except with %x, %X, and %q.
if v.typ&argString != 0 && v.verb != 'T' && (!strings.Contains(operation.Flags, "#") || strings.ContainsRune("qxX", v.verb)) {
if methodName, ok := recursiveStringer(pass, arg); ok {
- pass.ReportRangef(rng, "%s format %s with arg %s causes recursive %s method call", name, operation.Text, analysisinternal.Format(pass.Fset, arg), methodName)
+ pass.ReportRangef(rng, "%s format %s with arg %s causes recursive %s method call", name, operation.Text, astutil.Format(pass.Fset, arg), methodName)
return false
}
}
@@ -927,7 +1022,7 @@ func checkPrint(pass *analysis.Pass, call *ast.CallExpr, name string) {
if sel, ok := call.Args[0].(*ast.SelectorExpr); ok {
if x, ok := sel.X.(*ast.Ident); ok {
if x.Name == "os" && strings.HasPrefix(sel.Sel.Name, "Std") {
- pass.ReportRangef(call, "%s does not take io.Writer but has first arg %s", name, analysisinternal.Format(pass.Fset, call.Args[0]))
+ pass.ReportRangef(call, "%s does not take io.Writer but has first arg %s", name, astutil.Format(pass.Fset, call.Args[0]))
}
}
}
@@ -961,10 +1056,10 @@ func checkPrint(pass *analysis.Pass, call *ast.CallExpr, name string) {
}
for _, arg := range args {
if isFunctionValue(pass, arg) {
- pass.ReportRangef(call, "%s arg %s is a func value, not called", name, analysisinternal.Format(pass.Fset, arg))
+ pass.ReportRangef(call, "%s arg %s is a func value, not called", name, astutil.Format(pass.Fset, arg))
}
if methodName, ok := recursiveStringer(pass, arg); ok {
- pass.ReportRangef(call, "%s arg %s causes recursive call to %s method", name, analysisinternal.Format(pass.Fset, arg), methodName)
+ pass.ReportRangef(call, "%s arg %s causes recursive call to %s method", name, astutil.Format(pass.Fset, arg), methodName)
}
}
}
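
The printf changes above memoize each callee's kind in Result.funcs and export an isWrapper fact for wrappers declared in the current package, so calls through wrappers in other packages are still checked. A minimal sketch of the kind of wrapper this detection targets (the names are illustrative, not from the patch):

package logutil

import "fmt"

// logf forwards its format string and arguments to fmt.Printf, so the
// printf analyzer treats it as a printf wrapper and checks its call sites.
func logf(format string, args ...any) {
	fmt.Printf("log: "+format, args...)
}

func demo() {
	logf("%d items", "three") // would be flagged: %d given a string argument
}
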
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/shift/shift.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/shift/shift.go
index 57987b3d203a75..366927326fcede 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/shift/shift.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/shift/shift.go
@@ -20,7 +20,7 @@ import (
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/inspect"
"golang.org/x/tools/go/ast/inspector"
- "golang.org/x/tools/internal/analysisinternal"
+ "golang.org/x/tools/internal/astutil"
"golang.org/x/tools/internal/typeparams"
)
@@ -123,7 +123,7 @@ func checkLongShift(pass *analysis.Pass, node ast.Node, x, y ast.Expr) {
}
}
if amt >= minSize {
- ident := analysisinternal.Format(pass.Fset, x)
+ ident := astutil.Format(pass.Fset, x)
qualifier := ""
if len(sizes) > 1 {
qualifier = "may be "
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/sigchanyzer/sigchanyzer.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/sigchanyzer/sigchanyzer.go
index 78a2fa5ea3bd34..934f3913c2763b 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/sigchanyzer/sigchanyzer.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/sigchanyzer/sigchanyzer.go
@@ -18,9 +18,9 @@ import (
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/inspect"
- "golang.org/x/tools/go/analysis/passes/internal/analysisutil"
"golang.org/x/tools/go/ast/inspector"
"golang.org/x/tools/internal/analysisinternal"
+ "golang.org/x/tools/internal/typesinternal"
)
//go:embed doc.go
@@ -29,14 +29,14 @@ var doc string
// Analyzer describes sigchanyzer analysis function detector.
var Analyzer = &analysis.Analyzer{
Name: "sigchanyzer",
- Doc: analysisutil.MustExtractDoc(doc, "sigchanyzer"),
+ Doc: analysisinternal.MustExtractDoc(doc, "sigchanyzer"),
URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/sigchanyzer",
Requires: []*analysis.Analyzer{inspect.Analyzer},
Run: run,
}
func run(pass *analysis.Pass) (any, error) {
- if !analysisinternal.Imports(pass.Pkg, "os/signal") {
+ if !typesinternal.Imports(pass.Pkg, "os/signal") {
return nil, nil // doesn't directly import signal
}
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/slog/slog.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/slog/slog.go
index c1ac960435d41f..2cb91c73299374 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/slog/slog.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/slog/slog.go
@@ -17,10 +17,10 @@ import (
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/inspect"
- "golang.org/x/tools/go/analysis/passes/internal/analysisutil"
"golang.org/x/tools/go/ast/inspector"
"golang.org/x/tools/go/types/typeutil"
"golang.org/x/tools/internal/analysisinternal"
+ "golang.org/x/tools/internal/astutil"
"golang.org/x/tools/internal/typesinternal"
)
@@ -29,7 +29,7 @@ var doc string
var Analyzer = &analysis.Analyzer{
Name: "slog",
- Doc: analysisutil.MustExtractDoc(doc, "slog"),
+ Doc: analysisinternal.MustExtractDoc(doc, "slog"),
URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/slog",
Requires: []*analysis.Analyzer{inspect.Analyzer},
Run: run,
@@ -115,10 +115,10 @@ func run(pass *analysis.Pass) (any, error) {
default:
if unknownArg == nil {
pass.ReportRangef(arg, "%s arg %q should be a string or a slog.Attr (possible missing key or value)",
- shortName(fn), analysisinternal.Format(pass.Fset, arg))
+ shortName(fn), astutil.Format(pass.Fset, arg))
} else {
pass.ReportRangef(arg, "%s arg %q should probably be a string or a slog.Attr (previous arg %q cannot be a key)",
- shortName(fn), analysisinternal.Format(pass.Fset, arg), analysisinternal.Format(pass.Fset, unknownArg))
+ shortName(fn), astutil.Format(pass.Fset, arg), astutil.Format(pass.Fset, unknownArg))
}
// Stop here so we report at most one missing key per call.
return
@@ -158,7 +158,7 @@ func run(pass *analysis.Pass) (any, error) {
}
func isAttr(t types.Type) bool {
- return analysisinternal.IsTypeNamed(t, "log/slog", "Attr")
+ return typesinternal.IsTypeNamed(t, "log/slog", "Attr")
}
// shortName returns a name for the function that is shorter than FullName.
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/stdmethods/stdmethods.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/stdmethods/stdmethods.go
index a0bdf001abd602..ca303ae5c15c6d 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/stdmethods/stdmethods.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/stdmethods/stdmethods.go
@@ -12,8 +12,8 @@ import (
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/inspect"
- "golang.org/x/tools/go/analysis/passes/internal/analysisutil"
"golang.org/x/tools/go/ast/inspector"
+ "golang.org/x/tools/internal/analysisinternal"
)
//go:embed doc.go
@@ -21,7 +21,7 @@ var doc string
var Analyzer = &analysis.Analyzer{
Name: "stdmethods",
- Doc: analysisutil.MustExtractDoc(doc, "stdmethods"),
+ Doc: analysisinternal.MustExtractDoc(doc, "stdmethods"),
URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/stdmethods",
Requires: []*analysis.Analyzer{inspect.Analyzer},
Run: run,
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/stringintconv/string.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/stringintconv/string.go
index 7dbff1e4d8d4ee..19c72d2cf938ae 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/stringintconv/string.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/stringintconv/string.go
@@ -13,9 +13,9 @@ import (
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/inspect"
- "golang.org/x/tools/go/analysis/passes/internal/analysisutil"
"golang.org/x/tools/go/ast/inspector"
"golang.org/x/tools/internal/analysisinternal"
+ "golang.org/x/tools/internal/refactor"
"golang.org/x/tools/internal/typeparams"
"golang.org/x/tools/internal/typesinternal"
)
@@ -25,7 +25,7 @@ var doc string
var Analyzer = &analysis.Analyzer{
Name: "stringintconv",
- Doc: analysisutil.MustExtractDoc(doc, "stringintconv"),
+ Doc: analysisinternal.MustExtractDoc(doc, "stringintconv"),
URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/stringintconv",
Requires: []*analysis.Analyzer{inspect.Analyzer},
Run: run,
@@ -198,7 +198,7 @@ func run(pass *analysis.Pass) (any, error) {
// the type has methods, as some {String,GoString,Format}
// may change the behavior of fmt.Sprint.
if len(ttypes) == 1 && len(vtypes) == 1 && types.NewMethodSet(V0).Len() == 0 {
- _, prefix, importEdits := analysisinternal.AddImport(pass.TypesInfo, file, "fmt", "fmt", "Sprint", arg.Pos())
+ prefix, importEdits := refactor.AddImport(pass.TypesInfo, file, "fmt", "fmt", "Sprint", arg.Pos())
if types.Identical(T0, types.Typ[types.String]) {
// string(x) -> fmt.Sprint(x)
addFix("Format the number as a decimal", append(importEdits,
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go
index 360ba0e74d89d3..eba4e56bb0532b 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go
@@ -13,7 +13,6 @@ import (
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/inspect"
- "golang.org/x/tools/go/analysis/passes/internal/analysisutil"
"golang.org/x/tools/go/ast/inspector"
"golang.org/x/tools/go/types/typeutil"
"golang.org/x/tools/internal/analysisinternal"
@@ -31,7 +30,7 @@ func init() {
var Analyzer = &analysis.Analyzer{
Name: "testinggoroutine",
- Doc: analysisutil.MustExtractDoc(doc, "testinggoroutine"),
+ Doc: analysisinternal.MustExtractDoc(doc, "testinggoroutine"),
URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/testinggoroutine",
Requires: []*analysis.Analyzer{inspect.Analyzer},
Run: run,
@@ -40,7 +39,7 @@ var Analyzer = &analysis.Analyzer{
func run(pass *analysis.Pass) (any, error) {
inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
- if !analysisinternal.Imports(pass.Pkg, "testing") {
+ if !typesinternal.Imports(pass.Pkg, "testing") {
return nil, nil
}
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/tests/tests.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/tests/tests.go
index d4e9b025324a18..a0ed5ab14e8e14 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/tests/tests.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/tests/tests.go
@@ -15,8 +15,8 @@ import (
"unicode/utf8"
"golang.org/x/tools/go/analysis"
- "golang.org/x/tools/go/analysis/passes/internal/analysisutil"
"golang.org/x/tools/internal/analysisinternal"
+ "golang.org/x/tools/internal/typesinternal"
)
//go:embed doc.go
@@ -24,7 +24,7 @@ var doc string
var Analyzer = &analysis.Analyzer{
Name: "tests",
- Doc: analysisutil.MustExtractDoc(doc, "tests"),
+ Doc: analysisinternal.MustExtractDoc(doc, "tests"),
URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/tests",
Run: run,
}
@@ -258,7 +258,7 @@ func isTestingType(typ types.Type, testingType string) bool {
if !ok {
return false
}
- return analysisinternal.IsTypeNamed(ptr.Elem(), "testing", testingType)
+ return typesinternal.IsTypeNamed(ptr.Elem(), "testing", testingType)
}
// Validate that fuzz target function's arguments are of accepted types.
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/timeformat/timeformat.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/timeformat/timeformat.go
index 4fdbb2b5415eff..45b6822c17606e 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/timeformat/timeformat.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/timeformat/timeformat.go
@@ -16,10 +16,10 @@ import (
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/inspect"
- "golang.org/x/tools/go/analysis/passes/internal/analysisutil"
"golang.org/x/tools/go/ast/inspector"
"golang.org/x/tools/go/types/typeutil"
"golang.org/x/tools/internal/analysisinternal"
+ "golang.org/x/tools/internal/typesinternal"
)
const badFormat = "2006-02-01"
@@ -30,7 +30,7 @@ var doc string
var Analyzer = &analysis.Analyzer{
Name: "timeformat",
- Doc: analysisutil.MustExtractDoc(doc, "timeformat"),
+ Doc: analysisinternal.MustExtractDoc(doc, "timeformat"),
URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/timeformat",
Requires: []*analysis.Analyzer{inspect.Analyzer},
Run: run,
@@ -39,7 +39,7 @@ var Analyzer = &analysis.Analyzer{
func run(pass *analysis.Pass) (any, error) {
// Note: (time.Time).Format is a method and can be a typeutil.Callee
// without directly importing "time". So we cannot just skip this package
- // when !analysisutil.Imports(pass.Pkg, "time").
+	// when !typesinternal.Imports(pass.Pkg, "time").
// TODO(taking): Consider using a prepass to collect typeutil.Callees.
inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
@@ -50,8 +50,8 @@ func run(pass *analysis.Pass) (any, error) {
inspect.Preorder(nodeFilter, func(n ast.Node) {
call := n.(*ast.CallExpr)
obj := typeutil.Callee(pass.TypesInfo, call)
- if !analysisinternal.IsMethodNamed(obj, "time", "Time", "Format") &&
- !analysisinternal.IsFunctionNamed(obj, "time", "Parse") {
+ if !typesinternal.IsMethodNamed(obj, "time", "Time", "Format") &&
+ !typesinternal.IsFunctionNamed(obj, "time", "Parse") {
return
}
if len(call.Args) > 0 {
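
The timeformat hunk above only swaps the callee helpers over to typesinternal; the check itself still targets the badFormat layout "2006-02-01" declared earlier in the file. A small hedged reminder of what it flags:

package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Date(2025, time.March, 7, 0, 0, 0, 0, time.UTC)
	fmt.Println(t.Format("2006-02-01")) // flagged: month and day are swapped
	fmt.Println(t.Format("2006-01-02")) // ISO 8601 layout; not flagged
}
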
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go
index 26e894bd4000eb..4de48c83930648 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go
@@ -11,9 +11,9 @@ import (
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/inspect"
- "golang.org/x/tools/go/analysis/passes/internal/analysisutil"
"golang.org/x/tools/go/ast/inspector"
"golang.org/x/tools/go/types/typeutil"
+ "golang.org/x/tools/internal/analysisinternal"
"golang.org/x/tools/internal/typesinternal"
)
@@ -22,7 +22,7 @@ var doc string
var Analyzer = &analysis.Analyzer{
Name: "unmarshal",
- Doc: analysisutil.MustExtractDoc(doc, "unmarshal"),
+ Doc: analysisinternal.MustExtractDoc(doc, "unmarshal"),
URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/unmarshal",
Requires: []*analysis.Analyzer{inspect.Analyzer},
Run: run,
@@ -39,7 +39,7 @@ func run(pass *analysis.Pass) (any, error) {
// Note: (*"encoding/json".Decoder).Decode, (* "encoding/gob".Decoder).Decode
// and (* "encoding/xml".Decoder).Decode are methods and can be a typeutil.Callee
// without directly importing their packages. So we cannot just skip this package
- // when !analysisutil.Imports(pass.Pkg, "encoding/...").
+	// when !typesinternal.Imports(pass.Pkg, "encoding/...").
// TODO(taking): Consider using a prepass to collect typeutil.Callees.
inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unreachable/unreachable.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unreachable/unreachable.go
index 317f034992bb5d..668a33529989d1 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unreachable/unreachable.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unreachable/unreachable.go
@@ -14,8 +14,9 @@ import (
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/inspect"
- "golang.org/x/tools/go/analysis/passes/internal/analysisutil"
"golang.org/x/tools/go/ast/inspector"
+ "golang.org/x/tools/internal/analysisinternal"
+ "golang.org/x/tools/internal/refactor"
)
//go:embed doc.go
@@ -23,7 +24,7 @@ var doc string
var Analyzer = &analysis.Analyzer{
Name: "unreachable",
- Doc: analysisutil.MustExtractDoc(doc, "unreachable"),
+ Doc: analysisinternal.MustExtractDoc(doc, "unreachable"),
URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/unreachable",
Requires: []*analysis.Analyzer{inspect.Analyzer},
RunDespiteErrors: true,
@@ -188,6 +189,11 @@ func (d *deadState) findDead(stmt ast.Stmt) {
case *ast.EmptyStmt:
// do not warn about unreachable empty statements
default:
+ var (
+ inspect = d.pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+ curStmt, _ = inspect.Root().FindNode(stmt)
+ tokFile = d.pass.Fset.File(stmt.Pos())
+ )
// (This call to pass.Report is a frequent source
// of diagnostics beyond EOF in a truncated file;
// see #71659.)
@@ -196,11 +202,8 @@ func (d *deadState) findDead(stmt ast.Stmt) {
End: stmt.End(),
Message: "unreachable code",
SuggestedFixes: []analysis.SuggestedFix{{
- Message: "Remove",
- TextEdits: []analysis.TextEdit{{
- Pos: stmt.Pos(),
- End: stmt.End(),
- }},
+ Message: "Remove",
+ TextEdits: refactor.DeleteStmt(tokFile, curStmt),
}},
})
d.reachable = true // silence error about next statement
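
With the change above, the unreachable analyzer's suggested fix is computed by refactor.DeleteStmt, which removes the whole line when the dead statement sits alone on it. A hedged example of the code it reports:

package p

import "fmt"

func fail() {
	panic("boom")
	fmt.Println("never reached") // unreachable code; the fix deletes this whole line
}
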
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go
index 57c6da64ff30e3..24ff723390f597 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go
@@ -14,9 +14,9 @@ import (
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/inspect"
- "golang.org/x/tools/go/analysis/passes/internal/analysisutil"
"golang.org/x/tools/go/ast/inspector"
"golang.org/x/tools/internal/analysisinternal"
+ "golang.org/x/tools/internal/typesinternal"
)
//go:embed doc.go
@@ -24,7 +24,7 @@ var doc string
var Analyzer = &analysis.Analyzer{
Name: "unsafeptr",
- Doc: analysisutil.MustExtractDoc(doc, "unsafeptr"),
+ Doc: analysisinternal.MustExtractDoc(doc, "unsafeptr"),
URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/unsafeptr",
Requires: []*analysis.Analyzer{inspect.Analyzer},
Run: run,
@@ -105,7 +105,7 @@ func isSafeUintptr(info *types.Info, x ast.Expr) bool {
}
switch sel.Sel.Name {
case "Pointer", "UnsafeAddr":
- if analysisinternal.IsTypeNamed(info.Types[sel.X].Type, "reflect", "Value") {
+ if typesinternal.IsTypeNamed(info.Types[sel.X].Type, "reflect", "Value") {
return true
}
}
@@ -153,5 +153,5 @@ func hasBasicType(info *types.Info, x ast.Expr, kind types.BasicKind) bool {
// isReflectHeader reports whether t is reflect.SliceHeader or reflect.StringHeader.
func isReflectHeader(t types.Type) bool {
- return analysisinternal.IsTypeNamed(t, "reflect", "SliceHeader", "StringHeader")
+ return typesinternal.IsTypeNamed(t, "reflect", "SliceHeader", "StringHeader")
}
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go
index ed4cf7ae0be35b..57ad4f0769924c 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go
@@ -23,7 +23,6 @@ import (
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/inspect"
- "golang.org/x/tools/go/analysis/passes/internal/analysisutil"
"golang.org/x/tools/go/ast/inspector"
"golang.org/x/tools/go/types/typeutil"
"golang.org/x/tools/internal/analysisinternal"
@@ -34,7 +33,7 @@ var doc string
var Analyzer = &analysis.Analyzer{
Name: "unusedresult",
- Doc: analysisutil.MustExtractDoc(doc, "unusedresult"),
+ Doc: analysisinternal.MustExtractDoc(doc, "unusedresult"),
URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/unusedresult",
Requires: []*analysis.Analyzer{inspect.Analyzer},
Run: run,
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/waitgroup/waitgroup.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/waitgroup/waitgroup.go
index 14c6986eabab3b..88e4cc8677691e 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/waitgroup/waitgroup.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/waitgroup/waitgroup.go
@@ -13,10 +13,10 @@ import (
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/inspect"
- "golang.org/x/tools/go/analysis/passes/internal/analysisutil"
"golang.org/x/tools/go/ast/inspector"
"golang.org/x/tools/go/types/typeutil"
"golang.org/x/tools/internal/analysisinternal"
+ "golang.org/x/tools/internal/typesinternal"
)
//go:embed doc.go
@@ -24,14 +24,14 @@ var doc string
var Analyzer = &analysis.Analyzer{
Name: "waitgroup",
- Doc: analysisutil.MustExtractDoc(doc, "waitgroup"),
+ Doc: analysisinternal.MustExtractDoc(doc, "waitgroup"),
URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/waitgroup",
Requires: []*analysis.Analyzer{inspect.Analyzer},
Run: run,
}
func run(pass *analysis.Pass) (any, error) {
- if !analysisinternal.Imports(pass.Pkg, "sync") {
+ if !typesinternal.Imports(pass.Pkg, "sync") {
return nil, nil // doesn't directly import sync
}
@@ -44,7 +44,7 @@ func run(pass *analysis.Pass) (any, error) {
if push {
call := n.(*ast.CallExpr)
obj := typeutil.Callee(pass.TypesInfo, call)
- if analysisinternal.IsMethodNamed(obj, "sync", "WaitGroup", "Add") &&
+ if typesinternal.IsMethodNamed(obj, "sync", "WaitGroup", "Add") &&
hasSuffix(stack, wantSuffix) &&
backindex(stack, 1) == backindex(stack, 2).(*ast.BlockStmt).List[0] { // ExprStmt must be Block's first stmt
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go
index 7b805b882bf2df..b407bc7791547f 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go
@@ -75,7 +75,6 @@ type Config struct {
VetxOutput string // where to write file of fact information
Stdout string // write stdout (e.g. JSON, unified diff) to this file
SucceedOnTypecheckFailure bool // obsolete awful hack; see #18395 and below
- WarnDiagnostics bool // printing diagnostics should not cause a non-zero exit
}
// Main is the main function of a vet-like analysis tool that must be
@@ -87,18 +86,9 @@ type Config struct {
// -V=full describe executable for build caching
// foo.cfg perform separate modular analyze on the single
// unit described by a JSON config file foo.cfg.
-//
-// Also, subject to approval of proposal #71859:
-//
// -fix don't print each diagnostic, apply its first fix
// -diff don't apply a fix, print the diff (requires -fix)
-//
-// Additionally, the environment variable GOVET has the value "vet" or
-// "fix" depending on whether the command is being invoked by "go vet",
-// to report diagnostics, or "go fix", to apply fixes. This is
-// necessary so that callers of Main can select their analyzer suite
-// before flag parsing. (Vet analyzers must report real code problems,
-// whereas Fix analyzers may fix non-problems such as style issues.)
+// -json print diagnostics and fixes in JSON form
func Main(analyzers ...*analysis.Analyzer) {
progname := filepath.Base(os.Args[0])
log.SetFlags(0)
@@ -163,7 +153,7 @@ func Run(configFile string, analyzers []*analysis.Analyzer) {
// In VetxOnly mode, the analysis is run only for facts.
if !cfg.VetxOnly {
- code = processResults(fset, cfg.ID, results, cfg.WarnDiagnostics)
+ code = processResults(fset, cfg.ID, results)
}
os.Exit(code)
@@ -187,7 +177,7 @@ func readConfig(filename string) (*Config, error) {
return cfg, nil
}
-func processResults(fset *token.FileSet, id string, results []result, warnDiagnostics bool) (exit int) {
+func processResults(fset *token.FileSet, id string, results []result) (exit int) {
if analysisflags.Fix {
// Don't print the diagnostics,
// but apply all fixes from the root actions.
@@ -236,9 +226,7 @@ func processResults(fset *token.FileSet, id string, results []result, warnDiagno
for _, res := range results {
for _, diag := range res.diagnostics {
analysisflags.PrintPlain(os.Stderr, fset, analysisflags.Context, diag)
- if !warnDiagnostics {
- exit = 1
- }
+ exit = 1
}
}
}
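
After the unitchecker changes above, a vet-like driver built on unitchecker.Main accepts -fix, -diff, and -json alongside a JSON config file. A minimal hedged sketch of such a driver (the choice of analyzer is illustrative):

package main

import (
	"golang.org/x/tools/go/analysis/passes/printf"
	"golang.org/x/tools/go/analysis/unitchecker"
)

func main() {
	// Typically invoked by the go command as, for example:
	//   mytool -json foo.cfg
	unitchecker.Main(printf.Analyzer)
}
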
diff --git a/src/cmd/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go b/src/cmd/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go
index cea89d34dac45b..970d7507f02d0a 100644
--- a/src/cmd/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go
+++ b/src/cmd/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go
@@ -2,166 +2,39 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Package analysisinternal provides gopls' internal analyses with a
-// number of helper functions that operate on typed syntax trees.
+// Package analysisinternal provides helper functions for use both in
+// the analysis drivers (go/analysis and gopls) and in various
+// analyzers.
+//
+// TODO(adonovan): this is not ideal as it may lead to unnecessary
+// dependencies between drivers and analyzers. Split into analyzerlib
+// and driverlib?
package analysisinternal
import (
- "bytes"
"cmp"
"fmt"
- "go/ast"
- "go/printer"
- "go/scanner"
"go/token"
- "go/types"
- "iter"
- pathpkg "path"
+ "os"
"slices"
- "strings"
"golang.org/x/tools/go/analysis"
- "golang.org/x/tools/go/ast/inspector"
- "golang.org/x/tools/internal/moreiters"
- "golang.org/x/tools/internal/typesinternal"
)
-// Deprecated: this heuristic is ill-defined.
-// TODO(adonovan): move to sole use in gopls/internal/cache.
-func TypeErrorEndPos(fset *token.FileSet, src []byte, start token.Pos) token.Pos {
- // Get the end position for the type error.
- file := fset.File(start)
- if file == nil {
- return start
- }
- if offset := file.PositionFor(start, false).Offset; offset > len(src) {
- return start
- } else {
- src = src[offset:]
- }
-
- // Attempt to find a reasonable end position for the type error.
- //
- // TODO(rfindley): the heuristic implemented here is unclear. It looks like
- // it seeks the end of the primary operand starting at start, but that is not
- // quite implemented (for example, given a func literal this heuristic will
- // return the range of the func keyword).
- //
- // We should formalize this heuristic, or deprecate it by finally proposing
- // to add end position to all type checker errors.
- //
- // Nevertheless, ensure that the end position at least spans the current
- // token at the cursor (this was golang/go#69505).
- end := start
- {
- var s scanner.Scanner
- fset := token.NewFileSet()
- f := fset.AddFile("", fset.Base(), len(src))
- s.Init(f, src, nil /* no error handler */, scanner.ScanComments)
- pos, tok, lit := s.Scan()
- if tok != token.SEMICOLON && token.Pos(f.Base()) <= pos && pos <= token.Pos(f.Base()+f.Size()) {
- off := file.Offset(pos) + len(lit)
- src = src[off:]
- end += token.Pos(off)
- }
+// ReadFile reads a file and adds it to the FileSet in pass
+// so that we can report errors against it using lineStart.
+func ReadFile(pass *analysis.Pass, filename string) ([]byte, *token.File, error) {
+ readFile := pass.ReadFile
+ if readFile == nil {
+ readFile = os.ReadFile
}
-
- // Look for bytes that might terminate the current operand. See note above:
- // this is imprecise.
- if width := bytes.IndexAny(src, " \n,():;[]+-*/"); width > 0 {
- end += token.Pos(width)
+ content, err := readFile(filename)
+ if err != nil {
+ return nil, nil, err
}
- return end
-}
-
-// MatchingIdents finds the names of all identifiers in 'node' that match any of the given types.
-// 'pos' represents the position at which the identifiers may be inserted. 'pos' must be within
-// the scope of each of identifier we select. Otherwise, we will insert a variable at 'pos' that
-// is unrecognized.
-func MatchingIdents(typs []types.Type, node ast.Node, pos token.Pos, info *types.Info, pkg *types.Package) map[types.Type][]string {
-
- // Initialize matches to contain the variable types we are searching for.
- matches := make(map[types.Type][]string)
- for _, typ := range typs {
- if typ == nil {
- continue // TODO(adonovan): is this reachable?
- }
- matches[typ] = nil // create entry
- }
-
- seen := map[types.Object]struct{}{}
- ast.Inspect(node, func(n ast.Node) bool {
- if n == nil {
- return false
- }
- // Prevent circular definitions. If 'pos' is within an assignment statement, do not
- // allow any identifiers in that assignment statement to be selected. Otherwise,
- // we could do the following, where 'x' satisfies the type of 'f0':
- //
- // x := fakeStruct{f0: x}
- //
- if assign, ok := n.(*ast.AssignStmt); ok && pos > assign.Pos() && pos <= assign.End() {
- return false
- }
- if n.End() > pos {
- return n.Pos() <= pos
- }
- ident, ok := n.(*ast.Ident)
- if !ok || ident.Name == "_" {
- return true
- }
- obj := info.Defs[ident]
- if obj == nil || obj.Type() == nil {
- return true
- }
- if _, ok := obj.(*types.TypeName); ok {
- return true
- }
- // Prevent duplicates in matches' values.
- if _, ok = seen[obj]; ok {
- return true
- }
- seen[obj] = struct{}{}
- // Find the scope for the given position. Then, check whether the object
- // exists within the scope.
- innerScope := pkg.Scope().Innermost(pos)
- if innerScope == nil {
- return true
- }
- _, foundObj := innerScope.LookupParent(ident.Name, pos)
- if foundObj != obj {
- return true
- }
- // The object must match one of the types that we are searching for.
- // TODO(adonovan): opt: use typeutil.Map?
- if names, ok := matches[obj.Type()]; ok {
- matches[obj.Type()] = append(names, ident.Name)
- } else {
- // If the object type does not exactly match
- // any of the target types, greedily find the first
- // target type that the object type can satisfy.
- for typ := range matches {
- if equivalentTypes(obj.Type(), typ) {
- matches[typ] = append(matches[typ], ident.Name)
- }
- }
- }
- return true
- })
- return matches
-}
-
-func equivalentTypes(want, got types.Type) bool {
- if types.Identical(want, got) {
- return true
- }
- // Code segment to help check for untyped equality from (golang/go#32146).
- if rhs, ok := want.(*types.Basic); ok && rhs.Info()&types.IsUntyped > 0 {
- if lhs, ok := got.Underlying().(*types.Basic); ok {
- return rhs.Info()&types.IsConstType == lhs.Info()&types.IsConstType
- }
- }
- return types.AssignableTo(want, got)
+ tf := pass.Fset.AddFile(filename, -1, len(content))
+ tf.SetLinesForContent(content)
+ return content, tf, nil
}
// A ReadFileFunc is a function that returns the
@@ -193,207 +66,6 @@ func CheckReadable(pass *analysis.Pass, filename string) error {
return fmt.Errorf("Pass.ReadFile: %s is not among OtherFiles, IgnoredFiles, or names of Files", filename)
}
-// AddImport checks whether this file already imports pkgpath and that
-// the import is in scope at pos. If so, it returns the name under
-// which it was imported and no edits. Otherwise, it adds a new import
-// of pkgpath, using a name derived from the preferred name, and
-// returns the chosen name, a prefix to be concatenated with member to
-// form a qualified name, and the edit for the new import.
-//
-// The member argument indicates the name of the desired symbol within
-// the imported package. This is needed in the case when the existing
-// import is a dot import, because then it is possible that the
-// desired symbol is shadowed by other declarations in the current
-// package. If member is not shadowed at pos, AddImport returns (".",
-// "", nil). (AddImport accepts the caller's implicit claim that the
-// imported package declares member.)
-//
-// Use a preferredName of "_" to request a blank import;
-// member is ignored in this case.
-//
-// It does not mutate its arguments.
-func AddImport(info *types.Info, file *ast.File, preferredName, pkgpath, member string, pos token.Pos) (name, prefix string, newImport []analysis.TextEdit) {
- // Find innermost enclosing lexical block.
- scope := info.Scopes[file].Innermost(pos)
- if scope == nil {
- panic("no enclosing lexical block")
- }
-
- // Is there an existing import of this package?
- // If so, are we in its scope? (not shadowed)
- for _, spec := range file.Imports {
- pkgname := info.PkgNameOf(spec)
- if pkgname != nil && pkgname.Imported().Path() == pkgpath {
- name = pkgname.Name()
- if preferredName == "_" {
- // Request for blank import; any existing import will do.
- return name, "", nil
- }
- if name == "." {
- // The scope of ident must be the file scope.
- if s, _ := scope.LookupParent(member, pos); s == info.Scopes[file] {
- return name, "", nil
- }
- } else if _, obj := scope.LookupParent(name, pos); obj == pkgname {
- return name, name + ".", nil
- }
- }
- }
-
- // We must add a new import.
-
- // Ensure we have a fresh name.
- newName := preferredName
- if preferredName != "_" {
- newName = FreshName(scope, pos, preferredName)
- }
-
- // Create a new import declaration either before the first existing
- // declaration (which must exist), including its comments; or
- // inside the declaration, if it is an import group.
- //
- // Use a renaming import whenever the preferred name is not
- // available, or the chosen name does not match the last
- // segment of its path.
- newText := fmt.Sprintf("%q", pkgpath)
- if newName != preferredName || newName != pathpkg.Base(pkgpath) {
- newText = fmt.Sprintf("%s %q", newName, pkgpath)
- }
-
- decl0 := file.Decls[0]
- var before ast.Node = decl0
- switch decl0 := decl0.(type) {
- case *ast.GenDecl:
- if decl0.Doc != nil {
- before = decl0.Doc
- }
- case *ast.FuncDecl:
- if decl0.Doc != nil {
- before = decl0.Doc
- }
- }
- if gd, ok := before.(*ast.GenDecl); ok && gd.Tok == token.IMPORT && gd.Rparen.IsValid() {
- // Have existing grouped import ( ... ) decl.
- if IsStdPackage(pkgpath) && len(gd.Specs) > 0 {
- // Add spec for a std package before
- // first existing spec, followed by
- // a blank line if the next one is non-std.
- first := gd.Specs[0].(*ast.ImportSpec)
- pos = first.Pos()
- if !IsStdPackage(first.Path.Value) {
- newText += "\n"
- }
- newText += "\n\t"
- } else {
- // Add spec at end of group.
- pos = gd.Rparen
- newText = "\t" + newText + "\n"
- }
- } else {
- // No import decl, or non-grouped import.
- // Add a new import decl before first decl.
- // (gofmt will merge multiple import decls.)
- pos = before.Pos()
- newText = "import " + newText + "\n\n"
- }
- return newName, newName + ".", []analysis.TextEdit{{
- Pos: pos,
- End: pos,
- NewText: []byte(newText),
- }}
-}
-
-// FreshName returns the name of an identifier that is undefined
-// at the specified position, based on the preferred name.
-func FreshName(scope *types.Scope, pos token.Pos, preferred string) string {
- newName := preferred
- for i := 0; ; i++ {
- if _, obj := scope.LookupParent(newName, pos); obj == nil {
- break // fresh
- }
- newName = fmt.Sprintf("%s%d", preferred, i)
- }
- return newName
-}
-
-// Format returns a string representation of the node n.
-func Format(fset *token.FileSet, n ast.Node) string {
- var buf strings.Builder
- printer.Fprint(&buf, fset, n) // ignore errors
- return buf.String()
-}
-
-// Imports returns true if path is imported by pkg.
-func Imports(pkg *types.Package, path string) bool {
- for _, imp := range pkg.Imports() {
- if imp.Path() == path {
- return true
- }
- }
- return false
-}
-
-// IsTypeNamed reports whether t is (or is an alias for) a
-// package-level defined type with the given package path and one of
-// the given names. It returns false if t is nil.
-//
-// This function avoids allocating the concatenation of "pkg.Name",
-// which is important for the performance of syntax matching.
-func IsTypeNamed(t types.Type, pkgPath string, names ...string) bool {
- if named, ok := types.Unalias(t).(*types.Named); ok {
- tname := named.Obj()
- return tname != nil &&
- typesinternal.IsPackageLevel(tname) &&
- tname.Pkg().Path() == pkgPath &&
- slices.Contains(names, tname.Name())
- }
- return false
-}
-
-// IsPointerToNamed reports whether t is (or is an alias for) a pointer to a
-// package-level defined type with the given package path and one of the given
-// names. It returns false if t is not a pointer type.
-func IsPointerToNamed(t types.Type, pkgPath string, names ...string) bool {
- r := typesinternal.Unpointer(t)
- if r == t {
- return false
- }
- return IsTypeNamed(r, pkgPath, names...)
-}
-
-// IsFunctionNamed reports whether obj is a package-level function
-// defined in the given package and has one of the given names.
-// It returns false if obj is nil.
-//
-// This function avoids allocating the concatenation of "pkg.Name",
-// which is important for the performance of syntax matching.
-func IsFunctionNamed(obj types.Object, pkgPath string, names ...string) bool {
- f, ok := obj.(*types.Func)
- return ok &&
- typesinternal.IsPackageLevel(obj) &&
- f.Pkg().Path() == pkgPath &&
- f.Type().(*types.Signature).Recv() == nil &&
- slices.Contains(names, f.Name())
-}
-
-// IsMethodNamed reports whether obj is a method defined on a
-// package-level type with the given package and type name, and has
-// one of the given names. It returns false if obj is nil.
-//
-// This function avoids allocating the concatenation of "pkg.TypeName.Name",
-// which is important for the performance of syntax matching.
-func IsMethodNamed(obj types.Object, pkgPath string, typeName string, names ...string) bool {
- if fn, ok := obj.(*types.Func); ok {
- if recv := fn.Type().(*types.Signature).Recv(); recv != nil {
- _, T := typesinternal.ReceiverNamed(recv)
- return T != nil &&
- IsTypeNamed(T, pkgPath, typeName) &&
- slices.Contains(names, fn.Name())
- }
- }
- return false
-}
-
// ValidateFixes validates the set of fixes for a single diagnostic.
// Any error indicates a bug in the originating analyzer.
//
@@ -496,172 +168,6 @@ func validateFix(fset *token.FileSet, fix *analysis.SuggestedFix) error {
return nil
}
-// CanImport reports whether one package is allowed to import another.
-//
-// TODO(adonovan): allow customization of the accessibility relation
-// (e.g. for Bazel).
-func CanImport(from, to string) bool {
- // TODO(adonovan): better segment hygiene.
- if to == "internal" || strings.HasPrefix(to, "internal/") {
- // Special case: only std packages may import internal/...
- // We can't reliably know whether we're in std, so we
- // use a heuristic on the first segment.
- first, _, _ := strings.Cut(from, "/")
- if strings.Contains(first, ".") {
- return false // example.com/foo ∉ std
- }
- if first == "testdata" {
- return false // testdata/foo ∉ std
- }
- }
- if strings.HasSuffix(to, "/internal") {
- return strings.HasPrefix(from, to[:len(to)-len("/internal")])
- }
- if i := strings.LastIndex(to, "/internal/"); i >= 0 {
- return strings.HasPrefix(from, to[:i])
- }
- return true
-}
-
-// DeleteStmt returns the edits to remove the [ast.Stmt] identified by
-// curStmt, if it is contained within a BlockStmt, CaseClause,
-// CommClause, or is the STMT in switch STMT; ... {...}. It returns nil otherwise.
-func DeleteStmt(fset *token.FileSet, curStmt inspector.Cursor) []analysis.TextEdit {
- stmt := curStmt.Node().(ast.Stmt)
- // if the stmt is on a line by itself delete the whole line
- // otherwise just delete the statement.
-
- // this logic would be a lot simpler with the file contents, and somewhat simpler
- // if the cursors included the comments.
-
- tokFile := fset.File(stmt.Pos())
- lineOf := tokFile.Line
- stmtStartLine, stmtEndLine := lineOf(stmt.Pos()), lineOf(stmt.End())
-
- var from, to token.Pos
- // bounds of adjacent syntax/comments on same line, if any
- limits := func(left, right token.Pos) {
- if lineOf(left) == stmtStartLine {
- from = left
- }
- if lineOf(right) == stmtEndLine {
- to = right
- }
- }
- // TODO(pjw): there are other places a statement might be removed:
- // IfStmt = "if" [ SimpleStmt ";" ] Expression Block [ "else" ( IfStmt | Block ) ] .
- // (removing the blocks requires more rewriting than this routine would do)
- // CommCase = "case" ( SendStmt | RecvStmt ) | "default" .
- // (removing the stmt requires more rewriting, and it's unclear what the user means)
- switch parent := curStmt.Parent().Node().(type) {
- case *ast.SwitchStmt:
- limits(parent.Switch, parent.Body.Lbrace)
- case *ast.TypeSwitchStmt:
- limits(parent.Switch, parent.Body.Lbrace)
- if parent.Assign == stmt {
- return nil // don't let the user break the type switch
- }
- case *ast.BlockStmt:
- limits(parent.Lbrace, parent.Rbrace)
- case *ast.CommClause:
- limits(parent.Colon, curStmt.Parent().Parent().Node().(*ast.BlockStmt).Rbrace)
- if parent.Comm == stmt {
- return nil // maybe the user meant to remove the entire CommClause?
- }
- case *ast.CaseClause:
- limits(parent.Colon, curStmt.Parent().Parent().Node().(*ast.BlockStmt).Rbrace)
- case *ast.ForStmt:
- limits(parent.For, parent.Body.Lbrace)
-
- default:
- return nil // not one of ours
- }
-
- if prev, found := curStmt.PrevSibling(); found && lineOf(prev.Node().End()) == stmtStartLine {
- from = prev.Node().End() // preceding statement ends on same line
- }
- if next, found := curStmt.NextSibling(); found && lineOf(next.Node().Pos()) == stmtEndLine {
- to = next.Node().Pos() // following statement begins on same line
- }
- // and now for the comments
-Outer:
- for _, cg := range enclosingFile(curStmt).Comments {
- for _, co := range cg.List {
- if lineOf(co.End()) < stmtStartLine {
- continue
- } else if lineOf(co.Pos()) > stmtEndLine {
- break Outer // no more are possible
- }
- if lineOf(co.End()) == stmtStartLine && co.End() < stmt.Pos() {
- if !from.IsValid() || co.End() > from {
- from = co.End()
- continue // maybe there are more
- }
- }
- if lineOf(co.Pos()) == stmtEndLine && co.Pos() > stmt.End() {
- if !to.IsValid() || co.Pos() < to {
- to = co.Pos()
- continue // maybe there are more
- }
- }
- }
- }
- // if either from or to is valid, just remove the statement
- // otherwise remove the line
- edit := analysis.TextEdit{Pos: stmt.Pos(), End: stmt.End()}
- if from.IsValid() || to.IsValid() {
- // remove just the statement.
- // we can't tell if there is a ; or whitespace right after the statement
- // ideally we'd like to remove the former and leave the latter
- // (if gofmt has run, there likely won't be a ;)
- // In type switches we know there's a semicolon somewhere after the statement,
- // but the extra work for this special case is not worth it, as gofmt will fix it.
- return []analysis.TextEdit{edit}
- }
- // remove the whole line
- for lineOf(edit.Pos) == stmtStartLine {
- edit.Pos--
- }
- edit.Pos++ // get back tostmtStartLine
- for lineOf(edit.End) == stmtEndLine {
- edit.End++
- }
- return []analysis.TextEdit{edit}
-}
-
-// Comments returns an iterator over the comments overlapping the specified interval.
-func Comments(file *ast.File, start, end token.Pos) iter.Seq[*ast.Comment] {
- // TODO(adonovan): optimize use binary O(log n) instead of linear O(n) search.
- return func(yield func(*ast.Comment) bool) {
- for _, cg := range file.Comments {
- for _, co := range cg.List {
- if co.Pos() > end {
- return
- }
- if co.End() < start {
- continue
- }
-
- if !yield(co) {
- return
- }
- }
- }
- }
-}
-
-// IsStdPackage reports whether the specified package path belongs to a
-// package in the standard library (including internal dependencies).
-func IsStdPackage(path string) bool {
- // A standard package has no dot in its first segment.
- // (It may yet have a dot, e.g. "vendor/golang.org/x/foo".)
- slash := strings.IndexByte(path, '/')
- if slash < 0 {
- slash = len(path)
- }
- return !strings.Contains(path[:slash], ".") && path != "testdata"
-}
-
// Range returns an [analysis.Range] for the specified start and end positions.
func Range(pos, end token.Pos) analysis.Range {
return tokenRange{pos, end}
@@ -672,9 +178,3 @@ type tokenRange struct{ StartPos, EndPos token.Pos }
func (r tokenRange) Pos() token.Pos { return r.StartPos }
func (r tokenRange) End() token.Pos { return r.EndPos }
-
-// enclosingFile returns the syntax tree for the file enclosing c.
-func enclosingFile(c inspector.Cursor) *ast.File {
- c, _ = moreiters.First(c.Enclosing((*ast.File)(nil)))
- return c.Node().(*ast.File)
-}
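
Most of the helpers deleted above reappear in typesinternal, astutil, refactor, and packagepath (see the later hunks); the notable addition retained in this file is ReadFile, added earlier in this file's diff, which registers the file in the pass's FileSet so diagnostics can point into it. A hedged sketch of its use from an analyzer inside x/tools (the analyzer itself is illustrative):

package myanalyzer

import (
	"golang.org/x/tools/go/analysis"
	"golang.org/x/tools/internal/analysisinternal"
)

var Analyzer = &analysis.Analyzer{
	Name: "myanalyzer",
	Doc:  "sketch: inspects the pass's non-Go files",
	Run:  run,
}

func run(pass *analysis.Pass) (any, error) {
	for _, filename := range pass.OtherFiles {
		content, tf, err := analysisinternal.ReadFile(pass, filename)
		if err != nil {
			return nil, err
		}
		// tf is registered in pass.Fset, so byte offsets within content
		// can be converted to token.Pos when reporting diagnostics.
		_, _ = content, tf
	}
	return nil, nil
}
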
diff --git a/src/cmd/vendor/golang.org/x/tools/internal/analysisinternal/extractdoc.go b/src/cmd/vendor/golang.org/x/tools/internal/analysisinternal/extractdoc.go
index bfb5900f1b3195..c6cdf5997e2194 100644
--- a/src/cmd/vendor/golang.org/x/tools/internal/analysisinternal/extractdoc.go
+++ b/src/cmd/vendor/golang.org/x/tools/internal/analysisinternal/extractdoc.go
@@ -35,7 +35,7 @@ import (
//
// var Analyzer = &analysis.Analyzer{
// Name: "halting",
-// Doc: analysisutil.MustExtractDoc(doc, "halting"),
+// Doc: analysisinternal.MustExtractDoc(doc, "halting"),
// ...
// }
func MustExtractDoc(content, name string) string {
diff --git a/src/cmd/vendor/golang.org/x/tools/internal/astutil/comment.go b/src/cmd/vendor/golang.org/x/tools/internal/astutil/comment.go
index c3a256c987cbc3..7e52aeaaac590a 100644
--- a/src/cmd/vendor/golang.org/x/tools/internal/astutil/comment.go
+++ b/src/cmd/vendor/golang.org/x/tools/internal/astutil/comment.go
@@ -7,6 +7,7 @@ package astutil
import (
"go/ast"
"go/token"
+ "iter"
"strings"
)
@@ -111,3 +112,24 @@ func Directives(g *ast.CommentGroup) (res []*Directive) {
}
return
}
+
+// Comments returns an iterator over the comments overlapping the specified interval.
+func Comments(file *ast.File, start, end token.Pos) iter.Seq[*ast.Comment] {
+	// TODO(adonovan): optimize: use binary O(log n) instead of linear O(n) search.
+ return func(yield func(*ast.Comment) bool) {
+ for _, cg := range file.Comments {
+ for _, co := range cg.List {
+ if co.Pos() > end {
+ return
+ }
+ if co.End() < start {
+ continue
+ }
+
+ if !yield(co) {
+ return
+ }
+ }
+ }
+ }
+}
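
The Comments iterator relocated above keeps its linear-scan semantics but is now exposed as an iter.Seq. A hedged sketch of iterating the comments overlapping a node (internal/astutil is importable only within the x/tools module):

package p

import (
	"go/ast"

	"golang.org/x/tools/internal/astutil"
)

// commentsOn collects the text of every comment overlapping n's extent.
func commentsOn(file *ast.File, n ast.Node) []string {
	var texts []string
	for c := range astutil.Comments(file, n.Pos(), n.End()) {
		texts = append(texts, c.Text)
	}
	return texts
}
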
diff --git a/src/cmd/vendor/golang.org/x/tools/internal/astutil/equal.go b/src/cmd/vendor/golang.org/x/tools/internal/astutil/equal.go
index c945de02d4a130..210f392387b426 100644
--- a/src/cmd/vendor/golang.org/x/tools/internal/astutil/equal.go
+++ b/src/cmd/vendor/golang.org/x/tools/internal/astutil/equal.go
@@ -26,6 +26,14 @@ func Equal(x, y ast.Node, identical func(x, y *ast.Ident) bool) bool {
return equal(reflect.ValueOf(x), reflect.ValueOf(y), identical)
}
+// EqualSyntax reports whether x and y are equal.
+// Identifiers are considered equal if they are spelled the same.
+// Comments are ignored.
+func EqualSyntax(x, y ast.Expr) bool {
+ sameName := func(x, y *ast.Ident) bool { return x.Name == y.Name }
+ return Equal(x, y, sameName)
+}
+
func equal(x, y reflect.Value, identical func(x, y *ast.Ident) bool) bool {
// Ensure types are the same
if x.Type() != y.Type() {
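
EqualSyntax compares two expressions structurally, treating identifiers as equal when spelled the same and ignoring comments. A hedged sketch (error handling elided for brevity):

package p

import (
	"go/parser"

	"golang.org/x/tools/internal/astutil"
)

// sameExpr reports whether a and b parse to syntactically equal expressions;
// for example, "a+b" and "a + b" compare equal, while "a+b" and "a+c" do not.
func sameExpr(a, b string) bool {
	x, _ := parser.ParseExpr(a)
	y, _ := parser.ParseExpr(b)
	return x != nil && y != nil && astutil.EqualSyntax(x, y)
}
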
diff --git a/src/cmd/vendor/golang.org/x/tools/internal/astutil/util.go b/src/cmd/vendor/golang.org/x/tools/internal/astutil/util.go
index 14189155e4e895..a1c09835041bf0 100644
--- a/src/cmd/vendor/golang.org/x/tools/internal/astutil/util.go
+++ b/src/cmd/vendor/golang.org/x/tools/internal/astutil/util.go
@@ -6,7 +6,13 @@ package astutil
import (
"go/ast"
+ "go/printer"
"go/token"
+ "strings"
+
+ "golang.org/x/tools/go/ast/edge"
+ "golang.org/x/tools/go/ast/inspector"
+ "golang.org/x/tools/internal/moreiters"
)
// PreorderStack traverses the tree rooted at root,
@@ -67,3 +73,47 @@ func NodeContains(n ast.Node, pos token.Pos) bool {
}
return start <= pos && pos <= end
}
+
+// IsChildOf reports whether cur.ParentEdge is ek.
+//
+// TODO(adonovan): promote to a method of Cursor.
+func IsChildOf(cur inspector.Cursor, ek edge.Kind) bool {
+ got, _ := cur.ParentEdge()
+ return got == ek
+}
+
+// EnclosingFile returns the syntax tree for the file enclosing c.
+//
+// TODO(adonovan): promote this to a method of Cursor.
+func EnclosingFile(c inspector.Cursor) *ast.File {
+ c, _ = moreiters.First(c.Enclosing((*ast.File)(nil)))
+ return c.Node().(*ast.File)
+}
+
+// DocComment returns the doc comment for a node, if any.
+func DocComment(n ast.Node) *ast.CommentGroup {
+ switch n := n.(type) {
+ case *ast.FuncDecl:
+ return n.Doc
+ case *ast.GenDecl:
+ return n.Doc
+ case *ast.ValueSpec:
+ return n.Doc
+ case *ast.TypeSpec:
+ return n.Doc
+ case *ast.File:
+ return n.Doc
+ case *ast.ImportSpec:
+ return n.Doc
+ case *ast.Field:
+ return n.Doc
+ }
+ return nil
+}
+
+// Format returns a string representation of the node n.
+func Format(fset *token.FileSet, n ast.Node) string {
+ var buf strings.Builder
+ printer.Fprint(&buf, fset, n) // ignore errors
+ return buf.String()
+}
diff --git a/src/cmd/vendor/golang.org/x/tools/internal/diff/lcs/old.go b/src/cmd/vendor/golang.org/x/tools/internal/diff/lcs/old.go
index 4c346706a7566c..7b7c5cc677beb4 100644
--- a/src/cmd/vendor/golang.org/x/tools/internal/diff/lcs/old.go
+++ b/src/cmd/vendor/golang.org/x/tools/internal/diff/lcs/old.go
@@ -378,10 +378,7 @@ func (e *editGraph) twoDone(df, db int) (int, bool) {
return 0, false // diagonals cannot overlap
}
kmin := max(-df, -db+e.delta)
- kmax := db + e.delta
- if df < kmax {
- kmax = df
- }
+ kmax := min(df, db+e.delta)
for k := kmin; k <= kmax; k += 2 {
x := e.vf.get(df, k)
u := e.vb.get(db, k-e.delta)
diff --git a/src/cmd/vendor/golang.org/x/tools/internal/diff/lcs/sequence.go b/src/cmd/vendor/golang.org/x/tools/internal/diff/lcs/sequence.go
index 2d72d2630435b8..811bb216ea2f24 100644
--- a/src/cmd/vendor/golang.org/x/tools/internal/diff/lcs/sequence.go
+++ b/src/cmd/vendor/golang.org/x/tools/internal/diff/lcs/sequence.go
@@ -103,11 +103,3 @@ func commonSuffixLenString(a, b string) int {
}
return i
}
-
-func min(x, y int) int {
- if x < y {
- return x
- } else {
- return y
- }
-}
diff --git a/src/cmd/vendor/golang.org/x/tools/internal/packagepath/packagepath.go b/src/cmd/vendor/golang.org/x/tools/internal/packagepath/packagepath.go
new file mode 100644
index 00000000000000..fa39a13f9eae51
--- /dev/null
+++ b/src/cmd/vendor/golang.org/x/tools/internal/packagepath/packagepath.go
@@ -0,0 +1,49 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package packagepath provides metadata operations on package path
+// strings.
+package packagepath
+
+// (This package should not depend on go/ast.)
+import "strings"
+
+// CanImport reports whether one package is allowed to import another.
+//
+// TODO(adonovan): allow customization of the accessibility relation
+// (e.g. for Bazel).
+func CanImport(from, to string) bool {
+ // TODO(adonovan): better segment hygiene.
+ if to == "internal" || strings.HasPrefix(to, "internal/") {
+ // Special case: only std packages may import internal/...
+ // We can't reliably know whether we're in std, so we
+ // use a heuristic on the first segment.
+ first, _, _ := strings.Cut(from, "/")
+ if strings.Contains(first, ".") {
+ return false // example.com/foo ∉ std
+ }
+ if first == "testdata" {
+ return false // testdata/foo ∉ std
+ }
+ }
+ if strings.HasSuffix(to, "/internal") {
+ return strings.HasPrefix(from, to[:len(to)-len("/internal")])
+ }
+ if i := strings.LastIndex(to, "/internal/"); i >= 0 {
+ return strings.HasPrefix(from, to[:i])
+ }
+ return true
+}
+
+// IsStdPackage reports whether the specified package path belongs to a
+// package in the standard library (including internal dependencies).
+func IsStdPackage(path string) bool {
+ // A standard package has no dot in its first segment.
+ // (It may yet have a dot, e.g. "vendor/golang.org/x/foo".)
+ slash := strings.IndexByte(path, '/')
+ if slash < 0 {
+ slash = len(path)
+ }
+ return !strings.Contains(path[:slash], ".") && path != "testdata"
+}
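
CanImport and IsStdPackage move verbatim from analysisinternal into the new packagepath package. A few illustrative cases, read directly off the code above (a sketch, not an exhaustive specification; the internal package is importable only within x/tools):

package main

import (
	"fmt"

	"golang.org/x/tools/internal/packagepath"
)

func main() {
	// Std detection: no dot in the first path segment.
	fmt.Println(packagepath.IsStdPackage("fmt"))             // true
	fmt.Println(packagepath.IsStdPackage("example.com/foo")) // false

	// internal/ visibility: only importers under the same prefix are allowed.
	fmt.Println(packagepath.CanImport("a/b/c", "a/b/internal/x")) // true
	fmt.Println(packagepath.CanImport("d/e", "a/b/internal/x"))   // false
}
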
diff --git a/src/cmd/vendor/golang.org/x/tools/internal/refactor/delete.go b/src/cmd/vendor/golang.org/x/tools/internal/refactor/delete.go
new file mode 100644
index 00000000000000..6df01d8ef9c779
--- /dev/null
+++ b/src/cmd/vendor/golang.org/x/tools/internal/refactor/delete.go
@@ -0,0 +1,484 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package refactor
+
+// This file defines operations for computing deletion edits.
+
+import (
+ "fmt"
+ "go/ast"
+ "go/token"
+ "go/types"
+ "slices"
+
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/ast/edge"
+ "golang.org/x/tools/go/ast/inspector"
+ "golang.org/x/tools/internal/astutil"
+ "golang.org/x/tools/internal/typesinternal"
+ "golang.org/x/tools/internal/typesinternal/typeindex"
+)
+
+// DeleteVar returns edits to delete the declaration of a variable or
+// constant whose defining identifier is curId.
+//
+// It handles variants including:
+// - GenDecl > ValueSpec versus AssignStmt;
+// - RHS expression has effects, or not;
+// - entire statement/declaration may be eliminated;
+// and removes associated comments.
+//
+// If it cannot make the necessary edits, such as for a function
+// parameter or result, it returns nil.
+func DeleteVar(tokFile *token.File, info *types.Info, curId inspector.Cursor) []analysis.TextEdit {
+ switch ek, _ := curId.ParentEdge(); ek {
+ case edge.ValueSpec_Names:
+ return deleteVarFromValueSpec(tokFile, info, curId)
+
+ case edge.AssignStmt_Lhs:
+ return deleteVarFromAssignStmt(tokFile, info, curId)
+ }
+
+ // e.g. function receiver, parameter, or result,
+ // or "switch v := expr.(T) {}" (which has no object).
+ return nil
+}
+
+// deleteVarFromValueSpec returns edits to delete the declaration of a
+// variable or constant within a ValueSpec.
+//
+// Precondition: curId is Ident beneath ValueSpec.Names beneath GenDecl.
+//
+// See also [deleteVarFromAssignStmt], which has parallel structure.
+func deleteVarFromValueSpec(tokFile *token.File, info *types.Info, curIdent inspector.Cursor) []analysis.TextEdit {
+ var (
+ id = curIdent.Node().(*ast.Ident)
+ curSpec = curIdent.Parent()
+ spec = curSpec.Node().(*ast.ValueSpec)
+ )
+
+ declaresOtherNames := slices.ContainsFunc(spec.Names, func(name *ast.Ident) bool {
+ return name != id && name.Name != "_"
+ })
+ noRHSEffects := !slices.ContainsFunc(spec.Values, func(rhs ast.Expr) bool {
+ return !typesinternal.NoEffects(info, rhs)
+ })
+ if !declaresOtherNames && noRHSEffects {
+ // The spec is no longer needed, either to declare
+ // other variables, or for its side effects.
+ return DeleteSpec(tokFile, curSpec)
+ }
+
+ // The spec is still needed, either for
+ // at least one LHS, or for effects on RHS.
+ // Blank out or delete just one LHS.
+
+ _, index := curIdent.ParentEdge() // index of LHS within ValueSpec.Names
+
+ // If there is no RHS, we can delete the LHS.
+ if len(spec.Values) == 0 {
+ var pos, end token.Pos
+ if index == len(spec.Names)-1 {
+ // Delete final name.
+ //
+ // var _, lhs1 T
+ // ------
+ pos = spec.Names[index-1].End()
+ end = spec.Names[index].End()
+ } else {
+ // Delete non-final name.
+ //
+ // var lhs0, _ T
+ // ------
+ pos = spec.Names[index].Pos()
+ end = spec.Names[index+1].Pos()
+ }
+ return []analysis.TextEdit{{
+ Pos: pos,
+ End: end,
+ }}
+ }
+
+ // If the assignment is n:n and the RHS has no effects,
+ // we can delete the LHS and its corresponding RHS.
+ if len(spec.Names) == len(spec.Values) &&
+ typesinternal.NoEffects(info, spec.Values[index]) {
+
+ if index == len(spec.Names)-1 {
+ // Delete final items.
+ //
+ // var _, lhs1 = rhs0, rhs1
+ // ------ ------
+ return []analysis.TextEdit{
+ {
+ Pos: spec.Names[index-1].End(),
+ End: spec.Names[index].End(),
+ },
+ {
+ Pos: spec.Values[index-1].End(),
+ End: spec.Values[index].End(),
+ },
+ }
+ } else {
+ // Delete non-final items.
+ //
+ // var lhs0, _ = rhs0, rhs1
+ // ------ ------
+ return []analysis.TextEdit{
+ {
+ Pos: spec.Names[index].Pos(),
+ End: spec.Names[index+1].Pos(),
+ },
+ {
+ Pos: spec.Values[index].Pos(),
+ End: spec.Values[index+1].Pos(),
+ },
+ }
+ }
+ }
+
+ // We cannot delete the RHS.
+ // Blank out the LHS.
+ return []analysis.TextEdit{{
+ Pos: id.Pos(),
+ End: id.End(),
+ NewText: []byte("_"),
+ }}
+}
+
+// Precondition: curIdent is an Ident beneath AssignStmt.Lhs.
+//
+// See also [deleteVarFromValueSpec], which has parallel structure.
+func deleteVarFromAssignStmt(tokFile *token.File, info *types.Info, curIdent inspector.Cursor) []analysis.TextEdit {
+ var (
+ id = curIdent.Node().(*ast.Ident)
+ curStmt = curIdent.Parent()
+ assign = curStmt.Node().(*ast.AssignStmt)
+ )
+
+ declaresOtherNames := slices.ContainsFunc(assign.Lhs, func(lhs ast.Expr) bool {
+ lhsId, ok := lhs.(*ast.Ident)
+ return ok && lhsId != id && lhsId.Name != "_"
+ })
+ noRHSEffects := !slices.ContainsFunc(assign.Rhs, func(rhs ast.Expr) bool {
+ return !typesinternal.NoEffects(info, rhs)
+ })
+ if !declaresOtherNames && noRHSEffects {
+ // The assignment is no longer needed, either to
+ // declare other variables, or for its side effects.
+ if edits := DeleteStmt(tokFile, curStmt); edits != nil {
+ return edits
+ }
+		// Statement could not be deleted in this context.
+ // Fall back to conservative deletion.
+ }
+
+	// The assignment is still needed, either for
+	// at least one LHS, or for effects on the RHS,
+	// or because it cannot be deleted in this context.
+	// Blank out or delete just one LHS.
+
+	// If the assignment is n:n and the RHS has no effects,
+ // we can delete the LHS and its corresponding RHS.
+ _, index := curIdent.ParentEdge()
+ if len(assign.Lhs) > 1 &&
+ len(assign.Lhs) == len(assign.Rhs) &&
+ typesinternal.NoEffects(info, assign.Rhs[index]) {
+
+ if index == len(assign.Lhs)-1 {
+ // Delete final items.
+ //
+ // _, lhs1 := rhs0, rhs1
+ // ------ ------
+ return []analysis.TextEdit{
+ {
+ Pos: assign.Lhs[index-1].End(),
+ End: assign.Lhs[index].End(),
+ },
+ {
+ Pos: assign.Rhs[index-1].End(),
+ End: assign.Rhs[index].End(),
+ },
+ }
+ } else {
+ // Delete non-final items.
+ //
+ // lhs0, _ := rhs0, rhs1
+ // ------ ------
+ return []analysis.TextEdit{
+ {
+ Pos: assign.Lhs[index].Pos(),
+ End: assign.Lhs[index+1].Pos(),
+ },
+ {
+ Pos: assign.Rhs[index].Pos(),
+ End: assign.Rhs[index+1].Pos(),
+ },
+ }
+ }
+ }
+
+ // We cannot delete the RHS.
+ // Blank out the LHS.
+ edits := []analysis.TextEdit{{
+ Pos: id.Pos(),
+ End: id.End(),
+ NewText: []byte("_"),
+ }}
+
+ // If this eliminates the final variable declared by
+ // an := statement, we need to turn it into an =
+ // assignment to avoid a "no new variables on left
+ // side of :=" error.
+ if !declaresOtherNames {
+ edits = append(edits, analysis.TextEdit{
+ Pos: assign.TokPos,
+ End: assign.TokPos + token.Pos(len(":=")),
+ NewText: []byte("="),
+ })
+ }
+
+ return edits
+}
+
+// DeleteSpec returns edits to delete the {Type,Value}Spec identified by curSpec.
+//
+// TODO(adonovan): add test suite. Test for consts as well.
+func DeleteSpec(tokFile *token.File, curSpec inspector.Cursor) []analysis.TextEdit {
+ var (
+ spec = curSpec.Node().(ast.Spec)
+ curDecl = curSpec.Parent()
+ decl = curDecl.Node().(*ast.GenDecl)
+ )
+
+ // If it is the sole spec in the decl,
+ // delete the entire decl.
+ if len(decl.Specs) == 1 {
+ return DeleteDecl(tokFile, curDecl)
+ }
+
+ // Delete the spec and its comments.
+ _, index := curSpec.ParentEdge() // index of ValueSpec within GenDecl.Specs
+ pos, end := spec.Pos(), spec.End()
+ if doc := astutil.DocComment(spec); doc != nil {
+ pos = doc.Pos() // leading comment
+ }
+ if index == len(decl.Specs)-1 {
+ // Delete final spec.
+ if c := eolComment(spec); c != nil {
+ // var (v int // comment \n)
+ end = c.End()
+ }
+ } else {
+ // Delete non-final spec.
+ // var ( a T; b T )
+ // -----
+ end = decl.Specs[index+1].Pos()
+ }
+ return []analysis.TextEdit{{
+ Pos: pos,
+ End: end,
+ }}
+}
+
+// DeleteDecl returns edits to delete the ast.Decl identified by curDecl.
+//
+// TODO(adonovan): add test suite.
+func DeleteDecl(tokFile *token.File, curDecl inspector.Cursor) []analysis.TextEdit {
+ decl := curDecl.Node().(ast.Decl)
+
+ ek, _ := curDecl.ParentEdge()
+ switch ek {
+ case edge.DeclStmt_Decl:
+ return DeleteStmt(tokFile, curDecl.Parent())
+
+ case edge.File_Decls:
+ pos, end := decl.Pos(), decl.End()
+ if doc := astutil.DocComment(decl); doc != nil {
+ pos = doc.Pos()
+ }
+
+ // Delete free-floating comments on same line as rparen.
+ // var (...) // comment
+ var (
+ file = curDecl.Parent().Node().(*ast.File)
+ lineOf = tokFile.Line
+ declEndLine = lineOf(decl.End())
+ )
+ for _, cg := range file.Comments {
+ for _, c := range cg.List {
+ if c.Pos() < end {
+ continue // too early
+ }
+ commentEndLine := lineOf(c.End())
+ if commentEndLine > declEndLine {
+ break // too late
+ } else if lineOf(c.Pos()) == declEndLine && commentEndLine == declEndLine {
+ end = c.End()
+ }
+ }
+ }
+
+ return []analysis.TextEdit{{
+ Pos: pos,
+ End: end,
+ }}
+
+ default:
+ panic(fmt.Sprintf("Decl parent is %v, want DeclStmt or File", ek))
+ }
+}
+
+// DeleteStmt returns the edits to remove the [ast.Stmt] identified by
+// curStmt, if it is contained within a BlockStmt, CaseClause,
+// CommClause, or is the STMT in switch STMT; ... {...}. It returns nil otherwise.
+func DeleteStmt(tokFile *token.File, curStmt inspector.Cursor) []analysis.TextEdit {
+ stmt := curStmt.Node().(ast.Stmt)
+	// If the statement is on a line by itself, delete the whole line;
+	// otherwise delete just the statement.
+
+	// This logic would be a lot simpler with the file contents, and somewhat
+	// simpler if the cursors included the comments.
+
+ lineOf := tokFile.Line
+ stmtStartLine, stmtEndLine := lineOf(stmt.Pos()), lineOf(stmt.End())
+
+ var from, to token.Pos
+ // bounds of adjacent syntax/comments on same line, if any
+ limits := func(left, right token.Pos) {
+ if lineOf(left) == stmtStartLine {
+ from = left
+ }
+ if lineOf(right) == stmtEndLine {
+ to = right
+ }
+ }
+ // TODO(pjw): there are other places a statement might be removed:
+ // IfStmt = "if" [ SimpleStmt ";" ] Expression Block [ "else" ( IfStmt | Block ) ] .
+ // (removing the blocks requires more rewriting than this routine would do)
+ // CommCase = "case" ( SendStmt | RecvStmt ) | "default" .
+ // (removing the stmt requires more rewriting, and it's unclear what the user means)
+ switch parent := curStmt.Parent().Node().(type) {
+ case *ast.SwitchStmt:
+ limits(parent.Switch, parent.Body.Lbrace)
+ case *ast.TypeSwitchStmt:
+ limits(parent.Switch, parent.Body.Lbrace)
+ if parent.Assign == stmt {
+ return nil // don't let the user break the type switch
+ }
+ case *ast.BlockStmt:
+ limits(parent.Lbrace, parent.Rbrace)
+ case *ast.CommClause:
+ limits(parent.Colon, curStmt.Parent().Parent().Node().(*ast.BlockStmt).Rbrace)
+ if parent.Comm == stmt {
+ return nil // maybe the user meant to remove the entire CommClause?
+ }
+ case *ast.CaseClause:
+ limits(parent.Colon, curStmt.Parent().Parent().Node().(*ast.BlockStmt).Rbrace)
+ case *ast.ForStmt:
+ limits(parent.For, parent.Body.Lbrace)
+
+ default:
+ return nil // not one of ours
+ }
+
+ if prev, found := curStmt.PrevSibling(); found && lineOf(prev.Node().End()) == stmtStartLine {
+ from = prev.Node().End() // preceding statement ends on same line
+ }
+ if next, found := curStmt.NextSibling(); found && lineOf(next.Node().Pos()) == stmtEndLine {
+ to = next.Node().Pos() // following statement begins on same line
+ }
+ // and now for the comments
+Outer:
+ for _, cg := range astutil.EnclosingFile(curStmt).Comments {
+ for _, co := range cg.List {
+ if lineOf(co.End()) < stmtStartLine {
+ continue
+ } else if lineOf(co.Pos()) > stmtEndLine {
+ break Outer // no more are possible
+ }
+ if lineOf(co.End()) == stmtStartLine && co.End() < stmt.Pos() {
+ if !from.IsValid() || co.End() > from {
+ from = co.End()
+ continue // maybe there are more
+ }
+ }
+ if lineOf(co.Pos()) == stmtEndLine && co.Pos() > stmt.End() {
+ if !to.IsValid() || co.Pos() < to {
+ to = co.Pos()
+ continue // maybe there are more
+ }
+ }
+ }
+ }
+	// If either from or to is valid, remove just the statement;
+	// otherwise remove the whole line.
+ edit := analysis.TextEdit{Pos: stmt.Pos(), End: stmt.End()}
+ if from.IsValid() || to.IsValid() {
+		// Remove just the statement.
+		// We can't tell whether a ";" or whitespace follows the statement;
+		// ideally we'd remove the former and leave the latter
+		// (if gofmt has run, there likely won't be a ";").
+		// In type switches we know there's a semicolon somewhere after the statement,
+		// but the extra work for this special case is not worth it, as gofmt will fix it.
+ return []analysis.TextEdit{edit}
+ }
+ // remove the whole line
+ for lineOf(edit.Pos) == stmtStartLine {
+ edit.Pos--
+ }
+	edit.Pos++ // get back to stmtStartLine
+ for lineOf(edit.End) == stmtEndLine {
+ edit.End++
+ }
+ return []analysis.TextEdit{edit}
+}
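+
+// Illustrative sketch (hypothetical input): in the block
+//
+//	{
+//		f()
+//		g() // trailing comment
+//	}
+//
+// deleting the f() statement removes its whole line, while deleting g()
+// removes only the statement text, leaving the trailing comment for gofmt
+// to tidy up. Deleting the assignment of a type switch, or the Comm
+// statement of a select case, returns nil.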
+
+// DeleteUnusedVars computes the edits required to delete the
+// declarations of any local variables whose last uses are in the
+// curDelend subtree, which is about to be deleted.
+func DeleteUnusedVars(index *typeindex.Index, info *types.Info, tokFile *token.File, curDelend inspector.Cursor) []analysis.TextEdit {
+ // TODO(adonovan): we might want to generalize this by
+ // splitting the two phases below, so that we can gather
+ // across a whole sequence of deletions then finally compute the
+ // set of variables that are no longer wanted.
+
+ // Count number of deletions of each var.
+ delcount := make(map[*types.Var]int)
+ for curId := range curDelend.Preorder((*ast.Ident)(nil)) {
+ id := curId.Node().(*ast.Ident)
+ if v, ok := info.Uses[id].(*types.Var); ok &&
+ typesinternal.GetVarKind(v) == typesinternal.LocalVar { // always false before go1.25
+ delcount[v]++
+ }
+ }
+
+ // Delete declaration of each var that became unused.
+ var edits []analysis.TextEdit
+ for v, count := range delcount {
+ if len(slices.Collect(index.Uses(v))) == count {
+ if curDefId, ok := index.Def(v); ok {
+ edits = append(edits, DeleteVar(tokFile, info, curDefId)...)
+ }
+ }
+ }
+ return edits
+}
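+
+// Illustrative sketch (hypothetical input): when the statement
+//
+//	use(tmp)
+//
+// is about to be deleted and it contains the sole use of the local variable
+// tmp, DeleteUnusedVars also returns edits that delete the declaration
+//
+//	tmp := compute()
+//
+// (or blank it out, if compute() may have effects).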
+
+func eolComment(n ast.Node) *ast.CommentGroup {
+ // TODO(adonovan): support:
+ // func f() {...} // comment
+ switch n := n.(type) {
+ case *ast.GenDecl:
+		if !n.Lparen.IsValid() && len(n.Specs) == 1 {
+ return eolComment(n.Specs[0])
+ }
+ case *ast.ValueSpec:
+ return n.Comment
+ case *ast.TypeSpec:
+ return n.Comment
+ }
+ return nil
+}
diff --git a/src/cmd/vendor/golang.org/x/tools/internal/refactor/imports.go b/src/cmd/vendor/golang.org/x/tools/internal/refactor/imports.go
new file mode 100644
index 00000000000000..b5440d896b9bfc
--- /dev/null
+++ b/src/cmd/vendor/golang.org/x/tools/internal/refactor/imports.go
@@ -0,0 +1,127 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package refactor
+
+// This file defines operations for computing edits to imports.
+
+import (
+ "fmt"
+ "go/ast"
+ "go/token"
+ "go/types"
+ pathpkg "path"
+
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/internal/packagepath"
+)
+
+// AddImport returns the prefix (either "pkg." or "") that should be
+// used to qualify references to the desired symbol (member) imported
+// from the specified package, plus any necessary edits to the file's
+// import declaration to add a new import.
+//
+// If the import already exists, and is accessible at pos, AddImport
+// returns the existing name and no edits. (If the existing import is
+// a dot import, the prefix is "".)
+//
+// Otherwise, it adds a new import, using a local name derived from
+// the preferred name. To request a blank import, use a preferredName
+// of "_", and discard the prefix result; member is ignored in this
+// case.
+//
+// AddImport accepts the caller's implicit claim that the imported
+// package declares member.
+//
+// AddImport does not mutate its arguments.
+func AddImport(info *types.Info, file *ast.File, preferredName, pkgpath, member string, pos token.Pos) (prefix string, edits []analysis.TextEdit) {
+ // Find innermost enclosing lexical block.
+ scope := info.Scopes[file].Innermost(pos)
+ if scope == nil {
+ panic("no enclosing lexical block")
+ }
+
+ // Is there an existing import of this package?
+ // If so, are we in its scope? (not shadowed)
+ for _, spec := range file.Imports {
+ pkgname := info.PkgNameOf(spec)
+ if pkgname != nil && pkgname.Imported().Path() == pkgpath {
+ name := pkgname.Name()
+ if preferredName == "_" {
+ // Request for blank import; any existing import will do.
+ return "", nil
+ }
+ if name == "." {
+ // The scope of ident must be the file scope.
+ if s, _ := scope.LookupParent(member, pos); s == info.Scopes[file] {
+ return "", nil
+ }
+ } else if _, obj := scope.LookupParent(name, pos); obj == pkgname {
+ return name + ".", nil
+ }
+ }
+ }
+
+ // We must add a new import.
+
+ // Ensure we have a fresh name.
+ newName := preferredName
+ if preferredName != "_" {
+ newName = FreshName(scope, pos, preferredName)
+ }
+
+ // Create a new import declaration either before the first existing
+ // declaration (which must exist), including its comments; or
+ // inside the declaration, if it is an import group.
+ //
+ // Use a renaming import whenever the preferred name is not
+ // available, or the chosen name does not match the last
+ // segment of its path.
+ newText := fmt.Sprintf("%q", pkgpath)
+ if newName != preferredName || newName != pathpkg.Base(pkgpath) {
+ newText = fmt.Sprintf("%s %q", newName, pkgpath)
+ }
+
+ decl0 := file.Decls[0]
+ var before ast.Node = decl0
+ switch decl0 := decl0.(type) {
+ case *ast.GenDecl:
+ if decl0.Doc != nil {
+ before = decl0.Doc
+ }
+ case *ast.FuncDecl:
+ if decl0.Doc != nil {
+ before = decl0.Doc
+ }
+ }
+ if gd, ok := before.(*ast.GenDecl); ok && gd.Tok == token.IMPORT && gd.Rparen.IsValid() {
+ // Have existing grouped import ( ... ) decl.
+ if packagepath.IsStdPackage(pkgpath) && len(gd.Specs) > 0 {
+ // Add spec for a std package before
+ // first existing spec, followed by
+ // a blank line if the next one is non-std.
+ first := gd.Specs[0].(*ast.ImportSpec)
+ pos = first.Pos()
+ if !packagepath.IsStdPackage(first.Path.Value) {
+ newText += "\n"
+ }
+ newText += "\n\t"
+ } else {
+ // Add spec at end of group.
+ pos = gd.Rparen
+ newText = "\t" + newText + "\n"
+ }
+ } else {
+ // No import decl, or non-grouped import.
+ // Add a new import decl before first decl.
+ // (gofmt will merge multiple import decls.)
+ pos = before.Pos()
+ newText = "import " + newText + "\n\n"
+ }
+ return newName + ".", []analysis.TextEdit{{
+ Pos: pos,
+ End: pos,
+ NewText: []byte(newText),
+ }}
+}
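+
+// Illustrative sketch (hypothetical call): for a file that does not yet
+// import "fmt", AddImport(info, file, "fmt", "fmt", "Println", pos) would
+// return the prefix "fmt." plus an edit that inserts the spec into the
+// existing import group (or adds a new import declaration), allowing the
+// caller to emit the qualified reference fmt.Println. If a conflicting
+// "fmt" is already in scope at pos, a renaming import such as fmt0 "fmt"
+// is added and the prefix "fmt0." is returned instead.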
diff --git a/src/cmd/vendor/golang.org/x/tools/internal/refactor/refactor.go b/src/cmd/vendor/golang.org/x/tools/internal/refactor/refactor.go
new file mode 100644
index 00000000000000..27b975089618b5
--- /dev/null
+++ b/src/cmd/vendor/golang.org/x/tools/internal/refactor/refactor.go
@@ -0,0 +1,29 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package refactor provides operators to compute common textual edits
+// for refactoring tools.
+//
+// This package should not use features of the analysis API
+// other than [analysis.TextEdit].
+package refactor
+
+import (
+ "fmt"
+ "go/token"
+ "go/types"
+)
+
+// FreshName returns the name of an identifier that is undefined
+// at the specified position, based on the preferred name.
+func FreshName(scope *types.Scope, pos token.Pos, preferred string) string {
+ newName := preferred
+ for i := 0; ; i++ {
+ if _, obj := scope.LookupParent(newName, pos); obj == nil {
+ break // fresh
+ }
+ newName = fmt.Sprintf("%s%d", preferred, i)
+ }
+ return newName
+}
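+
+// Illustrative sketch (hypothetical scope): if "err" is already declared in
+// an enclosing scope at pos, FreshName(scope, pos, "err") returns "err0";
+// if that name is taken too, "err1", and so on.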
diff --git a/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/fx.go b/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/fx.go
new file mode 100644
index 00000000000000..c846a53d5fe7f5
--- /dev/null
+++ b/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/fx.go
@@ -0,0 +1,88 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typesinternal
+
+import (
+ "go/ast"
+ "go/token"
+ "go/types"
+)
+
+// NoEffects reports whether the expression has no side effects, i.e., it
+// does not modify the memory state. This function is conservative: it may
+// return false even when the expression has no effect.
+func NoEffects(info *types.Info, expr ast.Expr) bool {
+ noEffects := true
+ ast.Inspect(expr, func(n ast.Node) bool {
+ switch v := n.(type) {
+ case nil, *ast.Ident, *ast.BasicLit, *ast.BinaryExpr, *ast.ParenExpr,
+ *ast.SelectorExpr, *ast.IndexExpr, *ast.SliceExpr, *ast.TypeAssertExpr,
+ *ast.StarExpr, *ast.CompositeLit,
+ // non-expressions that may appear within expressions
+ *ast.KeyValueExpr,
+ *ast.FieldList,
+ *ast.Field,
+ *ast.Ellipsis,
+ *ast.IndexListExpr:
+ // No effect.
+
+ case *ast.ArrayType,
+ *ast.StructType,
+ *ast.ChanType,
+ *ast.FuncType,
+ *ast.MapType,
+ *ast.InterfaceType:
+ // Type syntax: no effects, recursively.
+ // Prune descent.
+ return false
+
+ case *ast.UnaryExpr:
+ // Channel send <-ch has effects.
+ if v.Op == token.ARROW {
+ noEffects = false
+ }
+
+ case *ast.CallExpr:
+ // Type conversion has no effects.
+ if !info.Types[v.Fun].IsType() {
+ if CallsPureBuiltin(info, v) {
+ // A call such as len(e) has no effects of its
+ // own, though the subexpression e might.
+ } else {
+ noEffects = false
+ }
+ }
+
+ case *ast.FuncLit:
+ // A FuncLit has no effects, but do not descend into it.
+ return false
+
+ default:
+ // All other expressions have effects
+ noEffects = false
+ }
+
+ return noEffects
+ })
+ return noEffects
+}
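+
+// Illustrative sketch (hypothetical expressions): NoEffects reports true
+// for x+1, s[i:j], len(s), and T{K: v}, and false for f(), <-ch, and any
+// expression that contains them, since calls and channel receives may
+// modify program state.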
+
+// CallsPureBuiltin reports whether call is a call of a built-in
+// function that is a pure computation over its operands (analogous to
+// a + operator). Because it does not depend on program state, it may
+// be evaluated at any point--though not necessarily at multiple
+// points (consider new, make).
+func CallsPureBuiltin(info *types.Info, call *ast.CallExpr) bool {
+ if id, ok := ast.Unparen(call.Fun).(*ast.Ident); ok {
+ if b, ok := info.ObjectOf(id).(*types.Builtin); ok {
+ switch b.Name() {
+ case "len", "cap", "complex", "imag", "real", "make", "new", "max", "min":
+ return true
+ }
+ // Not: append clear close copy delete panic print println recover
+ }
+ }
+ return false
+}
diff --git a/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/isnamed.go b/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/isnamed.go
new file mode 100644
index 00000000000000..f2affec4fba929
--- /dev/null
+++ b/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/isnamed.go
@@ -0,0 +1,71 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typesinternal
+
+import (
+ "go/types"
+ "slices"
+)
+
+// IsTypeNamed reports whether t is (or is an alias for) a
+// package-level defined type with the given package path and one of
+// the given names. It returns false if t is nil.
+//
+// This function avoids allocating the concatenation of "pkg.Name",
+// which is important for the performance of syntax matching.
+func IsTypeNamed(t types.Type, pkgPath string, names ...string) bool {
+ if named, ok := types.Unalias(t).(*types.Named); ok {
+ tname := named.Obj()
+ return tname != nil &&
+ IsPackageLevel(tname) &&
+ tname.Pkg().Path() == pkgPath &&
+ slices.Contains(names, tname.Name())
+ }
+ return false
+}
+
+// IsPointerToNamed reports whether t is (or is an alias for) a pointer to a
+// package-level defined type with the given package path and one of the given
+// names. It returns false if t is not a pointer type.
+func IsPointerToNamed(t types.Type, pkgPath string, names ...string) bool {
+ r := Unpointer(t)
+ if r == t {
+ return false
+ }
+ return IsTypeNamed(r, pkgPath, names...)
+}
+
+// IsFunctionNamed reports whether obj is a package-level function
+// defined in the given package and has one of the given names.
+// It returns false if obj is nil.
+//
+// This function avoids allocating the concatenation of "pkg.Name",
+// which is important for the performance of syntax matching.
+func IsFunctionNamed(obj types.Object, pkgPath string, names ...string) bool {
+ f, ok := obj.(*types.Func)
+ return ok &&
+ IsPackageLevel(obj) &&
+ f.Pkg().Path() == pkgPath &&
+ f.Type().(*types.Signature).Recv() == nil &&
+ slices.Contains(names, f.Name())
+}
+
+// IsMethodNamed reports whether obj is a method defined on a
+// package-level type with the given package and type name, and has
+// one of the given names. It returns false if obj is nil.
+//
+// This function avoids allocating the concatenation of "pkg.TypeName.Name",
+// which is important for the performance of syntax matching.
+func IsMethodNamed(obj types.Object, pkgPath string, typeName string, names ...string) bool {
+ if fn, ok := obj.(*types.Func); ok {
+ if recv := fn.Type().(*types.Signature).Recv(); recv != nil {
+ _, T := ReceiverNamed(recv)
+ return T != nil &&
+ IsTypeNamed(T, pkgPath, typeName) &&
+ slices.Contains(names, fn.Name())
+ }
+ }
+ return false
+}
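+
+// Illustrative sketch (hypothetical calls): IsTypeNamed(t, "net/http",
+// "Client", "Server") reports whether t is net/http.Client or net/http.Server
+// (or an alias for either), and IsMethodNamed(obj, "time", "Time", "Unix")
+// reports whether obj is the Unix method of time.Time; neither call
+// allocates the "pkg.Name" string.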
diff --git a/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/types.go b/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/types.go
index a5cd7e8dbfcb9d..fef74a78560480 100644
--- a/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/types.go
+++ b/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/types.go
@@ -2,8 +2,20 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Package typesinternal provides access to internal go/types APIs that are not
-// yet exported.
+// Package typesinternal provides helpful operators for dealing with
+// go/types:
+//
+// - operators for querying typed syntax trees (e.g. [Imports], [IsFunctionNamed]);
+// - functions for converting types to strings or syntax (e.g. [TypeExpr], [FileQualifier]);
+// - helpers for working with the [go/types] API (e.g. [NewTypesInfo]);
+// - access to internal go/types APIs that are not yet
+// exported (e.g. [SetUsesCgo], [ErrorCodeStartEnd], [VarKind]); and
+// - common algorithms related to types (e.g. [TooNewStdSymbols]).
+//
+// See also:
+// - [golang.org/x/tools/internal/astutil], for operations on untyped syntax;
+// - [golang.org/x/tools/internal/analysisinternal], for helpers for analyzers;
+// - [golang.org/x/tools/internal/refactor], for operators to compute text edits.
package typesinternal
import (
@@ -13,6 +25,7 @@ import (
"reflect"
"unsafe"
+ "golang.org/x/tools/go/ast/inspector"
"golang.org/x/tools/internal/aliases"
)
@@ -60,6 +73,9 @@ func ErrorCodeStartEnd(err types.Error) (code ErrorCode, start, end token.Pos, o
// which is often excessive.)
//
// If pkg is nil, it is equivalent to [*types.Package.Name].
+//
+// TODO(adonovan): all uses of this with TypeString should be
+// eliminated when https://go.dev/issues/75604 is resolved.
func NameRelativeTo(pkg *types.Package) types.Qualifier {
return func(other *types.Package) string {
if pkg != nil && pkg == other {
@@ -153,3 +169,31 @@ func NewTypesInfo() *types.Info {
FileVersions: map[*ast.File]string{},
}
}
+
+// EnclosingScope returns the innermost block logically enclosing the cursor.
+func EnclosingScope(info *types.Info, cur inspector.Cursor) *types.Scope {
+ for cur := range cur.Enclosing() {
+ n := cur.Node()
+ // A function's Scope is associated with its FuncType.
+ switch f := n.(type) {
+ case *ast.FuncDecl:
+ n = f.Type
+ case *ast.FuncLit:
+ n = f.Type
+ }
+ if b := info.Scopes[n]; b != nil {
+ return b
+ }
+ }
+ panic("no Scope for *ast.File")
+}
+
+// Imports reports whether path is imported by pkg.
+func Imports(pkg *types.Package, path string) bool {
+ for _, imp := range pkg.Imports() {
+ if imp.Path() == path {
+ return true
+ }
+ }
+ return false
+}
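+
+// Illustrative sketch (hypothetical call): Imports(pkg, "unsafe") reports
+// whether "unsafe" appears among pkg's direct imports as recorded by
+// [types.Package.Imports].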
diff --git a/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/varkind.go b/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/varkind.go
index e5da0495111ba1..26499cdd2e70fb 100644
--- a/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/varkind.go
+++ b/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/varkind.go
@@ -2,39 +2,22 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package typesinternal
+//go:build go1.25
-// TODO(adonovan): when CL 645115 lands, define the go1.25 version of
-// this API that actually does something.
+package typesinternal
import "go/types"
-type VarKind uint8
+type VarKind = types.VarKind
const (
- _ VarKind = iota // (not meaningful)
- PackageVar // a package-level variable
- LocalVar // a local variable
- RecvVar // a method receiver variable
- ParamVar // a function parameter variable
- ResultVar // a function result variable
- FieldVar // a struct field
+ PackageVar = types.PackageVar
+ LocalVar = types.LocalVar
+ RecvVar = types.RecvVar
+ ParamVar = types.ParamVar
+ ResultVar = types.ResultVar
+ FieldVar = types.FieldVar
)
-func (kind VarKind) String() string {
- return [...]string{
- 0: "VarKind(0)",
- PackageVar: "PackageVar",
- LocalVar: "LocalVar",
- RecvVar: "RecvVar",
- ParamVar: "ParamVar",
- ResultVar: "ResultVar",
- FieldVar: "FieldVar",
- }[kind]
-}
-
-// GetVarKind returns an invalid VarKind.
-func GetVarKind(v *types.Var) VarKind { return 0 }
-
-// SetVarKind has no effect.
-func SetVarKind(v *types.Var, kind VarKind) {}
+func GetVarKind(v *types.Var) VarKind { return v.Kind() }
+func SetVarKind(v *types.Var, kind VarKind) { v.SetKind(kind) }
diff --git a/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/varkind_go124.go b/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/varkind_go124.go
new file mode 100644
index 00000000000000..17b1804b4e85e1
--- /dev/null
+++ b/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/varkind_go124.go
@@ -0,0 +1,39 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.25
+
+package typesinternal
+
+import "go/types"
+
+type VarKind uint8
+
+const (
+ _ VarKind = iota // (not meaningful)
+ PackageVar // a package-level variable
+ LocalVar // a local variable
+ RecvVar // a method receiver variable
+ ParamVar // a function parameter variable
+ ResultVar // a function result variable
+ FieldVar // a struct field
+)
+
+func (kind VarKind) String() string {
+ return [...]string{
+ 0: "VarKind(0)",
+ PackageVar: "PackageVar",
+ LocalVar: "LocalVar",
+ RecvVar: "RecvVar",
+ ParamVar: "ParamVar",
+ ResultVar: "ResultVar",
+ FieldVar: "FieldVar",
+ }[kind]
+}
+
+// GetVarKind returns an invalid VarKind.
+func GetVarKind(v *types.Var) VarKind { return 0 }
+
+// SetVarKind has no effect.
+func SetVarKind(v *types.Var, kind VarKind) {}
diff --git a/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go b/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go
index d272949c177189..453bba2ad5e840 100644
--- a/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go
+++ b/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go
@@ -204,23 +204,12 @@ func ZeroExpr(t types.Type, qual types.Qualifier) (_ ast.Expr, isValid bool) {
}
}
-// IsZeroExpr uses simple syntactic heuristics to report whether expr
-// is a obvious zero value, such as 0, "", nil, or false.
-// It cannot do better without type information.
-func IsZeroExpr(expr ast.Expr) bool {
- switch e := expr.(type) {
- case *ast.BasicLit:
- return e.Value == "0" || e.Value == `""`
- case *ast.Ident:
- return e.Name == "nil" || e.Name == "false"
- default:
- return false
- }
-}
-
// TypeExpr returns syntax for the specified type. References to named types
// are qualified by an appropriate (optional) qualifier function.
// It may panic for types such as Tuple or Union.
+//
+// See also https://go.dev/issues/75604, which will provide a robust
+// Type-to-valid-Go-syntax formatter.
func TypeExpr(t types.Type, qual types.Qualifier) ast.Expr {
switch t := t.(type) {
case *types.Basic:
diff --git a/src/cmd/vendor/modules.txt b/src/cmd/vendor/modules.txt
index 133271355f2caf..f4c60cc3281b78 100644
--- a/src/cmd/vendor/modules.txt
+++ b/src/cmd/vendor/modules.txt
@@ -28,7 +28,7 @@ golang.org/x/arch/x86/x86asm
# golang.org/x/build v0.0.0-20250806225920-b7c66c047964
## explicit; go 1.23.0
golang.org/x/build/relnote
-# golang.org/x/mod v0.28.0
+# golang.org/x/mod v0.29.0
## explicit; go 1.24.0
golang.org/x/mod/internal/lazyregexp
golang.org/x/mod/modfile
@@ -43,12 +43,12 @@ golang.org/x/mod/zip
## explicit; go 1.24.0
golang.org/x/sync/errgroup
golang.org/x/sync/semaphore
-# golang.org/x/sys v0.36.0
+# golang.org/x/sys v0.37.0
## explicit; go 1.24.0
golang.org/x/sys/plan9
golang.org/x/sys/unix
golang.org/x/sys/windows
-# golang.org/x/telemetry v0.0.0-20250908211612-aef8a434d053
+# golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8
## explicit; go 1.24.0
golang.org/x/telemetry
golang.org/x/telemetry/counter
@@ -63,7 +63,7 @@ golang.org/x/telemetry/internal/upload
# golang.org/x/term v0.34.0
## explicit; go 1.23.0
golang.org/x/term
-# golang.org/x/text v0.29.0
+# golang.org/x/text v0.30.0
## explicit; go 1.24.0
golang.org/x/text/cases
golang.org/x/text/internal
@@ -73,7 +73,7 @@ golang.org/x/text/internal/tag
golang.org/x/text/language
golang.org/x/text/transform
golang.org/x/text/unicode/norm
-# golang.org/x/tools v0.37.1-0.20250924232827-4df13e317ce4
+# golang.org/x/tools v0.38.1-0.20251015192825-7d9453ccc0f5
## explicit; go 1.24.0
golang.org/x/tools/cmd/bisect
golang.org/x/tools/cover
@@ -97,7 +97,6 @@ golang.org/x/tools/go/analysis/passes/hostport
golang.org/x/tools/go/analysis/passes/httpresponse
golang.org/x/tools/go/analysis/passes/ifaceassert
golang.org/x/tools/go/analysis/passes/inspect
-golang.org/x/tools/go/analysis/passes/internal/analysisutil
golang.org/x/tools/go/analysis/passes/loopclosure
golang.org/x/tools/go/analysis/passes/lostcancel
golang.org/x/tools/go/analysis/passes/nilfunc
@@ -133,6 +132,8 @@ golang.org/x/tools/internal/diff/lcs
golang.org/x/tools/internal/facts
golang.org/x/tools/internal/fmtstr
golang.org/x/tools/internal/moreiters
+golang.org/x/tools/internal/packagepath
+golang.org/x/tools/internal/refactor
golang.org/x/tools/internal/stdlib
golang.org/x/tools/internal/typeparams
golang.org/x/tools/internal/typesinternal
diff --git a/src/cmd/vet/doc.go b/src/cmd/vet/doc.go
index 8e72c252ed9984..ca208845615c8b 100644
--- a/src/cmd/vet/doc.go
+++ b/src/cmd/vet/doc.go
@@ -40,6 +40,7 @@ To list the available checks, run "go tool vet help":
directive check Go toolchain directives such as //go:debug
errorsas report passing non-pointer or non-error values to errors.As
framepointer report assembly that clobbers the frame pointer before saving it
+ hostport check format of addresses passed to net.Dial
httpresponse check for mistakes using HTTP responses
ifaceassert detect impossible interface-to-interface type assertions
loopclosure check references to loop variables from within nested functions
@@ -50,6 +51,7 @@ To list the available checks, run "go tool vet help":
sigchanyzer check for unbuffered channel of os.Signal
slog check for invalid structured logging calls
stdmethods check signature of methods of well-known interfaces
+ stdversion report uses of too-new standard library symbols
stringintconv check for string(int) conversions
structtag check that struct field tags conform to reflect.StructTag.Get
testinggoroutine report calls to (*testing.T).Fatal from goroutines started by a test
diff --git a/src/cmd/vet/main.go b/src/cmd/vet/main.go
index 49f4e2f3425694..e7164a46b0a323 100644
--- a/src/cmd/vet/main.go
+++ b/src/cmd/vet/main.go
@@ -7,10 +7,8 @@ package main
import (
"cmd/internal/objabi"
"cmd/internal/telemetry/counter"
- "flag"
-
- "golang.org/x/tools/go/analysis/unitchecker"
+ "golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/appends"
"golang.org/x/tools/go/analysis/passes/asmdecl"
"golang.org/x/tools/go/analysis/passes/assign"
@@ -46,52 +44,57 @@ import (
"golang.org/x/tools/go/analysis/passes/unsafeptr"
"golang.org/x/tools/go/analysis/passes/unusedresult"
"golang.org/x/tools/go/analysis/passes/waitgroup"
+ "golang.org/x/tools/go/analysis/unitchecker"
)
func main() {
+ // Keep consistent with cmd/fix/main.go!
counter.Open()
objabi.AddVersionFlag()
-
counter.Inc("vet/invocations")
- unitchecker.Main(
- appends.Analyzer,
- asmdecl.Analyzer,
- assign.Analyzer,
- atomic.Analyzer,
- bools.Analyzer,
- buildtag.Analyzer,
- cgocall.Analyzer,
- composite.Analyzer,
- copylock.Analyzer,
- defers.Analyzer,
- directive.Analyzer,
- errorsas.Analyzer,
- framepointer.Analyzer,
- httpresponse.Analyzer,
- hostport.Analyzer,
- ifaceassert.Analyzer,
- loopclosure.Analyzer,
- lostcancel.Analyzer,
- nilfunc.Analyzer,
- printf.Analyzer,
- shift.Analyzer,
- sigchanyzer.Analyzer,
- slog.Analyzer,
- stdmethods.Analyzer,
- stdversion.Analyzer,
- stringintconv.Analyzer,
- structtag.Analyzer,
- tests.Analyzer,
- testinggoroutine.Analyzer,
- timeformat.Analyzer,
- unmarshal.Analyzer,
- unreachable.Analyzer,
- unsafeptr.Analyzer,
- unusedresult.Analyzer,
- waitgroup.Analyzer,
- )
- // It's possible that unitchecker will exit early. In
- // those cases the flags won't be counted.
- counter.CountFlags("vet/flag:", *flag.CommandLine)
+ unitchecker.Main(suite...) // (never returns)
+}
+
+// The vet suite analyzers report diagnostics.
+// (Diagnostics must describe real problems, but need not
+// suggest fixes, and fixes are not necessarily safe to apply.)
+var suite = []*analysis.Analyzer{
+ appends.Analyzer,
+ asmdecl.Analyzer,
+ assign.Analyzer,
+ atomic.Analyzer,
+ bools.Analyzer,
+ buildtag.Analyzer,
+ cgocall.Analyzer,
+ composite.Analyzer,
+ copylock.Analyzer,
+ defers.Analyzer,
+ directive.Analyzer,
+ errorsas.Analyzer,
+ // fieldalignment.Analyzer omitted: too noisy
+ framepointer.Analyzer,
+ httpresponse.Analyzer,
+ hostport.Analyzer,
+ ifaceassert.Analyzer,
+ loopclosure.Analyzer,
+ lostcancel.Analyzer,
+ nilfunc.Analyzer,
+ printf.Analyzer,
+ // shadow.Analyzer omitted: too noisy
+ shift.Analyzer,
+ sigchanyzer.Analyzer,
+ slog.Analyzer,
+ stdmethods.Analyzer,
+ stdversion.Analyzer,
+ stringintconv.Analyzer,
+ structtag.Analyzer,
+ tests.Analyzer,
+ testinggoroutine.Analyzer,
+ timeformat.Analyzer,
+ unmarshal.Analyzer,
+ unreachable.Analyzer,
+ unsafeptr.Analyzer,
+ unusedresult.Analyzer,
+ waitgroup.Analyzer,
}
diff --git a/src/cmd/vet/testdata/print/print.go b/src/cmd/vet/testdata/print/print.go
index e00222c42b5aef..3761da420bea3c 100644
--- a/src/cmd/vet/testdata/print/print.go
+++ b/src/cmd/vet/testdata/print/print.go
@@ -162,7 +162,7 @@ func PrintfTests() {
Printf("hi") // ok
const format = "%s %s\n"
Printf(format, "hi", "there")
- Printf(format, "hi") // ERROR "Printf format %s reads arg #2, but call has 1 arg$"
+ Printf(format, "hi") // ERROR "Printf format %s reads arg #2, but call has 1 arg"
Printf("%s %d %.3v %q", "str", 4) // ERROR "Printf format %.3v reads arg #3, but call has 2 args"
f := new(ptrStringer)
f.Warn(0, "%s", "hello", 3) // ERROR "Warn call has possible Printf formatting directive %s"
diff --git a/src/cmd/vet/vet_test.go b/src/cmd/vet/vet_test.go
index 54eabca938c3a9..0d509de528f247 100644
--- a/src/cmd/vet/vet_test.go
+++ b/src/cmd/vet/vet_test.go
@@ -28,7 +28,8 @@ func TestMain(m *testing.M) {
os.Exit(0)
}
- os.Setenv("GO_VETTEST_IS_VET", "1") // Set for subprocesses to inherit.
+ // Set for subprocesses to inherit.
+ os.Setenv("GO_VETTEST_IS_VET", "1") // ignore error
os.Exit(m.Run())
}
@@ -115,7 +116,7 @@ func TestVet(t *testing.T) {
cmd.Env = append(os.Environ(), "GOWORK=off")
cmd.Dir = "testdata/rangeloop"
cmd.Stderr = new(strings.Builder) // all vet output goes to stderr
- cmd.Run()
+ cmd.Run() // ignore error
stderr := cmd.Stderr.(fmt.Stringer).String()
filename := filepath.FromSlash("testdata/rangeloop/rangeloop.go")
@@ -134,7 +135,7 @@ func TestVet(t *testing.T) {
if err := errorCheck(stderr, false, filename, filepath.Base(filename)); err != nil {
t.Errorf("error check failed: %s", err)
- t.Log("vet stderr:\n", cmd.Stderr)
+ t.Logf("vet stderr:\n<<%s>>", cmd.Stderr)
}
})
@@ -146,7 +147,7 @@ func TestVet(t *testing.T) {
cmd.Env = append(os.Environ(), "GOWORK=off")
cmd.Dir = "testdata/stdversion"
cmd.Stderr = new(strings.Builder) // all vet output goes to stderr
- cmd.Run()
+ cmd.Run() // ignore error
stderr := cmd.Stderr.(fmt.Stringer).String()
filename := filepath.FromSlash("testdata/stdversion/stdversion.go")
@@ -165,7 +166,7 @@ func TestVet(t *testing.T) {
if err := errorCheck(stderr, false, filename, filepath.Base(filename)); err != nil {
t.Errorf("error check failed: %s", err)
- t.Log("vet stderr:\n", cmd.Stderr)
+ t.Logf("vet stderr:\n<<%s>>", cmd.Stderr)
}
})
}
@@ -184,7 +185,7 @@ func cgoEnabled(t *testing.T) bool {
func errchk(c *exec.Cmd, files []string, t *testing.T) {
output, err := c.CombinedOutput()
if _, ok := err.(*exec.ExitError); !ok {
- t.Logf("vet output:\n%s", output)
+ t.Logf("vet output:\n<<%s>>", output)
t.Fatal(err)
}
fullshort := make([]string, 0, len(files)*2)
@@ -205,7 +206,6 @@ func TestTags(t *testing.T) {
"x testtag y": 1,
"othertag": 2,
} {
- tag, wantFile := tag, wantFile
t.Run(tag, func(t *testing.T) {
t.Parallel()
t.Logf("-tags=%s", tag)
@@ -266,7 +266,7 @@ func errorCheck(outStr string, wantAuto bool, fullshort ...string) (err error) {
errmsgs, out = partitionStrings(we.prefix, out)
}
if len(errmsgs) == 0 {
- errs = append(errs, fmt.Errorf("%s:%d: missing error %q", we.file, we.lineNum, we.reStr))
+ errs = append(errs, fmt.Errorf("%s:%d: missing error %q (prefix: %s)", we.file, we.lineNum, we.reStr, we.prefix))
continue
}
matched := false
diff --git a/src/crypto/internal/entropy/entropy.go b/src/crypto/internal/entropy/entropy.go
index 5319e9e47a7455..73fd5298007a11 100644
--- a/src/crypto/internal/entropy/entropy.go
+++ b/src/crypto/internal/entropy/entropy.go
@@ -3,9 +3,11 @@
// license that can be found in the LICENSE file.
// Package entropy provides the passive entropy source for the FIPS 140-3
-// module. It is only used in FIPS mode by [crypto/internal/fips140/drbg.Read].
+// module. It is only used in FIPS mode by [crypto/internal/fips140/drbg.Read]
+// from the FIPS 140-3 Go Cryptographic Module v1.0.0. Later versions of the
+// module have an internal CPU jitter-based entropy source.
//
-// This complies with IG 9.3.A, Additional Comment 12, which until January 1,
+// This complied with IG 9.3.A, Additional Comment 12, which until January 1,
// 2026 allows new modules to meet an [earlier version] of Resolution 2(b):
// "A software module that contains an approved DRBG that receives a LOAD
// command (or its logical equivalent) with entropy obtained from [...] inside
diff --git a/src/crypto/internal/fips140/drbg/rand.go b/src/crypto/internal/fips140/drbg/rand.go
index c1a3ea0ae658ff..3ccb018e326047 100644
--- a/src/crypto/internal/fips140/drbg/rand.go
+++ b/src/crypto/internal/fips140/drbg/rand.go
@@ -9,21 +9,53 @@
package drbg
import (
- "crypto/internal/entropy"
"crypto/internal/fips140"
+ "crypto/internal/fips140/entropy"
"crypto/internal/randutil"
"crypto/internal/sysrand"
"io"
"sync"
+ "sync/atomic"
)
-var drbgs = sync.Pool{
+// memory is a scratch buffer that is accessed between samples by the entropy
+// source to expose it to memory access timings.
+//
+// We reuse it and share it between Seed calls to avoid the significant (~500µs)
+// cost of zeroing a new allocation every time. The entropy source accesses it
+// using atomics (and doesn't care about its contents).
+//
+// It should end up in the .noptrbss section, and become backed by physical pages
+// at first use. This ensures that programs that do not use the FIPS 140-3 module
+// do not incur any memory use or initialization penalties.
+var memory entropy.ScratchBuffer
+
+func getEntropy() *[SeedSize]byte {
+ var retries int
+ seed, err := entropy.Seed(&memory)
+ for err != nil {
+ // The CPU jitter-based SP 800-90B entropy source has a non-negligible
+ // chance of failing the startup health tests.
+ //
+ // Each time it does, it enters a permanent failure state, and we
+ // restart it anew. This is not expected to happen more than a few times
+ // in a row.
+ if retries++; retries > 100 {
+ panic("fips140/drbg: failed to obtain initial entropy")
+ }
+ seed, err = entropy.Seed(&memory)
+ }
+ return &seed
+}
+
+// getEntropy is very slow (~500µs), so we don't want it on the hot path.
+// We keep both a persistent DRBG instance and a pool of additional instances.
+// Occasional uses will use drbgInstance, even if the pool was emptied since the
+// last use. Frequent concurrent uses will fill the pool and use it.
+var drbgInstance atomic.Pointer[Counter]
+var drbgPool = sync.Pool{
New: func() any {
- var c *Counter
- entropy.Depleted(func(seed *[48]byte) {
- c = NewCounter(seed)
- })
- return c
+ return NewCounter(getEntropy())
},
}
@@ -44,8 +76,15 @@ func Read(b []byte) {
additionalInput := new([SeedSize]byte)
sysrand.Read(additionalInput[:16])
- drbg := drbgs.Get().(*Counter)
- defer drbgs.Put(drbg)
+ drbg := drbgInstance.Swap(nil)
+ if drbg == nil {
+ drbg = drbgPool.Get().(*Counter)
+ }
+ defer func() {
+ if !drbgInstance.CompareAndSwap(nil, drbg) {
+ drbgPool.Put(drbg)
+ }
+ }()
for len(b) > 0 {
size := min(len(b), maxRequestSize)
@@ -54,9 +93,7 @@ func Read(b []byte) {
// Section 9.3.2: if Generate reports a reseed is required, the
// additional input is passed to Reseed along with the entropy and
// then nulled before the next Generate call.
- entropy.Depleted(func(seed *[48]byte) {
- drbg.Reseed(seed, additionalInput)
- })
+ drbg.Reseed(getEntropy(), additionalInput)
additionalInput = nil
continue
}
diff --git a/src/crypto/internal/fips140/entropy/entropy.go b/src/crypto/internal/fips140/entropy/entropy.go
new file mode 100644
index 00000000000000..f5b2f53752a028
--- /dev/null
+++ b/src/crypto/internal/fips140/entropy/entropy.go
@@ -0,0 +1,204 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package entropy implements a CPU jitter-based SP 800-90B entropy source.
+package entropy
+
+import (
+ "crypto/internal/fips140deps/time"
+ "errors"
+ "sync/atomic"
+ "unsafe"
+)
+
+// Version returns the version of the entropy source.
+//
+// This is independent of the FIPS 140-3 module version, in order to reuse the
+// ESV certificate across module versions.
+func Version() string {
+ return "v1.0.0"
+}
+
+// ScratchBuffer is a large buffer that will be written to using atomics, to
+// generate noise from memory access timings. Its contents do not matter.
+type ScratchBuffer [1 << 25]byte
+
+// Seed returns a 384-bit seed with full entropy.
+//
+// memory is passed in to allow changing the allocation strategy without
+// modifying the frozen and certified entropy source in this package.
+//
+// Seed returns an error if the entropy source startup health tests fail, which
+// has a non-negligible chance of happening.
+func Seed(memory *ScratchBuffer) ([48]byte, error) {
+ // Collect w = 1024 samples, each certified to provide no less than h = 0.5
+ // bits of entropy, for a total of hᵢₙ = w × h = 512 bits of entropy, over
+ // nᵢₙ = w × n = 8192 bits of input data.
+ var samples [1024]byte
+ if err := Samples(samples[:], memory); err != nil {
+ return [48]byte{}, err
+ }
+
+ // Use a vetted unkeyed conditioning component, SHA-384, with nw = 384 and
+ // nₒᵤₜ = 384. Per the formula in SP 800-90B Section 3.1.5.1.2, the output
+ // entropy hₒᵤₜ is:
+ //
+ // sage: n_in = 8192
+ // sage: n_out = 384
+ // sage: nw = 384
+ // sage: h_in = 512
+ // sage: P_high = 2^(-h_in)
+ // sage: P_low = (1 - P_high) / (2^n_in - 1)
+ // sage: n = min(n_out, nw)
+ // sage: ψ = 2^(n_in - n) * P_low + P_high
+ // sage: U = 2^(n_in - n) + sqrt(2 * n * 2^(n_in - n) * ln(2))
+ // sage: ω = U * P_low
+ // sage: h_out = -log(max(ψ, ω), 2)
+ // sage: h_out.n()
+ // 384.000000000000
+ //
+ // According to Implementation Guidance D.K, Resolution 19, since
+ //
+ // - the conditioning component is vetted,
+ // - hᵢₙ = 512 ≥ nₒᵤₜ + 64 = 448, and
+ // - nₒᵤₜ ≤ security strength of SHA-384 = 384 (per SP 800-107 Rev. 1, Table 1),
+ //
+ // we can claim the output has full entropy.
+ return SHA384(&samples), nil
+}
+
+// Samples starts a new entropy source, collects the requested number of
+// samples, conducts startup health tests, and returns the samples or an error
+// if the health tests fail.
+//
+// The health tests have a non-negligible chance of failing.
+func Samples(samples []uint8, memory *ScratchBuffer) error {
+ if len(samples) < 1024 {
+ return errors.New("entropy: at least 1024 samples are required for startup health tests")
+ }
+ s := newSource(memory)
+ for range 4 {
+ // Warm up the source to avoid any initial bias.
+ _ = s.Sample()
+ }
+ for i := range samples {
+ samples[i] = s.Sample()
+ }
+ if err := RepetitionCountTest(samples); err != nil {
+ return err
+ }
+ if err := AdaptiveProportionTest(samples); err != nil {
+ return err
+ }
+ return nil
+}
+
+type source struct {
+ memory *ScratchBuffer
+ lcgState uint32
+ previous int64
+}
+
+func newSource(memory *ScratchBuffer) *source {
+ return &source{
+ memory: memory,
+ lcgState: uint32(time.HighPrecisionNow()),
+ previous: time.HighPrecisionNow(),
+ }
+}
+
+// touchMemory performs a write to memory at the given index.
+//
+// The memory slice is passed in and may be shared across sources e.g. to avoid
+// the significant (~500µs) cost of zeroing a new allocation on every [Seed] call.
+func touchMemory(memory *ScratchBuffer, idx uint32) {
+ idx = idx / 4 * 4 // align to 32 bits
+ u32 := (*uint32)(unsafe.Pointer(&memory[idx]))
+ last := atomic.LoadUint32(u32)
+ atomic.SwapUint32(u32, last+13)
+}
+
+func (s *source) Sample() uint8 {
+ // Perform a few memory accesses in an unpredictable pattern to expose the
+ // next measurement to as much system noise as possible.
+ memory, lcgState := s.memory, s.lcgState
+ if memory == nil { // remove the nil check from the inlined touchMemory calls
+ panic("entropy: nil memory buffer")
+ }
+ for range 64 {
+ lcgState = 1664525*lcgState + 1013904223
+ // Discard the lower bits, which tend to fall into short cycles.
+ idx := (lcgState >> 6) & (1<<25 - 1)
+ touchMemory(memory, idx)
+ }
+ s.lcgState = lcgState
+
+ t := time.HighPrecisionNow()
+ sample := t - s.previous
+ s.previous = t
+
+ // Reduce the symbol space to 256 values, assuming most of the entropy is in
+ // the least-significant bits, which represent the highest-resolution timing
+ // differences.
+ return uint8(sample)
+}
+
+// RepetitionCountTest implements the repetition count test from SP 800-90B
+// Section 4.4.1. It returns an error if any symbol is repeated C = 41 or more
+// times in a row.
+//
+// This C value is calculated from a target failure probability α = 2⁻²⁰ and a
+// claimed min-entropy per symbol h = 0.5 bits, using the formula in SP 800-90B
+// Section 4.4.1.
+//
+// sage: α = 2^-20
+// sage: H = 0.5
+// sage: 1 + ceil(-log(α, 2) / H)
+// 41
+func RepetitionCountTest(samples []uint8) error {
+ x := samples[0]
+ count := 1
+ for _, y := range samples[1:] {
+ if y == x {
+ count++
+ if count >= 41 {
+ return errors.New("entropy: repetition count health test failed")
+ }
+ } else {
+ x = y
+ count = 1
+ }
+ }
+ return nil
+}
+
+// AdaptiveProportionTest implements the adaptive proportion test from SP 800-90B
+// Section 4.4.2. It returns an error if any symbol appears C = 410 or more
+// times in the last W = 512 samples.
+//
+// This C value is calculated from a target failure probability α = 2⁻²⁰, a
+// window size W = 512, and a claimed min-entropy per symbol h = 0.5 bits, using
+// the formula in SP 800-90B Section 4.4.2, equivalent to the Microsoft Excel
+// formula 1+CRITBINOM(W, power(2,(−H)),1−α).
+//
+// sage: from scipy.stats import binom
+// sage: α = 2^-20
+// sage: H = 0.5
+// sage: W = 512
+// sage: C = 1 + binom.ppf(1 - α, W, 2**(-H))
+// sage: ceil(C)
+// 410
+func AdaptiveProportionTest(samples []uint8) error {
+ var counts [256]int
+ for i, x := range samples {
+ counts[x]++
+ if i >= 512 {
+ counts[samples[i-512]]--
+ }
+ if counts[x] >= 410 {
+ return errors.New("entropy: adaptive proportion health test failed")
+ }
+ }
+ return nil
+}
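+
+// Illustrative usage sketch (hypothetical caller): a full-entropy seed is
+// obtained by allocating (or reusing) a scratch buffer and retrying on a
+// startup health-test failure, as the drbg package does:
+//
+//	var mem entropy.ScratchBuffer
+//	seed, err := entropy.Seed(&mem)
+//	for err != nil {
+//		seed, err = entropy.Seed(&mem)
+//	}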
diff --git a/src/crypto/internal/fips140/entropy/sha384.go b/src/crypto/internal/fips140/entropy/sha384.go
new file mode 100644
index 00000000000000..c20f76b57979af
--- /dev/null
+++ b/src/crypto/internal/fips140/entropy/sha384.go
@@ -0,0 +1,226 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package entropy
+
+import "math/bits"
+
+// This file includes a SHA-384 implementation to insulate the entropy source
+// from any changes in the FIPS 140-3 module's crypto/internal/fips140/sha512
+// package. We support 1024-byte inputs for the entropy source, and arbitrary
+// length inputs for ACVP testing.
+
+var initState = [8]uint64{
+ 0xcbbb9d5dc1059ed8,
+ 0x629a292a367cd507,
+ 0x9159015a3070dd17,
+ 0x152fecd8f70e5939,
+ 0x67332667ffc00b31,
+ 0x8eb44a8768581511,
+ 0xdb0c2e0d64f98fa7,
+ 0x47b5481dbefa4fa4,
+}
+
+func SHA384(p *[1024]byte) [48]byte {
+ h := initState
+
+ sha384Block(&h, (*[128]byte)(p[0:128]))
+ sha384Block(&h, (*[128]byte)(p[128:256]))
+ sha384Block(&h, (*[128]byte)(p[256:384]))
+ sha384Block(&h, (*[128]byte)(p[384:512]))
+ sha384Block(&h, (*[128]byte)(p[512:640]))
+ sha384Block(&h, (*[128]byte)(p[640:768]))
+ sha384Block(&h, (*[128]byte)(p[768:896]))
+ sha384Block(&h, (*[128]byte)(p[896:1024]))
+
+ var padlen [128]byte
+ padlen[0] = 0x80
+ bePutUint64(padlen[112+8:], 1024*8)
+ sha384Block(&h, &padlen)
+
+ return digestBytes(&h)
+}
+
+func TestingOnlySHA384(p []byte) [48]byte {
+ if len(p) == 1024 {
+ return SHA384((*[1024]byte)(p))
+ }
+
+ h := initState
+ bitLen := uint64(len(p)) * 8
+
+ // Process full 128-byte blocks.
+ for len(p) >= 128 {
+ sha384Block(&h, (*[128]byte)(p[:128]))
+ p = p[128:]
+ }
+
+ // Process final block and padding.
+ var finalBlock [128]byte
+ copy(finalBlock[:], p)
+ finalBlock[len(p)] = 0x80
+ if len(p) >= 112 {
+ sha384Block(&h, &finalBlock)
+ finalBlock = [128]byte{}
+ }
+ bePutUint64(finalBlock[112+8:], bitLen)
+ sha384Block(&h, &finalBlock)
+
+ return digestBytes(&h)
+}
+
+func digestBytes(h *[8]uint64) [48]byte {
+ var digest [48]byte
+ bePutUint64(digest[0:], h[0])
+ bePutUint64(digest[8:], h[1])
+ bePutUint64(digest[16:], h[2])
+ bePutUint64(digest[24:], h[3])
+ bePutUint64(digest[32:], h[4])
+ bePutUint64(digest[40:], h[5])
+ return digest
+}
+
+var _K = [...]uint64{
+ 0x428a2f98d728ae22,
+ 0x7137449123ef65cd,
+ 0xb5c0fbcfec4d3b2f,
+ 0xe9b5dba58189dbbc,
+ 0x3956c25bf348b538,
+ 0x59f111f1b605d019,
+ 0x923f82a4af194f9b,
+ 0xab1c5ed5da6d8118,
+ 0xd807aa98a3030242,
+ 0x12835b0145706fbe,
+ 0x243185be4ee4b28c,
+ 0x550c7dc3d5ffb4e2,
+ 0x72be5d74f27b896f,
+ 0x80deb1fe3b1696b1,
+ 0x9bdc06a725c71235,
+ 0xc19bf174cf692694,
+ 0xe49b69c19ef14ad2,
+ 0xefbe4786384f25e3,
+ 0x0fc19dc68b8cd5b5,
+ 0x240ca1cc77ac9c65,
+ 0x2de92c6f592b0275,
+ 0x4a7484aa6ea6e483,
+ 0x5cb0a9dcbd41fbd4,
+ 0x76f988da831153b5,
+ 0x983e5152ee66dfab,
+ 0xa831c66d2db43210,
+ 0xb00327c898fb213f,
+ 0xbf597fc7beef0ee4,
+ 0xc6e00bf33da88fc2,
+ 0xd5a79147930aa725,
+ 0x06ca6351e003826f,
+ 0x142929670a0e6e70,
+ 0x27b70a8546d22ffc,
+ 0x2e1b21385c26c926,
+ 0x4d2c6dfc5ac42aed,
+ 0x53380d139d95b3df,
+ 0x650a73548baf63de,
+ 0x766a0abb3c77b2a8,
+ 0x81c2c92e47edaee6,
+ 0x92722c851482353b,
+ 0xa2bfe8a14cf10364,
+ 0xa81a664bbc423001,
+ 0xc24b8b70d0f89791,
+ 0xc76c51a30654be30,
+ 0xd192e819d6ef5218,
+ 0xd69906245565a910,
+ 0xf40e35855771202a,
+ 0x106aa07032bbd1b8,
+ 0x19a4c116b8d2d0c8,
+ 0x1e376c085141ab53,
+ 0x2748774cdf8eeb99,
+ 0x34b0bcb5e19b48a8,
+ 0x391c0cb3c5c95a63,
+ 0x4ed8aa4ae3418acb,
+ 0x5b9cca4f7763e373,
+ 0x682e6ff3d6b2b8a3,
+ 0x748f82ee5defb2fc,
+ 0x78a5636f43172f60,
+ 0x84c87814a1f0ab72,
+ 0x8cc702081a6439ec,
+ 0x90befffa23631e28,
+ 0xa4506cebde82bde9,
+ 0xbef9a3f7b2c67915,
+ 0xc67178f2e372532b,
+ 0xca273eceea26619c,
+ 0xd186b8c721c0c207,
+ 0xeada7dd6cde0eb1e,
+ 0xf57d4f7fee6ed178,
+ 0x06f067aa72176fba,
+ 0x0a637dc5a2c898a6,
+ 0x113f9804bef90dae,
+ 0x1b710b35131c471b,
+ 0x28db77f523047d84,
+ 0x32caab7b40c72493,
+ 0x3c9ebe0a15c9bebc,
+ 0x431d67c49c100d4c,
+ 0x4cc5d4becb3e42b6,
+ 0x597f299cfc657e2a,
+ 0x5fcb6fab3ad6faec,
+ 0x6c44198c4a475817,
+}
+
+func sha384Block(dh *[8]uint64, p *[128]byte) {
+ var w [80]uint64
+ for i := range 80 {
+ if i < 16 {
+ w[i] = beUint64(p[i*8:])
+ } else {
+ v1 := w[i-2]
+ t1 := bits.RotateLeft64(v1, -19) ^ bits.RotateLeft64(v1, -61) ^ (v1 >> 6)
+ v2 := w[i-15]
+ t2 := bits.RotateLeft64(v2, -1) ^ bits.RotateLeft64(v2, -8) ^ (v2 >> 7)
+
+ w[i] = t1 + w[i-7] + t2 + w[i-16]
+ }
+ }
+
+ a, b, c, d, e, f, g, h := dh[0], dh[1], dh[2], dh[3], dh[4], dh[5], dh[6], dh[7]
+
+ for i := range 80 {
+ t1 := h + (bits.RotateLeft64(e, -14) ^ bits.RotateLeft64(e, -18) ^
+ bits.RotateLeft64(e, -41)) + ((e & f) ^ (^e & g)) + _K[i] + w[i]
+ t2 := (bits.RotateLeft64(a, -28) ^ bits.RotateLeft64(a, -34) ^
+ bits.RotateLeft64(a, -39)) + ((a & b) ^ (a & c) ^ (b & c))
+
+ h = g
+ g = f
+ f = e
+ e = d + t1
+ d = c
+ c = b
+ b = a
+ a = t1 + t2
+ }
+
+ dh[0] += a
+ dh[1] += b
+ dh[2] += c
+ dh[3] += d
+ dh[4] += e
+ dh[5] += f
+ dh[6] += g
+ dh[7] += h
+}
+
+func beUint64(b []byte) uint64 {
+ _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
+ uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
+}
+
+func bePutUint64(b []byte, v uint64) {
+ _ = b[7] // early bounds check to guarantee safety of writes below
+ b[0] = byte(v >> 56)
+ b[1] = byte(v >> 48)
+ b[2] = byte(v >> 40)
+ b[3] = byte(v >> 32)
+ b[4] = byte(v >> 24)
+ b[5] = byte(v >> 16)
+ b[6] = byte(v >> 8)
+ b[7] = byte(v)
+}
diff --git a/src/crypto/internal/fips140/fips140.go b/src/crypto/internal/fips140/fips140.go
index d03219b540e27f..76054b00684e2b 100644
--- a/src/crypto/internal/fips140/fips140.go
+++ b/src/crypto/internal/fips140/fips140.go
@@ -48,6 +48,8 @@ func Supported() error {
}
// See EnableFIPS in cmd/internal/obj/fips.go for commentary.
+ // Also, js/wasm and windows/386 don't have good enough timers
+ // for the CPU jitter entropy source.
switch {
case runtime.GOARCH == "wasm",
runtime.GOOS == "windows" && runtime.GOARCH == "386",
diff --git a/src/crypto/internal/fips140/mlkem/cast.go b/src/crypto/internal/fips140/mlkem/cast.go
index a432d1fdab0e2b..ea089c1b76c0c0 100644
--- a/src/crypto/internal/fips140/mlkem/cast.go
+++ b/src/crypto/internal/fips140/mlkem/cast.go
@@ -9,9 +9,10 @@ import (
"crypto/internal/fips140"
_ "crypto/internal/fips140/check"
"errors"
+ "sync"
)
-func init() {
+var fipsSelfTest = sync.OnceFunc(func() {
fips140.CAST("ML-KEM-768", func() error {
var d = &[32]byte{
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
@@ -40,14 +41,12 @@ func init() {
dk := &DecapsulationKey768{}
kemKeyGen(dk, d, z)
ek := dk.EncapsulationKey()
- Ke, c := ek.EncapsulateInternal(m)
- Kd, err := dk.Decapsulate(c)
- if err != nil {
- return err
- }
+ var cc [CiphertextSize768]byte
+ Ke, _ := kemEncaps(&cc, ek, m)
+ Kd := kemDecaps(dk, &cc)
if !bytes.Equal(Ke, K) || !bytes.Equal(Kd, K) {
return errors.New("unexpected result")
}
return nil
})
-}
+})
diff --git a/src/crypto/internal/fips140/mlkem/mlkem1024.go b/src/crypto/internal/fips140/mlkem/mlkem1024.go
index 1419cf20fa9c67..edde161422cb6f 100644
--- a/src/crypto/internal/fips140/mlkem/mlkem1024.go
+++ b/src/crypto/internal/fips140/mlkem/mlkem1024.go
@@ -113,6 +113,7 @@ func GenerateKey1024() (*DecapsulationKey1024, error) {
}
func generateKey1024(dk *DecapsulationKey1024) (*DecapsulationKey1024, error) {
+ fipsSelfTest()
var d [32]byte
drbg.Read(d[:])
var z [32]byte
@@ -126,6 +127,7 @@ func generateKey1024(dk *DecapsulationKey1024) (*DecapsulationKey1024, error) {
// GenerateKeyInternal1024 is a derandomized version of GenerateKey1024,
// exclusively for use in tests.
func GenerateKeyInternal1024(d, z *[32]byte) *DecapsulationKey1024 {
+ fipsSelfTest()
dk := &DecapsulationKey1024{}
kemKeyGen1024(dk, d, z)
return dk
@@ -278,6 +280,7 @@ func (ek *EncapsulationKey1024) Encapsulate() (sharedKey, ciphertext []byte) {
}
func (ek *EncapsulationKey1024) encapsulate(cc *[CiphertextSize1024]byte) (sharedKey, ciphertext []byte) {
+ fipsSelfTest()
var m [messageSize]byte
drbg.Read(m[:])
// Note that the modulus check (step 2 of the encapsulation key check from
@@ -289,6 +292,7 @@ func (ek *EncapsulationKey1024) encapsulate(cc *[CiphertextSize1024]byte) (share
// EncapsulateInternal is a derandomized version of Encapsulate, exclusively for
// use in tests.
func (ek *EncapsulationKey1024) EncapsulateInternal(m *[32]byte) (sharedKey, ciphertext []byte) {
+ fipsSelfTest()
cc := &[CiphertextSize1024]byte{}
return kemEncaps1024(cc, ek, m)
}
@@ -394,6 +398,7 @@ func pkeEncrypt1024(cc *[CiphertextSize1024]byte, ex *encryptionKey1024, m *[mes
//
// The shared key must be kept secret.
func (dk *DecapsulationKey1024) Decapsulate(ciphertext []byte) (sharedKey []byte, err error) {
+ fipsSelfTest()
if len(ciphertext) != CiphertextSize1024 {
return nil, errors.New("mlkem: invalid ciphertext length")
}
diff --git a/src/crypto/internal/fips140/mlkem/mlkem768.go b/src/crypto/internal/fips140/mlkem/mlkem768.go
index 298660e4e977dd..088c2954de6a5c 100644
--- a/src/crypto/internal/fips140/mlkem/mlkem768.go
+++ b/src/crypto/internal/fips140/mlkem/mlkem768.go
@@ -172,6 +172,7 @@ func GenerateKey768() (*DecapsulationKey768, error) {
}
func generateKey(dk *DecapsulationKey768) (*DecapsulationKey768, error) {
+ fipsSelfTest()
var d [32]byte
drbg.Read(d[:])
var z [32]byte
@@ -185,6 +186,7 @@ func generateKey(dk *DecapsulationKey768) (*DecapsulationKey768, error) {
// GenerateKeyInternal768 is a derandomized version of GenerateKey768,
// exclusively for use in tests.
func GenerateKeyInternal768(d, z *[32]byte) *DecapsulationKey768 {
+ fipsSelfTest()
dk := &DecapsulationKey768{}
kemKeyGen(dk, d, z)
return dk
@@ -337,6 +339,7 @@ func (ek *EncapsulationKey768) Encapsulate() (sharedKey, ciphertext []byte) {
}
func (ek *EncapsulationKey768) encapsulate(cc *[CiphertextSize768]byte) (sharedKey, ciphertext []byte) {
+ fipsSelfTest()
var m [messageSize]byte
drbg.Read(m[:])
// Note that the modulus check (step 2 of the encapsulation key check from
@@ -348,6 +351,7 @@ func (ek *EncapsulationKey768) encapsulate(cc *[CiphertextSize768]byte) (sharedK
// EncapsulateInternal is a derandomized version of Encapsulate, exclusively for
// use in tests.
func (ek *EncapsulationKey768) EncapsulateInternal(m *[32]byte) (sharedKey, ciphertext []byte) {
+ fipsSelfTest()
cc := &[CiphertextSize768]byte{}
return kemEncaps(cc, ek, m)
}
@@ -453,6 +457,7 @@ func pkeEncrypt(cc *[CiphertextSize768]byte, ex *encryptionKey, m *[messageSize]
//
// The shared key must be kept secret.
func (dk *DecapsulationKey768) Decapsulate(ciphertext []byte) (sharedKey []byte, err error) {
+ fipsSelfTest()
if len(ciphertext) != CiphertextSize768 {
return nil, errors.New("mlkem: invalid ciphertext length")
}
diff --git a/src/crypto/internal/fips140/subtle/xor_asm.go b/src/crypto/internal/fips140/subtle/xor_asm.go
index b07239da3e31c1..bb85aefef4013e 100644
--- a/src/crypto/internal/fips140/subtle/xor_asm.go
+++ b/src/crypto/internal/fips140/subtle/xor_asm.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build (amd64 || arm64 || mips || mipsle || mips64 || mips64le || ppc64 || ppc64le || riscv64) && !purego
+//go:build (amd64 || arm64 || ppc64 || ppc64le || riscv64) && !purego
package subtle
diff --git a/src/crypto/internal/fips140/subtle/xor_generic.go b/src/crypto/internal/fips140/subtle/xor_generic.go
index ed484bc630e98d..0b31eec60197d3 100644
--- a/src/crypto/internal/fips140/subtle/xor_generic.go
+++ b/src/crypto/internal/fips140/subtle/xor_generic.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build (!amd64 && !arm64 && !loong64 && !mips && !mipsle && !mips64 && !mips64le && !ppc64 && !ppc64le && !riscv64) || purego
+//go:build (!amd64 && !arm64 && !loong64 && !ppc64 && !ppc64le && !riscv64) || purego
package subtle
diff --git a/src/crypto/internal/fips140/subtle/xor_mips64x.s b/src/crypto/internal/fips140/subtle/xor_mips64x.s
deleted file mode 100644
index e580235914aeaf..00000000000000
--- a/src/crypto/internal/fips140/subtle/xor_mips64x.s
+++ /dev/null
@@ -1,153 +0,0 @@
-// Copyright 2025 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build (mips64 || mips64le) && !purego
-
-#include "textflag.h"
-
-// func xorBytes(dst, a, b *byte, n int)
-TEXT ·xorBytes(SB), NOSPLIT|NOFRAME, $0
- MOVV dst+0(FP), R1
- MOVV a+8(FP), R2
- MOVV b+16(FP), R3
- MOVV n+24(FP), R4
-
-xor_64_check:
- SGTU $64, R4, R5 // R5 = 1 if (64 > R4)
- BNE R5, xor_32_check
-xor_64:
- MOVV (R2), R6
- MOVV 8(R2), R7
- MOVV 16(R2), R8
- MOVV 24(R2), R9
- MOVV (R3), R10
- MOVV 8(R3), R11
- MOVV 16(R3), R12
- MOVV 24(R3), R13
- XOR R6, R10
- XOR R7, R11
- XOR R8, R12
- XOR R9, R13
- MOVV R10, (R1)
- MOVV R11, 8(R1)
- MOVV R12, 16(R1)
- MOVV R13, 24(R1)
- MOVV 32(R2), R6
- MOVV 40(R2), R7
- MOVV 48(R2), R8
- MOVV 56(R2), R9
- MOVV 32(R3), R10
- MOVV 40(R3), R11
- MOVV 48(R3), R12
- MOVV 56(R3), R13
- XOR R6, R10
- XOR R7, R11
- XOR R8, R12
- XOR R9, R13
- MOVV R10, 32(R1)
- MOVV R11, 40(R1)
- MOVV R12, 48(R1)
- MOVV R13, 56(R1)
- ADDV $64, R2
- ADDV $64, R3
- ADDV $64, R1
- SUBV $64, R4
- SGTU $64, R4, R5
- BEQ R0, R5, xor_64
- BEQ R0, R4, end
-
-xor_32_check:
- SGTU $32, R4, R5
- BNE R5, xor_16_check
-xor_32:
- MOVV (R2), R6
- MOVV 8(R2), R7
- MOVV 16(R2), R8
- MOVV 24(R2), R9
- MOVV (R3), R10
- MOVV 8(R3), R11
- MOVV 16(R3), R12
- MOVV 24(R3), R13
- XOR R6, R10
- XOR R7, R11
- XOR R8, R12
- XOR R9, R13
- MOVV R10, (R1)
- MOVV R11, 8(R1)
- MOVV R12, 16(R1)
- MOVV R13, 24(R1)
- ADDV $32, R2
- ADDV $32, R3
- ADDV $32, R1
- SUBV $32, R4
- BEQ R0, R4, end
-
-xor_16_check:
- SGTU $16, R4, R5
- BNE R5, xor_8_check
-xor_16:
- MOVV (R2), R6
- MOVV 8(R2), R7
- MOVV (R3), R8
- MOVV 8(R3), R9
- XOR R6, R8
- XOR R7, R9
- MOVV R8, (R1)
- MOVV R9, 8(R1)
- ADDV $16, R2
- ADDV $16, R3
- ADDV $16, R1
- SUBV $16, R4
- BEQ R0, R4, end
-
-xor_8_check:
- SGTU $8, R4, R5
- BNE R5, xor_4_check
-xor_8:
- MOVV (R2), R6
- MOVV (R3), R7
- XOR R6, R7
- MOVV R7, (R1)
- ADDV $8, R1
- ADDV $8, R2
- ADDV $8, R3
- SUBV $8, R4
- BEQ R0, R4, end
-
-xor_4_check:
- SGTU $4, R4, R5
- BNE R5, xor_2_check
-xor_4:
- MOVW (R2), R6
- MOVW (R3), R7
- XOR R6, R7
- MOVW R7, (R1)
- ADDV $4, R2
- ADDV $4, R3
- ADDV $4, R1
- SUBV $4, R4
- BEQ R0, R4, end
-
-xor_2_check:
- SGTU $2, R4, R5
- BNE R5, xor_1
-xor_2:
- MOVH (R2), R6
- MOVH (R3), R7
- XOR R6, R7
- MOVH R7, (R1)
- ADDV $2, R2
- ADDV $2, R3
- ADDV $2, R1
- SUBV $2, R4
- BEQ R0, R4, end
-
-xor_1:
- MOVB (R2), R6
- MOVB (R3), R7
- XOR R6, R7
- MOVB R7, (R1)
-
-end:
- RET
diff --git a/src/crypto/internal/fips140/subtle/xor_mipsx.s b/src/crypto/internal/fips140/subtle/xor_mipsx.s
deleted file mode 100644
index 1a6b3f409dddc9..00000000000000
--- a/src/crypto/internal/fips140/subtle/xor_mipsx.s
+++ /dev/null
@@ -1,212 +0,0 @@
-// Copyright 2025 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build (mips || mipsle) && !purego
-
-#include "textflag.h"
-
-// func xorBytes(dst, a, b *byte, n int)
-TEXT ·xorBytes(SB), NOSPLIT|NOFRAME, $0
- MOVW dst+0(FP), R1
- MOVW a+4(FP), R2
- MOVW b+8(FP), R3
- MOVW n+12(FP), R4
-
- SGTU $64, R4, R5 // R5 = 1 if (64 > R4)
- BNE R5, xor_32_check
-xor_64:
- MOVW (R2), R6
- MOVW 4(R2), R7
- MOVW 8(R2), R8
- MOVW 12(R2), R9
- MOVW (R3), R10
- MOVW 4(R3), R11
- MOVW 8(R3), R12
- MOVW 12(R3), R13
- XOR R6, R10
- XOR R7, R11
- XOR R8, R12
- XOR R9, R13
- MOVW R10, (R1)
- MOVW R11, 4(R1)
- MOVW R12, 8(R1)
- MOVW R13, 12(R1)
- MOVW 16(R2), R6
- MOVW 20(R2), R7
- MOVW 24(R2), R8
- MOVW 28(R2), R9
- MOVW 16(R3), R10
- MOVW 20(R3), R11
- MOVW 24(R3), R12
- MOVW 28(R3), R13
- XOR R6, R10
- XOR R7, R11
- XOR R8, R12
- XOR R9, R13
- MOVW R10, 16(R1)
- MOVW R11, 20(R1)
- MOVW R12, 24(R1)
- MOVW R13, 28(R1)
- MOVW 32(R2), R6
- MOVW 36(R2), R7
- MOVW 40(R2), R8
- MOVW 44(R2), R9
- MOVW 32(R3), R10
- MOVW 36(R3), R11
- MOVW 40(R3), R12
- MOVW 44(R3), R13
- XOR R6, R10
- XOR R7, R11
- XOR R8, R12
- XOR R9, R13
- MOVW R10, 32(R1)
- MOVW R11, 36(R1)
- MOVW R12, 40(R1)
- MOVW R13, 44(R1)
- MOVW 48(R2), R6
- MOVW 52(R2), R7
- MOVW 56(R2), R8
- MOVW 60(R2), R9
- MOVW 48(R3), R10
- MOVW 52(R3), R11
- MOVW 56(R3), R12
- MOVW 60(R3), R13
- XOR R6, R10
- XOR R7, R11
- XOR R8, R12
- XOR R9, R13
- MOVW R10, 48(R1)
- MOVW R11, 52(R1)
- MOVW R12, 56(R1)
- MOVW R13, 60(R1)
- ADD $64, R2
- ADD $64, R3
- ADD $64, R1
- SUB $64, R4
- SGTU $64, R4, R5
- BEQ R0, R5, xor_64
- BEQ R0, R4, end
-
-xor_32_check:
- SGTU $32, R4, R5
- BNE R5, xor_16_check
-xor_32:
- MOVW (R2), R6
- MOVW 4(R2), R7
- MOVW 8(R2), R8
- MOVW 12(R2), R9
- MOVW (R3), R10
- MOVW 4(R3), R11
- MOVW 8(R3), R12
- MOVW 12(R3), R13
- XOR R6, R10
- XOR R7, R11
- XOR R8, R12
- XOR R9, R13
- MOVW R10, (R1)
- MOVW R11, 4(R1)
- MOVW R12, 8(R1)
- MOVW R13, 12(R1)
- MOVW 16(R2), R6
- MOVW 20(R2), R7
- MOVW 24(R2), R8
- MOVW 28(R2), R9
- MOVW 16(R3), R10
- MOVW 20(R3), R11
- MOVW 24(R3), R12
- MOVW 28(R3), R13
- XOR R6, R10
- XOR R7, R11
- XOR R8, R12
- XOR R9, R13
- MOVW R10, 16(R1)
- MOVW R11, 20(R1)
- MOVW R12, 24(R1)
- MOVW R13, 28(R1)
- ADD $32, R2
- ADD $32, R3
- ADD $32, R1
- SUB $32, R4
- BEQ R0, R4, end
-
-xor_16_check:
- SGTU $16, R4, R5
- BNE R5, xor_8_check
-xor_16:
- MOVW (R2), R6
- MOVW 4(R2), R7
- MOVW 8(R2), R8
- MOVW 12(R2), R9
- MOVW (R3), R10
- MOVW 4(R3), R11
- MOVW 8(R3), R12
- MOVW 12(R3), R13
- XOR R6, R10
- XOR R7, R11
- XOR R8, R12
- XOR R9, R13
- MOVW R10, (R1)
- MOVW R11, 4(R1)
- MOVW R12, 8(R1)
- MOVW R13, 12(R1)
- ADD $16, R2
- ADD $16, R3
- ADD $16, R1
- SUB $16, R4
- BEQ R0, R4, end
-
-xor_8_check:
- SGTU $8, R4, R5
- BNE R5, xor_4_check
-xor_8:
- MOVW (R2), R6
- MOVW 4(R2), R7
- MOVW (R3), R8
- MOVW 4(R3), R9
- XOR R6, R8
- XOR R7, R9
- MOVW R8, (R1)
- MOVW R9, 4(R1)
- ADD $8, R1
- ADD $8, R2
- ADD $8, R3
- SUB $8, R4
- BEQ R0, R4, end
-
-xor_4_check:
- SGTU $4, R4, R5
- BNE R5, xor_2_check
-xor_4:
- MOVW (R2), R6
- MOVW (R3), R7
- XOR R6, R7
- MOVW R7, (R1)
- ADD $4, R2
- ADD $4, R3
- ADD $4, R1
- SUB $4, R4
- BEQ R0, R4, end
-
-xor_2_check:
- SGTU $2, R4, R5
- BNE R5, xor_1
-xor_2:
- MOVH (R2), R6
- MOVH (R3), R7
- XOR R6, R7
- MOVH R7, (R1)
- ADD $2, R2
- ADD $2, R3
- ADD $2, R1
- SUB $2, R4
- BEQ R0, R4, end
-
-xor_1:
- MOVB (R2), R6
- MOVB (R3), R7
- XOR R6, R7
- MOVB R7, (R1)
-
-end:
- RET
diff --git a/src/crypto/internal/fips140deps/fipsdeps_test.go b/src/crypto/internal/fips140deps/fipsdeps_test.go
index 2c3bc8184e71bc..97552dc1ce10f1 100644
--- a/src/crypto/internal/fips140deps/fipsdeps_test.go
+++ b/src/crypto/internal/fips140deps/fipsdeps_test.go
@@ -88,7 +88,8 @@ func TestImports(t *testing.T) {
}
}
- // Ensure that all packages except check and check's dependencies import check.
+ // Ensure that all packages except check, check's dependencies, and the
+ // entropy source (which is used only from .../fips140/drbg) import check.
for pkg := range allPackages {
switch pkg {
case "crypto/internal/fips140/check":
@@ -99,6 +100,7 @@ func TestImports(t *testing.T) {
case "crypto/internal/fips140/sha3":
case "crypto/internal/fips140/sha256":
case "crypto/internal/fips140/sha512":
+ case "crypto/internal/fips140/entropy":
default:
if !importCheck[pkg] {
t.Errorf("package %s does not import crypto/internal/fips140/check", pkg)
diff --git a/src/crypto/internal/fips140deps/time/time.go b/src/crypto/internal/fips140deps/time/time.go
new file mode 100644
index 00000000000000..eea37b772e4351
--- /dev/null
+++ b/src/crypto/internal/fips140deps/time/time.go
@@ -0,0 +1,21 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !windows
+
+package time
+
+import "time"
+
+var start = time.Now()
+
+// HighPrecisionNow returns a high-resolution timestamp suitable for measuring
+// small time differences. It uses the time package's monotonic clock.
+//
+// Its unit, epoch, and resolution are unspecified, and may change, but can be
+// assumed to be sufficiently precise to measure time differences on the order
+// of tens to hundreds of nanoseconds.
+func HighPrecisionNow() int64 {
+ return int64(time.Since(start))
+}
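As a rough illustration of how such a timestamp might be consumed (a hypothetical sketch, not code from the patch; the package is internal to the standard library and cannot be imported elsewhere):

	// collectDeltas records the differences between consecutive
	// HighPrecisionNow readings, the kind of raw timing jitter an
	// entropy source can condition into seed material.
	func collectDeltas(n int) []int64 {
		deltas := make([]int64, n)
		prev := HighPrecisionNow()
		for i := range deltas {
			now := HighPrecisionNow()
			deltas[i] = now - prev // unit and epoch are unspecified; only differences matter
			prev = now
		}
		return deltas
	}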
diff --git a/src/crypto/internal/fips140deps/time/time_windows.go b/src/crypto/internal/fips140deps/time/time_windows.go
new file mode 100644
index 00000000000000..410ede4ee91705
--- /dev/null
+++ b/src/crypto/internal/fips140deps/time/time_windows.go
@@ -0,0 +1,17 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package time
+
+import "internal/syscall/windows"
+
+// HighPrecisionNow returns a high-resolution timestamp suitable for measuring
+// small time differences. It uses Windows' QueryPerformanceCounter.
+//
+// Its unit, epoch, and resolution are unspecified, and may change, but can be
+// assumed to be sufficiently precise to measure time differences on the order
+// of tens to hundreds of nanoseconds.
+func HighPrecisionNow() int64 {
+ return windows.QueryPerformanceCounter()
+}
diff --git a/src/crypto/internal/fips140test/cast_test.go b/src/crypto/internal/fips140test/cast_test.go
index b043a71f04effa..5bbc964b617b2b 100644
--- a/src/crypto/internal/fips140test/cast_test.go
+++ b/src/crypto/internal/fips140test/cast_test.go
@@ -48,8 +48,8 @@ var allCASTs = []string{
"HKDF-SHA2-256",
"HMAC-SHA2-256",
"KAS-ECC-SSC P-256",
- "ML-KEM PCT",
- "ML-KEM PCT",
+ "ML-KEM PCT", // -768
+ "ML-KEM PCT", // -1024
"ML-KEM-768",
"PBKDF2",
"RSA sign and verify PCT",
@@ -104,29 +104,44 @@ func TestAllCASTs(t *testing.T) {
// TestConditionals causes the conditional CASTs and PCTs to be invoked.
func TestConditionals(t *testing.T) {
- mlkem.GenerateKey768()
+ // ML-KEM PCT
+ kMLKEM, err := mlkem.GenerateKey768()
+ if err != nil {
+ t.Error(err)
+ } else {
+ // ML-KEM-768
+ kMLKEM.EncapsulationKey().Encapsulate()
+ }
+ // ECDH PCT
kDH, err := ecdh.GenerateKey(ecdh.P256(), rand.Reader)
if err != nil {
t.Error(err)
} else {
+ // KAS-ECC-SSC P-256
ecdh.ECDH(ecdh.P256(), kDH, kDH.PublicKey())
}
+ // ECDSA PCT
kDSA, err := ecdsa.GenerateKey(ecdsa.P256(), rand.Reader)
if err != nil {
t.Error(err)
} else {
+ // ECDSA P-256 SHA2-512 sign and verify
ecdsa.SignDeterministic(ecdsa.P256(), sha256.New, kDSA, make([]byte, 32))
}
+ // Ed25519 sign and verify PCT
k25519, err := ed25519.GenerateKey()
if err != nil {
t.Error(err)
} else {
+ // Ed25519 sign and verify
ed25519.Sign(k25519, make([]byte, 32))
}
+ // RSA sign and verify PCT
kRSA, err := rsa.GenerateKey(rand.Reader, 2048)
if err != nil {
t.Error(err)
} else {
+ // RSASSA-PKCS-v1.5 2048-bit sign and verify
rsa.SignPKCS1v15(kRSA, crypto.SHA256.String(), make([]byte, 32))
}
t.Log("completed successfully")
diff --git a/src/crypto/internal/fips140test/entropy_test.go b/src/crypto/internal/fips140test/entropy_test.go
new file mode 100644
index 00000000000000..a33e2e7bbc907d
--- /dev/null
+++ b/src/crypto/internal/fips140test/entropy_test.go
@@ -0,0 +1,276 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !fips140v1.0
+
+package fipstest
+
+import (
+ "bytes"
+ "crypto/internal/cryptotest"
+ "crypto/internal/fips140/drbg"
+ "crypto/internal/fips140/entropy"
+ "crypto/rand"
+ "crypto/sha256"
+ "crypto/sha512"
+ "encoding/hex"
+ "flag"
+ "fmt"
+ "internal/testenv"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "testing"
+ "time"
+)
+
+var flagEntropySamples = flag.String("entropy-samples", "", "store entropy samples with the provided `suffix`")
+var flagNISTSP80090B = flag.Bool("nist-sp800-90b", false, "run NIST SP 800-90B tests (requires docker)")
+
+func TestEntropySamples(t *testing.T) {
+ cryptotest.MustSupportFIPS140(t)
+ now := time.Now().UTC()
+
+ var seqSamples [1_000_000]uint8
+ samplesOrTryAgain(t, seqSamples[:])
+ seqSamplesName := fmt.Sprintf("entropy_samples_sequential_%s_%s_%s_%s_%s.bin", entropy.Version(),
+ runtime.GOOS, runtime.GOARCH, *flagEntropySamples, now.Format("20060102T150405Z"))
+ if *flagEntropySamples != "" {
+ if err := os.WriteFile(seqSamplesName, seqSamples[:], 0644); err != nil {
+ t.Fatalf("failed to write samples to %q: %v", seqSamplesName, err)
+ }
+ t.Logf("wrote %s", seqSamplesName)
+ }
+
+ var restartSamples [1000][1000]uint8
+ for i := range restartSamples {
+ var samples [1024]uint8
+ samplesOrTryAgain(t, samples[:])
+ copy(restartSamples[i][:], samples[:])
+ }
+ restartSamplesName := fmt.Sprintf("entropy_samples_restart_%s_%s_%s_%s_%s.bin", entropy.Version(),
+ runtime.GOOS, runtime.GOARCH, *flagEntropySamples, now.Format("20060102T150405Z"))
+ if *flagEntropySamples != "" {
+ f, err := os.Create(restartSamplesName)
+ if err != nil {
+ t.Fatalf("failed to create %q: %v", restartSamplesName, err)
+ }
+ for i := range restartSamples {
+ if _, err := f.Write(restartSamples[i][:]); err != nil {
+ t.Fatalf("failed to write samples to %q: %v", restartSamplesName, err)
+ }
+ }
+ if err := f.Close(); err != nil {
+ t.Fatalf("failed to close %q: %v", restartSamplesName, err)
+ }
+ t.Logf("wrote %s", restartSamplesName)
+ }
+
+ if *flagNISTSP80090B {
+ if *flagEntropySamples == "" {
+ t.Fatalf("-nist-sp800-90b requires -entropy-samples to be set too")
+ }
+
+ // Check if the nist-sp800-90b docker image is already present,
+ // and build it otherwise.
+ if err := testenv.Command(t,
+ "docker", "image", "inspect", "nist-sp800-90b",
+ ).Run(); err != nil {
+ t.Logf("building nist-sp800-90b docker image")
+ dockerfile := filepath.Join(t.TempDir(), "Dockerfile.SP800-90B_EntropyAssessment")
+ if err := os.WriteFile(dockerfile, []byte(NISTSP80090BDockerfile), 0644); err != nil {
+ t.Fatalf("failed to write Dockerfile: %v", err)
+ }
+ out, err := testenv.Command(t,
+ "docker", "build", "-t", "nist-sp800-90b", "-f", dockerfile, "/var/empty",
+ ).CombinedOutput()
+ if err != nil {
+ t.Fatalf("failed to build nist-sp800-90b docker image: %v\n%s", err, out)
+ }
+ }
+
+ pwd, err := os.Getwd()
+ if err != nil {
+ t.Fatalf("failed to get current working directory: %v", err)
+ }
+ t.Logf("running ea_non_iid analysis")
+ out, err := testenv.Command(t,
+ "docker", "run", "--rm", "-v", fmt.Sprintf("%s:%s", pwd, pwd), "-w", pwd,
+ "nist-sp800-90b", "ea_non_iid", seqSamplesName, "8",
+ ).CombinedOutput()
+ if err != nil {
+ t.Fatalf("ea_non_iid failed: %v\n%s", err, out)
+ }
+ t.Logf("\n%s", out)
+
+ H_I := string(out)
+ H_I = strings.TrimSpace(H_I[strings.LastIndexByte(H_I, ' ')+1:])
+ t.Logf("running ea_restart analysis with H_I = %s", H_I)
+ out, err = testenv.Command(t,
+ "docker", "run", "--rm", "-v", fmt.Sprintf("%s:%s", pwd, pwd), "-w", pwd,
+ "nist-sp800-90b", "ea_restart", restartSamplesName, "8", H_I,
+ ).CombinedOutput()
+ if err != nil {
+ t.Fatalf("ea_restart failed: %v\n%s", err, out)
+ }
+ t.Logf("\n%s", out)
+ }
+}
+
+var NISTSP80090BDockerfile = `
+FROM ubuntu:24.04
+RUN apt-get update && apt-get install -y build-essential git \
+ libbz2-dev libdivsufsort-dev libjsoncpp-dev libgmp-dev libmpfr-dev libssl-dev \
+ && rm -rf /var/lib/apt/lists/*
+RUN git clone --depth 1 https://github.com/usnistgov/SP800-90B_EntropyAssessment.git
+RUN cd SP800-90B_EntropyAssessment && git checkout 8924f158c97e7b805e0f95247403ad4c44b9cd6f
+WORKDIR ./SP800-90B_EntropyAssessment/cpp/
+RUN make all
+RUN cd selftest && ./selftest
+RUN cp ea_non_iid ea_restart /usr/local/bin/
+`
+
+var memory entropy.ScratchBuffer
+
+// samplesOrTryAgain calls entropy.Samples up to 10 times until it succeeds.
+// Samples has a non-negligible chance of failing the health tests, as required
+// by SP 800-90B.
+func samplesOrTryAgain(t *testing.T, samples []uint8) {
+ t.Helper()
+ for range 10 {
+ if err := entropy.Samples(samples, &memory); err != nil {
+ t.Logf("entropy.Samples() failed: %v", err)
+ continue
+ }
+ return
+ }
+ t.Fatal("entropy.Samples() failed 10 times in a row")
+}
+
+func TestEntropySHA384(t *testing.T) {
+ var input [1024]uint8
+ for i := range input {
+ input[i] = uint8(i)
+ }
+ want := sha512.Sum384(input[:])
+ got := entropy.SHA384(&input)
+ if got != want {
+ t.Errorf("SHA384() = %x, want %x", got, want)
+ }
+
+ for l := range 1024*3 + 1 {
+ input := make([]byte, l)
+ rand.Read(input)
+ want := sha512.Sum384(input)
+ got := entropy.TestingOnlySHA384(input)
+ if got != want {
+ t.Errorf("TestingOnlySHA384(%d bytes) = %x, want %x", l, got, want)
+ }
+ }
+}
+
+func TestEntropyRepetitionCountTest(t *testing.T) {
+ good := bytes.Repeat(append(bytes.Repeat([]uint8{42}, 40), 1), 100)
+ if err := entropy.RepetitionCountTest(good); err != nil {
+ t.Errorf("RepetitionCountTest(good) = %v, want nil", err)
+ }
+
+ bad := bytes.Repeat([]uint8{0}, 40)
+ bad = append(bad, bytes.Repeat([]uint8{1}, 40)...)
+ bad = append(bad, bytes.Repeat([]uint8{42}, 41)...)
+ bad = append(bad, bytes.Repeat([]uint8{2}, 40)...)
+ if err := entropy.RepetitionCountTest(bad); err == nil {
+ t.Error("RepetitionCountTest(bad) = nil, want error")
+ }
+
+ bad = bytes.Repeat([]uint8{42}, 41)
+ if err := entropy.RepetitionCountTest(bad); err == nil {
+ t.Error("RepetitionCountTest(bad) = nil, want error")
+ }
+}
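From the boundary cases above, the repetition count test appears to reject any run of 41 or more identical consecutive samples, in the spirit of SP 800-90B section 4.4.1. A hedged, generic sketch (the cutoff is inferred from the test data, not taken from the entropy package; assumes "fmt" is imported):

	func repetitionCountTest(samples []uint8, cutoff int) error {
		run := 1
		for i := 1; i < len(samples); i++ {
			if samples[i] == samples[i-1] {
				run++
				if run >= cutoff {
					return fmt.Errorf("sample %d repeated %d times", samples[i], run)
				}
			} else {
				run = 1
			}
		}
		return nil
	}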
+
+func TestEntropyAdaptiveProportionTest(t *testing.T) {
+ good := bytes.Repeat([]uint8{0}, 409)
+ good = append(good, bytes.Repeat([]uint8{1}, 512-409)...)
+ good = append(good, bytes.Repeat([]uint8{0}, 409)...)
+ if err := entropy.AdaptiveProportionTest(good); err != nil {
+ t.Errorf("AdaptiveProportionTest(good) = %v, want nil", err)
+ }
+
+ // These fall out of the window.
+ bad := bytes.Repeat([]uint8{1}, 100)
+ bad = append(bad, bytes.Repeat([]uint8{1, 2, 3, 4, 5, 6}, 100)...)
+ // These are in the window.
+ bad = append(bad, bytes.Repeat([]uint8{42}, 410)...)
+ if err := entropy.AdaptiveProportionTest(bad[:len(bad)-1]); err != nil {
+ t.Errorf("AdaptiveProportionTest(bad[:len(bad)-1]) = %v, want nil", err)
+ }
+ if err := entropy.AdaptiveProportionTest(bad); err == nil {
+ t.Error("AdaptiveProportionTest(bad) = nil, want error")
+ }
+}
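The adaptive proportion cases suggest a 512-sample window with a cutoff of 410 occurrences of any single value, in the spirit of SP 800-90B section 4.4.2. The entropy package's exact windowing isn't shown here, so the sliding-window sketch below is an assumption that merely reproduces the pass/fail boundaries in the test above (assumes "fmt" is imported):

	func adaptiveProportionTest(samples []uint8, window, cutoff int) error {
		var counts [256]int
		for i, s := range samples {
			counts[s]++
			if i >= window {
				counts[samples[i-window]]-- // drop the sample leaving the window
			}
			if counts[s] >= cutoff {
				return fmt.Errorf("value %d seen %d times within %d samples", s, counts[s], window)
			}
		}
		return nil
	}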
+
+func TestEntropyUnchanged(t *testing.T) {
+ testenv.MustHaveSource(t)
+
+ h := sha256.New()
+ root := os.DirFS("../fips140/entropy")
+ if err := fs.WalkDir(root, ".", func(path string, d fs.DirEntry, err error) error {
+ if err != nil {
+ return err
+ }
+ if d.IsDir() {
+ return nil
+ }
+ data, err := fs.ReadFile(root, path)
+ if err != nil {
+ return err
+ }
+ t.Logf("Hashing %s (%d bytes)", path, len(data))
+ fmt.Fprintf(h, "%s %d\n", path, len(data))
+ h.Write(data)
+ return nil
+ }); err != nil {
+ t.Fatalf("WalkDir: %v", err)
+ }
+
+ // The crypto/internal/fips140/entropy package is certified as a FIPS 140-3
+ // entropy source through the Entropy Source Validation program,
+ // independently of the FIPS 140-3 module. It must not change even across
+ // FIPS 140-3 module versions, in order to reuse the ESV certificate.
+ exp := "2541273241ae8aafe55026328354ed3799df1e2fb308b2097833203a42911b53"
+ if got := hex.EncodeToString(h.Sum(nil)); got != exp {
+ t.Errorf("hash of crypto/internal/fips140/entropy = %s, want %s", got, exp)
+ }
+}
+
+func TestEntropyRace(t *testing.T) {
+ // Check that concurrent calls to Seed don't trigger the race detector.
+ for range 16 {
+ go func() {
+ _, _ = entropy.Seed(&memory)
+ }()
+ }
+ // Same, with the higher-level DRBG.
+ for range 16 {
+ go func() {
+ var b [64]byte
+ drbg.Read(b[:])
+ }()
+ }
+}
+
+var sink byte
+
+func BenchmarkEntropySeed(b *testing.B) {
+ for b.Loop() {
+ seed, err := entropy.Seed(&memory)
+ if err != nil {
+ b.Fatalf("entropy.Seed() failed: %v", err)
+ }
+ sink ^= seed[0]
+ }
+}
diff --git a/src/crypto/sha256/sha256_test.go b/src/crypto/sha256/sha256_test.go
index 11b24db7d6b0a0..a18a536ba2896f 100644
--- a/src/crypto/sha256/sha256_test.go
+++ b/src/crypto/sha256/sha256_test.go
@@ -471,3 +471,17 @@ func BenchmarkHash256K(b *testing.B) {
func BenchmarkHash1M(b *testing.B) {
benchmarkSize(b, 1024*1024)
}
+
+func TestAllocationsWithTypeAsserts(t *testing.T) {
+ cryptotest.SkipTestAllocations(t)
+ allocs := testing.AllocsPerRun(100, func() {
+ h := New()
+ h.Write([]byte{1, 2, 3})
+ marshaled, _ := h.(encoding.BinaryMarshaler).MarshalBinary()
+ marshaled, _ = h.(encoding.BinaryAppender).AppendBinary(marshaled[:0])
+ h.(encoding.BinaryUnmarshaler).UnmarshalBinary(marshaled)
+ })
+ if allocs != 0 {
+ t.Fatalf("allocs = %v; want = 0", allocs)
+ }
+}
diff --git a/src/crypto/tls/bogo_shim_test.go b/src/crypto/tls/bogo_shim_test.go
index 7cab568db80953..8f171d925959c8 100644
--- a/src/crypto/tls/bogo_shim_test.go
+++ b/src/crypto/tls/bogo_shim_test.go
@@ -11,8 +11,10 @@ import (
"encoding/base64"
"encoding/json"
"encoding/pem"
+ "errors"
"flag"
"fmt"
+ "html/template"
"internal/byteorder"
"internal/testenv"
"io"
@@ -25,10 +27,13 @@ import (
"strconv"
"strings"
"testing"
+ "time"
"golang.org/x/crypto/cryptobyte"
)
+const boringsslModVer = "v0.0.0-20250620172916-f51d8b099832"
+
var (
port = flag.String("port", "", "")
server = flag.Bool("server", false, "")
@@ -537,6 +542,7 @@ func orderlyShutdown(tlsConn *Conn) {
}
func TestBogoSuite(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
if testing.Short() {
t.Skip("skipping in short mode")
}
@@ -555,9 +561,9 @@ func TestBogoSuite(t *testing.T) {
var bogoDir string
if *bogoLocalDir != "" {
+ ensureLocalBogo(t, *bogoLocalDir)
bogoDir = *bogoLocalDir
} else {
- const boringsslModVer = "v0.0.0-20250620172916-f51d8b099832"
bogoDir = cryptotest.FetchModule(t, "boringssl.googlesource.com/boringssl.git", boringsslModVer)
}
@@ -606,6 +612,12 @@ func TestBogoSuite(t *testing.T) {
t.Fatalf("failed to parse results JSON: %s", err)
}
+ if *bogoReport != "" {
+ if err := generateReport(results, *bogoReport); err != nil {
+ t.Fatalf("failed to generate report: %v", err)
+ }
+ }
+
// assertResults contains test results we want to make sure
// are present in the output. They are only checked if -bogo-filter
// was not passed.
@@ -655,6 +667,66 @@ func TestBogoSuite(t *testing.T) {
}
}
+// ensureLocalBogo fetches BoringSSL to localBogoDir at the correct revision
+// (from boringsslModVer) if localBogoDir doesn't already exist.
+//
+// If localBogoDir does exist, ensureLocalBogo fails the test if it isn't
+// a directory.
+func ensureLocalBogo(t *testing.T, localBogoDir string) {
+ t.Helper()
+
+ if stat, err := os.Stat(localBogoDir); err == nil {
+ if !stat.IsDir() {
+ t.Fatalf("local bogo dir (%q) exists but is not a directory", localBogoDir)
+ }
+
+ t.Logf("using local bogo checkout from %q", localBogoDir)
+ return
+ } else if !errors.Is(err, os.ErrNotExist) {
+ t.Fatalf("failed to stat local bogo dir (%q): %v", localBogoDir, err)
+ }
+
+ testenv.MustHaveExecPath(t, "git")
+
+ idx := strings.LastIndex(boringsslModVer, "-")
+ if idx == -1 || idx == len(boringsslModVer)-1 {
+ t.Fatalf("invalid boringsslModVer format: %q", boringsslModVer)
+ }
+ commitSHA := boringsslModVer[idx+1:]
+
+ t.Logf("cloning boringssl@%s to %q", commitSHA, localBogoDir)
+ cloneCmd := testenv.Command(t, "git", "clone", "--no-checkout", "https://boringssl.googlesource.com/boringssl", localBogoDir)
+ if err := cloneCmd.Run(); err != nil {
+ t.Fatalf("git clone failed: %v", err)
+ }
+
+ checkoutCmd := testenv.Command(t, "git", "checkout", commitSHA)
+ checkoutCmd.Dir = localBogoDir
+ if err := checkoutCmd.Run(); err != nil {
+ t.Fatalf("git checkout failed: %v", err)
+ }
+
+ t.Logf("using fresh local bogo checkout from %q", localBogoDir)
+ return
+}
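For reference, the commit extracted here is simply the last component of the Go module pseudo-version: with boringsslModVer = v0.0.0-20250620172916-f51d8b099832, everything after the final "-" (f51d8b099832) is the abbreviated commit hash handed to git checkout.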
+
+func generateReport(results bogoResults, outPath string) error {
+ data := reportData{
+ Results: results,
+ Timestamp: time.Unix(int64(results.SecondsSinceEpoch), 0).Format("2006-01-02 15:04:05"),
+ Revision: boringsslModVer,
+ }
+
+ tmpl := template.Must(template.New("report").Parse(reportTemplate))
+ file, err := os.Create(outPath)
+ if err != nil {
+ return err
+ }
+ defer file.Close()
+
+ return tmpl.Execute(file, data)
+}
+
// bogoResults is a copy of boringssl.googlesource.com/boringssl/testresults.Results
type bogoResults struct {
Version int `json:"version"`
@@ -669,3 +741,127 @@ type bogoResults struct {
Error string `json:"error,omitempty"`
} `json:"tests"`
}
+
+type reportData struct {
+ Results bogoResults
+ SkipReasons map[string]string
+ Timestamp string
+ Revision string
+}
+
+const reportTemplate = `
+
+
+
+ BoGo Results Report
+
+
+
+