diff --git a/api/next/51945.txt b/api/next/51945.txt new file mode 100644 index 00000000000000..7db1f093e5a2a5 --- /dev/null +++ b/api/next/51945.txt @@ -0,0 +1 @@ +pkg errors, func AsType[$0 error](error) ($0, bool) #51945 diff --git a/api/next/71287.txt b/api/next/71287.txt new file mode 100644 index 00000000000000..c1e09a1f523082 --- /dev/null +++ b/api/next/71287.txt @@ -0,0 +1,4 @@ +pkg testing, method (*B) ArtifactDir() string #71287 +pkg testing, method (*F) ArtifactDir() string #71287 +pkg testing, method (*T) ArtifactDir() string #71287 +pkg testing, type TB interface, ArtifactDir() string #71287 diff --git a/api/next/73161.txt b/api/next/73161.txt new file mode 100644 index 00000000000000..86526b597a9463 --- /dev/null +++ b/api/next/73161.txt @@ -0,0 +1 @@ +pkg net/http/httputil, type ReverseProxy struct, Director //deprecated #73161 diff --git a/doc/go_spec.html b/doc/go_spec.html index b67eaf9999ab1e..d6c7962a961b88 100644 --- a/doc/go_spec.html +++ b/doc/go_spec.html @@ -1,6 +1,6 @@ @@ -3173,7 +3173,7 @@

Composite literals

Unless the LiteralType is a type parameter, -its underlying type +its underlying type must be a struct, array, slice, or map type (the syntax enforces this constraint except when the type is given as a TypeName). @@ -4873,7 +4873,7 @@

Operator precedence

x <= f() // x <= f() ^a >> b // (^a) >> b f() || g() // f() || g() -x == y+1 && <-chanInt > 0 // (x == (y+1)) && ((<-chanInt) > 0) +x == y+1 && <-chanInt > 0 // (x == (y+1)) && ((<-chanInt) > 0) @@ -6635,7 +6635,7 @@

For statements with for clause

 var prints []func()
-for i := 0; i < 5; i++ {
+for i := 0; i < 5; i++ {
 	prints = append(prints, func() { println(i) })
 	i++
 }
@@ -6772,7 +6772,7 @@ 

For statements with range clause

variable, which must be of integer type. Otherwise, if the iteration variable is declared by the "range" clause or is absent, the type of the iteration values is the default type for n. -If n <= 0, the loop does not run any iterations. +If n <= 0, the loop does not run any iterations.
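A small standalone program illustrating the range-over-integer semantics described above (a sketch only; it assumes a Go 1.22 or newer toolchain, where ranging over an integer is supported):

```go
package main

import "fmt"

func main() {
	// The iteration values are 0, 1, ..., n-1 in increasing order.
	for i := range 3 {
		fmt.Println(i) // prints 0, 1, 2
	}

	// If n <= 0, the loop does not run any iterations.
	n := 0
	for i := range n {
		fmt.Println(i) // never executed
	}
}
```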
@@ -7799,7 +7799,7 @@

    Min and max

    -min(x, y)    == if x <= y then x else y
    +min(x, y)    == if x <= y then x else y
     min(x, y, z) == min(min(x, y), z)
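These identities can be checked directly with the built-in `min` and `max` functions. A quick standalone sketch (assuming a Go 1.21 or newer toolchain, where these built-ins exist):

```go
package main

import "fmt"

func main() {
	// min(x, y) == if x <= y then x else y
	fmt.Println(min(3, 1)) // 1

	// min(x, y, z) == min(min(x, y), z); max folds the same way.
	fmt.Println(min(3, 1, 2)) // 1
	fmt.Println(max(3, 1, 2)) // 3
}
```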
     
diff --git a/doc/godebug.md b/doc/godebug.md
index aaa0f9dd55e570..c12ce5311d90d1 100644
--- a/doc/godebug.md
+++ b/doc/godebug.md
@@ -153,6 +153,16 @@ for example, see the
 [runtime documentation](/pkg/runtime#hdr-Environment_Variables)
 and the [go command documentation](/cmd/go#hdr-Build_and_test_caching).
 
+### Go 1.26
+
+Go 1.26 added a new `httpcookiemaxnum` setting that controls the maximum number
+of cookies that net/http will accept when parsing HTTP headers. If the number of
+cookies in a header exceeds the number set in `httpcookiemaxnum`, cookie parsing
+will fail early. The default value is `httpcookiemaxnum=3000`. Setting
+`httpcookiemaxnum=0` will allow cookie parsing to accept an indefinite
+number of cookies. To avoid denial of service attacks, this setting and default
+were backported to Go 1.25.2 and Go 1.24.8.
+
 ### Go 1.25
 
 Go 1.25 added a new `decoratemappings` setting that controls whether the Go
diff --git a/doc/next/3-tools.md b/doc/next/3-tools.md
index 9459a5490e7904..c0a4601c0b9e74 100644
--- a/doc/next/3-tools.md
+++ b/doc/next/3-tools.md
@@ -7,5 +7,15 @@ a replacement for `go tool doc`:
 it takes the same flags and arguments and has the same behavior.
 
+
+The `go fix` command, following the pattern of `go vet` in Go 1.10,
+now uses the Go analysis framework (`golang.org/x/tools/go/analysis`).
+This means the same analyzers that provide diagnostics in `go vet`
+can be used to suggest and apply fixes in `go fix`.
+The `go fix` command's historical fixers, all of which were obsolete,
+have been removed and replaced by a suite of new analyzers that
+offer fixes to use newer features of the language and library.
+
+
 ### Cgo {#cgo}
diff --git a/doc/next/6-stdlib/99-minor/errors/51945.md b/doc/next/6-stdlib/99-minor/errors/51945.md
new file mode 100644
index 00000000000000..44ac7222e6d990
--- /dev/null
+++ b/doc/next/6-stdlib/99-minor/errors/51945.md
@@ -0,0 +1,2 @@
+The new [AsType] function is a generic version of [As]. It is type-safe, faster,
+and, in most cases, easier to use.
diff --git a/doc/next/6-stdlib/99-minor/net/http/httputil/73161.md b/doc/next/6-stdlib/99-minor/net/http/httputil/73161.md
new file mode 100644
index 00000000000000..f6318f85534746
--- /dev/null
+++ b/doc/next/6-stdlib/99-minor/net/http/httputil/73161.md
@@ -0,0 +1,11 @@
+The [ReverseProxy.Director] configuration field is deprecated
+in favor of [ReverseProxy.Rewrite].
+
+A malicious client can remove headers added by a `Director` function
+by designating those headers as hop-by-hop. Since there is no way to address
+this problem within the scope of the `Director` API, we added a new
+`Rewrite` hook in Go 1.20. `Rewrite` hooks are provided with both the
+unmodified inbound request received by the proxy and the outbound request
+which will be sent by the proxy.
+
+Since the `Director` hook is fundamentally unsafe, we are now deprecating it.
diff --git a/doc/next/6-stdlib/99-minor/testing/71287.md b/doc/next/6-stdlib/99-minor/testing/71287.md
new file mode 100644
index 00000000000000..82cac638101099
--- /dev/null
+++ b/doc/next/6-stdlib/99-minor/testing/71287.md
@@ -0,0 +1,18 @@
+The new methods [T.ArtifactDir], [B.ArtifactDir], and [F.ArtifactDir]
+return a directory in which to write test output files (artifacts).
+
+When the `-artifacts` flag is provided to `go test`,
+this directory will be located under the output directory
+(specified with `-outputdir`, or the current directory by default).
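A minimal sketch of how the new methods might be used in a test. The test name, file name, and file contents here are invented for illustration; only the `ArtifactDir` method itself comes from the `api/next/71287.txt` entries above:

```go
package example_test

import (
	"os"
	"path/filepath"
	"testing"
)

func TestArtifacts(t *testing.T) {
	// ArtifactDir returns the directory for this test's output files.
	dir := t.ArtifactDir()

	// Write an artifact; with -artifacts it is kept under -outputdir,
	// otherwise it lands in a temporary directory that is removed
	// after the test completes.
	data := []byte("captured output")
	if err := os.WriteFile(filepath.Join(dir, "trace.txt"), data, 0o644); err != nil {
		t.Fatal(err)
	}
}
```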
+Otherwise, artifacts are stored in a temporary directory +which is removed after the test completes. + +The first call to `ArtifactDir` when `-artifacts` is provided +writes the location of the directory to the test log. + +For example, in a test named `TestArtifacts`, +`t.ArtifactDir()` emits: + +``` +=== ARTIFACTS Test /path/to/artifact/dir +``` diff --git a/src/archive/tar/common.go b/src/archive/tar/common.go index 7b3945ff153144..ad31bbb64aaa5c 100644 --- a/src/archive/tar/common.go +++ b/src/archive/tar/common.go @@ -39,6 +39,7 @@ var ( errMissData = errors.New("archive/tar: sparse file references non-existent data") errUnrefData = errors.New("archive/tar: sparse file contains unreferenced data") errWriteHole = errors.New("archive/tar: write non-NUL byte in sparse hole") + errSparseTooLong = errors.New("archive/tar: sparse map too long") ) type headerError []string diff --git a/src/archive/tar/reader.go b/src/archive/tar/reader.go index 8483fb52a28f66..16ac2f5b17c28b 100644 --- a/src/archive/tar/reader.go +++ b/src/archive/tar/reader.go @@ -531,12 +531,17 @@ func readGNUSparseMap1x0(r io.Reader) (sparseDatas, error) { cntNewline int64 buf bytes.Buffer blk block + totalSize int ) // feedTokens copies data in blocks from r into buf until there are // at least cnt newlines in buf. It will not read more blocks than needed. feedTokens := func(n int64) error { for cntNewline < n { + totalSize += len(blk) + if totalSize > maxSpecialFileSize { + return errSparseTooLong + } if _, err := mustReadFull(r, blk[:]); err != nil { return err } @@ -569,8 +574,8 @@ func readGNUSparseMap1x0(r io.Reader) (sparseDatas, error) { } // Parse for all member entries. - // numEntries is trusted after this since a potential attacker must have - // committed resources proportional to what this library used. + // numEntries is trusted after this since feedTokens limits the number of + // tokens based on maxSpecialFileSize. if err := feedTokens(2 * numEntries); err != nil { return nil, err } diff --git a/src/archive/tar/reader_test.go b/src/archive/tar/reader_test.go index 99340a30471914..fca53dae741bd5 100644 --- a/src/archive/tar/reader_test.go +++ b/src/archive/tar/reader_test.go @@ -621,6 +621,11 @@ func TestReader(t *testing.T) { }, Format: FormatPAX, }}, + }, { + // Small compressed file that uncompresses to + // a file with a very large GNU 1.0 sparse map. + file: "testdata/gnu-sparse-many-zeros.tar.bz2", + err: errSparseTooLong, }} for _, v := range vectors { diff --git a/src/archive/tar/testdata/gnu-sparse-many-zeros.tar.bz2 b/src/archive/tar/testdata/gnu-sparse-many-zeros.tar.bz2 new file mode 100644 index 00000000000000..751d7fd4b68be1 Binary files /dev/null and b/src/archive/tar/testdata/gnu-sparse-many-zeros.tar.bz2 differ diff --git a/src/cmd/asm/internal/arch/arm64.go b/src/cmd/asm/internal/arch/arm64.go index 87ccb8c0409313..d562e5907d6fc1 100644 --- a/src/cmd/asm/internal/arch/arm64.go +++ b/src/cmd/asm/internal/arch/arm64.go @@ -195,149 +195,6 @@ func ARM64RegisterShift(reg, op, count int16) (int64, error) { return int64(reg&31)<<16 | int64(op)<<22 | int64(uint16(count)), nil } -// ARM64RegisterExtension constructs an ARM64 register with extension or arrangement. 
-func ARM64RegisterExtension(a *obj.Addr, ext string, reg, num int16, isAmount, isIndex bool) error { - Rnum := (reg & 31) + int16(num<<5) - if isAmount { - if num < 0 || num > 7 { - return errors.New("index shift amount is out of range") - } - } - if reg <= arm64.REG_R31 && reg >= arm64.REG_R0 { - if !isAmount { - return errors.New("invalid register extension") - } - switch ext { - case "UXTB": - if a.Type == obj.TYPE_MEM { - return errors.New("invalid shift for the register offset addressing mode") - } - a.Reg = arm64.REG_UXTB + Rnum - case "UXTH": - if a.Type == obj.TYPE_MEM { - return errors.New("invalid shift for the register offset addressing mode") - } - a.Reg = arm64.REG_UXTH + Rnum - case "UXTW": - // effective address of memory is a base register value and an offset register value. - if a.Type == obj.TYPE_MEM { - a.Index = arm64.REG_UXTW + Rnum - } else { - a.Reg = arm64.REG_UXTW + Rnum - } - case "UXTX": - if a.Type == obj.TYPE_MEM { - return errors.New("invalid shift for the register offset addressing mode") - } - a.Reg = arm64.REG_UXTX + Rnum - case "SXTB": - if a.Type == obj.TYPE_MEM { - return errors.New("invalid shift for the register offset addressing mode") - } - a.Reg = arm64.REG_SXTB + Rnum - case "SXTH": - if a.Type == obj.TYPE_MEM { - return errors.New("invalid shift for the register offset addressing mode") - } - a.Reg = arm64.REG_SXTH + Rnum - case "SXTW": - if a.Type == obj.TYPE_MEM { - a.Index = arm64.REG_SXTW + Rnum - } else { - a.Reg = arm64.REG_SXTW + Rnum - } - case "SXTX": - if a.Type == obj.TYPE_MEM { - a.Index = arm64.REG_SXTX + Rnum - } else { - a.Reg = arm64.REG_SXTX + Rnum - } - case "LSL": - a.Index = arm64.REG_LSL + Rnum - default: - return errors.New("unsupported general register extension type: " + ext) - - } - } else if reg <= arm64.REG_V31 && reg >= arm64.REG_V0 { - switch ext { - case "B8": - if isIndex { - return errors.New("invalid register extension") - } - a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_8B & 15) << 5) - case "B16": - if isIndex { - return errors.New("invalid register extension") - } - a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_16B & 15) << 5) - case "H4": - if isIndex { - return errors.New("invalid register extension") - } - a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_4H & 15) << 5) - case "H8": - if isIndex { - return errors.New("invalid register extension") - } - a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_8H & 15) << 5) - case "S2": - if isIndex { - return errors.New("invalid register extension") - } - a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_2S & 15) << 5) - case "S4": - if isIndex { - return errors.New("invalid register extension") - } - a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_4S & 15) << 5) - case "D1": - if isIndex { - return errors.New("invalid register extension") - } - a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_1D & 15) << 5) - case "D2": - if isIndex { - return errors.New("invalid register extension") - } - a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_2D & 15) << 5) - case "Q1": - if isIndex { - return errors.New("invalid register extension") - } - a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_1Q & 15) << 5) - case "B": - if !isIndex { - return nil - } - a.Reg = arm64.REG_ELEM + (reg & 31) + ((arm64.ARNG_B & 15) << 5) - a.Index = num - case "H": - if !isIndex { - return nil - } - a.Reg = arm64.REG_ELEM + (reg & 31) + ((arm64.ARNG_H & 15) << 5) - a.Index = num - case "S": - if !isIndex { - return nil - } - a.Reg = arm64.REG_ELEM + (reg & 31) + 
((arm64.ARNG_S & 15) << 5) - a.Index = num - case "D": - if !isIndex { - return nil - } - a.Reg = arm64.REG_ELEM + (reg & 31) + ((arm64.ARNG_D & 15) << 5) - a.Index = num - default: - return errors.New("unsupported simd register extension type: " + ext) - } - } else { - return errors.New("invalid register and extension combination") - } - return nil -} - // ARM64RegisterArrangement constructs an ARM64 vector register arrangement. func ARM64RegisterArrangement(reg int16, name, arng string) (int64, error) { var curQ, curSize uint16 diff --git a/src/cmd/asm/internal/asm/parse.go b/src/cmd/asm/internal/asm/parse.go index 8f8f6dcc346a44..545f6c7553351b 100644 --- a/src/cmd/asm/internal/asm/parse.go +++ b/src/cmd/asm/internal/asm/parse.go @@ -775,7 +775,7 @@ func (p *Parser) registerExtension(a *obj.Addr, name string, prefix rune) { switch p.arch.Family { case sys.ARM64: - err := arch.ARM64RegisterExtension(a, ext, reg, num, isAmount, isIndex) + err := arm64.ARM64RegisterExtension(a, ext, reg, num, isAmount, isIndex) if err != nil { p.errorf("%v", err) } diff --git a/src/cmd/asm/internal/asm/testdata/arm64.s b/src/cmd/asm/internal/asm/testdata/arm64.s index 236f1a66979099..109a3d8316678b 100644 --- a/src/cmd/asm/internal/asm/testdata/arm64.s +++ b/src/cmd/asm/internal/asm/testdata/arm64.s @@ -1894,4 +1894,12 @@ next: BTI J // 9f2403d5 BTI JC // df2403d5 +// Pointer Authentication Codes (PAC) + PACIASP // 3f2303d5 + AUTIASP // bf2303d5 + PACIBSP // 7f2303d5 + AUTIBSP // ff2303d5 + AUTIA1716 // 9f2103d5 + AUTIB1716 // df2103d5 + END diff --git a/src/cmd/asm/internal/asm/testdata/arm64error.s b/src/cmd/asm/internal/asm/testdata/arm64error.s index 55890ce3e631a4..ce88e3ca540f13 100644 --- a/src/cmd/asm/internal/asm/testdata/arm64error.s +++ b/src/cmd/asm/internal/asm/testdata/arm64error.s @@ -422,4 +422,10 @@ TEXT errors(SB),$0 SHA1H V1.B16, V2.B16 // ERROR "invalid operands" BTI // ERROR "missing operand" BTI PLDL1KEEP // ERROR "illegal argument" + PACIASP C // ERROR "illegal combination" + AUTIASP R2 // ERROR "illegal combination" + PACIBSP R0 // ERROR "illegal combination" + AUTIBSP C // ERROR "illegal combination" + AUTIA1716 $45 // ERROR "illegal combination" + AUTIB1716 R0 // ERROR "illegal combination" RET diff --git a/src/cmd/cgo/internal/testcshared/cshared_test.go b/src/cmd/cgo/internal/testcshared/cshared_test.go index f1c30f8f9a2b2a..2ce705adba44f3 100644 --- a/src/cmd/cgo/internal/testcshared/cshared_test.go +++ b/src/cmd/cgo/internal/testcshared/cshared_test.go @@ -8,6 +8,7 @@ import ( "bufio" "bytes" "cmd/cgo/internal/cgotest" + "cmp" "debug/elf" "debug/pe" "encoding/binary" @@ -272,7 +273,7 @@ func createHeaders() error { // which results in the linkers output implib getting overwritten at each step. So instead build the // import library the traditional way, using a def file. 
err = os.WriteFile("libgo.def", - []byte("LIBRARY libgo.dll\nEXPORTS\n\tDidInitRun\n\tDidMainRun\n\tDivu\n\tFromPkg\n\t_cgo_dummy_export\n"), + []byte("LIBRARY libgo.dll\nEXPORTS\n\tDidInitRun\n\tDidMainRun\n\tDivu\n\tFromPkg\n"), 0644) if err != nil { return fmt.Errorf("unable to write def file: %v", err) @@ -375,9 +376,23 @@ func TestExportedSymbols(t *testing.T) { } } -func checkNumberOfExportedFunctionsWindows(t *testing.T, prog string, exportedFunctions int, wantAll bool) { +func checkNumberOfExportedSymbolsWindows(t *testing.T, exportedSymbols int, wantAll bool) { + t.Parallel() tmpdir := t.TempDir() + prog := ` +package main +import "C" +func main() {} +` + + for i := range exportedSymbols { + prog += fmt.Sprintf(` +//export GoFunc%d +func GoFunc%d() {} +`, i, i) + } + srcfile := filepath.Join(tmpdir, "test.go") objfile := filepath.Join(tmpdir, "test.dll") if err := os.WriteFile(srcfile, []byte(prog), 0666); err != nil { @@ -443,18 +458,19 @@ func checkNumberOfExportedFunctionsWindows(t *testing.T, prog string, exportedFu t.Fatalf("binary.Read failed: %v", err) } - // Only the two exported functions and _cgo_dummy_export should be exported. + exportedSymbols = cmp.Or(exportedSymbols, 1) // _cgo_stub_export is exported if there are no other symbols exported + // NumberOfNames is the number of functions exported with a unique name. // NumberOfFunctions can be higher than that because it also counts // functions exported only by ordinal, a unique number asigned by the linker, // and linkers might add an unknown number of their own ordinal-only functions. if wantAll { - if e.NumberOfNames <= uint32(exportedFunctions) { - t.Errorf("got %d exported names, want > %d", e.NumberOfNames, exportedFunctions) + if e.NumberOfNames <= uint32(exportedSymbols) { + t.Errorf("got %d exported names, want > %d", e.NumberOfNames, exportedSymbols) } } else { - if e.NumberOfNames > uint32(exportedFunctions) { - t.Errorf("got %d exported names, want <= %d", e.NumberOfNames, exportedFunctions) + if e.NumberOfNames != uint32(exportedSymbols) { + t.Errorf("got %d exported names, want %d", e.NumberOfNames, exportedSymbols) } } } @@ -470,43 +486,14 @@ func TestNumberOfExportedFunctions(t *testing.T) { t.Parallel() - const prog0 = ` -package main - -import "C" - -func main() { -} -` - - const prog2 = ` -package main - -import "C" - -//export GoFunc -func GoFunc() { - println(42) -} - -//export GoFunc2 -func GoFunc2() { - println(24) -} - -func main() { -} -` - // All programs export _cgo_dummy_export, so add 1 to the expected counts. - t.Run("OnlyExported/0", func(t *testing.T) { - checkNumberOfExportedFunctionsWindows(t, prog0, 0+1, false) - }) - t.Run("OnlyExported/2", func(t *testing.T) { - checkNumberOfExportedFunctionsWindows(t, prog2, 2+1, false) - }) - t.Run("All", func(t *testing.T) { - checkNumberOfExportedFunctionsWindows(t, prog2, 2+1, true) - }) + for i := range 3 { + t.Run(fmt.Sprintf("OnlyExported/%d", i), func(t *testing.T) { + checkNumberOfExportedSymbolsWindows(t, i, false) + }) + t.Run(fmt.Sprintf("All/%d", i), func(t *testing.T) { + checkNumberOfExportedSymbolsWindows(t, i, true) + }) + } } // test1: shared library can be dynamically loaded and exported symbols are accessible. diff --git a/src/cmd/cgo/internal/testout/out_test.go b/src/cmd/cgo/internal/testout/out_test.go new file mode 100644 index 00000000000000..81dfa365871372 --- /dev/null +++ b/src/cmd/cgo/internal/testout/out_test.go @@ -0,0 +1,144 @@ +// Copyright 2025 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package out_test + +import ( + "bufio" + "bytes" + "fmt" + "internal/testenv" + "internal/goarch" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + "testing" +) + +type methodAlign struct { + Method string + Align int +} + +var wantAligns = map[string]int{ + "ReturnEmpty": 1, + "ReturnOnlyUint8": 1, + "ReturnOnlyUint16": 2, + "ReturnOnlyUint32": 4, + "ReturnOnlyUint64": goarch.PtrSize, + "ReturnOnlyInt": goarch.PtrSize, + "ReturnOnlyPtr": goarch.PtrSize, + "ReturnByteSlice": goarch.PtrSize, + "ReturnString": goarch.PtrSize, + "InputAndReturnUint8": 1, + "MixedTypes": goarch.PtrSize, +} + +// TestAligned tests that the generated _cgo_export.c file has the wanted +// align attributes for struct types used as arguments or results of +// //exported functions. +func TestAligned(t *testing.T) { + testenv.MustHaveGoRun(t) + testenv.MustHaveCGO(t) + + testdata, err := filepath.Abs("testdata") + if err != nil { + t.Fatal(err) + } + + objDir := t.TempDir() + + cmd := testenv.Command(t, testenv.GoToolPath(t), "tool", "cgo", + "-objdir", objDir, + filepath.Join(testdata, "aligned.go")) + cmd.Stderr = new(bytes.Buffer) + + err = cmd.Run() + if err != nil { + t.Fatalf("%#q: %v\n%s", cmd, err, cmd.Stderr) + } + + haveAligns, err := parseAlign(filepath.Join(objDir, "_cgo_export.c")) + if err != nil { + t.Fatal(err) + } + + // Check that we have all the wanted methods + if len(haveAligns) != len(wantAligns) { + t.Fatalf("have %d methods with aligned, want %d", len(haveAligns), len(wantAligns)) + } + + for i := range haveAligns { + method := haveAligns[i].Method + haveAlign := haveAligns[i].Align + + wantAlign, ok := wantAligns[method] + if !ok { + t.Errorf("method %s: have aligned %d, want missing entry", method, haveAlign) + } else if haveAlign != wantAlign { + t.Errorf("method %s: have aligned %d, want %d", method, haveAlign, wantAlign) + } + } +} + +func parseAlign(filename string) ([]methodAlign, error) { + file, err := os.Open(filename) + if err != nil { + return nil, fmt.Errorf("failed to open file: %w", err) + } + defer file.Close() + + var results []methodAlign + scanner := bufio.NewScanner(file) + + // Regex to match function declarations like "struct MethodName_return MethodName(" + funcRegex := regexp.MustCompile(`^struct\s+(\w+)_return\s+(\w+)\(`) + // Regex to match simple function declarations like "GoSlice MethodName(" + simpleFuncRegex := regexp.MustCompile(`^Go\w+\s+(\w+)\(`) + // Regex to match void-returning exported functions like "void ReturnEmpty(" + voidFuncRegex := regexp.MustCompile(`^void\s+(\w+)\(`) + // Regex to match align attributes like "__attribute__((aligned(8)))" + alignRegex := regexp.MustCompile(`__attribute__\(\(aligned\((\d+)\)\)\)`) + + var currentMethod string + + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + + // Check if this line declares a function with struct return type + if matches := funcRegex.FindStringSubmatch(line); matches != nil { + currentMethod = matches[2] // Extract the method name + } else if matches := simpleFuncRegex.FindStringSubmatch(line); matches != nil { + // Check if this line declares a function with simple return type (like GoSlice) + currentMethod = matches[1] // Extract the method name + } else if matches := voidFuncRegex.FindStringSubmatch(line); matches != nil { + // Check if this line declares a void-returning function + currentMethod = matches[1] // Extract the method name + } + + // Check if 
this line contains align information + if alignMatches := alignRegex.FindStringSubmatch(line); alignMatches != nil && currentMethod != "" { + alignStr := alignMatches[1] + align, err := strconv.Atoi(alignStr) + if err != nil { + // Skip this entry if we can't parse the align as integer + currentMethod = "" + continue + } + results = append(results, methodAlign{ + Method: currentMethod, + Align: align, + }) + currentMethod = "" // Reset for next method + } + } + + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("error reading file: %w", err) + } + + return results, nil +} diff --git a/src/cmd/cgo/internal/testout/testdata/aligned.go b/src/cmd/cgo/internal/testout/testdata/aligned.go new file mode 100644 index 00000000000000..cea6f2889a0cad --- /dev/null +++ b/src/cmd/cgo/internal/testout/testdata/aligned.go @@ -0,0 +1,63 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "C" + +//export ReturnEmpty +func ReturnEmpty() { + return +} + +//export ReturnOnlyUint8 +func ReturnOnlyUint8() (uint8, uint8, uint8) { + return 1, 2, 3 +} + +//export ReturnOnlyUint16 +func ReturnOnlyUint16() (uint16, uint16, uint16) { + return 1, 2, 3 +} + +//export ReturnOnlyUint32 +func ReturnOnlyUint32() (uint32, uint32, uint32) { + return 1, 2, 3 +} + +//export ReturnOnlyUint64 +func ReturnOnlyUint64() (uint64, uint64, uint64) { + return 1, 2, 3 +} + +//export ReturnOnlyInt +func ReturnOnlyInt() (int, int, int) { + return 1, 2, 3 +} + +//export ReturnOnlyPtr +func ReturnOnlyPtr() (*int, *int, *int) { + a, b, c := 1, 2, 3 + return &a, &b, &c +} + +//export ReturnString +func ReturnString() string { + return "hello" +} + +//export ReturnByteSlice +func ReturnByteSlice() []byte { + return []byte{1, 2, 3} +} + +//export InputAndReturnUint8 +func InputAndReturnUint8(a, b, c uint8) (uint8, uint8, uint8) { + return a, b, c +} + +//export MixedTypes +func MixedTypes(a uint8, b uint16, c uint32, d uint64, e int, f *int) (uint8, uint16, uint32, uint64, int, *int) { + return a, b, c, d, e, f +} diff --git a/src/cmd/cgo/out.go b/src/cmd/cgo/out.go index dfa54e41d33399..a2bcdf89c5ad44 100644 --- a/src/cmd/cgo/out.go +++ b/src/cmd/cgo/out.go @@ -949,6 +949,8 @@ func (p *Package) writeExports(fgo2, fm, fgcc, fgcch io.Writer) { fmt.Fprintf(gotype, "struct {\n") off := int64(0) npad := 0 + // the align is at least 1 (for char) + maxAlign := int64(1) argField := func(typ ast.Expr, namePat string, args ...interface{}) { name := fmt.Sprintf(namePat, args...) t := p.cgoType(typ) @@ -963,6 +965,11 @@ func (p *Package) writeExports(fgo2, fm, fgcc, fgcch io.Writer) { noSourceConf.Fprint(gotype, fset, typ) fmt.Fprintf(gotype, "\n") off += t.Size + // keep track of the maximum alignment among all fields + // so that we can align the struct correctly + if t.Align > maxAlign { + maxAlign = t.Align + } } if fn.Recv != nil { argField(fn.Recv.List[0].Type, "recv") @@ -1005,12 +1012,8 @@ func (p *Package) writeExports(fgo2, fm, fgcc, fgcch io.Writer) { } // Build the wrapper function compiled by gcc. 
- gccExport := "" - if goos == "windows" { - gccExport = "__declspec(dllexport) " - } var s strings.Builder - fmt.Fprintf(&s, "%s%s %s(", gccExport, gccResult, exp.ExpName) + fmt.Fprintf(&s, "%s %s(", gccResult, exp.ExpName) if fn.Recv != nil { s.WriteString(p.cgoType(fn.Recv.List[0].Type).C.String()) s.WriteString(" recv") @@ -1051,7 +1054,11 @@ func (p *Package) writeExports(fgo2, fm, fgcc, fgcch io.Writer) { // string.h for memset, and is also robust to C++ // types with constructors. Both GCC and LLVM optimize // this into just zeroing _cgo_a. - fmt.Fprintf(fgcc, "\ttypedef %s %v _cgo_argtype;\n", ctype.String(), p.packedAttribute()) + // + // The struct should be aligned to the maximum alignment + // of any of its fields. This to avoid alignment + // issues. + fmt.Fprintf(fgcc, "\ttypedef %s %v __attribute__((aligned(%d))) _cgo_argtype;\n", ctype.String(), p.packedAttribute(), maxAlign) fmt.Fprintf(fgcc, "\tstatic _cgo_argtype _cgo_zero;\n") fmt.Fprintf(fgcc, "\t_cgo_argtype _cgo_a = _cgo_zero;\n") if gccResult != "void" && (len(fntype.Results.List) > 1 || len(fntype.Results.List[0].Names) > 1) { diff --git a/src/cmd/compile/README.md b/src/cmd/compile/README.md index 1089348030d78b..02429d5688659e 100644 --- a/src/cmd/compile/README.md +++ b/src/cmd/compile/README.md @@ -289,9 +289,9 @@ dependencies, so is not suitable for distributed build systems.) ``` After that, your edit/compile/test cycle can be similar to: ``` - <... make edits to cmd/compile source ...> + [... make edits to cmd/compile source ...] $ toolstash restore && go install cmd/compile # restore known good tools to build compiler - <... 'go build', 'go test', etc. ...> # use freshly built compiler + [... 'go build', 'go test', etc. ...] # use freshly built compiler ``` * toolstash also allows comparing the installed vs. stashed copy of diff --git a/src/cmd/compile/internal/arm64/ssa.go b/src/cmd/compile/internal/arm64/ssa.go index 7bc0e536e941e6..43ecb6b4b715b4 100644 --- a/src/cmd/compile/internal/arm64/ssa.go +++ b/src/cmd/compile/internal/arm64/ssa.go @@ -1189,8 +1189,9 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { if dstReg == srcReg { break } - tmpReg1 := int16(arm64.REG_R24) - tmpReg2 := int16(arm64.REG_R25) + tmpReg1 := int16(arm64.REG_R25) + tmpFReg1 := int16(arm64.REG_F16) + tmpFReg2 := int16(arm64.REG_F17) n := v.AuxInt if n < 16 { v.Fatalf("Move too small %d", n) @@ -1198,10 +1199,17 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { // Generate copying instructions. var off int64 + for n >= 32 { + // FLDPQ off(srcReg), (tmpFReg1, tmpFReg2) + // FSTPQ (tmpFReg1, tmpFReg2), off(dstReg) + move32(s, srcReg, dstReg, tmpFReg1, tmpFReg2, off, false) + off += 32 + n -= 32 + } for n >= 16 { - // LDP off(srcReg), (tmpReg1, tmpReg2) - // STP (tmpReg1, tmpReg2), off(dstReg) - move16(s, srcReg, dstReg, tmpReg1, tmpReg2, off, false) + // FMOVQ off(src), tmpFReg1 + // FMOVQ tmpFReg1, off(dst) + move16(s, srcReg, dstReg, tmpFReg1, off, false) off += 16 n -= 16 } @@ -1223,9 +1231,10 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { if dstReg == srcReg { break } - countReg := int16(arm64.REG_R23) - tmpReg1 := int16(arm64.REG_R24) - tmpReg2 := int16(arm64.REG_R25) + countReg := int16(arm64.REG_R24) + tmpReg1 := int16(arm64.REG_R25) + tmpFReg1 := int16(arm64.REG_F16) + tmpFReg2 := int16(arm64.REG_F17) n := v.AuxInt loopSize := int64(64) if n < 3*loopSize { @@ -1251,10 +1260,10 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { // Move loopSize bytes starting at srcReg to dstReg. 
// Increment srcReg and destReg by loopSize as a side effect. - for range loopSize / 16 { - // LDP.P 16(srcReg), (tmpReg1, tmpReg2) - // STP.P (tmpReg1, tmpReg2), 16(dstReg) - move16(s, srcReg, dstReg, tmpReg1, tmpReg2, 0, true) + for range loopSize / 32 { + // FLDPQ.P 32(srcReg), (tmpFReg1, tmpFReg2) + // FSTPQ.P (tmpFReg1, tmpFReg2), 32(dstReg) + move32(s, srcReg, dstReg, tmpFReg1, tmpFReg2, 0, true) } // Decrement loop count. // SUB $1, countReg @@ -1276,10 +1285,17 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { // Copy any fractional portion. var off int64 + for n >= 32 { + // FLDPQ off(srcReg), (tmpFReg1, tmpFReg2) + // FSTPQ (tmpFReg1, tmpFReg2), off(dstReg) + move32(s, srcReg, dstReg, tmpFReg1, tmpFReg2, off, false) + off += 32 + n -= 32 + } for n >= 16 { - // LDP off(srcReg), (tmpReg1, tmpReg2) - // STP (tmpReg1, tmpReg2), off(dstReg) - move16(s, srcReg, dstReg, tmpReg1, tmpReg2, off, false) + // FMOVQ off(src), tmpFReg1 + // FMOVQ tmpFReg1, off(dst) + move16(s, srcReg, dstReg, tmpFReg1, off, false) off += 16 n -= 16 } @@ -1699,26 +1715,55 @@ func zero8(s *ssagen.State, reg int16, off int64) { p.To.Offset = off } -// move16 copies 16 bytes at src+off to dst+off. +// move32 copies 32 bytes at src+off to dst+off. // Uses registers tmp1 and tmp2. -// If postInc is true, increment src and dst by 16. -func move16(s *ssagen.State, src, dst, tmp1, tmp2 int16, off int64, postInc bool) { - // LDP off(src), (tmp1, tmp2) - ld := s.Prog(arm64.ALDP) +// If postInc is true, increment src and dst by 32. +func move32(s *ssagen.State, src, dst, tmp1, tmp2 int16, off int64, postInc bool) { + // FLDPQ off(src), (tmp1, tmp2) + ld := s.Prog(arm64.AFLDPQ) ld.From.Type = obj.TYPE_MEM ld.From.Reg = src ld.From.Offset = off ld.To.Type = obj.TYPE_REGREG ld.To.Reg = tmp1 ld.To.Offset = int64(tmp2) - // STP (tmp1, tmp2), off(dst) - st := s.Prog(arm64.ASTP) + // FSTPQ (tmp1, tmp2), off(dst) + st := s.Prog(arm64.AFSTPQ) st.From.Type = obj.TYPE_REGREG st.From.Reg = tmp1 st.From.Offset = int64(tmp2) st.To.Type = obj.TYPE_MEM st.To.Reg = dst st.To.Offset = off + if postInc { + if off != 0 { + panic("can't postinc with non-zero offset") + } + ld.Scond = arm64.C_XPOST + st.Scond = arm64.C_XPOST + ld.From.Offset = 32 + st.To.Offset = 32 + } +} + +// move16 copies 16 bytes at src+off to dst+off. +// Uses register tmp1 +// If postInc is true, increment src and dst by 16. 
+func move16(s *ssagen.State, src, dst, tmp1 int16, off int64, postInc bool) { + // FMOVQ off(src), tmp1 + ld := s.Prog(arm64.AFMOVQ) + ld.From.Type = obj.TYPE_MEM + ld.From.Reg = src + ld.From.Offset = off + ld.To.Type = obj.TYPE_REG + ld.To.Reg = tmp1 + // FMOVQ tmp1, off(dst) + st := s.Prog(arm64.AFMOVQ) + st.From.Type = obj.TYPE_REG + st.From.Reg = tmp1 + st.To.Type = obj.TYPE_MEM + st.To.Reg = dst + st.To.Offset = off if postInc { if off != 0 { panic("can't postinc with non-zero offset") diff --git a/src/cmd/compile/internal/base/debug.go b/src/cmd/compile/internal/base/debug.go index 85873dcc40e1b3..9e8ab2f488bb4d 100644 --- a/src/cmd/compile/internal/base/debug.go +++ b/src/cmd/compile/internal/base/debug.go @@ -20,6 +20,7 @@ type DebugFlags struct { Append int `help:"print information about append compilation"` Checkptr int `help:"instrument unsafe pointer conversions\n0: instrumentation disabled\n1: conversions involving unsafe.Pointer are instrumented\n2: conversions to unsafe.Pointer force heap allocation" concurrent:"ok"` Closure int `help:"print information about closure compilation"` + Converthash string `help:"hash value for use in debugging changes to platform-dependent float-to-[u]int conversion" concurrent:"ok"` Defer int `help:"print information about defer compilation"` DisableNil int `help:"disable nil checks" concurrent:"ok"` DumpInlFuncProps string `help:"dump function properties from inl heuristics to specified file"` diff --git a/src/cmd/compile/internal/base/flag.go b/src/cmd/compile/internal/base/flag.go index a0ed876cfc8e0e..1d211e0a2dd9f4 100644 --- a/src/cmd/compile/internal/base/flag.go +++ b/src/cmd/compile/internal/base/flag.go @@ -262,6 +262,12 @@ func ParseFlags() { Debug.LoopVar = 1 } + if Debug.Converthash != "" { + ConvertHash = NewHashDebug("converthash", Debug.Converthash, nil) + } else { + // quietly disable the convert hash changes + ConvertHash = NewHashDebug("converthash", "qn", nil) + } if Debug.Fmahash != "" { FmaHash = NewHashDebug("fmahash", Debug.Fmahash, nil) } diff --git a/src/cmd/compile/internal/base/hashdebug.go b/src/cmd/compile/internal/base/hashdebug.go index fa63deb46a3c01..edf567457cb04b 100644 --- a/src/cmd/compile/internal/base/hashdebug.go +++ b/src/cmd/compile/internal/base/hashdebug.go @@ -53,6 +53,7 @@ func (d *HashDebug) SetInlineSuffixOnly(b bool) *HashDebug { // The default compiler-debugging HashDebug, for "-d=gossahash=..." var hashDebug *HashDebug +var ConvertHash *HashDebug // for debugging float-to-[u]int conversion changes var FmaHash *HashDebug // for debugging fused-multiply-add floating point changes var LoopVarHash *HashDebug // for debugging shared/private loop variable changes var PGOHash *HashDebug // for debugging PGO optimization decisions diff --git a/src/cmd/compile/internal/base/print.go b/src/cmd/compile/internal/base/print.go index 119f06fbc03351..9e3348c1ecca89 100644 --- a/src/cmd/compile/internal/base/print.go +++ b/src/cmd/compile/internal/base/print.go @@ -220,7 +220,7 @@ func FatalfAt(pos src.XPos, format string, args ...interface{}) { fmt.Printf("\n") // If this is a released compiler version, ask for a bug report. 
- if Debug.Panic == 0 && strings.HasPrefix(buildcfg.Version, "go") { + if Debug.Panic == 0 && strings.HasPrefix(buildcfg.Version, "go") && !strings.Contains(buildcfg.Version, "devel") { fmt.Printf("\n") fmt.Printf("Please file a bug report including a short program that triggers the error.\n") fmt.Printf("https://go.dev/issue/new\n") diff --git a/src/cmd/compile/internal/devirtualize/devirtualize.go b/src/cmd/compile/internal/devirtualize/devirtualize.go index 372d05809401ff..cb4608a0246574 100644 --- a/src/cmd/compile/internal/devirtualize/devirtualize.go +++ b/src/cmd/compile/internal/devirtualize/devirtualize.go @@ -18,9 +18,11 @@ import ( "cmd/compile/internal/types" ) +const go126ImprovedConcreteTypeAnalysis = true + // StaticCall devirtualizes the given call if possible when the concrete callee // is available statically. -func StaticCall(call *ir.CallExpr) { +func StaticCall(s *State, call *ir.CallExpr) { // For promoted methods (including value-receiver methods promoted // to pointer-receivers), the interface method wrapper may contain // expressions that can panic (e.g., ODEREF, ODOTPTR, @@ -40,15 +42,31 @@ func StaticCall(call *ir.CallExpr) { } sel := call.Fun.(*ir.SelectorExpr) - r := ir.StaticValue(sel.X) - if r.Op() != ir.OCONVIFACE { - return - } - recv := r.(*ir.ConvExpr) + var typ *types.Type + if go126ImprovedConcreteTypeAnalysis { + typ = concreteType(s, sel.X) + if typ == nil { + return + } - typ := recv.X.Type() - if typ.IsInterface() { - return + // Don't create type-assertions that would be impossible at compile-time. + // This can happen in such case: any(0).(interface {A()}).A(), this typechecks without + // any errors, but will cause a runtime panic. We statically know that int(0) does not + // implement that interface, thus we skip the devirtualization, as it is not possible + // to make an assertion: any(0).(interface{A()}).(int) (int does not implement interface{A()}). + if !typecheck.Implements(typ, sel.X.Type()) { + return + } + } else { + r := ir.StaticValue(sel.X) + if r.Op() != ir.OCONVIFACE { + return + } + recv := r.(*ir.ConvExpr) + typ = recv.X.Type() + if typ.IsInterface() { + return + } } // If typ is a shape type, then it was a type argument originally @@ -99,8 +117,27 @@ func StaticCall(call *ir.CallExpr) { return } - dt := ir.NewTypeAssertExpr(sel.Pos(), sel.X, nil) - dt.SetType(typ) + dt := ir.NewTypeAssertExpr(sel.Pos(), sel.X, typ) + + if go126ImprovedConcreteTypeAnalysis { + // Consider: + // + // var v Iface + // v.A() + // v = &Impl{} + // + // Here in the devirtualizer, we determine the concrete type of v as being an *Impl, + // but it can still be a nil interface, we have not detected that. The v.(*Impl) + // type assertion that we make here would also have failed, but with a different + // panic "pkg.Iface is nil, not *pkg.Impl", where previously we would get a nil panic. + // We fix this, by introducing an additional nilcheck on the itab. + // Calling a method on an nil interface (in most cases) is a bug in a program, so it is fine + // to devirtualize and further (possibly) inline them, even though we would never reach + // the called function. + dt.UseNilPanic = true + dt.SetPos(call.Pos()) + } + x := typecheck.XDotMethod(sel.Pos(), dt, sel.Sel, true) switch x.Op() { case ir.ODOTMETH: @@ -138,3 +175,413 @@ func StaticCall(call *ir.CallExpr) { // Desugar OCALLMETH, if we created one (#57309). 
typecheck.FixMethodCall(call) } + +const concreteTypeDebug = false + +// concreteType determines the concrete type of n, following OCONVIFACEs and type asserts. +// Returns nil when the concrete type could not be determined, or when there are multiple +// (different) types assigned to an interface. +func concreteType(s *State, n ir.Node) (typ *types.Type) { + typ = concreteType1(s, n, make(map[*ir.Name]struct{})) + if typ == &noType { + return nil + } + if typ != nil && typ.IsInterface() { + base.FatalfAt(n.Pos(), "typ.IsInterface() = true; want = false; typ = %v", typ) + } + return typ +} + +// noType is a sentinel value returned by [concreteType1]. +var noType types.Type + +// concreteType1 analyzes the node n and returns its concrete type if it is statically known. +// Otherwise, it returns a nil Type, indicating that a concrete type was not determined. +// When n is known to be statically nil or a self-assignment is detected, in returns a sentinel [noType] type instead. +func concreteType1(s *State, n ir.Node, seen map[*ir.Name]struct{}) (outT *types.Type) { + nn := n // for debug messages + + if concreteTypeDebug { + defer func() { + t := "&noType" + if outT != &noType { + t = outT.String() + } + base.Warn("concreteType1(%v) -> %v", nn, t) + }() + } + + for { + if concreteTypeDebug { + base.Warn("concreteType1(%v): analyzing %v", nn, n) + } + + if !n.Type().IsInterface() { + return n.Type() + } + + switch n1 := n.(type) { + case *ir.ConvExpr: + if n1.Op() == ir.OCONVNOP { + if !n1.Type().IsInterface() || !types.Identical(n1.Type().Underlying(), n1.X.Type().Underlying()) { + // As we check (directly before this switch) whether n is an interface, thus we should only reach + // here for iface conversions where both operands are the same. + base.FatalfAt(n1.Pos(), "not identical/interface types found n1.Type = %v; n1.X.Type = %v", n1.Type(), n1.X.Type()) + } + n = n1.X + continue + } + if n1.Op() == ir.OCONVIFACE { + n = n1.X + continue + } + case *ir.InlinedCallExpr: + if n1.Op() == ir.OINLCALL { + n = n1.SingleResult() + continue + } + case *ir.ParenExpr: + n = n1.X + continue + case *ir.TypeAssertExpr: + n = n1.X + continue + } + break + } + + if n.Op() != ir.ONAME { + return nil + } + + name := n.(*ir.Name).Canonical() + if name.Class != ir.PAUTO { + return nil + } + + if name.Op() != ir.ONAME { + base.FatalfAt(name.Pos(), "name.Op = %v; want = ONAME", n.Op()) + } + + // name.Curfn must be set, as we checked name.Class != ir.PAUTO before. + if name.Curfn == nil { + base.FatalfAt(name.Pos(), "name.Curfn = nil; want not nil") + } + + if name.Addrtaken() { + return nil // conservatively assume it's reassigned with a different type indirectly + } + + if _, ok := seen[name]; ok { + return &noType // Already analyzed assignments to name, no need to do that twice. + } + seen[name] = struct{}{} + + if concreteTypeDebug { + base.Warn("concreteType1(%v): analyzing assignments to %v", nn, name) + } + + var typ *types.Type + for _, v := range s.assignments(name) { + var t *types.Type + switch v := v.(type) { + case *types.Type: + t = v + case ir.Node: + t = concreteType1(s, v, seen) + if t == &noType { + continue + } + } + if t == nil || (typ != nil && !types.Identical(typ, t)) { + return nil + } + typ = t + } + + if typ == nil { + // Variable either declared with zero value, or only assigned with nil. + return &noType + } + + return typ +} + +// assignment can be one of: +// - nil - assignment from an interface type. +// - *types.Type - assignment from a concrete type (non-interface). 
+// - ir.Node - assignment from a ir.Node. +// +// In most cases assignment should be an [ir.Node], but in cases where we +// do not follow the data-flow, we return either a concrete type (*types.Type) or a nil. +// For example in range over a slice, if the slice elem is of an interface type, then we return +// a nil, otherwise the elem's concrete type (We do so because we do not analyze assignment to the +// slice being ranged-over). +type assignment any + +// State holds precomputed state for use in [StaticCall]. +type State struct { + // ifaceAssignments maps interface variables to all their assignments + // defined inside functions stored in the analyzedFuncs set. + // Note: it does not include direct assignments to nil. + ifaceAssignments map[*ir.Name][]assignment + + // ifaceCallExprAssigns stores every [*ir.CallExpr], which has an interface + // result, that is assigned to a variable. + ifaceCallExprAssigns map[*ir.CallExpr][]ifaceAssignRef + + // analyzedFuncs is a set of Funcs that were analyzed for iface assignments. + analyzedFuncs map[*ir.Func]struct{} +} + +type ifaceAssignRef struct { + name *ir.Name // ifaceAssignments[name] + assignmentIndex int // ifaceAssignments[name][assignmentIndex] + returnIndex int // (*ir.CallExpr).Result(returnIndex) +} + +// InlinedCall updates the [State] to take into account a newly inlined call. +func (s *State) InlinedCall(fun *ir.Func, origCall *ir.CallExpr, inlinedCall *ir.InlinedCallExpr) { + if _, ok := s.analyzedFuncs[fun]; !ok { + // Full analyze has not been yet executed for the provided function, so we can skip it for now. + // When no devirtualization happens in a function, it is unnecessary to analyze it. + return + } + + // Analyze assignments in the newly inlined function. + s.analyze(inlinedCall.Init()) + s.analyze(inlinedCall.Body) + + refs, ok := s.ifaceCallExprAssigns[origCall] + if !ok { + return + } + delete(s.ifaceCallExprAssigns, origCall) + + // Update assignments to reference the new ReturnVars of the inlined call. + for _, ref := range refs { + vt := &s.ifaceAssignments[ref.name][ref.assignmentIndex] + if *vt != nil { + base.Fatalf("unexpected non-nil assignment") + } + if concreteTypeDebug { + base.Warn( + "InlinedCall(%v, %v): replacing interface node in (%v,%v) to %v (typ %v)", + origCall, inlinedCall, ref.name, ref.assignmentIndex, + inlinedCall.ReturnVars[ref.returnIndex], + inlinedCall.ReturnVars[ref.returnIndex].Type(), + ) + } + + // Update ifaceAssignments with an ir.Node from the inlined function’s ReturnVars. + // This may enable future devirtualization of calls that reference ref.name. + // We will get calls to [StaticCall] from the interleaved package, + // to try devirtualize such calls afterwards. + *vt = inlinedCall.ReturnVars[ref.returnIndex] + } +} + +// assignments returns all assignments to n. +func (s *State) assignments(n *ir.Name) []assignment { + fun := n.Curfn + if fun == nil { + base.FatalfAt(n.Pos(), "n.Curfn = ") + } + if n.Class != ir.PAUTO { + base.FatalfAt(n.Pos(), "n.Class = %v; want = PAUTO", n.Class) + } + + if !n.Type().IsInterface() { + base.FatalfAt(n.Pos(), "name passed to assignments is not of an interface type: %v", n.Type()) + } + + // Analyze assignments in func, if not analyzed before. 
+ if _, ok := s.analyzedFuncs[fun]; !ok { + if concreteTypeDebug { + base.Warn("assignments(): analyzing assignments in %v func", fun) + } + if s.analyzedFuncs == nil { + s.ifaceAssignments = make(map[*ir.Name][]assignment) + s.ifaceCallExprAssigns = make(map[*ir.CallExpr][]ifaceAssignRef) + s.analyzedFuncs = make(map[*ir.Func]struct{}) + } + s.analyzedFuncs[fun] = struct{}{} + s.analyze(fun.Init()) + s.analyze(fun.Body) + } + + return s.ifaceAssignments[n] +} + +// analyze analyzes every assignment to interface variables in nodes, updating [State]. +func (s *State) analyze(nodes ir.Nodes) { + assign := func(name ir.Node, assignment assignment) (*ir.Name, int) { + if name == nil || name.Op() != ir.ONAME || ir.IsBlank(name) { + return nil, -1 + } + + n, ok := ir.OuterValue(name).(*ir.Name) + if !ok || n.Curfn == nil { + return nil, -1 + } + + // Do not track variables that are not of interface types. + // For devirtualization they are unnecessary, we will not even look them up. + if !n.Type().IsInterface() { + return nil, -1 + } + + n = n.Canonical() + if n.Op() != ir.ONAME { + base.FatalfAt(n.Pos(), "n.Op = %v; want = ONAME", n.Op()) + } + if n.Class != ir.PAUTO { + return nil, -1 + } + + switch a := assignment.(type) { + case nil: + case *types.Type: + if a != nil && a.IsInterface() { + assignment = nil // non-concrete type + } + case ir.Node: + // nil assignment, we can safely ignore them, see [StaticCall]. + if ir.IsNil(a) { + return nil, -1 + } + default: + base.Fatalf("unexpected type: %v", assignment) + } + + if concreteTypeDebug { + base.Warn("analyze(): assignment found %v = %v", name, assignment) + } + + s.ifaceAssignments[n] = append(s.ifaceAssignments[n], assignment) + return n, len(s.ifaceAssignments[n]) - 1 + } + + var do func(n ir.Node) + do = func(n ir.Node) { + switch n.Op() { + case ir.OAS: + n := n.(*ir.AssignStmt) + if rhs := n.Y; rhs != nil { + for { + if r, ok := rhs.(*ir.ParenExpr); ok { + rhs = r.X + continue + } + break + } + if call, ok := rhs.(*ir.CallExpr); ok && call.Fun != nil { + retTyp := call.Fun.Type().Results()[0].Type + n, idx := assign(n.X, retTyp) + if n != nil && retTyp.IsInterface() { + // We have a call expression, that returns an interface, store it for later evaluation. + // In case this func gets inlined later, we will update the assignment (added before) + // with a reference to ReturnVars, see [State.InlinedCall], which might allow for future devirtualizing of n.X. 
+ s.ifaceCallExprAssigns[call] = append(s.ifaceCallExprAssigns[call], ifaceAssignRef{n, idx, 0}) + } + } else { + assign(n.X, rhs) + } + } + case ir.OAS2: + n := n.(*ir.AssignListStmt) + for i, p := range n.Lhs { + if n.Rhs[i] != nil { + assign(p, n.Rhs[i]) + } + } + case ir.OAS2DOTTYPE: + n := n.(*ir.AssignListStmt) + if n.Rhs[0] == nil { + base.FatalfAt(n.Pos(), "n.Rhs[0] == nil; n = %v", n) + } + assign(n.Lhs[0], n.Rhs[0]) + assign(n.Lhs[1], nil) // boolean does not have methods to devirtualize + case ir.OAS2MAPR, ir.OAS2RECV, ir.OSELRECV2: + n := n.(*ir.AssignListStmt) + if n.Rhs[0] == nil { + base.FatalfAt(n.Pos(), "n.Rhs[0] == nil; n = %v", n) + } + assign(n.Lhs[0], n.Rhs[0].Type()) + assign(n.Lhs[1], nil) // boolean does not have methods to devirtualize + case ir.OAS2FUNC: + n := n.(*ir.AssignListStmt) + rhs := n.Rhs[0] + for { + if r, ok := rhs.(*ir.ParenExpr); ok { + rhs = r.X + continue + } + break + } + if call, ok := rhs.(*ir.CallExpr); ok { + for i, p := range n.Lhs { + retTyp := call.Fun.Type().Results()[i].Type + n, idx := assign(p, retTyp) + if n != nil && retTyp.IsInterface() { + // We have a call expression, that returns an interface, store it for later evaluation. + // In case this func gets inlined later, we will update the assignment (added before) + // with a reference to ReturnVars, see [State.InlinedCall], which might allow for future devirtualizing of n.X. + s.ifaceCallExprAssigns[call] = append(s.ifaceCallExprAssigns[call], ifaceAssignRef{n, idx, i}) + } + } + } else if call, ok := rhs.(*ir.InlinedCallExpr); ok { + for i, p := range n.Lhs { + assign(p, call.ReturnVars[i]) + } + } else { + base.FatalfAt(n.Pos(), "unexpected type %T in OAS2FUNC Rhs[0]", call) + } + case ir.ORANGE: + n := n.(*ir.RangeStmt) + xTyp := n.X.Type() + + // Range over an array pointer. + if xTyp.IsPtr() && xTyp.Elem().IsArray() { + xTyp = xTyp.Elem() + } + + if xTyp.IsArray() || xTyp.IsSlice() { + assign(n.Key, nil) // integer does not have methods to devirtualize + assign(n.Value, xTyp.Elem()) + } else if xTyp.IsChan() { + assign(n.Key, xTyp.Elem()) + base.AssertfAt(n.Value == nil, n.Pos(), "n.Value != nil in range over chan") + } else if xTyp.IsMap() { + assign(n.Key, xTyp.Key()) + assign(n.Value, xTyp.Elem()) + } else if xTyp.IsInteger() || xTyp.IsString() { + // Range over int/string, results do not have methods, so nothing to devirtualize. + assign(n.Key, nil) + assign(n.Value, nil) + } else { + // We will not reach here in case of an range-over-func, as it is + // rewrtten to function calls in the noder package. 
+ base.FatalfAt(n.Pos(), "range over unexpected type %v", n.X.Type()) + } + case ir.OSWITCH: + n := n.(*ir.SwitchStmt) + if guard, ok := n.Tag.(*ir.TypeSwitchGuard); ok { + for _, v := range n.Cases { + if v.Var == nil { + base.Assert(guard.Tag == nil) + continue + } + assign(v.Var, guard.X) + } + } + case ir.OCLOSURE: + n := n.(*ir.ClosureExpr) + if _, ok := s.analyzedFuncs[n.Func]; !ok { + s.analyzedFuncs[n.Func] = struct{}{} + ir.Visit(n.Func, do) + } + } + } + ir.VisitList(nodes, do) +} diff --git a/src/cmd/compile/internal/escape/escape.go b/src/cmd/compile/internal/escape/escape.go index 6b34830b3dd5e6..59250edfef0078 100644 --- a/src/cmd/compile/internal/escape/escape.go +++ b/src/cmd/compile/internal/escape/escape.go @@ -563,7 +563,10 @@ func (b *batch) rewriteWithLiterals(n ir.Node, fn *ir.Func) { if ro == nil { base.Fatalf("no ReassignOracle for function %v with closure parent %v", fn, fn.ClosureParent) } - if s := ro.StaticValue(*r); s.Op() == ir.OLITERAL { + + s := ro.StaticValue(*r) + switch s.Op() { + case ir.OLITERAL: lit, ok := s.(*ir.BasicLit) if !ok || lit.Val().Kind() != constant.Int { base.Fatalf("unexpected BasicLit Kind") @@ -577,6 +580,14 @@ func (b *batch) rewriteWithLiterals(n ir.Node, fn *ir.Func) { assignTemp(n.Pos(), *r, n.PtrInit()) *r = ir.NewBasicLit(n.Pos(), (*r).Type(), lit.Val()) } + case ir.OLEN: + x := ro.StaticValue(s.(*ir.UnaryExpr).X) + if x.Op() == ir.OSLICELIT { + x := x.(*ir.CompLitExpr) + // Preserve any side effects of the original expression, then update the value. + assignTemp(n.Pos(), *r, n.PtrInit()) + *r = ir.NewBasicLit(n.Pos(), types.Types[types.TINT], constant.MakeInt64(x.Len)) + } } } case ir.OCONVIFACE: diff --git a/src/cmd/compile/internal/inline/interleaved/interleaved.go b/src/cmd/compile/internal/inline/interleaved/interleaved.go index 954cc306fc81d3..c83bbdb718df56 100644 --- a/src/cmd/compile/internal/inline/interleaved/interleaved.go +++ b/src/cmd/compile/internal/inline/interleaved/interleaved.go @@ -45,6 +45,8 @@ func DevirtualizeAndInlinePackage(pkg *ir.Package, profile *pgoir.Profile) { inlState := make(map[*ir.Func]*inlClosureState) calleeUseCounts := make(map[*ir.Func]int) + var state devirtualize.State + // Pre-process all the functions, adding parentheses around call sites and starting their "inl state". for _, fn := range typecheck.Target.Funcs { bigCaller := base.Flag.LowerL != 0 && inline.IsBigFunc(fn) @@ -58,7 +60,7 @@ func DevirtualizeAndInlinePackage(pkg *ir.Package, profile *pgoir.Profile) { // Do a first pass at counting call sites. for i := range s.parens { - s.resolve(i) + s.resolve(&state, i) } } @@ -102,10 +104,11 @@ func DevirtualizeAndInlinePackage(pkg *ir.Package, profile *pgoir.Profile) { for { for i := l0; i < l1; i++ { // can't use "range parens" here paren := s.parens[i] - if new := s.edit(i); new != nil { + if origCall, inlinedCall := s.edit(&state, i); inlinedCall != nil { // Update AST and recursively mark nodes. - paren.X = new - ir.EditChildren(new, s.mark) // mark may append to parens + paren.X = inlinedCall + ir.EditChildren(inlinedCall, s.mark) // mark may append to parens + state.InlinedCall(s.fn, origCall, inlinedCall) done = false } } @@ -114,7 +117,7 @@ func DevirtualizeAndInlinePackage(pkg *ir.Package, profile *pgoir.Profile) { break } for i := l0; i < l1; i++ { - s.resolve(i) + s.resolve(&state, i) } } @@ -188,7 +191,7 @@ type inlClosureState struct { // resolve attempts to resolve a call to a potentially inlineable callee // and updates use counts on the callees. 
Returns the call site count // for that callee. -func (s *inlClosureState) resolve(i int) (*ir.Func, int) { +func (s *inlClosureState) resolve(state *devirtualize.State, i int) (*ir.Func, int) { p := s.parens[i] if i < len(s.resolved) { if callee := s.resolved[i]; callee != nil { @@ -200,7 +203,7 @@ func (s *inlClosureState) resolve(i int) (*ir.Func, int) { if !ok { // previously inlined return nil, -1 } - devirtualize.StaticCall(call) + devirtualize.StaticCall(state, call) if callee := inline.InlineCallTarget(s.fn, call, s.profile); callee != nil { for len(s.resolved) <= i { s.resolved = append(s.resolved, nil) @@ -213,23 +216,23 @@ func (s *inlClosureState) resolve(i int) (*ir.Func, int) { return nil, 0 } -func (s *inlClosureState) edit(i int) ir.Node { +func (s *inlClosureState) edit(state *devirtualize.State, i int) (*ir.CallExpr, *ir.InlinedCallExpr) { n := s.parens[i].X call, ok := n.(*ir.CallExpr) if !ok { - return nil + return nil, nil } // This is redundant with earlier calls to // resolve, but because things can change it // must be re-checked. - callee, count := s.resolve(i) + callee, count := s.resolve(state, i) if count <= 0 { - return nil + return nil, nil } if inlCall := inline.TryInlineCall(s.fn, call, s.bigCaller, s.profile, count == 1 && callee.ClosureParent != nil); inlCall != nil { - return inlCall + return call, inlCall } - return nil + return nil, nil } // Mark inserts parentheses, and is called repeatedly. @@ -338,16 +341,18 @@ func (s *inlClosureState) unparenthesize() { // returns. func (s *inlClosureState) fixpoint() bool { changed := false + var state devirtualize.State ir.WithFunc(s.fn, func() { done := false for !done { done = true for i := 0; i < len(s.parens); i++ { // can't use "range parens" here paren := s.parens[i] - if new := s.edit(i); new != nil { + if origCall, inlinedCall := s.edit(&state, i); inlinedCall != nil { // Update AST and recursively mark nodes. - paren.X = new - ir.EditChildren(new, s.mark) // mark may append to parens + paren.X = inlinedCall + ir.EditChildren(inlinedCall, s.mark) // mark may append to parens + state.InlinedCall(s.fn, origCall, inlinedCall) done = false changed = true } diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index 9d34f27ce53363..037957b676a033 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -677,6 +677,11 @@ type TypeAssertExpr struct { // An internal/abi.TypeAssert descriptor to pass to the runtime. Descriptor *obj.LSym + + // When set to true, if this assert would panic, then use a nil pointer panic + // instead of an interface conversion panic. + // It must not be set for type asserts using the commaok form. 
+ UseNilPanic bool } func NewTypeAssertExpr(pos src.XPos, x Node, typ *types.Type) *TypeAssertExpr { diff --git a/src/cmd/compile/internal/ir/symtab.go b/src/cmd/compile/internal/ir/symtab.go index ee0f52fbf3f3b8..f8eb4578809312 100644 --- a/src/cmd/compile/internal/ir/symtab.go +++ b/src/cmd/compile/internal/ir/symtab.go @@ -13,47 +13,50 @@ import ( var Syms symsStruct type symsStruct struct { - AssertE2I *obj.LSym - AssertE2I2 *obj.LSym - Asanread *obj.LSym - Asanwrite *obj.LSym - CgoCheckMemmove *obj.LSym - CgoCheckPtrWrite *obj.LSym - CheckPtrAlignment *obj.LSym - Deferproc *obj.LSym - Deferprocat *obj.LSym - DeferprocStack *obj.LSym - Deferreturn *obj.LSym - Duffcopy *obj.LSym - Duffzero *obj.LSym - GCWriteBarrier [8]*obj.LSym - Goschedguarded *obj.LSym - Growslice *obj.LSym - InterfaceSwitch *obj.LSym - MallocGC *obj.LSym - Memmove *obj.LSym - Msanread *obj.LSym - Msanwrite *obj.LSym - Msanmove *obj.LSym - Newobject *obj.LSym - Newproc *obj.LSym - PanicBounds *obj.LSym - PanicExtend *obj.LSym - Panicdivide *obj.LSym - Panicshift *obj.LSym - PanicdottypeE *obj.LSym - PanicdottypeI *obj.LSym - Panicnildottype *obj.LSym - Panicoverflow *obj.LSym - Racefuncenter *obj.LSym - Racefuncexit *obj.LSym - Raceread *obj.LSym - Racereadrange *obj.LSym - Racewrite *obj.LSym - Racewriterange *obj.LSym - TypeAssert *obj.LSym - WBZero *obj.LSym - WBMove *obj.LSym + AssertE2I *obj.LSym + AssertE2I2 *obj.LSym + Asanread *obj.LSym + Asanwrite *obj.LSym + CgoCheckMemmove *obj.LSym + CgoCheckPtrWrite *obj.LSym + CheckPtrAlignment *obj.LSym + Deferproc *obj.LSym + Deferprocat *obj.LSym + DeferprocStack *obj.LSym + Deferreturn *obj.LSym + Duffcopy *obj.LSym + Duffzero *obj.LSym + GCWriteBarrier [8]*obj.LSym + Goschedguarded *obj.LSym + Growslice *obj.LSym + InterfaceSwitch *obj.LSym + MallocGC *obj.LSym + MallocGCSmallNoScan [27]*obj.LSym + MallocGCSmallScanNoHeader [27]*obj.LSym + MallocGCTiny [16]*obj.LSym + Memmove *obj.LSym + Msanread *obj.LSym + Msanwrite *obj.LSym + Msanmove *obj.LSym + Newobject *obj.LSym + Newproc *obj.LSym + PanicBounds *obj.LSym + PanicExtend *obj.LSym + Panicdivide *obj.LSym + Panicshift *obj.LSym + PanicdottypeE *obj.LSym + PanicdottypeI *obj.LSym + Panicnildottype *obj.LSym + Panicoverflow *obj.LSym + Racefuncenter *obj.LSym + Racefuncexit *obj.LSym + Raceread *obj.LSym + Racereadrange *obj.LSym + Racewrite *obj.LSym + Racewriterange *obj.LSym + TypeAssert *obj.LSym + WBZero *obj.LSym + WBMove *obj.LSym // Wasm SigPanic *obj.LSym Staticuint64s *obj.LSym diff --git a/src/cmd/compile/internal/noder/reader.go b/src/cmd/compile/internal/noder/reader.go index 41eb2dce1cc50f..a8a45b02697079 100644 --- a/src/cmd/compile/internal/noder/reader.go +++ b/src/cmd/compile/internal/noder/reader.go @@ -2961,6 +2961,7 @@ func (r *reader) multiExpr() []ir.Node { as.Def = true for i := range results { tmp := r.temp(pos, r.typ()) + tmp.Defn = as as.PtrInit().Append(ir.NewDecl(pos, ir.ODCL, tmp)) as.Lhs.Append(tmp) diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64.rules b/src/cmd/compile/internal/ssa/_gen/AMD64.rules index 7d3efef5cdc837..0bea99e38de1bc 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/AMD64.rules @@ -162,10 +162,19 @@ (Cvt64to32F ...) => (CVTSQ2SS ...) (Cvt64to64F ...) => (CVTSQ2SD ...) -(Cvt32Fto32 ...) => (CVTTSS2SL ...) -(Cvt32Fto64 ...) => (CVTTSS2SQ ...) -(Cvt64Fto32 ...) => (CVTTSD2SL ...) -(Cvt64Fto64 ...) => (CVTTSD2SQ ...) +// Float, to int. 
+// To make AMD64 "overflow" return max positive instead of max negative, compute +// y and not x, smear the sign bit, and xor. +(Cvt32Fto32 x) && base.ConvertHash.MatchPos(v.Pos, nil) => (XORL y (SARLconst [31] (ANDL y:(CVTTSS2SL x) (NOTL (MOVLf2i x))))) +(Cvt64Fto32 x) && base.ConvertHash.MatchPos(v.Pos, nil) => (XORL y (SARLconst [31] (ANDL y:(CVTTSD2SL x) (NOTL (MOVLf2i (CVTSD2SS x)))))) + +(Cvt32Fto64 x) && base.ConvertHash.MatchPos(v.Pos, nil) => (XORQ y (SARQconst [63] (ANDQ y:(CVTTSS2SQ x) (NOTQ (MOVQf2i (CVTSS2SD x))) ))) +(Cvt64Fto64 x) && base.ConvertHash.MatchPos(v.Pos, nil) => (XORQ y (SARQconst [63] (ANDQ y:(CVTTSD2SQ x) (NOTQ (MOVQf2i x))))) + +(Cvt32Fto32 x) && !base.ConvertHash.MatchPos(v.Pos, nil) => (CVTTSS2SL x) +(Cvt32Fto64 x) && !base.ConvertHash.MatchPos(v.Pos, nil) => (CVTTSS2SQ x) +(Cvt64Fto32 x) && !base.ConvertHash.MatchPos(v.Pos, nil) => (CVTTSD2SL x) +(Cvt64Fto64 x) && !base.ConvertHash.MatchPos(v.Pos, nil) => (CVTTSD2SQ x) (Cvt32Fto64F ...) => (CVTSS2SD ...) (Cvt64Fto32F ...) => (CVTSD2SS ...) diff --git a/src/cmd/compile/internal/ssa/_gen/ARM64Ops.go b/src/cmd/compile/internal/ssa/_gen/ARM64Ops.go index 43072ae9130ede..cc3758d10956d4 100644 --- a/src/cmd/compile/internal/ssa/_gen/ARM64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/ARM64Ops.go @@ -144,8 +144,9 @@ func init() { gpspsbg = gpspg | buildReg("SB") fp = buildReg("F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31") callerSave = gp | fp | buildReg("g") // runtime.setg (and anything calling it) may clobber g + r25 = buildReg("R25") r24to25 = buildReg("R24 R25") - r23to25 = buildReg("R23 R24 R25") + f16to17 = buildReg("F16 F17") rz = buildReg("ZERO") first16 = buildReg("R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15") ) @@ -599,8 +600,8 @@ func init() { aux: "Int64", argLength: 3, reg: regInfo{ - inputs: []regMask{gp &^ r24to25, gp &^ r24to25}, - clobbers: r24to25, // TODO: figure out needIntTemp x2 + inputs: []regMask{gp &^ r25, gp &^ r25}, + clobbers: r25 | f16to17, // TODO: figure out needIntTemp + x2 for floats }, faultOnNilArg0: true, faultOnNilArg1: true, @@ -617,8 +618,8 @@ func init() { aux: "Int64", argLength: 3, reg: regInfo{ - inputs: []regMask{gp &^ r23to25, gp &^ r23to25}, - clobbers: r23to25, // TODO: figure out needIntTemp x3 + inputs: []regMask{gp &^ r24to25, gp &^ r24to25}, + clobbers: r24to25 | f16to17, // TODO: figure out needIntTemp x2 + x2 for floats clobbersArg0: true, clobbersArg1: true, }, diff --git a/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go b/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go index a85a566660eee6..7e8b8bf497b8ff 100644 --- a/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go @@ -143,6 +143,7 @@ func init() { gp2load = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{gp}} gpstore = regInfo{inputs: []regMask{gpspsbg, gpg}} gpstore2 = regInfo{inputs: []regMask{gpspsbg, gpg, gpg | rz}} + gpoldatom = regInfo{inputs: []regMask{gpspsbg, gpg}} gpxchg = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{gp}} gpcas = regInfo{inputs: []regMask{gpspsbg, gpg, gpg}, outputs: []regMask{gp}} preldreg = regInfo{inputs: []regMask{gpspg}} @@ -431,6 +432,12 @@ func init() { faultOnNilArg1: true, }, + // Atomic operations. + // + // resultNotInArgs is needed by all ops lowering to LoongArch + // atomic memory access instructions, because these instructions + // are defined to require rd != rj && rd != rk per the ISA spec. 
+ // atomic loads. // load from arg0. arg1=mem. // returns so they can be properly ordered with other loads. @@ -500,8 +507,8 @@ func init() { // Atomic 32 bit AND/OR. // *arg0 &= (|=) arg1. arg2=mem. returns nil. - {name: "LoweredAtomicAnd32", argLength: 3, reg: gpxchg, asm: "AMANDDBW", resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true}, - {name: "LoweredAtomicOr32", argLength: 3, reg: gpxchg, asm: "AMORDBW", resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true}, + {name: "LoweredAtomicAnd32", argLength: 3, reg: gpoldatom, asm: "AMANDDBW", resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true}, + {name: "LoweredAtomicOr32", argLength: 3, reg: gpoldatom, asm: "AMORDBW", resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true}, // Atomic 32,64 bit AND/OR. // *arg0 &= (|=) arg1. arg2=mem. returns . auxint must be zero. diff --git a/src/cmd/compile/internal/ssa/_gen/Wasm.rules b/src/cmd/compile/internal/ssa/_gen/Wasm.rules index f3bd8d8b4f18f1..f632a01109f764 100644 --- a/src/cmd/compile/internal/ssa/_gen/Wasm.rules +++ b/src/cmd/compile/internal/ssa/_gen/Wasm.rules @@ -55,12 +55,9 @@ (ZeroExt32to64 x:(I64Load32U _ _)) => x (ZeroExt16to(64|32) x:(I64Load16U _ _)) => x (ZeroExt8to(64|32|16) x:(I64Load8U _ _)) => x -(SignExt32to64 x) && buildcfg.GOWASM.SignExt => (I64Extend32S x) -(SignExt8to(64|32|16) x) && buildcfg.GOWASM.SignExt => (I64Extend8S x) -(SignExt16to(64|32) x) && buildcfg.GOWASM.SignExt => (I64Extend16S x) -(SignExt32to64 x) => (I64ShrS (I64Shl x (I64Const [32])) (I64Const [32])) -(SignExt16to(64|32) x) => (I64ShrS (I64Shl x (I64Const [48])) (I64Const [48])) -(SignExt8to(64|32|16) x) => (I64ShrS (I64Shl x (I64Const [56])) (I64Const [56])) +(SignExt32to64 x) => (I64Extend32S x) +(SignExt8to(64|32|16) x) => (I64Extend8S x) +(SignExt16to(64|32) x) => (I64Extend16S x) (ZeroExt32to64 x) => (I64And x (I64Const [0xffffffff])) (ZeroExt16to(64|32) x) => (I64And x (I64Const [0xffff])) (ZeroExt8to(64|32|16) x) => (I64And x (I64Const [0xff])) diff --git a/src/cmd/compile/internal/ssa/_gen/generic.rules b/src/cmd/compile/internal/ssa/_gen/generic.rules index 58872ca85a3961..af9c24f53fd245 100644 --- a/src/cmd/compile/internal/ssa/_gen/generic.rules +++ b/src/cmd/compile/internal/ssa/_gen/generic.rules @@ -50,10 +50,10 @@ (Cvt32to64F (Const32 [c])) => (Const64F [float64(c)]) (Cvt64to32F (Const64 [c])) => (Const32F [float32(c)]) (Cvt64to64F (Const64 [c])) => (Const64F [float64(c)]) -(Cvt32Fto32 (Const32F [c])) => (Const32 [int32(c)]) -(Cvt32Fto64 (Const32F [c])) => (Const64 [int64(c)]) -(Cvt64Fto32 (Const64F [c])) => (Const32 [int32(c)]) -(Cvt64Fto64 (Const64F [c])) => (Const64 [int64(c)]) +(Cvt32Fto32 (Const32F [c])) && c >= -1<<31 && c < 1<<31 => (Const32 [int32(c)]) +(Cvt32Fto64 (Const32F [c])) && c >= -1<<63 && c < 1<<63 => (Const64 [int64(c)]) +(Cvt64Fto32 (Const64F [c])) && c >= -1<<31 && c < 1<<31 => (Const32 [int32(c)]) +(Cvt64Fto64 (Const64F [c])) && c >= -1<<63 && c < 1<<63 => (Const64 [int64(c)]) (Round32F x:(Const32F)) => x (Round64F x:(Const64F)) => x (CvtBoolToUint8 (ConstBool [false])) => (Const8 [0]) @@ -347,6 +347,22 @@ (OrB ((Less|Leq)16U (Const16 [c]) x) (Leq16U x (Const16 [d]))) && uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d) => ((Less|Leq)16U (Const16 [c-d-1]) (Sub16 x (Const16 [d+1]))) (OrB ((Less|Leq)8U (Const8 [c]) x) (Leq8U x (Const8 [d]))) && uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d) => ((Less|Leq)8U (Const8 [c-d-1]) (Sub8 x (Const8 [d+1]))) +// NaN check: ( x != x || x (>|>=|<|<=) c ) -> ( !(c (>=|>|<=|<) 
x) ) +(OrB (Neq64F x x) ((Less|Leq)64F x y:(Const64F [c]))) => (Not ((Leq|Less)64F y x)) +(OrB (Neq64F x x) ((Less|Leq)64F y:(Const64F [c]) x)) => (Not ((Leq|Less)64F x y)) +(OrB (Neq32F x x) ((Less|Leq)32F x y:(Const32F [c]))) => (Not ((Leq|Less)32F y x)) +(OrB (Neq32F x x) ((Less|Leq)32F y:(Const32F [c]) x)) => (Not ((Leq|Less)32F x y)) + +// NaN check: ( x != x || Abs(x) (>|>=|<|<=) c ) -> ( !(c (>=|>|<=|<) Abs(x) ) +(OrB (Neq64F x x) ((Less|Leq)64F abs:(Abs x) y:(Const64F [c]))) => (Not ((Leq|Less)64F y abs)) +(OrB (Neq64F x x) ((Less|Leq)64F y:(Const64F [c]) abs:(Abs x))) => (Not ((Leq|Less)64F abs y)) + +// NaN check: ( x != x || -x (>|>=|<|<=) c ) -> ( !(c (>=|>|<=|<) -x) ) +(OrB (Neq64F x x) ((Less|Leq)64F neg:(Neg64F x) y:(Const64F [c]))) => (Not ((Leq|Less)64F y neg)) +(OrB (Neq64F x x) ((Less|Leq)64F y:(Const64F [c]) neg:(Neg64F x))) => (Not ((Leq|Less)64F neg y)) +(OrB (Neq32F x x) ((Less|Leq)32F neg:(Neg32F x) y:(Const32F [c]))) => (Not ((Leq|Less)32F y neg)) +(OrB (Neq32F x x) ((Less|Leq)32F y:(Const32F [c]) neg:(Neg32F x))) => (Not ((Leq|Less)32F neg y)) + // Canonicalize x-const to x+(-const) (Sub64 x (Const64 [c])) && x.Op != OpConst64 => (Add64 (Const64 [-c]) x) (Sub32 x (Const32 [c])) && x.Op != OpConst32 => (Add32 (Const32 [-c]) x) @@ -989,6 +1005,10 @@ (Const64 [0]) (Const64 [0])) +// Special rule to help constant slicing; len > 0 implies cap > 0 implies Slicemask is all 1 +(SliceMake (AddPtr x (And64 y (Slicemask _))) w:(Const64 [c]) z) && c > 0 => (SliceMake (AddPtr x y) w z) +(SliceMake (AddPtr x (And32 y (Slicemask _))) w:(Const32 [c]) z) && c > 0 => (SliceMake (AddPtr x y) w z) + // interface ops (ConstInterface) => (IMake @@ -2045,28 +2065,32 @@ // for rewriting results of some late-expanded rewrites (below) (SelectN [n] m:(MakeResult ___)) => m.Args[n] +// TODO(matloob): Try out having non-zeroing mallocs for prointerless +// memory, and leaving the zeroing here. Then the compiler can remove +// the zeroing if the user has explicit writes to the whole object. + // for late-expanded calls, recognize newobject and remove zeroing and nilchecks -(Zero (SelectN [0] call:(StaticLECall _ _)) mem:(SelectN [1] call)) - && isSameCall(call.Aux, "runtime.newobject") +(Zero (SelectN [0] call:(StaticLECall ___)) mem:(SelectN [1] call)) + && isMalloc(call.Aux) => mem -(Store (SelectN [0] call:(StaticLECall _ _)) x mem:(SelectN [1] call)) +(Store (SelectN [0] call:(StaticLECall ___)) x mem:(SelectN [1] call)) && isConstZero(x) - && isSameCall(call.Aux, "runtime.newobject") + && isMalloc(call.Aux) => mem -(Store (OffPtr (SelectN [0] call:(StaticLECall _ _))) x mem:(SelectN [1] call)) +(Store (OffPtr (SelectN [0] call:(StaticLECall ___))) x mem:(SelectN [1] call)) && isConstZero(x) - && isSameCall(call.Aux, "runtime.newobject") + && isMalloc(call.Aux) => mem -(NilCheck ptr:(SelectN [0] call:(StaticLECall _ _)) _) - && isSameCall(call.Aux, "runtime.newobject") +(NilCheck ptr:(SelectN [0] call:(StaticLECall ___)) _) + && isMalloc(call.Aux) && warnRule(fe.Debug_checknil(), v, "removed nil check") => ptr -(NilCheck ptr:(OffPtr (SelectN [0] call:(StaticLECall _ _))) _) - && isSameCall(call.Aux, "runtime.newobject") +(NilCheck ptr:(OffPtr (SelectN [0] call:(StaticLECall ___))) _) + && isMalloc(call.Aux) && warnRule(fe.Debug_checknil(), v, "removed nil check") => ptr @@ -2079,6 +2103,9 @@ && warnRule(fe.Debug_checknil(), v, "removed nil check") => ptr +// .dict args are always non-nil. +(NilCheck ptr:(Arg {sym}) _) && isDictArgSym(sym) => ptr + // Nil checks of nil checks are redundant. 
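// Illustrative sketch (hypothetical example, not taken from this change): the
// ".dict" argument matched by isDictArgSym above is the compiler-generated
// dictionary parameter that every shaped instantiation of a generic function
// receives, e.g. for
//
//	func index[T any](s []T, i int) T { return s[i] }
//
// the instantiation gets an implicit ".dict" pointer describing T. That pointer
// always refers to static data, which is why nil checks derived from it can be
// removed.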
// See comment at the end of https://go-review.googlesource.com/c/go/+/537775. (NilCheck ptr:(NilCheck _ _) _ ) => ptr diff --git a/src/cmd/compile/internal/ssa/deadstore.go b/src/cmd/compile/internal/ssa/deadstore.go index 9e67e8339992c7..d0adff788c0a4f 100644 --- a/src/cmd/compile/internal/ssa/deadstore.go +++ b/src/cmd/compile/internal/ssa/deadstore.go @@ -7,6 +7,7 @@ package ssa import ( "cmd/compile/internal/ir" "cmd/compile/internal/types" + "cmd/internal/obj" ) // dse does dead-store elimination on the Function. @@ -213,7 +214,7 @@ func elimDeadAutosGeneric(f *Func) { case OpAddr, OpLocalAddr: // Propagate the address if it points to an auto. n, ok := v.Aux.(*ir.Name) - if !ok || n.Class != ir.PAUTO { + if !ok || (n.Class != ir.PAUTO && !isABIInternalParam(f, n)) { return } if addr[v] == nil { @@ -224,7 +225,7 @@ func elimDeadAutosGeneric(f *Func) { case OpVarDef: // v should be eliminated if we eliminate the auto. n, ok := v.Aux.(*ir.Name) - if !ok || n.Class != ir.PAUTO { + if !ok || (n.Class != ir.PAUTO && !isABIInternalParam(f, n)) { return } if elim[v] == nil { @@ -240,7 +241,7 @@ func elimDeadAutosGeneric(f *Func) { // may not be used by the inline code, but will be used by // panic processing). n, ok := v.Aux.(*ir.Name) - if !ok || n.Class != ir.PAUTO { + if !ok || (n.Class != ir.PAUTO && !isABIInternalParam(f, n)) { return } if !used.Has(n) { @@ -373,7 +374,7 @@ func elimUnreadAutos(f *Func) { if !ok { continue } - if n.Class != ir.PAUTO { + if n.Class != ir.PAUTO && !isABIInternalParam(f, n) { continue } @@ -413,3 +414,16 @@ func elimUnreadAutos(f *Func) { store.Op = OpCopy } } + +// isABIInternalParam returns whether n is a parameter of an ABIInternal +// function. For dead store elimination, we can treat parameters the same +// way as autos. Storing to a parameter can be removed if it is not read +// or address-taken. +// +// We check ABI here because for a cgo_unsafe_arg function (which is ABI0), +// all the args are effectively address-taken, but not necessarily have +// an Addr or LocalAddr op. We could probably just check for cgo_unsafe_arg, +// but ABIInternal is mostly what matters. +func isABIInternalParam(f *Func, n *ir.Name) bool { + return n.Class == ir.PPARAM && f.ABISelf.Which() == obj.ABIInternal +} diff --git a/src/cmd/compile/internal/ssa/func.go b/src/cmd/compile/internal/ssa/func.go index 5736f0b8126484..fc8cb3f2fef0af 100644 --- a/src/cmd/compile/internal/ssa/func.go +++ b/src/cmd/compile/internal/ssa/func.go @@ -102,6 +102,7 @@ func (c *Config) NewFunc(fe Frontend, cache *Cache) *Func { NamedValues: make(map[LocalSlot][]*Value), CanonicalLocalSlots: make(map[LocalSlot]*LocalSlot), CanonicalLocalSplits: make(map[LocalSlotSplitKey]*LocalSlot), + OwnAux: &AuxCall{}, } } diff --git a/src/cmd/compile/internal/ssa/func_test.go b/src/cmd/compile/internal/ssa/func_test.go index a1e639e0486b62..4639d674e145fa 100644 --- a/src/cmd/compile/internal/ssa/func_test.go +++ b/src/cmd/compile/internal/ssa/func_test.go @@ -250,6 +250,11 @@ func Exit(arg string) ctrl { return ctrl{BlockExit, arg, []string{}} } +// Ret specifies a BlockRet. +func Ret(arg string) ctrl { + return ctrl{BlockRet, arg, []string{}} +} + // Eq specifies a BlockAMD64EQ. 
func Eq(cond, sub, alt string) ctrl { return ctrl{BlockAMD64EQ, cond, []string{sub, alt}} diff --git a/src/cmd/compile/internal/ssa/fuse.go b/src/cmd/compile/internal/ssa/fuse.go index 68defde7b4b956..0cee91b532b101 100644 --- a/src/cmd/compile/internal/ssa/fuse.go +++ b/src/cmd/compile/internal/ssa/fuse.go @@ -9,8 +9,8 @@ import ( "fmt" ) -// fuseEarly runs fuse(f, fuseTypePlain|fuseTypeIntInRange). -func fuseEarly(f *Func) { fuse(f, fuseTypePlain|fuseTypeIntInRange) } +// fuseEarly runs fuse(f, fuseTypePlain|fuseTypeIntInRange|fuseTypeNanCheck). +func fuseEarly(f *Func) { fuse(f, fuseTypePlain|fuseTypeIntInRange|fuseTypeNanCheck) } // fuseLate runs fuse(f, fuseTypePlain|fuseTypeIf|fuseTypeBranchRedirect). func fuseLate(f *Func) { fuse(f, fuseTypePlain|fuseTypeIf|fuseTypeBranchRedirect) } @@ -21,6 +21,7 @@ const ( fuseTypePlain fuseType = 1 << iota fuseTypeIf fuseTypeIntInRange + fuseTypeNanCheck fuseTypeBranchRedirect fuseTypeShortCircuit ) @@ -38,7 +39,10 @@ func fuse(f *Func, typ fuseType) { changed = fuseBlockIf(b) || changed } if typ&fuseTypeIntInRange != 0 { - changed = fuseIntegerComparisons(b) || changed + changed = fuseIntInRange(b) || changed + } + if typ&fuseTypeNanCheck != 0 { + changed = fuseNanCheck(b) || changed } if typ&fuseTypePlain != 0 { changed = fuseBlockPlain(b) || changed diff --git a/src/cmd/compile/internal/ssa/fuse_comparisons.go b/src/cmd/compile/internal/ssa/fuse_comparisons.go index f5fb84b0d73532..b6eb8fcb90dfbc 100644 --- a/src/cmd/compile/internal/ssa/fuse_comparisons.go +++ b/src/cmd/compile/internal/ssa/fuse_comparisons.go @@ -4,21 +4,36 @@ package ssa -// fuseIntegerComparisons optimizes inequalities such as '1 <= x && x < 5', -// which can be optimized to 'unsigned(x-1) < 4'. -// -// Look for branch structure like: +// fuseIntInRange transforms integer range checks to remove the short-circuit operator. For example, +// it would convert `if 1 <= x && x < 5 { ... }` into `if (1 <= x) & (x < 5) { ... }`. Rewrite rules +// can then optimize these into unsigned range checks, `if unsigned(x-1) < 4 { ... }` in this case. +func fuseIntInRange(b *Block) bool { + return fuseComparisons(b, canOptIntInRange) +} + +// fuseNanCheck replaces the short-circuit operators between NaN checks and comparisons with +// constants. For example, it would transform `if x != x || x > 1.0 { ... }` into +// `if (x != x) | (x > 1.0) { ... }`. Rewrite rules can then merge the NaN check with the comparison, +// in this case generating `if !(x <= 1.0) { ... }`. +func fuseNanCheck(b *Block) bool { + return fuseComparisons(b, canOptNanCheck) +} + +// fuseComparisons looks for control graphs that match this pattern: // -// p +// p - predecessor // |\ -// | b +// | b - block // |/ \ -// s0 s1 +// s0 s1 - successors // -// In our example, p has control '1 <= x', b has control 'x < 5', -// and s0 and s1 are the if and else results of the comparison. +// This pattern is typical for if statements such as `if x || y { ... }` and `if x && y { ... }`. // -// This will be optimized into: +// If canOptControls returns true when passed the control values for p and b then fuseComparisons +// will try to convert p into a plain block with only one successor (b) and modify b's control +// value to include p's control value (effectively causing b to be speculatively executed). +// +// This transformation results in a control graph that will now look like this: // // p // \ @@ -26,9 +41,12 @@ package ssa // / \ // s0 s1 // -// where b has the combined control value 'unsigned(x-1) < 4'. 
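// Illustrative sketch (hypothetical functions, not part of this change) of the
// source-level effect of fuseIntInRange plus the later rewrite rules:
//
//	func inRange(x int64) bool {
//		return 1 <= x && x < 5 // short-circuit form as written
//	}
//
//	func inRangeOpt(x int64) bool {
//		return uint64(x-1) < 4 // single unsigned compare after rewriting
//	}
//
// The two forms are equivalent for signed x: when x < 1 the subtraction wraps
// to a large unsigned value, so one unsigned comparison covers both bounds.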
// Later passes will then fuse p and b. -func fuseIntegerComparisons(b *Block) bool { +// +// In other words `if x || y { ... }` will become `if x | y { ... }` and `if x && y { ... }` will +// become `if x & y { ... }`. This is a useful transformation because we can then use rewrite +// rules to optimize `x | y` and `x & y`. +func fuseComparisons(b *Block, canOptControls func(a, b *Value, op Op) bool) bool { if len(b.Preds) != 1 { return false } @@ -45,14 +63,6 @@ func fuseIntegerComparisons(b *Block) bool { return false } - // Check if the control values combine to make an integer inequality that - // can be further optimized later. - bc := b.Controls[0] - pc := p.Controls[0] - if !areMergeableInequalities(bc, pc) { - return false - } - // If the first (true) successors match then we have a disjunction (||). // If the second (false) successors match then we have a conjunction (&&). for i, op := range [2]Op{OpOrB, OpAndB} { @@ -60,6 +70,13 @@ func fuseIntegerComparisons(b *Block) bool { continue } + // Check if the control values can be usefully combined. + bc := b.Controls[0] + pc := p.Controls[0] + if !canOptControls(bc, pc, op) { + return false + } + // TODO(mundaym): should we also check the cost of executing b? // Currently we might speculatively execute b even if b contains // a lot of instructions. We could just check that len(b.Values) @@ -125,7 +142,7 @@ func isUnsignedInequality(v *Value) bool { return false } -func areMergeableInequalities(x, y *Value) bool { +func canOptIntInRange(x, y *Value, op Op) bool { // We need both inequalities to be either in the signed or unsigned domain. // TODO(mundaym): it would also be good to merge when we have an Eq op that // could be transformed into a Less/Leq. For example in the unsigned @@ -155,3 +172,60 @@ func areMergeableInequalities(x, y *Value) bool { } return false } + +// canOptNanCheck reports whether one of arguments is a NaN check and the other +// is a comparison with a constant that can be combined together. +// +// Examples (c must be a constant): +// +// v != v || v < c => !(c <= v) +// v != v || v <= c => !(c < v) +// v != v || c < v => !(v <= c) +// v != v || c <= v => !(v < c) +func canOptNanCheck(x, y *Value, op Op) bool { + if op != OpOrB { + return false + } + + for i := 0; i <= 1; i, x, y = i+1, y, x { + if len(x.Args) != 2 || x.Args[0] != x.Args[1] { + continue + } + v := x.Args[0] + switch x.Op { + case OpNeq64F: + if y.Op != OpLess64F && y.Op != OpLeq64F { + return false + } + for j := 0; j <= 1; j++ { + a, b := y.Args[j], y.Args[j^1] + if a.Op != OpConst64F { + continue + } + // Sign bit operations not affect NaN check results. This special case allows us + // to optimize statements like `if v != v || Abs(v) > c { ... }`. + if (b.Op == OpAbs || b.Op == OpNeg64F) && b.Args[0] == v { + return true + } + return b == v + } + case OpNeq32F: + if y.Op != OpLess32F && y.Op != OpLeq32F { + return false + } + for j := 0; j <= 1; j++ { + a, b := y.Args[j], y.Args[j^1] + if a.Op != OpConst32F { + continue + } + // Sign bit operations not affect NaN check results. This special case allows us + // to optimize statements like `if v != v || -v > c { ... }`. 
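// Illustrative sketch (hypothetical function, assumed typical of the code this
// targets): the short-circuit form and the fused result are equivalent because
// every ordered comparison involving NaN is false.
//
//	func isNaNOrBig(x float64) bool {
//		return x != x || x > 1.0 // NaN check plus compare against a constant
//	}
//
//	// after fuseNanCheck and the OrB rewrite rules:
//	func isNaNOrBigOpt(x float64) bool {
//		return !(x <= 1.0) // false only for non-NaN values <= 1.0
//	}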
+ if b.Op == OpNeg32F && b.Args[0] == v { + return true + } + return b == v + } + } + } + return false +} diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 92adf5341b2574..061f1333382af4 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -23199,10 +23199,10 @@ var opcodeTable = [...]opInfo{ faultOnNilArg1: true, reg: regInfo{ inputs: []inputInfo{ - {0, 310378495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R26 R30 - {1, 310378495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R26 R30 + {0, 318767103}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R26 R30 + {1, 318767103}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R26 R30 }, - clobbers: 25165824, // R24 R25 + clobbers: 422212481843200, // R25 F16 F17 }, }, { @@ -23213,10 +23213,10 @@ var opcodeTable = [...]opInfo{ faultOnNilArg1: true, reg: regInfo{ inputs: []inputInfo{ - {0, 306184191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R26 R30 - {1, 306184191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R26 R30 + {0, 310378495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R26 R30 + {1, 310378495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R26 R30 }, - clobbers: 29360128, // R23 R24 R25 + clobbers: 422212490231808, // R24 R25 F16 F17 clobbersArg0: true, clobbersArg1: true, }, @@ -26375,9 +26375,6 @@ var opcodeTable = [...]opInfo{ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 - }, }, }, { @@ -26392,9 +26389,6 @@ var opcodeTable = [...]opInfo{ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, - outputs: []outputInfo{ - {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 - }, }, }, { diff --git a/src/cmd/compile/internal/ssa/prove.go b/src/cmd/compile/internal/ssa/prove.go index 309229b4d753b7..c9bce23b286476 100644 --- a/src/cmd/compile/internal/ssa/prove.go +++ b/src/cmd/compile/internal/ssa/prove.go @@ -1240,6 +1240,173 @@ func (ft *factsTable) cleanup(f *Func) { f.Cache.freeBoolSlice(ft.recurseCheck) } +// addSlicesOfSameLen finds the slices that are in the same block and whose Op +// is OpPhi and always have the same length, then add the equality relationship +// between them to ft. If two slices start out with the same length and decrease +// in length by the same amount on each round of the loop (or in the if block), +// then we think their lengths are always equal. 
+// +// See https://go.dev/issues/75144 +// +// In fact, we are just propagating the equality +// +// if len(a) == len(b) { // from here +// for len(a) > 4 { +// a = a[4:] +// b = b[4:] +// } +// if len(a) == len(b) { // to here +// return true +// } +// } +// +// or change the for to if: +// +// if len(a) == len(b) { // from here +// if len(a) > 4 { +// a = a[4:] +// b = b[4:] +// } +// if len(a) == len(b) { // to here +// return true +// } +// } +func addSlicesOfSameLen(ft *factsTable, b *Block) { + // Let w points to the first value we're interested in, and then we + // only process those values ​​that appear to be the same length as w, + // looping only once. This should be enough in most cases. And u is + // similar to w, see comment for predIndex. + var u, w *Value + var i, j, k sliceInfo + isInterested := func(v *Value) bool { + j = getSliceInfo(v) + return j.sliceWhere != sliceUnknown + } + for _, v := range b.Values { + if v.Uses == 0 { + continue + } + if v.Op == OpPhi && len(v.Args) == 2 && ft.lens[v.ID] != nil && isInterested(v) { + if j.predIndex == 1 && ft.lens[v.Args[0].ID] != nil { + // found v = (Phi x (SliceMake _ (Add64 (Const64 [n]) (SliceLen x)) _))) or + // v = (Phi x (SliceMake _ (Add64 (Const64 [n]) (SliceLen v)) _))) + if w == nil { + k = j + w = v + continue + } + // propagate the equality + if j == k && ft.orderS.Equal(ft.lens[v.Args[0].ID], ft.lens[w.Args[0].ID]) { + ft.update(b, ft.lens[v.ID], ft.lens[w.ID], signed, eq) + } + } else if j.predIndex == 0 && ft.lens[v.Args[1].ID] != nil { + // found v = (Phi (SliceMake _ (Add64 (Const64 [n]) (SliceLen x)) _)) x) or + // v = (Phi (SliceMake _ (Add64 (Const64 [n]) (SliceLen v)) _)) x) + if u == nil { + i = j + u = v + continue + } + // propagate the equality + if j == i && ft.orderS.Equal(ft.lens[v.Args[1].ID], ft.lens[u.Args[1].ID]) { + ft.update(b, ft.lens[v.ID], ft.lens[u.ID], signed, eq) + } + } + } + } +} + +type sliceWhere int + +const ( + sliceUnknown sliceWhere = iota + sliceInFor + sliceInIf +) + +// predIndex is used to indicate the branch represented by the predecessor +// block in which the slicing operation occurs. +type predIndex int + +type sliceInfo struct { + lengthDiff int64 + sliceWhere + predIndex +} + +// getSliceInfo returns the negative increment of the slice length in a slice +// operation by examine the Phi node at the merge block. So, we only interest +// in the slice operation if it is inside a for block or an if block. +// Otherwise it returns sliceInfo{0, sliceUnknown, 0}. +// +// For the following for block: +// +// for len(a) > 4 { +// a = a[4:] +// } +// +// vp = (Phi v3 v9) +// v5 = (SliceLen vp) +// v7 = (Add64 (Const64 [-4]) v5) +// v9 = (SliceMake _ v7 _) +// +// returns sliceInfo{-4, sliceInFor, 1} +// +// For a subsequent merge block after an if block: +// +// if len(a) > 4 { +// a = a[4:] +// } +// a // here +// +// vp = (Phi v3 v9) +// v5 = (SliceLen v3) +// v7 = (Add64 (Const64 [-4]) v5) +// v9 = (SliceMake _ v7 _) +// +// returns sliceInfo{-4, sliceInIf, 1} +// +// Returns sliceInfo{0, sliceUnknown, 0} if it is not the slice +// operation we are interested in. 
+func getSliceInfo(vp *Value) (inf sliceInfo) { + if vp.Op != OpPhi || len(vp.Args) != 2 { + return + } + var i predIndex + var l *Value // length for OpSliceMake + if vp.Args[0].Op != OpSliceMake && vp.Args[1].Op == OpSliceMake { + l = vp.Args[1].Args[1] + i = 1 + } else if vp.Args[0].Op == OpSliceMake && vp.Args[1].Op != OpSliceMake { + l = vp.Args[0].Args[1] + i = 0 + } else { + return + } + var op Op + switch l.Op { + case OpAdd64: + op = OpConst64 + case OpAdd32: + op = OpConst32 + default: + return + } + if l.Args[0].Op == op && l.Args[1].Op == OpSliceLen && l.Args[1].Args[0] == vp { + return sliceInfo{l.Args[0].AuxInt, sliceInFor, i} + } + if l.Args[1].Op == op && l.Args[0].Op == OpSliceLen && l.Args[0].Args[0] == vp { + return sliceInfo{l.Args[1].AuxInt, sliceInFor, i} + } + if l.Args[0].Op == op && l.Args[1].Op == OpSliceLen && l.Args[1].Args[0] == vp.Args[1-i] { + return sliceInfo{l.Args[0].AuxInt, sliceInIf, i} + } + if l.Args[1].Op == op && l.Args[0].Op == OpSliceLen && l.Args[0].Args[0] == vp.Args[1-i] { + return sliceInfo{l.Args[1].AuxInt, sliceInIf, i} + } + return +} + // prove removes redundant BlockIf branches that can be inferred // from previous dominating comparisons. // @@ -1505,6 +1672,9 @@ func prove(f *Func) { addBranchRestrictions(ft, parent, branch) } + // Add slices of the same length start from current block. + addSlicesOfSameLen(ft, node.block) + if ft.unsat { // node.block is unreachable. // Remove it and don't visit @@ -1766,7 +1936,8 @@ func (ft *factsTable) flowLimit(v *Value) bool { b := ft.limits[v.Args[1].ID] sub := ft.newLimit(v, a.sub(b, uint(v.Type.Size())*8)) mod := ft.detectSignedMod(v) - return sub || mod + inferred := ft.detectSliceLenRelation(v) + return sub || mod || inferred case OpNeg64, OpNeg32, OpNeg16, OpNeg8: a := ft.limits[v.Args[0].ID] bitsize := uint(v.Type.Size()) * 8 @@ -1947,6 +2118,68 @@ func (ft *factsTable) detectSignedMod(v *Value) bool { // TODO: non-powers-of-2 return false } + +// detectSliceLenRelation matches the pattern where +// 1. v := slicelen - index, OR v := slicecap - index +// AND +// 2. index <= slicelen - K +// THEN +// +// slicecap - index >= slicelen - index >= K +// +// Note that "index" is not useed for indexing in this pattern, but +// in the motivating example (chunked slice iteration) it is. 
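// Illustrative motivating pattern (hypothetical loop; names assumed): chunked
// iteration where the index is bounded by len(s)-K rather than by len(s) itself.
//
//	for i := 0; i+4 <= len(s); i += 4 {
//		// i <= len(s)-4 implies len(s)-i >= 4, so this reslice
//		// needs no bounds check.
//		process(s[i : i+4])
//	}
//
// detectSliceLenRelation extracts exactly this "len(s) - i >= K" fact from the
// ordering "i <= len(s) - K".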
+func (ft *factsTable) detectSliceLenRelation(v *Value) (inferred bool) { + if v.Op != OpSub64 { + return false + } + + if !(v.Args[0].Op == OpSliceLen || v.Args[0].Op == OpSliceCap) { + return false + } + + slice := v.Args[0].Args[0] + index := v.Args[1] + + for o := ft.orderings[index.ID]; o != nil; o = o.next { + if o.d != signed { + continue + } + or := o.r + if or != lt && or != lt|eq { + continue + } + ow := o.w + if ow.Op != OpAdd64 && ow.Op != OpSub64 { + continue + } + var lenOffset *Value + if bound := ow.Args[0]; bound.Op == OpSliceLen && bound.Args[0] == slice { + lenOffset = ow.Args[1] + } else if bound := ow.Args[1]; bound.Op == OpSliceLen && bound.Args[0] == slice { + lenOffset = ow.Args[0] + } + if lenOffset == nil || lenOffset.Op != OpConst64 { + continue + } + K := lenOffset.AuxInt + if ow.Op == OpAdd64 { + K = -K + } + if K < 0 { + continue + } + if or == lt { + K++ + } + if K < 0 { // We hate thinking about overflow + continue + } + inferred = inferred || ft.signedMin(v, K) + } + return inferred +} + func (ft *factsTable) detectSignedModByPowerOfTwo(v *Value) bool { // We're looking for: // @@ -2174,6 +2407,76 @@ func unsignedSubUnderflows(a, b uint64) bool { return a < b } +// checkForChunkedIndexBounds looks for index expressions of the form +// A[i+delta] where delta < K and i <= len(A)-K. That is, this is a chunked +// iteration where the index is not directly compared to the length. +// if isReslice, then delta can be equal to K. +func checkForChunkedIndexBounds(ft *factsTable, b *Block, index, bound *Value, isReslice bool) bool { + if bound.Op != OpSliceLen && bound.Op != OpSliceCap { + return false + } + + // this is a slice bounds check against len or capacity, + // and refers back to a prior check against length, which + // will also work for the cap since that is not smaller + // than the length. + + slice := bound.Args[0] + lim := ft.limits[index.ID] + if lim.min < 0 { + return false + } + i, delta := isConstDelta(index) + if i == nil { + return false + } + if delta < 0 { + return false + } + // special case for blocked iteration over a slice. + // slicelen > i + delta && <==== if clauses above + // && index >= 0 <==== if clause above + // delta >= 0 && <==== if clause above + // slicelen-K >/>= x <==== checked below + // && K >=/> delta <==== checked below + // then v > w + // example: i <=/< len - 4/3 means i+{0,1,2,3} are legal indices + for o := ft.orderings[i.ID]; o != nil; o = o.next { + if o.d != signed { + continue + } + if ow := o.w; ow.Op == OpAdd64 { + var lenOffset *Value + if bound := ow.Args[0]; bound.Op == OpSliceLen && bound.Args[0] == slice { + lenOffset = ow.Args[1] + } else if bound := ow.Args[1]; bound.Op == OpSliceLen && bound.Args[0] == slice { + lenOffset = ow.Args[0] + } + if lenOffset == nil || lenOffset.Op != OpConst64 { + continue + } + if K := -lenOffset.AuxInt; K >= 0 { + or := o.r + if isReslice { + K++ + } + if or == lt { + or = lt | eq + K++ + } + if K < 0 { // We hate thinking about overflow + continue + } + + if delta < K && or == lt|eq { + return true + } + } + } + } + return false +} + func addLocalFacts(ft *factsTable, b *Block) { // Propagate constant ranges among values in this block. 
// We do this before the second loop so that we have the @@ -2285,6 +2588,20 @@ func addLocalFacts(ft *factsTable, b *Block) { if v.Args[0].Op == OpSliceMake { ft.update(b, v, v.Args[0].Args[2], signed, eq) } + case OpIsInBounds: + if checkForChunkedIndexBounds(ft, b, v.Args[0], v.Args[1], false) { + if b.Func.pass.debug > 0 { + b.Func.Warnl(v.Pos, "Proved %s for blocked indexing", v.Op) + } + ft.booleanTrue(v) + } + case OpIsSliceInBounds: + if checkForChunkedIndexBounds(ft, b, v.Args[0], v.Args[1], true) { + if b.Func.pass.debug > 0 { + b.Func.Warnl(v.Pos, "Proved %s for blocked reslicing", v.Op) + } + ft.booleanTrue(v) + } case OpPhi: addLocalFactsPhi(ft, v) } @@ -2382,24 +2699,38 @@ func simplifyBlock(sdom SparseTree, ft *factsTable, b *Block) { switch v.Op { case OpSlicemask: // Replace OpSlicemask operations in b with constants where possible. - x, delta := isConstDelta(v.Args[0]) - if x == nil { + cap := v.Args[0] + x, delta := isConstDelta(cap) + if x != nil { + // slicemask(x + y) + // if x is larger than -y (y is negative), then slicemask is -1. + lim := ft.limits[x.ID] + if lim.umin > uint64(-delta) { + if cap.Op == OpAdd64 { + v.reset(OpConst64) + } else { + v.reset(OpConst32) + } + if b.Func.pass.debug > 0 { + b.Func.Warnl(v.Pos, "Proved slicemask not needed") + } + v.AuxInt = -1 + } break } - // slicemask(x + y) - // if x is larger than -y (y is negative), then slicemask is -1. - lim := ft.limits[x.ID] - if lim.umin > uint64(-delta) { - if v.Args[0].Op == OpAdd64 { + lim := ft.limits[cap.ID] + if lim.umin > 0 { + if cap.Type.Size() == 8 { v.reset(OpConst64) } else { v.reset(OpConst32) } if b.Func.pass.debug > 0 { - b.Func.Warnl(v.Pos, "Proved slicemask not needed") + b.Func.Warnl(v.Pos, "Proved slicemask not needed (by limit)") } v.AuxInt = -1 } + case OpCtz8, OpCtz16, OpCtz32, OpCtz64: // On some architectures, notably amd64, we can generate much better // code for CtzNN if we know that the argument is non-zero. diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go index 1ce85a8f63b76a..0009de4fa69608 100644 --- a/src/cmd/compile/internal/ssa/regalloc.go +++ b/src/cmd/compile/internal/ssa/regalloc.go @@ -609,6 +609,29 @@ func (s *regAllocState) allocValToReg(v *Value, mask regMask, nospill bool, pos } else if v.rematerializeable() { // Rematerialize instead of loading from the spill location. c = v.copyIntoWithXPos(s.curBlock, pos) + // We need to consider its output mask and potentially issue a Copy + // if there are register mask conflicts. + // This currently happens for the SIMD package only between GP and FP + // register. Because Intel's vector extension can put integer value into + // FP, which is seen as a vector. Example instruction: VPSLL[BWDQ] + // Because GP and FP masks do not overlap, mask & outputMask == 0 + // detects this situation thoroughly. + sourceMask := s.regspec(c).outputs[0].regs + if mask&sourceMask == 0 && !onWasmStack { + s.setOrig(c, v) + s.assignReg(s.allocReg(sourceMask, v), v, c) + // v.Type for the new OpCopy is likely wrong and it might delay the problem + // until ssa to asm lowering, which might need the types to generate the right + // assembly for OpCopy. For Intel's GP to FP move, it happens to be that + // MOV instruction has such a variant so it happens to be right. + // But it's unclear for other architectures or situations, and the problem + // might be exposed when the assembler sees illegal instructions. 
+ // Right now make we still pick v.Type, because at least its size should be correct + // for the rematerialization case the amd64 SIMD package exposed. + // TODO: We might need to figure out a way to find the correct type or make + // the asm lowering use reg info only for OpCopy. + c = s.curBlock.NewValue1(pos, OpCopy, v.Type, c) + } } else { // Load v from its spill location. spill := s.makeSpill(v, s.curBlock) @@ -2538,7 +2561,29 @@ func (e *edgeState) processDest(loc Location, vid ID, splice **Value, pos src.XP e.s.f.Fatalf("can't find source for %s->%s: %s\n", e.p, e.b, v.LongString()) } if dstReg { - x = v.copyInto(e.p) + // We want to rematerialize v into a register that is incompatible with v's op's register mask. + // Instead of setting the wrong register for the rematerialized v, we should find the right register + // for it and emit an additional copy to move to the desired register. + // For #70451. + if e.s.regspec(v).outputs[0].regs®Mask(1< regDebug { @@ -2764,6 +2806,7 @@ func (s *regAllocState) computeLive() { s.live = make([][]liveInfo, f.NumBlocks()) s.desired = make([]desiredState, f.NumBlocks()) var phis []*Value + rematIDs := make([]ID, 0, 64) live := f.newSparseMapPos(f.NumValues()) defer f.retSparseMapPos(live) @@ -2816,9 +2859,20 @@ func (s *regAllocState) computeLive() { continue } if opcodeTable[v.Op].call { + rematIDs = rematIDs[:0] c := live.contents() for i := range c { c[i].val += unlikelyDistance + vid := c[i].key + if s.values[vid].rematerializeable { + rematIDs = append(rematIDs, vid) + } + } + // We don't spill rematerializeable values, and assuming they + // are live across a call would only force shuffle to add some + // (dead) constant rematerialization. Remove them. + for _, r := range rematIDs { + live.remove(r) } } for _, a := range v.Args { diff --git a/src/cmd/compile/internal/ssa/regalloc_test.go b/src/cmd/compile/internal/ssa/regalloc_test.go index 0f69b852d12971..12f5820f1ff81d 100644 --- a/src/cmd/compile/internal/ssa/regalloc_test.go +++ b/src/cmd/compile/internal/ssa/regalloc_test.go @@ -6,6 +6,7 @@ package ssa import ( "cmd/compile/internal/types" + "cmd/internal/obj/x86" "fmt" "testing" ) @@ -264,6 +265,48 @@ func TestClobbersArg1(t *testing.T) { } } +func TestNoRematerializeDeadConstant(t *testing.T) { + c := testConfigARM64(t) + f := c.Fun("b1", + Bloc("b1", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("addr", OpArg, c.config.Types.Int32.PtrTo(), 0, c.Temp(c.config.Types.Int32.PtrTo())), + Valu("const", OpARM64MOVDconst, c.config.Types.Int32, -1, nil), // Original constant + Valu("cmp", OpARM64CMPconst, types.TypeFlags, 0, nil, "const"), + Goto("b2"), + ), + Bloc("b2", + Valu("phi_mem", OpPhi, types.TypeMem, 0, nil, "mem", "callmem"), + Eq("cmp", "b6", "b3"), + ), + Bloc("b3", + Valu("call", OpARM64CALLstatic, types.TypeMem, 0, AuxCallLSym("_"), "phi_mem"), + Valu("callmem", OpSelectN, types.TypeMem, 0, nil, "call"), + Eq("cmp", "b5", "b4"), + ), + Bloc("b4", // A block where we don't really need to rematerialize the constant -1 + Goto("b2"), + ), + Bloc("b5", + Valu("user", OpAMD64MOVQstore, types.TypeMem, 0, nil, "addr", "const", "callmem"), + Exit("user"), + ), + Bloc("b6", + Exit("phi_mem"), + ), + ) + + regalloc(f.f) + checkFunc(f.f) + + // Check that in block b4, there's no dead rematerialization of the constant -1 + for _, v := range f.blocks["b4"].Values { + if v.Op == OpARM64MOVDconst && v.AuxInt == -1 { + t.Errorf("constant -1 rematerialized in loop block b4: %s", v.LongString()) + } + } +} + func numSpills(b 
*Block) int { return numOps(b, OpStoreReg) } @@ -279,3 +322,27 @@ func numOps(b *Block, op Op) int { } return n } + +func TestRematerializeableRegCompatible(t *testing.T) { + c := testConfig(t) + f := c.Fun("entry", + Bloc("entry", + Valu("mem", OpInitMem, types.TypeMem, 0, nil), + Valu("x", OpAMD64MOVLconst, c.config.Types.Int32, 1, nil), + Valu("a", OpAMD64POR, c.config.Types.Float32, 0, nil, "x", "x"), + Valu("res", OpMakeResult, types.NewResults([]*types.Type{c.config.Types.Float32, types.TypeMem}), 0, nil, "a", "mem"), + Ret("res"), + ), + ) + regalloc(f.f) + checkFunc(f.f) + moveFound := false + for _, v := range f.f.Blocks[0].Values { + if v.Op == OpCopy && x86.REG_X0 <= v.Reg() && v.Reg() <= x86.REG_X31 { + moveFound = true + } + } + if !moveFound { + t.Errorf("Expects an Copy to be issued, but got: %+v", f.f) + } +} diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go index 6d83ba565317a3..f02019df384f5f 100644 --- a/src/cmd/compile/internal/ssa/rewrite.go +++ b/src/cmd/compile/internal/ssa/rewrite.go @@ -6,9 +6,11 @@ package ssa import ( "cmd/compile/internal/base" + "cmd/compile/internal/ir" "cmd/compile/internal/logopt" "cmd/compile/internal/reflectdata" "cmd/compile/internal/rttype" + "cmd/compile/internal/typecheck" "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/obj/s390x" @@ -454,6 +456,26 @@ func isSameCall(aux Aux, name string) bool { return fn != nil && fn.String() == name } +func isMalloc(aux Aux) bool { + return isNewObject(aux) || isSpecializedMalloc(aux) +} + +func isNewObject(aux Aux) bool { + fn := aux.(*AuxCall).Fn + return fn != nil && fn.String() == "runtime.newobject" +} + +func isSpecializedMalloc(aux Aux) bool { + fn := aux.(*AuxCall).Fn + if fn == nil { + return false + } + name := fn.String() + return strings.HasPrefix(name, "runtime.mallocgcSmallNoScanSC") || + strings.HasPrefix(name, "runtime.mallocgcSmallScanNoHeaderSC") || + strings.HasPrefix(name, "runtime.mallocTiny") +} + // canLoadUnaligned reports if the architecture supports unaligned load operations. func canLoadUnaligned(c *Config) bool { return c.ctxt.Arch.Alignment == 1 @@ -2057,12 +2079,12 @@ func isFixedLoad(v *Value, sym Sym, off int64) bool { return false } - if strings.HasPrefix(lsym.Name, "type:") { + if ti := lsym.TypeInfo(); ti != nil { // Type symbols do not contain information about their fields, unlike the cases above. // Hand-implement field accesses. // TODO: can this be replaced with reflectdata.writeType and just use the code above? - t := (*lsym.Extra).(*obj.TypeInfo).Type.(*types.Type) + t := ti.Type.(*types.Type) for _, f := range rttype.Type.Fields() { if f.Offset == off && copyCompatibleType(v.Type, f.Type) { @@ -2116,12 +2138,12 @@ func rewriteFixedLoad(v *Value, sym Sym, sb *Value, off int64) *Value { base.Fatalf("fixedLoad data not known for %s:%d", sym, off) } - if strings.HasPrefix(lsym.Name, "type:") { + if ti := lsym.TypeInfo(); ti != nil { // Type symbols do not contain information about their fields, unlike the cases above. // Hand-implement field accesses. // TODO: can this be replaced with reflectdata.writeType and just use the code above? 
- t := (*lsym.Extra).(*obj.TypeInfo).Type.(*types.Type) + t := ti.Type.(*types.Type) ptrSizedOpConst := OpConst64 if f.Config.PtrSize == 4 { @@ -2611,10 +2633,7 @@ func isDirectType1(v *Value) bool { return isDirectType2(v.Args[0]) case OpAddr: lsym := v.Aux.(*obj.LSym) - if lsym.Extra == nil { - return false - } - if ti, ok := (*lsym.Extra).(*obj.TypeInfo); ok { + if ti := lsym.TypeInfo(); ti != nil { return types.IsDirectIface(ti.Type.(*types.Type)) } } @@ -2647,10 +2666,7 @@ func isDirectIface1(v *Value, depth int) bool { return isDirectIface2(v.Args[0], depth-1) case OpAddr: lsym := v.Aux.(*obj.LSym) - if lsym.Extra == nil { - return false - } - if ii, ok := (*lsym.Extra).(*obj.ItabInfo); ok { + if ii := lsym.ItabInfo(); ii != nil { return types.IsDirectIface(ii.Type.(*types.Type)) } case OpConstNil: @@ -2744,3 +2760,7 @@ func panicBoundsCToAux(p PanicBoundsC) Aux { func panicBoundsCCToAux(p PanicBoundsCC) Aux { return p } + +func isDictArgSym(sym Sym) bool { + return sym.(*ir.Name).Sym().Name == typecheck.LocalDictName +} diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index a7ee632ae1af72..e702925f5f3bac 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -5,6 +5,7 @@ package ssa import "internal/buildcfg" import "math" import "cmd/internal/obj" +import "cmd/compile/internal/base" import "cmd/compile/internal/types" func rewriteValueAMD64(v *Value) bool { @@ -694,11 +695,9 @@ func rewriteValueAMD64(v *Value) bool { case OpCtz8NonZero: return rewriteValueAMD64_OpCtz8NonZero(v) case OpCvt32Fto32: - v.Op = OpAMD64CVTTSS2SL - return true + return rewriteValueAMD64_OpCvt32Fto32(v) case OpCvt32Fto64: - v.Op = OpAMD64CVTTSS2SQ - return true + return rewriteValueAMD64_OpCvt32Fto64(v) case OpCvt32Fto64F: v.Op = OpAMD64CVTSS2SD return true @@ -709,14 +708,12 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64CVTSL2SD return true case OpCvt64Fto32: - v.Op = OpAMD64CVTTSD2SL - return true + return rewriteValueAMD64_OpCvt64Fto32(v) case OpCvt64Fto32F: v.Op = OpAMD64CVTSD2SS return true case OpCvt64Fto64: - v.Op = OpAMD64CVTTSD2SQ - return true + return rewriteValueAMD64_OpCvt64Fto64(v) case OpCvt64to32F: v.Op = OpAMD64CVTSQ2SS return true @@ -25511,6 +25508,190 @@ func rewriteValueAMD64_OpCtz8NonZero(v *Value) bool { } return false } +func rewriteValueAMD64_OpCvt32Fto32(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Cvt32Fto32 x) + // cond: base.ConvertHash.MatchPos(v.Pos, nil) + // result: (XORL y (SARLconst [31] (ANDL y:(CVTTSS2SL x) (NOTL (MOVLf2i x))))) + for { + t := v.Type + x := v_0 + if !(base.ConvertHash.MatchPos(v.Pos, nil)) { + break + } + v.reset(OpAMD64XORL) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64SARLconst, t) + v0.AuxInt = int8ToAuxInt(31) + v1 := b.NewValue0(v.Pos, OpAMD64ANDL, t) + y := b.NewValue0(v.Pos, OpAMD64CVTTSS2SL, t) + y.AddArg(x) + v3 := b.NewValue0(v.Pos, OpAMD64NOTL, typ.Int32) + v4 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32) + v4.AddArg(x) + v3.AddArg(v4) + v1.AddArg2(y, v3) + v0.AddArg(v1) + v.AddArg2(y, v0) + return true + } + // match: (Cvt32Fto32 x) + // cond: !base.ConvertHash.MatchPos(v.Pos, nil) + // result: (CVTTSS2SL x) + for { + t := v.Type + x := v_0 + if !(!base.ConvertHash.MatchPos(v.Pos, nil)) { + break + } + v.reset(OpAMD64CVTTSS2SL) + v.Type = t + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpCvt32Fto64(v *Value) bool { + v_0 := v.Args[0] + b 
:= v.Block + typ := &b.Func.Config.Types + // match: (Cvt32Fto64 x) + // cond: base.ConvertHash.MatchPos(v.Pos, nil) + // result: (XORQ y (SARQconst [63] (ANDQ y:(CVTTSS2SQ x) (NOTQ (MOVQf2i (CVTSS2SD x))) ))) + for { + t := v.Type + x := v_0 + if !(base.ConvertHash.MatchPos(v.Pos, nil)) { + break + } + v.reset(OpAMD64XORQ) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64SARQconst, t) + v0.AuxInt = int8ToAuxInt(63) + v1 := b.NewValue0(v.Pos, OpAMD64ANDQ, t) + y := b.NewValue0(v.Pos, OpAMD64CVTTSS2SQ, t) + y.AddArg(x) + v3 := b.NewValue0(v.Pos, OpAMD64NOTQ, typ.Int64) + v4 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64) + v5 := b.NewValue0(v.Pos, OpAMD64CVTSS2SD, typ.Float64) + v5.AddArg(x) + v4.AddArg(v5) + v3.AddArg(v4) + v1.AddArg2(y, v3) + v0.AddArg(v1) + v.AddArg2(y, v0) + return true + } + // match: (Cvt32Fto64 x) + // cond: !base.ConvertHash.MatchPos(v.Pos, nil) + // result: (CVTTSS2SQ x) + for { + t := v.Type + x := v_0 + if !(!base.ConvertHash.MatchPos(v.Pos, nil)) { + break + } + v.reset(OpAMD64CVTTSS2SQ) + v.Type = t + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpCvt64Fto32(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Cvt64Fto32 x) + // cond: base.ConvertHash.MatchPos(v.Pos, nil) + // result: (XORL y (SARLconst [31] (ANDL y:(CVTTSD2SL x) (NOTL (MOVLf2i (CVTSD2SS x)))))) + for { + t := v.Type + x := v_0 + if !(base.ConvertHash.MatchPos(v.Pos, nil)) { + break + } + v.reset(OpAMD64XORL) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64SARLconst, t) + v0.AuxInt = int8ToAuxInt(31) + v1 := b.NewValue0(v.Pos, OpAMD64ANDL, t) + y := b.NewValue0(v.Pos, OpAMD64CVTTSD2SL, t) + y.AddArg(x) + v3 := b.NewValue0(v.Pos, OpAMD64NOTL, typ.Int32) + v4 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32) + v5 := b.NewValue0(v.Pos, OpAMD64CVTSD2SS, typ.Float32) + v5.AddArg(x) + v4.AddArg(v5) + v3.AddArg(v4) + v1.AddArg2(y, v3) + v0.AddArg(v1) + v.AddArg2(y, v0) + return true + } + // match: (Cvt64Fto32 x) + // cond: !base.ConvertHash.MatchPos(v.Pos, nil) + // result: (CVTTSD2SL x) + for { + t := v.Type + x := v_0 + if !(!base.ConvertHash.MatchPos(v.Pos, nil)) { + break + } + v.reset(OpAMD64CVTTSD2SL) + v.Type = t + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpCvt64Fto64(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Cvt64Fto64 x) + // cond: base.ConvertHash.MatchPos(v.Pos, nil) + // result: (XORQ y (SARQconst [63] (ANDQ y:(CVTTSD2SQ x) (NOTQ (MOVQf2i x))))) + for { + t := v.Type + x := v_0 + if !(base.ConvertHash.MatchPos(v.Pos, nil)) { + break + } + v.reset(OpAMD64XORQ) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64SARQconst, t) + v0.AuxInt = int8ToAuxInt(63) + v1 := b.NewValue0(v.Pos, OpAMD64ANDQ, t) + y := b.NewValue0(v.Pos, OpAMD64CVTTSD2SQ, t) + y.AddArg(x) + v3 := b.NewValue0(v.Pos, OpAMD64NOTQ, typ.Int64) + v4 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64) + v4.AddArg(x) + v3.AddArg(v4) + v1.AddArg2(y, v3) + v0.AddArg(v1) + v.AddArg2(y, v0) + return true + } + // match: (Cvt64Fto64 x) + // cond: !base.ConvertHash.MatchPos(v.Pos, nil) + // result: (CVTTSD2SQ x) + for { + t := v.Type + x := v_0 + if !(!base.ConvertHash.MatchPos(v.Pos, nil)) { + break + } + v.reset(OpAMD64CVTTSD2SQ) + v.Type = t + v.AddArg(x) + return true + } + return false +} func rewriteValueAMD64_OpDiv16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] diff --git a/src/cmd/compile/internal/ssa/rewriteWasm.go b/src/cmd/compile/internal/ssa/rewriteWasm.go index 
c3c5528aaa6ab4..a164a6eee555b9 100644 --- a/src/cmd/compile/internal/ssa/rewriteWasm.go +++ b/src/cmd/compile/internal/ssa/rewriteWasm.go @@ -2,7 +2,6 @@ package ssa -import "internal/buildcfg" import "math" import "cmd/compile/internal/types" @@ -3202,8 +3201,6 @@ func rewriteValueWasm_OpRsh8x8(v *Value) bool { } func rewriteValueWasm_OpSignExt16to32(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types // match: (SignExt16to32 x:(I64Load16S _ _)) // result: x for { @@ -3215,34 +3212,16 @@ func rewriteValueWasm_OpSignExt16to32(v *Value) bool { return true } // match: (SignExt16to32 x) - // cond: buildcfg.GOWASM.SignExt // result: (I64Extend16S x) for { x := v_0 - if !(buildcfg.GOWASM.SignExt) { - break - } v.reset(OpWasmI64Extend16S) v.AddArg(x) return true } - // match: (SignExt16to32 x) - // result: (I64ShrS (I64Shl x (I64Const [48])) (I64Const [48])) - for { - x := v_0 - v.reset(OpWasmI64ShrS) - v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64) - v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) - v1.AuxInt = int64ToAuxInt(48) - v0.AddArg2(x, v1) - v.AddArg2(v0, v1) - return true - } } func rewriteValueWasm_OpSignExt16to64(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types // match: (SignExt16to64 x:(I64Load16S _ _)) // result: x for { @@ -3254,34 +3233,16 @@ func rewriteValueWasm_OpSignExt16to64(v *Value) bool { return true } // match: (SignExt16to64 x) - // cond: buildcfg.GOWASM.SignExt // result: (I64Extend16S x) for { x := v_0 - if !(buildcfg.GOWASM.SignExt) { - break - } v.reset(OpWasmI64Extend16S) v.AddArg(x) return true } - // match: (SignExt16to64 x) - // result: (I64ShrS (I64Shl x (I64Const [48])) (I64Const [48])) - for { - x := v_0 - v.reset(OpWasmI64ShrS) - v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64) - v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) - v1.AuxInt = int64ToAuxInt(48) - v0.AddArg2(x, v1) - v.AddArg2(v0, v1) - return true - } } func rewriteValueWasm_OpSignExt32to64(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types // match: (SignExt32to64 x:(I64Load32S _ _)) // result: x for { @@ -3293,34 +3254,16 @@ func rewriteValueWasm_OpSignExt32to64(v *Value) bool { return true } // match: (SignExt32to64 x) - // cond: buildcfg.GOWASM.SignExt // result: (I64Extend32S x) for { x := v_0 - if !(buildcfg.GOWASM.SignExt) { - break - } v.reset(OpWasmI64Extend32S) v.AddArg(x) return true } - // match: (SignExt32to64 x) - // result: (I64ShrS (I64Shl x (I64Const [32])) (I64Const [32])) - for { - x := v_0 - v.reset(OpWasmI64ShrS) - v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64) - v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) - v1.AuxInt = int64ToAuxInt(32) - v0.AddArg2(x, v1) - v.AddArg2(v0, v1) - return true - } } func rewriteValueWasm_OpSignExt8to16(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types // match: (SignExt8to16 x:(I64Load8S _ _)) // result: x for { @@ -3332,34 +3275,16 @@ func rewriteValueWasm_OpSignExt8to16(v *Value) bool { return true } // match: (SignExt8to16 x) - // cond: buildcfg.GOWASM.SignExt // result: (I64Extend8S x) for { x := v_0 - if !(buildcfg.GOWASM.SignExt) { - break - } v.reset(OpWasmI64Extend8S) v.AddArg(x) return true } - // match: (SignExt8to16 x) - // result: (I64ShrS (I64Shl x (I64Const [56])) (I64Const [56])) - for { - x := v_0 - v.reset(OpWasmI64ShrS) - v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64) - v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) - v1.AuxInt = int64ToAuxInt(56) - v0.AddArg2(x, v1) - 
v.AddArg2(v0, v1) - return true - } } func rewriteValueWasm_OpSignExt8to32(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types // match: (SignExt8to32 x:(I64Load8S _ _)) // result: x for { @@ -3371,34 +3296,16 @@ func rewriteValueWasm_OpSignExt8to32(v *Value) bool { return true } // match: (SignExt8to32 x) - // cond: buildcfg.GOWASM.SignExt // result: (I64Extend8S x) for { x := v_0 - if !(buildcfg.GOWASM.SignExt) { - break - } v.reset(OpWasmI64Extend8S) v.AddArg(x) return true } - // match: (SignExt8to32 x) - // result: (I64ShrS (I64Shl x (I64Const [56])) (I64Const [56])) - for { - x := v_0 - v.reset(OpWasmI64ShrS) - v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64) - v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) - v1.AuxInt = int64ToAuxInt(56) - v0.AddArg2(x, v1) - v.AddArg2(v0, v1) - return true - } } func rewriteValueWasm_OpSignExt8to64(v *Value) bool { v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types // match: (SignExt8to64 x:(I64Load8S _ _)) // result: x for { @@ -3410,29 +3317,13 @@ func rewriteValueWasm_OpSignExt8to64(v *Value) bool { return true } // match: (SignExt8to64 x) - // cond: buildcfg.GOWASM.SignExt // result: (I64Extend8S x) for { x := v_0 - if !(buildcfg.GOWASM.SignExt) { - break - } v.reset(OpWasmI64Extend8S) v.AddArg(x) return true } - // match: (SignExt8to64 x) - // result: (I64ShrS (I64Shl x (I64Const [56])) (I64Const [56])) - for { - x := v_0 - v.reset(OpWasmI64ShrS) - v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64) - v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) - v1.AuxInt = int64ToAuxInt(56) - v0.AddArg2(x, v1) - v.AddArg2(v0, v1) - return true - } } func rewriteValueWasm_OpSlicemask(v *Value) bool { v_0 := v.Args[0] diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go index 7e23194e6aba12..79c444a86b29df 100644 --- a/src/cmd/compile/internal/ssa/rewritegeneric.go +++ b/src/cmd/compile/internal/ssa/rewritegeneric.go @@ -422,6 +422,8 @@ func rewriteValuegeneric(v *Value) bool { return rewriteValuegeneric_OpSliceCap(v) case OpSliceLen: return rewriteValuegeneric_OpSliceLen(v) + case OpSliceMake: + return rewriteValuegeneric_OpSliceMake(v) case OpSlicePtr: return rewriteValuegeneric_OpSlicePtr(v) case OpSlicemask: @@ -6605,12 +6607,16 @@ func rewriteValuegeneric_OpCtz8(v *Value) bool { func rewriteValuegeneric_OpCvt32Fto32(v *Value) bool { v_0 := v.Args[0] // match: (Cvt32Fto32 (Const32F [c])) + // cond: c >= -1<<31 && c < 1<<31 // result: (Const32 [int32(c)]) for { if v_0.Op != OpConst32F { break } c := auxIntToFloat32(v_0.AuxInt) + if !(c >= -1<<31 && c < 1<<31) { + break + } v.reset(OpConst32) v.AuxInt = int32ToAuxInt(int32(c)) return true @@ -6620,12 +6626,16 @@ func rewriteValuegeneric_OpCvt32Fto32(v *Value) bool { func rewriteValuegeneric_OpCvt32Fto64(v *Value) bool { v_0 := v.Args[0] // match: (Cvt32Fto64 (Const32F [c])) + // cond: c >= -1<<63 && c < 1<<63 // result: (Const64 [int64(c)]) for { if v_0.Op != OpConst32F { break } c := auxIntToFloat32(v_0.AuxInt) + if !(c >= -1<<63 && c < 1<<63) { + break + } v.reset(OpConst64) v.AuxInt = int64ToAuxInt(int64(c)) return true @@ -6680,12 +6690,16 @@ func rewriteValuegeneric_OpCvt32to64F(v *Value) bool { func rewriteValuegeneric_OpCvt64Fto32(v *Value) bool { v_0 := v.Args[0] // match: (Cvt64Fto32 (Const64F [c])) + // cond: c >= -1<<31 && c < 1<<31 // result: (Const32 [int32(c)]) for { if v_0.Op != OpConst64F { break } c := auxIntToFloat64(v_0.AuxInt) + if !(c >= -1<<31 && c < 1<<31) { + break + } 
v.reset(OpConst32) v.AuxInt = int32ToAuxInt(int32(c)) return true @@ -6730,12 +6744,16 @@ func rewriteValuegeneric_OpCvt64Fto32F(v *Value) bool { func rewriteValuegeneric_OpCvt64Fto64(v *Value) bool { v_0 := v.Args[0] // match: (Cvt64Fto64 (Const64F [c])) + // cond: c >= -1<<63 && c < 1<<63 // result: (Const64 [int64(c)]) for { if v_0.Op != OpConst64F { break } c := auxIntToFloat64(v_0.AuxInt) + if !(c >= -1<<63 && c < 1<<63) { + break + } v.reset(OpConst64) v.AuxInt = int64ToAuxInt(int64(c)) return true @@ -21316,8 +21334,8 @@ func rewriteValuegeneric_OpNilCheck(v *Value) bool { v.copyOf(ptr) return true } - // match: (NilCheck ptr:(SelectN [0] call:(StaticLECall _ _)) _) - // cond: isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check") + // match: (NilCheck ptr:(SelectN [0] call:(StaticLECall ___)) _) + // cond: isMalloc(call.Aux) && warnRule(fe.Debug_checknil(), v, "removed nil check") // result: ptr for { ptr := v_0 @@ -21325,14 +21343,17 @@ func rewriteValuegeneric_OpNilCheck(v *Value) bool { break } call := ptr.Args[0] - if call.Op != OpStaticLECall || len(call.Args) != 2 || !(isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check")) { + if call.Op != OpStaticLECall { + break + } + if !(isMalloc(call.Aux) && warnRule(fe.Debug_checknil(), v, "removed nil check")) { break } v.copyOf(ptr) return true } - // match: (NilCheck ptr:(OffPtr (SelectN [0] call:(StaticLECall _ _))) _) - // cond: isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check") + // match: (NilCheck ptr:(OffPtr (SelectN [0] call:(StaticLECall ___))) _) + // cond: isMalloc(call.Aux) && warnRule(fe.Debug_checknil(), v, "removed nil check") // result: ptr for { ptr := v_0 @@ -21344,7 +21365,10 @@ func rewriteValuegeneric_OpNilCheck(v *Value) bool { break } call := ptr_0.Args[0] - if call.Op != OpStaticLECall || len(call.Args) != 2 || !(isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check")) { + if call.Op != OpStaticLECall { + break + } + if !(isMalloc(call.Aux) && warnRule(fe.Debug_checknil(), v, "removed nil check")) { break } v.copyOf(ptr) @@ -21393,6 +21417,21 @@ func rewriteValuegeneric_OpNilCheck(v *Value) bool { v.copyOf(ptr) return true } + // match: (NilCheck ptr:(Arg {sym}) _) + // cond: isDictArgSym(sym) + // result: ptr + for { + ptr := v_0 + if ptr.Op != OpArg { + break + } + sym := auxToSym(ptr.Aux) + if !(isDictArgSym(sym)) { + break + } + v.copyOf(ptr) + return true + } // match: (NilCheck ptr:(NilCheck _ _) _ ) // result: ptr for { @@ -23940,6 +23979,7 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + typ := &b.Func.Config.Types // match: (OrB (Less64 (Const64 [c]) x) (Less64 x (Const64 [d]))) // cond: c >= d // result: (Less64U (Const64 [c-d]) (Sub64 x (Const64 [d]))) @@ -25252,6 +25292,558 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { } break } + // match: (OrB (Neq64F x x) (Less64F x y:(Const64F [c]))) + // result: (Not (Leq64F y x)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq64F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLess64F { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + y := v_1.Args[1] + if y.Op != OpConst64F { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLeq64F, typ.Bool) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } + break + } + // match: (OrB 
(Neq64F x x) (Leq64F x y:(Const64F [c]))) + // result: (Not (Less64F y x)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq64F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLeq64F { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + y := v_1.Args[1] + if y.Op != OpConst64F { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess64F, typ.Bool) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq64F x x) (Less64F y:(Const64F [c]) x)) + // result: (Not (Leq64F x y)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq64F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLess64F { + continue + } + _ = v_1.Args[1] + y := v_1.Args[0] + if y.Op != OpConst64F { + continue + } + if x != v_1.Args[1] { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLeq64F, typ.Bool) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq64F x x) (Leq64F y:(Const64F [c]) x)) + // result: (Not (Less64F x y)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq64F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLeq64F { + continue + } + _ = v_1.Args[1] + y := v_1.Args[0] + if y.Op != OpConst64F { + continue + } + if x != v_1.Args[1] { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess64F, typ.Bool) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq32F x x) (Less32F x y:(Const32F [c]))) + // result: (Not (Leq32F y x)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq32F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLess32F { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + y := v_1.Args[1] + if y.Op != OpConst32F { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLeq32F, typ.Bool) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq32F x x) (Leq32F x y:(Const32F [c]))) + // result: (Not (Less32F y x)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq32F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLeq32F { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + y := v_1.Args[1] + if y.Op != OpConst32F { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess32F, typ.Bool) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq32F x x) (Less32F y:(Const32F [c]) x)) + // result: (Not (Leq32F x y)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq32F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLess32F { + continue + } + _ = v_1.Args[1] + y := v_1.Args[0] + if y.Op != OpConst32F { + continue + } + if x != v_1.Args[1] { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLeq32F, typ.Bool) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq32F x x) (Leq32F y:(Const32F [c]) x)) + // result: (Not (Less32F x y)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq32F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLeq32F { + continue + } + _ = v_1.Args[1] + y := v_1.Args[0] + if y.Op != OpConst32F { + continue + } + if x != v_1.Args[1] { + continue + } + 
v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess32F, typ.Bool) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq64F x x) (Less64F abs:(Abs x) y:(Const64F [c]))) + // result: (Not (Leq64F y abs)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq64F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLess64F { + continue + } + _ = v_1.Args[1] + abs := v_1.Args[0] + if abs.Op != OpAbs || x != abs.Args[0] { + continue + } + y := v_1.Args[1] + if y.Op != OpConst64F { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLeq64F, typ.Bool) + v0.AddArg2(y, abs) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq64F x x) (Leq64F abs:(Abs x) y:(Const64F [c]))) + // result: (Not (Less64F y abs)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq64F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLeq64F { + continue + } + _ = v_1.Args[1] + abs := v_1.Args[0] + if abs.Op != OpAbs || x != abs.Args[0] { + continue + } + y := v_1.Args[1] + if y.Op != OpConst64F { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess64F, typ.Bool) + v0.AddArg2(y, abs) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq64F x x) (Less64F y:(Const64F [c]) abs:(Abs x))) + // result: (Not (Leq64F abs y)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq64F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLess64F { + continue + } + _ = v_1.Args[1] + y := v_1.Args[0] + if y.Op != OpConst64F { + continue + } + abs := v_1.Args[1] + if abs.Op != OpAbs || x != abs.Args[0] { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLeq64F, typ.Bool) + v0.AddArg2(abs, y) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq64F x x) (Leq64F y:(Const64F [c]) abs:(Abs x))) + // result: (Not (Less64F abs y)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq64F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLeq64F { + continue + } + _ = v_1.Args[1] + y := v_1.Args[0] + if y.Op != OpConst64F { + continue + } + abs := v_1.Args[1] + if abs.Op != OpAbs || x != abs.Args[0] { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess64F, typ.Bool) + v0.AddArg2(abs, y) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq64F x x) (Less64F neg:(Neg64F x) y:(Const64F [c]))) + // result: (Not (Leq64F y neg)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq64F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLess64F { + continue + } + _ = v_1.Args[1] + neg := v_1.Args[0] + if neg.Op != OpNeg64F || x != neg.Args[0] { + continue + } + y := v_1.Args[1] + if y.Op != OpConst64F { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLeq64F, typ.Bool) + v0.AddArg2(y, neg) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq64F x x) (Leq64F neg:(Neg64F x) y:(Const64F [c]))) + // result: (Not (Less64F y neg)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq64F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLeq64F { + continue + } + _ = v_1.Args[1] + neg := v_1.Args[0] + if neg.Op != OpNeg64F || x != neg.Args[0] { + continue + } + y := v_1.Args[1] + if y.Op != OpConst64F { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess64F, 
typ.Bool) + v0.AddArg2(y, neg) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq64F x x) (Less64F y:(Const64F [c]) neg:(Neg64F x))) + // result: (Not (Leq64F neg y)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq64F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLess64F { + continue + } + _ = v_1.Args[1] + y := v_1.Args[0] + if y.Op != OpConst64F { + continue + } + neg := v_1.Args[1] + if neg.Op != OpNeg64F || x != neg.Args[0] { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLeq64F, typ.Bool) + v0.AddArg2(neg, y) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq64F x x) (Leq64F y:(Const64F [c]) neg:(Neg64F x))) + // result: (Not (Less64F neg y)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq64F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLeq64F { + continue + } + _ = v_1.Args[1] + y := v_1.Args[0] + if y.Op != OpConst64F { + continue + } + neg := v_1.Args[1] + if neg.Op != OpNeg64F || x != neg.Args[0] { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess64F, typ.Bool) + v0.AddArg2(neg, y) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq32F x x) (Less32F neg:(Neg32F x) y:(Const32F [c]))) + // result: (Not (Leq32F y neg)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq32F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLess32F { + continue + } + _ = v_1.Args[1] + neg := v_1.Args[0] + if neg.Op != OpNeg32F || x != neg.Args[0] { + continue + } + y := v_1.Args[1] + if y.Op != OpConst32F { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLeq32F, typ.Bool) + v0.AddArg2(y, neg) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq32F x x) (Leq32F neg:(Neg32F x) y:(Const32F [c]))) + // result: (Not (Less32F y neg)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq32F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLeq32F { + continue + } + _ = v_1.Args[1] + neg := v_1.Args[0] + if neg.Op != OpNeg32F || x != neg.Args[0] { + continue + } + y := v_1.Args[1] + if y.Op != OpConst32F { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess32F, typ.Bool) + v0.AddArg2(y, neg) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq32F x x) (Less32F y:(Const32F [c]) neg:(Neg32F x))) + // result: (Not (Leq32F neg y)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq32F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLess32F { + continue + } + _ = v_1.Args[1] + y := v_1.Args[0] + if y.Op != OpConst32F { + continue + } + neg := v_1.Args[1] + if neg.Op != OpNeg32F || x != neg.Args[0] { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLeq32F, typ.Bool) + v0.AddArg2(neg, y) + v.AddArg(v0) + return true + } + break + } + // match: (OrB (Neq32F x x) (Leq32F y:(Const32F [c]) neg:(Neg32F x))) + // result: (Not (Less32F neg y)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq32F { + continue + } + x := v_0.Args[1] + if x != v_0.Args[0] || v_1.Op != OpLeq32F { + continue + } + _ = v_1.Args[1] + y := v_1.Args[0] + if y.Op != OpConst32F { + continue + } + neg := v_1.Args[1] + if neg.Op != OpNeg32F || x != neg.Args[0] { + continue + } + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess32F, typ.Bool) + v0.AddArg2(neg, y) + 
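// A minimal sketch of the equivalence behind the OrB rules being added here,
// which fuse a NaN check with a comparison against a constant: "x != x || x < c"
// can be rewritten as "!(c <= x)" because every ordered comparison with a NaN
// operand evaluates to false.
package main

import (
	"fmt"
	"math"
)

//go:noinline
func fused(x float64) bool { return !(0 <= x) } // one comparison

//go:noinline
func unfused(x float64) bool { return math.IsNaN(x) || x < 0 } // NaN check plus comparison

func main() {
	for _, x := range []float64{math.NaN(), math.Inf(-1), -1, math.Copysign(0, -1), 0, 1, math.Inf(1)} {
		fmt.Printf("%v: %v\n", x, fused(x) == unfused(x)) // always true
	}
}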
v.AddArg(v0) + return true + } + break + } return false } func rewriteValuegeneric_OpPhi(v *Value) bool { @@ -30499,6 +31091,91 @@ func rewriteValuegeneric_OpSliceLen(v *Value) bool { } return false } +func rewriteValuegeneric_OpSliceMake(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (SliceMake (AddPtr x (And64 y (Slicemask _))) w:(Const64 [c]) z) + // cond: c > 0 + // result: (SliceMake (AddPtr x y) w z) + for { + if v_0.Op != OpAddPtr { + break + } + t := v_0.Type + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpAnd64 { + break + } + _ = v_0_1.Args[1] + v_0_1_0 := v_0_1.Args[0] + v_0_1_1 := v_0_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_1_0, v_0_1_1 = _i0+1, v_0_1_1, v_0_1_0 { + y := v_0_1_0 + if v_0_1_1.Op != OpSlicemask { + continue + } + w := v_1 + if w.Op != OpConst64 { + continue + } + c := auxIntToInt64(w.AuxInt) + z := v_2 + if !(c > 0) { + continue + } + v.reset(OpSliceMake) + v0 := b.NewValue0(v.Pos, OpAddPtr, t) + v0.AddArg2(x, y) + v.AddArg3(v0, w, z) + return true + } + break + } + // match: (SliceMake (AddPtr x (And32 y (Slicemask _))) w:(Const32 [c]) z) + // cond: c > 0 + // result: (SliceMake (AddPtr x y) w z) + for { + if v_0.Op != OpAddPtr { + break + } + t := v_0.Type + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpAnd32 { + break + } + _ = v_0_1.Args[1] + v_0_1_0 := v_0_1.Args[0] + v_0_1_1 := v_0_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_1_0, v_0_1_1 = _i0+1, v_0_1_1, v_0_1_0 { + y := v_0_1_0 + if v_0_1_1.Op != OpSlicemask { + continue + } + w := v_1 + if w.Op != OpConst32 { + continue + } + c := auxIntToInt32(w.AuxInt) + z := v_2 + if !(c > 0) { + continue + } + v.reset(OpSliceMake) + v0 := b.NewValue0(v.Pos, OpAddPtr, t) + v0.AddArg2(x, y) + v.AddArg3(v0, w, z) + return true + } + break + } + return false +} func rewriteValuegeneric_OpSlicePtr(v *Value) bool { v_0 := v.Args[0] // match: (SlicePtr (SliceMake (SlicePtr x) _ _)) @@ -31808,27 +32485,27 @@ func rewriteValuegeneric_OpStore(v *Value) bool { v.AddArg3(dst, e, mem) return true } - // match: (Store (SelectN [0] call:(StaticLECall _ _)) x mem:(SelectN [1] call)) - // cond: isConstZero(x) && isSameCall(call.Aux, "runtime.newobject") + // match: (Store (SelectN [0] call:(StaticLECall ___)) x mem:(SelectN [1] call)) + // cond: isConstZero(x) && isMalloc(call.Aux) // result: mem for { if v_0.Op != OpSelectN || auxIntToInt64(v_0.AuxInt) != 0 { break } call := v_0.Args[0] - if call.Op != OpStaticLECall || len(call.Args) != 2 { + if call.Op != OpStaticLECall { break } x := v_1 mem := v_2 - if mem.Op != OpSelectN || auxIntToInt64(mem.AuxInt) != 1 || call != mem.Args[0] || !(isConstZero(x) && isSameCall(call.Aux, "runtime.newobject")) { + if mem.Op != OpSelectN || auxIntToInt64(mem.AuxInt) != 1 || call != mem.Args[0] || !(isConstZero(x) && isMalloc(call.Aux)) { break } v.copyOf(mem) return true } - // match: (Store (OffPtr (SelectN [0] call:(StaticLECall _ _))) x mem:(SelectN [1] call)) - // cond: isConstZero(x) && isSameCall(call.Aux, "runtime.newobject") + // match: (Store (OffPtr (SelectN [0] call:(StaticLECall ___))) x mem:(SelectN [1] call)) + // cond: isConstZero(x) && isMalloc(call.Aux) // result: mem for { if v_0.Op != OpOffPtr { @@ -31839,12 +32516,12 @@ func rewriteValuegeneric_OpStore(v *Value) bool { break } call := v_0_0.Args[0] - if call.Op != OpStaticLECall || len(call.Args) != 2 { + if call.Op != OpStaticLECall { break } x := v_1 mem := v_2 - if mem.Op != OpSelectN || 
auxIntToInt64(mem.AuxInt) != 1 || call != mem.Args[0] || !(isConstZero(x) && isSameCall(call.Aux, "runtime.newobject")) { + if mem.Op != OpSelectN || auxIntToInt64(mem.AuxInt) != 1 || call != mem.Args[0] || !(isConstZero(x) && isMalloc(call.Aux)) { break } v.copyOf(mem) @@ -36187,19 +36864,19 @@ func rewriteValuegeneric_OpZero(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Zero (SelectN [0] call:(StaticLECall _ _)) mem:(SelectN [1] call)) - // cond: isSameCall(call.Aux, "runtime.newobject") + // match: (Zero (SelectN [0] call:(StaticLECall ___)) mem:(SelectN [1] call)) + // cond: isMalloc(call.Aux) // result: mem for { if v_0.Op != OpSelectN || auxIntToInt64(v_0.AuxInt) != 0 { break } call := v_0.Args[0] - if call.Op != OpStaticLECall || len(call.Args) != 2 { + if call.Op != OpStaticLECall { break } mem := v_1 - if mem.Op != OpSelectN || auxIntToInt64(mem.AuxInt) != 1 || call != mem.Args[0] || !(isSameCall(call.Aux, "runtime.newobject")) { + if mem.Op != OpSelectN || auxIntToInt64(mem.AuxInt) != 1 || call != mem.Args[0] || !(isMalloc(call.Aux)) { break } v.copyOf(mem) diff --git a/src/cmd/compile/internal/ssa/stmtlines_test.go b/src/cmd/compile/internal/ssa/stmtlines_test.go index 8cd11e9828e0f9..d0f09da86ffce2 100644 --- a/src/cmd/compile/internal/ssa/stmtlines_test.go +++ b/src/cmd/compile/internal/ssa/stmtlines_test.go @@ -137,17 +137,17 @@ func TestStmtLines(t *testing.T) { } } - var m int + var m float64 if runtime.GOARCH == "amd64" { - m = 1 // > 99% obtained on amd64, no backsliding + m = 0.011 // > 98.9% obtained on amd64, no backsliding } else if runtime.GOARCH == "riscv64" { - m = 3 // XXX temporary update threshold to 97% for regabi + m = 0.03 // XXX temporary update threshold to 97% for regabi } else { - m = 2 // expect 98% elsewhere. + m = 0.02 // expect 98% elsewhere. 
} - if len(nonStmtLines)*100 > m*len(lines) { - t.Errorf("Saw too many (%s, > %d%%) lines without statement marks, total=%d, nostmt=%d ('-run TestStmtLines -v' lists failing lines)\n", runtime.GOARCH, m, len(lines), len(nonStmtLines)) + if float64(len(nonStmtLines)) > m*float64(len(lines)) { + t.Errorf("Saw too many (%s, > %.1f%%) lines without statement marks, total=%d, nostmt=%d ('-run TestStmtLines -v' lists failing lines)\n", runtime.GOARCH, m*100, len(lines), len(nonStmtLines)) } t.Logf("Saw %d out of %d lines without statement marks", len(nonStmtLines), len(lines)) if testing.Verbose() { diff --git a/src/cmd/compile/internal/ssa/value.go b/src/cmd/compile/internal/ssa/value.go index 55ab23ce9a0bcd..51a70c7fd4fdcd 100644 --- a/src/cmd/compile/internal/ssa/value.go +++ b/src/cmd/compile/internal/ssa/value.go @@ -600,7 +600,7 @@ func (v *Value) removeable() bool { func AutoVar(v *Value) (*ir.Name, int64) { if loc, ok := v.Block.Func.RegAlloc[v.ID].(LocalSlot); ok { if v.Type.Size() > loc.Type.Size() { - v.Fatalf("spill/restore type %s doesn't fit in slot type %s", v.Type, loc.Type) + v.Fatalf("v%d: spill/restore type %v doesn't fit in slot type %v", v.ID, v.Type, loc.Type) } return loc.N, loc.Off } diff --git a/src/cmd/compile/internal/ssa/writebarrier.go b/src/cmd/compile/internal/ssa/writebarrier.go index ec6901f13ec1d2..ec5a0fed29d791 100644 --- a/src/cmd/compile/internal/ssa/writebarrier.go +++ b/src/cmd/compile/internal/ssa/writebarrier.go @@ -798,7 +798,16 @@ func IsNewObject(v *Value, select1 []*Value) (mem *Value, ok bool) { if call.Op != OpStaticCall { return nil, false } - if !isSameCall(call.Aux, "runtime.newobject") { + // Check for new object, or for new object calls that have been transformed into size-specialized malloc calls. + // Calls that have return type unsafe pointer may have originally been produced by flushPendingHeapAllocations + // in the ssa generator, so may have not originally been newObject calls. + var numParameters int64 + switch { + case isNewObject(call.Aux): + numParameters = 1 + case isSpecializedMalloc(call.Aux) && !v.Type.IsUnsafePtr(): + numParameters = 3 + default: return nil, false } if f.ABIDefault == f.ABI1 && len(c.intParamRegs) >= 1 { @@ -813,7 +822,7 @@ func IsNewObject(v *Value, select1 []*Value) (mem *Value, ok bool) { if v.Args[0].Args[0].Op != OpSP { return nil, false } - if v.Args[0].AuxInt != c.ctxt.Arch.FixedFrameSize+c.RegSize { // offset of return value + if v.Args[0].AuxInt != c.ctxt.Arch.FixedFrameSize+numParameters*c.RegSize { // offset of return value return nil, false } return mem, true diff --git a/src/cmd/compile/internal/ssagen/phi.go b/src/cmd/compile/internal/ssagen/phi.go index 19b6920913d83c..0dcf353bf43089 100644 --- a/src/cmd/compile/internal/ssagen/phi.go +++ b/src/cmd/compile/internal/ssagen/phi.go @@ -253,7 +253,7 @@ func (s *phiState) insertVarPhis(n int, var_ ir.Node, defs []*ssa.Block, typ *ty } // Add a phi to block c for variable n. hasPhi.add(c.ID) - v := c.NewValue0I(currentRoot.Pos, ssa.OpPhi, typ, int64(n)) // TODO: line number right? + v := c.NewValue0I(s.s.blockStarts[b.ID], ssa.OpPhi, typ, int64(n)) // Note: we store the variable number in the phi's AuxInt field. Used temporarily by phi building. if var_.Op() == ir.ONAME { s.s.addNamedValue(var_.(*ir.Name), v) @@ -513,6 +513,7 @@ loop: v.Op = ssa.OpPhi v.AddArgs(args...) 
v.Aux = nil + v.Pos = s.s.blockStarts[b.ID] continue loop } w = a // save witness diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index 1e2159579dfbf2..ae7d57566f7e0d 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -12,6 +12,7 @@ import ( "go/constant" "html" "internal/buildcfg" + "internal/runtime/gc" "os" "path/filepath" "slices" @@ -124,6 +125,15 @@ func InitConfig() { ir.Syms.Goschedguarded = typecheck.LookupRuntimeFunc("goschedguarded") ir.Syms.Growslice = typecheck.LookupRuntimeFunc("growslice") ir.Syms.InterfaceSwitch = typecheck.LookupRuntimeFunc("interfaceSwitch") + for i := 1; i < len(ir.Syms.MallocGCSmallNoScan); i++ { + ir.Syms.MallocGCSmallNoScan[i] = typecheck.LookupRuntimeFunc(fmt.Sprintf("mallocgcSmallNoScanSC%d", i)) + } + for i := 1; i < len(ir.Syms.MallocGCSmallScanNoHeader); i++ { + ir.Syms.MallocGCSmallScanNoHeader[i] = typecheck.LookupRuntimeFunc(fmt.Sprintf("mallocgcSmallScanNoHeaderSC%d", i)) + } + for i := 1; i < len(ir.Syms.MallocGCTiny); i++ { + ir.Syms.MallocGCTiny[i] = typecheck.LookupRuntimeFunc(fmt.Sprintf("mallocTiny%d", i)) + } ir.Syms.MallocGC = typecheck.LookupRuntimeFunc("mallocgc") ir.Syms.Memmove = typecheck.LookupRuntimeFunc("memmove") ir.Syms.Msanread = typecheck.LookupRuntimeFunc("msanread") @@ -690,7 +700,7 @@ func allocAlign(t *types.Type) int64 { func (s *state) newHeapaddr(n *ir.Name) { size := allocSize(n.Type()) if n.Type().HasPointers() || size >= maxAggregatedHeapAllocation || size == 0 { - s.setHeapaddr(n.Pos(), n, s.newObject(n.Type(), nil)) + s.setHeapaddr(n.Pos(), n, s.newObject(n.Type())) return } @@ -709,7 +719,7 @@ func (s *state) newHeapaddr(n *ir.Name) { // Make an allocation, but the type being allocated is just // the first pending object. We will come back and update it // later if needed. - allocCall = s.newObject(n.Type(), nil) + allocCall = s.newObjectNonSpecialized(n.Type(), nil) } else { allocCall = s.pendingHeapAllocations[0].Args[0] } @@ -762,7 +772,11 @@ func (s *state) flushPendingHeapAllocations() { s.constBool(true), // needZero TODO: false is ok? 
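// A rough, self-contained sketch of how the specializedMallocSym helper added
// just below picks a runtime entry point. The 8-byte divisor and 16-byte tiny
// threshold are illustrative stand-ins for gc.SmallSizeDiv and gc.TinySize,
// and the rounded-up division stands in for the gc.SizeToSizeClass8 table
// lookup; only the selection order (pointerful, then tiny, then small
// pointer-free) mirrors the real helper.
package main

import "fmt"

func divRoundUp(n, a uintptr) uintptr { return (n + a - 1) / a }

func pickMalloc(size uintptr, hasPointers bool) string {
	const smallSizeDiv, tinySize = 8, 16 // illustrative values only
	class := divRoundUp(size, smallSizeDiv) // stand-in for a size-class table lookup
	switch {
	case hasPointers:
		return fmt.Sprintf("mallocgcSmallScanNoHeaderSC%d", class)
	case size < tinySize:
		return fmt.Sprintf("mallocTiny%d", size)
	default:
		return fmt.Sprintf("mallocgcSmallNoScanSC%d", class)
	}
}

func main() {
	fmt.Println(pickMalloc(8, false))  // tiny allocation
	fmt.Println(pickMalloc(64, true))  // small allocation containing pointers
	fmt.Println(pickMalloc(64, false)) // small pointer-free allocation
}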
call.Args[1], // memory } - call.Aux = ssa.StaticAuxCall(ir.Syms.MallocGC, s.f.ABIDefault.ABIAnalyzeTypes( + mallocSym := ir.Syms.MallocGC + if specialMallocSym := s.specializedMallocSym(size, false); specialMallocSym != nil { + mallocSym = specialMallocSym + } + call.Aux = ssa.StaticAuxCall(mallocSym, s.f.ABIDefault.ABIAnalyzeTypes( []*types.Type{args[0].Type, args[1].Type, args[2].Type}, []*types.Type{types.Types[types.TUNSAFEPTR]}, )) @@ -774,6 +788,43 @@ func (s *state) flushPendingHeapAllocations() { ptr.Type = types.Types[types.TUNSAFEPTR] } +func (s *state) specializedMallocSym(size int64, hasPointers bool) *obj.LSym { + if !s.sizeSpecializedMallocEnabled() { + return nil + } + ptrSize := s.config.PtrSize + ptrBits := ptrSize * 8 + minSizeForMallocHeader := ptrSize * ptrBits + heapBitsInSpan := size <= minSizeForMallocHeader + if !heapBitsInSpan { + return nil + } + divRoundUp := func(n, a uintptr) uintptr { return (n + a - 1) / a } + sizeClass := gc.SizeToSizeClass8[divRoundUp(uintptr(size), gc.SmallSizeDiv)] + if hasPointers { + return ir.Syms.MallocGCSmallScanNoHeader[sizeClass] + } + if size < gc.TinySize { + return ir.Syms.MallocGCTiny[size] + } + return ir.Syms.MallocGCSmallNoScan[sizeClass] +} + +func (s *state) sizeSpecializedMallocEnabled() bool { + if base.Flag.CompilingRuntime { + // The compiler forces the values of the asan, msan, and race flags to false if + // we're compiling the runtime, so we lose the information about whether we're + // building in asan, msan, or race mode. Because the specialized functions don't + // work in that mode, just turn if off in that case. + // TODO(matloob): Save the information about whether the flags were passed in + // originally so we can turn off size specialized malloc in that case instead + // using Instrumenting below. Then we can remove this condition. + return false + } + + return buildcfg.Experiment.SizeSpecializedMalloc && !base.Flag.Cfg.Instrumenting +} + // setHeapaddr allocates a new PAUTO variable to store ptr (which must be non-nil) // and then sets it as n's heap address. func (s *state) setHeapaddr(pos src.XPos, n *ir.Name, ptr *ssa.Value) { @@ -796,7 +847,24 @@ func (s *state) setHeapaddr(pos src.XPos, n *ir.Name, ptr *ssa.Value) { } // newObject returns an SSA value denoting new(typ). -func (s *state) newObject(typ *types.Type, rtype *ssa.Value) *ssa.Value { +func (s *state) newObject(typ *types.Type) *ssa.Value { + if typ.Size() == 0 { + return s.newValue1A(ssa.OpAddr, types.NewPtr(typ), ir.Syms.Zerobase, s.sb) + } + rtype := s.reflectType(typ) + if specialMallocSym := s.specializedMallocSym(typ.Size(), typ.HasPointers()); specialMallocSym != nil { + return s.rtcall(specialMallocSym, true, []*types.Type{types.NewPtr(typ)}, + s.constInt(types.Types[types.TUINTPTR], typ.Size()), + rtype, + s.constBool(true), + )[0] + } + return s.rtcall(ir.Syms.Newobject, true, []*types.Type{types.NewPtr(typ)}, rtype)[0] +} + +// newObjectNonSpecialized returns an SSA value denoting new(typ). It does +// not produce size-specialized malloc functions. +func (s *state) newObjectNonSpecialized(typ *types.Type, rtype *ssa.Value) *ssa.Value { if typ.Size() == 0 { return s.newValue1A(ssa.OpAddr, types.NewPtr(typ), ir.Syms.Zerobase, s.sb) } @@ -1020,6 +1088,9 @@ type state struct { // First argument of append calls that could be stack allocated. appendTargets map[ir.Node]bool + + // Block starting position, indexed by block id. 
+ blockStarts []src.XPos } type funcLine struct { @@ -1078,6 +1149,9 @@ func (s *state) startBlock(b *ssa.Block) { s.curBlock = b s.vars = map[ir.Node]*ssa.Value{} clear(s.fwdVars) + for len(s.blockStarts) <= int(b.ID) { + s.blockStarts = append(s.blockStarts, src.NoXPos) + } } // endBlock marks the end of generating code for the current block. @@ -1104,6 +1178,9 @@ func (s *state) endBlock() *ssa.Block { b.Pos = src.NoXPos } else { b.Pos = s.lastPos + if s.blockStarts[b.ID] == src.NoXPos { + s.blockStarts[b.ID] = s.lastPos + } } return b } @@ -1120,6 +1197,11 @@ func (s *state) pushLine(line src.XPos) { } else { s.lastPos = line } + // The first position we see for a new block is its starting position + // (the line number for its phis, if any). + if b := s.curBlock; b != nil && s.blockStarts[b.ID] == src.NoXPos { + s.blockStarts[b.ID] = line + } s.line = append(s.line, line) } @@ -2574,13 +2656,13 @@ var fpConvOpToSSA = map[twoTypes]twoOpsAndType{ {types.TFLOAT32, types.TUINT8}: {ssa.OpCvt32Fto32, ssa.OpTrunc32to8, types.TINT32}, {types.TFLOAT32, types.TUINT16}: {ssa.OpCvt32Fto32, ssa.OpTrunc32to16, types.TINT32}, - {types.TFLOAT32, types.TUINT32}: {ssa.OpCvt32Fto64, ssa.OpTrunc64to32, types.TINT64}, // go wide to dodge unsigned - {types.TFLOAT32, types.TUINT64}: {ssa.OpInvalid, ssa.OpCopy, types.TUINT64}, // Cvt32Fto64U, branchy code expansion instead + {types.TFLOAT32, types.TUINT32}: {ssa.OpInvalid, ssa.OpCopy, types.TINT64}, // Cvt64Fto32U, branchy code expansion instead + {types.TFLOAT32, types.TUINT64}: {ssa.OpInvalid, ssa.OpCopy, types.TUINT64}, // Cvt32Fto64U, branchy code expansion instead {types.TFLOAT64, types.TUINT8}: {ssa.OpCvt64Fto32, ssa.OpTrunc32to8, types.TINT32}, {types.TFLOAT64, types.TUINT16}: {ssa.OpCvt64Fto32, ssa.OpTrunc32to16, types.TINT32}, - {types.TFLOAT64, types.TUINT32}: {ssa.OpCvt64Fto64, ssa.OpTrunc64to32, types.TINT64}, // go wide to dodge unsigned - {types.TFLOAT64, types.TUINT64}: {ssa.OpInvalid, ssa.OpCopy, types.TUINT64}, // Cvt64Fto64U, branchy code expansion instead + {types.TFLOAT64, types.TUINT32}: {ssa.OpInvalid, ssa.OpCopy, types.TINT64}, // Cvt64Fto32U, branchy code expansion instead + {types.TFLOAT64, types.TUINT64}: {ssa.OpInvalid, ssa.OpCopy, types.TUINT64}, // Cvt64Fto64U, branchy code expansion instead // float {types.TFLOAT64, types.TFLOAT32}: {ssa.OpCvt64Fto32F, ssa.OpCopy, types.TFLOAT32}, @@ -2797,7 +2879,19 @@ func (s *state) conv(n ir.Node, v *ssa.Value, ft, tt *types.Type) *ssa.Value { } if ft.IsFloat() || tt.IsFloat() { - conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}] + cft, ctt := s.concreteEtype(ft), s.concreteEtype(tt) + conv, ok := fpConvOpToSSA[twoTypes{cft, ctt}] + // there's a change to a conversion-op table, this restores the old behavior if ConvertHash is false. 
+ // use salted hash to distinguish unsigned convert at a Pos from signed convert at a Pos + if ctt == types.TUINT32 && ft.IsFloat() && !base.ConvertHash.MatchPosWithInfo(n.Pos(), "U", nil) { + // revert to old behavior + conv.op1 = ssa.OpCvt64Fto64 + if cft == types.TFLOAT32 { + conv.op1 = ssa.OpCvt32Fto64 + } + conv.op2 = ssa.OpTrunc64to32 + + } if s.config.RegSize == 4 && Arch.LinkArch.Family != sys.MIPS && !s.softFloat { if conv1, ok1 := fpConvOpToSSA32[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 { conv = conv1 @@ -2860,10 +2954,23 @@ func (s *state) conv(n ir.Node, v *ssa.Value, ft, tt *types.Type) *ssa.Value { } // ft is float32 or float64, and tt is unsigned integer if ft.Size() == 4 { - return s.float32ToUint64(n, v, ft, tt) + switch tt.Size() { + case 8: + return s.float32ToUint64(n, v, ft, tt) + case 4, 2, 1: + // TODO should 2 and 1 saturate or truncate? + return s.float32ToUint32(n, v, ft, tt) + } } if ft.Size() == 8 { - return s.float64ToUint64(n, v, ft, tt) + switch tt.Size() { + case 8: + return s.float64ToUint64(n, v, ft, tt) + case 4, 2, 1: + // TODO should 2 and 1 saturate or truncate? + return s.float64ToUint32(n, v, ft, tt) + } + } s.Fatalf("weird float to unsigned integer conversion %v -> %v", ft, tt) return nil @@ -3581,11 +3688,10 @@ func (s *state) exprCheckPtr(n ir.Node, checkPtrOK bool) *ssa.Value { case ir.ONEW: n := n.(*ir.UnaryExpr) - var rtype *ssa.Value if x, ok := n.X.(*ir.DynamicType); ok && x.Op() == ir.ODYNAMICTYPE { - rtype = s.expr(x.RType) + return s.newObjectNonSpecialized(n.Type().Elem(), s.expr(x.RType)) } - return s.newObject(n.Type().Elem(), rtype) + return s.newObject(n.Type().Elem()) case ir.OUNSAFEADD: n := n.(*ir.BinaryExpr) @@ -5553,7 +5659,9 @@ func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n ir.Node, x *ssa.Value, ft, // equal to 10000000001; that rounds up, and the 1 cannot // be lost else it would round down if the LSB of the // candidate mantissa is 0. 
+ cmp := s.newValue2(cvttab.leq, types.Types[types.TBOOL], s.zeroVal(ft), x) + b := s.endBlock() b.Kind = ssa.BlockIf b.SetControl(cmp) @@ -5779,34 +5887,63 @@ func (s *state) float64ToUint32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ss func (s *state) floatToUint(cvttab *f2uCvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value { // cutoff:=1<<(intY_Size-1) // if x < floatX(cutoff) { - // result = uintY(x) + // result = uintY(x) // bThen + // // gated by ConvertHash, clamp negative inputs to zero + // if x < 0 { // unlikely + // result = 0 // bZero + // } // } else { - // y = x - floatX(cutoff) + // y = x - floatX(cutoff) // bElse // z = uintY(y) // result = z | -(cutoff) // } + cutoff := cvttab.floatValue(s, ft, float64(cvttab.cutoff)) - cmp := s.newValue2(cvttab.ltf, types.Types[types.TBOOL], x, cutoff) + cmp := s.newValueOrSfCall2(cvttab.ltf, types.Types[types.TBOOL], x, cutoff) b := s.endBlock() b.Kind = ssa.BlockIf b.SetControl(cmp) b.Likely = ssa.BranchLikely - bThen := s.f.NewBlock(ssa.BlockPlain) + var bThen, bZero *ssa.Block + // use salted hash to distinguish unsigned convert at a Pos from signed convert at a Pos + newConversion := base.ConvertHash.MatchPosWithInfo(n.Pos(), "U", nil) + if newConversion { + bZero = s.f.NewBlock(ssa.BlockPlain) + bThen = s.f.NewBlock(ssa.BlockIf) + } else { + bThen = s.f.NewBlock(ssa.BlockPlain) + } + bElse := s.f.NewBlock(ssa.BlockPlain) bAfter := s.f.NewBlock(ssa.BlockPlain) b.AddEdgeTo(bThen) s.startBlock(bThen) - a0 := s.newValue1(cvttab.cvt2U, tt, x) + a0 := s.newValueOrSfCall1(cvttab.cvt2U, tt, x) s.vars[n] = a0 - s.endBlock() - bThen.AddEdgeTo(bAfter) + + if newConversion { + cmpz := s.newValueOrSfCall2(cvttab.ltf, types.Types[types.TBOOL], x, cvttab.floatValue(s, ft, 0.0)) + s.endBlock() + bThen.SetControl(cmpz) + bThen.AddEdgeTo(bZero) + bThen.Likely = ssa.BranchUnlikely + bThen.AddEdgeTo(bAfter) + + s.startBlock(bZero) + s.vars[n] = cvttab.intValue(s, tt, 0) + s.endBlock() + bZero.AddEdgeTo(bAfter) + } else { + s.endBlock() + bThen.AddEdgeTo(bAfter) + } b.AddEdgeTo(bElse) s.startBlock(bElse) - y := s.newValue2(cvttab.subf, ft, x, cutoff) - y = s.newValue1(cvttab.cvt2U, tt, y) + y := s.newValueOrSfCall2(cvttab.subf, ft, x, cutoff) + y = s.newValueOrSfCall1(cvttab.cvt2U, tt, y) z := cvttab.intValue(s, tt, int64(-cvttab.cutoff)) a1 := s.newValue2(cvttab.or, tt, y, z) s.vars[n] = a1 @@ -5827,6 +5964,25 @@ func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Val if n.ITab != nil { targetItab = s.expr(n.ITab) } + + if n.UseNilPanic { + if commaok { + base.Fatalf("unexpected *ir.TypeAssertExpr with UseNilPanic == true && commaok == true") + } + if n.Type().IsInterface() { + // Currently we do not expect the compiler to emit type asserts with UseNilPanic, that assert to an interface type. + // If needed, this can be relaxed in the future, but for now we can assert that. 
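// A minimal sketch of the shape of the floatToUint lowering above for the
// float64-to-uint64 case, including the new ConvertHash-gated bZero branch
// that clamps negative inputs to 0. Out-of-range float-to-unsigned
// conversions are implementation-dependent, so the clamp only pins down
// previously unspecified behavior; this mirrors the pseudocode comment, not
// the exact SSA that is generated.
package main

import "fmt"

func convert(x float64) uint64 {
	const cutoff = 1 << 63
	if x < cutoff { // likely
		if x < 0 { // unlikely
			return 0 // bZero: clamp negative inputs
		}
		return uint64(x) // bThen: direct conversion
	}
	y := x - cutoff           // bElse: shift below the cutoff,
	return uint64(y) | cutoff // convert, then restore the high bit
}

func main() {
	fmt.Println(convert(-3.5)) // 0
	fmt.Println(convert(3.5))  // 3
	fmt.Println(convert(1e19)) // 10000000000000000000
}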
+ base.Fatalf("unexpected *ir.TypeAssertExpr with UseNilPanic == true && Type().IsInterface() == true") + } + typs := s.f.Config.Types + iface = s.newValue2( + ssa.OpIMake, + iface.Type, + s.nilCheck(s.newValue1(ssa.OpITab, typs.BytePtr, iface)), + s.newValue1(ssa.OpIData, typs.BytePtr, iface), + ) + } + return s.dottype1(n.Pos(), n.X.Type(), n.Type(), iface, nil, target, targetItab, commaok, n.Descriptor) } diff --git a/src/cmd/compile/internal/test/float_test.go b/src/cmd/compile/internal/test/float_test.go index 9e61148c5297e5..7a5e27870f97f0 100644 --- a/src/cmd/compile/internal/test/float_test.go +++ b/src/cmd/compile/internal/test/float_test.go @@ -623,6 +623,110 @@ func TestInf(t *testing.T) { } } +//go:noinline +func isNaNOrGtZero64(x float64) bool { + return math.IsNaN(x) || x > 0 +} + +//go:noinline +func isNaNOrGteZero64(x float64) bool { + return x >= 0 || math.IsNaN(x) +} + +//go:noinline +func isNaNOrLtZero64(x float64) bool { + return x < 0 || math.IsNaN(x) +} + +//go:noinline +func isNaNOrLteZero64(x float64) bool { + return math.IsNaN(x) || x <= 0 +} + +func TestFusedNaNChecks64(t *testing.T) { + tests := []struct { + value float64 + isZero bool + isGreaterThanZero bool + isLessThanZero bool + isNaN bool + }{ + {value: 0.0, isZero: true}, + {value: math.Copysign(0, -1), isZero: true}, + {value: 1.0, isGreaterThanZero: true}, + {value: -1.0, isLessThanZero: true}, + {value: math.Inf(1), isGreaterThanZero: true}, + {value: math.Inf(-1), isLessThanZero: true}, + {value: math.NaN(), isNaN: true}, + } + + check := func(name string, f func(x float64) bool, value float64, want bool) { + got := f(value) + if got != want { + t.Errorf("%v(%g): want %v, got %v", name, value, want, got) + } + } + + for _, test := range tests { + check("isNaNOrGtZero64", isNaNOrGtZero64, test.value, test.isNaN || test.isGreaterThanZero) + check("isNaNOrGteZero64", isNaNOrGteZero64, test.value, test.isNaN || test.isGreaterThanZero || test.isZero) + check("isNaNOrLtZero64", isNaNOrLtZero64, test.value, test.isNaN || test.isLessThanZero) + check("isNaNOrLteZero64", isNaNOrLteZero64, test.value, test.isNaN || test.isLessThanZero || test.isZero) + } +} + +//go:noinline +func isNaNOrGtZero32(x float32) bool { + return x > 0 || x != x +} + +//go:noinline +func isNaNOrGteZero32(x float32) bool { + return x != x || x >= 0 +} + +//go:noinline +func isNaNOrLtZero32(x float32) bool { + return x != x || x < 0 +} + +//go:noinline +func isNaNOrLteZero32(x float32) bool { + return x <= 0 || x != x +} + +func TestFusedNaNChecks32(t *testing.T) { + tests := []struct { + value float32 + isZero bool + isGreaterThanZero bool + isLessThanZero bool + isNaN bool + }{ + {value: 0.0, isZero: true}, + {value: float32(math.Copysign(0, -1)), isZero: true}, + {value: 1.0, isGreaterThanZero: true}, + {value: -1.0, isLessThanZero: true}, + {value: float32(math.Inf(1)), isGreaterThanZero: true}, + {value: float32(math.Inf(-1)), isLessThanZero: true}, + {value: float32(math.NaN()), isNaN: true}, + } + + check := func(name string, f func(x float32) bool, value float32, want bool) { + got := f(value) + if got != want { + t.Errorf("%v(%g): want %v, got %v", name, value, want, got) + } + } + + for _, test := range tests { + check("isNaNOrGtZero32", isNaNOrGtZero32, test.value, test.isNaN || test.isGreaterThanZero) + check("isNaNOrGteZero32", isNaNOrGteZero32, test.value, test.isNaN || test.isGreaterThanZero || test.isZero) + check("isNaNOrLtZero32", isNaNOrLtZero32, test.value, test.isNaN || test.isLessThanZero) + check("isNaNOrLteZero32", 
isNaNOrLteZero32, test.value, test.isNaN || test.isLessThanZero || test.isZero) + } +} + var sinkFloat float64 func BenchmarkMul2(b *testing.B) { diff --git a/src/cmd/compile/internal/types2/alias.go b/src/cmd/compile/internal/types2/alias.go index 90dda18cc88426..d306600ebd2710 100644 --- a/src/cmd/compile/internal/types2/alias.go +++ b/src/cmd/compile/internal/types2/alias.go @@ -113,7 +113,6 @@ func unalias(a0 *Alias) Type { for a := a0; a != nil; a, _ = t.(*Alias) { t = a.fromRHS } - // It's fine to memoize nil types since it's the zero value for actual. // It accomplishes nothing. a0.actual = t diff --git a/src/cmd/compile/internal/types2/api_test.go b/src/cmd/compile/internal/types2/api_test.go index 0d3c8b8e3e5e0f..4b7012e6c45e9f 100644 --- a/src/cmd/compile/internal/types2/api_test.go +++ b/src/cmd/compile/internal/types2/api_test.go @@ -2468,8 +2468,8 @@ func TestInstantiateErrors(t *testing.T) { t.Fatalf("Instantiate(%v, %v) returned nil error, want non-nil", T, test.targs) } - var argErr *ArgumentError - if !errors.As(err, &argErr) { + argErr, ok := errors.AsType[*ArgumentError](err) + if !ok { t.Fatalf("Instantiate(%v, %v): error is not an *ArgumentError", T, test.targs) } @@ -2484,8 +2484,8 @@ func TestArgumentErrorUnwrapping(t *testing.T) { Index: 1, Err: Error{Msg: "test"}, } - var e Error - if !errors.As(err, &e) { + e, ok := errors.AsType[Error](err) + if !ok { t.Fatalf("error %v does not wrap types.Error", err) } if e.Msg != "test" { diff --git a/src/cmd/compile/internal/types2/assignments.go b/src/cmd/compile/internal/types2/assignments.go index 8af5f4037a5e3e..87f5c8beeafcaf 100644 --- a/src/cmd/compile/internal/types2/assignments.go +++ b/src/cmd/compile/internal/types2/assignments.go @@ -91,7 +91,7 @@ func (check *Checker) assignment(x *operand, T Type, context string) { // x.typ is typed // A generic (non-instantiated) function value cannot be assigned to a variable. 
- if sig, _ := under(x.typ).(*Signature); sig != nil && sig.TypeParams().Len() > 0 { + if sig, _ := x.typ.Underlying().(*Signature); sig != nil && sig.TypeParams().Len() > 0 { check.errorf(x, WrongTypeArgCount, "cannot use generic function %s without instantiation in %s", x, context) x.mode = invalid return @@ -261,7 +261,7 @@ func (check *Checker) assignVar(lhs, rhs syntax.Expr, x *operand, context string var target *target // avoid calling ExprString if not needed if T != nil { - if _, ok := under(T).(*Signature); ok { + if _, ok := T.Underlying().(*Signature); ok { target = newTarget(T, ExprString(lhs)) } } diff --git a/src/cmd/compile/internal/types2/builtins.go b/src/cmd/compile/internal/types2/builtins.go index df207a2746d627..84acb4ca484164 100644 --- a/src/cmd/compile/internal/types2/builtins.go +++ b/src/cmd/compile/internal/types2/builtins.go @@ -144,7 +144,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) ( // len(x) mode := invalid var val constant.Value - switch t := arrayPtrDeref(under(x.typ)).(type) { + switch t := arrayPtrDeref(x.typ.Underlying()).(type) { case *Basic: if isString(t) && id == _Len { if x.mode == constant_ { @@ -203,7 +203,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) ( if mode == invalid { // avoid error if underlying type is invalid - if isValid(under(x.typ)) { + if isValid(x.typ.Underlying()) { code := InvalidCap if id == _Len { code = InvalidLen @@ -322,7 +322,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) ( // (applyTypeFunc never calls f with a type parameter) f := func(typ Type) Type { assert(!isTypeParam(typ)) - if t, _ := under(typ).(*Basic); t != nil { + if t, _ := typ.Underlying().(*Basic); t != nil { switch t.kind { case Float32: return Typ[Complex64] @@ -472,7 +472,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) ( // (applyTypeFunc never calls f with a type parameter) f := func(typ Type) Type { assert(!isTypeParam(typ)) - if t, _ := under(typ).(*Basic); t != nil { + if t, _ := typ.Underlying().(*Basic); t != nil { switch t.kind { case Complex64: return Typ[Float32] @@ -1020,7 +1020,7 @@ func hasVarSize(t Type, seen map[*Named]bool) (varSized bool) { }() } - switch u := under(t).(type) { + switch u := t.Underlying().(type) { case *Array: return hasVarSize(u.elem, seen) case *Struct: @@ -1112,7 +1112,7 @@ func makeSig(res Type, args ...Type) *Signature { // otherwise it returns typ. 
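// The types2 changes in this patch replace the package-internal under(T)
// helper with calls to the exported Type.Underlying method. A small sketch
// using the mirrored go/types API: Underlying strips the name from a defined
// type and returns its unnamed underlying type, and is the identity on
// unnamed types.
package main

import (
	"fmt"
	"go/types"
)

func main() {
	obj := types.NewTypeName(0, nil, "MyInt", nil)
	named := types.NewNamed(obj, types.Typ[types.Int], nil) // type MyInt int
	fmt.Println(named)                                      // MyInt
	fmt.Println(named.Underlying())                         // int
	fmt.Println(types.Typ[types.Int].Underlying())          // int: a basic type is its own underlying type
}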
func arrayPtrDeref(typ Type) Type { if p, ok := Unalias(typ).(*Pointer); ok { - if a, _ := under(p.base).(*Array); a != nil { + if a, _ := p.base.Underlying().(*Array); a != nil { return a } } diff --git a/src/cmd/compile/internal/types2/call.go b/src/cmd/compile/internal/types2/call.go index b7a2ebb41ec976..aca205ad985013 100644 --- a/src/cmd/compile/internal/types2/call.go +++ b/src/cmd/compile/internal/types2/call.go @@ -205,7 +205,7 @@ func (check *Checker) callExpr(x *operand, call *syntax.CallExpr) exprKind { case 1: check.expr(nil, x, call.ArgList[0]) if x.mode != invalid { - if t, _ := under(T).(*Interface); t != nil && !isTypeParam(T) { + if t, _ := T.Underlying().(*Interface); t != nil && !isTypeParam(T) { if !t.IsMethodSet() { check.errorf(call, MisplacedConstraintIface, "cannot use interface %s in conversion (contains specific type constraints or is comparable)", T) break @@ -812,7 +812,7 @@ func (check *Checker) selector(x *operand, e *syntax.SelectorExpr, def *TypeName obj, index, indirect = lookupFieldOrMethod(x.typ, x.mode == variable, check.pkg, sel, false) if obj == nil { // Don't report another error if the underlying type was invalid (go.dev/issue/49541). - if !isValid(under(x.typ)) { + if !isValid(x.typ.Underlying()) { goto Error } diff --git a/src/cmd/compile/internal/types2/const.go b/src/cmd/compile/internal/types2/const.go index 5e5bc74ba34ed6..b68d72de4d27f4 100644 --- a/src/cmd/compile/internal/types2/const.go +++ b/src/cmd/compile/internal/types2/const.go @@ -33,7 +33,7 @@ func (check *Checker) overflow(x *operand, opPos syntax.Pos) { // x.typ cannot be a type parameter (type // parameters cannot be constant types). if isTyped(x.typ) { - check.representable(x, under(x.typ).(*Basic)) + check.representable(x, x.typ.Underlying().(*Basic)) return } diff --git a/src/cmd/compile/internal/types2/conversions.go b/src/cmd/compile/internal/types2/conversions.go index 0ad79afe71c409..d0920d7ef1006d 100644 --- a/src/cmd/compile/internal/types2/conversions.go +++ b/src/cmd/compile/internal/types2/conversions.go @@ -18,7 +18,7 @@ func (check *Checker) conversion(x *operand, T Type) { constArg := x.mode == constant_ constConvertibleTo := func(T Type, val *constant.Value) bool { - switch t, _ := under(T).(*Basic); { + switch t, _ := T.Underlying().(*Basic); { case t == nil: // nothing to do case representableConst(x.val, check, t, val): @@ -142,8 +142,8 @@ func (x *operand) convertibleTo(check *Checker, T Type, cause *string) bool { origT := T V := Unalias(x.typ) T = Unalias(T) - Vu := under(V) - Tu := under(T) + Vu := V.Underlying() + Tu := T.Underlying() Vp, _ := V.(*TypeParam) Tp, _ := T.(*TypeParam) @@ -158,7 +158,7 @@ func (x *operand) convertibleTo(check *Checker, T Type, cause *string) bool { // and their pointer base types are not type parameters" if V, ok := V.(*Pointer); ok { if T, ok := T.(*Pointer); ok { - if IdenticalIgnoreTags(under(V.base), under(T.base)) && !isTypeParam(V.base) && !isTypeParam(T.base) { + if IdenticalIgnoreTags(V.base.Underlying(), T.base.Underlying()) && !isTypeParam(V.base) && !isTypeParam(T.base) { return true } } @@ -211,7 +211,7 @@ func (x *operand) convertibleTo(check *Checker, T Type, cause *string) bool { return false } case *Pointer: - if a, _ := under(a.Elem()).(*Array); a != nil { + if a, _ := a.Elem().Underlying().(*Array); a != nil { if Identical(s.Elem(), a.Elem()) { if check == nil || check.allowVersion(go1_17) { return true @@ -292,23 +292,23 @@ func (x *operand) convertibleTo(check *Checker, T Type, cause *string) bool { } func 
isUintptr(typ Type) bool { - t, _ := under(typ).(*Basic) + t, _ := typ.Underlying().(*Basic) return t != nil && t.kind == Uintptr } func isUnsafePointer(typ Type) bool { - t, _ := under(typ).(*Basic) + t, _ := typ.Underlying().(*Basic) return t != nil && t.kind == UnsafePointer } func isPointer(typ Type) bool { - _, ok := under(typ).(*Pointer) + _, ok := typ.Underlying().(*Pointer) return ok } func isBytesOrRunes(typ Type) bool { - if s, _ := under(typ).(*Slice); s != nil { - t, _ := under(s.elem).(*Basic) + if s, _ := typ.Underlying().(*Slice); s != nil { + t, _ := s.elem.Underlying().(*Basic) return t != nil && (t.kind == Byte || t.kind == Rune) } return false diff --git a/src/cmd/compile/internal/types2/decl.go b/src/cmd/compile/internal/types2/decl.go index 34105816a65af3..b830cb6f4f3c1c 100644 --- a/src/cmd/compile/internal/types2/decl.go +++ b/src/cmd/compile/internal/types2/decl.go @@ -225,8 +225,8 @@ func (check *Checker) validCycle(obj Object) (valid bool) { start := obj.color() - grey // index of obj in objPath cycle := check.objPath[start:] tparCycle := false // if set, the cycle is through a type parameter list - nval := 0 // number of (constant or variable) values in the cycle; valid if !generic - ndef := 0 // number of type definitions in the cycle; valid if !generic + nval := 0 // number of (constant or variable) values in the cycle + ndef := 0 // number of type definitions in the cycle loop: for _, obj := range cycle { switch obj := obj.(type) { @@ -235,7 +235,7 @@ loop: case *TypeName: // If we reach a generic type that is part of a cycle // and we are in a type parameter list, we have a cycle - // through a type parameter list, which is invalid. + // through a type parameter list. if check.inTParamList && isGeneric(obj.typ) { tparCycle = true break loop @@ -286,20 +286,23 @@ loop: }() } - if !tparCycle { - // A cycle involving only constants and variables is invalid but we - // ignore them here because they are reported via the initialization - // cycle check. - if nval == len(cycle) { - return true - } + // Cycles through type parameter lists are ok (go.dev/issue/68162). + if tparCycle { + return true + } - // A cycle involving only types (and possibly functions) must have at least - // one type definition to be permitted: If there is no type definition, we - // have a sequence of alias type names which will expand ad infinitum. - if nval == 0 && ndef > 0 { - return true - } + // A cycle involving only constants and variables is invalid but we + // ignore them here because they are reported via the initialization + // cycle check. + if nval == len(cycle) { + return true + } + + // A cycle involving only types (and possibly functions) must have at least + // one type definition to be permitted: If there is no type definition, we + // have a sequence of alias type names which will expand ad infinitum. 
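// A small concrete illustration of the cycle rule restated above: a cycle of
// type names is permitted only if it passes through at least one type
// definition, since a cycle made solely of aliases would expand forever.
package main

// Valid: the cycle A -> B -> A contains the type definition B.
type A = []B
type B []A

// Invalid (rejected by the checker, shown only as a comment):
//	type C = D
//	type D = C

func main() {}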
+ if nval == 0 && ndef > 0 { + return true } check.cycleError(cycle, firstInSrc(cycle)) @@ -388,7 +391,7 @@ func (check *Checker) constDecl(obj *Const, typ, init syntax.Expr, inherited boo if !isConstType(t) { // don't report an error if the type is an invalid C (defined) type // (go.dev/issue/22090) - if isValid(under(t)) { + if isValid(t.Underlying()) { check.errorf(typ, InvalidConstType, "invalid constant type %s", t) } obj.typ = Typ[Invalid] @@ -473,7 +476,7 @@ func (check *Checker) isImportedConstraint(typ Type) bool { if named == nil || named.obj.pkg == check.pkg || named.obj.pkg == nil { return false } - u, _ := named.under().(*Interface) + u, _ := named.Underlying().(*Interface) return u != nil && !u.IsMethodSet() } @@ -555,31 +558,33 @@ func (check *Checker) typeDecl(obj *TypeName, tdecl *syntax.TypeDecl, def *TypeN named := check.newNamed(obj, nil, nil) setDefType(def, named) + // The RHS of a named N can be nil if, for example, N is defined as a cycle of aliases with + // gotypesalias=0. Consider: + // + // type D N // N.resolve() will panic + // type N A + // type A = N // N.fromRHS is not set before N.resolve(), since A does not call setDefType + // + // There is likely a better way to detect such cases, but it may not be worth the effort. + // Instead, we briefly permit a nil N.fromRHS while type-checking D. + named.allowNilRHS = true + defer (func() { named.allowNilRHS = false })() + if tdecl.TParamList != nil { check.openScope(tdecl, "type parameters") defer check.closeScope() check.collectTypeParams(&named.tparams, tdecl.TParamList) } - // determine underlying type of named rhs = check.definedType(tdecl.Type, obj) assert(rhs != nil) named.fromRHS = rhs - // If the underlying type was not set while type-checking the right-hand - // side, it is invalid and an error should have been reported elsewhere. - if named.underlying == nil { - named.underlying = Typ[Invalid] - } - - // Disallow a lone type parameter as the RHS of a type declaration (go.dev/issue/45639). - // We don't need this restriction anymore if we make the underlying type of a type - // parameter its constraint interface: if the RHS is a lone type parameter, we will - // use its underlying type (like we do for any RHS in a type declaration), and its - // underlying type is an interface and the type declaration is well defined. + // spec: "In a type definition the given type cannot be a type parameter." + // (See also go.dev/issue/45639.) if isTypeParam(rhs) { check.error(tdecl.Type, MisplacedTypeParam, "cannot use a type parameter as RHS in type declaration") - named.underlying = Typ[Invalid] + named.fromRHS = Typ[Invalid] } } @@ -721,7 +726,7 @@ func (check *Checker) collectMethods(obj *TypeName) { } func (check *Checker) checkFieldUniqueness(base *Named) { - if t, _ := base.under().(*Struct); t != nil { + if t, _ := base.Underlying().(*Struct); t != nil { var mset objset for i := 0; i < base.NumMethods(); i++ { m := base.Method(i) diff --git a/src/cmd/compile/internal/types2/expr.go b/src/cmd/compile/internal/types2/expr.go index e5f9a1c6f7cb49..d62b0247578d6e 100644 --- a/src/cmd/compile/internal/types2/expr.go +++ b/src/cmd/compile/internal/types2/expr.go @@ -361,7 +361,7 @@ func (check *Checker) updateExprType(x syntax.Expr, typ Type, final bool) { // If the new type is not final and still untyped, just // update the recorded type. 
if !final && isUntyped(typ) { - old.typ = under(typ).(*Basic) + old.typ = typ.Underlying().(*Basic) check.untyped[x] = old return } @@ -431,7 +431,7 @@ func (check *Checker) implicitTypeAndValue(x *operand, target Type) (Type, const return nil, nil, InvalidUntypedConversion } - switch u := under(target).(type) { + switch u := target.Underlying().(type) { case *Basic: if x.mode == constant_ { v, code := check.representation(x, u) @@ -616,7 +616,7 @@ Error: // incomparableCause returns a more specific cause why typ is not comparable. // If there is no more specific cause, the result is "". func (check *Checker) incomparableCause(typ Type) string { - switch under(typ).(type) { + switch typ.Underlying().(type) { case *Slice, *Signature, *Map: return compositeKind(typ) + " can only be compared to nil" } @@ -963,7 +963,7 @@ type target struct { // The result is nil if typ is not a signature. func newTarget(typ Type, desc string) *target { if typ != nil { - if sig, _ := under(typ).(*Signature); sig != nil { + if sig, _ := typ.Underlying().(*Signature); sig != nil { return &target{sig, desc} } } @@ -1112,7 +1112,7 @@ func (check *Checker) exprInternal(T *target, x *operand, e syntax.Expr, hint Ty check.errorf(x, InvalidAssert, invalidOp+"cannot use type assertion on type parameter value %s", x) goto Error } - if _, ok := under(x.typ).(*Interface); !ok { + if _, ok := x.typ.Underlying().(*Interface); !ok { check.errorf(x, InvalidAssert, invalidOp+"%s is not an interface", x) goto Error } diff --git a/src/cmd/compile/internal/types2/gcsizes.go b/src/cmd/compile/internal/types2/gcsizes.go index 15f3e006425ade..54e8ea23c173c5 100644 --- a/src/cmd/compile/internal/types2/gcsizes.go +++ b/src/cmd/compile/internal/types2/gcsizes.go @@ -16,7 +16,7 @@ func (s *gcSizes) Alignof(T Type) (result int64) { // For arrays and structs, alignment is defined in terms // of alignment of the elements and fields, respectively. - switch t := under(T).(type) { + switch t := T.Underlying().(type) { case *Array: // spec: "For a variable x of array type: unsafe.Alignof(x) // is the same as unsafe.Alignof(x[0]), but at least 1." 
@@ -96,7 +96,7 @@ func (s *gcSizes) Offsetsof(fields []*Var) []int64 { } func (s *gcSizes) Sizeof(T Type) int64 { - switch t := under(T).(type) { + switch t := T.Underlying().(type) { case *Basic: assert(isTyped(T)) k := t.kind diff --git a/src/cmd/compile/internal/types2/index.go b/src/cmd/compile/internal/types2/index.go index 7e16a87332dcdf..ca84184d35afed 100644 --- a/src/cmd/compile/internal/types2/index.go +++ b/src/cmd/compile/internal/types2/index.go @@ -35,7 +35,7 @@ func (check *Checker) indexExpr(x *operand, e *syntax.IndexExpr) (isFuncInst boo return false case value: - if sig, _ := under(x.typ).(*Signature); sig != nil && sig.TypeParams().Len() > 0 { + if sig, _ := x.typ.Underlying().(*Signature); sig != nil && sig.TypeParams().Len() > 0 { // function instantiation return true } @@ -50,7 +50,7 @@ func (check *Checker) indexExpr(x *operand, e *syntax.IndexExpr) (isFuncInst boo // ordinary index expression valid := false length := int64(-1) // valid if >= 0 - switch typ := under(x.typ).(type) { + switch typ := x.typ.Underlying().(type) { case *Basic: if isString(typ) { valid = true @@ -73,7 +73,7 @@ func (check *Checker) indexExpr(x *operand, e *syntax.IndexExpr) (isFuncInst boo x.typ = typ.elem case *Pointer: - if typ, _ := under(typ.base).(*Array); typ != nil { + if typ, _ := typ.base.Underlying().(*Array); typ != nil { valid = true length = typ.len x.mode = variable @@ -124,7 +124,7 @@ func (check *Checker) indexExpr(x *operand, e *syntax.IndexExpr) (isFuncInst boo mode = value } case *Pointer: - if t, _ := under(t.base).(*Array); t != nil { + if t, _ := t.base.Underlying().(*Array); t != nil { l = t.len e = t.elem } @@ -247,7 +247,7 @@ func (check *Checker) sliceExpr(x *operand, e *syntax.SliceExpr) { // but don't go from untyped string to string. cu = Typ[String] if !isTypeParam(x.typ) { - cu = under(x.typ) // untyped string remains untyped + cu = x.typ.Underlying() // untyped string remains untyped } } @@ -292,7 +292,7 @@ func (check *Checker) sliceExpr(x *operand, e *syntax.SliceExpr) { x.typ = &Slice{elem: u.elem} case *Pointer: - if u, _ := under(u.base).(*Array); u != nil { + if u, _ := u.base.Underlying().(*Array); u != nil { valid = true length = u.len x.typ = &Slice{elem: u.elem} diff --git a/src/cmd/compile/internal/types2/infer.go b/src/cmd/compile/internal/types2/infer.go index 08d422969059f2..e7c59597372413 100644 --- a/src/cmd/compile/internal/types2/infer.go +++ b/src/cmd/compile/internal/types2/infer.go @@ -668,7 +668,7 @@ func coreTerm(tpar *TypeParam) (*term, bool) { if n == 1 { if debug { u, _ := commonUnder(tpar, nil) - assert(under(single.typ) == u) + assert(single.typ.Underlying() == u) } return single, true } diff --git a/src/cmd/compile/internal/types2/instantiate.go b/src/cmd/compile/internal/types2/instantiate.go index 1c8c12d07cfafa..3c4044ed3c9f2c 100644 --- a/src/cmd/compile/internal/types2/instantiate.go +++ b/src/cmd/compile/internal/types2/instantiate.go @@ -83,7 +83,7 @@ func Instantiate(ctxt *Context, orig Type, targs []Type, validate bool) (Type, e // // For Named types the resulting instance may be unexpanded. // -// check may be nil (when not type-checking syntax); pos is used only only if check is non-nil. +// check may be nil (when not type-checking syntax); pos is used only if check is non-nil. 
func (check *Checker) instance(pos syntax.Pos, orig genericType, targs []Type, expanding *Named, ctxt *Context) (res Type) { // The order of the contexts below matters: we always prefer instances in the // expanding instance context in order to preserve reference cycles. @@ -226,12 +226,12 @@ func (check *Checker) verify(pos syntax.Pos, tparams []*TypeParam, targs []Type, // If the provided cause is non-nil, it may be set to an error string // explaining why V does not implement (or satisfy, for constraints) T. func (check *Checker) implements(V, T Type, constraint bool, cause *string) bool { - Vu := under(V) - Tu := under(T) + Vu := V.Underlying() + Tu := T.Underlying() if !isValid(Vu) || !isValid(Tu) { return true // avoid follow-on errors } - if p, _ := Vu.(*Pointer); p != nil && !isValid(under(p.base)) { + if p, _ := Vu.(*Pointer); p != nil && !isValid(p.base.Underlying()) { return true // avoid follow-on errors (see go.dev/issue/49541 for an example) } @@ -339,7 +339,7 @@ func (check *Checker) implements(V, T Type, constraint bool, cause *string) bool // If V ∉ t.typ but V ∈ ~t.typ then remember this type // so we can suggest it as an alternative in the error // message. - if alt == nil && !t.tilde && Identical(t.typ, under(t.typ)) { + if alt == nil && !t.tilde && Identical(t.typ, t.typ.Underlying()) { tt := *t tt.tilde = true if tt.includes(V) { diff --git a/src/cmd/compile/internal/types2/lookup.go b/src/cmd/compile/internal/types2/lookup.go index 624b510dc83dc3..0b9282e1a7cfc2 100644 --- a/src/cmd/compile/internal/types2/lookup.go +++ b/src/cmd/compile/internal/types2/lookup.go @@ -152,7 +152,7 @@ func lookupFieldOrMethodImpl(T Type, addressable bool, pkg *Package, name string // *typ where typ is an interface (incl. a type parameter) has no methods. if isPtr { - if _, ok := under(typ).(*Interface); ok { + if _, ok := typ.Underlying().(*Interface); ok { return } } @@ -202,7 +202,7 @@ func lookupFieldOrMethodImpl(T Type, addressable bool, pkg *Package, name string } } - switch t := under(typ).(type) { + switch t := typ.Underlying().(type) { case *Struct: // look for a matching field and collect embedded types for i, f := range t.fields { @@ -373,7 +373,7 @@ func MissingMethod(V Type, T *Interface, static bool) (method *Func, wrongType b // The comparator is used to compare signatures. // If a method is missing and cause is not nil, *cause describes the error. func (check *Checker) missingMethod(V, T Type, static bool, equivalent func(x, y Type) bool, cause *string) (method *Func, wrongType bool) { - methods := under(T).(*Interface).typeSet().methods // T must be an interface + methods := T.Underlying().(*Interface).typeSet().methods // T must be an interface if len(methods) == 0 { return nil, false } @@ -393,7 +393,7 @@ func (check *Checker) missingMethod(V, T Type, static bool, equivalent func(x, y var m *Func // method on T we're trying to implement var f *Func // method on V, if found (state is one of ok, wrongName, wrongSig) - if u, _ := under(V).(*Interface); u != nil { + if u, _ := V.Underlying().(*Interface); u != nil { tset := u.typeSet() for _, m = range methods { _, f = tset.LookupMethod(m.pkg, m.name, false) @@ -534,7 +534,7 @@ func (check *Checker) hasAllMethods(V, T Type, static bool, equivalent func(x, y // hasInvalidEmbeddedFields reports whether T is a struct (or a pointer to a struct) that contains // (directly or indirectly) embedded fields with invalid types. 
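The `implements` and `missingMethod` changes above only alter how the underlying interface type is obtained; the observable behavior is unchanged. Purely for reference, a small sketch of the exported entry points `types.Implements` and `types.MissingMethod`, which wrap this machinery:

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
)

const src = `package p

type Stringer interface{ String() string }

type A struct{}
func (A) String() string { return "A" }

type B struct{}
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	var conf types.Config
	pkg, err := conf.Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}
	iface := pkg.Scope().Lookup("Stringer").Type().Underlying().(*types.Interface)
	a := pkg.Scope().Lookup("A").Type()
	b := pkg.Scope().Lookup("B").Type()

	fmt.Println(types.Implements(a, iface)) // true
	// MissingMethod explains why B does not satisfy Stringer.
	if m, _ := types.MissingMethod(b, iface, true); m != nil {
		fmt.Println("missing:", m.Name()) // missing: String
	}
}
```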
func hasInvalidEmbeddedFields(T Type, seen map[*Struct]bool) bool { - if S, _ := under(derefStructPtr(T)).(*Struct); S != nil && !seen[S] { + if S, _ := derefStructPtr(T).Underlying().(*Struct); S != nil && !seen[S] { if seen == nil { seen = make(map[*Struct]bool) } @@ -549,14 +549,14 @@ func hasInvalidEmbeddedFields(T Type, seen map[*Struct]bool) bool { } func isInterfacePtr(T Type) bool { - p, _ := under(T).(*Pointer) + p, _ := T.Underlying().(*Pointer) return p != nil && IsInterface(p.base) } // check may be nil. func (check *Checker) interfacePtrError(T Type) string { assert(isInterfacePtr(T)) - if p, _ := under(T).(*Pointer); isTypeParam(p.base) { + if p, _ := T.Underlying().(*Pointer); isTypeParam(p.base) { return check.sprintf("type %s is pointer to type parameter, not type parameter", T) } return check.sprintf("type %s is pointer to interface, not interface", T) @@ -629,8 +629,8 @@ func deref(typ Type) (Type, bool) { // derefStructPtr dereferences typ if it is a (named or unnamed) pointer to a // (named or unnamed) struct and returns its base. Otherwise it returns typ. func derefStructPtr(typ Type) Type { - if p, _ := under(typ).(*Pointer); p != nil { - if _, ok := under(p.base).(*Struct); ok { + if p, _ := typ.Underlying().(*Pointer); p != nil { + if _, ok := p.base.Underlying().(*Struct); ok { return p.base } } diff --git a/src/cmd/compile/internal/types2/named.go b/src/cmd/compile/internal/types2/named.go index d02b95e874a470..4a28929c513a9b 100644 --- a/src/cmd/compile/internal/types2/named.go +++ b/src/cmd/compile/internal/types2/named.go @@ -62,14 +62,14 @@ import ( // - We say that a Named type is "resolved" if its RHS information has been // loaded or fully type-checked. For Named types constructed from export // data, this may involve invoking a loader function to extract information -// from export data. For instantiated named types this involves reading -// information from their origin. +// from export data. For instantiated Named types this involves reading +// information from their origin and substituting type arguments into a +// "synthetic" RHS; this process is called "expanding" the RHS (see below). // - We say that a Named type is "expanded" if it is an instantiated type and -// type parameters in its underlying type and methods have been substituted -// with the type arguments from the instantiation. A type may be partially -// expanded if some but not all of these details have been substituted. -// Similarly, we refer to these individual details (underlying type or -// method) as being "expanded". +// type parameters in its RHS and methods have been substituted with the type +// arguments from the instantiation. A type may be partially expanded if some +// but not all of these details have been substituted. Similarly, we refer to +// these individual details (RHS or method) as being "expanded". // - When all information is known for a named type, we say it is "complete". // // Some invariants to keep in mind: each declared Named type has a single @@ -107,18 +107,17 @@ type Named struct { check *Checker // non-nil during type-checking; nil otherwise obj *TypeName // corresponding declared object for declared types; see above for instantiated types - // fromRHS holds the type (on RHS of declaration) this *Named type is derived - // from (for cycle reporting). Only used by validType, and therefore does not - // require synchronization. 
- fromRHS Type + // flags indicating temporary violations of the invariants for fromRHS and underlying + allowNilRHS bool // same as below, as well as briefly during checking of a type declaration + allowNilUnderlying bool // may be true from creation via [NewNamed] until [Named.SetUnderlying] - // information for instantiated types; nil otherwise - inst *instance + underlying Type // underlying type, or nil + inst *instance // information for instantiated types; nil otherwise - mu sync.Mutex // guards all fields below - state_ uint32 // the current state of this type; must only be accessed atomically - underlying Type // possibly a *Named during setup; never a *Named once set up completely - tparams *TypeParamList // type parameters, or nil + mu sync.Mutex // guards all fields below + state_ uint32 // the current state of this type; must only be accessed atomically + fromRHS Type // the declaration RHS this type is derived from + tparams *TypeParamList // type parameters, or nil // methods declared for this type (not the method set of this type) // Signatures are type-checked lazily. @@ -145,7 +144,7 @@ type namedState uint32 // Note: the order of states is relevant const ( - unresolved namedState = iota // tparams, underlying type and methods might be unavailable + unresolved namedState = iota // type parameters, RHS, underlying, and methods might be unavailable resolved // resolve has run; methods might be unexpanded (for instances) loaded // loader has run; constraints might be unexpanded (for generic types) complete // all data is known @@ -158,10 +157,18 @@ func NewNamed(obj *TypeName, underlying Type, methods []*Func) *Named { if asNamed(underlying) != nil { panic("underlying type must not be *Named") } - return (*Checker)(nil).newNamed(obj, underlying, methods) + n := (*Checker)(nil).newNamed(obj, underlying, methods) + if underlying == nil { + n.allowNilRHS = true + n.allowNilUnderlying = true + } else { + n.SetUnderlying(underlying) + } + return n + } -// resolve resolves the type parameters, methods, and underlying type of n. +// resolve resolves the type parameters, methods, and RHS of n. // // For the purposes of resolution, there are three categories of named types: // 1. Instantiated Types @@ -171,18 +178,17 @@ func NewNamed(obj *TypeName, underlying Type, methods []*Func) *Named { // Note that the above form a partition. // // Instantiated types: -// Type parameters, methods, and underlying type of n become accessible, -// though methods are lazily populated as needed. +// Type parameters, methods, and RHS of n become accessible, though methods +// are lazily populated as needed. // // Lazy loaded types: -// Type parameters, methods, and underlying type of n become accessible -// and are fully expanded. +// Type parameters, methods, and RHS of n become accessible and are fully +// expanded. // // All others: -// Effectively, nothing happens. The underlying type of n may still be -// a named type. +// Effectively, nothing happens. 
func (n *Named) resolve() *Named { - if n.state() > unresolved { // avoid locking below + if n.state() >= resolved { // avoid locking below return n } @@ -191,21 +197,19 @@ func (n *Named) resolve() *Named { n.mu.Lock() defer n.mu.Unlock() - if n.state() > unresolved { + if n.state() >= resolved { return n } if n.inst != nil { - assert(n.underlying == nil) // n is an unresolved instance - assert(n.loader == nil) // instances are created by instantiation, in which case n.loader is nil + assert(n.fromRHS == nil) // instantiated types are not declared types + assert(n.loader == nil) // cannot import an instantiation orig := n.inst.orig orig.resolve() - underlying := n.expandUnderlying() + n.fromRHS = n.expandRHS() n.tparams = orig.tparams - n.underlying = underlying - n.fromRHS = orig.fromRHS // for cycle detection if len(orig.methods) == 0 { n.setState(complete) // nothing further to do @@ -224,25 +228,25 @@ func (n *Named) resolve() *Named { // methods would need to support reentrant calls though. It would // also make the API more future-proof towards further extensions. if n.loader != nil { - assert(n.underlying == nil) - assert(n.TypeArgs().Len() == 0) // instances are created by instantiation, in which case n.loader is nil + assert(n.fromRHS == nil) // not loaded yet + assert(n.inst == nil) // cannot import an instantiation tparams, underlying, methods, delayed := n.loader(n) n.loader = nil n.tparams = bindTParams(tparams) - n.underlying = underlying n.fromRHS = underlying // for cycle detection n.methods = methods - // advance state to avoid deadlock calling delayed functions - n.setState(loaded) + n.setState(loaded) // avoid deadlock calling delayed functions for _, f := range delayed { f() } } + assert(n.fromRHS != nil || n.allowNilRHS) + assert(n.underlying == nil) // underlying comes after resolving n.setState(complete) return n } @@ -259,8 +263,8 @@ func (n *Named) setState(state namedState) { } // newNamed is like NewNamed but with a *Checker receiver. -func (check *Checker) newNamed(obj *TypeName, underlying Type, methods []*Func) *Named { - typ := &Named{check: check, obj: obj, fromRHS: underlying, underlying: underlying, methods: methods} +func (check *Checker) newNamed(obj *TypeName, fromRHS Type, methods []*Func) *Named { + typ := &Named{check: check, obj: obj, fromRHS: fromRHS, methods: methods} if obj.typ == nil { obj.typ = typ } @@ -300,25 +304,13 @@ func (check *Checker) newNamedInstance(pos syntax.Pos, orig *Named, targs []Type return typ } -func (t *Named) cleanup() { - assert(t.inst == nil || t.inst.orig.inst == nil) - // Ensure that every defined type created in the course of type-checking has - // either non-*Named underlying type, or is unexpanded. - // - // This guarantees that we don't leak any types whose underlying type is - // *Named, because any unexpanded instances will lazily compute their - // underlying type by substituting in the underlying type of their origin. - // The origin must have either been imported or type-checked and expanded - // here, and in either case its underlying type will be fully expanded. - switch t.underlying.(type) { - case nil: - if t.TypeArgs().Len() == 0 { - panic("nil underlying") - } - case *Named, *Alias: - t.under() // t.under may add entries to check.cleaners +func (n *Named) cleanup() { + // Instances can have a nil underlying at the end of type checking — they + // will lazily expand it as needed. All other types must have one. 
+ if n.inst == nil { + n.resolve().under() } - t.check = nil + n.check = nil } // Obj returns the type name for the declaration defining the named type t. For @@ -477,10 +469,12 @@ func (t *Named) SetUnderlying(underlying Type) { if asNamed(underlying) != nil { panic("underlying type must not be *Named") } - t.resolve().underlying = underlying - if t.fromRHS == nil { - t.fromRHS = underlying // for cycle detection - } + // Invariant: Presence of underlying type implies it was resolved. + t.fromRHS = underlying + t.allowNilRHS = false + t.resolve() + t.underlying = underlying + t.allowNilUnderlying = false } // AddMethod adds method m unless it is already in the method list. @@ -523,9 +517,20 @@ func (t *Named) methodIndex(name string, foldCase bool) int { // Alias types. // // [underlying type]: https://go.dev/ref/spec#Underlying_types. -func (t *Named) Underlying() Type { - // TODO(gri) Investigate if Unalias can be moved to where underlying is set. - return Unalias(t.resolve().underlying) +func (n *Named) Underlying() Type { + n.resolve() + + // The gccimporter depends on writing a nil underlying via NewNamed and + // immediately reading it back. Rather than putting that in under() and + // complicating things there, we just check for that special case here. + if n.fromRHS == nil { + assert(n.allowNilRHS) + if n.allowNilUnderlying { + return nil + } + } + + return n.under() } func (t *Named) String() string { return TypeString(t, nil) } @@ -536,89 +541,55 @@ func (t *Named) String() string { return TypeString(t, nil) } // TODO(rfindley): reorganize the loading and expansion methods under this // heading. -// under returns the expanded underlying type of n0; possibly by following -// forward chains of named types. If an underlying type is found, resolve -// the chain by setting the underlying type for each defined type in the -// chain before returning it. If no underlying type is found or a cycle -// is detected, the result is Typ[Invalid]. If a cycle is detected and -// n0.check != nil, the cycle is reported. -// -// This is necessary because the underlying type of named may be itself a -// named type that is incomplete: -// -// type ( -// A B -// B *C -// C A -// ) -// -// The type of C is the (named) type of A which is incomplete, -// and which has as its underlying type the named type B. -func (n0 *Named) under() Type { - u := n0.Underlying() - - // If the underlying type of a defined type is not a defined - // (incl. instance) type, then that is the desired underlying - // type. - var n1 *Named - switch u1 := u.(type) { - case nil: - // After expansion via Underlying(), we should never encounter a nil - // underlying. - panic("nil underlying") - default: - // common case - return u - case *Named: - // handled below - n1 = u1 - } - - if n0.check == nil { - panic("Named.check == nil but type is incomplete") - } - - // Invariant: after this point n0 as well as any named types in its - // underlying chain should be set up when this function exits. - check := n0.check - n := n0 - - seen := make(map[*Named]int) // types that need their underlying type resolved - var path []Object // objects encountered, for cycle reporting - -loop: - for { - seen[n] = len(seen) - path = append(path, n.obj) - n = n1 - if i, ok := seen[n]; ok { - // cycle - check.cycleError(path[i:], firstInSrc(path[i:])) - u = Typ[Invalid] - break - } - u = n.Underlying() - switch u1 := u.(type) { +// under returns the (possibly expanded) underlying type of n. +// +// It does so by following RHS type chains. 
If a type literal is found, each +// named type in the chain has its underlying set to that type. Aliases are +// skipped because their underlying type is not memoized. +// +// This function also checks for instantiated layout cycles, which are +// reachable only in the case where resolve() expanded an instantiated +// type which became self-referencing without indirection. If such a +// cycle is found, the result is Typ[Invalid]; if n.check != nil, the +// cycle is also reported. +func (n *Named) under() Type { + assert(n.state() >= resolved) + + if n.underlying != nil { + return n.underlying + } + + var rhs Type = n + var u Type + + seen := make(map[*Named]int) + var path []Object + + for u == nil { + switch t := rhs.(type) { case nil: u = Typ[Invalid] - break loop - default: - break loop + case *Alias: + rhs = unalias(t) case *Named: - // Continue collecting *Named types in the chain. - n1 = u1 + if i, ok := seen[t]; ok { + n.check.cycleError(path[i:], firstInSrc(path[i:])) + u = Typ[Invalid] + break + } + seen[t] = len(seen) + path = append(path, t.obj) + t.resolve() + assert(t.fromRHS != nil || t.allowNilRHS) + rhs = t.fromRHS + default: + u = rhs // any type literal works } } - for n := range seen { - // We should never have to update the underlying type of an imported type; - // those underlying types should have been resolved during the import. - // Also, doing so would lead to a race condition (was go.dev/issue/31749). - // Do this check always, not just in debug mode (it's cheap). - if n.obj.pkg != check.pkg { - panic("imported type with unresolved underlying type") - } - n.underlying = u + // go back up the chain + for t := range seen { + t.underlying = u } return u @@ -646,78 +617,108 @@ func (check *Checker) context() *Context { return check.ctxt } -// expandUnderlying substitutes type arguments in the underlying type n.orig, -// returning the result. Returns Typ[Invalid] if there was an error. -func (n *Named) expandUnderlying() Type { +// expandRHS crafts a synthetic RHS for an instantiated type using the RHS of +// its origin type (which must be a generic type). +// +// Suppose that we had: +// +// type T[P any] struct { +// f P +// } +// +// type U T[int] +// +// When we go to U, we observe T[int]. Since T[int] is an instantiation, it has no +// declaration. Here, we craft a synthetic RHS for T[int] as if it were declared, +// somewhat similar to: +// +// type T[int] struct { +// f int +// } +// +// And note that the synthetic RHS here is the same as the underlying for U. Now, +// consider: +// +// type T[_ any] U +// type U int +// type V T[U] +// +// The synthetic RHS for T[U] becomes: +// +// type T[U] U +// +// Whereas the underlying of V is int, not U. +func (n *Named) expandRHS() (rhs Type) { check := n.check if check != nil && check.conf.Trace { - check.trace(n.obj.pos, "-- Named.expandUnderlying %s", n) + check.trace(n.obj.pos, "-- Named.expandRHS %s", n) check.indent++ defer func() { check.indent-- - check.trace(n.obj.pos, "=> %s (tparams = %s, under = %s)", n, n.tparams.list(), n.underlying) + check.trace(n.obj.pos, "=> %s (rhs = %s)", n, rhs) }() } - assert(n.inst.orig.underlying != nil) + assert(n.state() == unresolved) + if n.inst.ctxt == nil { n.inst.ctxt = NewContext() } + ctxt := n.inst.ctxt orig := n.inst.orig - targs := n.inst.targs - if asNamed(orig.underlying) != nil { - // We should only get a Named underlying type here during type checking - // (for example, in recursive type declarations). 
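To see the effect that `expandRHS` describes from the outside, the following sketch uses the exported `go/types` API with the `T[P any]` / `U T[int]` example from the comment above; it assumes the exported checker expands instances the same way.

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
)

const src = `package p

type T[P any] struct {
	f P
}

type U T[int]
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	var conf types.Config
	pkg, err := conf.Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}
	u := pkg.Scope().Lookup("U").Type()
	// The RHS of U is the instance T[int]; expanding it substitutes the
	// type argument, so the underlying type of U is struct{f int}.
	fmt.Println(u.Underlying()) // struct{f int}
}
```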
- assert(check != nil) - } + assert(orig.state() >= resolved) + assert(orig.fromRHS != nil) + + targs := n.inst.targs + tpars := orig.tparams - if orig.tparams.Len() != targs.Len() { - // Mismatching arg and tparam length may be checked elsewhere. + if targs.Len() != tpars.Len() { return Typ[Invalid] } - // Ensure that an instance is recorded before substituting, so that we - // resolve n for any recursive references. - h := n.inst.ctxt.instanceHash(orig, targs.list()) - n2 := n.inst.ctxt.update(h, orig, n.TypeArgs().list(), n) - assert(n == n2) + h := ctxt.instanceHash(orig, targs.list()) + u := ctxt.update(h, orig, targs.list(), n) // block fixed point infinite instantiation + assert(n == u) - smap := makeSubstMap(orig.tparams.list(), targs.list()) - var ctxt *Context + m := makeSubstMap(tpars.list(), targs.list()) if check != nil { ctxt = check.context() } - underlying := n.check.subst(n.obj.pos, orig.underlying, smap, n, ctxt) - // If the underlying type of n is an interface, we need to set the receiver of - // its methods accurately -- we set the receiver of interface methods on - // the RHS of a type declaration to the defined type. - if iface, _ := underlying.(*Interface); iface != nil { + + rhs = check.subst(n.obj.pos, orig.fromRHS, m, n, ctxt) + + // TODO(markfreeman): Can we handle this in substitution? + // If the RHS is an interface, we must set the receiver of interface methods + // to the named type. + if iface, _ := rhs.(*Interface); iface != nil { if methods, copied := replaceRecvType(iface.methods, orig, n); copied { - // If the underlying type doesn't actually use type parameters, it's - // possible that it wasn't substituted. In this case we need to create - // a new *Interface before modifying receivers. - if iface == orig.underlying { - old := iface - iface = check.newInterface() - iface.embeddeds = old.embeddeds - assert(old.complete) // otherwise we are copying incomplete data - iface.complete = old.complete - iface.implicit = old.implicit // should be false but be conservative - underlying = iface + // If the RHS doesn't use type parameters, it may not have been + // substituted; we need to craft a new interface first. + if iface == orig.fromRHS { + assert(iface.complete) // otherwise we are copying incomplete data + + crafted := check.newInterface() + crafted.complete = true + crafted.implicit = false + crafted.embeddeds = iface.embeddeds + + iface = crafted } iface.methods = methods iface.tset = nil // recompute type set with new methods - // If check != nil, check.newInterface will have saved the interface for later completion. - if check == nil { // golang/go#61561: all newly created interfaces must be fully evaluated + // go.dev/issue/61561: We have to complete the interface even without a checker. + if check == nil { iface.typeSet() } + + return iface } } - return underlying + return rhs } // safeUnderlying returns the underlying type of typ without expanding diff --git a/src/cmd/compile/internal/types2/object.go b/src/cmd/compile/internal/types2/object.go index 7096c556971077..463ed30308d0ee 100644 --- a/src/cmd/compile/internal/types2/object.go +++ b/src/cmd/compile/internal/types2/object.go @@ -295,7 +295,8 @@ func NewTypeName(pos syntax.Pos, pkg *Package, name string, typ Type) *TypeName // lazily calls resolve to finish constructing the Named object. 
func NewTypeNameLazy(pos syntax.Pos, pkg *Package, name string, load func(*Named) ([]*TypeParam, Type, []*Func, []func())) *TypeName { obj := NewTypeName(pos, pkg, name, nil) - NewNamed(obj, nil, nil).loader = load + n := (*Checker)(nil).newNamed(obj, nil, nil) + n.loader = load return obj } @@ -638,7 +639,7 @@ func writeObject(buf *bytes.Buffer, obj Object, qf Qualifier) { } else { // TODO(gri) should this be fromRHS for *Named? // (See discussion in #66559.) - typ = under(typ) + typ = typ.Underlying() } } diff --git a/src/cmd/compile/internal/types2/operand.go b/src/cmd/compile/internal/types2/operand.go index 81f46af535152b..cd9e9f3575f71a 100644 --- a/src/cmd/compile/internal/types2/operand.go +++ b/src/cmd/compile/internal/types2/operand.go @@ -194,7 +194,7 @@ func operandString(x *operand, qf Qualifier) string { what := compositeKind(x.typ) if what == "" { // x.typ must be basic type - what = under(x.typ).(*Basic).name + what = x.typ.Underlying().(*Basic).name } desc += what + " " } @@ -229,7 +229,7 @@ func operandString(x *operand, qf Qualifier) string { // ("array", "slice", etc.) or the empty string if typ is not // composite but a basic type. func compositeKind(typ Type) string { - switch under(typ).(type) { + switch typ.Underlying().(type) { case *Basic: return "" case *Array: @@ -319,8 +319,8 @@ func (x *operand) assignableTo(check *Checker, T Type, cause *string) (bool, Cod return true, 0 } - Vu := under(V) - Tu := under(T) + Vu := V.Underlying() + Tu := T.Underlying() Vp, _ := V.(*TypeParam) Tp, _ := T.(*TypeParam) diff --git a/src/cmd/compile/internal/types2/predicates.go b/src/cmd/compile/internal/types2/predicates.go index c157672ba58a4d..60147c5e21138c 100644 --- a/src/cmd/compile/internal/types2/predicates.go +++ b/src/cmd/compile/internal/types2/predicates.go @@ -28,11 +28,11 @@ func isString(t Type) bool { return isBasic(t, IsString) } func isIntegerOrFloat(t Type) bool { return isBasic(t, IsInteger|IsFloat) } func isConstType(t Type) bool { return isBasic(t, IsConstType) } -// isBasic reports whether under(t) is a basic type with the specified info. +// isBasic reports whether t.Underlying() is a basic type with the specified info. // If t is a type parameter the result is false; i.e., // isBasic does not look inside a type parameter. func isBasic(t Type, info BasicInfo) bool { - u, _ := under(t).(*Basic) + u, _ := t.Underlying().(*Basic) return u != nil && u.info&info != 0 } @@ -48,7 +48,7 @@ func allString(t Type) bool { return allBasic(t, IsString) } func allOrdered(t Type) bool { return allBasic(t, IsOrdered) } func allNumericOrString(t Type) bool { return allBasic(t, IsNumeric|IsString) } -// allBasic reports whether under(t) is a basic type with the specified info. +// allBasic reports whether t.Underlying() is a basic type with the specified info. // If t is a type parameter, the result is true if isBasic(t, info) is true // for all specific types of the type parameter's type set. func allBasic(t Type, info BasicInfo) bool { @@ -107,7 +107,7 @@ func isUntypedNumeric(t Type) bool { // IsInterface reports whether t is an interface type. 
func IsInterface(t Type) bool { - _, ok := under(t).(*Interface) + _, ok := t.Underlying().(*Interface) return ok } @@ -163,7 +163,7 @@ func comparableType(T Type, dynamic bool, seen map[Type]bool) *typeError { } seen[T] = true - switch t := under(T).(type) { + switch t := T.Underlying().(type) { case *Basic: // assume invalid types to be comparable to avoid follow-up errors if t.kind == UntypedNil { @@ -206,7 +206,7 @@ func comparableType(T Type, dynamic bool, seen map[Type]bool) *typeError { // hasNil reports whether type t includes the nil value. func hasNil(t Type) bool { - switch u := under(t).(type) { + switch u := t.Underlying().(type) { case *Basic: return u.kind == UnsafePointer case *Slice, *Pointer, *Signature, *Map, *Chan: diff --git a/src/cmd/compile/internal/types2/range.go b/src/cmd/compile/internal/types2/range.go index b654601eafc73e..899f5c09911a5e 100644 --- a/src/cmd/compile/internal/types2/range.go +++ b/src/cmd/compile/internal/types2/range.go @@ -35,7 +35,7 @@ func (check *Checker) rangeStmt(inner stmtContext, rangeStmt *syntax.ForStmt, no check.expr(nil, &x, rangeVar) if isTypes2 && x.mode != invalid && sValue == nil && !check.hasCallOrRecv { - if t, ok := arrayPtrDeref(under(x.typ)).(*Array); ok { + if t, ok := arrayPtrDeref(x.typ.Underlying()).(*Array); ok { for { // Put constant info on the thing inside parentheses. // That's where (*../noder/writer).expr expects it. diff --git a/src/cmd/compile/internal/types2/signature.go b/src/cmd/compile/internal/types2/signature.go index ea1cfd88cc60b4..ea60254fa6832e 100644 --- a/src/cmd/compile/internal/types2/signature.go +++ b/src/cmd/compile/internal/types2/signature.go @@ -203,7 +203,7 @@ func (check *Checker) collectRecv(rparam *syntax.Field, scopePos syntax.Pos) (*V case *Alias: // Methods on generic aliases are not permitted. // Only report an error if the alias type is valid. - if isValid(unalias(t)) { + if isValid(t) { check.errorf(rbase, InvalidRecv, "cannot define new methods on generic alias type %s", t) } // Ok to continue but do not set basetype in this case so that @@ -439,7 +439,7 @@ func (check *Checker) validRecv(pos poser, recv *Var) { break } var cause string - switch u := T.under().(type) { + switch u := T.Underlying().(type) { case *Basic: // unsafe.Pointer is treated like a regular pointer if u.kind == UnsafePointer { diff --git a/src/cmd/compile/internal/types2/sizeof_test.go b/src/cmd/compile/internal/types2/sizeof_test.go index d435c049c5b87d..13b96209114de1 100644 --- a/src/cmd/compile/internal/types2/sizeof_test.go +++ b/src/cmd/compile/internal/types2/sizeof_test.go @@ -31,7 +31,7 @@ func TestSizeof(t *testing.T) { {Interface{}, 40, 80}, {Map{}, 16, 32}, {Chan{}, 12, 24}, - {Named{}, 60, 112}, + {Named{}, 64, 120}, {TypeParam{}, 28, 48}, {term{}, 12, 24}, diff --git a/src/cmd/compile/internal/types2/sizes.go b/src/cmd/compile/internal/types2/sizes.go index 7b1c00b40ab9a9..534ecfba35ced7 100644 --- a/src/cmd/compile/internal/types2/sizes.go +++ b/src/cmd/compile/internal/types2/sizes.go @@ -54,7 +54,7 @@ func (s *StdSizes) Alignof(T Type) (result int64) { // For arrays and structs, alignment is defined in terms // of alignment of the elements and fields, respectively. - switch t := under(T).(type) { + switch t := T.Underlying().(type) { case *Array: // spec: "For a variable x of array type: unsafe.Alignof(x) // is the same as unsafe.Alignof(x[0]), but at least 1." 
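The `Alignof`/`Sizeof` hunks in this area only change how the underlying type is looked up; the computed layouts stay the same. As a reference point, here is a sketch using the exported `types.SizesFor`, assuming the standard `gc`/`amd64` size rules:

```go
package main

import (
	"fmt"
	"go/types"
)

func main() {
	// struct { a int8; b int64 }
	fields := []*types.Var{
		types.NewField(0, nil, "a", types.Typ[types.Int8], false),
		types.NewField(0, nil, "b", types.Typ[types.Int64], false),
	}
	st := types.NewStruct(fields, nil)

	sizes := types.SizesFor("gc", "amd64")
	fmt.Println(sizes.Sizeof(st))        // 16: a is padded up to b's alignment
	fmt.Println(sizes.Alignof(st))       // 8
	fmt.Println(sizes.Offsetsof(fields)) // [0 8]
}
```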
@@ -162,7 +162,7 @@ var basicSizes = [...]byte{ } func (s *StdSizes) Sizeof(T Type) int64 { - switch t := under(T).(type) { + switch t := T.Underlying().(type) { case *Basic: assert(isTyped(T)) k := t.kind @@ -307,7 +307,7 @@ func (conf *Config) offsetsof(T *Struct) []int64 { func (conf *Config) offsetof(T Type, index []int) int64 { var offs int64 for _, i := range index { - s := under(T).(*Struct) + s := T.Underlying().(*Struct) d := conf.offsetsof(s)[i] if d < 0 { return -1 diff --git a/src/cmd/compile/internal/types2/struct.go b/src/cmd/compile/internal/types2/struct.go index f5cdc472f77de2..99b75332e8ffec 100644 --- a/src/cmd/compile/internal/types2/struct.go +++ b/src/cmd/compile/internal/types2/struct.go @@ -141,12 +141,12 @@ func (check *Checker) structType(styp *Struct, e *syntax.StructType) { // Because we have a name, typ must be of the form T or *T, where T is the name // of a (named or alias) type, and t (= deref(typ)) must be the type of T. // We must delay this check to the end because we don't want to instantiate - // (via under(t)) a possibly incomplete type. + // (via t.Underlying()) a possibly incomplete type. embeddedTyp := typ // for closure below embeddedPos := pos check.later(func() { t, isPtr := deref(embeddedTyp) - switch u := under(t).(type) { + switch u := t.Underlying().(type) { case *Basic: if !isValid(t) { // error was reported before diff --git a/src/cmd/compile/internal/types2/typeparam.go b/src/cmd/compile/internal/types2/typeparam.go index c60b5eb41722e4..2ffa00c32c7ec4 100644 --- a/src/cmd/compile/internal/types2/typeparam.go +++ b/src/cmd/compile/internal/types2/typeparam.go @@ -113,7 +113,7 @@ func (t *TypeParam) iface() *Interface { // determine constraint interface var ityp *Interface - switch u := under(bound).(type) { + switch u := bound.Underlying().(type) { case *Basic: if !isValid(u) { // error is reported elsewhere diff --git a/src/cmd/compile/internal/types2/typeset.go b/src/cmd/compile/internal/types2/typeset.go index ce487e74f7232a..fafe6f368ba432 100644 --- a/src/cmd/compile/internal/types2/typeset.go +++ b/src/cmd/compile/internal/types2/typeset.go @@ -114,13 +114,13 @@ func (s *_TypeSet) all(f func(t, u Type) bool) bool { for _, t := range s.terms { assert(t.typ != nil) - // Unalias(x) == under(x) for ~x terms + // Unalias(x) == x.Underlying() for ~x terms u := Unalias(t.typ) if !t.tilde { - u = under(u) + u = u.Underlying() } if debug { - assert(Identical(u, under(u))) + assert(Identical(u, u.Underlying())) } if !f(t.typ, u) { return false @@ -264,7 +264,7 @@ func computeInterfaceTypeSet(check *Checker, pos syntax.Pos, ityp *Interface) *_ } var comparable bool var terms termlist - switch u := under(typ).(type) { + switch u := typ.Underlying().(type) { case *Interface: // For now we don't permit type parameters as constraints. assert(!isTypeParam(typ)) @@ -380,7 +380,7 @@ func computeUnionTypeSet(check *Checker, unionSets map[*Union]*_TypeSet, pos syn var allTerms termlist for _, t := range utyp.terms { var terms termlist - u := under(t.typ) + u := t.typ.Underlying() if ui, _ := u.(*Interface); ui != nil { // For now we don't permit type parameters as constraints. 
assert(!isTypeParam(t.typ)) diff --git a/src/cmd/compile/internal/types2/typeset_test.go b/src/cmd/compile/internal/types2/typeset_test.go index 40ca28e525feb5..bcff2489306828 100644 --- a/src/cmd/compile/internal/types2/typeset_test.go +++ b/src/cmd/compile/internal/types2/typeset_test.go @@ -64,7 +64,7 @@ func TestTypeSetString(t *testing.T) { if obj == nil { t.Fatalf("%s: T not found (invalid test case)", body) } - T, ok := under(obj.Type()).(*Interface) + T, ok := obj.Type().Underlying().(*Interface) if !ok { t.Fatalf("%s: %v is not an interface (invalid test case)", body, obj) } diff --git a/src/cmd/compile/internal/types2/typestring.go b/src/cmd/compile/internal/types2/typestring.go index 47f53bc12d77b5..b1f0d0929bae33 100644 --- a/src/cmd/compile/internal/types2/typestring.go +++ b/src/cmd/compile/internal/types2/typestring.go @@ -455,7 +455,7 @@ func (w *typeWriter) tuple(tup *Tuple, variadic bool) { } else { // special case: // append(s, "foo"...) leads to signature func([]byte, string...) - if t, _ := under(typ).(*Basic); t == nil || t.kind != String { + if t, _ := typ.Underlying().(*Basic); t == nil || t.kind != String { w.error("expected string type") continue } diff --git a/src/cmd/compile/internal/types2/typeterm.go b/src/cmd/compile/internal/types2/typeterm.go index 97791324e1e75c..cb11811d458c5a 100644 --- a/src/cmd/compile/internal/types2/typeterm.go +++ b/src/cmd/compile/internal/types2/typeterm.go @@ -115,7 +115,7 @@ func (x *term) includes(t Type) bool { u := t if x.tilde { - u = under(u) + u = u.Underlying() } return Identical(x.typ, u) } @@ -155,11 +155,11 @@ func (x *term) disjoint(y *term) bool { } ux := x.typ if y.tilde { - ux = under(ux) + ux = ux.Underlying() } uy := y.typ if x.tilde { - uy = under(uy) + uy = uy.Underlying() } return !Identical(ux, uy) } diff --git a/src/cmd/compile/internal/types2/typexpr.go b/src/cmd/compile/internal/types2/typexpr.go index 8accc46751fdd0..8601ce627768e4 100644 --- a/src/cmd/compile/internal/types2/typexpr.go +++ b/src/cmd/compile/internal/types2/typexpr.go @@ -169,11 +169,11 @@ func (check *Checker) validVarType(e syntax.Expr, typ Type) { return } - // We don't want to call under() or complete interfaces while we are in + // We don't want to call typ.Underlying() or complete interfaces while we are in // the middle of type-checking parameter declarations that might belong // to interface methods. Delay this check to the end of type-checking. check.later(func() { - if t, _ := under(typ).(*Interface); t != nil { + if t, _ := typ.Underlying().(*Interface); t != nil { pos := syntax.StartPos(e) tset := computeInterfaceTypeSet(check, pos, t) // TODO(gri) is this the correct position? if !tset.IsMethodSet() { @@ -239,7 +239,7 @@ func (check *Checker) typInternal(e0 syntax.Expr, def *TypeName) (T Type) { check.indent-- var under Type if T != nil { - // Calling under() here may lead to endless instantiations. + // Calling T.Underlying() here may lead to endless instantiations. 
// Test case: type T[P any] *T[P] under = safeUnderlying(T) } @@ -425,7 +425,7 @@ func setDefType(def *TypeName, typ Type) { case *Basic: assert(t == Typ[Invalid]) case *Named: - t.underlying = typ + t.fromRHS = typ default: panic(fmt.Sprintf("unexpected type %T", t)) } diff --git a/src/cmd/compile/internal/types2/under.go b/src/cmd/compile/internal/types2/under.go index 078ba9ab172453..98c62733c7cd99 100644 --- a/src/cmd/compile/internal/types2/under.go +++ b/src/cmd/compile/internal/types2/under.go @@ -6,19 +6,8 @@ package types2 import "iter" -// under returns the true expanded underlying type. -// If it doesn't exist, the result is Typ[Invalid]. -// under must only be called when a type is known -// to be fully set up. -func under(t Type) Type { - if t := asNamed(t); t != nil { - return t.under() - } - return t.Underlying() -} - // If typ is a type parameter, underIs returns the result of typ.underIs(f). -// Otherwise, underIs returns the result of f(under(typ)). +// Otherwise, underIs returns the result of f(typ.Underlying()). func underIs(typ Type, f func(Type) bool) bool { return all(typ, func(_, u Type) bool { return f(u) @@ -31,7 +20,7 @@ func all(t Type, f func(t, u Type) bool) bool { if p, _ := Unalias(t).(*TypeParam); p != nil { return p.typeset(f) } - return f(t, under(t)) + return f(t, t.Underlying()) } // typeset is an iterator over the (type/underlying type) pairs of the diff --git a/src/cmd/compile/internal/types2/unify.go b/src/cmd/compile/internal/types2/unify.go index 9cd3af8607181e..eecef455ac577c 100644 --- a/src/cmd/compile/internal/types2/unify.go +++ b/src/cmd/compile/internal/types2/unify.go @@ -270,7 +270,7 @@ func (u *unifier) inferred(tparams []*TypeParam) []Type { // it is a non-type parameter interface. Otherwise it returns nil. func asInterface(x Type) (i *Interface) { if _, ok := Unalias(x).(*TypeParam); !ok { - i, _ = under(x).(*Interface) + i, _ = x.Underlying().(*Interface) } return i } @@ -339,7 +339,7 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) { if traceInference { u.tracef("%s ≡ under %s", x, ny) } - y = ny.under() + y = ny.Underlying() // Per the spec, a defined type cannot have an underlying type // that is a type parameter. assert(!isTypeParam(y)) @@ -430,7 +430,7 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) { u.set(px, y) default: // Neither x nor y are defined types. - if yc, _ := under(y).(*Chan); yc != nil && yc.dir != SendRecv { + if yc, _ := y.Underlying().(*Chan); yc != nil && yc.dir != SendRecv { // y is a directed channel type: select y. 
u.set(px, y) } diff --git a/src/cmd/compile/internal/types2/union.go b/src/cmd/compile/internal/types2/union.go index 1bf4353f264e34..ab0bd43cd350ac 100644 --- a/src/cmd/compile/internal/types2/union.go +++ b/src/cmd/compile/internal/types2/union.go @@ -93,7 +93,7 @@ func parseUnion(check *Checker, uexpr syntax.Expr) Type { continue } - u := under(t.typ) + u := t.typ.Underlying() f, _ := u.(*Interface) if t.tilde { if f != nil { diff --git a/src/cmd/compile/internal/types2/universe.go b/src/cmd/compile/internal/types2/universe.go index c66caebd10cef7..332cd174f97466 100644 --- a/src/cmd/compile/internal/types2/universe.go +++ b/src/cmd/compile/internal/types2/universe.go @@ -116,7 +116,7 @@ func defPredeclaredTypes() { { obj := NewTypeName(nopos, nil, "error", nil) obj.setColor(black) - typ := NewNamed(obj, nil, nil) + typ := (*Checker)(nil).newNamed(obj, nil, nil) // error.Error() string recv := newVar(RecvVar, nopos, nil, "", typ) @@ -128,7 +128,8 @@ func defPredeclaredTypes() { ityp := &Interface{methods: []*Func{err}, complete: true} computeInterfaceTypeSet(nil, nopos, ityp) // prevent races due to lazy computation of tset - typ.SetUnderlying(ityp) + typ.fromRHS = ityp + typ.Underlying() def(obj) } @@ -136,12 +137,13 @@ func defPredeclaredTypes() { { obj := NewTypeName(nopos, nil, "comparable", nil) obj.setColor(black) - typ := NewNamed(obj, nil, nil) + typ := (*Checker)(nil).newNamed(obj, nil, nil) // interface{} // marked as comparable ityp := &Interface{complete: true, tset: &_TypeSet{nil, allTermlist, true}} - typ.SetUnderlying(ityp) + typ.fromRHS = ityp + typ.Underlying() def(obj) } } diff --git a/src/cmd/compile/internal/types2/validtype.go b/src/cmd/compile/internal/types2/validtype.go index 32e389a6562445..528f5121e392ee 100644 --- a/src/cmd/compile/internal/types2/validtype.go +++ b/src/cmd/compile/internal/types2/validtype.go @@ -91,13 +91,6 @@ func (check *Checker) validType0(pos syntax.Pos, typ Type, nest, path []*Named) // break // } - // Don't report a 2nd error if we already know the type is invalid - // (e.g., if a cycle was detected earlier, via under). - // Note: ensure that t.orig is fully resolved by calling Underlying(). - if !isValid(t.Underlying()) { - return false - } - // If the current type t is also found in nest, (the memory of) t is // embedded in itself, indicating an invalid recursive type. for _, e := range nest { @@ -125,8 +118,9 @@ func (check *Checker) validType0(pos syntax.Pos, typ Type, nest, path []*Named) // are not yet available to other goroutines). assert(t.obj.pkg == check.pkg) assert(t.Origin().obj.pkg == check.pkg) - t.underlying = Typ[Invalid] - t.Origin().underlying = Typ[Invalid] + + // let t become invalid when it resolves + t.Origin().fromRHS = Typ[Invalid] // Find the starting point of the cycle and report it. // Because each type in nest must also appear in path (see invariant below), diff --git a/src/cmd/compile/internal/walk/walk.go b/src/cmd/compile/internal/walk/walk.go index a7d8182a747ce7..25add3d8043905 100644 --- a/src/cmd/compile/internal/walk/walk.go +++ b/src/cmd/compile/internal/walk/walk.go @@ -275,6 +275,15 @@ func backingArrayPtrLen(n ir.Node) (ptr, length ir.Node) { // function calls, which could clobber function call arguments/results // currently on the stack. func mayCall(n ir.Node) bool { + // This is intended to avoid putting constants + // into temporaries with the race detector (or other + // instrumentation) which interferes with simple + // "this is a constant" tests in ssagen. 
+ // Also, it will generally lead to better code. + if n.Op() == ir.OLITERAL { + return false + } + // When instrumenting, any expression might require function calls. if base.Flag.Cfg.Instrumenting { return true diff --git a/src/cmd/compile/internal/wasm/ssa.go b/src/cmd/compile/internal/wasm/ssa.go index daee82f1fd7366..1e3b318e8c9fe0 100644 --- a/src/cmd/compile/internal/wasm/ssa.go +++ b/src/cmd/compile/internal/wasm/ssa.go @@ -14,7 +14,6 @@ import ( "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/obj/wasm" - "internal/buildcfg" ) /* @@ -425,27 +424,11 @@ func ssaGenValueOnStack(s *ssagen.State, v *ssa.Value, extend bool) { case ssa.OpWasmI64TruncSatF32S, ssa.OpWasmI64TruncSatF64S: getValue64(s, v.Args[0]) - if buildcfg.GOWASM.SatConv { - s.Prog(v.Op.Asm()) - } else { - if v.Op == ssa.OpWasmI64TruncSatF32S { - s.Prog(wasm.AF64PromoteF32) - } - p := s.Prog(wasm.ACall) - p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.WasmTruncS} - } + s.Prog(v.Op.Asm()) case ssa.OpWasmI64TruncSatF32U, ssa.OpWasmI64TruncSatF64U: getValue64(s, v.Args[0]) - if buildcfg.GOWASM.SatConv { - s.Prog(v.Op.Asm()) - } else { - if v.Op == ssa.OpWasmI64TruncSatF32U { - s.Prog(wasm.AF64PromoteF32) - } - p := s.Prog(wasm.ACall) - p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.WasmTruncU} - } + s.Prog(v.Op.Asm()) case ssa.OpWasmF32DemoteF64: getValue64(s, v.Args[0]) diff --git a/src/cmd/dist/build.go b/src/cmd/dist/build.go index 9a7951726f6f04..2fcdb2d3915b9b 100644 --- a/src/cmd/dist/build.go +++ b/src/cmd/dist/build.go @@ -1397,7 +1397,7 @@ var ( binExesIncludedInDistpack = []string{"cmd/go", "cmd/gofmt"} // Keep in sync with the filter in cmd/distpack/pack.go. - toolsIncludedInDistpack = []string{"cmd/asm", "cmd/cgo", "cmd/compile", "cmd/cover", "cmd/link", "cmd/preprofile", "cmd/vet"} + toolsIncludedInDistpack = []string{"cmd/asm", "cmd/cgo", "cmd/compile", "cmd/cover", "cmd/fix", "cmd/link", "cmd/preprofile", "cmd/vet"} // We could install all tools in "cmd", but is unnecessary because we will // remove them in distpack, so instead install the tools that will actually diff --git a/src/cmd/dist/buildtool.go b/src/cmd/dist/buildtool.go index 080de832b2ad96..62cd9376927e0d 100644 --- a/src/cmd/dist/buildtool.go +++ b/src/cmd/dist/buildtool.go @@ -90,6 +90,7 @@ var bootstrapDirs = []string{ "internal/platform", "internal/profile", "internal/race", + "internal/runtime/gc", "internal/saferio", "internal/syscall/unix", "internal/types/errors", diff --git a/src/cmd/distpack/pack.go b/src/cmd/distpack/pack.go index 6bab45f1d3d642..09c3a331195f58 100644 --- a/src/cmd/distpack/pack.go +++ b/src/cmd/distpack/pack.go @@ -172,7 +172,7 @@ func main() { default: return false // Keep in sync with toolsIncludedInDistpack in cmd/dist/build.go. - case "asm", "cgo", "compile", "cover", "link", "preprofile", "vet": + case "asm", "cgo", "compile", "cover", "fix", "link", "preprofile", "vet": } } return true diff --git a/src/cmd/fix/doc.go b/src/cmd/fix/doc.go deleted file mode 100644 index b3d69144717172..00000000000000 --- a/src/cmd/fix/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Fix finds Go programs that use old APIs and rewrites them to use -newer ones. After you update to a new Go release, fix helps make -the necessary changes to your programs. - -Usage: - - go tool fix [ignored...] 
- -This tool is currently in transition. All its historical fixers were -long obsolete and have been removed, so it is currently a no-op. In -due course the tool will integrate with the Go analysis framework -(golang.org/x/tools/go/analysis) and run a modern suite of fix -algorithms; see https://go.dev/issue/71859. -*/ -package main diff --git a/src/cmd/fix/main.go b/src/cmd/fix/main.go index 87cc0d6414601b..422fa827459900 100644 --- a/src/cmd/fix/main.go +++ b/src/cmd/fix/main.go @@ -1,31 +1,59 @@ -// Copyright 2011 The Go Authors. All rights reserved. +// Copyright 2025 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +/* +Fix is a tool executed by "go fix" to update Go programs that use old +features of the language and library and rewrite them to use newer +ones. After you update to a new Go release, fix helps make the +necessary changes to your programs. + +See the documentation for "go fix" for how to run this command. +You can provide an alternative tool using "go fix -fixtool=..." + +Run "go tool fix help" to see the list of analyzers supported by this +program. + +See [golang.org/x/tools/go/analysis] for information on how to write +an analyzer that can suggest fixes. +*/ package main import ( - "flag" - "fmt" - "os" -) + "cmd/internal/objabi" + "cmd/internal/telemetry/counter" -var ( - _ = flag.Bool("diff", false, "obsolete, no effect") - _ = flag.String("go", "", "obsolete, no effect") - _ = flag.String("r", "", "obsolete, no effect") - _ = flag.String("force", "", "obsolete, no effect") + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/buildtag" + "golang.org/x/tools/go/analysis/passes/hostport" + "golang.org/x/tools/go/analysis/unitchecker" ) -func usage() { - fmt.Fprintf(os.Stderr, "usage: go tool fix [-diff] [-r ignored] [-force ignored] ...\n") - flag.PrintDefaults() - os.Exit(2) -} - func main() { - flag.Usage = usage - flag.Parse() + // Keep consistent with cmd/vet/main.go! + counter.Open() + objabi.AddVersionFlag() + counter.Inc("fix/invocations") + + unitchecker.Main(suite...) // (never returns) +} - os.Exit(0) +// The fix suite analyzers produce fixes that are safe to apply. +// (Diagnostics may not describe actual problems, +// but their fixes must be unambiguously safe to apply.) +var suite = []*analysis.Analyzer{ + buildtag.Analyzer, + hostport.Analyzer, + // TODO(adonovan): now the modernize (proposal #75266) and + // inline (proposal #75267) analyzers are published, revendor + // x/tools and add them here. + // + // TODO(adonovan):add any other vet analyzers whose fixes are always safe. + // Candidates to audit: sigchanyzer, printf, assign, unreachable. + // Rejected: + // - composites: some types (e.g. PointXY{1,2}) don't want field names. + // - timeformat: flipping MM/DD is a behavior change, but the code + // could potentially be a workaround for another bug. + // - stringintconv: offers two fixes, user input required to choose. + // - fieldalignment: poor signal/noise; fix could be a regression. 
} diff --git a/src/cmd/go.mod b/src/cmd/go.mod index 017883a7870cd2..1fc256ae6f2ce7 100644 --- a/src/cmd/go.mod +++ b/src/cmd/go.mod @@ -6,16 +6,16 @@ require ( github.com/google/pprof v0.0.0-20250630185457-6e76a2b096b5 golang.org/x/arch v0.20.1-0.20250808194827-46ba08e3ae58 golang.org/x/build v0.0.0-20250806225920-b7c66c047964 - golang.org/x/mod v0.28.0 + golang.org/x/mod v0.29.0 golang.org/x/sync v0.17.0 - golang.org/x/sys v0.36.0 - golang.org/x/telemetry v0.0.0-20250908211612-aef8a434d053 + golang.org/x/sys v0.37.0 + golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8 golang.org/x/term v0.34.0 - golang.org/x/tools v0.37.1-0.20250924232827-4df13e317ce4 + golang.org/x/tools v0.38.1-0.20251015192825-7d9453ccc0f5 ) require ( github.com/ianlancetaylor/demangle v0.0.0-20250417193237-f615e6bd150b // indirect - golang.org/x/text v0.29.0 // indirect + golang.org/x/text v0.30.0 // indirect rsc.io/markdown v0.0.0-20240306144322-0bf8f97ee8ef // indirect ) diff --git a/src/cmd/go.sum b/src/cmd/go.sum index 0906ffcc605854..eb7af161552020 100644 --- a/src/cmd/go.sum +++ b/src/cmd/go.sum @@ -10,19 +10,19 @@ golang.org/x/arch v0.20.1-0.20250808194827-46ba08e3ae58 h1:uxPa6+/WsUfzikIAPMqpT golang.org/x/arch v0.20.1-0.20250808194827-46ba08e3ae58/go.mod h1:bdwinDaKcfZUGpH09BB7ZmOfhalA8lQdzl62l8gGWsk= golang.org/x/build v0.0.0-20250806225920-b7c66c047964 h1:yRs1K51GKq7hsIO+YHJ8LsslrvwFceNPIv0tYjpcBd0= golang.org/x/build v0.0.0-20250806225920-b7c66c047964/go.mod h1:i9Vx7+aOQUpYJRxSO+OpRStVBCVL/9ccI51xblWm5WY= -golang.org/x/mod v0.28.0 h1:gQBtGhjxykdjY9YhZpSlZIsbnaE2+PgjfLWUQTnoZ1U= -golang.org/x/mod v0.28.0/go.mod h1:yfB/L0NOf/kmEbXjzCPOx1iK1fRutOydrCMsqRhEBxI= +golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA= +golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w= golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= -golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= -golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= -golang.org/x/telemetry v0.0.0-20250908211612-aef8a434d053 h1:dHQOQddU4YHS5gY33/6klKjq7Gp3WwMyOXGNp5nzRj8= -golang.org/x/telemetry v0.0.0-20250908211612-aef8a434d053/go.mod h1:+nZKN+XVh4LCiA9DV3ywrzN4gumyCnKjau3NGb9SGoE= +golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ= +golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8 h1:LvzTn0GQhWuvKH/kVRS3R3bVAsdQWI7hvfLHGgh9+lU= +golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8/go.mod h1:Pi4ztBfryZoJEkyFTI5/Ocsu2jXyDr6iSdgJiYE/uwE= golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4= golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw= -golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk= -golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4= -golang.org/x/tools v0.37.1-0.20250924232827-4df13e317ce4 h1:IcXDtHggZZo+GzNzvVRPyNFLnOc2/Z1gg3ZVIWF2uCU= -golang.org/x/tools v0.37.1-0.20250924232827-4df13e317ce4/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w= +golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k= +golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM= +golang.org/x/tools v0.38.1-0.20251015192825-7d9453ccc0f5 h1:cz7f45KGWAtyIrz6bm45Gc+lw8beIxBSW3EQh4Bwbg4= +golang.org/x/tools 
v0.38.1-0.20251015192825-7d9453ccc0f5/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= rsc.io/markdown v0.0.0-20240306144322-0bf8f97ee8ef h1:mqLYrXCXYEZOop9/Dbo6RPX11539nwiCNBb1icVPmw8= rsc.io/markdown v0.0.0-20240306144322-0bf8f97ee8ef/go.mod h1:8xcPgWmwlZONN1D9bjxtHEjrUtSEa3fakVF8iaewYKQ= diff --git a/src/cmd/go/alldocs.go b/src/cmd/go/alldocs.go index f1e1b1c333542c..67c0ecbe8b2fcf 100644 --- a/src/cmd/go/alldocs.go +++ b/src/cmd/go/alldocs.go @@ -18,7 +18,7 @@ // clean remove object files and cached files // doc show documentation for package or symbol // env print Go environment information -// fix update packages to use new APIs +// fix apply fixes suggested by static checkers // fmt gofmt (reformat) package sources // generate generate Go files by processing source // get add dependencies to current module and install them @@ -495,22 +495,34 @@ // // For more about environment variables, see 'go help environment'. // -// # Update packages to use new APIs +// # Apply fixes suggested by static checkers // // Usage: // -// go fix [-fix list] [packages] +// go fix [build flags] [-fixtool prog] [fix flags] [packages] // -// Fix runs the Go fix command on the packages named by the import paths. +// Fix runs the Go fix tool (cmd/vet) on the named packages +// and applies suggested fixes. // -// The -fix flag sets a comma-separated list of fixes to run. -// The default is all known fixes. -// (Its value is passed to 'go tool fix -r'.) +// It supports these flags: +// +// -diff +// instead of applying each fix, print the patch as a unified diff +// +// The -fixtool=prog flag selects a different analysis tool with +// alternative or additional fixes; see the documentation for go vet's +// -vettool flag for details. // -// For more about fix, see 'go doc cmd/fix'. // For more about specifying packages, see 'go help packages'. // -// To run fix with other options, run 'go tool fix'. +// For a list of fixers and their flags, see 'go tool fix help'. +// +// For details of a specific fixer such as 'hostport', +// see 'go tool fix help hostport'. +// +// The build flags supported by go fix are those that control package resolution +// and execution, such as -C, -n, -x, -v, -tags, and -toolexec. +// For more about these flags, see 'go help build'. // // See also: go fmt, go vet. // @@ -1280,11 +1292,6 @@ // The -json flag prints the final go.mod file in JSON format instead of // writing it back to go.mod. The JSON output corresponds to these Go types: // -// type Module struct { -// Path string -// Version string -// } -// // type GoMod struct { // Module ModPath // Go string @@ -1294,6 +1301,13 @@ // Exclude []Module // Replace []Replace // Retract []Retract +// Tool []Tool +// Ignore []Ignore +// } +// +// type Module struct { +// Path string +// Version string // } // // type ModPath struct { @@ -2012,20 +2026,34 @@ // // go vet [build flags] [-vettool prog] [vet flags] [packages] // -// Vet runs the Go vet command on the packages named by the import paths. +// Vet runs the Go vet tool (cmd/vet) on the named packages +// and reports diagnostics. // -// For more about vet and its flags, see 'go doc cmd/vet'. -// For more about specifying packages, see 'go help packages'. -// For a list of checkers and their flags, see 'go tool vet help'. -// For details of a specific checker such as 'printf', see 'go tool vet help printf'. +// It supports these flags: // -// The -vettool=prog flag selects a different analysis tool with alternative -// or additional checks. 
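The `go vet` and `go fix` documentation in this hunk describes pointing the go command at an alternative analysis tool. A minimal sketch of such a tool built atop `unitchecker`, using the `shadow` analyzer mentioned in the docs (the import paths assume the `golang.org/x/tools` module is available):

```go
// Command myvet is a minimal alternative analysis tool. It can be selected
// with "go vet -vettool=$(which myvet)"; a tool whose analyzers also supply
// safe fixes can likewise be selected with "go fix -fixtool=...".
package main

import (
	"golang.org/x/tools/go/analysis/passes/shadow"
	"golang.org/x/tools/go/analysis/unitchecker"
)

func main() {
	// unitchecker implements the driver protocol used by the go command:
	// it reads the JSON "unit" description passed on the command line,
	// runs the analyzers, and reports diagnostics back to go vet.
	unitchecker.Main(shadow.Analyzer) // never returns
}
```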
-// For example, the 'shadow' analyzer can be built and run using these commands: +// -c int +// display offending line with this many lines of context (default -1) +// -json +// emit JSON output +// -fix +// instead of printing each diagnostic, apply its first fix (if any) +// -diff +// instead of applying each fix, print the patch as a unified diff +// +// The -vettool=prog flag selects a different analysis tool with +// alternative or additional checks. For example, the 'shadow' analyzer +// can be built and run using these commands: // // go install golang.org/x/tools/go/analysis/passes/shadow/cmd/shadow@latest // go vet -vettool=$(which shadow) // +// Alternative vet tools should be built atop golang.org/x/tools/go/analysis/unitchecker, +// which handles the interaction with go vet. +// +// For more about specifying packages, see 'go help packages'. +// For a list of checkers and their flags, see 'go tool vet help'. +// For details of a specific checker such as 'printf', see 'go tool vet help printf'. +// // The build flags supported by go vet are those that control package resolution // and execution, such as -C, -n, -x, -v, -tags, and -toolexec. // For more about these flags, see 'go help build'. @@ -3242,6 +3270,10 @@ // The following flags are recognized by the 'go test' command and // control the execution of any test: // +// -artifacts +// Save test artifacts in the directory specified by -outputdir. +// See 'go doc testing.T.ArtifactDir'. +// // -bench regexp // Run only those benchmarks matching a regular expression. // By default, no benchmarks are run. @@ -3336,6 +3368,10 @@ // This will only list top-level tests. No subtest or subbenchmarks will be // shown. // +// -outputdir directory +// Place output files from profiling and test artifacts in the +// specified directory, by default the directory in which "go test" is running. +// // -parallel n // Allow parallel execution of test functions that call t.Parallel, and // fuzz targets that call t.Parallel when running the seed corpus. @@ -3447,10 +3483,6 @@ // Sample 1 in n stack traces of goroutines holding a // contended mutex. // -// -outputdir directory -// Place output files from profiling in the specified directory, -// by default the directory in which "go test" is running. -// // -trace trace.out // Write an execution trace to the specified file before exiting. // diff --git a/src/cmd/go/internal/base/path.go b/src/cmd/go/internal/base/path.go index 5bb7bc3bde63e2..a7577f62e76898 100644 --- a/src/cmd/go/internal/base/path.go +++ b/src/cmd/go/internal/base/path.go @@ -55,8 +55,7 @@ func sameFile(path1, path2 string) bool { // ShortPathError rewrites the path in err using base.ShortPath, if err is a wrapped PathError. 
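The next hunks replace `errors.As` call sites with the new generic `errors.AsType`, whose signature can be read off those call sites. A minimal before/after sketch, assuming a toolchain that already ships `AsType`:

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func main() {
	_, err := os.Open("does-not-exist")
	err = fmt.Errorf("loading config: %w", err)

	// Old style: declare a target variable and pass its address.
	var pathErr *fs.PathError
	if errors.As(err, &pathErr) {
		fmt.Println("as:", pathErr.Path)
	}

	// New style used by the call sites below: the target type is a type
	// argument and the matched value is returned directly.
	if pe, ok := errors.AsType[*fs.PathError](err); ok {
		fmt.Println("astype:", pe.Path)
	}
}
```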
func ShortPathError(err error) error { - var pe *fs.PathError - if errors.As(err, &pe) { + if pe, ok := errors.AsType[*fs.PathError](err); ok { pe.Path = ShortPath(pe.Path) } return err diff --git a/src/cmd/go/internal/bug/bug.go b/src/cmd/go/internal/bug/bug.go index 4ff45d2d888c96..4e9ae1e9b499ca 100644 --- a/src/cmd/go/internal/bug/bug.go +++ b/src/cmd/go/internal/bug/bug.go @@ -21,6 +21,7 @@ import ( "cmd/go/internal/base" "cmd/go/internal/cfg" "cmd/go/internal/envcmd" + "cmd/go/internal/modload" "cmd/go/internal/web" "cmd/go/internal/work" ) @@ -44,7 +45,7 @@ func runBug(ctx context.Context, cmd *base.Command, args []string) { if len(args) > 0 { base.Fatalf("go: bug takes no arguments") } - work.BuildInit() + work.BuildInit(modload.LoaderState) var buf strings.Builder buf.WriteString(bugHeader) diff --git a/src/cmd/go/internal/clean/clean.go b/src/cmd/go/internal/clean/clean.go index c6f311e0263af8..1c05977de554f8 100644 --- a/src/cmd/go/internal/clean/clean.go +++ b/src/cmd/go/internal/clean/clean.go @@ -120,7 +120,7 @@ func init() { } func runClean(ctx context.Context, cmd *base.Command, args []string) { - modload.InitWorkfile() + modload.InitWorkfile(modload.LoaderState) if len(args) > 0 { cacheFlag := "" switch { @@ -142,13 +142,13 @@ func runClean(ctx context.Context, cmd *base.Command, args []string) { // either the flags and arguments explicitly imply a package, // or no other target (such as a cache) was requested to be cleaned. cleanPkg := len(args) > 0 || cleanI || cleanR - if (!modload.Enabled() || modload.HasModRoot()) && + if (!modload.Enabled(modload.LoaderState) || modload.HasModRoot(modload.LoaderState)) && !cleanCache && !cleanModcache && !cleanTestcache && !cleanFuzzcache { cleanPkg = true } if cleanPkg { - for _, pkg := range load.PackagesAndErrors(ctx, load.PackageOpts{}, args) { + for _, pkg := range load.PackagesAndErrors(modload.LoaderState, ctx, load.PackageOpts{}, args) { clean(pkg) } } diff --git a/src/cmd/go/internal/doc/pkgsite.go b/src/cmd/go/internal/doc/pkgsite.go index 06289ac4fc9a8a..c173167b6329a4 100644 --- a/src/cmd/go/internal/doc/pkgsite.go +++ b/src/cmd/go/internal/doc/pkgsite.go @@ -81,8 +81,7 @@ func doPkgsite(urlPath, fragment string) error { cmd.Stderr = os.Stderr if err := cmd.Run(); err != nil { - var ee *exec.ExitError - if errors.As(err, &ee) { + if ee, ok := errors.AsType[*exec.ExitError](err); ok { // Exit with the same exit status as pkgsite to avoid // printing of "exit status" error messages. // Any relevant messages have already been printed diff --git a/src/cmd/go/internal/envcmd/env.go b/src/cmd/go/internal/envcmd/env.go index 6ad6954dd52125..13708ae170c1d8 100644 --- a/src/cmd/go/internal/envcmd/env.go +++ b/src/cmd/go/internal/envcmd/env.go @@ -191,14 +191,14 @@ func findEnv(env []cfg.EnvVar, name string) string { // ExtraEnvVars returns environment variables that should not leak into child processes. func ExtraEnvVars() []cfg.EnvVar { gomod := "" - modload.Init() - if modload.HasModRoot() { + modload.Init(modload.LoaderState) + if modload.HasModRoot(modload.LoaderState) { gomod = modload.ModFilePath() - } else if modload.Enabled() { + } else if modload.Enabled(modload.LoaderState) { gomod = os.DevNull } - modload.InitWorkfile() - gowork := modload.WorkFilePath() + modload.InitWorkfile(modload.LoaderState) + gowork := modload.WorkFilePath(modload.LoaderState) // As a special case, if a user set off explicitly, report that in GOWORK. 
if cfg.Getenv("GOWORK") == "off" { gowork = "off" @@ -336,7 +336,7 @@ func runEnv(ctx context.Context, cmd *base.Command, args []string) { } } if needCostly { - work.BuildInit() + work.BuildInit(modload.LoaderState) env = append(env, ExtraEnvVarsCostly()...) } diff --git a/src/cmd/go/internal/fix/fix.go b/src/cmd/go/internal/fix/fix.go deleted file mode 100644 index 8947da05c3ee63..00000000000000 --- a/src/cmd/go/internal/fix/fix.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package fix implements the “go fix” command. -package fix - -import ( - "cmd/go/internal/base" - "cmd/go/internal/cfg" - "cmd/go/internal/load" - "cmd/go/internal/modload" - "cmd/go/internal/str" - "cmd/go/internal/work" - "context" - "fmt" - "go/build" - "os" - "path/filepath" -) - -var CmdFix = &base.Command{ - UsageLine: "go fix [-fix list] [packages]", - Short: "update packages to use new APIs", - Long: ` -Fix runs the Go fix command on the packages named by the import paths. - -The -fix flag sets a comma-separated list of fixes to run. -The default is all known fixes. -(Its value is passed to 'go tool fix -r'.) - -For more about fix, see 'go doc cmd/fix'. -For more about specifying packages, see 'go help packages'. - -To run fix with other options, run 'go tool fix'. - -See also: go fmt, go vet. - `, -} - -var fixes = CmdFix.Flag.String("fix", "", "comma-separated list of fixes to apply") - -func init() { - work.AddBuildFlags(CmdFix, work.OmitBuildOnlyFlags) - CmdFix.Run = runFix // fix cycle -} - -func runFix(ctx context.Context, cmd *base.Command, args []string) { - pkgs := load.PackagesAndErrors(ctx, load.PackageOpts{}, args) - w := 0 - for _, pkg := range pkgs { - if pkg.Error != nil { - base.Errorf("%v", pkg.Error) - continue - } - pkgs[w] = pkg - w++ - } - pkgs = pkgs[:w] - - printed := false - for _, pkg := range pkgs { - if modload.Enabled() && pkg.Module != nil && !pkg.Module.Main { - if !printed { - fmt.Fprintf(os.Stderr, "go: not fixing packages in dependency modules\n") - printed = true - } - continue - } - // Use pkg.gofiles instead of pkg.Dir so that - // the command only applies to this package, - // not to packages in subdirectories. 
- files := base.RelPaths(pkg.InternalAllGoFiles()) - goVersion := "" - if pkg.Module != nil { - goVersion = "go" + pkg.Module.GoVersion - } else if pkg.Standard { - goVersion = build.Default.ReleaseTags[len(build.Default.ReleaseTags)-1] - } - var fixArg []string - if *fixes != "" { - fixArg = []string{"-r=" + *fixes} - } - base.Run(str.StringList(cfg.BuildToolexec, filepath.Join(cfg.GOROOTbin, "go"), "tool", "fix", "-go="+goVersion, fixArg, files)) - } -} diff --git a/src/cmd/go/internal/fmtcmd/fmt.go b/src/cmd/go/internal/fmtcmd/fmt.go index 62b22f6bcfa407..a42e7753050356 100644 --- a/src/cmd/go/internal/fmtcmd/fmt.go +++ b/src/cmd/go/internal/fmtcmd/fmt.go @@ -59,8 +59,8 @@ func runFmt(ctx context.Context, cmd *base.Command, args []string) { baseGofmtArgs := len(gofmtArgs) baseGofmtArgLen := gofmtArgLen - for _, pkg := range load.PackagesAndErrors(ctx, load.PackageOpts{}, args) { - if modload.Enabled() && pkg.Module != nil && !pkg.Module.Main { + for _, pkg := range load.PackagesAndErrors(modload.LoaderState, ctx, load.PackageOpts{}, args) { + if modload.Enabled(modload.LoaderState) && pkg.Module != nil && !pkg.Module.Main { if !printed { fmt.Fprintf(os.Stderr, "go: not formatting packages in dependency modules\n") printed = true @@ -68,11 +68,10 @@ func runFmt(ctx context.Context, cmd *base.Command, args []string) { continue } if pkg.Error != nil { - var nogo *load.NoGoError - var embed *load.EmbedError - if (errors.As(pkg.Error, &nogo) || errors.As(pkg.Error, &embed)) && len(pkg.InternalAllGoFiles()) > 0 { - // Skip this error, as we will format - // all files regardless. + if _, ok := errors.AsType[*load.NoGoError](pkg.Error); ok { + // Skip this error, as we will format all files regardless. + } else if _, ok := errors.AsType[*load.EmbedError](pkg.Error); ok && len(pkg.InternalAllGoFiles()) > 0 { + // Skip this error, as we will format all files regardless. } else { base.Errorf("%v", pkg.Error) continue diff --git a/src/cmd/go/internal/generate/generate.go b/src/cmd/go/internal/generate/generate.go index 0f4b4a972e9107..4250916b8d09d6 100644 --- a/src/cmd/go/internal/generate/generate.go +++ b/src/cmd/go/internal/generate/generate.go @@ -182,7 +182,7 @@ func init() { } func runGenerate(ctx context.Context, cmd *base.Command, args []string) { - modload.InitWorkfile() + modload.InitWorkfile(modload.LoaderState) if generateRunFlag != "" { var err error @@ -204,8 +204,8 @@ func runGenerate(ctx context.Context, cmd *base.Command, args []string) { // Even if the arguments are .go files, this loop suffices. 
printed := false pkgOpts := load.PackageOpts{IgnoreImports: true} - for _, pkg := range load.PackagesAndErrors(ctx, pkgOpts, args) { - if modload.Enabled() && pkg.Module != nil && !pkg.Module.Main { + for _, pkg := range load.PackagesAndErrors(modload.LoaderState, ctx, pkgOpts, args) { + if modload.Enabled(modload.LoaderState) && pkg.Module != nil && !pkg.Module.Main { if !printed { fmt.Fprintf(os.Stderr, "go: not generating in packages in dependency modules\n") printed = true diff --git a/src/cmd/go/internal/list/list.go b/src/cmd/go/internal/list/list.go index bee7dc8053ee6f..0bf86ae004a26e 100644 --- a/src/cmd/go/internal/list/list.go +++ b/src/cmd/go/internal/list/list.go @@ -419,7 +419,7 @@ func (v *jsonFlag) needAny(fields ...string) bool { var nl = []byte{'\n'} func runList(ctx context.Context, cmd *base.Command, args []string) { - modload.InitWorkfile() + modload.InitWorkfile(modload.LoaderState) if *listFmt != "" && listJson { base.Fatalf("go list -f cannot be used with -json") @@ -427,11 +427,11 @@ func runList(ctx context.Context, cmd *base.Command, args []string) { if *listReuse != "" && !*listM { base.Fatalf("go list -reuse cannot be used without -m") } - if *listReuse != "" && modload.HasModRoot() { + if *listReuse != "" && modload.HasModRoot(modload.LoaderState) { base.Fatalf("go list -reuse cannot be used inside a module") } - work.BuildInit() + work.BuildInit(modload.LoaderState) out := newTrackingWriter(os.Stdout) defer out.w.Flush() @@ -496,12 +496,12 @@ func runList(ctx context.Context, cmd *base.Command, args []string) { } } - modload.Init() + modload.Init(modload.LoaderState) if *listRetracted { if cfg.BuildMod == "vendor" { base.Fatalf("go list -retracted cannot be used when vendoring is enabled") } - if !modload.Enabled() { + if !modload.Enabled(modload.LoaderState) { base.Fatalf("go list -retracted can only be used in module-aware mode") } } @@ -525,11 +525,11 @@ func runList(ctx context.Context, cmd *base.Command, args []string) { base.Fatalf("go list -test cannot be used with -m") } - if modload.Init(); !modload.Enabled() { + if modload.Init(modload.LoaderState); !modload.Enabled(modload.LoaderState) { base.Fatalf("go: list -m cannot be used with GO111MODULE=off") } - modload.LoadModFile(ctx) // Sets cfg.BuildMod as a side-effect. + modload.LoadModFile(modload.LoaderState, ctx) // Sets cfg.BuildMod as a side-effect. if cfg.BuildMod == "vendor" { const actionDisabledFormat = "go: can't %s using the vendor directory\n\t(Use -mod=mod or -mod=readonly to bypass.)" @@ -613,7 +613,7 @@ func runList(ctx context.Context, cmd *base.Command, args []string) { SuppressBuildInfo: !*listExport && !listJsonFields.needAny("Stale", "StaleReason"), SuppressEmbedFiles: !*listExport && !listJsonFields.needAny("EmbedFiles", "TestEmbedFiles", "XTestEmbedFiles"), } - pkgs := load.PackagesAndErrors(ctx, pkgOpts, args) + pkgs := load.PackagesAndErrors(modload.LoaderState, ctx, pkgOpts, args) if !*listE { w := 0 for _, pkg := range pkgs { @@ -727,7 +727,7 @@ func runList(ctx context.Context, cmd *base.Command, args []string) { b.NeedExport = *listExport b.NeedCompiledGoFiles = *listCompiled if cfg.BuildCover { - load.PrepareForCoverageBuild(pkgs) + load.PrepareForCoverageBuild(modload.LoaderState, pkgs) } a := &work.Action{} // TODO: Use pkgsFilter? 
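Several conversions in this patch (for example ShortPathError above) replace the two-step `errors.As` pattern with the new generic `errors.AsType`. A minimal standalone sketch of the before/after shape, using `*fs.PathError` as the target type:

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func main() {
	_, err := os.Open("no/such/file")

	// Old style: declare a target variable and pass its address.
	var pe *fs.PathError
	if errors.As(err, &pe) {
		fmt.Println("errors.As:", pe.Path)
	}

	// New style: the type parameter names the target type, and the
	// matched value is returned directly along with an ok flag.
	if pe, ok := errors.AsType[*fs.PathError](err); ok {
		fmt.Println("errors.AsType:", pe.Path)
	}
}
```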
diff --git a/src/cmd/go/internal/load/flag.go b/src/cmd/go/internal/load/flag.go index 55bdab013505ab..86a922bc103a82 100644 --- a/src/cmd/go/internal/load/flag.go +++ b/src/cmd/go/internal/load/flag.go @@ -6,6 +6,7 @@ package load import ( "cmd/go/internal/base" + "cmd/go/internal/modload" "cmd/internal/quoted" "fmt" "strings" @@ -63,7 +64,7 @@ func (f *PerPackageFlag) set(v, cwd string) error { return fmt.Errorf("parameter may not start with quote character %c", v[0]) } pattern := strings.TrimSpace(v[:i]) - match = MatchPackage(pattern, cwd) + match = MatchPackage(modload.LoaderState, pattern, cwd) v = v[i+1:] } flags, err := quoted.Split(v) diff --git a/src/cmd/go/internal/load/godebug.go b/src/cmd/go/internal/load/godebug.go index 8ea8ffab1aea1f..817cc4faebf7b4 100644 --- a/src/cmd/go/internal/load/godebug.go +++ b/src/cmd/go/internal/load/godebug.go @@ -45,12 +45,12 @@ func ParseGoDebug(text string) (key, value string, err error) { // defaultGODEBUG returns the default GODEBUG setting for the main package p. // When building a test binary, directives, testDirectives, and xtestDirectives // list additional directives from the package under test. -func defaultGODEBUG(p *Package, directives, testDirectives, xtestDirectives []build.Directive) string { +func defaultGODEBUG(loaderstate *modload.State, p *Package, directives, testDirectives, xtestDirectives []build.Directive) string { if p.Name != "main" { return "" } - goVersion := modload.MainModules.GoVersion() - if modload.RootMode == modload.NoRoot && p.Module != nil { + goVersion := loaderstate.MainModules.GoVersion(loaderstate) + if loaderstate.RootMode == modload.NoRoot && p.Module != nil { // This is go install pkg@version or go run pkg@version. // Use the Go version from the package. // If there isn't one, then assume Go 1.20, @@ -73,7 +73,7 @@ func defaultGODEBUG(p *Package, directives, testDirectives, xtestDirectives []bu } // Add directives from main module go.mod. - for _, g := range modload.MainModules.Godebugs() { + for _, g := range loaderstate.MainModules.Godebugs(loaderstate) { if m == nil { m = make(map[string]string) } diff --git a/src/cmd/go/internal/load/pkg.go b/src/cmd/go/internal/load/pkg.go index 1f791546f90088..cfaece2072dbb9 100644 --- a/src/cmd/go/internal/load/pkg.go +++ b/src/cmd/go/internal/load/pkg.go @@ -290,8 +290,8 @@ func (p *Package) setLoadPackageDataError(err error, path string, stk *ImportSta // Replace (possibly wrapped) *build.NoGoError with *load.NoGoError. // The latter is more specific about the cause. 
- var nogoErr *build.NoGoError - if errors.As(err, &nogoErr) { + nogoErr, ok := errors.AsType[*build.NoGoError](err) + if ok { if p.Dir == "" && nogoErr.Dir != "" { p.Dir = nogoErr.Dir } @@ -686,8 +686,8 @@ const ( ) // LoadPackage does Load import, but without a parent package load context -func LoadPackage(ctx context.Context, opts PackageOpts, path, srcDir string, stk *ImportStack, importPos []token.Position, mode int) *Package { - p, err := loadImport(ctx, opts, nil, path, srcDir, nil, stk, importPos, mode) +func LoadPackage(loaderstate *modload.State, ctx context.Context, opts PackageOpts, path, srcDir string, stk *ImportStack, importPos []token.Position, mode int) *Package { + p, err := loadImport(loaderstate, ctx, opts, nil, path, srcDir, nil, stk, importPos, mode) if err != nil { base.Fatalf("internal error: loadImport of %q with nil parent returned an error", path) } @@ -703,7 +703,7 @@ func LoadPackage(ctx context.Context, opts PackageOpts, path, srcDir string, stk // The returned PackageError, if any, describes why parent is not allowed // to import the named package, with the error referring to importPos. // The PackageError can only be non-nil when parent is not nil. -func loadImport(ctx context.Context, opts PackageOpts, pre *preload, path, srcDir string, parent *Package, stk *ImportStack, importPos []token.Position, mode int) (*Package, *PackageError) { +func loadImport(loaderstate *modload.State, ctx context.Context, opts PackageOpts, pre *preload, path, srcDir string, parent *Package, stk *ImportStack, importPos []token.Position, mode int) (*Package, *PackageError) { ctx, span := trace.StartSpan(ctx, "modload.loadImport "+path) defer span.Done() @@ -718,9 +718,9 @@ func loadImport(ctx context.Context, opts PackageOpts, pre *preload, path, srcDi parentRoot = parent.Root parentIsStd = parent.Standard } - bp, loaded, err := loadPackageData(ctx, path, parentPath, srcDir, parentRoot, parentIsStd, mode) + bp, loaded, err := loadPackageData(loaderstate, ctx, path, parentPath, srcDir, parentRoot, parentIsStd, mode) if loaded && pre != nil && !opts.IgnoreImports { - pre.preloadImports(ctx, opts, bp.Imports, bp) + pre.preloadImports(loaderstate, ctx, opts, bp.Imports, bp) } if bp == nil { p := &Package{ @@ -771,7 +771,7 @@ func loadImport(ctx context.Context, opts PackageOpts, pre *preload, path, srcDi // Load package. // loadPackageData may return bp != nil even if an error occurs, // in order to return partial information. - p.load(ctx, opts, path, stk, importPos, bp, err) + p.load(loaderstate, ctx, opts, path, stk, importPos, bp, err) if !cfg.ModulesEnabled && path != cleanImport(path) { p.Error = &PackageError{ @@ -784,7 +784,7 @@ func loadImport(ctx context.Context, opts PackageOpts, pre *preload, path, srcDi } // Checked on every import because the rules depend on the code doing the importing. - if perr := disallowInternal(ctx, srcDir, parent, parentPath, p, stk); perr != nil { + if perr := disallowInternal(loaderstate, ctx, srcDir, parent, parentPath, p, stk); perr != nil { perr.setPos(importPos) return p, perr } @@ -838,7 +838,7 @@ func extractFirstImport(importPos []token.Position) *token.Position { // // loadPackageData returns a boolean, loaded, which is true if this is the // first time the package was loaded. Callers may preload imports in this case. 
-func loadPackageData(ctx context.Context, path, parentPath, parentDir, parentRoot string, parentIsStd bool, mode int) (bp *build.Package, loaded bool, err error) { +func loadPackageData(loaderstate *modload.State, ctx context.Context, path, parentPath, parentDir, parentRoot string, parentIsStd bool, mode int) (bp *build.Package, loaded bool, err error) { ctx, span := trace.StartSpan(ctx, "load.loadPackageData "+path) defer span.Done() @@ -883,7 +883,7 @@ func loadPackageData(ctx context.Context, path, parentPath, parentDir, parentRoo r.path = newPath r.dir = dir } else if cfg.ModulesEnabled { - r.dir, r.path, r.err = modload.Lookup(parentPath, parentIsStd, path) + r.dir, r.path, r.err = modload.Lookup(loaderstate, parentPath, parentIsStd, path) } else if build.IsLocalImport(path) { r.dir = filepath.Join(parentDir, path) r.path = dirToImportPath(r.dir) @@ -892,7 +892,7 @@ func loadPackageData(ctx context.Context, path, parentPath, parentDir, parentRoo // find out the key to use in packageCache without the // overhead of repeated calls to buildContext.Import. // The code is also needed in a few other places anyway. - r.path = resolveImportPath(path, parentPath, parentDir, parentRoot, parentIsStd) + r.path = resolveImportPath(loaderstate, path, parentPath, parentDir, parentRoot, parentIsStd) } else if mode&ResolveModule != 0 { r.path = moduleImportPath(path, parentPath, parentDir, parentRoot) } @@ -921,7 +921,7 @@ func loadPackageData(ctx context.Context, path, parentPath, parentDir, parentRoo } else { buildContext.GOPATH = "" // Clear GOPATH so packages are imported as pure module packages } - modroot := modload.PackageModRoot(ctx, r.path) + modroot := modload.PackageModRoot(loaderstate, ctx, r.path) if modroot == "" && str.HasPathPrefix(r.dir, cfg.GOROOTsrc) { modroot = cfg.GOROOTsrc gorootSrcCmd := filepath.Join(cfg.GOROOTsrc, "cmd") @@ -942,7 +942,7 @@ func loadPackageData(ctx context.Context, path, parentPath, parentDir, parentRoo if cfg.ModulesEnabled { // Override data.p.Root, since ImportDir sets it to $GOPATH, if // the module is inside $GOPATH/src. - if info := modload.PackageModuleInfo(ctx, path); info != nil { + if info := modload.PackageModuleInfo(loaderstate, ctx, path); info != nil { data.p.Root = info.Dir } } @@ -989,7 +989,7 @@ func loadPackageData(ctx context.Context, path, parentPath, parentDir, parentRoo if cfg.GOBIN != "" { data.p.BinDir = cfg.GOBIN } else if cfg.ModulesEnabled { - data.p.BinDir = modload.BinDir() + data.p.BinDir = modload.BinDir(loaderstate) } } @@ -1068,7 +1068,7 @@ func newPreload() *preload { // preloadMatches loads data for package paths matched by patterns. // When preloadMatches returns, some packages may not be loaded yet, but // loadPackageData and loadImport are always safe to call. 
-func (pre *preload) preloadMatches(ctx context.Context, opts PackageOpts, matches []*search.Match) { +func (pre *preload) preloadMatches(loaderstate *modload.State, ctx context.Context, opts PackageOpts, matches []*search.Match) { for _, m := range matches { for _, pkg := range m.Pkgs { select { @@ -1077,10 +1077,10 @@ func (pre *preload) preloadMatches(ctx context.Context, opts PackageOpts, matche case pre.sema <- struct{}{}: go func(pkg string) { mode := 0 // don't use vendoring or module import resolution - bp, loaded, err := loadPackageData(ctx, pkg, "", base.Cwd(), "", false, mode) + bp, loaded, err := loadPackageData(loaderstate, ctx, pkg, "", base.Cwd(), "", false, mode) <-pre.sema if bp != nil && loaded && err == nil && !opts.IgnoreImports { - pre.preloadImports(ctx, opts, bp.Imports, bp) + pre.preloadImports(loaderstate, ctx, opts, bp.Imports, bp) } }(pkg) } @@ -1091,7 +1091,7 @@ func (pre *preload) preloadMatches(ctx context.Context, opts PackageOpts, matche // preloadImports queues a list of imports for preloading. // When preloadImports returns, some packages may not be loaded yet, // but loadPackageData and loadImport are always safe to call. -func (pre *preload) preloadImports(ctx context.Context, opts PackageOpts, imports []string, parent *build.Package) { +func (pre *preload) preloadImports(loaderstate *modload.State, ctx context.Context, opts PackageOpts, imports []string, parent *build.Package) { parentIsStd := parent.Goroot && parent.ImportPath != "" && search.IsStandardImportPath(parent.ImportPath) for _, path := range imports { if path == "C" || path == "unsafe" { @@ -1102,10 +1102,10 @@ func (pre *preload) preloadImports(ctx context.Context, opts PackageOpts, import return case pre.sema <- struct{}{}: go func(path string) { - bp, loaded, err := loadPackageData(ctx, path, parent.ImportPath, parent.Dir, parent.Root, parentIsStd, ResolveImport) + bp, loaded, err := loadPackageData(loaderstate, ctx, path, parent.ImportPath, parent.Dir, parent.Root, parentIsStd, ResolveImport) <-pre.sema if bp != nil && loaded && err == nil && !opts.IgnoreImports { - pre.preloadImports(ctx, opts, bp.Imports, bp) + pre.preloadImports(loaderstate, ctx, opts, bp.Imports, bp) } }(path) } @@ -1160,12 +1160,12 @@ func ResolveImportPath(parent *Package, path string) (found string) { parentRoot = parent.Root parentIsStd = parent.Standard } - return resolveImportPath(path, parentPath, parentDir, parentRoot, parentIsStd) + return resolveImportPath(modload.LoaderState, path, parentPath, parentDir, parentRoot, parentIsStd) } -func resolveImportPath(path, parentPath, parentDir, parentRoot string, parentIsStd bool) (found string) { +func resolveImportPath(loaderstate *modload.State, path, parentPath, parentDir, parentRoot string, parentIsStd bool) (found string) { if cfg.ModulesEnabled { - if _, p, e := modload.Lookup(parentPath, parentIsStd, path); e == nil { + if _, p, e := modload.Lookup(loaderstate, parentPath, parentIsStd, path); e == nil { return p } return path @@ -1463,7 +1463,7 @@ func reusePackage(p *Package, stk *ImportStack) *Package { // is allowed to import p. // If the import is allowed, disallowInternal returns the original package p. // If not, it returns a new package containing just an appropriate error. 
-func disallowInternal(ctx context.Context, srcDir string, importer *Package, importerPath string, p *Package, stk *ImportStack) *PackageError { +func disallowInternal(loaderstate *modload.State, ctx context.Context, srcDir string, importer *Package, importerPath string, p *Package, stk *ImportStack) *PackageError { // golang.org/s/go14internal: // An import of a path containing the element “internal” // is disallowed if the importing code is outside the tree @@ -1552,7 +1552,7 @@ func disallowInternal(ctx context.Context, srcDir string, importer *Package, imp // directory containing them. // If the directory is outside the main modules, this will resolve to ".", // which is not a prefix of any valid module. - importerPath, _ = modload.MainModules.DirImportPath(ctx, importer.Dir) + importerPath, _ = loaderstate.MainModules.DirImportPath(loaderstate, ctx, importer.Dir) } parentOfInternal := p.ImportPath[:i] if str.HasPathPrefix(importerPath, parentOfInternal) { @@ -1771,7 +1771,7 @@ func (p *Package) DefaultExecName() string { // load populates p using information from bp, err, which should // be the result of calling build.Context.Import. // stk contains the import stack, not including path itself. -func (p *Package) load(ctx context.Context, opts PackageOpts, path string, stk *ImportStack, importPos []token.Position, bp *build.Package, err error) { +func (p *Package) load(loaderstate *modload.State, ctx context.Context, opts PackageOpts, path string, stk *ImportStack, importPos []token.Position, bp *build.Package, err error) { p.copyBuild(opts, bp) // The localPrefix is the path we interpret ./ imports relative to, @@ -1835,7 +1835,7 @@ func (p *Package) load(ctx context.Context, opts PackageOpts, path string, stk * elem = full } if p.Internal.Build.BinDir == "" && cfg.ModulesEnabled { - p.Internal.Build.BinDir = modload.BinDir() + p.Internal.Build.BinDir = modload.BinDir(loaderstate) } if p.Internal.Build.BinDir != "" { // Install to GOBIN or bin of GOPATH entry. @@ -1973,9 +1973,9 @@ func (p *Package) load(ctx context.Context, opts PackageOpts, path string, stk * pkgPath = "command-line-arguments" } if cfg.ModulesEnabled { - p.Module = modload.PackageModuleInfo(ctx, pkgPath) + p.Module = modload.PackageModuleInfo(loaderstate, ctx, pkgPath) } - p.DefaultGODEBUG = defaultGODEBUG(p, nil, nil, nil) + p.DefaultGODEBUG = defaultGODEBUG(loaderstate, p, nil, nil, nil) if !opts.SuppressEmbedFiles { p.EmbedFiles, p.Internal.Embed, err = resolveEmbed(p.Dir, p.EmbedPatterns) @@ -2026,7 +2026,7 @@ func (p *Package) load(ctx context.Context, opts PackageOpts, path string, stk * if path == "C" { continue } - p1, err := loadImport(ctx, opts, nil, path, p.Dir, p, stk, p.Internal.Build.ImportPos[path], ResolveImport) + p1, err := loadImport(loaderstate, ctx, opts, nil, path, p.Dir, p, stk, p.Internal.Build.ImportPos[path], ResolveImport) if err != nil && p.Error == nil { p.Error = err p.Incomplete = true @@ -2813,7 +2813,7 @@ func TestPackageList(ctx context.Context, opts PackageOpts, roots []*Package) [] } walkTest := func(root *Package, path string) { var stk ImportStack - p1, err := loadImport(ctx, opts, nil, path, root.Dir, root, &stk, root.Internal.Build.TestImportPos[path], ResolveImport) + p1, err := loadImport(modload.LoaderState, ctx, opts, nil, path, root.Dir, root, &stk, root.Internal.Build.TestImportPos[path], ResolveImport) if err != nil && root.Error == nil { // Assign error importing the package to the importer. 
root.Error = err @@ -2840,16 +2840,16 @@ func TestPackageList(ctx context.Context, opts PackageOpts, roots []*Package) [] // dependencies (like sync/atomic for coverage). // TODO(jayconrod): delete this function and set flags automatically // in LoadImport instead. -func LoadImportWithFlags(path, srcDir string, parent *Package, stk *ImportStack, importPos []token.Position, mode int) (*Package, *PackageError) { - p, err := loadImport(context.TODO(), PackageOpts{}, nil, path, srcDir, parent, stk, importPos, mode) +func LoadImportWithFlags(loaderstate *modload.State, path, srcDir string, parent *Package, stk *ImportStack, importPos []token.Position, mode int) (*Package, *PackageError) { + p, err := loadImport(loaderstate, context.TODO(), PackageOpts{}, nil, path, srcDir, parent, stk, importPos, mode) setToolFlags(p) return p, err } // LoadPackageWithFlags is the same as LoadImportWithFlags but without a parent. // It's then guaranteed to not return an error -func LoadPackageWithFlags(path, srcDir string, stk *ImportStack, importPos []token.Position, mode int) *Package { - p := LoadPackage(context.TODO(), PackageOpts{}, path, srcDir, stk, importPos, mode) +func LoadPackageWithFlags(loaderstate *modload.State, path, srcDir string, stk *ImportStack, importPos []token.Position, mode int) *Package { + p := LoadPackage(loaderstate, context.TODO(), PackageOpts{}, path, srcDir, stk, importPos, mode) setToolFlags(p) return p } @@ -2899,7 +2899,7 @@ type PackageOpts struct { // // To obtain a flat list of packages, use PackageList. // To report errors loading packages, use ReportPackageErrors. -func PackagesAndErrors(ctx context.Context, opts PackageOpts, patterns []string) []*Package { +func PackagesAndErrors(loaderstate *modload.State, ctx context.Context, opts PackageOpts, patterns []string) []*Package { ctx, span := trace.StartSpan(ctx, "load.PackagesAndErrors") defer span.Done() @@ -2911,7 +2911,7 @@ func PackagesAndErrors(ctx context.Context, opts PackageOpts, patterns []string) // We need to test whether the path is an actual Go file and not a // package path or pattern ending in '.go' (see golang.org/issue/34653). if fi, err := fsys.Stat(p); err == nil && !fi.IsDir() { - pkgs := []*Package{GoFilesPackage(ctx, opts, patterns)} + pkgs := []*Package{GoFilesPackage(loaderstate, ctx, opts, patterns)} setPGOProfilePath(pkgs) return pkgs } @@ -2919,13 +2919,13 @@ func PackagesAndErrors(ctx context.Context, opts PackageOpts, patterns []string) } var matches []*search.Match - if modload.Init(); cfg.ModulesEnabled { + if modload.Init(loaderstate); cfg.ModulesEnabled { modOpts := modload.PackageOpts{ ResolveMissingImports: true, LoadTests: opts.ModResolveTests, SilencePackageErrors: true, } - matches, _ = modload.LoadPackages(ctx, modOpts, patterns...) + matches, _ = modload.LoadPackages(loaderstate, ctx, modOpts, patterns...) } else { matches = search.ImportPaths(patterns) } @@ -2938,7 +2938,7 @@ func PackagesAndErrors(ctx context.Context, opts PackageOpts, patterns []string) pre := newPreload() defer pre.flush() - pre.preloadMatches(ctx, opts, matches) + pre.preloadMatches(loaderstate, ctx, opts, matches) for _, m := range matches { for _, pkg := range m.Pkgs { @@ -2952,7 +2952,7 @@ func PackagesAndErrors(ctx context.Context, opts PackageOpts, patterns []string) // a literal and also a non-literal pattern. 
mode |= cmdlinePkgLiteral } - p, perr := loadImport(ctx, opts, pre, pkg, base.Cwd(), nil, &stk, nil, mode) + p, perr := loadImport(loaderstate, ctx, opts, pre, pkg, base.Cwd(), nil, &stk, nil, mode) if perr != nil { base.Fatalf("internal error: loadImport of %q with nil parent returned an error", pkg) } @@ -3243,8 +3243,8 @@ func setToolFlags(pkgs ...*Package) { // GoFilesPackage creates a package for building a collection of Go files // (typically named on the command line). The target is named p.a for // package p or named after the first Go file for package main. -func GoFilesPackage(ctx context.Context, opts PackageOpts, gofiles []string) *Package { - modload.Init() +func GoFilesPackage(loaderstate *modload.State, ctx context.Context, opts PackageOpts, gofiles []string) *Package { + modload.Init(loaderstate) for _, f := range gofiles { if !strings.HasSuffix(f, ".go") { @@ -3289,7 +3289,7 @@ func GoFilesPackage(ctx context.Context, opts PackageOpts, gofiles []string) *Pa ctxt.ReadDir = func(string) ([]fs.FileInfo, error) { return dirent, nil } if cfg.ModulesEnabled { - modload.ImportFromFiles(ctx, gofiles) + modload.ImportFromFiles(loaderstate, ctx, gofiles) } var err error @@ -3305,7 +3305,7 @@ func GoFilesPackage(ctx context.Context, opts PackageOpts, gofiles []string) *Pa pkg := new(Package) pkg.Internal.Local = true pkg.Internal.CmdlineFiles = true - pkg.load(ctx, opts, "command-line-arguments", &stk, nil, bp, err) + pkg.load(loaderstate, ctx, opts, "command-line-arguments", &stk, nil, bp, err) if !cfg.ModulesEnabled { pkg.Internal.LocalPrefix = dirToImportPath(dir) } @@ -3319,7 +3319,7 @@ func GoFilesPackage(ctx context.Context, opts PackageOpts, gofiles []string) *Pa if cfg.GOBIN != "" { pkg.Target = filepath.Join(cfg.GOBIN, exe) } else if cfg.ModulesEnabled { - pkg.Target = filepath.Join(modload.BinDir(), exe) + pkg.Target = filepath.Join(modload.BinDir(loaderstate), exe) } } @@ -3347,11 +3347,11 @@ func GoFilesPackage(ctx context.Context, opts PackageOpts, gofiles []string) *Pa // module, but its go.mod file (if it has one) must not contain directives that // would cause it to be interpreted differently if it were the main module // (replace, exclude). 
-func PackagesAndErrorsOutsideModule(ctx context.Context, opts PackageOpts, args []string) ([]*Package, error) { - if !modload.ForceUseModules { +func PackagesAndErrorsOutsideModule(loaderstate *modload.State, ctx context.Context, opts PackageOpts, args []string) ([]*Package, error) { + if !loaderstate.ForceUseModules { panic("modload.ForceUseModules must be true") } - if modload.RootMode != modload.NoRoot { + if loaderstate.RootMode != modload.NoRoot { panic("modload.RootMode must be NoRoot") } @@ -3404,12 +3404,12 @@ func PackagesAndErrorsOutsideModule(ctx context.Context, opts PackageOpts, args allowed = nil } noneSelected := func(path string) (version string) { return "none" } - qrs, err := modload.QueryPackages(ctx, patterns[0], version, noneSelected, allowed) + qrs, err := modload.QueryPackages(loaderstate, ctx, patterns[0], version, noneSelected, allowed) if err != nil { return nil, fmt.Errorf("%s: %w", args[0], err) } rootMod := qrs[0].Mod - deprecation, err := modload.CheckDeprecation(ctx, rootMod) + deprecation, err := modload.CheckDeprecation(loaderstate, ctx, rootMod) if err != nil { return nil, fmt.Errorf("%s: %w", args[0], err) } @@ -3438,12 +3438,12 @@ func PackagesAndErrorsOutsideModule(ctx context.Context, opts PackageOpts, args // Since we are in NoRoot mode, the build list initially contains only // the dummy command-line-arguments module. Add a requirement on the // module that provides the packages named on the command line. - if _, err := modload.EditBuildList(ctx, nil, []module.Version{rootMod}); err != nil { + if _, err := modload.EditBuildList(loaderstate, ctx, nil, []module.Version{rootMod}); err != nil { return nil, fmt.Errorf("%s: %w", args[0], err) } // Load packages for all arguments. - pkgs := PackagesAndErrors(ctx, opts, patterns) + pkgs := PackagesAndErrors(loaderstate, ctx, opts, patterns) // Check that named packages are all provided by the same module. for _, pkg := range pkgs { @@ -3471,14 +3471,14 @@ func PackagesAndErrorsOutsideModule(ctx context.Context, opts PackageOpts, args } // EnsureImport ensures that package p imports the named package. -func EnsureImport(p *Package, pkg string) { +func EnsureImport(loaderstate *modload.State, p *Package, pkg string) { for _, d := range p.Internal.Imports { if d.Name == pkg { return } } - p1, err := LoadImportWithFlags(pkg, p.Dir, p, &ImportStack{}, nil, 0) + p1, err := LoadImportWithFlags(loaderstate, pkg, p.Dir, p, &ImportStack{}, nil, 0) if err != nil { base.Fatalf("load %s: %v", pkg, err) } @@ -3494,7 +3494,7 @@ func EnsureImport(p *Package, pkg string) { // "go test -cover"). It walks through the packages being built (and // dependencies) and marks them for coverage instrumentation when // appropriate, and possibly adding additional deps where needed. -func PrepareForCoverageBuild(pkgs []*Package) { +func PrepareForCoverageBuild(loaderstate *modload.State, pkgs []*Package) { var match []func(*Package) bool matchMainModAndCommandLine := func(p *Package) bool { @@ -3507,7 +3507,7 @@ func PrepareForCoverageBuild(pkgs []*Package) { // the specific packages selected by the user-specified pattern(s). 
match = make([]func(*Package) bool, len(cfg.BuildCoverPkg)) for i := range cfg.BuildCoverPkg { - match[i] = MatchPackage(cfg.BuildCoverPkg[i], base.Cwd()) + match[i] = MatchPackage(loaderstate, cfg.BuildCoverPkg[i], base.Cwd()) } } else { // Without -coverpkg, instrument only packages in the main module @@ -3519,10 +3519,10 @@ func PrepareForCoverageBuild(pkgs []*Package) { // Visit the packages being built or installed, along with all of // their dependencies, and mark them to be instrumented, taking // into account the matchers we've set up in the sequence above. - SelectCoverPackages(PackageList(pkgs), match, "build") + SelectCoverPackages(loaderstate, PackageList(pkgs), match, "build") } -func SelectCoverPackages(roots []*Package, match []func(*Package) bool, op string) []*Package { +func SelectCoverPackages(loaderstate *modload.State, roots []*Package, match []func(*Package) bool, op string) []*Package { var warntag string var includeMain bool switch op { @@ -3602,7 +3602,7 @@ func SelectCoverPackages(roots []*Package, match []func(*Package) bool, op strin // Force import of sync/atomic into package if atomic mode. if cfg.BuildCoverMode == "atomic" { - EnsureImport(p, "sync/atomic") + EnsureImport(loaderstate, p, "sync/atomic") } } diff --git a/src/cmd/go/internal/load/search.go b/src/cmd/go/internal/load/search.go index 941cfb77a2ec08..09e32a4f46a69b 100644 --- a/src/cmd/go/internal/load/search.go +++ b/src/cmd/go/internal/load/search.go @@ -14,7 +14,7 @@ import ( ) // MatchPackage(pattern, cwd)(p) reports whether package p matches pattern in the working directory cwd. -func MatchPackage(pattern, cwd string) func(*Package) bool { +func MatchPackage(loaderstate *modload.State, pattern, cwd string) func(*Package) bool { switch { case search.IsRelativePath(pattern): // Split pattern into leading pattern-free directory path @@ -54,13 +54,13 @@ func MatchPackage(pattern, cwd string) func(*Package) bool { return func(p *Package) bool { return p.Standard } case pattern == "cmd": return func(p *Package) bool { return p.Standard && strings.HasPrefix(p.ImportPath, "cmd/") } - case pattern == "tool" && modload.Enabled(): + case pattern == "tool" && modload.Enabled(loaderstate): return func(p *Package) bool { - return modload.MainModules.Tools()[p.ImportPath] + return loaderstate.MainModules.Tools()[p.ImportPath] } - case pattern == "work" && modload.Enabled(): + case pattern == "work" && modload.Enabled(loaderstate): return func(p *Package) bool { - return p.Module != nil && modload.MainModules.Contains(p.Module.Path) + return p.Module != nil && loaderstate.MainModules.Contains(p.Module.Path) } default: diff --git a/src/cmd/go/internal/load/test.go b/src/cmd/go/internal/load/test.go index f895e3a2461d9e..9019545b4b8d82 100644 --- a/src/cmd/go/internal/load/test.go +++ b/src/cmd/go/internal/load/test.go @@ -24,6 +24,7 @@ import ( "unicode/utf8" "cmd/go/internal/fsys" + "cmd/go/internal/modload" "cmd/go/internal/str" "cmd/go/internal/trace" ) @@ -106,7 +107,7 @@ func TestPackagesAndErrors(ctx context.Context, done func(), opts PackageOpts, p defer pre.flush() allImports := append([]string{}, p.TestImports...) allImports = append(allImports, p.XTestImports...) 
- pre.preloadImports(ctx, opts, allImports, p.Internal.Build) + pre.preloadImports(modload.LoaderState, ctx, opts, allImports, p.Internal.Build) var ptestErr, pxtestErr *PackageError var imports, ximports []*Package @@ -116,7 +117,7 @@ func TestPackagesAndErrors(ctx context.Context, done func(), opts PackageOpts, p stk.Push(ImportInfo{Pkg: p.ImportPath + " (test)"}) rawTestImports := str.StringList(p.TestImports) for i, path := range p.TestImports { - p1, err := loadImport(ctx, opts, pre, path, p.Dir, p, &stk, p.Internal.Build.TestImportPos[path], ResolveImport) + p1, err := loadImport(modload.LoaderState, ctx, opts, pre, path, p.Dir, p, &stk, p.Internal.Build.TestImportPos[path], ResolveImport) if err != nil && ptestErr == nil { ptestErr = err incomplete = true @@ -145,7 +146,7 @@ func TestPackagesAndErrors(ctx context.Context, done func(), opts PackageOpts, p var pxtestIncomplete bool rawXTestImports := str.StringList(p.XTestImports) for i, path := range p.XTestImports { - p1, err := loadImport(ctx, opts, pre, path, p.Dir, p, &stk, p.Internal.Build.XTestImportPos[path], ResolveImport) + p1, err := loadImport(modload.LoaderState, ctx, opts, pre, path, p.Dir, p, &stk, p.Internal.Build.XTestImportPos[path], ResolveImport) if err != nil && pxtestErr == nil { pxtestErr = err } @@ -292,7 +293,7 @@ func TestPackagesAndErrors(ctx context.Context, done func(), opts PackageOpts, p } pb := p.Internal.Build - pmain.DefaultGODEBUG = defaultGODEBUG(pmain, pb.Directives, pb.TestDirectives, pb.XTestDirectives) + pmain.DefaultGODEBUG = defaultGODEBUG(modload.LoaderState, pmain, pb.Directives, pb.TestDirectives, pb.XTestDirectives) if pmain.Internal.BuildInfo == nil || pmain.DefaultGODEBUG != p.DefaultGODEBUG { // Either we didn't generate build info for the package under test (because it wasn't package main), or // the DefaultGODEBUG used to build the test main package is different from the DefaultGODEBUG @@ -321,7 +322,7 @@ func TestPackagesAndErrors(ctx context.Context, done func(), opts PackageOpts, p if dep == ptest.ImportPath { pmain.Internal.Imports = append(pmain.Internal.Imports, ptest) } else { - p1, err := loadImport(ctx, opts, pre, dep, "", nil, &stk, nil, 0) + p1, err := loadImport(modload.LoaderState, ctx, opts, pre, dep, "", nil, &stk, nil, 0) if err != nil && pmain.Error == nil { pmain.Error = err pmain.Incomplete = true @@ -649,6 +650,14 @@ func (t *testFuncs) ImportPath() string { return pkg } +func (t *testFuncs) ModulePath() string { + m := t.Package.Module + if m == nil { + return "" + } + return m.Path +} + // Covered returns a string describing which packages are being tested for coverage. // If the covered package is the same as the tested package, it returns the empty string. // Otherwise it is a comma-separated human-readable list of packages beginning with @@ -836,6 +845,7 @@ func init() { testdeps.CoverMarkProfileEmittedFunc = cfile.MarkProfileEmitted {{end}} + testdeps.ModulePath = {{.ModulePath | printf "%q"}} testdeps.ImportPath = {{.ImportPath | printf "%q"}} } diff --git a/src/cmd/go/internal/modcmd/download.go b/src/cmd/go/internal/modcmd/download.go index 2f4feae8f254b2..f4ed4b45834e89 100644 --- a/src/cmd/go/internal/modcmd/download.go +++ b/src/cmd/go/internal/modcmd/download.go @@ -109,18 +109,18 @@ type ModuleJSON struct { } func runDownload(ctx context.Context, cmd *base.Command, args []string) { - modload.InitWorkfile() + modload.InitWorkfile(modload.LoaderState) // Check whether modules are enabled and whether we're in a module. 
- modload.ForceUseModules = true + modload.LoaderState.ForceUseModules = true modload.ExplicitWriteGoMod = true haveExplicitArgs := len(args) > 0 - if modload.HasModRoot() || modload.WorkFilePath() != "" { - modload.LoadModFile(ctx) // to fill MainModules + if modload.HasModRoot(modload.LoaderState) || modload.WorkFilePath(modload.LoaderState) != "" { + modload.LoadModFile(modload.LoaderState, ctx) // to fill MainModules if haveExplicitArgs { - for _, mainModule := range modload.MainModules.Versions() { + for _, mainModule := range modload.LoaderState.MainModules.Versions() { targetAtUpgrade := mainModule.Path + "@upgrade" targetAtPatch := mainModule.Path + "@patch" for _, arg := range args { @@ -130,14 +130,14 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) { } } } - } else if modload.WorkFilePath() != "" { + } else if modload.WorkFilePath(modload.LoaderState) != "" { // TODO(#44435): Think about what the correct query is to download the // right set of modules. Also see code review comment at // https://go-review.googlesource.com/c/go/+/359794/comments/ce946a80_6cf53992. args = []string{"all"} } else { - mainModule := modload.MainModules.Versions()[0] - modFile := modload.MainModules.ModFile(mainModule) + mainModule := modload.LoaderState.MainModules.Versions()[0] + modFile := modload.LoaderState.MainModules.ModFile(mainModule) if modFile.Go == nil || gover.Compare(modFile.Go.Version, gover.ExplicitIndirectVersion) < 0 { if len(modFile.Require) > 0 { args = []string{"all"} @@ -169,7 +169,7 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) { } if len(args) == 0 { - if modload.HasModRoot() { + if modload.HasModRoot(modload.LoaderState) { os.Stderr.WriteString("go: no module dependencies to download\n") } else { base.Errorf("go: no modules specified (see 'go help mod download')") @@ -177,7 +177,7 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) { base.Exit() } - if *downloadReuse != "" && modload.HasModRoot() { + if *downloadReuse != "" && modload.HasModRoot(modload.LoaderState) { base.Fatalf("go mod download -reuse cannot be used inside a module") } @@ -220,7 +220,7 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) { // when we can. } - if !haveExplicitArgs && modload.WorkFilePath() == "" { + if !haveExplicitArgs && modload.WorkFilePath(modload.LoaderState) == "" { // 'go mod download' is sometimes run without arguments to pre-populate the // module cache. In modules that aren't at go 1.17 or higher, it may fetch // modules that aren't needed to build packages in the main module. This is @@ -291,7 +291,7 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) { // with no arguments we download the module pattern "all", // which may include dependencies that are normally pruned out // of the individual modules in the workspace. - if haveExplicitArgs || modload.WorkFilePath() != "" { + if haveExplicitArgs || modload.WorkFilePath(modload.LoaderState) != "" { var sw toolchain.Switcher // Add errors to the Switcher in deterministic order so that they will be // logged deterministically. @@ -347,7 +347,7 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) { // // Don't save sums for 'go mod download' without arguments unless we're in // workspace mode; see comment above. 
- if haveExplicitArgs || modload.WorkFilePath() != "" { + if haveExplicitArgs || modload.WorkFilePath(modload.LoaderState) != "" { if err := modload.WriteGoMod(ctx, modload.WriteOpts{}); err != nil { base.Error(err) } diff --git a/src/cmd/go/internal/modcmd/edit.go b/src/cmd/go/internal/modcmd/edit.go index aafd9752a8e487..041b4432bfd4f0 100644 --- a/src/cmd/go/internal/modcmd/edit.go +++ b/src/cmd/go/internal/modcmd/edit.go @@ -104,11 +104,6 @@ writing it back to go.mod. The -json flag prints the final go.mod file in JSON format instead of writing it back to go.mod. The JSON output corresponds to these Go types: - type Module struct { - Path string - Version string - } - type GoMod struct { Module ModPath Go string @@ -118,6 +113,13 @@ writing it back to go.mod. The JSON output corresponds to these Go types: Exclude []Module Replace []Replace Retract []Retract + Tool []Tool + Ignore []Ignore + } + + type Module struct { + Path string + Version string } type ModPath struct { diff --git a/src/cmd/go/internal/modcmd/graph.go b/src/cmd/go/internal/modcmd/graph.go index 172c1dda5ce8fb..3bc6009b57b595 100644 --- a/src/cmd/go/internal/modcmd/graph.go +++ b/src/cmd/go/internal/modcmd/graph.go @@ -52,13 +52,13 @@ func init() { } func runGraph(ctx context.Context, cmd *base.Command, args []string) { - modload.InitWorkfile() + modload.InitWorkfile(modload.LoaderState) if len(args) > 0 { base.Fatalf("go: 'go mod graph' accepts no arguments") } - modload.ForceUseModules = true - modload.RootMode = modload.NeedRoot + modload.LoaderState.ForceUseModules = true + modload.LoaderState.RootMode = modload.NeedRoot goVersion := graphGo.String() if goVersion != "" && gover.Compare(gover.Local(), goVersion) < 0 { diff --git a/src/cmd/go/internal/modcmd/init.go b/src/cmd/go/internal/modcmd/init.go index 356a0569913edf..618c673bf86f24 100644 --- a/src/cmd/go/internal/modcmd/init.go +++ b/src/cmd/go/internal/modcmd/init.go @@ -43,6 +43,6 @@ func runInit(ctx context.Context, cmd *base.Command, args []string) { modPath = args[0] } - modload.ForceUseModules = true + modload.LoaderState.ForceUseModules = true modload.CreateModFile(ctx, modPath) // does all the hard work } diff --git a/src/cmd/go/internal/modcmd/tidy.go b/src/cmd/go/internal/modcmd/tidy.go index 2efa33a7c343dd..c693bd52a38af9 100644 --- a/src/cmd/go/internal/modcmd/tidy.go +++ b/src/cmd/go/internal/modcmd/tidy.go @@ -119,8 +119,8 @@ func runTidy(ctx context.Context, cmd *base.Command, args []string) { // those packages. In order to make 'go test' reproducible for the packages // that are in 'all' but outside of the main module, we must explicitly // request that their test dependencies be included. 
- modload.ForceUseModules = true - modload.RootMode = modload.NeedRoot + modload.LoaderState.ForceUseModules = true + modload.LoaderState.RootMode = modload.NeedRoot goVersion := tidyGo.String() if goVersion != "" && gover.Compare(gover.Local(), goVersion) < 0 { @@ -130,7 +130,7 @@ func runTidy(ctx context.Context, cmd *base.Command, args []string) { }) } - modload.LoadPackages(ctx, modload.PackageOpts{ + modload.LoadPackages(modload.LoaderState, ctx, modload.PackageOpts{ TidyGoVersion: tidyGo.String(), Tags: imports.AnyTags(), Tidy: true, diff --git a/src/cmd/go/internal/modcmd/vendor.go b/src/cmd/go/internal/modcmd/vendor.go index e1a9081a95ff80..8d9672d5365523 100644 --- a/src/cmd/go/internal/modcmd/vendor.go +++ b/src/cmd/go/internal/modcmd/vendor.go @@ -66,8 +66,8 @@ func init() { } func runVendor(ctx context.Context, cmd *base.Command, args []string) { - modload.InitWorkfile() - if modload.WorkFilePath() != "" { + modload.InitWorkfile(modload.LoaderState) + if modload.WorkFilePath(modload.LoaderState) != "" { base.Fatalf("go: 'go mod vendor' cannot be run in workspace mode. Run 'go work vendor' to vendor the workspace or set 'GOWORK=off' to exit workspace mode.") } RunVendor(ctx, vendorE, vendorO, args) @@ -77,8 +77,8 @@ func RunVendor(ctx context.Context, vendorE bool, vendorO string, args []string) if len(args) != 0 { base.Fatalf("go: 'go mod vendor' accepts no arguments") } - modload.ForceUseModules = true - modload.RootMode = modload.NeedRoot + modload.LoaderState.ForceUseModules = true + modload.LoaderState.RootMode = modload.NeedRoot loadOpts := modload.PackageOpts{ Tags: imports.AnyTags(), @@ -88,7 +88,7 @@ func RunVendor(ctx context.Context, vendorE bool, vendorO string, args []string) AllowErrors: vendorE, SilenceMissingStdImports: true, } - _, pkgs := modload.LoadPackages(ctx, loadOpts, "all") + _, pkgs := modload.LoadPackages(modload.LoaderState, ctx, loadOpts, "all") var vdir string switch { @@ -97,7 +97,7 @@ func RunVendor(ctx context.Context, vendorE bool, vendorO string, args []string) case vendorO != "": vdir = filepath.Join(base.Cwd(), vendorO) default: - vdir = filepath.Join(modload.VendorDir()) + vdir = filepath.Join(modload.VendorDir(modload.LoaderState)) } if err := os.RemoveAll(vdir); err != nil { base.Fatal(err) @@ -106,7 +106,7 @@ func RunVendor(ctx context.Context, vendorE bool, vendorO string, args []string) modpkgs := make(map[module.Version][]string) for _, pkg := range pkgs { m := modload.PackageModule(pkg) - if m.Path == "" || modload.MainModules.Contains(m.Path) { + if m.Path == "" || modload.LoaderState.MainModules.Contains(m.Path) { continue } modpkgs[m] = append(modpkgs[m], pkg) @@ -116,13 +116,13 @@ func RunVendor(ctx context.Context, vendorE bool, vendorO string, args []string) includeAllReplacements := false includeGoVersions := false isExplicit := map[module.Version]bool{} - gv := modload.MainModules.GoVersion() - if gover.Compare(gv, "1.14") >= 0 && (modload.FindGoWork(base.Cwd()) != "" || modload.ModFile().Go != nil) { + gv := modload.LoaderState.MainModules.GoVersion(modload.LoaderState) + if gover.Compare(gv, "1.14") >= 0 && (modload.FindGoWork(modload.LoaderState, base.Cwd()) != "" || modload.ModFile().Go != nil) { // If the Go version is at least 1.14, annotate all explicit 'require' and // 'replace' targets found in the go.mod file so that we can perform a // stronger consistency check when -mod=vendor is set. 
- for _, m := range modload.MainModules.Versions() { - if modFile := modload.MainModules.ModFile(m); modFile != nil { + for _, m := range modload.LoaderState.MainModules.Versions() { + if modFile := modload.LoaderState.MainModules.ModFile(m); modFile != nil { for _, r := range modFile.Require { isExplicit[r.Mod] = true } @@ -156,13 +156,13 @@ func RunVendor(ctx context.Context, vendorE bool, vendorO string, args []string) w = io.MultiWriter(&buf, os.Stderr) } - if modload.MainModules.WorkFile() != nil { + if modload.LoaderState.MainModules.WorkFile() != nil { fmt.Fprintf(w, "## workspace\n") } replacementWritten := make(map[module.Version]bool) for _, m := range vendorMods { - replacement := modload.Replacement(m) + replacement := modload.Replacement(modload.LoaderState, m) line := moduleLine(m, replacement) replacementWritten[m] = true io.WriteString(w, line) @@ -192,8 +192,8 @@ func RunVendor(ctx context.Context, vendorE bool, vendorO string, args []string) // Record unused and wildcard replacements at the end of the modules.txt file: // without access to the complete build list, the consumer of the vendor // directory can't otherwise determine that those replacements had no effect. - for _, m := range modload.MainModules.Versions() { - if workFile := modload.MainModules.WorkFile(); workFile != nil { + for _, m := range modload.LoaderState.MainModules.Versions() { + if workFile := modload.LoaderState.MainModules.WorkFile(); workFile != nil { for _, r := range workFile.Replace { if replacementWritten[r.Old] { // We already recorded this replacement. @@ -208,14 +208,14 @@ func RunVendor(ctx context.Context, vendorE bool, vendorO string, args []string) } } } - if modFile := modload.MainModules.ModFile(m); modFile != nil { + if modFile := modload.LoaderState.MainModules.ModFile(m); modFile != nil { for _, r := range modFile.Replace { if replacementWritten[r.Old] { // We already recorded this replacement. continue } replacementWritten[r.Old] = true - rNew := modload.Replacement(r.Old) + rNew := modload.Replacement(modload.LoaderState, r.Old) if rNew == (module.Version{}) { // There is no replacement. Don't try to write it. continue @@ -269,7 +269,7 @@ func moduleLine(m, r module.Version) string { } func vendorPkg(vdir, pkg string) { - src, realPath, _ := modload.Lookup("", false, pkg) + src, realPath, _ := modload.Lookup(modload.LoaderState, "", false, pkg) if src == "" { base.Errorf("internal error: no pkg for %s\n", pkg) return @@ -315,7 +315,7 @@ func vendorPkg(vdir, pkg string) { } } var embedPatterns []string - if gover.Compare(modload.MainModules.GoVersion(), "1.22") >= 0 { + if gover.Compare(modload.LoaderState.MainModules.GoVersion(modload.LoaderState), "1.22") >= 0 { embedPatterns = bp.EmbedPatterns } else { // Maintain the behavior of https://github.com/golang/go/issues/63473 @@ -431,7 +431,7 @@ func matchPotentialSourceFile(dir string, info fs.DirEntry) bool { return false } if info.Name() == "go.mod" || info.Name() == "go.sum" { - if gv := modload.MainModules.GoVersion(); gover.Compare(gv, "1.17") >= 0 { + if gv := modload.LoaderState.MainModules.GoVersion(modload.LoaderState); gover.Compare(gv, "1.17") >= 0 { // As of Go 1.17, we strip go.mod and go.sum files from dependency modules. // Otherwise, 'go' commands invoked within the vendor subtree may misidentify // an arbitrary directory within the vendor tree as a module root. 
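Most of the changes in these files follow one mechanical pattern: modload functions that used to consult package-level globals (ForceUseModules, RootMode, MainModules, and so on) now take an explicit `*modload.State`, and callers pass `modload.LoaderState`. A rough sketch of the shape of that refactoring, using simplified, hypothetical types rather than the real modload API:

```go
// Package sketch illustrates the loader-state refactoring pattern only;
// the names mirror modload but the contents are not the real package.
package sketch

// Before, conceptually:
//
//	var ForceUseModules bool
//	func Enabled() bool { return ForceUseModules || hasModRoot() }
//
// After: the same state is carried in an explicit value that callers
// thread through, with a single package-level instance retained for
// the existing entry points in cmd/go.

type State struct {
	ForceUseModules bool
	RootMode        int
}

// LoaderState is the global instance that existing callers
// (runGet, runTidy, and so on) now pass explicitly.
var LoaderState = new(State)

func Enabled(s *State) bool {
	return s.ForceUseModules || hasModRoot(s)
}

func hasModRoot(s *State) bool {
	// Placeholder for the real go.mod discovery logic.
	return false
}
```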
diff --git a/src/cmd/go/internal/modcmd/verify.go b/src/cmd/go/internal/modcmd/verify.go index d07f730c5d0dcf..d8227bcd5455a3 100644 --- a/src/cmd/go/internal/modcmd/verify.go +++ b/src/cmd/go/internal/modcmd/verify.go @@ -44,14 +44,14 @@ func init() { } func runVerify(ctx context.Context, cmd *base.Command, args []string) { - modload.InitWorkfile() + modload.InitWorkfile(modload.LoaderState) if len(args) != 0 { // NOTE(rsc): Could take a module pattern. base.Fatalf("go: verify takes no arguments") } - modload.ForceUseModules = true - modload.RootMode = modload.NeedRoot + modload.LoaderState.ForceUseModules = true + modload.LoaderState.RootMode = modload.NeedRoot // Only verify up to GOMAXPROCS zips at once. type token struct{} @@ -94,7 +94,7 @@ func verifyMod(ctx context.Context, mod module.Version) []error { // "go" and "toolchain" have no disk footprint; nothing to verify. return nil } - if modload.MainModules.Contains(mod.Path) { + if modload.LoaderState.MainModules.Contains(mod.Path) { return nil } var errs []error diff --git a/src/cmd/go/internal/modcmd/why.go b/src/cmd/go/internal/modcmd/why.go index 198672d8064113..b37d9fded0f47c 100644 --- a/src/cmd/go/internal/modcmd/why.go +++ b/src/cmd/go/internal/modcmd/why.go @@ -63,9 +63,9 @@ func init() { } func runWhy(ctx context.Context, cmd *base.Command, args []string) { - modload.InitWorkfile() - modload.ForceUseModules = true - modload.RootMode = modload.NeedRoot + modload.InitWorkfile(modload.LoaderState) + modload.LoaderState.ForceUseModules = true + modload.LoaderState.RootMode = modload.NeedRoot modload.ExplicitWriteGoMod = true // don't write go.mod in ListModules loadOpts := modload.PackageOpts{ @@ -89,7 +89,7 @@ func runWhy(ctx context.Context, cmd *base.Command, args []string) { } byModule := make(map[string][]string) - _, pkgs := modload.LoadPackages(ctx, loadOpts, "all") + _, pkgs := modload.LoadPackages(modload.LoaderState, ctx, loadOpts, "all") for _, path := range pkgs { m := modload.PackageModule(path) if m.Path != "" { @@ -120,9 +120,9 @@ func runWhy(ctx context.Context, cmd *base.Command, args []string) { } } else { // Resolve to packages. - matches, _ := modload.LoadPackages(ctx, loadOpts, args...) + matches, _ := modload.LoadPackages(modload.LoaderState, ctx, loadOpts, args...) - modload.LoadPackages(ctx, loadOpts, "all") // rebuild graph, from main module (not from named packages) + modload.LoadPackages(modload.LoaderState, ctx, loadOpts, "all") // rebuild graph, from main module (not from named packages) sep := "" for _, m := range matches { diff --git a/src/cmd/go/internal/modfetch/zip_sum_test/zip_sum_test.go b/src/cmd/go/internal/modfetch/zip_sum_test/zip_sum_test.go index 16cc1457058933..9d59d1a8ea8218 100644 --- a/src/cmd/go/internal/modfetch/zip_sum_test/zip_sum_test.go +++ b/src/cmd/go/internal/modfetch/zip_sum_test/zip_sum_test.go @@ -94,7 +94,7 @@ func TestZipSums(t *testing.T) { cfg.GOPROXY = "direct" cfg.GOSUMDB = "off" - modload.Init() + modload.Init(modload.LoaderState) // Shard tests by downloading only every nth module when shard flags are set. // This makes it easier to test small groups of modules quickly. 
We avoid diff --git a/src/cmd/go/internal/modget/get.go b/src/cmd/go/internal/modget/get.go index 25dbf3972fd465..141e1708fa6ded 100644 --- a/src/cmd/go/internal/modget/get.go +++ b/src/cmd/go/internal/modget/get.go @@ -298,7 +298,7 @@ func runGet(ctx context.Context, cmd *base.Command, args []string) { base.Fatalf("go: -insecure flag is no longer supported; use GOINSECURE instead") } - modload.ForceUseModules = true + modload.LoaderState.ForceUseModules = true // Do not allow any updating of go.mod until we've applied // all the requested changes and checked that the result matches @@ -307,14 +307,14 @@ func runGet(ctx context.Context, cmd *base.Command, args []string) { // Allow looking up modules for import paths when outside of a module. // 'go get' is expected to do this, unlike other commands. - modload.AllowMissingModuleImports() + modload.AllowMissingModuleImports(modload.LoaderState) // 'go get' no longer builds or installs packages, so there's nothing to do // if there's no go.mod file. // TODO(#40775): make modload.Init return ErrNoModRoot instead of exiting. // We could handle that here by printing a different message. - modload.Init() - if !modload.HasModRoot() { + modload.Init(modload.LoaderState) + if !modload.HasModRoot(modload.LoaderState) { base.Fatalf("go: go.mod file not found in current directory or any parent directory.\n" + "\t'go get' is no longer supported outside a module.\n" + "\tTo build and install a command, use 'go install' with a version,\n" + @@ -424,9 +424,9 @@ func runGet(ctx context.Context, cmd *base.Command, args []string) { newReqs := reqsFromGoMod(modload.ModFile()) r.reportChanges(oldReqs, newReqs) - if gowork := modload.FindGoWork(base.Cwd()); gowork != "" { + if gowork := modload.FindGoWork(modload.LoaderState, base.Cwd()); gowork != "" { wf, err := modload.ReadWorkFile(gowork) - if err == nil && modload.UpdateWorkGoVersion(wf, modload.MainModules.GoVersion()) { + if err == nil && modload.UpdateWorkGoVersion(wf, modload.LoaderState.MainModules.GoVersion(modload.LoaderState)) { modload.WriteWorkFile(gowork, wf) } } @@ -448,7 +448,7 @@ func updateTools(ctx context.Context, queries []*query, opts *modload.WriteOpts) patterns = append(patterns, q.pattern) } - matches, _ := modload.LoadPackages(ctx, pkgOpts, patterns...) + matches, _ := modload.LoadPackages(modload.LoaderState, ctx, pkgOpts, patterns...) for i, m := range matches { if queries[i].version == "none" { opts.DropTools = append(opts.DropTools, m.Pkgs...) @@ -574,7 +574,7 @@ func newResolver(ctx context.Context, queries []*query) *resolver { buildListVersion: initialVersion, initialVersion: initialVersion, nonesByPath: map[string]*query{}, - workspace: loadWorkspace(modload.FindGoWork(base.Cwd())), + workspace: loadWorkspace(modload.FindGoWork(modload.LoaderState, base.Cwd())), } for _, q := range queries { @@ -645,7 +645,7 @@ func (r *resolver) noneForPath(mPath string) (nq *query, found bool) { // allowed versions. 
func (r *resolver) queryModule(ctx context.Context, mPath, query string, selected func(string) string) (module.Version, error) { current := r.initialSelected(mPath) - rev, err := modload.Query(ctx, mPath, query, current, r.checkAllowedOr(query, selected)) + rev, err := modload.Query(modload.LoaderState, ctx, mPath, query, current, r.checkAllowedOr(query, selected)) if err != nil { return module.Version{}, err } @@ -655,7 +655,7 @@ func (r *resolver) queryModule(ctx context.Context, mPath, query string, selecte // queryPackages wraps modload.QueryPackage, substituting r.checkAllowedOr to // decide allowed versions. func (r *resolver) queryPackages(ctx context.Context, pattern, query string, selected func(string) string) (pkgMods []module.Version, err error) { - results, err := modload.QueryPackages(ctx, pattern, query, selected, r.checkAllowedOr(query, selected)) + results, err := modload.QueryPackages(modload.LoaderState, ctx, pattern, query, selected, r.checkAllowedOr(query, selected)) if len(results) > 0 { pkgMods = make([]module.Version, 0, len(results)) for _, qr := range results { @@ -668,7 +668,7 @@ func (r *resolver) queryPackages(ctx context.Context, pattern, query string, sel // queryPattern wraps modload.QueryPattern, substituting r.checkAllowedOr to // decide allowed versions. func (r *resolver) queryPattern(ctx context.Context, pattern, query string, selected func(string) string) (pkgMods []module.Version, mod module.Version, err error) { - results, modOnly, err := modload.QueryPattern(ctx, pattern, query, selected, r.checkAllowedOr(query, selected)) + results, modOnly, err := modload.QueryPattern(modload.LoaderState, ctx, pattern, query, selected, r.checkAllowedOr(query, selected)) if len(results) > 0 { pkgMods = make([]module.Version, 0, len(results)) for _, qr := range results { @@ -721,8 +721,8 @@ func (r *resolver) queryNone(ctx context.Context, q *query) { if !q.isWildcard() { q.pathOnce(q.pattern, func() pathSet { - hasModRoot := modload.HasModRoot() - if hasModRoot && modload.MainModules.Contains(q.pattern) { + hasModRoot := modload.HasModRoot(modload.LoaderState) + if hasModRoot && modload.LoaderState.MainModules.Contains(q.pattern) { v := module.Version{Path: q.pattern} // The user has explicitly requested to downgrade their own module to // version "none". This is not an entirely unreasonable request: it @@ -746,7 +746,7 @@ func (r *resolver) queryNone(ctx context.Context, q *query) { continue } q.pathOnce(curM.Path, func() pathSet { - if modload.HasModRoot() && curM.Version == "" && modload.MainModules.Contains(curM.Path) { + if modload.HasModRoot(modload.LoaderState) && curM.Version == "" && modload.LoaderState.MainModules.Contains(curM.Path) { return errSet(&modload.QueryMatchesMainModulesError{MainModules: []module.Version{curM}, Pattern: q.pattern, Query: q.version}) } return pathSet{mod: module.Version{Path: curM.Path, Version: "none"}} @@ -766,13 +766,13 @@ func (r *resolver) performLocalQueries(ctx context.Context) { // Absolute paths like C:\foo and relative paths like ../foo... are // restricted to matching packages in the main module. - pkgPattern, mainModule := modload.MainModules.DirImportPath(ctx, q.pattern) + pkgPattern, mainModule := modload.LoaderState.MainModules.DirImportPath(modload.LoaderState, ctx, q.pattern) if pkgPattern == "." 
{ modload.MustHaveModRoot() - versions := modload.MainModules.Versions() + versions := modload.LoaderState.MainModules.Versions() modRoots := make([]string, 0, len(versions)) for _, m := range versions { - modRoots = append(modRoots, modload.MainModules.ModRoot(m)) + modRoots = append(modRoots, modload.LoaderState.MainModules.ModRoot(m)) } var plural string if len(modRoots) != 1 { @@ -792,7 +792,7 @@ func (r *resolver) performLocalQueries(ctx context.Context) { } if !q.isWildcard() { modload.MustHaveModRoot() - return errSet(fmt.Errorf("%s%s is not a package in module rooted at %s", q.pattern, absDetail, modload.MainModules.ModRoot(mainModule))) + return errSet(fmt.Errorf("%s%s is not a package in module rooted at %s", q.pattern, absDetail, modload.LoaderState.MainModules.ModRoot(mainModule))) } search.WarnUnmatched([]*search.Match{match}) return pathSet{} @@ -848,7 +848,7 @@ func (r *resolver) queryWildcard(ctx context.Context, q *query) { return pathSet{} } - if modload.MainModules.Contains(curM.Path) && !versionOkForMainModule(q.version) { + if modload.LoaderState.MainModules.Contains(curM.Path) && !versionOkForMainModule(q.version) { if q.matchesPath(curM.Path) { return errSet(&modload.QueryMatchesMainModulesError{ MainModules: []module.Version{curM}, @@ -1065,7 +1065,7 @@ func (r *resolver) queryPath(ctx context.Context, q *query) { // pattern is "tool". func (r *resolver) performToolQueries(ctx context.Context) { for _, q := range r.toolQueries { - for tool := range modload.MainModules.Tools() { + for tool := range modload.LoaderState.MainModules.Tools() { q.pathOnce(tool, func() pathSet { pkgMods, err := r.queryPackages(ctx, tool, q.version, r.initialSelected) return pathSet{pkgMods: pkgMods, err: err} @@ -1082,10 +1082,10 @@ func (r *resolver) performWorkQueries(ctx context.Context) { // TODO(matloob): Maybe export MainModules.mustGetSingleMainModule and call that. // There are a few other places outside the modload package where we expect // a single main module. - if len(modload.MainModules.Versions()) != 1 { + if len(modload.LoaderState.MainModules.Versions()) != 1 { panic("internal error: number of main modules is not exactly one in resolution phase of go get") } - mainModule := modload.MainModules.Versions()[0] + mainModule := modload.LoaderState.MainModules.Versions()[0] // We know what the result is going to be, assuming the main module is not // empty, (it's the main module itself) but first check to see that there @@ -1275,13 +1275,13 @@ func (r *resolver) loadPackages(ctx context.Context, patterns []string, findPack return nil } - _, pkgs := modload.LoadPackages(ctx, opts, patterns...) + _, pkgs := modload.LoadPackages(modload.LoaderState, ctx, opts, patterns...) for _, pkgPath := range pkgs { const ( parentPath = "" parentIsStd = false ) - _, _, err := modload.Lookup(parentPath, parentIsStd, pkgPath) + _, _, err := modload.Lookup(modload.LoaderState, parentPath, parentIsStd, pkgPath) if err == nil { continue } @@ -1294,15 +1294,13 @@ func (r *resolver) loadPackages(ctx context.Context, patterns []string, findPack continue } - var ( - importMissing *modload.ImportMissingError - ambiguous *modload.AmbiguousImportError - ) - if !errors.As(err, &importMissing) && !errors.As(err, &ambiguous) { - // The package, which is a dependency of something we care about, has some - // problem that we can't resolve with a version change. - // Leave the error for the final LoadPackages call. 
- continue + if _, ok := errors.AsType[*modload.ImportMissingError](err); !ok { + if _, ok := errors.AsType[*modload.AmbiguousImportError](err); !ok { + // The package, which is a dependency of something we care about, has some + // problem that we can't resolve with a version change. + // Leave the error for the final LoadPackages call. + continue + } } path := pkgPath @@ -1496,7 +1494,7 @@ func (r *resolver) disambiguate(cs pathSet) (filtered pathSet, isPackage bool, m continue } - if modload.MainModules.Contains(m.Path) { + if modload.LoaderState.MainModules.Contains(m.Path) { if m.Version == "" { return pathSet{}, true, m, true } @@ -1612,7 +1610,7 @@ func (r *resolver) checkPackageProblems(ctx context.Context, pkgPatterns []strin // info, but switch back to single module mode when fetching sums so that we update // the single module's go.sum file. var exitWorkspace func() - if r.workspace != nil && r.workspace.hasModule(modload.MainModules.Versions()[0].Path) { + if r.workspace != nil && r.workspace.hasModule(modload.LoaderState.MainModules.Versions()[0].Path) { var err error exitWorkspace, err = modload.EnterWorkspace(ctx) if err != nil { @@ -1653,7 +1651,7 @@ func (r *resolver) checkPackageProblems(ctx context.Context, pkgPatterns []strin AllowErrors: true, SilenceNoGoErrors: true, } - matches, pkgs := modload.LoadPackages(ctx, pkgOpts, pkgPatterns...) + matches, pkgs := modload.LoadPackages(modload.LoaderState, ctx, pkgOpts, pkgPatterns...) for _, m := range matches { if len(m.Errs) > 0 { base.SetExitStatus(1) @@ -1661,7 +1659,7 @@ func (r *resolver) checkPackageProblems(ctx context.Context, pkgPatterns []strin } } for _, pkg := range pkgs { - if dir, _, err := modload.Lookup("", false, pkg); err != nil { + if dir, _, err := modload.Lookup(modload.LoaderState, "", false, pkg); err != nil { if dir != "" && errors.Is(err, imports.ErrNoGo) { // Since dir is non-empty, we must have located source files // associated with either the package or its test — ErrNoGo must @@ -1674,7 +1672,7 @@ func (r *resolver) checkPackageProblems(ctx context.Context, pkgPatterns []strin } base.SetExitStatus(1) - if ambiguousErr := (*modload.AmbiguousImportError)(nil); errors.As(err, &ambiguousErr) { + if ambiguousErr, ok := errors.AsType[*modload.AmbiguousImportError](err); ok { for _, m := range ambiguousErr.Modules { relevantMods[m] |= hasPkg } @@ -1692,7 +1690,7 @@ func (r *resolver) checkPackageProblems(ctx context.Context, pkgPatterns []strin } } - reqs := modload.LoadModFile(ctx) + reqs := modload.LoadModFile(modload.LoaderState, ctx) for m := range relevantMods { if reqs.IsDirect(m.Path) { relevantMods[m] |= direct @@ -1716,8 +1714,8 @@ func (r *resolver) checkPackageProblems(ctx context.Context, pkgPatterns []strin for i := range retractions { i := i r.work.Add(func() { - err := modload.CheckRetractions(ctx, retractions[i].m) - if retractErr := (*modload.ModuleRetractedError)(nil); errors.As(err, &retractErr) { + err := modload.CheckRetractions(modload.LoaderState, ctx, retractions[i].m) + if _, ok := errors.AsType[*modload.ModuleRetractedError](err); ok { retractions[i].message = err.Error() } }) @@ -1737,7 +1735,7 @@ func (r *resolver) checkPackageProblems(ctx context.Context, pkgPatterns []strin for i := range deprecations { i := i r.work.Add(func() { - deprecation, err := modload.CheckDeprecation(ctx, deprecations[i].m) + deprecation, err := modload.CheckDeprecation(modload.LoaderState, ctx, deprecations[i].m) if err != nil || deprecation == "" { return } @@ -1767,7 +1765,7 @@ func (r 
*resolver) checkPackageProblems(ctx context.Context, pkgPatterns []strin i := i m := r.buildList[i] mActual := m - if mRepl := modload.Replacement(m); mRepl.Path != "" { + if mRepl := modload.Replacement(modload.LoaderState, m); mRepl.Path != "" { mActual = mRepl } old := module.Version{Path: m.Path, Version: r.initialVersion[m.Path]} @@ -1775,7 +1773,7 @@ func (r *resolver) checkPackageProblems(ctx context.Context, pkgPatterns []strin continue } oldActual := old - if oldRepl := modload.Replacement(old); oldRepl.Path != "" { + if oldRepl := modload.Replacement(modload.LoaderState, old); oldRepl.Path != "" { oldActual = oldRepl } if mActual == oldActual || mActual.Version == "" || !modfetch.HaveSum(oldActual) { @@ -1951,7 +1949,7 @@ func (r *resolver) resolve(q *query, m module.Version) { panic("internal error: resolving a module.Version with an empty path") } - if modload.MainModules.Contains(m.Path) && m.Version != "" { + if modload.LoaderState.MainModules.Contains(m.Path) && m.Version != "" { reportError(q, &modload.QueryMatchesMainModulesError{ MainModules: []module.Version{{Path: m.Path}}, Pattern: q.pattern, @@ -1983,19 +1981,19 @@ func (r *resolver) updateBuildList(ctx context.Context, additions []module.Versi resolved := make([]module.Version, 0, len(r.resolvedVersion)) for mPath, rv := range r.resolvedVersion { - if !modload.MainModules.Contains(mPath) { + if !modload.LoaderState.MainModules.Contains(mPath) { resolved = append(resolved, module.Version{Path: mPath, Version: rv.version}) } } - changed, err := modload.EditBuildList(ctx, additions, resolved) + changed, err := modload.EditBuildList(modload.LoaderState, ctx, additions, resolved) if err != nil { if errors.Is(err, gover.ErrTooNew) { toolchain.SwitchOrFatal(ctx, err) } - var constraint *modload.ConstraintError - if !errors.As(err, &constraint) { + constraint, ok := errors.AsType[*modload.ConstraintError](err) + if !ok { base.Fatal(err) } @@ -2066,8 +2064,11 @@ func reqsFromGoMod(f *modfile.File) []module.Version { // does not exist at the requested version, either because the module does not // exist at all or because it does not include that specific version. func isNoSuchModuleVersion(err error) bool { - var noMatch *modload.NoMatchingVersionError - return errors.Is(err, os.ErrNotExist) || errors.As(err, &noMatch) + if errors.Is(err, os.ErrNotExist) { + return true + } + _, ok := errors.AsType[*modload.NoMatchingVersionError](err) + return ok } // isNoSuchPackageVersion reports whether err indicates that the requested @@ -2075,8 +2076,11 @@ func isNoSuchModuleVersion(err error) bool { // that could contain it exists at that version, or because every such module // that does exist does not actually contain the package. func isNoSuchPackageVersion(err error) bool { - var noPackage *modload.PackageNotInModuleError - return isNoSuchModuleVersion(err) || errors.As(err, &noPackage) + if isNoSuchModuleVersion(err) { + return true + } + _, ok := errors.AsType[*modload.PackageNotInModuleError](err) + return ok } // workspace represents the set of modules in a workspace. diff --git a/src/cmd/go/internal/modget/query.go b/src/cmd/go/internal/modget/query.go index 05872d52ec4e04..7076bbadce898b 100644 --- a/src/cmd/go/internal/modget/query.go +++ b/src/cmd/go/internal/modget/query.go @@ -184,7 +184,7 @@ func (q *query) validate() error { if q.pattern == "all" { // If there is no main module, "all" is not meaningful. 
- if !modload.HasModRoot() { + if !modload.HasModRoot(modload.LoaderState) { return fmt.Errorf(`cannot match "all": %v`, modload.ErrNoModRoot) } if !versionOkForMainModule(q.version) { @@ -192,7 +192,7 @@ func (q *query) validate() error { // request that we remove all module requirements, leaving only the main // module and standard library. Perhaps we should implement that someday. return &modload.QueryUpgradesAllError{ - MainModules: modload.MainModules.Versions(), + MainModules: modload.LoaderState.MainModules.Versions(), Query: q.version, } } @@ -283,7 +283,7 @@ func reportError(q *query, err error) { // If err already mentions all of the relevant parts of q, just log err to // reduce stutter. Otherwise, log both q and err. // - // TODO(bcmills): Use errors.As to unpack these errors instead of parsing + // TODO(bcmills): Use errors.AsType to unpack these errors instead of parsing // strings with regular expressions. if !utf8.ValidString(q.pattern) || !utf8.ValidString(q.version) { diff --git a/src/cmd/go/internal/modload/build.go b/src/cmd/go/internal/modload/build.go index 6e30afd5247b36..5b1b643d272dfd 100644 --- a/src/cmd/go/internal/modload/build.go +++ b/src/cmd/go/internal/modload/build.go @@ -51,8 +51,8 @@ func findStandardImportPath(path string) string { // a given package. If modules are not enabled or if the package is in the // standard library or if the package was not successfully loaded with // LoadPackages or ImportFromFiles, nil is returned. -func PackageModuleInfo(ctx context.Context, pkgpath string) *modinfo.ModulePublic { - if isStandardImportPath(pkgpath) || !Enabled() { +func PackageModuleInfo(loaderstate *State, ctx context.Context, pkgpath string) *modinfo.ModulePublic { + if isStandardImportPath(pkgpath) || !Enabled(loaderstate) { return nil } m, ok := findModule(loaded, pkgpath) @@ -60,23 +60,23 @@ func PackageModuleInfo(ctx context.Context, pkgpath string) *modinfo.ModulePubli return nil } - rs := LoadModFile(ctx) - return moduleInfo(ctx, rs, m, 0, nil) + rs := LoadModFile(loaderstate, ctx) + return moduleInfo(loaderstate, ctx, rs, m, 0, nil) } // PackageModRoot returns the module root directory for the module that provides // a given package. If modules are not enabled or if the package is in the // standard library or if the package was not successfully loaded with // LoadPackages or ImportFromFiles, the empty string is returned. 
-func PackageModRoot(ctx context.Context, pkgpath string) string { - if isStandardImportPath(pkgpath) || !Enabled() || cfg.BuildMod == "vendor" { +func PackageModRoot(loaderstate *State, ctx context.Context, pkgpath string) string { + if isStandardImportPath(pkgpath) || !Enabled(loaderstate) || cfg.BuildMod == "vendor" { return "" } m, ok := findModule(loaded, pkgpath) if !ok { return "" } - root, _, err := fetch(ctx, m) + root, _, err := fetch(loaderstate, ctx, m) if err != nil { return "" } @@ -84,26 +84,26 @@ func PackageModRoot(ctx context.Context, pkgpath string) string { } func ModuleInfo(ctx context.Context, path string) *modinfo.ModulePublic { - if !Enabled() { + if !Enabled(LoaderState) { return nil } if path, vers, found := strings.Cut(path, "@"); found { m := module.Version{Path: path, Version: vers} - return moduleInfo(ctx, nil, m, 0, nil) + return moduleInfo(LoaderState, ctx, nil, m, 0, nil) } - rs := LoadModFile(ctx) + rs := LoadModFile(LoaderState, ctx) var ( v string ok bool ) if rs.pruning == pruned { - v, ok = rs.rootSelected(path) + v, ok = rs.rootSelected(LoaderState, path) } if !ok { - mg, err := rs.Graph(ctx) + mg, err := rs.Graph(LoaderState, ctx) if err != nil { base.Fatal(err) } @@ -119,7 +119,7 @@ func ModuleInfo(ctx context.Context, path string) *modinfo.ModulePublic { } } - return moduleInfo(ctx, rs, module.Version{Path: path, Version: v}, 0, nil) + return moduleInfo(LoaderState, ctx, rs, module.Version{Path: path, Version: v}, 0, nil) } // addUpdate fills in m.Update if an updated version is available. @@ -128,12 +128,11 @@ func addUpdate(ctx context.Context, m *modinfo.ModulePublic) { return } - info, err := Query(ctx, m.Path, "upgrade", m.Version, CheckAllowed) - var noVersionErr *NoMatchingVersionError - if errors.Is(err, ErrDisallowed) || + info, err := Query(LoaderState, ctx, m.Path, "upgrade", m.Version, CheckAllowed) + if _, ok := errors.AsType[*NoMatchingVersionError](err); ok || errors.Is(err, fs.ErrNotExist) || - errors.As(err, &noVersionErr) { - // Ignore "not found" and "no matching version" errors. + errors.Is(err, ErrDisallowed) { + // Ignore "no matching version" and "not found" errors. // This means the proxy has no matching version or no versions at all. // // Ignore "disallowed" errors. This means the current version is @@ -222,7 +221,7 @@ func addVersions(ctx context.Context, m *modinfo.ModulePublic, listRetracted boo if listRetracted { allowed = CheckExclusions } - v, origin, err := versions(ctx, m.Path, allowed) + v, origin, err := versions(LoaderState, ctx, m.Path, allowed) if err != nil && m.Error == nil { m.Error = &modinfo.ModuleError{Err: err.Error()} } @@ -232,16 +231,16 @@ func addVersions(ctx context.Context, m *modinfo.ModulePublic, listRetracted boo // addRetraction fills in m.Retracted if the module was retracted by its author. // m.Error is set if there's an error loading retraction information. -func addRetraction(ctx context.Context, m *modinfo.ModulePublic) { +func addRetraction(loaderstate *State, ctx context.Context, m *modinfo.ModulePublic) { if m.Version == "" { return } - err := CheckRetractions(ctx, module.Version{Path: m.Path, Version: m.Version}) - var noVersionErr *NoMatchingVersionError - var retractErr *ModuleRetractedError - if err == nil || errors.Is(err, fs.ErrNotExist) || errors.As(err, &noVersionErr) { - // Ignore "not found" and "no matching version" errors. 
+ err := CheckRetractions(loaderstate, ctx, module.Version{Path: m.Path, Version: m.Version}) + if err == nil { + return + } else if _, ok := errors.AsType[*NoMatchingVersionError](err); ok || errors.Is(err, fs.ErrNotExist) { + // Ignore "no matching version" and "not found" errors. // This means the proxy has no matching version or no versions at all. // // We should report other errors though. An attacker that controls the @@ -250,7 +249,7 @@ func addRetraction(ctx context.Context, m *modinfo.ModulePublic) { // hide versions, since the "list" and "latest" endpoints are not // authenticated. return - } else if errors.As(err, &retractErr) { + } else if retractErr, ok := errors.AsType[*ModuleRetractedError](err); ok { if len(retractErr.Rationale) == 0 { m.Retracted = []string{"retracted by module author"} } else { @@ -264,10 +263,9 @@ func addRetraction(ctx context.Context, m *modinfo.ModulePublic) { // addDeprecation fills in m.Deprecated if the module was deprecated by its // author. m.Error is set if there's an error loading deprecation information. func addDeprecation(ctx context.Context, m *modinfo.ModulePublic) { - deprecation, err := CheckDeprecation(ctx, module.Version{Path: m.Path, Version: m.Version}) - var noVersionErr *NoMatchingVersionError - if errors.Is(err, fs.ErrNotExist) || errors.As(err, &noVersionErr) { - // Ignore "not found" and "no matching version" errors. + deprecation, err := CheckDeprecation(LoaderState, ctx, module.Version{Path: m.Path, Version: m.Version}) + if _, ok := errors.AsType[*NoMatchingVersionError](err); ok || errors.Is(err, fs.ErrNotExist) { + // Ignore "no matching version" and "not found" errors. // This means the proxy has no matching version or no versions at all. // // We should report other errors though. An attacker that controls the @@ -289,8 +287,8 @@ func addDeprecation(ctx context.Context, m *modinfo.ModulePublic) { // moduleInfo returns information about module m, loaded from the requirements // in rs (which may be nil to indicate that m was not loaded from a requirement // graph). 
-func moduleInfo(ctx context.Context, rs *Requirements, m module.Version, mode ListMode, reuse map[module.Version]*modinfo.ModulePublic) *modinfo.ModulePublic { - if m.Version == "" && MainModules.Contains(m.Path) { +func moduleInfo(loaderstate *State, ctx context.Context, rs *Requirements, m module.Version, mode ListMode, reuse map[module.Version]*modinfo.ModulePublic) *modinfo.ModulePublic { + if m.Version == "" && loaderstate.MainModules.Contains(m.Path) { info := &modinfo.ModulePublic{ Path: m.Path, Version: m.Version, @@ -301,7 +299,7 @@ func moduleInfo(ctx context.Context, rs *Requirements, m module.Version, mode Li } else { panic("internal error: GoVersion not set for main module") } - if modRoot := MainModules.ModRoot(m); modRoot != "" { + if modRoot := loaderstate.MainModules.ModRoot(m); modRoot != "" { info.Dir = modRoot info.GoMod = modFilePath(modRoot) } @@ -324,7 +322,7 @@ func moduleInfo(ctx context.Context, rs *Requirements, m module.Version, mode Li } checksumOk := func(suffix string) bool { - return rs == nil || m.Version == "" || !mustHaveSums() || + return rs == nil || m.Version == "" || !mustHaveSums(loaderstate) || modfetch.HaveSum(module.Version{Path: m.Path, Version: m.Version + suffix}) } @@ -332,7 +330,7 @@ func moduleInfo(ctx context.Context, rs *Requirements, m module.Version, mode Li if m.Version != "" { if old := reuse[mod]; old != nil { - if err := checkReuse(ctx, mod, old.Origin); err == nil { + if err := checkReuse(loaderstate, ctx, mod, old.Origin); err == nil { *m = *old m.Query = "" m.Dir = "" @@ -340,7 +338,7 @@ func moduleInfo(ctx context.Context, rs *Requirements, m module.Version, mode Li } } - if q, err := Query(ctx, m.Path, m.Version, "", nil); err != nil { + if q, err := Query(loaderstate, ctx, m.Path, m.Version, "", nil); err != nil { m.Error = &modinfo.ModuleError{Err: err.Error()} } else { m.Version = q.Version @@ -351,7 +349,7 @@ func moduleInfo(ctx context.Context, rs *Requirements, m module.Version, mode Li if m.GoVersion == "" && checksumOk("/go.mod") { // Load the go.mod file to determine the Go version, since it hasn't // already been populated from rawGoVersion. 
- if summary, err := rawGoModSummary(mod); err == nil && summary.goVersion != "" { + if summary, err := rawGoModSummary(loaderstate, mod); err == nil && summary.goVersion != "" { m.GoVersion = summary.goVersion } } @@ -379,7 +377,7 @@ func moduleInfo(ctx context.Context, rs *Requirements, m module.Version, mode Li } if mode&ListRetracted != 0 { - addRetraction(ctx, m) + addRetraction(loaderstate, ctx, m) } } } @@ -391,7 +389,7 @@ func moduleInfo(ctx context.Context, rs *Requirements, m module.Version, mode Li return info } - r := Replacement(m) + r := Replacement(loaderstate, m) if r.Path == "" { if cfg.BuildMod == "vendor" { // It's tempting to fill in the "Dir" field to point within the vendor @@ -420,7 +418,7 @@ func moduleInfo(ctx context.Context, rs *Requirements, m module.Version, mode Li if filepath.IsAbs(r.Path) { info.Replace.Dir = r.Path } else { - info.Replace.Dir = filepath.Join(replaceRelativeTo(), r.Path) + info.Replace.Dir = filepath.Join(replaceRelativeTo(loaderstate), r.Path) } info.Replace.GoMod = filepath.Join(info.Replace.Dir, "go.mod") } diff --git a/src/cmd/go/internal/modload/buildlist.go b/src/cmd/go/internal/modload/buildlist.go index 2ba04f707b5472..54ec4d23ebe08e 100644 --- a/src/cmd/go/internal/modload/buildlist.go +++ b/src/cmd/go/internal/modload/buildlist.go @@ -85,16 +85,6 @@ type cachedGraph struct { err error // If err is non-nil, mg may be incomplete (but must still be non-nil). } -// requirements is the requirement graph for the main module. -// -// It is always non-nil if the main module's go.mod file has been loaded. -// -// This variable should only be read from the loadModFile function, and should -// only be written in the loadModFile and commitRequirements functions. -// All other functions that need or produce a *Requirements should -// accept and/or return an explicit parameter. -var requirements *Requirements - func mustHaveGoRoot(roots []module.Version) { for _, m := range roots { if m.Path == "go" { @@ -114,21 +104,21 @@ func mustHaveGoRoot(roots []module.Version) { // // If vendoring is in effect, the caller must invoke initVendor on the returned // *Requirements before any other method. -func newRequirements(pruning modPruning, rootModules []module.Version, direct map[string]bool) *Requirements { +func newRequirements(loaderstate *State, pruning modPruning, rootModules []module.Version, direct map[string]bool) *Requirements { mustHaveGoRoot(rootModules) if pruning != workspace { - if workFilePath != "" { + if loaderstate.workFilePath != "" { panic("in workspace mode, but pruning is not workspace in newRequirements") } } if pruning != workspace { - if workFilePath != "" { + if loaderstate.workFilePath != "" { panic("in workspace mode, but pruning is not workspace in newRequirements") } for i, m := range rootModules { - if m.Version == "" && MainModules.Contains(m.Path) { + if m.Version == "" && loaderstate.MainModules.Contains(m.Path) { panic(fmt.Sprintf("newRequirements called with untrimmed build list: rootModules[%v] is a main module", i)) } if m.Path == "" || m.Version == "" { @@ -172,10 +162,10 @@ func (rs *Requirements) String() string { // initVendor initializes rs.graph from the given list of vendored module // dependencies, overriding the graph that would normally be loaded from module // requirements. 
-func (rs *Requirements) initVendor(vendorList []module.Version) { +func (rs *Requirements) initVendor(loaderstate *State, vendorList []module.Version) { rs.graphOnce.Do(func() { - roots := MainModules.Versions() - if inWorkspaceMode() { + roots := loaderstate.MainModules.Versions() + if inWorkspaceMode(loaderstate) { // Use rs.rootModules to pull in the go and toolchain roots // from the go.work file and preserve the invariant that all // of rs.rootModules are in mg.g. @@ -186,7 +176,7 @@ func (rs *Requirements) initVendor(vendorList []module.Version) { } if rs.pruning == pruned { - mainModule := MainModules.mustGetSingleMainModule() + mainModule := loaderstate.MainModules.mustGetSingleMainModule(loaderstate) // The roots of a single pruned module should already include every module in the // vendor list, because the vendored modules are the same as those needed // for graph pruning. @@ -194,7 +184,7 @@ func (rs *Requirements) initVendor(vendorList []module.Version) { // Just to be sure, we'll double-check that here. inconsistent := false for _, m := range vendorList { - if v, ok := rs.rootSelected(m.Path); !ok || v != m.Version { + if v, ok := rs.rootSelected(loaderstate, m.Path); !ok || v != m.Version { base.Errorf("go: vendored module %v should be required explicitly in go.mod", m) inconsistent = true } @@ -218,15 +208,15 @@ func (rs *Requirements) initVendor(vendorList []module.Version) { // graph, but still distinguishes between direct and indirect // dependencies. vendorMod := module.Version{Path: "vendor/modules.txt", Version: ""} - if inWorkspaceMode() { - for _, m := range MainModules.Versions() { - reqs, _ := rootsFromModFile(m, MainModules.ModFile(m), omitToolchainRoot) + if inWorkspaceMode(loaderstate) { + for _, m := range loaderstate.MainModules.Versions() { + reqs, _ := rootsFromModFile(loaderstate, m, loaderstate.MainModules.ModFile(m), omitToolchainRoot) mg.g.Require(m, append(reqs, vendorMod)) } mg.g.Require(vendorMod, vendorList) } else { - mainModule := MainModules.mustGetSingleMainModule() + mainModule := loaderstate.MainModules.mustGetSingleMainModule(loaderstate) mg.g.Require(mainModule, append(rs.rootModules, vendorMod)) mg.g.Require(vendorMod, vendorList) } @@ -237,8 +227,8 @@ func (rs *Requirements) initVendor(vendorList []module.Version) { } // GoVersion returns the Go language version for the Requirements. -func (rs *Requirements) GoVersion() string { - v, _ := rs.rootSelected("go") +func (rs *Requirements) GoVersion(loaderstate *State) string { + v, _ := rs.rootSelected(loaderstate, "go") if v == "" { panic("internal error: missing go version in modload.Requirements") } @@ -248,8 +238,8 @@ func (rs *Requirements) GoVersion() string { // rootSelected returns the version of the root dependency with the given module // path, or the zero module.Version and ok=false if the module is not a root // dependency. -func (rs *Requirements) rootSelected(path string) (version string, ok bool) { - if MainModules.Contains(path) { +func (rs *Requirements) rootSelected(loaderstate *State, path string) (version string, ok bool) { + if loaderstate.MainModules.Contains(path) { return "", true } if v, ok := rs.maxRootVersion[path]; ok { @@ -262,9 +252,9 @@ func (rs *Requirements) rootSelected(path string) (version string, ok bool) { // of the same module or a requirement on any version of the main module. // Redundant requirements should be pruned, but they may influence version // selection. 
-func (rs *Requirements) hasRedundantRoot() bool { +func (rs *Requirements) hasRedundantRoot(loaderstate *State) bool { for i, m := range rs.rootModules { - if MainModules.Contains(m.Path) || (i > 0 && m.Path == rs.rootModules[i-1].Path) { + if loaderstate.MainModules.Contains(m.Path) || (i > 0 && m.Path == rs.rootModules[i-1].Path) { return true } } @@ -279,9 +269,9 @@ func (rs *Requirements) hasRedundantRoot() bool { // // If the requirements of any relevant module fail to load, Graph also // returns a non-nil error of type *mvs.BuildListError. -func (rs *Requirements) Graph(ctx context.Context) (*ModuleGraph, error) { +func (rs *Requirements) Graph(loaderstate *State, ctx context.Context) (*ModuleGraph, error) { rs.graphOnce.Do(func() { - mg, mgErr := readModGraph(ctx, rs.pruning, rs.rootModules, nil) + mg, mgErr := readModGraph(loaderstate, ctx, rs.pruning, rs.rootModules, nil) rs.graph.Store(&cachedGraph{mg, mgErr}) }) cached := rs.graph.Load() @@ -317,7 +307,7 @@ var readModGraphDebugOnce sync.Once // // Unlike LoadModGraph, readModGraph does not attempt to diagnose or update // inconsistent roots. -func readModGraph(ctx context.Context, pruning modPruning, roots []module.Version, unprune map[module.Version]bool) (*ModuleGraph, error) { +func readModGraph(loaderstate *State, ctx context.Context, pruning modPruning, roots []module.Version, unprune map[module.Version]bool) (*ModuleGraph, error) { mustHaveGoRoot(roots) if pruning == pruned { // Enable diagnostics for lazy module loading @@ -343,10 +333,10 @@ func readModGraph(ctx context.Context, pruning modPruning, roots []module.Versio } var graphRoots []module.Version - if inWorkspaceMode() { + if inWorkspaceMode(loaderstate) { graphRoots = roots } else { - graphRoots = MainModules.Versions() + graphRoots = loaderstate.MainModules.Versions() } var ( mu sync.Mutex // guards mg.g and hasError during loading @@ -357,10 +347,10 @@ func readModGraph(ctx context.Context, pruning modPruning, roots []module.Versio ) if pruning != workspace { - if inWorkspaceMode() { + if inWorkspaceMode(loaderstate) { panic("pruning is not workspace in workspace mode") } - mg.g.Require(MainModules.mustGetSingleMainModule(), roots) + mg.g.Require(loaderstate.MainModules.mustGetSingleMainModule(loaderstate), roots) } type dedupKey struct { @@ -377,7 +367,7 @@ func readModGraph(ctx context.Context, pruning modPruning, roots []module.Versio // m's go.mod file indicates that it supports graph pruning. loadOne := func(m module.Version) (*modFileSummary, error) { return mg.loadCache.Do(m, func() (*modFileSummary, error) { - summary, err := goModSummary(m) + summary, err := goModSummary(loaderstate, m) mu.Lock() if err == nil { @@ -537,12 +527,12 @@ func (mg *ModuleGraph) findError() error { return nil } -func (mg *ModuleGraph) allRootsSelected() bool { +func (mg *ModuleGraph) allRootsSelected(loaderstate *State) bool { var roots []module.Version - if inWorkspaceMode() { - roots = MainModules.Versions() + if inWorkspaceMode(loaderstate) { + roots = loaderstate.MainModules.Versions() } else { - roots, _ = mg.g.RequiredBy(MainModules.mustGetSingleMainModule()) + roots, _ = mg.g.RequiredBy(loaderstate.MainModules.mustGetSingleMainModule(loaderstate)) } for _, m := range roots { if mg.Selected(m.Path) != m.Version { @@ -563,13 +553,13 @@ func (mg *ModuleGraph) allRootsSelected() bool { // LoadModGraph need only be called if LoadPackages is not, // typically in commands that care about modules but no particular package. 
func LoadModGraph(ctx context.Context, goVersion string) (*ModuleGraph, error) { - rs, err := loadModFile(ctx, nil) + rs, err := loadModFile(LoaderState, ctx, nil) if err != nil { return nil, err } if goVersion != "" { - v, _ := rs.rootSelected("go") + v, _ := rs.rootSelected(LoaderState, "go") if gover.Compare(v, gover.GoStrictVersion) >= 0 && gover.Compare(goVersion, v) < 0 { return nil, fmt.Errorf("requested Go version %s cannot load module graph (requires Go >= %s)", goVersion, v) } @@ -579,17 +569,17 @@ func LoadModGraph(ctx context.Context, goVersion string) (*ModuleGraph, error) { // Use newRequirements instead of convertDepth because convertDepth // also updates roots; here, we want to report the unmodified roots // even though they may seem inconsistent. - rs = newRequirements(unpruned, rs.rootModules, rs.direct) + rs = newRequirements(LoaderState, unpruned, rs.rootModules, rs.direct) } - return rs.Graph(ctx) + return rs.Graph(LoaderState, ctx) } - rs, mg, err := expandGraph(ctx, rs) + rs, mg, err := expandGraph(LoaderState, ctx, rs) if err != nil { return nil, err } - requirements = rs + LoaderState.requirements = rs return mg, nil } @@ -604,22 +594,22 @@ func LoadModGraph(ctx context.Context, goVersion string) (*ModuleGraph, error) { // from those roots and any error encountered while loading that graph. // expandGraph returns non-nil requirements and a non-nil graph regardless of // errors. On error, the roots might not be updated to be consistent. -func expandGraph(ctx context.Context, rs *Requirements) (*Requirements, *ModuleGraph, error) { - mg, mgErr := rs.Graph(ctx) +func expandGraph(loaderstate *State, ctx context.Context, rs *Requirements) (*Requirements, *ModuleGraph, error) { + mg, mgErr := rs.Graph(loaderstate, ctx) if mgErr != nil { // Without the graph, we can't update the roots: we don't know which // versions of transitive dependencies would be selected. return rs, mg, mgErr } - if !mg.allRootsSelected() { + if !mg.allRootsSelected(loaderstate) { // The roots of rs are not consistent with the rest of the graph. Update // them. In an unpruned module this is a no-op for the build list as a whole — // it just promotes what were previously transitive requirements to be // roots — but in a pruned module it may pull in previously-irrelevant // transitive dependencies. - newRS, rsErr := updateRoots(ctx, rs.direct, rs, nil, nil, false) + newRS, rsErr := updateRoots(loaderstate, ctx, rs.direct, rs, nil, nil, false) if rsErr != nil { // Failed to update roots, perhaps because of an error in a transitive // dependency needed for the update. Return the original Requirements @@ -627,7 +617,7 @@ func expandGraph(ctx context.Context, rs *Requirements) (*Requirements, *ModuleG return rs, mg, rsErr } rs = newRS - mg, mgErr = rs.Graph(ctx) + mg, mgErr = rs.Graph(loaderstate, ctx) } return rs, mg, mgErr @@ -649,16 +639,16 @@ func expandGraph(ctx context.Context, rs *Requirements) (*Requirements, *ModuleG // On success, EditBuildList reports whether the selected version of any module // in the build list may have been changed (possibly to or from "none") as a // result. 
-func EditBuildList(ctx context.Context, add, mustSelect []module.Version) (changed bool, err error) { - rs, changed, err := editRequirements(ctx, LoadModFile(ctx), add, mustSelect) +func EditBuildList(loaderstate *State, ctx context.Context, add, mustSelect []module.Version) (changed bool, err error) { + rs, changed, err := editRequirements(loaderstate, ctx, LoadModFile(loaderstate, ctx), add, mustSelect) if err != nil { return false, err } - requirements = rs + loaderstate.requirements = rs return changed, nil } -func overrideRoots(ctx context.Context, rs *Requirements, replace []module.Version) *Requirements { +func overrideRoots(loaderstate *State, ctx context.Context, rs *Requirements, replace []module.Version) *Requirements { drop := make(map[string]bool) for _, m := range replace { drop[m.Path] = true @@ -671,7 +661,7 @@ func overrideRoots(ctx context.Context, rs *Requirements, replace []module.Versi } roots = append(roots, replace...) gover.ModSort(roots) - return newRequirements(rs.pruning, roots, rs.direct) + return newRequirements(loaderstate, rs.pruning, roots, rs.direct) } // A ConstraintError describes inconsistent constraints in EditBuildList @@ -775,28 +765,28 @@ func (c Conflict) String() string { // tidyRoots trims the root dependencies to the minimal requirements needed to // both retain the same versions of all packages in pkgs and satisfy the // graph-pruning invariants (if applicable). -func tidyRoots(ctx context.Context, rs *Requirements, pkgs []*loadPkg) (*Requirements, error) { - mainModule := MainModules.mustGetSingleMainModule() +func tidyRoots(loaderstate *State, ctx context.Context, rs *Requirements, pkgs []*loadPkg) (*Requirements, error) { + mainModule := loaderstate.MainModules.mustGetSingleMainModule(loaderstate) if rs.pruning == unpruned { - return tidyUnprunedRoots(ctx, mainModule, rs, pkgs) + return tidyUnprunedRoots(loaderstate, ctx, mainModule, rs, pkgs) } - return tidyPrunedRoots(ctx, mainModule, rs, pkgs) + return tidyPrunedRoots(loaderstate, ctx, mainModule, rs, pkgs) } -func updateRoots(ctx context.Context, direct map[string]bool, rs *Requirements, pkgs []*loadPkg, add []module.Version, rootsImported bool) (*Requirements, error) { +func updateRoots(loaderstate *State, ctx context.Context, direct map[string]bool, rs *Requirements, pkgs []*loadPkg, add []module.Version, rootsImported bool) (*Requirements, error) { switch rs.pruning { case unpruned: - return updateUnprunedRoots(ctx, direct, rs, add) + return updateUnprunedRoots(loaderstate, ctx, direct, rs, add) case pruned: - return updatePrunedRoots(ctx, direct, rs, pkgs, add, rootsImported) + return updatePrunedRoots(loaderstate, ctx, direct, rs, pkgs, add, rootsImported) case workspace: - return updateWorkspaceRoots(ctx, direct, rs, add) + return updateWorkspaceRoots(loaderstate, ctx, direct, rs, add) default: panic(fmt.Sprintf("unsupported pruning mode: %v", rs.pruning)) } } -func updateWorkspaceRoots(ctx context.Context, direct map[string]bool, rs *Requirements, add []module.Version) (*Requirements, error) { +func updateWorkspaceRoots(loaderstate *State, ctx context.Context, direct map[string]bool, rs *Requirements, add []module.Version) (*Requirements, error) { if len(add) != 0 { // add should be empty in workspace mode because workspace mode implies // -mod=readonly, which in turn implies no new requirements. The code path @@ -807,7 +797,7 @@ func updateWorkspaceRoots(ctx context.Context, direct map[string]bool, rs *Requi // return an error. 
panic("add is not empty") } - return newRequirements(workspace, rs.rootModules, direct), nil + return newRequirements(loaderstate, workspace, rs.rootModules, direct), nil } // tidyPrunedRoots returns a minimal set of root requirements that maintains the @@ -826,16 +816,16 @@ func updateWorkspaceRoots(ctx context.Context, direct map[string]bool, rs *Requi // To ensure that the loading process eventually converges, the caller should // add any needed roots from the tidy root set (without removing existing untidy // roots) until the set of roots has converged. -func tidyPrunedRoots(ctx context.Context, mainModule module.Version, old *Requirements, pkgs []*loadPkg) (*Requirements, error) { +func tidyPrunedRoots(loaderstate *State, ctx context.Context, mainModule module.Version, old *Requirements, pkgs []*loadPkg) (*Requirements, error) { var ( roots []module.Version pathIsRoot = map[string]bool{mainModule.Path: true} ) - if v, ok := old.rootSelected("go"); ok { + if v, ok := old.rootSelected(loaderstate, "go"); ok { roots = append(roots, module.Version{Path: "go", Version: v}) pathIsRoot["go"] = true } - if v, ok := old.rootSelected("toolchain"); ok { + if v, ok := old.rootSelected(loaderstate, "toolchain"); ok { roots = append(roots, module.Version{Path: "toolchain", Version: v}) pathIsRoot["toolchain"] = true } @@ -857,7 +847,7 @@ func tidyPrunedRoots(ctx context.Context, mainModule module.Version, old *Requir if !pkg.flags.has(pkgInAll) { continue } - if pkg.fromExternalModule() && !pathIsRoot[pkg.mod.Path] { + if pkg.fromExternalModule(loaderstate) && !pathIsRoot[pkg.mod.Path] { roots = append(roots, pkg.mod) pathIsRoot[pkg.mod.Path] = true } @@ -865,11 +855,11 @@ func tidyPrunedRoots(ctx context.Context, mainModule module.Version, old *Requir queued[pkg] = true } gover.ModSort(roots) - tidy := newRequirements(pruned, roots, old.direct) + tidy := newRequirements(loaderstate, pruned, roots, old.direct) for len(queue) > 0 { roots = tidy.rootModules - mg, err := tidy.Graph(ctx) + mg, err := tidy.Graph(loaderstate, ctx) if err != nil { return nil, err } @@ -902,12 +892,12 @@ func tidyPrunedRoots(ctx context.Context, mainModule module.Version, old *Requir if len(roots) > len(tidy.rootModules) { gover.ModSort(roots) - tidy = newRequirements(pruned, roots, tidy.direct) + tidy = newRequirements(loaderstate, pruned, roots, tidy.direct) } } roots = tidy.rootModules - _, err := tidy.Graph(ctx) + _, err := tidy.Graph(loaderstate, ctx) if err != nil { return nil, err } @@ -931,8 +921,8 @@ func tidyPrunedRoots(ctx context.Context, mainModule module.Version, old *Requir pkg := pkg q.Add(func() { skipModFile := true - _, _, _, _, err := importFromModules(ctx, pkg.path, tidy, nil, skipModFile) - if aie := (*AmbiguousImportError)(nil); errors.As(err, &aie) { + _, _, _, _, err := importFromModules(loaderstate, ctx, pkg.path, tidy, nil, skipModFile) + if _, ok := errors.AsType[*AmbiguousImportError](err); ok { disambiguateRoot.Store(pkg.mod, true) } }) @@ -948,8 +938,8 @@ func tidyPrunedRoots(ctx context.Context, mainModule module.Version, old *Requir if len(roots) > len(tidy.rootModules) { module.Sort(roots) - tidy = newRequirements(pruned, roots, tidy.direct) - _, err = tidy.Graph(ctx) + tidy = newRequirements(loaderstate, pruned, roots, tidy.direct) + _, err = tidy.Graph(loaderstate, ctx) if err != nil { return nil, err } @@ -1009,7 +999,7 @@ func tidyPrunedRoots(ctx context.Context, mainModule module.Version, old *Requir // // (See https://golang.org/design/36460-lazy-module-loading#invariants for more // 
detail.) -func updatePrunedRoots(ctx context.Context, direct map[string]bool, rs *Requirements, pkgs []*loadPkg, add []module.Version, rootsImported bool) (*Requirements, error) { +func updatePrunedRoots(loaderstate *State, ctx context.Context, direct map[string]bool, rs *Requirements, pkgs []*loadPkg, add []module.Version, rootsImported bool) (*Requirements, error) { roots := rs.rootModules rootsUpgraded := false @@ -1019,7 +1009,7 @@ func updatePrunedRoots(ctx context.Context, direct map[string]bool, rs *Requirem // either pkgInAll or pkgIsRoot is included as a root.” needSort := false for _, pkg := range pkgs { - if !pkg.fromExternalModule() { + if !pkg.fromExternalModule(loaderstate) { // pkg was not loaded from a module dependency, so we don't need // to do anything special to maintain that dependency. continue @@ -1068,7 +1058,7 @@ func updatePrunedRoots(ctx context.Context, direct map[string]bool, rs *Requirem continue } - if _, ok := rs.rootSelected(pkg.mod.Path); ok { + if _, ok := rs.rootSelected(loaderstate, pkg.mod.Path); ok { // It is possible that the main module's go.mod file is incomplete or // otherwise erroneous — for example, perhaps the author forgot to 'git // add' their updated go.mod file after adding a new package import, or @@ -1104,7 +1094,7 @@ func updatePrunedRoots(ctx context.Context, direct map[string]bool, rs *Requirem } for _, m := range add { - if v, ok := rs.rootSelected(m.Path); !ok || gover.ModCompare(m.Path, v, m.Version) < 0 { + if v, ok := rs.rootSelected(loaderstate, m.Path); !ok || gover.ModCompare(m.Path, v, m.Version) < 0 { roots = append(roots, m) rootsUpgraded = true needSort = true @@ -1121,7 +1111,7 @@ func updatePrunedRoots(ctx context.Context, direct map[string]bool, rs *Requirem // We've added or upgraded one or more roots, so load the full module // graph so that we can update those roots to be consistent with other // requirements. - if mustHaveCompleteRequirements() { + if mustHaveCompleteRequirements(loaderstate) { // Our changes to the roots may have moved dependencies into or out of // the graph-pruning horizon, which could in turn change the selected // versions of other modules. (For pruned modules adding or removing an @@ -1129,9 +1119,9 @@ func updatePrunedRoots(ctx context.Context, direct map[string]bool, rs *Requirem return rs, errGoModDirty } - rs = newRequirements(pruned, roots, direct) + rs = newRequirements(loaderstate, pruned, roots, direct) var err error - mg, err = rs.Graph(ctx) + mg, err = rs.Graph(loaderstate, ctx) if err != nil { return rs, err } @@ -1145,20 +1135,20 @@ func updatePrunedRoots(ctx context.Context, direct map[string]bool, rs *Requirem // We've already loaded the full module graph, which includes the // requirements of all of the root modules — even the transitive // requirements, if they are unpruned! - mg, _ = rs.Graph(ctx) + mg, _ = rs.Graph(loaderstate, ctx) } else if cfg.BuildMod == "vendor" { // We can't spot-check the requirements of other modules because we // don't in general have their go.mod files available in the vendor // directory. (Fortunately this case is impossible, because mg.graph is // always non-nil in vendor mode!) panic("internal error: rs.graph is unexpectedly nil with -mod=vendor") - } else if !spotCheckRoots(ctx, rs, spotCheckRoot) { + } else if !spotCheckRoots(loaderstate, ctx, rs, spotCheckRoot) { // We spot-checked the explicit requirements of the roots that are // relevant to the packages we've loaded. 
Unfortunately, they're // inconsistent in some way; we need to load the full module graph // so that we can fix the roots properly. var err error - mg, err = rs.Graph(ctx) + mg, err = rs.Graph(loaderstate, ctx) if err != nil { return rs, err } @@ -1168,7 +1158,7 @@ func updatePrunedRoots(ctx context.Context, direct map[string]bool, rs *Requirem roots = make([]module.Version, 0, len(rs.rootModules)) rootsUpgraded = false inRootPaths := make(map[string]bool, len(rs.rootModules)+1) - for _, mm := range MainModules.Versions() { + for _, mm := range loaderstate.MainModules.Versions() { inRootPaths[mm.Path] = true } for _, m := range rs.rootModules { @@ -1194,7 +1184,7 @@ func updatePrunedRoots(ctx context.Context, direct map[string]bool, rs *Requirem var v string if mg == nil { - v, _ = rs.rootSelected(m.Path) + v, _ = rs.rootSelected(loaderstate, m.Path) } else { v = mg.Selected(m.Path) } @@ -1228,12 +1218,12 @@ func updatePrunedRoots(ctx context.Context, direct map[string]bool, rs *Requirem // preserve its cached ModuleGraph (if any). return rs, nil } - return newRequirements(pruned, roots, direct), nil + return newRequirements(loaderstate, pruned, roots, direct), nil } // spotCheckRoots reports whether the versions of the roots in rs satisfy the // explicit requirements of the modules in mods. -func spotCheckRoots(ctx context.Context, rs *Requirements, mods map[module.Version]bool) bool { +func spotCheckRoots(loaderstate *State, ctx context.Context, rs *Requirements, mods map[module.Version]bool) bool { ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -1245,14 +1235,14 @@ func spotCheckRoots(ctx context.Context, rs *Requirements, mods map[module.Versi return } - summary, err := goModSummary(m) + summary, err := goModSummary(loaderstate, m) if err != nil { cancel() return } for _, r := range summary.require { - if v, ok := rs.rootSelected(r.Path); ok && gover.ModCompare(r.Path, v, r.Version) < 0 { + if v, ok := rs.rootSelected(loaderstate, r.Path); ok && gover.ModCompare(r.Path, v, r.Version) < 0 { cancel() return } @@ -1274,7 +1264,7 @@ func spotCheckRoots(ctx context.Context, rs *Requirements, mods map[module.Versi // the selected version of every module that provided or lexically could have // provided a package in pkgs, and includes the selected version of every such // module in direct as a root. -func tidyUnprunedRoots(ctx context.Context, mainModule module.Version, old *Requirements, pkgs []*loadPkg) (*Requirements, error) { +func tidyUnprunedRoots(loaderstate *State, ctx context.Context, mainModule module.Version, old *Requirements, pkgs []*loadPkg) (*Requirements, error) { var ( // keep is a set of modules that provide packages or are needed to // disambiguate imports. @@ -1302,16 +1292,16 @@ func tidyUnprunedRoots(ctx context.Context, mainModule module.Version, old *Requ // without its sum. See #47738. 
altMods = map[string]string{} ) - if v, ok := old.rootSelected("go"); ok { + if v, ok := old.rootSelected(loaderstate, "go"); ok { keep = append(keep, module.Version{Path: "go", Version: v}) keptPath["go"] = true } - if v, ok := old.rootSelected("toolchain"); ok { + if v, ok := old.rootSelected(loaderstate, "toolchain"); ok { keep = append(keep, module.Version{Path: "toolchain", Version: v}) keptPath["toolchain"] = true } for _, pkg := range pkgs { - if !pkg.fromExternalModule() { + if !pkg.fromExternalModule(loaderstate) { continue } if m := pkg.mod; !keptPath[m.Path] { @@ -1360,7 +1350,7 @@ func tidyUnprunedRoots(ctx context.Context, mainModule module.Version, old *Requ } } - return newRequirements(unpruned, min, old.direct), nil + return newRequirements(loaderstate, unpruned, min, old.direct), nil } // updateUnprunedRoots returns a set of root requirements that includes the selected @@ -1377,8 +1367,8 @@ func tidyUnprunedRoots(ctx context.Context, mainModule module.Version, old *Requ // by a dependency in add. // 4. Every version in add is selected at its given version unless upgraded by // (the dependencies of) an existing root or another module in add. -func updateUnprunedRoots(ctx context.Context, direct map[string]bool, rs *Requirements, add []module.Version) (*Requirements, error) { - mg, err := rs.Graph(ctx) +func updateUnprunedRoots(loaderstate *State, ctx context.Context, direct map[string]bool, rs *Requirements, add []module.Version) (*Requirements, error) { + mg, err := rs.Graph(loaderstate, ctx) if err != nil { // We can't ignore errors in the module graph even if the user passed the -e // flag to try to push past them. If we can't load the complete module @@ -1386,7 +1376,7 @@ func updateUnprunedRoots(ctx context.Context, direct map[string]bool, rs *Requir return rs, err } - if mustHaveCompleteRequirements() { + if mustHaveCompleteRequirements(loaderstate) { // Instead of actually updating the requirements, just check that no updates // are needed. if rs == nil { @@ -1406,7 +1396,7 @@ func updateUnprunedRoots(ctx context.Context, direct map[string]bool, rs *Requir } } for mPath := range direct { - if _, ok := rs.rootSelected(mPath); !ok { + if _, ok := rs.rootSelected(loaderstate, mPath); !ok { // Module m is supposed to be listed explicitly, but isn't. // // Note that this condition is also detected (and logged with more @@ -1445,7 +1435,7 @@ func updateUnprunedRoots(ctx context.Context, direct map[string]bool, rs *Requir // This is only for convenience and clarity for end users: in an unpruned module, // the choice of explicit vs. implicit dependency has no impact on MVS // selection (for itself or any other module). - keep := append(mg.BuildList()[MainModules.Len():], add...) + keep := append(mg.BuildList()[loaderstate.MainModules.Len():], add...) for _, m := range keep { if direct[m.Path] && !inRootPaths[m.Path] { rootPaths = append(rootPaths, m.Path) @@ -1454,14 +1444,14 @@ func updateUnprunedRoots(ctx context.Context, direct map[string]bool, rs *Requir } var roots []module.Version - for _, mainModule := range MainModules.Versions() { + for _, mainModule := range loaderstate.MainModules.Versions() { min, err := mvs.Req(mainModule, rootPaths, &mvsReqs{roots: keep}) if err != nil { return rs, err } roots = append(roots, min...) 
} - if MainModules.Len() > 1 { + if loaderstate.MainModules.Len() > 1 { gover.ModSort(roots) } if rs.pruning == unpruned && slices.Equal(roots, rs.rootModules) && maps.Equal(direct, rs.direct) { @@ -1470,12 +1460,12 @@ func updateUnprunedRoots(ctx context.Context, direct map[string]bool, rs *Requir return rs, nil } - return newRequirements(unpruned, roots, direct), nil + return newRequirements(loaderstate, unpruned, roots, direct), nil } // convertPruning returns a version of rs with the given pruning behavior. // If rs already has the given pruning, convertPruning returns rs unmodified. -func convertPruning(ctx context.Context, rs *Requirements, pruning modPruning) (*Requirements, error) { +func convertPruning(loaderstate *State, ctx context.Context, rs *Requirements, pruning modPruning) (*Requirements, error) { if rs.pruning == pruning { return rs, nil } else if rs.pruning == workspace || pruning == workspace { @@ -1487,7 +1477,7 @@ func convertPruning(ctx context.Context, rs *Requirements, pruning modPruning) ( // pruned module graph are a superset of the roots of an unpruned one, so // we don't need to add any new roots — we just need to drop the ones that // are redundant, which is exactly what updateUnprunedRoots does. - return updateUnprunedRoots(ctx, rs.direct, rs, nil) + return updateUnprunedRoots(loaderstate, ctx, rs.direct, rs, nil) } // We are converting an unpruned module to a pruned one. @@ -1497,9 +1487,9 @@ func convertPruning(ctx context.Context, rs *Requirements, pruning modPruning) ( // root set! “Include the transitive dependencies of every module in the build // list” is exactly what happens in a pruned module if we promote every module // in the build list to a root. - mg, err := rs.Graph(ctx) + mg, err := rs.Graph(loaderstate, ctx) if err != nil { return rs, err } - return newRequirements(pruned, mg.BuildList()[MainModules.Len():], rs.direct), nil + return newRequirements(loaderstate, pruned, mg.BuildList()[loaderstate.MainModules.Len():], rs.direct), nil } diff --git a/src/cmd/go/internal/modload/edit.go b/src/cmd/go/internal/modload/edit.go index b406193dc5a673..72d0f754224456 100644 --- a/src/cmd/go/internal/modload/edit.go +++ b/src/cmd/go/internal/modload/edit.go @@ -42,7 +42,7 @@ import ( // If pruning is enabled, the roots of the edited requirements include an // explicit entry for each module path in tryUpgrade, mustSelect, and the roots // of rs, unless the selected version for the module path is "none". -func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSelect []module.Version) (edited *Requirements, changed bool, err error) { +func editRequirements(loaderstate *State, ctx context.Context, rs *Requirements, tryUpgrade, mustSelect []module.Version) (edited *Requirements, changed bool, err error) { if rs.pruning == workspace { panic("editRequirements cannot edit workspace requirements") } @@ -82,7 +82,7 @@ func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSel } if rootPruning != rs.pruning { - rs, err = convertPruning(ctx, rs, rootPruning) + rs, err = convertPruning(loaderstate, ctx, rs, rootPruning) if err != nil { return orig, false, err } @@ -100,13 +100,13 @@ func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSel // dependencies, so we need to treat everything in the build list as // potentially relevant — that is, as what would be a “root” in a module // with graph pruning enabled. 
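These root-maintenance helpers (updatePrunedRoots, updateUnprunedRoots, convertPruning) all reason about the "selected" version of each module path in the module graph. As a reminder of what selection means, here is a self-contained sketch of the core minimal-version-selection rule over a flat requirement list; it is an illustration only, not the cmd/go/internal/mvs implementation, and the `requirement` type is invented for the example.

```go
package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

// requirement is a hypothetical (path, version) pair used only in this sketch.
type requirement struct {
	path, version string
}

// selected returns, for each module path, the highest version required by any
// requirement — the core rule behind minimal version selection. Real MVS walks
// the full requirement graph; this sketch only looks at a flat list.
func selected(reqs []requirement) map[string]string {
	sel := make(map[string]string)
	for _, r := range reqs {
		if cur, ok := sel[r.path]; !ok || semver.Compare(r.version, cur) > 0 {
			sel[r.path] = r.version
		}
	}
	return sel
}

func main() {
	reqs := []requirement{
		{"example.com/a", "v1.2.0"},
		{"example.com/a", "v1.4.0"},
		{"example.com/b", "v0.3.1"},
	}
	fmt.Println(selected(reqs)) // map[example.com/a:v1.4.0 example.com/b:v0.3.1]
}
```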
- mg, err := rs.Graph(ctx) + mg, err := rs.Graph(loaderstate, ctx) if err != nil { // If we couldn't load the graph, we don't know what its requirements were // to begin with, so we can't edit those requirements in a coherent way. return orig, false, err } - bl := mg.BuildList()[MainModules.Len():] + bl := mg.BuildList()[loaderstate.MainModules.Len():] selectedRoot = make(map[string]string, len(bl)) for _, m := range bl { selectedRoot[m.Path] = m.Version @@ -224,10 +224,12 @@ func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSel // of every root. The upgraded roots are in addition to the original // roots, so we will have enough information to trace a path to each // conflict we discover from one or more of the original roots. - mg, upgradedRoots, err := extendGraph(ctx, rootPruning, roots, selectedRoot) + mg, upgradedRoots, err := extendGraph(loaderstate, ctx, rootPruning, roots, selectedRoot) if err != nil { - var tooNew *gover.TooNewError - if mg == nil || errors.As(err, &tooNew) { + if mg == nil { + return orig, false, err + } + if _, ok := errors.AsType[*gover.TooNewError](err); ok { return orig, false, err } // We're about to walk the entire extended module graph, so we will find @@ -389,7 +391,7 @@ func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSel // the edit. We want to make sure we consider keeping it as-is, // even if it wouldn't normally be included. (For example, it might // be a pseudo-version or pre-release.) - origMG, _ := orig.Graph(ctx) + origMG, _ := orig.Graph(loaderstate, ctx) origV := origMG.Selected(m.Path) if conflict.Err != nil && origV == m.Version { @@ -413,7 +415,7 @@ func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSel rejectedRoot[m] = true prev := m for { - prev, err = previousVersion(ctx, prev) + prev, err = previousVersion(loaderstate, ctx, prev) if gover.ModCompare(m.Path, m.Version, origV) > 0 && (gover.ModCompare(m.Path, prev.Version, origV) < 0 || err != nil) { // previousVersion skipped over origV. Insert it into the order. prev.Version = origV @@ -513,13 +515,13 @@ func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSel // The modules in mustSelect are always promoted to be explicit. for _, m := range mustSelect { - if m.Version != "none" && !MainModules.Contains(m.Path) { + if m.Version != "none" && !loaderstate.MainModules.Contains(m.Path) { rootPaths = append(rootPaths, m.Path) } } for _, m := range roots { - if v, ok := rs.rootSelected(m.Path); ok && (v == m.Version || rs.direct[m.Path]) { + if v, ok := rs.rootSelected(loaderstate, m.Path); ok && (v == m.Version || rs.direct[m.Path]) { // m.Path was formerly a root, and either its version hasn't changed or // we believe that it provides a package directly imported by a package // or test in the main module. 
For now we'll assume that it is still @@ -530,7 +532,7 @@ func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSel } } - roots, err = mvs.Req(MainModules.mustGetSingleMainModule(), rootPaths, &mvsReqs{roots: roots}) + roots, err = mvs.Req(loaderstate.MainModules.mustGetSingleMainModule(loaderstate), rootPaths, &mvsReqs{roots: roots}) if err != nil { return nil, false, err } @@ -561,7 +563,7 @@ func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSel direct[m.Path] = true } } - edited = newRequirements(rootPruning, roots, direct) + edited = newRequirements(loaderstate, rootPruning, roots, direct) // If we ended up adding a dependency that upgrades our go version far enough // to activate pruning, we must convert the edited Requirements in order to @@ -576,7 +578,7 @@ func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSel // those two modules will never be downgraded due to a conflict with any other // constraint. if rootPruning == unpruned { - if v, ok := edited.rootSelected("go"); ok && pruningForGoVersion(v) == pruned { + if v, ok := edited.rootSelected(loaderstate, "go"); ok && pruningForGoVersion(v) == pruned { // Since we computed the edit with the unpruned graph, and the pruned // graph is a strict subset of the unpruned graph, this conversion // preserves the exact (edited) build list that we already computed. @@ -585,7 +587,7 @@ func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSel // the graph. 'go get' will check for that sort of transition and log a // message reminding the user how to clean up this mess we're about to // make. 😅 - edited, err = convertPruning(ctx, edited, pruned) + edited, err = convertPruning(loaderstate, ctx, edited, pruned) if err != nil { return orig, false, err } @@ -605,9 +607,9 @@ func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSel // The extended graph is useful for diagnosing version conflicts: for each // selected module version, it can provide a complete path of requirements from // some root to that version. -func extendGraph(ctx context.Context, rootPruning modPruning, roots []module.Version, selectedRoot map[string]string) (mg *ModuleGraph, upgradedRoot map[module.Version]bool, err error) { +func extendGraph(loaderstate *State, ctx context.Context, rootPruning modPruning, roots []module.Version, selectedRoot map[string]string) (mg *ModuleGraph, upgradedRoot map[module.Version]bool, err error) { for { - mg, err = readModGraph(ctx, rootPruning, roots, upgradedRoot) + mg, err = readModGraph(loaderstate, ctx, rootPruning, roots, upgradedRoot) // We keep on going even if err is non-nil until we reach a steady state. // (Note that readModGraph returns a non-nil *ModuleGraph even in case of // errors.) 
The caller may be able to fix the errors by adjusting versions, diff --git a/src/cmd/go/internal/modload/import.go b/src/cmd/go/internal/modload/import.go index 171d9d692fbb82..a2a98289b0ed77 100644 --- a/src/cmd/go/internal/modload/import.go +++ b/src/cmd/go/internal/modload/import.go @@ -82,8 +82,8 @@ func (e *ImportMissingError) Error() string { if e.QueryErr != nil { return fmt.Sprintf("%s: %v", message, e.QueryErr) } - if e.ImportingMainModule.Path != "" && e.ImportingMainModule != MainModules.ModContainingCWD() { - return fmt.Sprintf("%s; to add it:\n\tcd %s\n\tgo get %s", message, MainModules.ModRoot(e.ImportingMainModule), e.Path) + if e.ImportingMainModule.Path != "" && e.ImportingMainModule != LoaderState.MainModules.ModContainingCWD() { + return fmt.Sprintf("%s; to add it:\n\tcd %s\n\tgo get %s", message, LoaderState.MainModules.ModRoot(e.ImportingMainModule), e.Path) } return fmt.Sprintf("%s; to add it:\n\tgo get %s", message, e.Path) } @@ -262,7 +262,7 @@ func (e *invalidImportError) Unwrap() error { // (https://go.dev/issue/56222) for modules with 'go' versions between 1.17 and // 1.20, preventing unnecessary go.sum churn and network access in those // modules. -func importFromModules(ctx context.Context, path string, rs *Requirements, mg *ModuleGraph, skipModFile bool) (m module.Version, modroot, dir string, altMods []module.Version, err error) { +func importFromModules(loaderstate *State, ctx context.Context, path string, rs *Requirements, mg *ModuleGraph, skipModFile bool) (m module.Version, modroot, dir string, altMods []module.Version, err error) { invalidf := func(format string, args ...interface{}) (module.Version, string, string, []module.Version, error) { return module.Version{}, "", "", nil, &invalidImportError{ importPath: path, @@ -299,12 +299,12 @@ func importFromModules(ctx context.Context, path string, rs *Requirements, mg *M // Is the package in the standard library? pathIsStd := search.IsStandardImportPath(path) if pathIsStd && modindex.IsStandardPackage(cfg.GOROOT, cfg.BuildContext.Compiler, path) { - for _, mainModule := range MainModules.Versions() { - if MainModules.InGorootSrc(mainModule) { - if dir, ok, err := dirInModule(path, MainModules.PathPrefix(mainModule), MainModules.ModRoot(mainModule), true); err != nil { - return module.Version{}, MainModules.ModRoot(mainModule), dir, nil, err + for _, mainModule := range loaderstate.MainModules.Versions() { + if loaderstate.MainModules.InGorootSrc(mainModule) { + if dir, ok, err := dirInModule(path, loaderstate.MainModules.PathPrefix(mainModule), loaderstate.MainModules.ModRoot(mainModule), true); err != nil { + return module.Version{}, loaderstate.MainModules.ModRoot(mainModule), dir, nil, err } else if ok { - return mainModule, MainModules.ModRoot(mainModule), dir, nil, nil + return mainModule, loaderstate.MainModules.ModRoot(mainModule), dir, nil, nil } } } @@ -321,10 +321,10 @@ func importFromModules(ctx context.Context, path string, rs *Requirements, mg *M // Everything must be in the main modules or the main module's or workspace's vendor directory. 
if cfg.BuildMod == "vendor" { var mainErr error - for _, mainModule := range MainModules.Versions() { - modRoot := MainModules.ModRoot(mainModule) + for _, mainModule := range loaderstate.MainModules.Versions() { + modRoot := loaderstate.MainModules.ModRoot(mainModule) if modRoot != "" { - dir, mainOK, err := dirInModule(path, MainModules.PathPrefix(mainModule), modRoot, true) + dir, mainOK, err := dirInModule(path, loaderstate.MainModules.PathPrefix(mainModule), modRoot, true) if mainErr == nil { mainErr = err } @@ -336,8 +336,8 @@ func importFromModules(ctx context.Context, path string, rs *Requirements, mg *M } } - if HasModRoot() { - vendorDir := VendorDir() + if HasModRoot(loaderstate) { + vendorDir := VendorDir(loaderstate) dir, inVendorDir, _ := dirInModule(path, "", vendorDir, false) if inVendorDir { readVendorList(vendorDir) @@ -345,13 +345,13 @@ func importFromModules(ctx context.Context, path string, rs *Requirements, mg *M // vendor/modules.txt does not exist or the user manually added directories to the vendor directory. // Go 1.23 and later require vendored packages to be present in modules.txt to be imported. _, ok := vendorPkgModule[path] - if ok || (gover.Compare(MainModules.GoVersion(), gover.ExplicitModulesTxtImportVersion) < 0) { + if ok || (gover.Compare(loaderstate.MainModules.GoVersion(loaderstate), gover.ExplicitModulesTxtImportVersion) < 0) { mods = append(mods, vendorPkgModule[path]) dirs = append(dirs, dir) roots = append(roots, vendorDir) } else { subCommand := "mod" - if inWorkspaceMode() { + if inWorkspaceMode(loaderstate) { subCommand = "work" } fmt.Fprintf(os.Stderr, "go: ignoring package %s which exists in the vendor directory but is missing from vendor/modules.txt. To sync the vendor directory run go %s vendor.\n", path, subCommand) @@ -399,7 +399,7 @@ func importFromModules(ctx context.Context, path string, rs *Requirements, mg *M ok bool ) if mg == nil { - v, ok = rs.rootSelected(prefix) + v, ok = rs.rootSelected(loaderstate, prefix) } else { v, ok = mg.Selected(prefix), true } @@ -408,9 +408,9 @@ func importFromModules(ctx context.Context, path string, rs *Requirements, mg *M } m := module.Version{Path: prefix, Version: v} - root, isLocal, err := fetch(ctx, m) + root, isLocal, err := fetch(loaderstate, ctx, m) if err != nil { - if sumErr := (*sumMissingError)(nil); errors.As(err, &sumErr) { + if _, ok := errors.AsType[*sumMissingError](err); ok { // We are missing a sum needed to fetch a module in the build list. // We can't verify that the package is unique, and we may not find // the package at all. Keep checking other modules to decide which @@ -471,8 +471,8 @@ func importFromModules(ctx context.Context, path string, rs *Requirements, mg *M // If the module graph is pruned and this is a test-only dependency // of a package in "all", we didn't necessarily load that file // when we read the module graph, so do it now to be sure. - if !skipModFile && cfg.BuildMod != "vendor" && mods[0].Path != "" && !MainModules.Contains(mods[0].Path) { - if _, err := goModSummary(mods[0]); err != nil { + if !skipModFile && cfg.BuildMod != "vendor" && mods[0].Path != "" && !loaderstate.MainModules.Contains(mods[0].Path) { + if _, err := goModSummary(loaderstate, mods[0]); err != nil { return module.Version{}, "", "", nil, err } } @@ -483,7 +483,7 @@ func importFromModules(ctx context.Context, path string, rs *Requirements, mg *M // We checked the full module graph and still didn't find the // requested package. 
var queryErr error - if !HasModRoot() { + if !HasModRoot(loaderstate) { queryErr = ErrNoModRoot } return module.Version{}, "", "", nil, &ImportMissingError{Path: path, QueryErr: queryErr, isStd: pathIsStd} @@ -491,7 +491,7 @@ func importFromModules(ctx context.Context, path string, rs *Requirements, mg *M // So far we've checked the root dependencies. // Load the full module graph and try again. - mg, err = rs.Graph(ctx) + mg, err = rs.Graph(loaderstate, ctx) if err != nil { // We might be missing one or more transitive (implicit) dependencies from // the module graph, so we can't return an ImportMissingError here — one @@ -507,12 +507,12 @@ func importFromModules(ctx context.Context, path string, rs *Requirements, mg *M // // Unlike QueryPattern, queryImport prefers to add a replaced version of a // module *before* checking the proxies for a version to add. -func queryImport(ctx context.Context, path string, rs *Requirements) (module.Version, error) { +func queryImport(loaderstate *State, ctx context.Context, path string, rs *Requirements) (module.Version, error) { // To avoid spurious remote fetches, try the latest replacement for each // module (golang.org/issue/26241). var mods []module.Version - if MainModules != nil { // TODO(#48912): Ensure MainModules exists at this point, and remove the check. - for mp, mv := range MainModules.HighestReplaced() { + if loaderstate.MainModules != nil { // TODO(#48912): Ensure MainModules exists at this point, and remove the check. + for mp, mv := range loaderstate.MainModules.HighestReplaced() { if !maybeInModule(path, mp) { continue } @@ -528,7 +528,7 @@ func queryImport(ctx context.Context, path string, rs *Requirements) (module.Ver mv = module.ZeroPseudoVersion("v0") } } - mg, err := rs.Graph(ctx) + mg, err := rs.Graph(loaderstate, ctx) if err != nil { return module.Version{}, err } @@ -547,9 +547,9 @@ func queryImport(ctx context.Context, path string, rs *Requirements) (module.Ver return len(mods[i].Path) > len(mods[j].Path) }) for _, m := range mods { - root, isLocal, err := fetch(ctx, m) + root, isLocal, err := fetch(loaderstate, ctx, m) if err != nil { - if sumErr := (*sumMissingError)(nil); errors.As(err, &sumErr) { + if _, ok := errors.AsType[*sumMissingError](err); ok { return module.Version{}, &ImportMissingSumError{importPath: path} } return module.Version{}, err @@ -567,7 +567,7 @@ func queryImport(ctx context.Context, path string, rs *Requirements) (module.Ver // The package path is not valid to fetch remotely, // so it can only exist in a replaced module, // and we know from the above loop that it is not. - replacement := Replacement(mods[0]) + replacement := Replacement(loaderstate, mods[0]) return module.Version{}, &PackageNotInModuleError{ Mod: mods[0], Query: "latest", @@ -607,12 +607,12 @@ func queryImport(ctx context.Context, path string, rs *Requirements) (module.Ver // and return m, dir, ImportMissingError. 
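queryImport above prefers the candidate whose module path is longest, since a deeper module path is a more specific match for the import being resolved. Below is a minimal sketch of that longest-prefix rule; `inModule` here is a simplified stand-in for the package's maybeInModule check, and the module paths are invented.

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// inModule reports whether importPath could live inside the module with the
// given path (the module itself or a subdirectory of it).
func inModule(importPath, modPath string) bool {
	return importPath == modPath || strings.HasPrefix(importPath, modPath+"/")
}

// bestModule returns the candidate module path that most specifically matches
// importPath: the longest module path that could contain it.
func bestModule(importPath string, candidates []string) (string, bool) {
	var matches []string
	for _, m := range candidates {
		if inModule(importPath, m) {
			matches = append(matches, m)
		}
	}
	if len(matches) == 0 {
		return "", false
	}
	sort.Slice(matches, func(i, j int) bool { return len(matches[i]) > len(matches[j]) })
	return matches[0], true
}

func main() {
	mods := []string{"example.com/a", "example.com/a/b", "example.com/c"}
	fmt.Println(bestModule("example.com/a/b/pkg", mods)) // example.com/a/b true
}
```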
fmt.Fprintf(os.Stderr, "go: finding module for package %s\n", path) - mg, err := rs.Graph(ctx) + mg, err := rs.Graph(loaderstate, ctx) if err != nil { return module.Version{}, err } - candidates, err := QueryPackages(ctx, path, "latest", mg.Selected, CheckAllowed) + candidates, err := QueryPackages(loaderstate, ctx, path, "latest", mg.Selected, CheckAllowed) if err != nil { if errors.Is(err, fs.ErrNotExist) { // Return "cannot find module providing package […]" instead of whatever @@ -747,15 +747,15 @@ func dirInModule(path, mpath, mdir string, isLocal bool) (dir string, haveGoFile // // The isLocal return value reports whether the replacement, // if any, is local to the filesystem. -func fetch(ctx context.Context, mod module.Version) (dir string, isLocal bool, err error) { - if modRoot := MainModules.ModRoot(mod); modRoot != "" { +func fetch(loaderstate *State, ctx context.Context, mod module.Version) (dir string, isLocal bool, err error) { + if modRoot := loaderstate.MainModules.ModRoot(mod); modRoot != "" { return modRoot, true, nil } - if r := Replacement(mod); r.Path != "" { + if r := Replacement(loaderstate, mod); r.Path != "" { if r.Version == "" { dir = r.Path if !filepath.IsAbs(dir) { - dir = filepath.Join(replaceRelativeTo(), dir) + dir = filepath.Join(replaceRelativeTo(loaderstate), dir) } // Ensure that the replacement directory actually exists: // dirInModule does not report errors for missing modules, @@ -780,7 +780,7 @@ func fetch(ctx context.Context, mod module.Version) (dir string, isLocal bool, e mod = r } - if mustHaveSums() && !modfetch.HaveSum(mod) { + if mustHaveSums(loaderstate) && !modfetch.HaveSum(mod) { return "", false, module.VersionError(mod, &sumMissingError{}) } @@ -790,8 +790,8 @@ func fetch(ctx context.Context, mod module.Version) (dir string, isLocal bool, e // mustHaveSums reports whether we require that all checksums // needed to load or build packages are already present in the go.sum file. -func mustHaveSums() bool { - return HasModRoot() && cfg.BuildMod == "readonly" && !inWorkspaceMode() +func mustHaveSums(loaderstate *State) bool { + return HasModRoot(loaderstate) && cfg.BuildMod == "readonly" && !inWorkspaceMode(loaderstate) } type sumMissingError struct { diff --git a/src/cmd/go/internal/modload/import_test.go b/src/cmd/go/internal/modload/import_test.go index eb4f5d64d3a3c7..a5c4b837a0be12 100644 --- a/src/cmd/go/internal/modload/import_test.go +++ b/src/cmd/go/internal/modload/import_test.go @@ -60,21 +60,21 @@ func TestQueryImport(t *testing.T) { testenv.MustHaveExecPath(t, "git") oldAllowMissingModuleImports := allowMissingModuleImports - oldRootMode := RootMode + oldRootMode := LoaderState.RootMode defer func() { allowMissingModuleImports = oldAllowMissingModuleImports - RootMode = oldRootMode + LoaderState.RootMode = oldRootMode }() allowMissingModuleImports = true - RootMode = NoRoot + LoaderState.RootMode = NoRoot ctx := context.Background() - rs := LoadModFile(ctx) + rs := LoadModFile(LoaderState, ctx) for _, tt := range importTests { t.Run(strings.ReplaceAll(tt.path, "/", "_"), func(t *testing.T) { // Note that there is no build list, so Import should always fail. 
- m, err := queryImport(ctx, tt.path, rs) + m, err := queryImport(LoaderState, ctx, tt.path, rs) if tt.err == "" { if err != nil { diff --git a/src/cmd/go/internal/modload/init.go b/src/cmd/go/internal/modload/init.go index 498ff7433ea6fe..20751528862ce2 100644 --- a/src/cmd/go/internal/modload/init.go +++ b/src/cmd/go/internal/modload/init.go @@ -38,13 +38,6 @@ import ( // // TODO(#40775): See if these can be plumbed as explicit parameters. var ( - // RootMode determines whether a module root is needed. - RootMode Root - - // ForceUseModules may be set to force modules to be enabled when - // GO111MODULE=auto or to report an error when GO111MODULE=off. - ForceUseModules bool - allowMissingModuleImports bool // ExplicitWriteGoMod prevents LoadPackages, ListModules, and other functions @@ -60,26 +53,18 @@ var ( // Variables set in Init. var ( - initialized bool - - // These are primarily used to initialize the MainModules, and should be - // eventually superseded by them but are still used in cases where the module - // roots are required but MainModules hasn't been initialized yet. Set to - // the modRoots of the main modules. - // modRoots != nil implies len(modRoots) > 0 - modRoots []string - gopath string + gopath string ) // EnterModule resets MainModules and requirements to refer to just this one module. func EnterModule(ctx context.Context, enterModroot string) { - MainModules = nil // reset MainModules - requirements = nil - workFilePath = "" // Force module mode + LoaderState.MainModules = nil // reset MainModules + LoaderState.requirements = nil + LoaderState.workFilePath = "" // Force module mode modfetch.Reset() - modRoots = []string{enterModroot} - LoadModFile(ctx) + LoaderState.modRoots = []string{enterModroot} + LoadModFile(LoaderState, ctx) } // EnterWorkspace enters workspace mode from module mode, applying the updated requirements to the main @@ -88,36 +73,30 @@ func EnterModule(ctx context.Context, enterModroot string) { // EnterWorkspace will modify the global state they depend on in a non-thread-safe way. func EnterWorkspace(ctx context.Context) (exit func(), err error) { // Find the identity of the main module that will be updated before we reset modload state. - mm := MainModules.mustGetSingleMainModule() + mm := LoaderState.MainModules.mustGetSingleMainModule(LoaderState) // Get the updated modfile we will use for that module. - _, _, updatedmodfile, err := UpdateGoModFromReqs(ctx, WriteOpts{}) + _, _, updatedmodfile, err := UpdateGoModFromReqs(LoaderState, ctx, WriteOpts{}) if err != nil { return nil, err } // Reset the state to a clean state. - oldstate := setState(state{}) - ForceUseModules = true + oldstate := setState(State{}) + LoaderState.ForceUseModules = true // Load in workspace mode. - InitWorkfile() - LoadModFile(ctx) + InitWorkfile(LoaderState) + LoadModFile(LoaderState, ctx) // Update the content of the previous main module, and recompute the requirements. - *MainModules.ModFile(mm) = *updatedmodfile - requirements = requirementsFromModFiles(ctx, MainModules.workFile, slices.Collect(maps.Values(MainModules.modFiles)), nil) + *LoaderState.MainModules.ModFile(mm) = *updatedmodfile + LoaderState.requirements = requirementsFromModFiles(LoaderState, ctx, LoaderState.MainModules.workFile, slices.Collect(maps.Values(LoaderState.MainModules.modFiles)), nil) return func() { setState(oldstate) }, nil } -// Variable set in InitWorkfile -var ( - // Set to the path to the go.work file, or "" if workspace mode is disabled. 
- workFilePath string -) - type MainModuleSet struct { // versions are the module.Version values of each of the main modules. // For each of them, the Path fields are ordinary module paths and the Version @@ -203,12 +182,12 @@ func (mms *MainModuleSet) InGorootSrc(m module.Version) bool { return mms.inGorootSrc[m] } -func (mms *MainModuleSet) mustGetSingleMainModule() module.Version { +func (mms *MainModuleSet) mustGetSingleMainModule(loaderstate *State) module.Version { if mms == nil || len(mms.versions) == 0 { panic("internal error: mustGetSingleMainModule called in context with no main modules") } if len(mms.versions) != 1 { - if inWorkspaceMode() { + if inWorkspaceMode(loaderstate) { panic("internal error: mustGetSingleMainModule called in workspace mode") } else { panic("internal error: multiple main modules present outside of workspace mode") @@ -217,14 +196,14 @@ func (mms *MainModuleSet) mustGetSingleMainModule() module.Version { return mms.versions[0] } -func (mms *MainModuleSet) GetSingleIndexOrNil() *modFileIndex { +func (mms *MainModuleSet) GetSingleIndexOrNil(loaderstate *State) *modFileIndex { if mms == nil { return nil } if len(mms.versions) == 0 { return nil } - return mms.indices[mms.mustGetSingleMainModule()] + return mms.indices[mms.mustGetSingleMainModule(loaderstate)] } func (mms *MainModuleSet) Index(m module.Version) *modFileIndex { @@ -267,12 +246,12 @@ func (mms *MainModuleSet) HighestReplaced() map[string]string { // GoVersion returns the go version set on the single module, in module mode, // or the go.work file in workspace mode. -func (mms *MainModuleSet) GoVersion() string { - if inWorkspaceMode() { +func (mms *MainModuleSet) GoVersion(loaderstate *State) string { + if inWorkspaceMode(loaderstate) { return gover.FromGoWork(mms.workFile) } if mms != nil && len(mms.versions) == 1 { - f := mms.ModFile(mms.mustGetSingleMainModule()) + f := mms.ModFile(mms.mustGetSingleMainModule(loaderstate)) if f == nil { // Special case: we are outside a module, like 'go run x.go'. // Assume the local Go version. @@ -287,15 +266,15 @@ func (mms *MainModuleSet) GoVersion() string { // Godebugs returns the godebug lines set on the single module, in module mode, // or on the go.work file in workspace mode. // The caller must not modify the result. -func (mms *MainModuleSet) Godebugs() []*modfile.Godebug { - if inWorkspaceMode() { +func (mms *MainModuleSet) Godebugs(loaderstate *State) []*modfile.Godebug { + if inWorkspaceMode(loaderstate) { if mms.workFile != nil { return mms.workFile.Godebug } return nil } if mms != nil && len(mms.versions) == 1 { - f := mms.ModFile(mms.mustGetSingleMainModule()) + f := mms.ModFile(mms.mustGetSingleMainModule(loaderstate)) if f == nil { // Special case: we are outside a module, like 'go run x.go'. return nil @@ -309,8 +288,6 @@ func (mms *MainModuleSet) WorkFileReplaceMap() map[module.Version]module.Version return mms.workFileReplaceMap } -var MainModules *MainModuleSet - type Root int const ( @@ -338,16 +315,16 @@ const ( // To make permanent changes to the require statements // in go.mod, edit it before loading. 
func ModFile() *modfile.File { - Init() - modFile := MainModules.ModFile(MainModules.mustGetSingleMainModule()) + Init(LoaderState) + modFile := LoaderState.MainModules.ModFile(LoaderState.MainModules.mustGetSingleMainModule(LoaderState)) if modFile == nil { - die() + die(LoaderState) } return modFile } -func BinDir() string { - Init() +func BinDir(loaderstate *State) string { + Init(loaderstate) if cfg.GOBIN != "" { return cfg.GOBIN } @@ -360,13 +337,13 @@ func BinDir() string { // InitWorkfile initializes the workFilePath variable for commands that // operate in workspace mode. It should not be called by other commands, // for example 'go mod tidy', that don't operate in workspace mode. -func InitWorkfile() { +func InitWorkfile(loaderstate *State) { // Initialize fsys early because we need overlay to read go.work file. fips140.Init() if err := fsys.Init(); err != nil { base.Fatal(err) } - workFilePath = FindGoWork(base.Cwd()) + loaderstate.workFilePath = FindGoWork(loaderstate, base.Cwd()) } // FindGoWork returns the name of the go.work file for this command, @@ -374,8 +351,8 @@ func InitWorkfile() { // Most code should use Init and Enabled rather than use this directly. // It is exported mainly for Go toolchain switching, which must process // the go.work very early at startup. -func FindGoWork(wd string) string { - if RootMode == NoRoot { +func FindGoWork(loaderstate *State, wd string) string { + if loaderstate.RootMode == NoRoot { return "" } @@ -394,34 +371,34 @@ func FindGoWork(wd string) string { // WorkFilePath returns the absolute path of the go.work file, or "" if not in // workspace mode. WorkFilePath must be called after InitWorkfile. -func WorkFilePath() string { - return workFilePath +func WorkFilePath(loaderstate *State) string { + return loaderstate.workFilePath } // Reset clears all the initialized, cached state about the use of modules, // so that we can start over. func Reset() { - setState(state{}) + setState(State{}) } -func setState(s state) state { - oldState := state{ - initialized: initialized, - forceUseModules: ForceUseModules, - rootMode: RootMode, - modRoots: modRoots, +func setState(s State) State { + oldState := State{ + initialized: LoaderState.initialized, + ForceUseModules: LoaderState.ForceUseModules, + RootMode: LoaderState.RootMode, + modRoots: LoaderState.modRoots, modulesEnabled: cfg.ModulesEnabled, - mainModules: MainModules, - requirements: requirements, + MainModules: LoaderState.MainModules, + requirements: LoaderState.requirements, } - initialized = s.initialized - ForceUseModules = s.forceUseModules - RootMode = s.rootMode - modRoots = s.modRoots + LoaderState.initialized = s.initialized + LoaderState.ForceUseModules = s.ForceUseModules + LoaderState.RootMode = s.RootMode + LoaderState.modRoots = s.modRoots cfg.ModulesEnabled = s.modulesEnabled - MainModules = s.mainModules - requirements = s.requirements - workFilePath = s.workFilePath + LoaderState.MainModules = s.MainModules + LoaderState.requirements = s.requirements + LoaderState.workFilePath = s.workFilePath // The modfetch package's global state is used to compute // the go.sum file, so save and restore it along with the // modload state. 
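setState above swaps in a whole State value and hands back the previous snapshot, which is how EnterWorkspace can temporarily reconfigure the loader and later restore it via the returned exit function. The following is a stripped-down sketch of that swap-and-restore shape, using a toy `loader` type rather than the real modload.State.

```go
package main

import "fmt"

// loader is a toy stand-in for the package-level state being swapped.
type loader struct {
	initialized bool
	rootMode    string
}

var current loader

// setState installs s as the current state and returns the previous one,
// so callers can restore it later (typically with defer).
func setState(s loader) loader {
	old := current
	current = s
	return old
}

func main() {
	current = loader{initialized: true, rootMode: "need-root"}

	// Temporarily reset to a clean state, then restore on the way out.
	old := setState(loader{})
	defer func() {
		setState(old)
		fmt.Println("restored:", current.rootMode)
	}()

	current.rootMode = "no-root"
	fmt.Println("temporary:", current.rootMode)
}
```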
@@ -429,27 +406,56 @@ func setState(s state) state { return oldState } -type state struct { - initialized bool - forceUseModules bool - rootMode Root - modRoots []string - modulesEnabled bool - mainModules *MainModuleSet - requirements *Requirements - workFilePath string - modfetchState modfetch.State +type State struct { + initialized bool + + // ForceUseModules may be set to force modules to be enabled when + // GO111MODULE=auto or to report an error when GO111MODULE=off. + ForceUseModules bool + + // RootMode determines whether a module root is needed. + RootMode Root + + // These are primarily used to initialize the MainModules, and should + // be eventually superseded by them but are still used in cases where + // the module roots are required but MainModules has not been + // initialized yet. Set to the modRoots of the main modules. + // modRoots != nil implies len(modRoots) > 0 + modRoots []string + modulesEnabled bool + MainModules *MainModuleSet + + // requirements is the requirement graph for the main module. + // + // It is always non-nil if the main module's go.mod file has been + // loaded. + // + // This variable should only be read from the loadModFile + // function, and should only be written in the loadModFile and + // commitRequirements functions. All other functions that need or + // produce a *Requirements should accept and/or return an explicit + // parameter. + requirements *Requirements + + // Set to the path to the go.work file, or "" if workspace mode is + // disabled + workFilePath string + modfetchState modfetch.State } +func NewState() *State { return &State{} } + +var LoaderState = NewState() + // Init determines whether module mode is enabled, locates the root of the // current module (if any), sets environment variables for Git subprocesses, and // configures the cfg, codehost, load, modfetch, and search packages for use // with modules. -func Init() { - if initialized { +func Init(loaderstate *State) { + if loaderstate.initialized { return } - initialized = true + loaderstate.initialized = true fips140.Init() @@ -462,11 +468,11 @@ func Init() { default: base.Fatalf("go: unknown environment setting GO111MODULE=%s", env) case "auto": - mustUseModules = ForceUseModules + mustUseModules = loaderstate.ForceUseModules case "on", "": mustUseModules = true case "off": - if ForceUseModules { + if loaderstate.ForceUseModules { base.Fatalf("go: modules disabled by GO111MODULE=off; see 'go help modules'") } mustUseModules = false @@ -490,15 +496,15 @@ func Init() { if os.Getenv("GCM_INTERACTIVE") == "" { os.Setenv("GCM_INTERACTIVE", "never") } - if modRoots != nil { + if loaderstate.modRoots != nil { // modRoot set before Init was called ("go mod init" does this). // No need to search for go.mod. - } else if RootMode == NoRoot { + } else if loaderstate.RootMode == NoRoot { if cfg.ModFile != "" && !base.InGOFLAGS("-modfile") { base.Fatalf("go: -modfile cannot be used with commands that ignore the current module") } - modRoots = nil - } else if workFilePath != "" { + loaderstate.modRoots = nil + } else if loaderstate.workFilePath != "" { // We're in workspace mode, which implies module mode. 
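Init's module-mode decision now reads ForceUseModules from the State: GO111MODULE=auto defers to it, "on" (or unset) always enables modules, and "off" is fatal only when a command forces module mode. Here is a compact sketch of that decision table, with a hypothetical `mustUseModules` helper that returns an error instead of calling base.Fatalf.

```go
package main

import (
	"errors"
	"fmt"
)

// mustUseModules mirrors the GO111MODULE handling in Init: it reports whether
// module mode is required, given the environment value and whether the
// command forces module mode.
func mustUseModules(go111module string, forceUseModules bool) (bool, error) {
	switch go111module {
	case "auto":
		return forceUseModules, nil
	case "on", "":
		return true, nil
	case "off":
		if forceUseModules {
			return false, errors.New("modules disabled by GO111MODULE=off; see 'go help modules'")
		}
		return false, nil
	default:
		return false, fmt.Errorf("unknown environment setting GO111MODULE=%s", go111module)
	}
}

func main() {
	for _, env := range []string{"auto", "on", "off", "bogus"} {
		ok, err := mustUseModules(env, true)
		fmt.Printf("GO111MODULE=%s force=true -> %v, %v\n", env, ok, err)
	}
}
```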
if cfg.ModFile != "" { base.Fatalf("go: -modfile cannot be used in workspace mode") @@ -508,7 +514,7 @@ func Init() { if cfg.ModFile != "" { base.Fatalf("go: cannot find main module, but -modfile was set.\n\t-modfile cannot be used to set the module root directory.") } - if RootMode == NeedRoot { + if loaderstate.RootMode == NeedRoot { base.Fatal(ErrNoModRoot) } if !mustUseModules { @@ -523,14 +529,14 @@ func Init() { // It's a bit of a peculiar thing to disallow but quite mysterious // when it happens. See golang.org/issue/26708. fmt.Fprintf(os.Stderr, "go: warning: ignoring go.mod in system temp root %v\n", os.TempDir()) - if RootMode == NeedRoot { + if loaderstate.RootMode == NeedRoot { base.Fatal(ErrNoModRoot) } if !mustUseModules { return } } else { - modRoots = []string{modRoot} + loaderstate.modRoots = []string{modRoot} } } if cfg.ModFile != "" && !strings.HasSuffix(cfg.ModFile, ".mod") { @@ -539,13 +545,13 @@ func Init() { // We're in module mode. Set any global variables that need to be set. cfg.ModulesEnabled = true - setDefaultBuildMod() + setDefaultBuildMod(loaderstate) list := filepath.SplitList(cfg.BuildContext.GOPATH) if len(list) > 0 && list[0] != "" { gopath = list[0] if _, err := fsys.Stat(filepath.Join(gopath, "go.mod")); err == nil { fmt.Fprintf(os.Stderr, "go: warning: ignoring go.mod in $GOPATH %v\n", gopath) - if RootMode == NeedRoot { + if loaderstate.RootMode == NeedRoot { base.Fatal(ErrNoModRoot) } if !mustUseModules { @@ -565,11 +571,11 @@ func Init() { // be called until the command is installed and flags are parsed. Instead of // calling Init and Enabled, the main package can call this function. func WillBeEnabled() bool { - if modRoots != nil || cfg.ModulesEnabled { + if LoaderState.modRoots != nil || cfg.ModulesEnabled { // Already enabled. return true } - if initialized { + if LoaderState.initialized { // Initialized, not enabled. return false } @@ -616,49 +622,49 @@ func FindGoMod(wd string) string { // If modules are enabled but there is no main module, Enabled returns true // and then the first use of module information will call die // (usually through MustModRoot). -func Enabled() bool { - Init() - return modRoots != nil || cfg.ModulesEnabled +func Enabled(loaderstate *State) bool { + Init(loaderstate) + return loaderstate.modRoots != nil || cfg.ModulesEnabled } -func VendorDir() string { - if inWorkspaceMode() { - return filepath.Join(filepath.Dir(WorkFilePath()), "vendor") +func VendorDir(loaderstate *State) string { + if inWorkspaceMode(loaderstate) { + return filepath.Join(filepath.Dir(WorkFilePath(loaderstate)), "vendor") } // Even if -mod=vendor, we could be operating with no mod root (and thus no // vendor directory). As long as there are no dependencies that is expected // to work. See script/vendor_outside_module.txt. - modRoot := MainModules.ModRoot(MainModules.mustGetSingleMainModule()) + modRoot := loaderstate.MainModules.ModRoot(loaderstate.MainModules.mustGetSingleMainModule(loaderstate)) if modRoot == "" { panic("vendor directory does not exist when in single module mode outside of a module") } return filepath.Join(modRoot, "vendor") } -func inWorkspaceMode() bool { - if !initialized { +func inWorkspaceMode(loaderstate *State) bool { + if !loaderstate.initialized { panic("inWorkspaceMode called before modload.Init called") } - if !Enabled() { + if !Enabled(loaderstate) { return false } - return workFilePath != "" + return loaderstate.workFilePath != "" } // HasModRoot reports whether a main module or main modules are present. 
// HasModRoot may return false even if Enabled returns true: for example, 'get' // does not require a main module. -func HasModRoot() bool { - Init() - return modRoots != nil +func HasModRoot(loaderstate *State) bool { + Init(loaderstate) + return loaderstate.modRoots != nil } // MustHaveModRoot checks that a main module or main modules are present, // and calls base.Fatalf if there are no main modules. func MustHaveModRoot() { - Init() - if !HasModRoot() { - die() + Init(LoaderState) + if !HasModRoot(LoaderState) { + die(LoaderState) } } @@ -680,11 +686,11 @@ func modFilePath(modRoot string) string { return filepath.Join(modRoot, "go.mod") } -func die() { +func die(loaderstate *State) { if cfg.Getenv("GO111MODULE") == "off" { base.Fatalf("go: modules disabled by GO111MODULE=off; see 'go help modules'") } - if !inWorkspaceMode() { + if !inWorkspaceMode(loaderstate) { if dir, name := findAltConfig(base.Cwd()); dir != "" { rel, err := filepath.Rel(base.Cwd(), dir) if err != nil { @@ -705,7 +711,7 @@ func die() { type noMainModulesError struct{} func (e noMainModulesError) Error() string { - if inWorkspaceMode() { + if inWorkspaceMode(LoaderState) { return "no modules were found in the current workspace; see 'go help work'" } return "go.mod file not found in current directory or any parent directory; see 'go help modules'" @@ -862,33 +868,33 @@ func UpdateWorkFile(wf *modfile.WorkFile) { // other, but unlike LoadModGraph does not load the full module graph or check // it for global consistency. Most callers outside of the modload package should // use LoadModGraph instead. -func LoadModFile(ctx context.Context) *Requirements { - rs, err := loadModFile(ctx, nil) +func LoadModFile(loaderstate *State, ctx context.Context) *Requirements { + rs, err := loadModFile(loaderstate, ctx, nil) if err != nil { base.Fatal(err) } return rs } -func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error) { - if requirements != nil { - return requirements, nil +func loadModFile(loaderstate *State, ctx context.Context, opts *PackageOpts) (*Requirements, error) { + if loaderstate.requirements != nil { + return loaderstate.requirements, nil } - Init() + Init(loaderstate) var workFile *modfile.WorkFile - if inWorkspaceMode() { + if inWorkspaceMode(loaderstate) { var err error - workFile, modRoots, err = LoadWorkFile(workFilePath) + workFile, loaderstate.modRoots, err = LoadWorkFile(loaderstate.workFilePath) if err != nil { return nil, err } - for _, modRoot := range modRoots { + for _, modRoot := range loaderstate.modRoots { sumFile := strings.TrimSuffix(modFilePath(modRoot), ".mod") + ".sum" modfetch.WorkspaceGoSumFiles = append(modfetch.WorkspaceGoSumFiles, sumFile) } - modfetch.GoSumFile = workFilePath + ".sum" - } else if len(modRoots) == 0 { + modfetch.GoSumFile = loaderstate.workFilePath + ".sum" + } else if len(loaderstate.modRoots) == 0 { // We're in module mode, but not inside a module. // // Commands like 'go build', 'go run', 'go list' have no go.mod file to @@ -907,25 +913,25 @@ func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error) // // See golang.org/issue/32027. 
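The loadModFile hunks around here also decide where checksums are recorded: go.work.sum beside the go.work file in workspace mode, go.sum beside go.mod in module mode, and no fixed location when running outside any module. Below is a simplified helper capturing that rule; it is illustrative only, and the real wiring goes through the modfetch package.

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// goSumFile returns where module checksums are recorded, following the layout
// loadModFile configures: go.work.sum next to go.work in workspace mode,
// go.sum next to go.mod in module mode, and no fixed location otherwise.
func goSumFile(workFilePath string, modRoots []string) (string, bool) {
	if workFilePath != "" {
		return workFilePath + ".sum", true
	}
	if len(modRoots) > 0 {
		gomod := filepath.Join(modRoots[0], "go.mod")
		return strings.TrimSuffix(gomod, ".mod") + ".sum", true
	}
	return "", false // e.g. 'go run x.go' outside any module
}

func main() {
	fmt.Println(goSumFile("/src/ws/go.work", nil))
	fmt.Println(goSumFile("", []string{"/src/mod"}))
	fmt.Println(goSumFile("", nil))
}
```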
} else { - modfetch.GoSumFile = strings.TrimSuffix(modFilePath(modRoots[0]), ".mod") + ".sum" + modfetch.GoSumFile = strings.TrimSuffix(modFilePath(loaderstate.modRoots[0]), ".mod") + ".sum" } - if len(modRoots) == 0 { + if len(loaderstate.modRoots) == 0 { // TODO(#49228): Instead of creating a fake module with an empty modroot, // make MainModules.Len() == 0 mean that we're in module mode but not inside // any module. mainModule := module.Version{Path: "command-line-arguments"} - MainModules = makeMainModules([]module.Version{mainModule}, []string{""}, []*modfile.File{nil}, []*modFileIndex{nil}, nil) + loaderstate.MainModules = makeMainModules(loaderstate, []module.Version{mainModule}, []string{""}, []*modfile.File{nil}, []*modFileIndex{nil}, nil) var ( goVersion string pruning modPruning roots []module.Version direct = map[string]bool{"go": true} ) - if inWorkspaceMode() { + if inWorkspaceMode(loaderstate) { // Since we are in a workspace, the Go version for the synthetic // "command-line-arguments" module must not exceed the Go version // for the workspace. - goVersion = MainModules.GoVersion() + goVersion = loaderstate.MainModules.GoVersion(loaderstate) pruning = workspace roots = []module.Version{ mainModule, @@ -941,26 +947,26 @@ func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error) } } rawGoVersion.Store(mainModule, goVersion) - requirements = newRequirements(pruning, roots, direct) + loaderstate.requirements = newRequirements(loaderstate, pruning, roots, direct) if cfg.BuildMod == "vendor" { // For issue 56536: Some users may have GOFLAGS=-mod=vendor set. // Make sure it behaves as though the fake module is vendored // with no dependencies. - requirements.initVendor(nil) + loaderstate.requirements.initVendor(loaderstate, nil) } - return requirements, nil + return loaderstate.requirements, nil } var modFiles []*modfile.File var mainModules []module.Version var indices []*modFileIndex var errs []error - for _, modroot := range modRoots { + for _, modroot := range loaderstate.modRoots { gomod := modFilePath(modroot) var fixed bool - data, f, err := ReadModFile(gomod, fixVersion(ctx, &fixed)) + data, f, err := ReadModFile(gomod, fixVersion(loaderstate, ctx, &fixed)) if err != nil { - if inWorkspaceMode() { + if inWorkspaceMode(loaderstate) { if tooNew, ok := err.(*gover.TooNewError); ok && !strings.HasPrefix(cfg.CmdName, "work ") { // Switching to a newer toolchain won't help - the go.work has the wrong version. // Report this more specific error, unless we are a command like 'go work use' @@ -975,7 +981,7 @@ func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error) errs = append(errs, err) continue } - if inWorkspaceMode() && !strings.HasPrefix(cfg.CmdName, "work ") { + if inWorkspaceMode(loaderstate) && !strings.HasPrefix(cfg.CmdName, "work ") { // Refuse to use workspace if its go version is too old. // Disable this check if we are a workspace command like work use or work sync, // which will fix the problem. @@ -987,7 +993,7 @@ func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error) } } - if !inWorkspaceMode() { + if !inWorkspaceMode(loaderstate) { ok := true for _, g := range f.Godebug { if err := CheckGodebug("godebug", g.Key, g.Value); err != nil { @@ -1016,45 +1022,45 @@ func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error) return nil, errors.Join(errs...) 
} - MainModules = makeMainModules(mainModules, modRoots, modFiles, indices, workFile) - setDefaultBuildMod() // possibly enable automatic vendoring - rs := requirementsFromModFiles(ctx, workFile, modFiles, opts) + loaderstate.MainModules = makeMainModules(loaderstate, mainModules, loaderstate.modRoots, modFiles, indices, workFile) + setDefaultBuildMod(loaderstate) // possibly enable automatic vendoring + rs := requirementsFromModFiles(loaderstate, ctx, workFile, modFiles, opts) if cfg.BuildMod == "vendor" { - readVendorList(VendorDir()) - versions := MainModules.Versions() + readVendorList(VendorDir(loaderstate)) + versions := loaderstate.MainModules.Versions() indexes := make([]*modFileIndex, 0, len(versions)) modFiles := make([]*modfile.File, 0, len(versions)) modRoots := make([]string, 0, len(versions)) for _, m := range versions { - indexes = append(indexes, MainModules.Index(m)) - modFiles = append(modFiles, MainModules.ModFile(m)) - modRoots = append(modRoots, MainModules.ModRoot(m)) + indexes = append(indexes, loaderstate.MainModules.Index(m)) + modFiles = append(modFiles, loaderstate.MainModules.ModFile(m)) + modRoots = append(modRoots, loaderstate.MainModules.ModRoot(m)) } - checkVendorConsistency(indexes, modFiles, modRoots) - rs.initVendor(vendorList) + checkVendorConsistency(loaderstate, indexes, modFiles, modRoots) + rs.initVendor(loaderstate, vendorList) } - if inWorkspaceMode() { + if inWorkspaceMode(loaderstate) { // We don't need to update the mod file so return early. - requirements = rs + loaderstate.requirements = rs return rs, nil } - mainModule := MainModules.mustGetSingleMainModule() + mainModule := loaderstate.MainModules.mustGetSingleMainModule(loaderstate) - if rs.hasRedundantRoot() { + if rs.hasRedundantRoot(loaderstate) { // If any module path appears more than once in the roots, we know that the // go.mod file needs to be updated even though we have not yet loaded any // transitive dependencies. var err error - rs, err = updateRoots(ctx, rs.direct, rs, nil, nil, false) + rs, err = updateRoots(loaderstate, ctx, rs.direct, rs, nil, nil, false) if err != nil { return nil, err } } - if MainModules.Index(mainModule).goVersion == "" && rs.pruning != workspace { + if loaderstate.MainModules.Index(mainModule).goVersion == "" && rs.pruning != workspace { // TODO(#45551): Do something more principled instead of checking // cfg.CmdName directly here. if cfg.BuildMod == "mod" && cfg.CmdName != "mod graph" && cfg.CmdName != "mod why" { @@ -1063,8 +1069,8 @@ func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error) if opts != nil && opts.TidyGoVersion != "" { v = opts.TidyGoVersion } - addGoStmt(MainModules.ModFile(mainModule), mainModule, v) - rs = overrideRoots(ctx, rs, []module.Version{{Path: "go", Version: v}}) + addGoStmt(loaderstate.MainModules.ModFile(mainModule), mainModule, v) + rs = overrideRoots(loaderstate, ctx, rs, []module.Version{{Path: "go", Version: v}}) // We need to add a 'go' version to the go.mod file, but we must assume // that its existing contents match something between Go 1.11 and 1.16. @@ -1073,7 +1079,7 @@ func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error) // requirements to support pruning. 
if gover.Compare(v, gover.ExplicitIndirectVersion) >= 0 { var err error - rs, err = convertPruning(ctx, rs, pruned) + rs, err = convertPruning(loaderstate, ctx, rs, pruned) if err != nil { return nil, err } @@ -1083,8 +1089,8 @@ func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error) } } - requirements = rs - return requirements, nil + loaderstate.requirements = rs + return loaderstate.requirements, nil } func errWorkTooOld(gomod string, wf *modfile.WorkFile, goVers string) error { @@ -1119,8 +1125,8 @@ func CheckReservedModulePath(path string) error { // packages at multiple versions from the same module). func CreateModFile(ctx context.Context, modPath string) { modRoot := base.Cwd() - modRoots = []string{modRoot} - Init() + LoaderState.modRoots = []string{modRoot} + Init(LoaderState) modFilePath := modFilePath(modRoot) if _, err := fsys.Stat(modFilePath); err == nil { base.Fatalf("go: %s already exists", modFilePath) @@ -1156,16 +1162,16 @@ func CreateModFile(ctx context.Context, modPath string) { fmt.Fprintf(os.Stderr, "go: creating new go.mod: module %s\n", modPath) modFile := new(modfile.File) modFile.AddModuleStmt(modPath) - MainModules = makeMainModules([]module.Version{modFile.Module.Mod}, []string{modRoot}, []*modfile.File{modFile}, []*modFileIndex{nil}, nil) + LoaderState.MainModules = makeMainModules(LoaderState, []module.Version{modFile.Module.Mod}, []string{modRoot}, []*modfile.File{modFile}, []*modFileIndex{nil}, nil) addGoStmt(modFile, modFile.Module.Mod, gover.Local()) // Add the go directive before converted module requirements. - rs := requirementsFromModFiles(ctx, nil, []*modfile.File{modFile}, nil) - rs, err := updateRoots(ctx, rs.direct, rs, nil, nil, false) + rs := requirementsFromModFiles(LoaderState, ctx, nil, []*modfile.File{modFile}, nil) + rs, err := updateRoots(LoaderState, ctx, rs.direct, rs, nil, nil, false) if err != nil { base.Fatal(err) } - requirements = rs - if err := commitRequirements(ctx, WriteOpts{}); err != nil { + LoaderState.requirements = rs + if err := commitRequirements(LoaderState, ctx, WriteOpts{}); err != nil { base.Fatal(err) } @@ -1200,7 +1206,7 @@ func CreateModFile(ctx context.Context, modPath string) { // and does nothing for versions that already appear to be canonical. // // The VersionFixer sets 'fixed' if it ever returns a non-canonical version. -func fixVersion(ctx context.Context, fixed *bool) modfile.VersionFixer { +func fixVersion(loaderstate *State, ctx context.Context, fixed *bool) modfile.VersionFixer { return func(path, vers string) (resolved string, err error) { defer func() { if err == nil && resolved != vers { @@ -1233,7 +1239,7 @@ func fixVersion(ctx context.Context, fixed *bool) modfile.VersionFixer { return vers, nil } - info, err := Query(ctx, path, vers, "", nil) + info, err := Query(loaderstate, ctx, path, vers, "", nil) if err != nil { return "", err } @@ -1248,8 +1254,8 @@ func fixVersion(ctx context.Context, fixed *bool) modfile.VersionFixer { // // This function affects the default cfg.BuildMod when outside of a module, // so it can only be called prior to Init. -func AllowMissingModuleImports() { - if initialized { +func AllowMissingModuleImports(loaderstate *State) { + if loaderstate.initialized { panic("AllowMissingModuleImports after Init") } allowMissingModuleImports = true @@ -1257,7 +1263,7 @@ func AllowMissingModuleImports() { // makeMainModules creates a MainModuleSet and associated variables according to // the given main modules. 
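fixVersion above returns a modfile.VersionFixer closure that resolves non-canonical versions and flips a flag so the caller knows the go.mod file needs rewriting. The following is a minimal sketch of that closure-plus-flag shape; it uses golang.org/x/mod/semver for a purely local canonicalization step instead of the real module query, so it is an illustration of the pattern rather than of fixVersion itself.

```go
package main

import (
	"fmt"

	"golang.org/x/mod/modfile"
	"golang.org/x/mod/semver"
)

// canonicalizingFixer returns a modfile.VersionFixer that rewrites versions
// into canonical semver form and records in *fixed whether it changed
// anything. The real fixVersion resolves queries (commit hashes, "latest",
// and so on) against the module source; this sketch only normalizes what
// semver can handle locally.
func canonicalizingFixer(fixed *bool) modfile.VersionFixer {
	return func(path, vers string) (string, error) {
		c := semver.Canonical(vers)
		if c == "" {
			return "", fmt.Errorf("%s@%s: not a recognizable version", path, vers)
		}
		if c != vers {
			*fixed = true
		}
		return c, nil
	}
}

func main() {
	var fixed bool
	fix := canonicalizingFixer(&fixed)
	v, err := fix("example.com/m", "v1.2")
	fmt.Println(v, err, fixed) // v1.2.0 <nil> true
}
```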
-func makeMainModules(ms []module.Version, rootDirs []string, modFiles []*modfile.File, indices []*modFileIndex, workFile *modfile.WorkFile) *MainModuleSet { +func makeMainModules(loaderstate *State, ms []module.Version, rootDirs []string, modFiles []*modfile.File, indices []*modFileIndex, workFile *modfile.WorkFile) *MainModuleSet { for _, m := range ms { if m.Version != "" { panic("mainModulesCalled with module.Version with non empty Version field: " + fmt.Sprintf("%#v", m)) @@ -1332,7 +1338,7 @@ func makeMainModules(ms []module.Version, rootDirs []string, modFiles []*modfile continue } var newV module.Version = r.New - if WorkFilePath() != "" && newV.Version == "" && !filepath.IsAbs(newV.Path) { + if WorkFilePath(loaderstate) != "" && newV.Version == "" && !filepath.IsAbs(newV.Path) { // Since we are in a workspace, we may be loading replacements from // multiple go.mod files. Relative paths in those replacement are // relative to the go.mod file, not the workspace, so the same string @@ -1374,14 +1380,14 @@ func makeMainModules(ms []module.Version, rootDirs []string, modFiles []*modfile // requirementsFromModFiles returns the set of non-excluded requirements from // the global modFile. -func requirementsFromModFiles(ctx context.Context, workFile *modfile.WorkFile, modFiles []*modfile.File, opts *PackageOpts) *Requirements { +func requirementsFromModFiles(loaderstate *State, ctx context.Context, workFile *modfile.WorkFile, modFiles []*modfile.File, opts *PackageOpts) *Requirements { var roots []module.Version direct := map[string]bool{} var pruning modPruning - if inWorkspaceMode() { + if inWorkspaceMode(loaderstate) { pruning = workspace - roots = make([]module.Version, len(MainModules.Versions()), 2+len(MainModules.Versions())) - copy(roots, MainModules.Versions()) + roots = make([]module.Version, len(loaderstate.MainModules.Versions()), 2+len(loaderstate.MainModules.Versions())) + copy(roots, loaderstate.MainModules.Versions()) goVersion := gover.FromGoWork(workFile) var toolchain string if workFile.Toolchain != nil { @@ -1390,16 +1396,16 @@ func requirementsFromModFiles(ctx context.Context, workFile *modfile.WorkFile, m roots = appendGoAndToolchainRoots(roots, goVersion, toolchain, direct) direct = directRequirements(modFiles) } else { - pruning = pruningForGoVersion(MainModules.GoVersion()) + pruning = pruningForGoVersion(loaderstate.MainModules.GoVersion(loaderstate)) if len(modFiles) != 1 { panic(fmt.Errorf("requirementsFromModFiles called with %v modfiles outside workspace mode", len(modFiles))) } modFile := modFiles[0] - roots, direct = rootsFromModFile(MainModules.mustGetSingleMainModule(), modFile, withToolchainRoot) + roots, direct = rootsFromModFile(loaderstate, loaderstate.MainModules.mustGetSingleMainModule(loaderstate), modFile, withToolchainRoot) } gover.ModSort(roots) - rs := newRequirements(pruning, roots, direct) + rs := newRequirements(loaderstate, pruning, roots, direct) return rs } @@ -1422,7 +1428,7 @@ func directRequirements(modFiles []*modfile.File) map[string]bool { return direct } -func rootsFromModFile(m module.Version, modFile *modfile.File, addToolchainRoot addToolchainRoot) (roots []module.Version, direct map[string]bool) { +func rootsFromModFile(loaderstate *State, m module.Version, modFile *modfile.File, addToolchainRoot addToolchainRoot) (roots []module.Version, direct map[string]bool) { direct = make(map[string]bool) padding := 2 // Add padding for the toolchain and go version, added upon return. 
if !addToolchainRoot { @@ -1430,7 +1436,7 @@ func rootsFromModFile(m module.Version, modFile *modfile.File, addToolchainRoot } roots = make([]module.Version, 0, padding+len(modFile.Require)) for _, r := range modFile.Require { - if index := MainModules.Index(m); index != nil && index.exclude[r.Mod] { + if index := loaderstate.MainModules.Index(m); index != nil && index.exclude[r.Mod] { if cfg.BuildMod == "mod" { fmt.Fprintf(os.Stderr, "go: dropping requirement on excluded version %s %s\n", r.Mod.Path, r.Mod.Version) } else { @@ -1471,9 +1477,9 @@ func appendGoAndToolchainRoots(roots []module.Version, goVersion, toolchain stri // setDefaultBuildMod sets a default value for cfg.BuildMod if the -mod flag // wasn't provided. setDefaultBuildMod may be called multiple times. -func setDefaultBuildMod() { +func setDefaultBuildMod(loaderstate *State) { if cfg.BuildModExplicit { - if inWorkspaceMode() && cfg.BuildMod != "readonly" && cfg.BuildMod != "vendor" { + if inWorkspaceMode(loaderstate) && cfg.BuildMod != "readonly" && cfg.BuildMod != "vendor" { switch cfg.CmdName { case "work sync", "mod graph", "mod verify", "mod why": // These commands run with BuildMod set to mod, but they don't take the @@ -1508,7 +1514,7 @@ func setDefaultBuildMod() { cfg.BuildMod = "readonly" return } - if modRoots == nil { + if loaderstate.modRoots == nil { if allowMissingModuleImports { cfg.BuildMod = "mod" } else { @@ -1517,29 +1523,29 @@ func setDefaultBuildMod() { return } - if len(modRoots) >= 1 { + if len(loaderstate.modRoots) >= 1 { var goVersion string var versionSource string - if inWorkspaceMode() { + if inWorkspaceMode(loaderstate) { versionSource = "go.work" - if wfg := MainModules.WorkFile().Go; wfg != nil { + if wfg := loaderstate.MainModules.WorkFile().Go; wfg != nil { goVersion = wfg.Version } } else { versionSource = "go.mod" - index := MainModules.GetSingleIndexOrNil() + index := loaderstate.MainModules.GetSingleIndexOrNil(loaderstate) if index != nil { goVersion = index.goVersion } } vendorDir := "" - if workFilePath != "" { - vendorDir = filepath.Join(filepath.Dir(workFilePath), "vendor") + if loaderstate.workFilePath != "" { + vendorDir = filepath.Join(filepath.Dir(loaderstate.workFilePath), "vendor") } else { - if len(modRoots) != 1 { - panic(fmt.Errorf("outside workspace mode, but have %v modRoots", modRoots)) + if len(loaderstate.modRoots) != 1 { + panic(fmt.Errorf("outside workspace mode, but have %v modRoots", loaderstate.modRoots)) } - vendorDir = filepath.Join(modRoots[0], "vendor") + vendorDir = filepath.Join(loaderstate.modRoots[0], "vendor") } if fi, err := fsys.Stat(vendorDir); err == nil && fi.IsDir() { if goVersion != "" { @@ -1607,8 +1613,8 @@ func modulesTextIsForWorkspace(vendorDir string) (bool, error) { return false, nil } -func mustHaveCompleteRequirements() bool { - return cfg.BuildMod != "mod" && !inWorkspaceMode() +func mustHaveCompleteRequirements(loaderstate *State) bool { + return cfg.BuildMod != "mod" && !inWorkspaceMode(loaderstate) } // addGoStmt adds a go directive to the go.mod file if it does not already @@ -1803,21 +1809,21 @@ type WriteOpts struct { // WriteGoMod writes the current build list back to go.mod. 
func WriteGoMod(ctx context.Context, opts WriteOpts) error { - requirements = LoadModFile(ctx) - return commitRequirements(ctx, opts) + LoaderState.requirements = LoadModFile(LoaderState, ctx) + return commitRequirements(LoaderState, ctx, opts) } var errNoChange = errors.New("no update needed") // UpdateGoModFromReqs returns a modified go.mod file using the current // requirements. It does not commit these changes to disk. -func UpdateGoModFromReqs(ctx context.Context, opts WriteOpts) (before, after []byte, modFile *modfile.File, err error) { - if MainModules.Len() != 1 || MainModules.ModRoot(MainModules.Versions()[0]) == "" { +func UpdateGoModFromReqs(loaderstate *State, ctx context.Context, opts WriteOpts) (before, after []byte, modFile *modfile.File, err error) { + if loaderstate.MainModules.Len() != 1 || loaderstate.MainModules.ModRoot(loaderstate.MainModules.Versions()[0]) == "" { // We aren't in a module, so we don't have anywhere to write a go.mod file. return nil, nil, nil, errNoChange } - mainModule := MainModules.mustGetSingleMainModule() - modFile = MainModules.ModFile(mainModule) + mainModule := loaderstate.MainModules.mustGetSingleMainModule(loaderstate) + modFile = loaderstate.MainModules.ModFile(mainModule) if modFile == nil { // command-line-arguments has no .mod file to write. return nil, nil, nil, errNoChange @@ -1830,7 +1836,7 @@ func UpdateGoModFromReqs(ctx context.Context, opts WriteOpts) (before, after []b var list []*modfile.Require toolchain := "" goVersion := "" - for _, m := range requirements.rootModules { + for _, m := range loaderstate.requirements.rootModules { if m.Path == "go" { goVersion = m.Version continue @@ -1841,7 +1847,7 @@ func UpdateGoModFromReqs(ctx context.Context, opts WriteOpts) (before, after []b } list = append(list, &modfile.Require{ Mod: m, - Indirect: !requirements.direct[m.Path], + Indirect: !loaderstate.requirements.direct[m.Path], }) } @@ -1911,13 +1917,13 @@ func UpdateGoModFromReqs(ctx context.Context, opts WriteOpts) (before, after []b // go.mod or go.sum are out of date in a semantically significant way. // // In workspace mode, commitRequirements only writes changes to go.work.sum. -func commitRequirements(ctx context.Context, opts WriteOpts) (err error) { - if inWorkspaceMode() { +func commitRequirements(loaderstate *State, ctx context.Context, opts WriteOpts) (err error) { + if inWorkspaceMode(loaderstate) { // go.mod files aren't updated in workspace mode, but we still want to // update the go.work.sum file. 
- return modfetch.WriteGoSum(ctx, keepSums(ctx, loaded, requirements, addBuildListZipSums), mustHaveCompleteRequirements()) + return modfetch.WriteGoSum(ctx, keepSums(loaderstate, ctx, loaded, loaderstate.requirements, addBuildListZipSums), mustHaveCompleteRequirements(loaderstate)) } - _, updatedGoMod, modFile, err := UpdateGoModFromReqs(ctx, opts) + _, updatedGoMod, modFile, err := UpdateGoModFromReqs(loaderstate, ctx, opts) if err != nil { if errors.Is(err, errNoChange) { return nil @@ -1925,7 +1931,7 @@ func commitRequirements(ctx context.Context, opts WriteOpts) (err error) { return err } - index := MainModules.GetSingleIndexOrNil() + index := loaderstate.MainModules.GetSingleIndexOrNil(loaderstate) dirty := index.modFileIsDirty(modFile) || len(opts.DropTools) > 0 || len(opts.AddTools) > 0 if dirty && cfg.BuildMod != "mod" { // If we're about to fail due to -mod=readonly, @@ -1939,15 +1945,15 @@ func commitRequirements(ctx context.Context, opts WriteOpts) (err error) { // Don't write go.mod, but write go.sum in case we added or trimmed sums. // 'go mod init' shouldn't write go.sum, since it will be incomplete. if cfg.CmdName != "mod init" { - if err := modfetch.WriteGoSum(ctx, keepSums(ctx, loaded, requirements, addBuildListZipSums), mustHaveCompleteRequirements()); err != nil { + if err := modfetch.WriteGoSum(ctx, keepSums(loaderstate, ctx, loaded, loaderstate.requirements, addBuildListZipSums), mustHaveCompleteRequirements(loaderstate)); err != nil { return err } } return nil } - mainModule := MainModules.mustGetSingleMainModule() - modFilePath := modFilePath(MainModules.ModRoot(mainModule)) + mainModule := loaderstate.MainModules.mustGetSingleMainModule(loaderstate) + modFilePath := modFilePath(loaderstate.MainModules.ModRoot(mainModule)) if fsys.Replaced(modFilePath) { if dirty { return errors.New("updates to go.mod needed, but go.mod is part of the overlay specified with -overlay") @@ -1956,13 +1962,13 @@ func commitRequirements(ctx context.Context, opts WriteOpts) (err error) { } defer func() { // At this point we have determined to make the go.mod file on disk equal to new. - MainModules.SetIndex(mainModule, indexModFile(updatedGoMod, modFile, mainModule, false)) + loaderstate.MainModules.SetIndex(mainModule, indexModFile(updatedGoMod, modFile, mainModule, false)) // Update go.sum after releasing the side lock and refreshing the index. // 'go mod init' shouldn't write go.sum, since it will be incomplete. if cfg.CmdName != "mod init" { if err == nil { - err = modfetch.WriteGoSum(ctx, keepSums(ctx, loaded, requirements, addBuildListZipSums), mustHaveCompleteRequirements()) + err = modfetch.WriteGoSum(ctx, keepSums(loaderstate, ctx, loaded, loaderstate.requirements, addBuildListZipSums), mustHaveCompleteRequirements(loaderstate)) } } }() @@ -2005,7 +2011,7 @@ func commitRequirements(ctx context.Context, opts WriteOpts) (err error) { // including any go.mod files needed to reconstruct the MVS result // or identify go versions, // in addition to the checksums for every module in keepMods. 
-func keepSums(ctx context.Context, ld *loader, rs *Requirements, which whichSums) map[module.Version]bool { +func keepSums(loaderstate *State, ctx context.Context, ld *loader, rs *Requirements, which whichSums) map[module.Version]bool { // Every module in the full module graph contributes its requirements, // so in order to ensure that the build list itself is reproducible, // we need sums for every go.mod in the graph (regardless of whether @@ -2018,12 +2024,12 @@ func keepSums(ctx context.Context, ld *loader, rs *Requirements, which whichSums // ambiguous import errors the next time we load the package. keepModSumsForZipSums := true if ld == nil { - if gover.Compare(MainModules.GoVersion(), gover.TidyGoModSumVersion) < 0 && cfg.BuildMod != "mod" { + if gover.Compare(loaderstate.MainModules.GoVersion(loaderstate), gover.TidyGoModSumVersion) < 0 && cfg.BuildMod != "mod" { keepModSumsForZipSums = false } } else { keepPkgGoModSums := true - if gover.Compare(ld.requirements.GoVersion(), gover.TidyGoModSumVersion) < 0 && (ld.Tidy || cfg.BuildMod != "mod") { + if gover.Compare(ld.requirements.GoVersion(loaderstate), gover.TidyGoModSumVersion) < 0 && (ld.Tidy || cfg.BuildMod != "mod") { keepPkgGoModSums = false keepModSumsForZipSums = false } @@ -2041,21 +2047,21 @@ func keepSums(ctx context.Context, ld *loader, rs *Requirements, which whichSums // minor, so we maintain the previous (buggy) behavior in 'go mod tidy' to // avoid introducing unnecessary churn. if keepPkgGoModSums { - r := resolveReplacement(pkg.mod) + r := resolveReplacement(loaderstate, pkg.mod) keep[modkey(r)] = true } if rs.pruning == pruned && pkg.mod.Path != "" { - if v, ok := rs.rootSelected(pkg.mod.Path); ok && v == pkg.mod.Version { + if v, ok := rs.rootSelected(loaderstate, pkg.mod.Path); ok && v == pkg.mod.Version { // pkg was loaded from a root module, and because the main module has // a pruned module graph we do not check non-root modules for // conflicts for packages that can be found in roots. So we only need // the checksums for the root modules that may contain pkg, not all // possible modules. for prefix := pkg.path; prefix != "."; prefix = path.Dir(prefix) { - if v, ok := rs.rootSelected(prefix); ok && v != "none" { + if v, ok := rs.rootSelected(loaderstate, prefix); ok && v != "none" { m := module.Version{Path: prefix, Version: v} - r := resolveReplacement(m) + r := resolveReplacement(loaderstate, m) keep[r] = true } } @@ -2063,11 +2069,11 @@ func keepSums(ctx context.Context, ld *loader, rs *Requirements, which whichSums } } - mg, _ := rs.Graph(ctx) + mg, _ := rs.Graph(loaderstate, ctx) for prefix := pkg.path; prefix != "."; prefix = path.Dir(prefix) { if v := mg.Selected(prefix); v != "none" { m := module.Version{Path: prefix, Version: v} - r := resolveReplacement(m) + r := resolveReplacement(loaderstate, m) keep[r] = true } } @@ -2079,27 +2085,27 @@ func keepSums(ctx context.Context, ld *loader, rs *Requirements, which whichSums // Save sums for the root modules (or their replacements), but don't // incur the cost of loading the graph just to find and retain the sums. 
for _, m := range rs.rootModules { - r := resolveReplacement(m) + r := resolveReplacement(loaderstate, m) keep[modkey(r)] = true if which == addBuildListZipSums { keep[r] = true } } } else { - mg, _ := rs.Graph(ctx) + mg, _ := rs.Graph(loaderstate, ctx) mg.WalkBreadthFirst(func(m module.Version) { if _, ok := mg.RequiredBy(m); ok { // The requirements from m's go.mod file are present in the module graph, // so they are relevant to the MVS result regardless of whether m was // actually selected. - r := resolveReplacement(m) + r := resolveReplacement(loaderstate, m) keep[modkey(r)] = true } }) if which == addBuildListZipSums { for _, m := range mg.BuildList() { - r := resolveReplacement(m) + r := resolveReplacement(loaderstate, m) if keepModSumsForZipSums { keep[modkey(r)] = true // we need the go version from the go.mod file to do anything useful with the zipfile } diff --git a/src/cmd/go/internal/modload/list.go b/src/cmd/go/internal/modload/list.go index 53cb6c2ffe1406..bd28d7596e160a 100644 --- a/src/cmd/go/internal/modload/list.go +++ b/src/cmd/go/internal/modload/list.go @@ -69,7 +69,7 @@ func ListModules(ctx context.Context, args []string, mode ListMode, reuseFile st } } - rs, mods, err := listModules(ctx, LoadModFile(ctx), args, mode, reuse) + rs, mods, err := listModules(ctx, LoadModFile(LoaderState, ctx), args, mode, reuse) type token struct{} sem := make(chan token, runtime.GOMAXPROCS(0)) @@ -88,7 +88,7 @@ func ListModules(ctx context.Context, args []string, mode ListMode, reuseFile st addVersions(ctx, m, mode&ListRetractedVersions != 0) } if mode&ListRetracted != 0 { - addRetraction(ctx, m) + addRetraction(LoaderState, ctx, m) } if mode&ListDeprecated != 0 { addDeprecation(ctx, m) @@ -109,7 +109,7 @@ func ListModules(ctx context.Context, args []string, mode ListMode, reuseFile st } if err == nil { - requirements = rs + LoaderState.requirements = rs // TODO(#61605): The extra ListU clause fixes a problem with Go 1.21rc3 // where "go mod tidy" and "go list -m -u all" fight over whether the go.sum // should be considered up-to-date. The fix for now is to always treat the @@ -117,7 +117,7 @@ func ListModules(ctx context.Context, args []string, mode ListMode, reuseFile st // but in general list -u is looking up other checksums in the checksum database // that won't be necessary later, so it makes sense not to write the go.sum back out. 
if !ExplicitWriteGoMod && mode&ListU == 0 { - err = commitRequirements(ctx, WriteOpts{}) + err = commitRequirements(LoaderState, ctx, WriteOpts{}) } } return mods, err @@ -126,11 +126,11 @@ func ListModules(ctx context.Context, args []string, mode ListMode, reuseFile st func listModules(ctx context.Context, rs *Requirements, args []string, mode ListMode, reuse map[module.Version]*modinfo.ModulePublic) (_ *Requirements, mods []*modinfo.ModulePublic, mgErr error) { if len(args) == 0 { var ms []*modinfo.ModulePublic - for _, m := range MainModules.Versions() { + for _, m := range LoaderState.MainModules.Versions() { if gover.IsToolchain(m.Path) { continue } - ms = append(ms, moduleInfo(ctx, rs, m, mode, reuse)) + ms = append(ms, moduleInfo(LoaderState, ctx, rs, m, mode, reuse)) } return rs, ms, nil } @@ -145,25 +145,25 @@ func listModules(ctx context.Context, rs *Requirements, args []string, mode List } if arg == "all" || strings.Contains(arg, "...") { needFullGraph = true - if !HasModRoot() { + if !HasModRoot(LoaderState) { base.Fatalf("go: cannot match %q: %v", arg, ErrNoModRoot) } continue } if path, vers, found := strings.Cut(arg, "@"); found { if vers == "upgrade" || vers == "patch" { - if _, ok := rs.rootSelected(path); !ok || rs.pruning == unpruned { + if _, ok := rs.rootSelected(LoaderState, path); !ok || rs.pruning == unpruned { needFullGraph = true - if !HasModRoot() { + if !HasModRoot(LoaderState) { base.Fatalf("go: cannot match %q: %v", arg, ErrNoModRoot) } } } continue } - if _, ok := rs.rootSelected(arg); !ok || rs.pruning == unpruned { + if _, ok := rs.rootSelected(LoaderState, arg); !ok || rs.pruning == unpruned { needFullGraph = true - if mode&ListVersions == 0 && !HasModRoot() { + if mode&ListVersions == 0 && !HasModRoot(LoaderState) { base.Fatalf("go: cannot match %q without -versions or an explicit version: %v", arg, ErrNoModRoot) } } @@ -171,7 +171,7 @@ func listModules(ctx context.Context, rs *Requirements, args []string, mode List var mg *ModuleGraph if needFullGraph { - rs, mg, mgErr = expandGraph(ctx, rs) + rs, mg, mgErr = expandGraph(LoaderState, ctx, rs) } matchedModule := map[module.Version]bool{} @@ -179,7 +179,7 @@ func listModules(ctx context.Context, rs *Requirements, args []string, mode List if path, vers, found := strings.Cut(arg, "@"); found { var current string if mg == nil { - current, _ = rs.rootSelected(path) + current, _ = rs.rootSelected(LoaderState, path) } else { current = mg.Selected(path) } @@ -198,7 +198,7 @@ func listModules(ctx context.Context, rs *Requirements, args []string, mode List // specific revision or used 'go list -retracted'. allowed = nil } - info, err := queryReuse(ctx, path, vers, current, allowed, reuse) + info, err := queryReuse(LoaderState, ctx, path, vers, current, allowed, reuse) if err != nil { var origin *codehost.Origin if info != nil { @@ -217,7 +217,7 @@ func listModules(ctx context.Context, rs *Requirements, args []string, mode List // *Requirements instead. 
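
The common thread in these hunks is mechanical: package-level loader state (MainModules, requirements, modRoots, workFilePath) moves into a State value that is passed explicitly, with a package-level LoaderState retained for entry points that still rely on the old globals. The sketch below summarizes the shape of that refactor; the stub types and the body of inWorkspaceMode are simplified stand-ins, not copies of the real cmd/go/internal/modload code.

```go
// Package modload (sketch): the field set and the inWorkspaceMode body below
// are illustrative stand-ins, not the real cmd/go/internal/modload definitions.
package modload

type MainModuleSet struct{} // stub for illustration
type Requirements struct{}  // stub for illustration

// State gathers what used to be package-level variables.
type State struct {
	MainModules  *MainModuleSet
	requirements *Requirements
	modRoots     []string
	workFilePath string
}

// LoaderState keeps a package-level instance so that exported entry points
// such as WriteGoMod can keep their signatures while internal helpers take
// an explicit *State.
var LoaderState = new(State)

// Before: func inWorkspaceMode() bool, reading a global.
// After: the caller supplies the state it wants to query.
// (Checking workFilePath here is a simplification of the real logic.)
func inWorkspaceMode(loaderstate *State) bool {
	return loaderstate.workFilePath != ""
}
```
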
var noRS *Requirements - mod := moduleInfo(ctx, noRS, module.Version{Path: path, Version: info.Version}, mode, reuse) + mod := moduleInfo(LoaderState, ctx, noRS, module.Version{Path: path, Version: info.Version}, mode, reuse) if vers != mod.Version { mod.Query = vers } @@ -237,7 +237,7 @@ func listModules(ctx context.Context, rs *Requirements, args []string, mode List var v string if mg == nil { var ok bool - v, ok = rs.rootSelected(arg) + v, ok = rs.rootSelected(LoaderState, arg) if !ok { // We checked rootSelected(arg) in the earlier args loop, so if there // is no such root we should have loaded a non-nil mg. @@ -251,7 +251,7 @@ func listModules(ctx context.Context, rs *Requirements, args []string, mode List continue } if v != "none" { - mods = append(mods, moduleInfo(ctx, rs, module.Version{Path: arg, Version: v}, mode, reuse)) + mods = append(mods, moduleInfo(LoaderState, ctx, rs, module.Version{Path: arg, Version: v}, mode, reuse)) } else if cfg.BuildMod == "vendor" { // In vendor mode, we can't determine whether a missing module is “a // known dependency” because the module graph is incomplete. @@ -292,7 +292,7 @@ func listModules(ctx context.Context, rs *Requirements, args []string, mode List fetchedMods := make([]*modinfo.ModulePublic, len(matches)) for i, m := range matches { q.Add(func() { - fetchedMods[i] = moduleInfo(ctx, rs, m, mode, reuse) + fetchedMods[i] = moduleInfo(LoaderState, ctx, rs, m, mode, reuse) }) } <-q.Idle() @@ -305,13 +305,11 @@ func listModules(ctx context.Context, rs *Requirements, args []string, mode List // modinfoError wraps an error to create an error message in // modinfo.ModuleError with minimal redundancy. func modinfoError(path, vers string, err error) *modinfo.ModuleError { - var nerr *NoMatchingVersionError - var merr *module.ModuleError - if errors.As(err, &nerr) { + if _, ok := errors.AsType[*NoMatchingVersionError](err); ok { // NoMatchingVersionError contains the query, so we don't mention the // query again in ModuleError. err = &module.ModuleError{Path: path, Err: err} - } else if !errors.As(err, &merr) { + } else if _, ok := errors.AsType[*module.ModuleError](err); !ok { // If the error does not contain path and version, wrap it in a // module.ModuleError. err = &module.ModuleError{Path: path, Version: vers, Err: err} diff --git a/src/cmd/go/internal/modload/load.go b/src/cmd/go/internal/modload/load.go index 8b2be3b300e9e1..ad3b80bfd954aa 100644 --- a/src/cmd/go/internal/modload/load.go +++ b/src/cmd/go/internal/modload/load.go @@ -250,7 +250,7 @@ type PackageOpts struct { // LoadPackages identifies the set of packages matching the given patterns and // loads the packages in the import graph rooted at that set. -func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (matches []*search.Match, loadedPackages []string) { +func LoadPackages(loaderstate *State, ctx context.Context, opts PackageOpts, patterns ...string) (matches []*search.Match, loadedPackages []string) { if opts.Tags == nil { opts.Tags = imports.Tags() } @@ -271,11 +271,11 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma case m.IsLocal(): // Evaluate list of file system directories on first iteration. 
if m.Dirs == nil { - matchModRoots := modRoots + matchModRoots := loaderstate.modRoots if opts.MainModule != (module.Version{}) { - matchModRoots = []string{MainModules.ModRoot(opts.MainModule)} + matchModRoots = []string{loaderstate.MainModules.ModRoot(opts.MainModule)} } - matchLocalDirs(ctx, matchModRoots, m, rs) + matchLocalDirs(loaderstate, ctx, matchModRoots, m, rs) } // Make a copy of the directory list and translate to import paths. @@ -286,7 +286,7 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma // the loader iterations. m.Pkgs = m.Pkgs[:0] for _, dir := range m.Dirs { - pkg, err := resolveLocalPackage(ctx, dir, rs) + pkg, err := resolveLocalPackage(loaderstate, ctx, dir, rs) if err != nil { if !m.IsLiteral() && (err == errPkgIsBuiltin || err == errPkgIsGorootSrc) { continue // Don't include "builtin" or GOROOT/src in wildcard patterns. @@ -294,8 +294,8 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma // If we're outside of a module, ensure that the failure mode // indicates that. - if !HasModRoot() { - die() + if !HasModRoot(loaderstate) { + die(loaderstate) } if ld != nil { @@ -311,7 +311,7 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma case strings.Contains(m.Pattern(), "..."): m.Errs = m.Errs[:0] - mg, err := rs.Graph(ctx) + mg, err := rs.Graph(loaderstate, ctx) if err != nil { // The module graph is (or may be) incomplete — perhaps we failed to // load the requirements of some module. This is an error in matching @@ -321,26 +321,26 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma // necessarily prevent us from loading the packages we could find. m.Errs = append(m.Errs, err) } - matchPackages(ctx, m, opts.Tags, includeStd, mg.BuildList()) + matchPackages(loaderstate, ctx, m, opts.Tags, includeStd, mg.BuildList()) case m.Pattern() == "work": - matchModules := MainModules.Versions() + matchModules := loaderstate.MainModules.Versions() if opts.MainModule != (module.Version{}) { matchModules = []module.Version{opts.MainModule} } - matchPackages(ctx, m, opts.Tags, omitStd, matchModules) + matchPackages(loaderstate, ctx, m, opts.Tags, omitStd, matchModules) case m.Pattern() == "all": if ld == nil { // The initial roots are the packages and tools in the main module. // loadFromRoots will expand that to "all". 
m.Errs = m.Errs[:0] - matchModules := MainModules.Versions() + matchModules := loaderstate.MainModules.Versions() if opts.MainModule != (module.Version{}) { matchModules = []module.Version{opts.MainModule} } - matchPackages(ctx, m, opts.Tags, omitStd, matchModules) - for tool := range MainModules.Tools() { + matchPackages(loaderstate, ctx, m, opts.Tags, omitStd, matchModules) + for tool := range loaderstate.MainModules.Tools() { m.Pkgs = append(m.Pkgs, tool) } } else { @@ -355,7 +355,7 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma } case m.Pattern() == "tool": - for tool := range MainModules.Tools() { + for tool := range loaderstate.MainModules.Tools() { m.Pkgs = append(m.Pkgs, tool) } default: @@ -364,12 +364,12 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma } } - initialRS, err := loadModFile(ctx, &opts) + initialRS, err := loadModFile(loaderstate, ctx, &opts) if err != nil { base.Fatal(err) } - ld := loadFromRoots(ctx, loaderParams{ + ld := loadFromRoots(loaderstate, ctx, loaderParams{ PackageOpts: opts, requirements: initialRS, @@ -404,7 +404,7 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma if opts.Tidy { if cfg.BuildV { - mg, _ := ld.requirements.Graph(ctx) + mg, _ := ld.requirements.Graph(loaderstate, ctx) for _, m := range initialRS.rootModules { var unused bool if ld.requirements.pruning == unpruned { @@ -416,7 +416,7 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma // m is unused if it was dropped from the roots. If it is still present // as a transitive dependency, that transitive dependency is not needed // by any package or test in the main module. - _, ok := ld.requirements.rootSelected(m.Path) + _, ok := ld.requirements.rootSelected(loaderstate, m.Path) unused = !ok } if unused { @@ -425,9 +425,9 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma } } - keep := keepSums(ctx, ld, ld.requirements, loadedZipSumsOnly) + keep := keepSums(loaderstate, ctx, ld, ld.requirements, loadedZipSumsOnly) compatVersion := ld.TidyCompatibleVersion - goVersion := ld.requirements.GoVersion() + goVersion := ld.requirements.GoVersion(loaderstate) if compatVersion == "" { if gover.Compare(goVersion, gover.GoStrictVersion) < 0 { compatVersion = gover.Prev(goVersion) @@ -444,10 +444,10 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma compatVersion = goVersion } if compatPruning := pruningForGoVersion(compatVersion); compatPruning != ld.requirements.pruning { - compatRS := newRequirements(compatPruning, ld.requirements.rootModules, ld.requirements.direct) - ld.checkTidyCompatibility(ctx, compatRS, compatVersion) + compatRS := newRequirements(loaderstate, compatPruning, ld.requirements.rootModules, ld.requirements.direct) + ld.checkTidyCompatibility(loaderstate, ctx, compatRS, compatVersion) - for m := range keepSums(ctx, ld, compatRS, loadedZipSumsOnly) { + for m := range keepSums(loaderstate, ctx, ld, compatRS, loadedZipSumsOnly) { keep[m] = true } } @@ -455,8 +455,8 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma if opts.TidyDiff { cfg.BuildMod = "readonly" loaded = ld - requirements = loaded.requirements - currentGoMod, updatedGoMod, _, err := UpdateGoModFromReqs(ctx, WriteOpts{}) + loaderstate.requirements = loaded.requirements + currentGoMod, updatedGoMod, _, err := UpdateGoModFromReqs(loaderstate, ctx, WriteOpts{}) if err != nil { base.Fatal(err) } 
@@ -466,7 +466,7 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma // Dropping compatibility for 1.16 may result in a strictly smaller go.sum. // Update the keep map with only the loaded.requirements. if gover.Compare(compatVersion, "1.16") > 0 { - keep = keepSums(ctx, loaded, requirements, addBuildListZipSums) + keep = keepSums(loaderstate, ctx, loaded, loaderstate.requirements, addBuildListZipSums) } currentGoSum, tidyGoSum := modfetch.TidyGoSum(keep) goSumDiff := diff.Diff("current/go.sum", currentGoSum, "tidy/go.sum", tidyGoSum) @@ -490,7 +490,7 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma // loaded.requirements, but here we may have also loaded (and want to // preserve checksums for) additional entities from compatRS, which are // only needed for compatibility with ld.TidyCompatibleVersion. - if err := modfetch.WriteGoSum(ctx, keep, mustHaveCompleteRequirements()); err != nil { + if err := modfetch.WriteGoSum(ctx, keep, mustHaveCompleteRequirements(loaderstate)); err != nil { base.Fatal(err) } } @@ -505,7 +505,7 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma // to call WriteGoMod itself) or if ResolveMissingImports is false (the // command wants to examine the package graph as-is). loaded = ld - requirements = loaded.requirements + loaderstate.requirements = loaded.requirements for _, pkg := range ld.pkgs { if !pkg.isTest() { @@ -515,7 +515,7 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma sort.Strings(loadedPackages) if !ExplicitWriteGoMod && opts.ResolveMissingImports { - if err := commitRequirements(ctx, WriteOpts{}); err != nil { + if err := commitRequirements(loaderstate, ctx, WriteOpts{}); err != nil { base.Fatal(err) } } @@ -525,7 +525,7 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma // matchLocalDirs is like m.MatchDirs, but tries to avoid scanning directories // outside of the standard library and active modules. -func matchLocalDirs(ctx context.Context, modRoots []string, m *search.Match, rs *Requirements) { +func matchLocalDirs(loaderstate *State, ctx context.Context, modRoots []string, m *search.Match, rs *Requirements) { if !m.IsLocal() { panic(fmt.Sprintf("internal error: resolveLocalDirs on non-local pattern %s", m.Pattern())) } @@ -543,10 +543,10 @@ func matchLocalDirs(ctx context.Context, modRoots []string, m *search.Match, rs } modRoot := findModuleRoot(absDir) - if !slices.Contains(modRoots, modRoot) && search.InDir(absDir, cfg.GOROOTsrc) == "" && pathInModuleCache(ctx, absDir, rs) == "" { + if !slices.Contains(modRoots, modRoot) && search.InDir(absDir, cfg.GOROOTsrc) == "" && pathInModuleCache(loaderstate, ctx, absDir, rs) == "" { m.Dirs = []string{} scope := "main module or its selected dependencies" - if inWorkspaceMode() { + if inWorkspaceMode(loaderstate) { scope = "modules listed in go.work or their selected dependencies" } m.AddError(fmt.Errorf("directory prefix %s does not contain %s", base.ShortPath(absDir), scope)) @@ -558,7 +558,7 @@ func matchLocalDirs(ctx context.Context, modRoots []string, m *search.Match, rs } // resolveLocalPackage resolves a filesystem path to a package path. 
-func resolveLocalPackage(ctx context.Context, dir string, rs *Requirements) (string, error) { +func resolveLocalPackage(loaderstate *State, ctx context.Context, dir string, rs *Requirements) (string, error) { var absDir string if filepath.IsAbs(dir) { absDir = filepath.Clean(dir) @@ -596,13 +596,13 @@ func resolveLocalPackage(ctx context.Context, dir string, rs *Requirements) (str } } - for _, mod := range MainModules.Versions() { - modRoot := MainModules.ModRoot(mod) + for _, mod := range loaderstate.MainModules.Versions() { + modRoot := loaderstate.MainModules.ModRoot(mod) if modRoot != "" && absDir == modRoot { if absDir == cfg.GOROOTsrc { return "", errPkgIsGorootSrc } - return MainModules.PathPrefix(mod), nil + return loaderstate.MainModules.PathPrefix(mod), nil } } @@ -611,8 +611,8 @@ func resolveLocalPackage(ctx context.Context, dir string, rs *Requirements) (str // It's not strictly necessary but helpful to keep the checks. var pkgNotFoundErr error pkgNotFoundLongestPrefix := "" - for _, mainModule := range MainModules.Versions() { - modRoot := MainModules.ModRoot(mainModule) + for _, mainModule := range loaderstate.MainModules.Versions() { + modRoot := loaderstate.MainModules.ModRoot(mainModule) if modRoot != "" && str.HasFilePathPrefix(absDir, modRoot) && !strings.Contains(absDir[len(modRoot):], "@") { suffix := filepath.ToSlash(str.TrimFilePathPrefix(absDir, modRoot)) if pkg, found := strings.CutPrefix(suffix, "vendor/"); found { @@ -620,14 +620,14 @@ func resolveLocalPackage(ctx context.Context, dir string, rs *Requirements) (str return "", fmt.Errorf("without -mod=vendor, directory %s has no package path", absDir) } - readVendorList(VendorDir()) + readVendorList(VendorDir(loaderstate)) if _, ok := vendorPkgModule[pkg]; !ok { return "", fmt.Errorf("directory %s is not a package listed in vendor/modules.txt", absDir) } return pkg, nil } - mainModulePrefix := MainModules.PathPrefix(mainModule) + mainModulePrefix := loaderstate.MainModules.PathPrefix(mainModule) if mainModulePrefix == "" { pkg := suffix if pkg == "builtin" { @@ -668,13 +668,13 @@ func resolveLocalPackage(ctx context.Context, dir string, rs *Requirements) (str return pkg, nil } - pkg := pathInModuleCache(ctx, absDir, rs) + pkg := pathInModuleCache(loaderstate, ctx, absDir, rs) if pkg == "" { dirstr := fmt.Sprintf("directory %s", base.ShortPath(absDir)) if dirstr == "directory ." { dirstr = "current directory" } - if inWorkspaceMode() { + if inWorkspaceMode(loaderstate) { if mr := findModuleRoot(absDir); mr != "" { return "", fmt.Errorf("%s is contained in a module that is not one of the workspace modules listed in go.work. You can add the module to the workspace using:\n\tgo work use %s", dirstr, base.ShortPath(mr)) } @@ -693,17 +693,17 @@ var ( // pathInModuleCache returns the import path of the directory dir, // if dir is in the module cache copy of a module in our build list. 
-func pathInModuleCache(ctx context.Context, dir string, rs *Requirements) string { +func pathInModuleCache(loaderstate *State, ctx context.Context, dir string, rs *Requirements) string { tryMod := func(m module.Version) (string, bool) { if gover.IsToolchain(m.Path) { return "", false } var root string var err error - if repl := Replacement(m); repl.Path != "" && repl.Version == "" { + if repl := Replacement(loaderstate, m); repl.Path != "" && repl.Version == "" { root = repl.Path if !filepath.IsAbs(root) { - root = filepath.Join(replaceRelativeTo(), root) + root = filepath.Join(replaceRelativeTo(loaderstate), root) } } else if repl.Path != "" { root, err = modfetch.DownloadDir(ctx, repl) @@ -728,7 +728,7 @@ func pathInModuleCache(ctx context.Context, dir string, rs *Requirements) string if rs.pruning == pruned { for _, m := range rs.rootModules { - if v, _ := rs.rootSelected(m.Path); v != m.Version { + if v, _ := rs.rootSelected(loaderstate, m.Path); v != m.Version { continue // m is a root, but we have a higher root for the same path. } if importPath, ok := tryMod(m); ok { @@ -747,7 +747,7 @@ func pathInModuleCache(ctx context.Context, dir string, rs *Requirements) string // versions of root modules may differ from what we already checked above. // Re-check those paths too. - mg, _ := rs.Graph(ctx) + mg, _ := rs.Graph(loaderstate, ctx) var importPath string for _, m := range mg.BuildList() { var found bool @@ -766,8 +766,8 @@ func pathInModuleCache(ctx context.Context, dir string, rs *Requirements) string // // TODO(bcmills): Silencing errors seems off. Take a closer look at this and // figure out what the error-reporting actually ought to be. -func ImportFromFiles(ctx context.Context, gofiles []string) { - rs := LoadModFile(ctx) +func ImportFromFiles(loaderstate *State, ctx context.Context, gofiles []string) { + rs := LoadModFile(loaderstate, ctx) tags := imports.Tags() imports, testImports, err := imports.ScanFiles(gofiles, tags) @@ -775,7 +775,7 @@ func ImportFromFiles(ctx context.Context, gofiles []string) { base.Fatal(err) } - loaded = loadFromRoots(ctx, loaderParams{ + loaded = loadFromRoots(loaderstate, ctx, loaderParams{ PackageOpts: PackageOpts{ Tags: tags, ResolveMissingImports: true, @@ -788,10 +788,10 @@ func ImportFromFiles(ctx context.Context, gofiles []string) { return roots }, }) - requirements = loaded.requirements + loaderstate.requirements = loaded.requirements if !ExplicitWriteGoMod { - if err := commitRequirements(ctx, WriteOpts{}); err != nil { + if err := commitRequirements(loaderstate, ctx, WriteOpts{}); err != nil { base.Fatal(err) } } @@ -799,11 +799,11 @@ func ImportFromFiles(ctx context.Context, gofiles []string) { // DirImportPath returns the effective import path for dir, // provided it is within a main module, or else returns ".". -func (mms *MainModuleSet) DirImportPath(ctx context.Context, dir string) (path string, m module.Version) { - if !HasModRoot() { +func (mms *MainModuleSet) DirImportPath(loaderstate *State, ctx context.Context, dir string) (path string, m module.Version) { + if !HasModRoot(loaderstate) { return ".", module.Version{} } - LoadModFile(ctx) // Sets targetPrefix. + LoadModFile(loaderstate, ctx) // Sets targetPrefix. 
if !filepath.IsAbs(dir) { dir = filepath.Join(base.Cwd(), dir) @@ -820,7 +820,7 @@ func (mms *MainModuleSet) DirImportPath(ctx context.Context, dir string) (path s return mms.PathPrefix(v), v } if str.HasFilePathPrefix(dir, modRoot) { - pathPrefix := MainModules.PathPrefix(v) + pathPrefix := loaderstate.MainModules.PathPrefix(v) if pathPrefix > longestPrefix { longestPrefix = pathPrefix longestPrefixVersion = v @@ -853,13 +853,13 @@ func PackageModule(path string) module.Version { // the package at path as imported from the package in parentDir. // Lookup requires that one of the Load functions in this package has already // been called. -func Lookup(parentPath string, parentIsStd bool, path string) (dir, realPath string, err error) { +func Lookup(loaderstate *State, parentPath string, parentIsStd bool, path string) (dir, realPath string, err error) { if path == "" { panic("Lookup called with empty package path") } if parentIsStd { - path = loaded.stdVendor(parentPath, path) + path = loaded.stdVendor(loaderstate, parentPath, path) } pkg, ok := loaded.pkgCache.Get(path) if !ok { @@ -957,11 +957,11 @@ func (ld *loader) exitIfErrors(ctx context.Context) { // goVersion reports the Go version that should be used for the loader's // requirements: ld.TidyGoVersion if set, or ld.requirements.GoVersion() // otherwise. -func (ld *loader) goVersion() string { +func (ld *loader) goVersion(loaderstate *State) string { if ld.TidyGoVersion != "" { return ld.TidyGoVersion } - return ld.requirements.GoVersion() + return ld.requirements.GoVersion(loaderstate) } // A loadPkg records information about a single loaded package. @@ -1064,11 +1064,11 @@ func (pkg *loadPkg) isTest() bool { // fromExternalModule reports whether pkg was loaded from a module other than // the main module. -func (pkg *loadPkg) fromExternalModule() bool { +func (pkg *loadPkg) fromExternalModule(loaderstate *State) bool { if pkg.mod.Path == "" { return false // loaded from the standard library, not a module } - return !MainModules.Contains(pkg.mod.Path) + return !loaderstate.MainModules.Contains(pkg.mod.Path) } var errMissing = errors.New("cannot find package") @@ -1079,7 +1079,7 @@ var errMissing = errors.New("cannot find package") // The set of root packages is returned by the params.listRoots function, and // expanded to the full set of packages by tracing imports (and possibly tests) // as needed. -func loadFromRoots(ctx context.Context, params loaderParams) *loader { +func loadFromRoots(loaderstate *State, ctx context.Context, params loaderParams) *loader { ld := &loader{ loaderParams: params, work: par.NewQueue(runtime.GOMAXPROCS(0)), @@ -1095,7 +1095,7 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader { // spot-checks in modules that do not maintain the expanded go.mod // requirements needed for graph pruning. 
var err error - ld.requirements, _, err = expandGraph(ctx, ld.requirements) + ld.requirements, _, err = expandGraph(loaderstate, ctx, ld.requirements) if err != nil { ld.error(err) } @@ -1103,11 +1103,11 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader { ld.exitIfErrors(ctx) updateGoVersion := func() { - goVersion := ld.goVersion() + goVersion := ld.goVersion(loaderstate) if ld.requirements.pruning != workspace { var err error - ld.requirements, err = convertPruning(ctx, ld.requirements, pruningForGoVersion(goVersion)) + ld.requirements, err = convertPruning(loaderstate, ctx, ld.requirements, pruningForGoVersion(goVersion)) if err != nil { ld.error(err) ld.exitIfErrors(ctx) @@ -1141,7 +1141,7 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader { // set of root packages does not change then we can select the correct // versions of all transitive imports on the first try and complete // loading in a single iteration. - changedBuildList := ld.preloadRootModules(ctx, rootPkgs) + changedBuildList := ld.preloadRootModules(loaderstate, ctx, rootPkgs) if changedBuildList { // The build list has changed, so the set of root packages may have also // changed. Start over to pick up the changes. (Preloading roots is much @@ -1154,7 +1154,7 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader { inRoots := map[*loadPkg]bool{} for _, path := range rootPkgs { - root := ld.pkg(ctx, path, pkgIsRoot) + root := ld.pkg(loaderstate, ctx, path, pkgIsRoot) if !inRoots[root] { ld.roots = append(ld.roots, root) inRoots[root] = true @@ -1170,7 +1170,7 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader { ld.buildStacks() - changed, err := ld.updateRequirements(ctx) + changed, err := ld.updateRequirements(loaderstate, ctx) if err != nil { ld.error(err) break @@ -1184,12 +1184,12 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader { continue } - if !ld.ResolveMissingImports || (!HasModRoot() && !allowMissingModuleImports) { + if !ld.ResolveMissingImports || (!HasModRoot(loaderstate) && !allowMissingModuleImports) { // We've loaded as much as we can without resolving missing imports. break } - modAddedBy, err := ld.resolveMissingImports(ctx) + modAddedBy, err := ld.resolveMissingImports(loaderstate, ctx) if err != nil { ld.error(err) break @@ -1216,7 +1216,7 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader { // iteration so we don't need to also update it here. (That would waste time // computing a "direct" map that we'll have to recompute later anyway.) direct := ld.requirements.direct - rs, err := updateRoots(ctx, direct, ld.requirements, noPkgs, toAdd, ld.AssumeRootsImported) + rs, err := updateRoots(loaderstate, ctx, direct, ld.requirements, noPkgs, toAdd, ld.AssumeRootsImported) if err != nil { // If an error was found in a newly added module, report the package // import stack instead of the module requirement stack. Packages @@ -1244,7 +1244,7 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader { // Tidy the build list, if applicable, before we report errors. // (The process of tidying may remove errors from irrelevant dependencies.) if ld.Tidy { - rs, err := tidyRoots(ctx, ld.requirements, ld.pkgs) + rs, err := tidyRoots(loaderstate, ctx, ld.requirements, ld.pkgs) if err != nil { ld.error(err) } else { @@ -1252,8 +1252,8 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader { // Attempt to switch to the requested Go version. 
We have been using its // pruning and semantics all along, but there may have been — and may // still be — requirements on higher versions in the graph. - tidy := overrideRoots(ctx, rs, []module.Version{{Path: "go", Version: ld.TidyGoVersion}}) - mg, err := tidy.Graph(ctx) + tidy := overrideRoots(loaderstate, ctx, rs, []module.Version{{Path: "go", Version: ld.TidyGoVersion}}) + mg, err := tidy.Graph(loaderstate, ctx) if err != nil { ld.error(err) } @@ -1285,7 +1285,7 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader { if m.Path == "go" && ld.TidyGoVersion != "" { continue } - if v, ok := ld.requirements.rootSelected(m.Path); !ok || v != m.Version { + if v, ok := ld.requirements.rootSelected(loaderstate, m.Path); !ok || v != m.Version { ld.error(fmt.Errorf("internal error: a requirement on %v is needed but was not added during package loading (selected %s)", m, v)) } } @@ -1304,7 +1304,7 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader { } // Add importer information to checksum errors. - if sumErr := (*ImportMissingSumError)(nil); errors.As(pkg.err, &sumErr) { + if sumErr, ok := errors.AsType[*ImportMissingSumError](pkg.err); ok { if importer := pkg.stack; importer != nil { sumErr.importer = importer.path sumErr.importerVersion = importer.mod.Version @@ -1312,7 +1312,7 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader { } } - if stdErr := (*ImportMissingError)(nil); errors.As(pkg.err, &stdErr) && stdErr.isStd { + if stdErr, ok := errors.AsType[*ImportMissingError](pkg.err); ok && stdErr.isStd { // Add importer go version information to import errors of standard // library packages arising from newer releases. if importer := pkg.stack; importer != nil { @@ -1334,7 +1334,7 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader { ld.error(fmt.Errorf("%s: %w", pkg.stackText(), pkg.err)) } - ld.checkMultiplePaths() + ld.checkMultiplePaths(loaderstate) return ld } @@ -1357,7 +1357,7 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader { // The "changed" return value reports whether the update changed the selected // version of any module that either provided a loaded package or may now // provide a package that was previously unresolved. -func (ld *loader) updateRequirements(ctx context.Context) (changed bool, err error) { +func (ld *loader) updateRequirements(loaderstate *State, ctx context.Context) (changed bool, err error) { rs := ld.requirements // direct contains the set of modules believed to provide packages directly @@ -1384,22 +1384,22 @@ func (ld *loader) updateRequirements(ctx context.Context) (changed bool, err err var maxTooNew *gover.TooNewError for _, pkg := range ld.pkgs { if pkg.err != nil { - if tooNew := (*gover.TooNewError)(nil); errors.As(pkg.err, &tooNew) { + if tooNew, ok := errors.AsType[*gover.TooNewError](pkg.err); ok { if maxTooNew == nil || gover.Compare(tooNew.GoVersion, maxTooNew.GoVersion) > 0 { maxTooNew = tooNew } } } - if pkg.mod.Version != "" || !MainModules.Contains(pkg.mod.Path) { + if pkg.mod.Version != "" || !loaderstate.MainModules.Contains(pkg.mod.Path) { continue } for _, dep := range pkg.imports { - if !dep.fromExternalModule() { + if !dep.fromExternalModule(loaderstate) { continue } - if inWorkspaceMode() { + if inWorkspaceMode(loaderstate) { // In workspace mode / workspace pruning mode, the roots are the main modules // rather than the main module's direct dependencies. The check below on the selected // roots does not apply. 
@@ -1412,7 +1412,7 @@ func (ld *loader) updateRequirements(ctx context.Context) (changed bool, err err // of the vendor directory anyway. continue } - if mg, err := rs.Graph(ctx); err != nil { + if mg, err := rs.Graph(loaderstate, ctx); err != nil { return false, err } else if _, ok := mg.RequiredBy(dep.mod); !ok { // dep.mod is not an explicit dependency, but needs to be. @@ -1424,7 +1424,7 @@ func (ld *loader) updateRequirements(ctx context.Context) (changed bool, err err } } } else if pkg.err == nil && cfg.BuildMod != "mod" { - if v, ok := rs.rootSelected(dep.mod.Path); !ok || v != dep.mod.Version { + if v, ok := rs.rootSelected(loaderstate, dep.mod.Path); !ok || v != dep.mod.Version { // dep.mod is not an explicit dependency, but needs to be. // Because we are not in "mod" mode, we will not be able to update it. // Instead, mark the importing package with an error. @@ -1490,21 +1490,21 @@ func (ld *loader) updateRequirements(ctx context.Context) (changed bool, err err // roots can only increase and the set of roots can only expand. The set // of extant root paths is finite and the set of versions of each path is // finite, so the iteration *must* reach a stable fixed-point. - tidy, err := tidyRoots(ctx, rs, ld.pkgs) + tidy, err := tidyRoots(loaderstate, ctx, rs, ld.pkgs) if err != nil { return false, err } addRoots = tidy.rootModules } - rs, err = updateRoots(ctx, direct, rs, ld.pkgs, addRoots, ld.AssumeRootsImported) + rs, err = updateRoots(loaderstate, ctx, direct, rs, ld.pkgs, addRoots, ld.AssumeRootsImported) if err != nil { // We don't actually know what even the root requirements are supposed to be, // so we can't proceed with loading. Return the error to the caller return false, err } - if rs.GoVersion() != ld.requirements.GoVersion() { + if rs.GoVersion(loaderstate) != ld.requirements.GoVersion(loaderstate) { // A change in the selected Go version may or may not affect the set of // loaded packages, but in some cases it can change the meaning of the "all" // pattern, the level of pruning in the module graph, and even the set of @@ -1515,12 +1515,12 @@ func (ld *loader) updateRequirements(ctx context.Context) (changed bool, err err // The roots of the module graph have changed in some way (not just the // "direct" markings). Check whether the changes affected any of the loaded // packages. - mg, err := rs.Graph(ctx) + mg, err := rs.Graph(loaderstate, ctx) if err != nil { return false, err } for _, pkg := range ld.pkgs { - if pkg.fromExternalModule() && mg.Selected(pkg.mod.Path) != pkg.mod.Version { + if pkg.fromExternalModule(loaderstate) && mg.Selected(pkg.mod.Path) != pkg.mod.Version { changed = true break } @@ -1540,7 +1540,7 @@ func (ld *loader) updateRequirements(ctx context.Context) (changed bool, err err // // In some sense, we can think of this as ‘upgraded the module providing // pkg.path from "none" to a version higher than "none"’. - if _, _, _, _, err = importFromModules(ctx, pkg.path, rs, nil, ld.skipImportModFiles); err == nil { + if _, _, _, _, err = importFromModules(loaderstate, ctx, pkg.path, rs, nil, ld.skipImportModFiles); err == nil { changed = true break } @@ -1558,7 +1558,7 @@ func (ld *loader) updateRequirements(ctx context.Context) (changed bool, err err // The newly-resolved packages are added to the addedModuleFor map, and // resolveMissingImports returns a map from each new module version to // the first missing package that module would resolve. 
-func (ld *loader) resolveMissingImports(ctx context.Context) (modAddedBy map[module.Version]*loadPkg, err error) { +func (ld *loader) resolveMissingImports(loaderstate *State, ctx context.Context) (modAddedBy map[module.Version]*loadPkg, err error) { type pkgMod struct { pkg *loadPkg mod *module.Version @@ -1573,7 +1573,7 @@ func (ld *loader) resolveMissingImports(ctx context.Context) (modAddedBy map[mod // we should only add the missing import once. continue } - if !errors.As(pkg.err, new(*ImportMissingError)) { + if _, ok := errors.AsType[*ImportMissingError](pkg.err); !ok { // Leave other errors for Import or load.Packages to report. continue } @@ -1582,12 +1582,11 @@ func (ld *loader) resolveMissingImports(ctx context.Context) (modAddedBy map[mod var mod module.Version ld.work.Add(func() { var err error - mod, err = queryImport(ctx, pkg.path, ld.requirements) + mod, err = queryImport(loaderstate, ctx, pkg.path, ld.requirements) if err != nil { - var ime *ImportMissingError - if errors.As(err, &ime) { + if ime, ok := errors.AsType[*ImportMissingError](err); ok { for curstack := pkg.stack; curstack != nil; curstack = curstack.stack { - if MainModules.Contains(curstack.mod.Path) { + if loaderstate.MainModules.Contains(curstack.mod.Path) { ime.ImportingMainModule = curstack.mod break } @@ -1625,7 +1624,7 @@ func (ld *loader) resolveMissingImports(ctx context.Context) (modAddedBy map[mod maxTooNewPkg *loadPkg ) for _, pm := range pkgMods { - if tooNew := (*gover.TooNewError)(nil); errors.As(pm.pkg.err, &tooNew) { + if tooNew, ok := errors.AsType[*gover.TooNewError](pm.pkg.err); ok { if maxTooNew == nil || gover.Compare(tooNew.GoVersion, maxTooNew.GoVersion) > 0 { maxTooNew = tooNew maxTooNewPkg = pm.pkg @@ -1659,7 +1658,7 @@ func (ld *loader) resolveMissingImports(ctx context.Context) (modAddedBy map[mod // ld.work queue, and its test (if requested) will also be populated once // imports have been resolved. When ld.work goes idle, all transitive imports of // the requested package (and its test, if requested) will have been loaded. -func (ld *loader) pkg(ctx context.Context, path string, flags loadPkgFlags) *loadPkg { +func (ld *loader) pkg(loaderstate *State, ctx context.Context, path string, flags loadPkgFlags) *loadPkg { if flags.has(pkgImportsLoaded) { panic("internal error: (*loader).pkg called with pkgImportsLoaded flag set") } @@ -1668,20 +1667,20 @@ func (ld *loader) pkg(ctx context.Context, path string, flags loadPkgFlags) *loa pkg := &loadPkg{ path: path, } - ld.applyPkgFlags(ctx, pkg, flags) + ld.applyPkgFlags(loaderstate, ctx, pkg, flags) - ld.work.Add(func() { ld.load(ctx, pkg) }) + ld.work.Add(func() { ld.load(loaderstate, ctx, pkg) }) return pkg }) - ld.applyPkgFlags(ctx, pkg, flags) + ld.applyPkgFlags(loaderstate, ctx, pkg, flags) return pkg } // applyPkgFlags updates pkg.flags to set the given flags and propagate the // (transitive) effects of those flags, possibly loading or enqueueing further // packages as a result. -func (ld *loader) applyPkgFlags(ctx context.Context, pkg *loadPkg, flags loadPkgFlags) { +func (ld *loader) applyPkgFlags(loaderstate *State, ctx context.Context, pkg *loadPkg, flags loadPkgFlags) { if flags == 0 { return } @@ -1709,7 +1708,7 @@ func (ld *loader) applyPkgFlags(ctx context.Context, pkg *loadPkg, flags loadPkg // so it's ok if we call it more than is strictly necessary. 
wantTest := false switch { - case ld.allPatternIsRoot && MainModules.Contains(pkg.mod.Path): + case ld.allPatternIsRoot && loaderstate.MainModules.Contains(pkg.mod.Path): // We are loading the "all" pattern, which includes packages imported by // tests in the main module. This package is in the main module, so we // need to identify the imports of its test even if LoadTests is not set. @@ -1730,13 +1729,13 @@ func (ld *loader) applyPkgFlags(ctx context.Context, pkg *loadPkg, flags loadPkg if wantTest { var testFlags loadPkgFlags - if MainModules.Contains(pkg.mod.Path) || (ld.allClosesOverTests && new.has(pkgInAll)) { + if loaderstate.MainModules.Contains(pkg.mod.Path) || (ld.allClosesOverTests && new.has(pkgInAll)) { // Tests of packages in the main module are in "all", in the sense that // they cause the packages they import to also be in "all". So are tests // of packages in "all" if "all" closes over test dependencies. testFlags |= pkgInAll } - ld.pkgTest(ctx, pkg, testFlags) + ld.pkgTest(loaderstate, ctx, pkg, testFlags) } } @@ -1744,13 +1743,13 @@ func (ld *loader) applyPkgFlags(ctx context.Context, pkg *loadPkg, flags loadPkg // We have just marked pkg with pkgInAll, or we have just loaded its // imports, or both. Now is the time to propagate pkgInAll to the imports. for _, dep := range pkg.imports { - ld.applyPkgFlags(ctx, dep, pkgInAll) + ld.applyPkgFlags(loaderstate, ctx, dep, pkgInAll) } } if new.has(pkgFromRoot) && !old.has(pkgFromRoot|pkgImportsLoaded) { for _, dep := range pkg.imports { - ld.applyPkgFlags(ctx, dep, pkgFromRoot) + ld.applyPkgFlags(loaderstate, ctx, dep, pkgFromRoot) } } } @@ -1758,7 +1757,7 @@ func (ld *loader) applyPkgFlags(ctx context.Context, pkg *loadPkg, flags loadPkg // preloadRootModules loads the module requirements needed to identify the // selected version of each module providing a package in rootPkgs, // adding new root modules to the module graph if needed. -func (ld *loader) preloadRootModules(ctx context.Context, rootPkgs []string) (changedBuildList bool) { +func (ld *loader) preloadRootModules(loaderstate *State, ctx context.Context, rootPkgs []string) (changedBuildList bool) { needc := make(chan map[module.Version]bool, 1) needc <- map[module.Version]bool{} for _, path := range rootPkgs { @@ -1769,13 +1768,12 @@ func (ld *loader) preloadRootModules(ctx context.Context, rootPkgs []string) (ch // If the main module is tidy and the package is in "all" — or if we're // lucky — we can identify all of its imports without actually loading the // full module graph. - m, _, _, _, err := importFromModules(ctx, path, ld.requirements, nil, ld.skipImportModFiles) + m, _, _, _, err := importFromModules(loaderstate, ctx, path, ld.requirements, nil, ld.skipImportModFiles) if err != nil { - var missing *ImportMissingError - if errors.As(err, &missing) && ld.ResolveMissingImports { + if _, ok := errors.AsType[*ImportMissingError](err); ok && ld.ResolveMissingImports { // This package isn't provided by any selected module. // If we can find it, it will be a new root dependency. - m, err = queryImport(ctx, path, ld.requirements) + m, err = queryImport(loaderstate, ctx, path, ld.requirements) } if err != nil { // We couldn't identify the root module containing this package. 
@@ -1788,7 +1786,7 @@ func (ld *loader) preloadRootModules(ctx context.Context, rootPkgs []string) (ch return } - v, ok := ld.requirements.rootSelected(m.Path) + v, ok := ld.requirements.rootSelected(loaderstate, m.Path) if !ok || v != m.Version { // We found the requested package in m, but m is not a root, so // loadModGraph will not load its requirements. We need to promote the @@ -1816,7 +1814,7 @@ func (ld *loader) preloadRootModules(ctx context.Context, rootPkgs []string) (ch } gover.ModSort(toAdd) - rs, err := updateRoots(ctx, ld.requirements.direct, ld.requirements, nil, toAdd, ld.AssumeRootsImported) + rs, err := updateRoots(loaderstate, ctx, ld.requirements.direct, ld.requirements, nil, toAdd, ld.AssumeRootsImported) if err != nil { // We are missing some root dependency, and for some reason we can't load // enough of the module dependency graph to add the missing root. Package @@ -1838,11 +1836,11 @@ func (ld *loader) preloadRootModules(ctx context.Context, rootPkgs []string) (ch } // load loads an individual package. -func (ld *loader) load(ctx context.Context, pkg *loadPkg) { +func (ld *loader) load(loaderstate *State, ctx context.Context, pkg *loadPkg) { var mg *ModuleGraph if ld.requirements.pruning == unpruned { var err error - mg, err = ld.requirements.Graph(ctx) + mg, err = ld.requirements.Graph(loaderstate, ctx) if err != nil { // We already checked the error from Graph in loadFromRoots and/or // updateRequirements, so we ignored the error on purpose and we should @@ -1857,17 +1855,17 @@ func (ld *loader) load(ctx context.Context, pkg *loadPkg) { } var modroot string - pkg.mod, modroot, pkg.dir, pkg.altMods, pkg.err = importFromModules(ctx, pkg.path, ld.requirements, mg, ld.skipImportModFiles) - if MainModules.Tools()[pkg.path] { + pkg.mod, modroot, pkg.dir, pkg.altMods, pkg.err = importFromModules(loaderstate, ctx, pkg.path, ld.requirements, mg, ld.skipImportModFiles) + if loaderstate.MainModules.Tools()[pkg.path] { // Tools declared by main modules are always in "all". // We apply the package flags before returning so that missing // tool dependencies report an error https://go.dev/issue/70582 - ld.applyPkgFlags(ctx, pkg, pkgInAll) + ld.applyPkgFlags(loaderstate, ctx, pkg, pkgInAll) } if pkg.dir == "" { return } - if MainModules.Contains(pkg.mod.Path) { + if loaderstate.MainModules.Contains(pkg.mod.Path) { // Go ahead and mark pkg as in "all". This provides the invariant that a // package that is *only* imported by other packages in "all" is always // marked as such before loading its imports. @@ -1877,7 +1875,7 @@ func (ld *loader) load(ctx context.Context, pkg *loadPkg) { // about (by reducing churn on the flag bits of dependencies), and costs // essentially nothing (these atomic flag ops are essentially free compared // to scanning source code for imports). - ld.applyPkgFlags(ctx, pkg, pkgInAll) + ld.applyPkgFlags(loaderstate, ctx, pkg, pkgInAll) } if ld.AllowPackage != nil { if err := ld.AllowPackage(ctx, pkg.path, pkg.mod); err != nil { @@ -1909,13 +1907,13 @@ func (ld *loader) load(ctx context.Context, pkg *loadPkg) { if pkg.inStd { // Imports from packages in "std" and "cmd" should resolve using // GOROOT/src/vendor even when "std" is not the main module. 
- path = ld.stdVendor(pkg.path, path) + path = ld.stdVendor(loaderstate, pkg.path, path) } - pkg.imports = append(pkg.imports, ld.pkg(ctx, path, importFlags)) + pkg.imports = append(pkg.imports, ld.pkg(loaderstate, ctx, path, importFlags)) } pkg.testImports = testImports - ld.applyPkgFlags(ctx, pkg, pkgImportsLoaded) + ld.applyPkgFlags(loaderstate, ctx, pkg, pkgImportsLoaded) } // pkgTest locates the test of pkg, creating it if needed, and updates its state @@ -1923,7 +1921,7 @@ func (ld *loader) load(ctx context.Context, pkg *loadPkg) { // // pkgTest requires that the imports of pkg have already been loaded (flagged // with pkgImportsLoaded). -func (ld *loader) pkgTest(ctx context.Context, pkg *loadPkg, testFlags loadPkgFlags) *loadPkg { +func (ld *loader) pkgTest(loaderstate *State, ctx context.Context, pkg *loadPkg, testFlags loadPkgFlags) *loadPkg { if pkg.isTest() { panic("pkgTest called on a test package") } @@ -1938,7 +1936,7 @@ func (ld *loader) pkgTest(ctx context.Context, pkg *loadPkg, testFlags loadPkgFl err: pkg.err, inStd: pkg.inStd, } - ld.applyPkgFlags(ctx, pkg.test, testFlags) + ld.applyPkgFlags(loaderstate, ctx, pkg.test, testFlags) createdTest = true }) @@ -1951,14 +1949,14 @@ func (ld *loader) pkgTest(ctx context.Context, pkg *loadPkg, testFlags loadPkgFl } for _, path := range pkg.testImports { if pkg.inStd { - path = ld.stdVendor(test.path, path) + path = ld.stdVendor(loaderstate, test.path, path) } - test.imports = append(test.imports, ld.pkg(ctx, path, importFlags)) + test.imports = append(test.imports, ld.pkg(loaderstate, ctx, path, importFlags)) } pkg.testImports = nil - ld.applyPkgFlags(ctx, test, pkgImportsLoaded) + ld.applyPkgFlags(loaderstate, ctx, test, pkgImportsLoaded) } else { - ld.applyPkgFlags(ctx, test, testFlags) + ld.applyPkgFlags(loaderstate, ctx, test, testFlags) } return test @@ -1966,7 +1964,7 @@ func (ld *loader) pkgTest(ctx context.Context, pkg *loadPkg, testFlags loadPkgFl // stdVendor returns the canonical import path for the package with the given // path when imported from the standard-library package at parentPath. -func (ld *loader) stdVendor(parentPath, path string) string { +func (ld *loader) stdVendor(loaderstate *State, parentPath, path string) string { if p, _, ok := fips140.ResolveImport(path); ok { return p } @@ -1975,14 +1973,14 @@ func (ld *loader) stdVendor(parentPath, path string) string { } if str.HasPathPrefix(parentPath, "cmd") { - if !ld.VendorModulesInGOROOTSrc || !MainModules.Contains("cmd") { + if !ld.VendorModulesInGOROOTSrc || !loaderstate.MainModules.Contains("cmd") { vendorPath := pathpkg.Join("cmd", "vendor", path) if _, err := os.Stat(filepath.Join(cfg.GOROOTsrc, filepath.FromSlash(vendorPath))); err == nil { return vendorPath } } - } else if !ld.VendorModulesInGOROOTSrc || !MainModules.Contains("std") || str.HasPathPrefix(parentPath, "vendor") { + } else if !ld.VendorModulesInGOROOTSrc || !loaderstate.MainModules.Contains("std") || str.HasPathPrefix(parentPath, "vendor") { // If we are outside of the 'std' module, resolve imports from within 'std' // to the vendor directory. // @@ -2028,7 +2026,7 @@ func (ld *loader) computePatternAll() (all []string) { // or as a replacement for another module, but not both at the same time. // // (See https://golang.org/issue/26607 and https://golang.org/issue/34650.) 
-func (ld *loader) checkMultiplePaths() { +func (ld *loader) checkMultiplePaths(loaderstate *State) { mods := ld.requirements.rootModules if cached := ld.requirements.graph.Load(); cached != nil { if mg := cached.mg; mg != nil { @@ -2038,7 +2036,7 @@ func (ld *loader) checkMultiplePaths() { firstPath := map[module.Version]string{} for _, mod := range mods { - src := resolveReplacement(mod) + src := resolveReplacement(loaderstate, mod) if prev, ok := firstPath[src]; !ok { firstPath[src] = mod.Path } else if prev != mod.Path { @@ -2049,8 +2047,8 @@ func (ld *loader) checkMultiplePaths() { // checkTidyCompatibility emits an error if any package would be loaded from a // different module under rs than under ld.requirements. -func (ld *loader) checkTidyCompatibility(ctx context.Context, rs *Requirements, compatVersion string) { - goVersion := rs.GoVersion() +func (ld *loader) checkTidyCompatibility(loaderstate *State, ctx context.Context, rs *Requirements, compatVersion string) { + goVersion := rs.GoVersion(loaderstate) suggestUpgrade := false suggestEFlag := false suggestFixes := func() { @@ -2067,7 +2065,7 @@ func (ld *loader) checkTidyCompatibility(ctx context.Context, rs *Requirements, fmt.Fprintln(os.Stderr) goFlag := "" - if goVersion != MainModules.GoVersion() { + if goVersion != loaderstate.MainModules.GoVersion(loaderstate) { goFlag = " -go=" + goVersion } @@ -2096,7 +2094,7 @@ func (ld *loader) checkTidyCompatibility(ctx context.Context, rs *Requirements, fmt.Fprintf(os.Stderr, "For information about 'go mod tidy' compatibility, see:\n\thttps://go.dev/ref/mod#graph-pruning\n") } - mg, err := rs.Graph(ctx) + mg, err := rs.Graph(loaderstate, ctx) if err != nil { ld.error(fmt.Errorf("error loading go %s module graph: %w", compatVersion, err)) ld.switchIfErrors(ctx) @@ -2134,7 +2132,7 @@ func (ld *loader) checkTidyCompatibility(ctx context.Context, rs *Requirements, pkg := pkg ld.work.Add(func() { - mod, _, _, _, err := importFromModules(ctx, pkg.path, rs, mg, ld.skipImportModFiles) + mod, _, _, _, err := importFromModules(loaderstate, ctx, pkg.path, rs, mg, ld.skipImportModFiles) if mod != pkg.mod { mismatches := <-mismatchMu mismatches[pkg] = mismatch{mod: mod, err: err} @@ -2196,14 +2194,14 @@ func (ld *loader) checkTidyCompatibility(ctx context.Context, rs *Requirements, // module that previously provided the package to a version that no // longer does, or to a version for which the module source code (but // not the go.mod file in isolation) has a checksum error. - if missing := (*ImportMissingError)(nil); errors.As(mismatch.err, &missing) { + if _, ok := errors.AsType[*ImportMissingError](mismatch.err); ok { selected := module.Version{ Path: pkg.mod.Path, Version: mg.Selected(pkg.mod.Path), } ld.error(fmt.Errorf("%s loaded from %v,\n\tbut go %s would fail to locate it in %s", pkg.stackText(), pkg.mod, compatVersion, selected)) } else { - if ambiguous := (*AmbiguousImportError)(nil); errors.As(mismatch.err, &ambiguous) { + if _, ok := errors.AsType[*AmbiguousImportError](mismatch.err); ok { // TODO: Is this check needed? 
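The two conversions above follow the pattern applied throughout this patch: call sites that previously declared a typed target and passed its address to errors.As now use the generic errors.AsType, which returns the typed error and an ok flag directly. A minimal standalone sketch of the before/after idiom, assuming the errors.AsType function this release adds and using *fs.PathError purely as an illustrative target type (not a type used by the patch):

	package main

	import (
		"errors"
		"fmt"
		"io/fs"
		"os"
	)

	func main() {
		_, err := os.Open("no-such-file")

		// Old idiom: declare a typed target and pass its address to errors.As.
		var pathErr *fs.PathError
		if errors.As(err, &pathErr) {
			fmt.Println("errors.As:", pathErr.Path)
		}

		// New idiom used in this patch: the generic AsType returns the typed
		// value and a boolean, so no separate target declaration is needed.
		if pe, ok := errors.AsType[*fs.PathError](err); ok {
			fmt.Println("errors.AsType:", pe.Path)
		}
	}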
} ld.error(fmt.Errorf("%s loaded from %v,\n\tbut go %s would fail to locate it:\n\t%v", pkg.stackText(), pkg.mod, compatVersion, mismatch.err)) diff --git a/src/cmd/go/internal/modload/modfile.go b/src/cmd/go/internal/modload/modfile.go index 04e204cc984c59..20feb8fcacc784 100644 --- a/src/cmd/go/internal/modload/modfile.go +++ b/src/cmd/go/internal/modload/modfile.go @@ -76,8 +76,7 @@ func ReadModFile(gomod string, fix modfile.VersionFixer) (data []byte, f *modfil } func shortPathErrorList(err error) error { - var el modfile.ErrorList - if errors.As(err, &el) { + if el, ok := errors.AsType[modfile.ErrorList](err); ok { for i := range el { el[i].Filename = base.ShortPath(el[i].Filename) } @@ -143,7 +142,7 @@ func CheckAllowed(ctx context.Context, m module.Version) error { if err := CheckExclusions(ctx, m); err != nil { return err } - if err := CheckRetractions(ctx, m); err != nil { + if err := CheckRetractions(LoaderState, ctx, m); err != nil { return err } return nil @@ -156,8 +155,8 @@ var ErrDisallowed = errors.New("disallowed module version") // CheckExclusions returns an error equivalent to ErrDisallowed if module m is // excluded by the main module's go.mod file. func CheckExclusions(ctx context.Context, m module.Version) error { - for _, mainModule := range MainModules.Versions() { - if index := MainModules.Index(mainModule); index != nil && index.exclude[m] { + for _, mainModule := range LoaderState.MainModules.Versions() { + if index := LoaderState.MainModules.Index(mainModule); index != nil && index.exclude[m] { return module.VersionError(m, errExcluded) } } @@ -173,14 +172,17 @@ func (e *excludedError) Is(err error) bool { return err == ErrDisallowed } // CheckRetractions returns an error if module m has been retracted by // its author. -func CheckRetractions(ctx context.Context, m module.Version) (err error) { +func CheckRetractions(loaderstate *State, ctx context.Context, m module.Version) (err error) { defer func() { - if retractErr := (*ModuleRetractedError)(nil); err == nil || errors.As(err, &retractErr) { + if err == nil { + return + } + if _, ok := errors.AsType[*ModuleRetractedError](err); ok { return } // Attribute the error to the version being checked, not the version from // which the retractions were to be loaded. - if mErr := (*module.ModuleError)(nil); errors.As(err, &mErr) { + if mErr, ok := errors.AsType[*module.ModuleError](err); ok { err = mErr.Err } err = &retractionLoadingError{m: m, err: err} @@ -191,7 +193,7 @@ func CheckRetractions(ctx context.Context, m module.Version) (err error) { // Cannot be retracted. return nil } - if repl := Replacement(module.Version{Path: m.Path}); repl.Path != "" { + if repl := Replacement(loaderstate, module.Version{Path: m.Path}); repl.Path != "" { // All versions of the module were replaced. // Don't load retractions, since we'd just load the replacement. return nil @@ -208,11 +210,11 @@ func CheckRetractions(ctx context.Context, m module.Version) (err error) { // We load the raw file here: the go.mod file may have a different module // path that we expect if the module or its repository was renamed. // We still want to apply retractions to other aliases of the module. 
- rm, err := queryLatestVersionIgnoringRetractions(ctx, m.Path) + rm, err := queryLatestVersionIgnoringRetractions(loaderstate, ctx, m.Path) if err != nil { return err } - summary, err := rawGoModSummary(rm) + summary, err := rawGoModSummary(loaderstate, rm) if err != nil && !errors.Is(err, gover.ErrTooNew) { return err } @@ -298,7 +300,7 @@ func ShortMessage(message, emptyDefault string) string { // // CheckDeprecation returns an error if the message can't be loaded. // CheckDeprecation returns "", nil if there is no deprecation message. -func CheckDeprecation(ctx context.Context, m module.Version) (deprecation string, err error) { +func CheckDeprecation(loaderstate *State, ctx context.Context, m module.Version) (deprecation string, err error) { defer func() { if err != nil { err = fmt.Errorf("loading deprecation for %s: %w", m.Path, err) @@ -310,17 +312,17 @@ func CheckDeprecation(ctx context.Context, m module.Version) (deprecation string // Don't look up deprecation. return "", nil } - if repl := Replacement(module.Version{Path: m.Path}); repl.Path != "" { + if repl := Replacement(loaderstate, module.Version{Path: m.Path}); repl.Path != "" { // All versions of the module were replaced. // We'll look up deprecation separately for the replacement. return "", nil } - latest, err := queryLatestVersionIgnoringRetractions(ctx, m.Path) + latest, err := queryLatestVersionIgnoringRetractions(loaderstate, ctx, m.Path) if err != nil { return "", err } - summary, err := rawGoModSummary(latest) + summary, err := rawGoModSummary(loaderstate, latest) if err != nil && !errors.Is(err, gover.ErrTooNew) { return "", err } @@ -340,28 +342,28 @@ func replacement(mod module.Version, replace map[module.Version]module.Version) // Replacement returns the replacement for mod, if any. If the path in the // module.Version is relative it's relative to the single main module outside // workspace mode, or the workspace's directory in workspace mode. -func Replacement(mod module.Version) module.Version { - r, foundModRoot, _ := replacementFrom(mod) - return canonicalizeReplacePath(r, foundModRoot) +func Replacement(loaderstate *State, mod module.Version) module.Version { + r, foundModRoot, _ := replacementFrom(loaderstate, mod) + return canonicalizeReplacePath(loaderstate, r, foundModRoot) } // replacementFrom returns the replacement for mod, if any, the modroot of the replacement if it appeared in a go.mod, // and the source of the replacement. The replacement is relative to the go.work or go.mod file it appears in. -func replacementFrom(mod module.Version) (r module.Version, modroot string, fromFile string) { +func replacementFrom(loaderstate *State, mod module.Version) (r module.Version, modroot string, fromFile string) { foundFrom, found, foundModRoot := "", module.Version{}, "" - if MainModules == nil { + if loaderstate.MainModules == nil { return module.Version{}, "", "" - } else if MainModules.Contains(mod.Path) && mod.Version == "" { + } else if loaderstate.MainModules.Contains(mod.Path) && mod.Version == "" { // Don't replace the workspace version of the main module. 
return module.Version{}, "", "" } - if _, r, ok := replacement(mod, MainModules.WorkFileReplaceMap()); ok { - return r, "", workFilePath + if _, r, ok := replacement(mod, loaderstate.MainModules.WorkFileReplaceMap()); ok { + return r, "", loaderstate.workFilePath } - for _, v := range MainModules.Versions() { - if index := MainModules.Index(v); index != nil { + for _, v := range loaderstate.MainModules.Versions() { + if index := loaderstate.MainModules.Index(v); index != nil { if from, r, ok := replacement(mod, index.replace); ok { - modRoot := MainModules.ModRoot(v) + modRoot := loaderstate.MainModules.ModRoot(v) if foundModRoot != "" && foundFrom != from && found != r { base.Errorf("conflicting replacements found for %v in workspace modules defined by %v and %v", mod, modFilePath(foundModRoot), modFilePath(modRoot)) @@ -374,21 +376,21 @@ func replacementFrom(mod module.Version) (r module.Version, modroot string, from return found, foundModRoot, modFilePath(foundModRoot) } -func replaceRelativeTo() string { - if workFilePath := WorkFilePath(); workFilePath != "" { +func replaceRelativeTo(loaderstate *State) string { + if workFilePath := WorkFilePath(loaderstate); workFilePath != "" { return filepath.Dir(workFilePath) } - return MainModules.ModRoot(MainModules.mustGetSingleMainModule()) + return loaderstate.MainModules.ModRoot(loaderstate.MainModules.mustGetSingleMainModule(loaderstate)) } // canonicalizeReplacePath ensures that relative, on-disk, replaced module paths // are relative to the workspace directory (in workspace mode) or to the module's // directory (in module mode, as they already are). -func canonicalizeReplacePath(r module.Version, modRoot string) module.Version { +func canonicalizeReplacePath(loaderstate *State, r module.Version, modRoot string) module.Version { if filepath.IsAbs(r.Path) || r.Version != "" || modRoot == "" { return r } - workFilePath := WorkFilePath() + workFilePath := WorkFilePath(loaderstate) if workFilePath == "" { return r } @@ -405,8 +407,8 @@ func canonicalizeReplacePath(r module.Version, modRoot string) module.Version { // for m: either m itself, or the replacement for m (iff m is replaced). // It also returns the modroot of the module providing the replacement if // one was found. -func resolveReplacement(m module.Version) module.Version { - if r := Replacement(m); r.Path != "" { +func resolveReplacement(loaderstate *State, m module.Version) module.Version { + if r := Replacement(loaderstate, m); r.Path != "" { return r } return m @@ -571,12 +573,12 @@ type retraction struct { // module versions. // // The caller must not modify the returned summary. -func goModSummary(m module.Version) (*modFileSummary, error) { - if m.Version == "" && !inWorkspaceMode() && MainModules.Contains(m.Path) { +func goModSummary(loaderstate *State, m module.Version) (*modFileSummary, error) { + if m.Version == "" && !inWorkspaceMode(loaderstate) && loaderstate.MainModules.Contains(m.Path) { panic("internal error: goModSummary called on a main module") } if gover.IsToolchain(m.Path) { - return rawGoModSummary(m) + return rawGoModSummary(loaderstate, m) } if cfg.BuildMod == "vendor" { @@ -584,7 +586,7 @@ func goModSummary(m module.Version) (*modFileSummary, error) { module: module.Version{Path: m.Path}, } - readVendorList(VendorDir()) + readVendorList(VendorDir(loaderstate)) if vendorVersion[m.Path] != m.Version { // This module is not vendored, so packages cannot be loaded from it and // it cannot be relevant to the build. 
@@ -599,15 +601,15 @@ func goModSummary(m module.Version) (*modFileSummary, error) { return summary, nil } - actual := resolveReplacement(m) - if mustHaveSums() && actual.Version != "" { + actual := resolveReplacement(loaderstate, m) + if mustHaveSums(loaderstate) && actual.Version != "" { key := module.Version{Path: actual.Path, Version: actual.Version + "/go.mod"} if !modfetch.HaveSum(key) { suggestion := fmt.Sprintf(" for go.mod file; to add it:\n\tgo mod download %s", m.Path) return nil, module.VersionError(actual, &sumMissingError{suggestion: suggestion}) } } - summary, err := rawGoModSummary(actual) + summary, err := rawGoModSummary(loaderstate, actual) if err != nil { return nil, err } @@ -639,8 +641,8 @@ func goModSummary(m module.Version) (*modFileSummary, error) { } } - for _, mainModule := range MainModules.Versions() { - if index := MainModules.Index(mainModule); index != nil && len(index.exclude) > 0 { + for _, mainModule := range loaderstate.MainModules.Versions() { + if index := loaderstate.MainModules.Index(mainModule); index != nil && len(index.exclude) > 0 { // Drop any requirements on excluded versions. // Don't modify the cached summary though, since we might need the raw // summary separately. @@ -674,7 +676,7 @@ func goModSummary(m module.Version) (*modFileSummary, error) { // rawGoModSummary cannot be used on the main module outside of workspace mode. // The modFileSummary can still be used for retractions and deprecations // even if a TooNewError is returned. -func rawGoModSummary(m module.Version) (*modFileSummary, error) { +func rawGoModSummary(loaderstate *State, m module.Version) (*modFileSummary, error) { if gover.IsToolchain(m.Path) { if m.Path == "go" && gover.Compare(m.Version, gover.GoStrictVersion) >= 0 { // Declare that go 1.21.3 requires toolchain 1.21.3, @@ -684,7 +686,7 @@ func rawGoModSummary(m module.Version) (*modFileSummary, error) { } return &modFileSummary{module: m}, nil } - if m.Version == "" && !inWorkspaceMode() && MainModules.Contains(m.Path) { + if m.Version == "" && !inWorkspaceMode(loaderstate) && loaderstate.MainModules.Contains(m.Path) { // Calling rawGoModSummary implies that we are treating m as a module whose // requirements aren't the roots of the module graph and can't be modified. // @@ -692,22 +694,22 @@ func rawGoModSummary(m module.Version) (*modFileSummary, error) { // are the roots of the module graph and we expect them to be kept consistent. panic("internal error: rawGoModSummary called on a main module") } - if m.Version == "" && inWorkspaceMode() && m.Path == "command-line-arguments" { + if m.Version == "" && inWorkspaceMode(loaderstate) && m.Path == "command-line-arguments" { // "go work sync" calls LoadModGraph to make sure the module graph is valid. // If there are no modules in the workspace, we synthesize an empty // command-line-arguments module, which rawGoModData cannot read a go.mod for. return &modFileSummary{module: m}, nil - } else if m.Version == "" && inWorkspaceMode() && MainModules.Contains(m.Path) { + } else if m.Version == "" && inWorkspaceMode(loaderstate) && loaderstate.MainModules.Contains(m.Path) { // When go get uses EnterWorkspace to check that the workspace loads properly, // it will update the contents of the workspace module's modfile in memory. To use the updated // contents of the modfile when doing the load, don't read from disk and instead // recompute a summary using the updated contents of the modfile. 
- if mf := MainModules.ModFile(m); mf != nil { - return summaryFromModFile(m, MainModules.modFiles[m]) + if mf := loaderstate.MainModules.ModFile(m); mf != nil { + return summaryFromModFile(m, loaderstate.MainModules.modFiles[m]) } } return rawGoModSummaryCache.Do(m, func() (*modFileSummary, error) { - name, data, err := rawGoModData(m) + name, data, err := rawGoModData(loaderstate, m) if err != nil { return nil, err } @@ -779,15 +781,15 @@ var rawGoModSummaryCache par.ErrCache[module.Version, *modFileSummary] // // Unlike rawGoModSummary, rawGoModData does not cache its results in memory. // Use rawGoModSummary instead unless you specifically need these bytes. -func rawGoModData(m module.Version) (name string, data []byte, err error) { +func rawGoModData(loaderstate *State, m module.Version) (name string, data []byte, err error) { if m.Version == "" { dir := m.Path if !filepath.IsAbs(dir) { - if inWorkspaceMode() && MainModules.Contains(m.Path) { - dir = MainModules.ModRoot(m) + if inWorkspaceMode(loaderstate) && loaderstate.MainModules.Contains(m.Path) { + dir = loaderstate.MainModules.ModRoot(m) } else { // m is a replacement module with only a file path. - dir = filepath.Join(replaceRelativeTo(), dir) + dir = filepath.Join(replaceRelativeTo(loaderstate), dir) } } name = filepath.Join(dir, "go.mod") @@ -823,12 +825,12 @@ func rawGoModData(m module.Version) (name string, data []byte, err error) { // // If the queried latest version is replaced, // queryLatestVersionIgnoringRetractions returns the replacement. -func queryLatestVersionIgnoringRetractions(ctx context.Context, path string) (latest module.Version, err error) { +func queryLatestVersionIgnoringRetractions(loaderstate *State, ctx context.Context, path string) (latest module.Version, err error) { return latestVersionIgnoringRetractionsCache.Do(path, func() (module.Version, error) { ctx, span := trace.StartSpan(ctx, "queryLatestVersionIgnoringRetractions "+path) defer span.Done() - if repl := Replacement(module.Version{Path: path}); repl.Path != "" { + if repl := Replacement(loaderstate, module.Version{Path: path}); repl.Path != "" { // All versions of the module were replaced. // No need to query. return repl, nil @@ -838,12 +840,12 @@ func queryLatestVersionIgnoringRetractions(ctx context.Context, path string) (la // Ignore exclusions from the main module's go.mod. const ignoreSelected = "" var allowAll AllowedFunc - rev, err := Query(ctx, path, "latest", ignoreSelected, allowAll) + rev, err := Query(loaderstate, ctx, path, "latest", ignoreSelected, allowAll) if err != nil { return module.Version{}, err } latest := module.Version{Path: path, Version: rev.Version} - if repl := resolveReplacement(latest); repl.Path != "" { + if repl := resolveReplacement(loaderstate, latest); repl.Path != "" { latest = repl } return latest, nil diff --git a/src/cmd/go/internal/modload/mvs.go b/src/cmd/go/internal/modload/mvs.go index 8ae2dbff1e8887..32afc866fbcc14 100644 --- a/src/cmd/go/internal/modload/mvs.go +++ b/src/cmd/go/internal/modload/mvs.go @@ -43,7 +43,7 @@ type mvsReqs struct { } func (r *mvsReqs) Required(mod module.Version) ([]module.Version, error) { - if mod.Version == "" && MainModules.Contains(mod.Path) { + if mod.Version == "" && LoaderState.MainModules.Contains(mod.Path) { // Use the build list as it existed when r was constructed, not the current // global build list. 
return r.roots, nil @@ -53,7 +53,7 @@ func (r *mvsReqs) Required(mod module.Version) ([]module.Version, error) { return nil, nil } - summary, err := goModSummary(mod) + summary, err := goModSummary(LoaderState, mod) if err != nil { return nil, err } @@ -79,11 +79,11 @@ func (*mvsReqs) Upgrade(m module.Version) (module.Version, error) { return m, nil } -func versions(ctx context.Context, path string, allowed AllowedFunc) (versions []string, origin *codehost.Origin, err error) { +func versions(loaderstate *State, ctx context.Context, path string, allowed AllowedFunc) (versions []string, origin *codehost.Origin, err error) { // Note: modfetch.Lookup and repo.Versions are cached, // so there's no need for us to add extra caching here. err = modfetch.TryProxies(func(proxy string) error { - repo, err := lookupRepo(ctx, proxy, path) + repo, err := lookupRepo(loaderstate, ctx, proxy, path) if err != nil { return err } @@ -111,12 +111,12 @@ func versions(ctx context.Context, path string, allowed AllowedFunc) (versions [ // // Since the version of a main module is not found in the version list, // it has no previous version. -func previousVersion(ctx context.Context, m module.Version) (module.Version, error) { - if m.Version == "" && MainModules.Contains(m.Path) { +func previousVersion(loaderstate *State, ctx context.Context, m module.Version) (module.Version, error) { + if m.Version == "" && loaderstate.MainModules.Contains(m.Path) { return module.Version{Path: m.Path, Version: "none"}, nil } - list, _, err := versions(ctx, m.Path, CheckAllowed) + list, _, err := versions(loaderstate, ctx, m.Path, CheckAllowed) if err != nil { if errors.Is(err, os.ErrNotExist) { return module.Version{Path: m.Path, Version: "none"}, nil @@ -132,5 +132,5 @@ func previousVersion(ctx context.Context, m module.Version) (module.Version, err func (*mvsReqs) Previous(m module.Version) (module.Version, error) { // TODO(golang.org/issue/38714): thread tracing context through MVS. - return previousVersion(context.TODO(), m) + return previousVersion(LoaderState, context.TODO(), m) } diff --git a/src/cmd/go/internal/modload/query.go b/src/cmd/go/internal/modload/query.go index c4cf55442ba69b..17a0aef21ab2e9 100644 --- a/src/cmd/go/internal/modload/query.go +++ b/src/cmd/go/internal/modload/query.go @@ -80,19 +80,19 @@ import ( // // Query often returns a non-nil *RevInfo with a non-nil error, // to provide an info.Origin that can allow the error to be cached. -func Query(ctx context.Context, path, query, current string, allowed AllowedFunc) (*modfetch.RevInfo, error) { +func Query(loaderstate *State, ctx context.Context, path, query, current string, allowed AllowedFunc) (*modfetch.RevInfo, error) { ctx, span := trace.StartSpan(ctx, "modload.Query "+path) defer span.Done() - return queryReuse(ctx, path, query, current, allowed, nil) + return queryReuse(loaderstate, ctx, path, query, current, allowed, nil) } // queryReuse is like Query but also takes a map of module info that can be reused // if the validation criteria in Origin are met. 
-func queryReuse(ctx context.Context, path, query, current string, allowed AllowedFunc, reuse map[module.Version]*modinfo.ModulePublic) (*modfetch.RevInfo, error) { +func queryReuse(loaderstate *State, ctx context.Context, path, query, current string, allowed AllowedFunc, reuse map[module.Version]*modinfo.ModulePublic) (*modfetch.RevInfo, error) { var info *modfetch.RevInfo err := modfetch.TryProxies(func(proxy string) (err error) { - info, err = queryProxy(ctx, proxy, path, query, current, allowed, reuse) + info, err = queryProxy(loaderstate, ctx, proxy, path, query, current, allowed, reuse) return err }) return info, err @@ -100,9 +100,9 @@ func queryReuse(ctx context.Context, path, query, current string, allowed Allowe // checkReuse checks whether a revision of a given module // for a given module may be reused, according to the information in origin. -func checkReuse(ctx context.Context, m module.Version, old *codehost.Origin) error { +func checkReuse(loaderstate *State, ctx context.Context, m module.Version, old *codehost.Origin) error { return modfetch.TryProxies(func(proxy string) error { - repo, err := lookupRepo(ctx, proxy, m.Path) + repo, err := lookupRepo(loaderstate, ctx, proxy, m.Path) if err != nil { return err } @@ -197,7 +197,7 @@ func (queryDisabledError) Error() string { return fmt.Sprintf("cannot query module due to -mod=%s\n\t(%s)", cfg.BuildMod, cfg.BuildModReason) } -func queryProxy(ctx context.Context, proxy, path, query, current string, allowed AllowedFunc, reuse map[module.Version]*modinfo.ModulePublic) (*modfetch.RevInfo, error) { +func queryProxy(loaderstate *State, ctx context.Context, proxy, path, query, current string, allowed AllowedFunc, reuse map[module.Version]*modinfo.ModulePublic) (*modfetch.RevInfo, error) { ctx, span := trace.StartSpan(ctx, "modload.queryProxy "+path+" "+query) defer span.Done() @@ -211,7 +211,7 @@ func queryProxy(ctx context.Context, proxy, path, query, current string, allowed allowed = func(context.Context, module.Version) error { return nil } } - if MainModules.Contains(path) && (query == "upgrade" || query == "patch") { + if loaderstate.MainModules.Contains(path) && (query == "upgrade" || query == "patch") { m := module.Version{Path: path} if err := allowed(ctx, m); err != nil { return nil, fmt.Errorf("internal error: main module version is not allowed: %w", err) @@ -223,7 +223,7 @@ func queryProxy(ctx context.Context, proxy, path, query, current string, allowed return nil, fmt.Errorf("can't query specific version (%q) of standard-library module %q", query, path) } - repo, err := lookupRepo(ctx, proxy, path) + repo, err := lookupRepo(loaderstate, ctx, proxy, path) if err != nil { return nil, err } @@ -296,7 +296,7 @@ func queryProxy(ctx context.Context, proxy, path, query, current string, allowed return &clone } - releases, prereleases, err := qm.filterVersions(ctx, versions.List) + releases, prereleases, err := qm.filterVersions(loaderstate, ctx, versions.List) if err != nil { return revWithOrigin(nil), err } @@ -569,7 +569,7 @@ func (qm *queryMatcher) allowsVersion(ctx context.Context, v string) bool { // // If the allowed predicate returns an error not equivalent to ErrDisallowed, // filterVersions returns that error. 
-func (qm *queryMatcher) filterVersions(ctx context.Context, versions []string) (releases, prereleases []string, err error) { +func (qm *queryMatcher) filterVersions(loaderstate *State, ctx context.Context, versions []string) (releases, prereleases []string, err error) { needIncompatible := qm.preferIncompatible var lastCompatible string @@ -602,7 +602,7 @@ func (qm *queryMatcher) filterVersions(ctx context.Context, versions []string) ( // ignore any version with a higher (+incompatible) major version. (See // https://golang.org/issue/34165.) Note that we even prefer a // compatible pre-release over an incompatible release. - ok, err := versionHasGoMod(ctx, module.Version{Path: qm.path, Version: lastCompatible}) + ok, err := versionHasGoMod(loaderstate, ctx, module.Version{Path: qm.path, Version: lastCompatible}) if err != nil { return nil, nil, err } @@ -639,11 +639,11 @@ type QueryResult struct { // QueryPackages is like QueryPattern, but requires that the pattern match at // least one package and omits the non-package result (if any). -func QueryPackages(ctx context.Context, pattern, query string, current func(string) string, allowed AllowedFunc) ([]QueryResult, error) { - pkgMods, modOnly, err := QueryPattern(ctx, pattern, query, current, allowed) +func QueryPackages(loaderstate *State, ctx context.Context, pattern, query string, current func(string) string, allowed AllowedFunc) ([]QueryResult, error) { + pkgMods, modOnly, err := QueryPattern(loaderstate, ctx, pattern, query, current, allowed) if len(pkgMods) == 0 && err == nil { - replacement := Replacement(modOnly.Mod) + replacement := Replacement(loaderstate, modOnly.Mod) return nil, &PackageNotInModuleError{ Mod: modOnly.Mod, Replacement: replacement, @@ -670,7 +670,7 @@ func QueryPackages(ctx context.Context, pattern, query string, current func(stri // // QueryPattern always returns at least one QueryResult (which may be only // modOnly) or a non-nil error. 
-func QueryPattern(ctx context.Context, pattern, query string, current func(string) string, allowed AllowedFunc) (pkgMods []QueryResult, modOnly *QueryResult, err error) { +func QueryPattern(loaderstate *State, ctx context.Context, pattern, query string, current func(string) string, allowed AllowedFunc) (pkgMods []QueryResult, modOnly *QueryResult, err error) { ctx, span := trace.StartSpan(ctx, "modload.QueryPattern "+pattern+" "+query) defer span.Done() @@ -693,15 +693,15 @@ func QueryPattern(ctx context.Context, pattern, query string, current func(strin } match = func(mod module.Version, roots []string, isLocal bool) *search.Match { m := search.NewMatch(pattern) - matchPackages(ctx, m, imports.AnyTags(), omitStd, []module.Version{mod}) + matchPackages(loaderstate, ctx, m, imports.AnyTags(), omitStd, []module.Version{mod}) return m } } else { match = func(mod module.Version, roots []string, isLocal bool) *search.Match { m := search.NewMatch(pattern) prefix := mod.Path - if MainModules.Contains(mod.Path) { - prefix = MainModules.PathPrefix(module.Version{Path: mod.Path}) + if loaderstate.MainModules.Contains(mod.Path) { + prefix = loaderstate.MainModules.PathPrefix(module.Version{Path: mod.Path}) } for _, root := range roots { if _, ok, err := dirInModule(pattern, prefix, root, isLocal); err != nil { @@ -715,8 +715,8 @@ func QueryPattern(ctx context.Context, pattern, query string, current func(strin } var mainModuleMatches []module.Version - for _, mainModule := range MainModules.Versions() { - m := match(mainModule, modRoots, true) + for _, mainModule := range loaderstate.MainModules.Versions() { + m := match(mainModule, loaderstate.modRoots, true) if len(m.Pkgs) > 0 { if query != "upgrade" && query != "patch" { return nil, nil, &QueryMatchesPackagesInMainModuleError{ @@ -756,7 +756,7 @@ func QueryPattern(ctx context.Context, pattern, query string, current func(strin var ( results []QueryResult - candidateModules = modulePrefixesExcludingTarget(base) + candidateModules = modulePrefixesExcludingTarget(loaderstate, base) ) if len(candidateModules) == 0 { if modOnly != nil { @@ -783,7 +783,7 @@ func QueryPattern(ctx context.Context, pattern, query string, current func(strin pathCurrent := current(path) r.Mod.Path = path - r.Rev, err = queryProxy(ctx, proxy, path, query, pathCurrent, allowed, nil) + r.Rev, err = queryProxy(loaderstate, ctx, proxy, path, query, pathCurrent, allowed, nil) if err != nil { return r, err } @@ -791,7 +791,7 @@ func QueryPattern(ctx context.Context, pattern, query string, current func(strin if gover.IsToolchain(r.Mod.Path) { return r, nil } - root, isLocal, err := fetch(ctx, r.Mod) + root, isLocal, err := fetch(loaderstate, ctx, r.Mod) if err != nil { return r, err } @@ -801,7 +801,7 @@ func QueryPattern(ctx context.Context, pattern, query string, current func(strin if err := firstError(m); err != nil { return r, err } - replacement := Replacement(r.Mod) + replacement := Replacement(loaderstate, r.Mod) return r, &PackageNotInModuleError{ Mod: r.Mod, Replacement: replacement, @@ -812,7 +812,7 @@ func QueryPattern(ctx context.Context, pattern, query string, current func(strin return r, nil } - allResults, err := queryPrefixModules(ctx, candidateModules, queryModule) + allResults, err := queryPrefixModules(loaderstate, ctx, candidateModules, queryModule) results = allResults[:0] for _, r := range allResults { if len(r.Packages) == 0 { @@ -838,11 +838,11 @@ func QueryPattern(ctx context.Context, pattern, query string, current func(strin // itself, sorted by descending 
length. Prefixes that are not valid module paths // but are valid package paths (like "m" or "example.com/.gen") are included, // since they might be replaced. -func modulePrefixesExcludingTarget(path string) []string { +func modulePrefixesExcludingTarget(loaderstate *State, path string) []string { prefixes := make([]string, 0, strings.Count(path, "/")+1) mainModulePrefixes := make(map[string]bool) - for _, m := range MainModules.Versions() { + for _, m := range loaderstate.MainModules.Versions() { mainModulePrefixes[m.Path] = true } @@ -863,7 +863,7 @@ func modulePrefixesExcludingTarget(path string) []string { return prefixes } -func queryPrefixModules(ctx context.Context, candidateModules []string, queryModule func(ctx context.Context, path string) (QueryResult, error)) (found []QueryResult, err error) { +func queryPrefixModules(loaderstate *State, ctx context.Context, candidateModules []string, queryModule func(ctx context.Context, path string) (QueryResult, error)) (found []QueryResult, err error) { ctx, span := trace.StartSpan(ctx, "modload.queryPrefixModules") defer span.Done() @@ -905,7 +905,7 @@ func queryPrefixModules(ctx context.Context, candidateModules []string, queryMod case *PackageNotInModuleError: // Given the option, prefer to attribute “package not in module” // to modules other than the main one. - if noPackage == nil || MainModules.Contains(noPackage.Mod.Path) { + if noPackage == nil || loaderstate.MainModules.Contains(noPackage.Mod.Path) { noPackage = rErr } case *NoMatchingVersionError: @@ -932,7 +932,7 @@ func queryPrefixModules(ctx context.Context, candidateModules []string, queryMod if notExistErr == nil { notExistErr = rErr } - } else if iv := (*module.InvalidVersionError)(nil); errors.As(rErr, &iv) { + } else if _, ok := errors.AsType[*module.InvalidVersionError](rErr); ok { if invalidVersion == nil { invalidVersion = rErr } @@ -1096,8 +1096,8 @@ func (e *PackageNotInModuleError) ImportPath() string { // go.mod with different content. Second, if we don't fetch the .zip, then // we don't need to verify it in go.sum. This makes 'go list -m -u' faster // and simpler. 
-func versionHasGoMod(_ context.Context, m module.Version) (bool, error) { - _, data, err := rawGoModData(m) +func versionHasGoMod(loaderstate *State, _ context.Context, m module.Version) (bool, error) { + _, data, err := rawGoModData(loaderstate, m) if err != nil { return false, err } @@ -1117,7 +1117,7 @@ type versionRepo interface { var _ versionRepo = modfetch.Repo(nil) -func lookupRepo(ctx context.Context, proxy, path string) (repo versionRepo, err error) { +func lookupRepo(loaderstate *State, ctx context.Context, proxy, path string) (repo versionRepo, err error) { if path != "go" && path != "toolchain" { err = module.CheckPath(path) } @@ -1127,9 +1127,9 @@ func lookupRepo(ctx context.Context, proxy, path string) (repo versionRepo, err repo = emptyRepo{path: path, err: err} } - if MainModules == nil { + if loaderstate.MainModules == nil { return repo, err - } else if _, ok := MainModules.HighestReplaced()[path]; ok { + } else if _, ok := loaderstate.MainModules.HighestReplaced()[path]; ok { return &replacementRepo{repo: repo}, nil } @@ -1186,8 +1186,8 @@ func (rr *replacementRepo) Versions(ctx context.Context, prefix string) (*modfet } versions := repoVersions.List - for _, mm := range MainModules.Versions() { - if index := MainModules.Index(mm); index != nil && len(index.replace) > 0 { + for _, mm := range LoaderState.MainModules.Versions() { + if index := LoaderState.MainModules.Index(mm); index != nil && len(index.replace) > 0 { path := rr.ModulePath() for m := range index.replace { if m.Path == path && strings.HasPrefix(m.Version, prefix) && m.Version != "" && !module.IsPseudoVersion(m.Version) { @@ -1215,8 +1215,8 @@ func (rr *replacementRepo) Stat(ctx context.Context, rev string) (*modfetch.RevI return info, err } var hasReplacements bool - for _, v := range MainModules.Versions() { - if index := MainModules.Index(v); index != nil && len(index.replace) > 0 { + for _, v := range LoaderState.MainModules.Versions() { + if index := LoaderState.MainModules.Index(v); index != nil && len(index.replace) > 0 { hasReplacements = true } } @@ -1239,7 +1239,7 @@ func (rr *replacementRepo) Stat(ctx context.Context, rev string) (*modfetch.RevI } } - if r := Replacement(module.Version{Path: path, Version: v}); r.Path == "" { + if r := Replacement(LoaderState, module.Version{Path: path, Version: v}); r.Path == "" { return info, err } return rr.replacementStat(v) @@ -1249,7 +1249,7 @@ func (rr *replacementRepo) Latest(ctx context.Context) (*modfetch.RevInfo, error info, err := rr.repo.Latest(ctx) path := rr.ModulePath() - if v, ok := MainModules.HighestReplaced()[path]; ok { + if v, ok := LoaderState.MainModules.HighestReplaced()[path]; ok { if v == "" { // The only replacement is a wildcard that doesn't specify a version, so // synthesize a pseudo-version with an appropriate major version and a @@ -1290,7 +1290,7 @@ type QueryMatchesMainModulesError struct { } func (e *QueryMatchesMainModulesError) Error() string { - if MainModules.Contains(e.Pattern) { + if LoaderState.MainModules.Contains(e.Pattern) { return fmt.Sprintf("can't request version %q of the main module (%s)", e.Query, e.Pattern) } diff --git a/src/cmd/go/internal/modload/query_test.go b/src/cmd/go/internal/modload/query_test.go index 93f8f0d00d1c8d..b4487eebb0d9e9 100644 --- a/src/cmd/go/internal/modload/query_test.go +++ b/src/cmd/go/internal/modload/query_test.go @@ -182,7 +182,7 @@ func TestQuery(t *testing.T) { t.Run(strings.ReplaceAll(tt.path, "/", "_")+"/"+tt.query+"/"+tt.current+"/"+allow, func(t *testing.T) { t.Parallel() - 
info, err := Query(ctx, tt.path, tt.query, tt.current, allowed) + info, err := Query(LoaderState, ctx, tt.path, tt.query, tt.current, allowed) if tt.err != "" { if err == nil { t.Errorf("Query(_, %q, %q, %q, %v) = %v, want error %q", tt.path, tt.query, tt.current, allow, info.Version, tt.err) diff --git a/src/cmd/go/internal/modload/search.go b/src/cmd/go/internal/modload/search.go index 9ff9738e281118..9951e68ee8e6c9 100644 --- a/src/cmd/go/internal/modload/search.go +++ b/src/cmd/go/internal/modload/search.go @@ -41,7 +41,7 @@ const ( // matchPackages is like m.MatchPackages, but uses a local variable (rather than // a global) for tags, can include or exclude packages in the standard library, // and is restricted to the given list of modules. -func matchPackages(ctx context.Context, m *search.Match, tags map[string]bool, filter stdFilter, modules []module.Version) { +func matchPackages(loaderstate *State, ctx context.Context, m *search.Match, tags map[string]bool, filter stdFilter, modules []module.Version) { ctx, span := trace.StartSpan(ctx, "modload.matchPackages") defer span.Done() @@ -74,7 +74,7 @@ func matchPackages(ctx context.Context, m *search.Match, tags map[string]bool, f ) q := par.NewQueue(runtime.GOMAXPROCS(0)) - ignorePatternsMap := parseIgnorePatterns(ctx, treeCanMatch, modules) + ignorePatternsMap := parseIgnorePatterns(loaderstate, ctx, treeCanMatch, modules) walkPkgs := func(root, importPathRoot string, prune pruning) { _, span := trace.StartSpan(ctx, "walkPkgs "+root) defer span.Done() @@ -171,13 +171,13 @@ func matchPackages(ctx context.Context, m *search.Match, tags map[string]bool, f } if cfg.BuildMod == "vendor" { - for _, mod := range MainModules.Versions() { - if modRoot := MainModules.ModRoot(mod); modRoot != "" { - walkPkgs(modRoot, MainModules.PathPrefix(mod), pruneGoMod|pruneVendor) + for _, mod := range loaderstate.MainModules.Versions() { + if modRoot := loaderstate.MainModules.ModRoot(mod); modRoot != "" { + walkPkgs(modRoot, loaderstate.MainModules.PathPrefix(mod), pruneGoMod|pruneVendor) } } - if HasModRoot() { - walkPkgs(VendorDir(), "", pruneVendor) + if HasModRoot(loaderstate) { + walkPkgs(VendorDir(loaderstate), "", pruneVendor) } return } @@ -191,16 +191,16 @@ func matchPackages(ctx context.Context, m *search.Match, tags map[string]bool, f root, modPrefix string isLocal bool ) - if MainModules.Contains(mod.Path) { - if MainModules.ModRoot(mod) == "" { + if loaderstate.MainModules.Contains(mod.Path) { + if loaderstate.MainModules.ModRoot(mod) == "" { continue // If there is no main module, we can't search in it. } - root = MainModules.ModRoot(mod) - modPrefix = MainModules.PathPrefix(mod) + root = loaderstate.MainModules.ModRoot(mod) + modPrefix = loaderstate.MainModules.PathPrefix(mod) isLocal = true } else { var err error - root, isLocal, err = fetch(ctx, mod) + root, isLocal, err = fetch(loaderstate, ctx, mod) if err != nil { m.AddError(err) continue @@ -286,17 +286,17 @@ func walkFromIndex(index *modindex.Module, importPathRoot string, isMatch, treeC func MatchInModule(ctx context.Context, pattern string, m module.Version, tags map[string]bool) *search.Match { match := search.NewMatch(pattern) if m == (module.Version{}) { - matchPackages(ctx, match, tags, includeStd, nil) + matchPackages(LoaderState, ctx, match, tags, includeStd, nil) } - LoadModFile(ctx) // Sets Target, needed by fetch and matchPackages. + LoadModFile(LoaderState, ctx) // Sets Target, needed by fetch and matchPackages. 
if !match.IsLiteral() { - matchPackages(ctx, match, tags, omitStd, []module.Version{m}) + matchPackages(LoaderState, ctx, match, tags, omitStd, []module.Version{m}) return match } - root, isLocal, err := fetch(ctx, m) + root, isLocal, err := fetch(LoaderState, ctx, m) if err != nil { match.Errs = []error{err} return match @@ -322,7 +322,7 @@ func MatchInModule(ctx context.Context, pattern string, m module.Version, tags m // parseIgnorePatterns collects all ignore patterns associated with the // provided list of modules. // It returns a map of module root -> *search.IgnorePatterns. -func parseIgnorePatterns(ctx context.Context, treeCanMatch func(string) bool, modules []module.Version) map[string]*search.IgnorePatterns { +func parseIgnorePatterns(loaderstate *State, ctx context.Context, treeCanMatch func(string) bool, modules []module.Version) map[string]*search.IgnorePatterns { ignorePatternsMap := make(map[string]*search.IgnorePatterns) for _, mod := range modules { if gover.IsToolchain(mod.Path) || !treeCanMatch(mod.Path) { @@ -330,12 +330,12 @@ func parseIgnorePatterns(ctx context.Context, treeCanMatch func(string) bool, mo } var modRoot string var ignorePatterns []string - if MainModules.Contains(mod.Path) { - modRoot = MainModules.ModRoot(mod) + if loaderstate.MainModules.Contains(mod.Path) { + modRoot = loaderstate.MainModules.ModRoot(mod) if modRoot == "" { continue } - modIndex := MainModules.Index(mod) + modIndex := loaderstate.MainModules.Index(mod) if modIndex == nil { continue } @@ -344,11 +344,11 @@ func parseIgnorePatterns(ctx context.Context, treeCanMatch func(string) bool, mo // Skip getting ignore patterns for vendored modules because they // do not have go.mod files. var err error - modRoot, _, err = fetch(ctx, mod) + modRoot, _, err = fetch(loaderstate, ctx, mod) if err != nil { continue } - summary, err := goModSummary(mod) + summary, err := goModSummary(loaderstate, mod) if err != nil { continue } diff --git a/src/cmd/go/internal/modload/vendor.go b/src/cmd/go/internal/modload/vendor.go index c7fe73193582ab..1fc20ad398b363 100644 --- a/src/cmd/go/internal/modload/vendor.go +++ b/src/cmd/go/internal/modload/vendor.go @@ -140,10 +140,10 @@ func readVendorList(vendorDir string) { // checkVendorConsistency verifies that the vendor/modules.txt file matches (if // go 1.14) or at least does not contradict (go 1.13 or earlier) the // requirements and replacements listed in the main module's go.mod file. -func checkVendorConsistency(indexes []*modFileIndex, modFiles []*modfile.File, modRoots []string) { +func checkVendorConsistency(loaderstate *State, indexes []*modFileIndex, modFiles []*modfile.File, modRoots []string) { // readVendorList only needs the main module to get the directory // the vendor directory is in. - readVendorList(VendorDir()) + readVendorList(VendorDir(loaderstate)) if len(modFiles) < 1 { // We should never get here if there are zero modfiles. Either @@ -154,7 +154,7 @@ func checkVendorConsistency(indexes []*modFileIndex, modFiles []*modfile.File, m } pre114 := false - if !inWorkspaceMode() { // workspace mode was added after Go 1.14 + if !inWorkspaceMode(loaderstate) { // workspace mode was added after Go 1.14 if len(indexes) != 1 { panic(fmt.Errorf("not in workspace mode but number of indexes is %v, not 1", len(indexes))) } @@ -188,7 +188,7 @@ func checkVendorConsistency(indexes []*modFileIndex, modFiles []*modfile.File, m // However, we can at least detect a version mismatch if packages were // vendored from a non-matching version. 
if vv, ok := vendorVersion[r.Mod.Path]; ok && vv != r.Mod.Version { - vendErrorf(r.Mod, fmt.Sprintf("is explicitly required in go.mod, but vendor/modules.txt indicates %s@%s", r.Mod.Path, vv)) + vendErrorf(r.Mod, "is explicitly required in go.mod, but vendor/modules.txt indicates %s@%s", r.Mod.Path, vv) } } else { vendErrorf(r.Mod, "is explicitly required in go.mod, but not marked as explicit in vendor/modules.txt") @@ -215,8 +215,8 @@ func checkVendorConsistency(indexes []*modFileIndex, modFiles []*modfile.File, m continue // Don't print the same error more than once } seenrep[r.Old] = true - rNew, modRoot, replacementSource := replacementFrom(r.Old) - rNewCanonical := canonicalizeReplacePath(rNew, modRoot) + rNew, modRoot, replacementSource := replacementFrom(loaderstate, r.Old) + rNewCanonical := canonicalizeReplacePath(loaderstate, rNew, modRoot) vr := vendorMeta[r.Old].Replacement if vr == (module.Version{}) { if rNewCanonical == (module.Version{}) { @@ -236,8 +236,8 @@ func checkVendorConsistency(indexes []*modFileIndex, modFiles []*modfile.File, m for _, modFile := range modFiles { checkReplace(modFile.Replace) } - if MainModules.workFile != nil { - checkReplace(MainModules.workFile.Replace) + if loaderstate.MainModules.workFile != nil { + checkReplace(loaderstate.MainModules.workFile.Replace) } for _, mod := range vendorList { @@ -252,7 +252,7 @@ func checkVendorConsistency(indexes []*modFileIndex, modFiles []*modfile.File, m } if !foundRequire { article := "" - if inWorkspaceMode() { + if inWorkspaceMode(loaderstate) { article = "a " } vendErrorf(mod, "is marked as explicit in vendor/modules.txt, but not explicitly required in %vgo.mod", article) @@ -262,9 +262,9 @@ func checkVendorConsistency(indexes []*modFileIndex, modFiles []*modfile.File, m } for _, mod := range vendorReplaced { - r := Replacement(mod) + r := Replacement(loaderstate, mod) replacementSource := "go.mod" - if inWorkspaceMode() { + if inWorkspaceMode(loaderstate) { replacementSource = "the workspace" } if r == (module.Version{}) { @@ -276,9 +276,9 @@ func checkVendorConsistency(indexes []*modFileIndex, modFiles []*modfile.File, m if vendErrors.Len() > 0 { subcmd := "mod" - if inWorkspaceMode() { + if inWorkspaceMode(loaderstate) { subcmd = "work" } - base.Fatalf("go: inconsistent vendoring in %s:%s\n\n\tTo ignore the vendor directory, use -mod=readonly or -mod=mod.\n\tTo sync the vendor directory, run:\n\t\tgo %s vendor", filepath.Dir(VendorDir()), vendErrors, subcmd) + base.Fatalf("go: inconsistent vendoring in %s:%s\n\n\tTo ignore the vendor directory, use -mod=readonly or -mod=mod.\n\tTo sync the vendor directory, run:\n\t\tgo %s vendor", filepath.Dir(VendorDir(loaderstate)), vendErrors, subcmd) } } diff --git a/src/cmd/go/internal/rf-cleanup.zsh b/src/cmd/go/internal/rf-cleanup.zsh new file mode 100755 index 00000000000000..c805db56e3611e --- /dev/null +++ b/src/cmd/go/internal/rf-cleanup.zsh @@ -0,0 +1,43 @@ +#!/usr/bin/env zsh + +set -eu -o pipefail + +# This is a large series of sed commands to cleanup after successful use of the +# `rf inject` command. This script will be used to refactor the codebase to +# eliminate global state within the module loader. Once that effort is +# complete, this script will be removed. + +find . -name '*.go' -exec \ + sed -i ' + # + # CompileAction does not use loaderstate. 
+ # + s/CompileAction(loaderstate[^ ]* \*modload.State, /CompileAction(/g + s/CompileAction(modload.LoaderState[^,]*, /CompileAction(/g + s/CompileAction(loaderstate[^,]*, /CompileAction(/g + # + # cgoAction does not use loaderstate. + # + s/cgoAction(loaderstate \*modload\.State, /cgoAction(/g + s/cgoAction(loaderstate, /cgoAction(/g + s/cgoAction(loaderstate_, /cgoAction(/g + # + # Remove redundant mentions of LoaderState from function call sites. + # + s/(modload\.LoaderState_*, loaderstate,/(loaderstate,/g + s/(modload\.LoaderState_*, moduleLoaderState,/(moduleLoaderState,/g + s/(modload\.LoaderState_*, modload\.LoaderState/(modload.LoaderState/g + s/(modload\.LoaderState_*, loaderstate,/(loaderstate,/g + s/(modload\.LoaderState_*, moduleLoaderState,/(moduleLoaderState,/g + s/(modload\.LoaderState_*, modload\.LoaderState,/(modload.LoaderState,/g + s/(loaderstate_* \*modload.State, loaderstate \*modload.State/(loaderstate *modload.State/g + s/(loaderstate_* \*State, loaderstate \*State/(loaderstate *State/g + s/(loaderstate_*, loaderstate,/(loaderstate,/g + s/(LoaderState_*, loaderstate,/(loaderstate,/g + s/(LoaderState_*, loaderState,/(loaderState,/g + s/(LoaderState_*, LoaderState,/(LoaderState,/g + s/(LoaderState_*, LoaderState,/(LoaderState,/g + s/(moduleLoaderState_*, loaderstate,/(loaderstate,/g + s/(moduleLoaderState_*, moduleLoaderState,/(moduleLoaderState,/g + ' {} \; + diff --git a/src/cmd/go/internal/run/run.go b/src/cmd/go/internal/run/run.go index b81b1a007bd79d..b6d76514b032d8 100644 --- a/src/cmd/go/internal/run/run.go +++ b/src/cmd/go/internal/run/run.go @@ -76,15 +76,15 @@ func runRun(ctx context.Context, cmd *base.Command, args []string) { // This must be done before modload.Init, but we need to call work.BuildInit // before loading packages, since it affects package locations, e.g., // for -race and -msan. 
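The rf-cleanup.zsh script above supports the refactor its header comment describes: loader configuration that used to live in package-level variables (MainModules, ForceUseModules, RootMode, and so on) moves into a State value that callers pass explicitly, with the package-level modload.LoaderState kept as a transitional instance. A minimal sketch of that pattern, with hypothetical names standing in for the real modload API:

	package main

	import "fmt"

	// State stands in for modload.State: settings that were previously
	// package-level variables in the module loader.
	type State struct {
		ForceUseModules bool
		RootMode        string
	}

	// LoaderState mirrors the transitional package-level instance that
	// call sites pass explicitly until they construct their own State.
	var LoaderState = &State{RootMode: "auto"}

	// initModules is a hypothetical loader entry point: it used to read
	// the globals, and after injection it receives the state instead.
	func initModules(loaderstate *State) {
		if loaderstate.ForceUseModules {
			fmt.Println("module mode forced; root mode:", loaderstate.RootMode)
		}
	}

	func main() {
		LoaderState.ForceUseModules = true
		initModules(LoaderState) // callers thread the state explicitly
	}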
- modload.ForceUseModules = true - modload.RootMode = modload.NoRoot - modload.AllowMissingModuleImports() - modload.Init() + modload.LoaderState.ForceUseModules = true + modload.LoaderState.RootMode = modload.NoRoot + modload.AllowMissingModuleImports(modload.LoaderState) + modload.Init(modload.LoaderState) } else { - modload.InitWorkfile() + modload.InitWorkfile(modload.LoaderState) } - work.BuildInit() + work.BuildInit(modload.LoaderState) b := work.NewBuilder("") defer func() { if err := b.Close(); err != nil { @@ -107,18 +107,18 @@ func runRun(ctx context.Context, cmd *base.Command, args []string) { base.Fatalf("go: cannot run *_test.go files (%s)", file) } } - p = load.GoFilesPackage(ctx, pkgOpts, files) + p = load.GoFilesPackage(modload.LoaderState, ctx, pkgOpts, files) } else if len(args) > 0 && !strings.HasPrefix(args[0], "-") { arg := args[0] var pkgs []*load.Package if strings.Contains(arg, "@") && !build.IsLocalImport(arg) && !filepath.IsAbs(arg) { var err error - pkgs, err = load.PackagesAndErrorsOutsideModule(ctx, pkgOpts, args[:1]) + pkgs, err = load.PackagesAndErrorsOutsideModule(modload.LoaderState, ctx, pkgOpts, args[:1]) if err != nil { base.Fatal(err) } } else { - pkgs = load.PackagesAndErrors(ctx, pkgOpts, args[:1]) + pkgs = load.PackagesAndErrors(modload.LoaderState, ctx, pkgOpts, args[:1]) } if len(pkgs) == 0 { @@ -140,7 +140,7 @@ func runRun(ctx context.Context, cmd *base.Command, args []string) { load.CheckPackageErrors([]*load.Package{p}) if cfg.BuildCover { - load.PrepareForCoverageBuild([]*load.Package{p}) + load.PrepareForCoverageBuild(modload.LoaderState, []*load.Package{p}) } p.Internal.OmitDebug = true @@ -166,7 +166,7 @@ func runRun(ctx context.Context, cmd *base.Command, args []string) { p.Internal.ExeName = p.DefaultExecName() } - a1 := b.LinkAction(work.ModeBuild, work.ModeBuild, p) + a1 := b.LinkAction(modload.LoaderState, work.ModeBuild, work.ModeBuild, p) a1.CacheExecutable = true a := &work.Action{Mode: "go run", Actor: work.ActorFunc(buildRunProgram), Args: cmdArgs, Deps: []*work.Action{a1}} b.Do(ctx, a) diff --git a/src/cmd/go/internal/telemetrystats/telemetrystats.go b/src/cmd/go/internal/telemetrystats/telemetrystats.go index d5b642240f16b7..9586324551dfc9 100644 --- a/src/cmd/go/internal/telemetrystats/telemetrystats.go +++ b/src/cmd/go/internal/telemetrystats/telemetrystats.go @@ -24,7 +24,7 @@ func Increment() { func incrementConfig() { if !modload.WillBeEnabled() { counter.Inc("go/mode:gopath") - } else if workfile := modload.FindGoWork(base.Cwd()); workfile != "" { + } else if workfile := modload.FindGoWork(modload.LoaderState, base.Cwd()); workfile != "" { counter.Inc("go/mode:workspace") } else { counter.Inc("go/mode:module") diff --git a/src/cmd/go/internal/test/flagdefs.go b/src/cmd/go/internal/test/flagdefs.go index 8aa0bfc2bf3120..b8b4bf649e42e7 100644 --- a/src/cmd/go/internal/test/flagdefs.go +++ b/src/cmd/go/internal/test/flagdefs.go @@ -9,6 +9,7 @@ package test // passFlagToTest contains the flags that should be forwarded to // the test binary with the prefix "test.". var passFlagToTest = map[string]bool{ + "artifacts": true, "bench": true, "benchmem": true, "benchtime": true, diff --git a/src/cmd/go/internal/test/test.go b/src/cmd/go/internal/test/test.go index 8bfb3c149b6c69..5e5d79a39f001f 100644 --- a/src/cmd/go/internal/test/test.go +++ b/src/cmd/go/internal/test/test.go @@ -192,6 +192,10 @@ and -show_bytes options of pprof control how the information is presented. 
The following flags are recognized by the 'go test' command and control the execution of any test: + -artifacts + Save test artifacts in the directory specified by -outputdir. + See 'go doc testing.T.ArtifactDir'. + -bench regexp Run only those benchmarks matching a regular expression. By default, no benchmarks are run. @@ -286,6 +290,10 @@ control the execution of any test: This will only list top-level tests. No subtest or subbenchmarks will be shown. + -outputdir directory + Place output files from profiling and test artifacts in the + specified directory, by default the directory in which "go test" is running. + -parallel n Allow parallel execution of test functions that call t.Parallel, and fuzz targets that call t.Parallel when running the seed corpus. @@ -397,10 +405,6 @@ profile the tests during execution: Sample 1 in n stack traces of goroutines holding a contended mutex. - -outputdir directory - Place output files from profiling in the specified directory, - by default the directory in which "go test" is running. - -trace trace.out Write an execution trace to the specified file before exiting. @@ -540,6 +544,7 @@ See the documentation of the testing package for more information. } var ( + testArtifacts bool // -artifacts flag testBench string // -bench flag testC bool // -c flag testCoverPkgs []*load.Package // -coverpkg flag @@ -678,7 +683,7 @@ var defaultVetFlags = []string{ func runTest(ctx context.Context, cmd *base.Command, args []string) { pkgArgs, testArgs = testFlags(args) - modload.InitWorkfile() // The test command does custom flag processing; initialize workspaces after that. + modload.InitWorkfile(modload.LoaderState) // The test command does custom flag processing; initialize workspaces after that. if cfg.DebugTrace != "" { var close func() error @@ -699,12 +704,13 @@ func runTest(ctx context.Context, cmd *base.Command, args []string) { work.FindExecCmd() // initialize cached result - work.BuildInit() + work.BuildInit(modload.LoaderState) work.VetFlags = testVet.flags work.VetExplicit = testVet.explicit + work.VetTool = base.Tool("vet") pkgOpts := load.PackageOpts{ModResolveTests: true} - pkgs = load.PackagesAndErrors(ctx, pkgOpts, pkgArgs) + pkgs = load.PackagesAndErrors(modload.LoaderState, ctx, pkgOpts, pkgArgs) // We *don't* call load.CheckPackageErrors here because we want to report // loading errors as per-package test setup errors later. if len(pkgs) == 0 { @@ -730,12 +736,12 @@ func runTest(ctx context.Context, cmd *base.Command, args []string) { // the module cache (or permanently alter the behavior of std tests for all // users) by writing the failing input to the package's testdata directory. // (See https://golang.org/issue/48495 and test_fuzz_modcache.txt.) - mainMods := modload.MainModules + mainMods := modload.LoaderState.MainModules if m := pkgs[0].Module; m != nil && m.Path != "" { if !mainMods.Contains(m.Path) { base.Fatalf("cannot use -fuzz flag on package outside the main module") } - } else if pkgs[0].Standard && modload.Enabled() { + } else if pkgs[0].Standard && modload.Enabled(modload.LoaderState) { // Because packages in 'std' and 'cmd' are part of the standard library, // they are only treated as part of a module in 'go mod' subcommands and // 'go get'. 
However, we still don't want to accidentally corrupt their @@ -861,13 +867,13 @@ func runTest(ctx context.Context, cmd *base.Command, args []string) { if cfg.BuildCoverPkg != nil { match := make([]func(*load.Package) bool, len(cfg.BuildCoverPkg)) for i := range cfg.BuildCoverPkg { - match[i] = load.MatchPackage(cfg.BuildCoverPkg[i], base.Cwd()) + match[i] = load.MatchPackage(modload.LoaderState, cfg.BuildCoverPkg[i], base.Cwd()) } // Select for coverage all dependencies matching the -coverpkg // patterns. plist := load.TestPackageList(ctx, pkgOpts, pkgs) - testCoverPkgs = load.SelectCoverPackages(plist, match, "test") + testCoverPkgs = load.SelectCoverPackages(modload.LoaderState, plist, match, "test") if len(testCoverPkgs) > 0 { // create a new singleton action that will collect up the // meta-data files from all of the packages mentioned in @@ -975,7 +981,7 @@ func runTest(ctx context.Context, cmd *base.Command, args []string) { // happens we'll wind up building the Q compile action // before updating its deps to include sync/atomic). if cfg.BuildCoverMode == "atomic" && p.ImportPath != "sync/atomic" { - load.EnsureImport(p, "sync/atomic") + load.EnsureImport(modload.LoaderState, p, "sync/atomic") } // Tag the package for static meta-data generation if no // test files (this works only with the new coverage @@ -1215,7 +1221,7 @@ func builderTest(b *work.Builder, ctx context.Context, pkgOpts load.PackageOpts, } } - a := b.LinkAction(work.ModeBuild, work.ModeBuild, pmain) + a := b.LinkAction(modload.LoaderState, work.ModeBuild, work.ModeBuild, pmain) a.Target = testDir + testBinary + cfg.ExeSuffix if cfg.Goos == "windows" { // There are many reserved words on Windows that, @@ -1735,8 +1741,7 @@ func (r *runTestActor) Act(b *work.Builder, ctx context.Context, a *work.Action) } else if errors.Is(err, exec.ErrWaitDelay) { fmt.Fprintf(cmd.Stdout, "*** Test I/O incomplete %v after exiting.\n", cmd.WaitDelay) } - var ee *exec.ExitError - if len(out) == 0 || !errors.As(err, &ee) || !ee.Exited() { + if ee, ok := errors.AsType[*exec.ExitError](err); !ok || !ee.Exited() || len(out) == 0 { // If there was no test output, print the exit status so that the reason // for failure is clear. fmt.Fprintf(cmd.Stdout, "%s\n", err) diff --git a/src/cmd/go/internal/test/testflag.go b/src/cmd/go/internal/test/testflag.go index 983e8f56e9af09..d6891a1d0b955b 100644 --- a/src/cmd/go/internal/test/testflag.go +++ b/src/cmd/go/internal/test/testflag.go @@ -44,6 +44,7 @@ func init() { // some of them so that cmd/go knows what to do with the test output, or knows // to build the test in a way that supports the use of the flag. + cf.BoolVar(&testArtifacts, "artifacts", false, "") cf.StringVar(&testBench, "bench", "", "") cf.Bool("benchmem", false, "") cf.String("benchtime", "", "") @@ -260,7 +261,7 @@ func testFlags(args []string) (packageNames, passToTest []string) { break } - if nf := (cmdflag.NonFlagError{}); errors.As(err, &nf) { + if nf, ok := errors.AsType[cmdflag.NonFlagError](err); ok { if !inPkgList && packageNames != nil { // We already saw the package list previously, and this argument is not // a flag, so it — and everything after it — must be either a value for @@ -295,7 +296,7 @@ func testFlags(args []string) (packageNames, passToTest []string) { inPkgList = false } - if nd := (cmdflag.FlagNotDefinedError{}); errors.As(err, &nd) { + if nd, ok := errors.AsType[cmdflag.FlagNotDefinedError](err); ok { // This is a flag we do not know. 
We must assume that any args we see // after this might be flag arguments, not package names, so make // packageNames non-nil to indicate that the package list is complete. @@ -392,7 +393,8 @@ func testFlags(args []string) (packageNames, passToTest []string) { // directory, but 'go test' defaults it to the working directory of the 'go' // command. Set it explicitly if it is needed due to some other flag that // requests output. - if testProfile() != "" && !outputDirSet { + needOutputDir := testProfile() != "" || testArtifacts + if needOutputDir && !outputDirSet { injectedFlags = append(injectedFlags, "-test.outputdir="+testOutputDir.getAbs()) } diff --git a/src/cmd/go/internal/tool/tool.go b/src/cmd/go/internal/tool/tool.go index 120ef5339bede0..e95b07d8c813aa 100644 --- a/src/cmd/go/internal/tool/tool.go +++ b/src/cmd/go/internal/tool/tool.go @@ -161,9 +161,9 @@ func listTools(ctx context.Context) { fmt.Println(name) } - modload.InitWorkfile() - modload.LoadModFile(ctx) - modTools := slices.Sorted(maps.Keys(modload.MainModules.Tools())) + modload.InitWorkfile(modload.LoaderState) + modload.LoadModFile(modload.LoaderState, ctx) + modTools := slices.Sorted(maps.Keys(modload.LoaderState.MainModules.Tools())) for _, tool := range modTools { fmt.Println(tool) } @@ -252,11 +252,11 @@ func loadBuiltinTool(toolName string) string { } func loadModTool(ctx context.Context, name string) string { - modload.InitWorkfile() - modload.LoadModFile(ctx) + modload.InitWorkfile(modload.LoaderState) + modload.LoadModFile(modload.LoaderState, ctx) matches := []string{} - for tool := range modload.MainModules.Tools() { + for tool := range modload.LoaderState.MainModules.Tools() { if tool == name || defaultExecName(tool) == name { matches = append(matches, tool) } @@ -308,7 +308,7 @@ func buildAndRunBuiltinTool(ctx context.Context, toolName, tool string, args []s // Ignore go.mod and go.work: we don't need them, and we want to be able // to run the tool even if there's an issue with the module or workspace the // user happens to be in. 
- modload.RootMode = modload.NoRoot + modload.LoaderState.RootMode = modload.NoRoot runFunc := func(b *work.Builder, ctx context.Context, a *work.Action) error { cmdline := str.StringList(builtTool(a), a.Args) @@ -336,7 +336,7 @@ func buildAndRunModtool(ctx context.Context, toolName, tool string, args []strin } func buildAndRunTool(ctx context.Context, tool string, args []string, runTool work.ActorFunc) { - work.BuildInit() + work.BuildInit(modload.LoaderState) b := work.NewBuilder("") defer func() { if err := b.Close(); err != nil { @@ -345,11 +345,11 @@ func buildAndRunTool(ctx context.Context, tool string, args []string, runTool wo }() pkgOpts := load.PackageOpts{MainOnly: true} - p := load.PackagesAndErrors(ctx, pkgOpts, []string{tool})[0] + p := load.PackagesAndErrors(modload.LoaderState, ctx, pkgOpts, []string{tool})[0] p.Internal.OmitDebug = true p.Internal.ExeName = p.DefaultExecName() - a1 := b.LinkAction(work.ModeBuild, work.ModeBuild, p) + a1 := b.LinkAction(modload.LoaderState, work.ModeBuild, work.ModeBuild, p) a1.CacheExecutable = true a := &work.Action{Mode: "go tool", Actor: runTool, Args: args, Deps: []*work.Action{a1}} b.Do(ctx, a) diff --git a/src/cmd/go/internal/toolchain/select.go b/src/cmd/go/internal/toolchain/select.go index e8712613366e8c..d54277ed1b4bea 100644 --- a/src/cmd/go/internal/toolchain/select.go +++ b/src/cmd/go/internal/toolchain/select.go @@ -353,9 +353,9 @@ func Exec(gotoolchain string) { // Set up modules without an explicit go.mod, to download distribution. modload.Reset() - modload.ForceUseModules = true - modload.RootMode = modload.NoRoot - modload.Init() + modload.LoaderState.ForceUseModules = true + modload.LoaderState.RootMode = modload.NoRoot + modload.Init(modload.LoaderState) // Download and unpack toolchain module into module cache. // Note that multiple go commands might be doing this at the same time, @@ -529,7 +529,7 @@ func raceSafeCopy(old, new string) error { // The toolchain line overrides the version line func modGoToolchain() (file, goVers, toolchain string) { wd := base.UncachedCwd() - file = modload.FindGoWork(wd) + file = modload.FindGoWork(modload.LoaderState, wd) // $GOWORK can be set to a file that does not yet exist, if we are running 'go work init'. // Do not try to load the file in that case if _, err := os.Stat(file); err != nil { @@ -692,9 +692,9 @@ func maybeSwitchForGoInstallVersion(minVers string) { // command lines if we add new flags in the future. // Set up modules without an explicit go.mod, to download go.mod. - modload.ForceUseModules = true - modload.RootMode = modload.NoRoot - modload.Init() + modload.LoaderState.ForceUseModules = true + modload.LoaderState.RootMode = modload.NoRoot + modload.Init(modload.LoaderState) defer modload.Reset() // See internal/load.PackagesAndErrorsOutsideModule @@ -705,7 +705,7 @@ func maybeSwitchForGoInstallVersion(minVers string) { allowed = nil } noneSelected := func(path string) (version string) { return "none" } - _, err := modload.QueryPackages(ctx, path, version, noneSelected, allowed) + _, err := modload.QueryPackages(modload.LoaderState, ctx, path, version, noneSelected, allowed) if errors.Is(err, gover.ErrTooNew) { // Run early switch, same one go install or go run would eventually do, // if it understood all the command-line flags. 
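// Illustrative sketch (not a converted call site from this CL) of the
// errors.As -> errors.AsType rewrite pattern applied throughout these files.
// The package name, function name, and *fs.PathError target type are chosen
// only for the example; the new generic form returns the typed error plus an
// ok flag, as seen in the call sites above and below.
package astypeexample

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func describeOpenError() {
	_, err := os.Open("no-such-file")

	// Old form: declare a typed variable and pass its address to errors.As.
	var pathErr *fs.PathError
	if errors.As(err, &pathErr) {
		fmt.Println("errors.As:", pathErr.Path)
	}

	// New form: errors.AsType returns the typed error and an ok flag,
	// so the conversion can sit directly in the if statement.
	if pathErr, ok := errors.AsType[*fs.PathError](err); ok {
		fmt.Println("errors.AsType:", pathErr.Path)
	}
}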
diff --git a/src/cmd/go/internal/vcweb/vcstest/vcstest_test.go b/src/cmd/go/internal/vcweb/vcstest/vcstest_test.go index 67234ac20d4628..6a6a0eee57ce85 100644 --- a/src/cmd/go/internal/vcweb/vcstest/vcstest_test.go +++ b/src/cmd/go/internal/vcweb/vcstest/vcstest_test.go @@ -155,10 +155,10 @@ func TestScripts(t *testing.T) { t.Log(buf) } if err != nil { - if notInstalled := (vcweb.ServerNotInstalledError{}); errors.As(err, ¬Installed) || errors.Is(err, exec.ErrNotFound) { + if _, ok := errors.AsType[vcweb.ServerNotInstalledError](err); ok || errors.Is(err, exec.ErrNotFound) { t.Skip(err) } - if skip := (vcweb.SkipError{}); errors.As(err, &skip) { + if skip, ok := errors.AsType[vcweb.SkipError](err); ok { if skip.Msg == "" { t.Skip("SKIP") } else { diff --git a/src/cmd/go/internal/vcweb/vcweb.go b/src/cmd/go/internal/vcweb/vcweb.go index b81ff5e63de72a..4b4e127bb042e0 100644 --- a/src/cmd/go/internal/vcweb/vcweb.go +++ b/src/cmd/go/internal/vcweb/vcweb.go @@ -244,9 +244,9 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) { }) if err != nil { s.logger.Print(err) - if notFound := (ScriptNotFoundError{}); errors.As(err, ¬Found) { + if _, ok := errors.AsType[ScriptNotFoundError](err); ok { http.NotFound(w, req) - } else if notInstalled := (ServerNotInstalledError{}); errors.As(err, ¬Installed) || errors.Is(err, exec.ErrNotFound) { + } else if _, ok := errors.AsType[ServerNotInstalledError](err); ok || errors.Is(err, exec.ErrNotFound) { http.Error(w, err.Error(), http.StatusNotImplemented) } else { http.Error(w, err.Error(), http.StatusInternalServerError) diff --git a/src/cmd/go/internal/version/version.go b/src/cmd/go/internal/version/version.go index c26dd42b4e1a08..781bc080e89fe4 100644 --- a/src/cmd/go/internal/version/version.go +++ b/src/cmd/go/internal/version/version.go @@ -168,7 +168,7 @@ func scanFile(file string, info fs.FileInfo, mustPrint bool) bool { bi, err := buildinfo.ReadFile(file) if err != nil { if mustPrint { - if pathErr := (*os.PathError)(nil); errors.As(err, &pathErr) && filepath.Clean(pathErr.Path) == filepath.Clean(file) { + if pathErr, ok := errors.AsType[*os.PathError](err); ok && filepath.Clean(pathErr.Path) == filepath.Clean(file) { fmt.Fprintf(os.Stderr, "%v\n", file) } else { // Skip errors for non-Go binaries. diff --git a/src/cmd/go/internal/vet/vet.go b/src/cmd/go/internal/vet/vet.go index 3514be80feb8cc..e274348bd6a38e 100644 --- a/src/cmd/go/internal/vet/vet.go +++ b/src/cmd/go/internal/vet/vet.go @@ -2,13 +2,20 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package vet implements the “go vet” command. +// Package vet implements the “go vet” and “go fix” commands. package vet import ( "context" + "encoding/json" + "errors" "fmt" - "path/filepath" + "io" + "os" + "slices" + "strconv" + "strings" + "sync" "cmd/go/internal/base" "cmd/go/internal/cfg" @@ -18,30 +25,39 @@ import ( "cmd/go/internal/work" ) -// Break init loop. -func init() { - CmdVet.Run = runVet -} - var CmdVet = &base.Command{ CustomFlags: true, UsageLine: "go vet [build flags] [-vettool prog] [vet flags] [packages]", Short: "report likely mistakes in packages", Long: ` -Vet runs the Go vet command on the packages named by the import paths. +Vet runs the Go vet tool (cmd/vet) on the named packages +and reports diagnostics. -For more about vet and its flags, see 'go doc cmd/vet'. -For more about specifying packages, see 'go help packages'. -For a list of checkers and their flags, see 'go tool vet help'. 
-For details of a specific checker such as 'printf', see 'go tool vet help printf'. +It supports these flags: -The -vettool=prog flag selects a different analysis tool with alternative -or additional checks. -For example, the 'shadow' analyzer can be built and run using these commands: + -c int + display offending line with this many lines of context (default -1) + -json + emit JSON output + -fix + instead of printing each diagnostic, apply its first fix (if any) + -diff + instead of applying each fix, print the patch as a unified diff + +The -vettool=prog flag selects a different analysis tool with +alternative or additional checks. For example, the 'shadow' analyzer +can be built and run using these commands: go install golang.org/x/tools/go/analysis/passes/shadow/cmd/shadow@latest go vet -vettool=$(which shadow) +Alternative vet tools should be built atop golang.org/x/tools/go/analysis/unitchecker, +which handles the interaction with go vet. + +For more about specifying packages, see 'go help packages'. +For a list of checkers and their flags, see 'go tool vet help'. +For details of a specific checker such as 'printf', see 'go tool vet help printf'. + The build flags supported by go vet are those that control package resolution and execution, such as -C, -n, -x, -v, -tags, and -toolexec. For more about these flags, see 'go help build'. @@ -50,9 +66,64 @@ See also: go fmt, go fix. `, } -func runVet(ctx context.Context, cmd *base.Command, args []string) { - vetFlags, pkgArgs := vetFlags(args) - modload.InitWorkfile() // The vet command does custom flag processing; initialize workspaces after that. +var CmdFix = &base.Command{ + CustomFlags: true, + UsageLine: "go fix [build flags] [-fixtool prog] [fix flags] [packages]", + Short: "apply fixes suggested by static checkers", + Long: ` +Fix runs the Go fix tool (cmd/vet) on the named packages +and applies suggested fixes. + +It supports these flags: + + -diff + instead of applying each fix, print the patch as a unified diff + +The -fixtool=prog flag selects a different analysis tool with +alternative or additional fixes; see the documentation for go vet's +-vettool flag for details. + +For more about specifying packages, see 'go help packages'. + +For a list of fixers and their flags, see 'go tool fix help'. + +For details of a specific fixer such as 'hostport', +see 'go tool fix help hostport'. + +The build flags supported by go fix are those that control package resolution +and execution, such as -C, -n, -x, -v, -tags, and -toolexec. +For more about these flags, see 'go help build'. + +See also: go fmt, go vet. + `, +} + +func init() { + // avoid initialization cycle + CmdVet.Run = run + CmdFix.Run = run + + addFlags(CmdVet) + addFlags(CmdFix) +} + +var ( + // "go vet -fix" causes fixes to be applied. + vetFixFlag = CmdVet.Flag.Bool("fix", false, "apply the first fix (if any) for each diagnostic") + + // The "go fix -fix=name,..." flag is an obsolete flag formerly + // used to pass a list of names to the old "cmd/fix -r". + fixFixFlag = CmdFix.Flag.String("fix", "", "obsolete; no effect") +) + +// run implements both "go vet" and "go fix". +func run(ctx context.Context, cmd *base.Command, args []string) { + // Compute flags for the vet/fix tool (e.g. cmd/{vet,fix}). + toolFlags, pkgArgs := toolFlags(cmd, args) + + // The vet/fix commands do custom flag processing; + // initialize workspaces after that. 
+ modload.InitWorkfile(modload.LoaderState) if cfg.DebugTrace != "" { var close func() error @@ -71,24 +142,85 @@ func runVet(ctx context.Context, cmd *base.Command, args []string) { ctx, span := trace.StartSpan(ctx, fmt.Sprint("Running ", cmd.Name(), " command")) defer span.Done() - work.BuildInit() - work.VetFlags = vetFlags - if len(vetFlags) > 0 { - work.VetExplicit = true + work.BuildInit(modload.LoaderState) + + // Flag theory: + // + // All flags supported by unitchecker are accepted by go {vet,fix}. + // Some arise from each analyzer in the tool (both to enable it + // and to configure it), whereas others [-V -c -diff -fix -flags -json] + // are core to unitchecker itself. + // + // Most are passed through to toolFlags, but not all: + // * -V and -flags are used by the handshake in the [toolFlags] function; + // * these old flags have no effect: [-all -source -tags -v]; and + // * the [-c -fix -diff -json] flags are handled specially + // as described below: + // + // command args tool args + // go vet => cmd/vet -json Parse stdout, print diagnostics to stderr. + // go vet -json => cmd/vet -json Pass stdout through. + // go vet -fix [-diff] => cmd/vet -fix [-diff] Pass stdout through. + // go fix [-diff] => cmd/fix -fix [-diff] Pass stdout through. + // go fix -json => cmd/fix -json Pass stdout through. + // + // Notes: + // * -diff requires "go vet -fix" or "go fix", and no -json. + // * -json output is the same in "vet" and "fix" modes, + // and describes both diagnostics and fixes (but does not apply them). + // * -c=n is supported by the unitchecker, but we reimplement it + // here (see printDiagnostics), and do not pass the flag through. + + work.VetExplicit = len(toolFlags) > 0 + + if cmd.Name() == "fix" || *vetFixFlag { + // fix mode: 'go fix' or 'go vet -fix' + if jsonFlag { + if diffFlag { + base.Fatalf("-json and -diff cannot be used together") + } + } else { + toolFlags = append(toolFlags, "-fix") + if diffFlag { + toolFlags = append(toolFlags, "-diff") + } + } + if contextFlag != -1 { + base.Fatalf("-c flag cannot be used when applying fixes") + } + } else { + // vet mode: 'go vet' without -fix + if !jsonFlag { + // Post-process the JSON diagnostics on stdout and format + // it as "file:line: message" diagnostics on stderr. + // (JSON reliably frames diagnostics, fixes, and errors so + // that we don't have to parse stderr or interpret non-zero + // exit codes, and interacts better with the action cache.) + toolFlags = append(toolFlags, "-json") + work.VetHandleStdout = printJSONDiagnostics + } + if diffFlag { + base.Fatalf("go vet -diff flag requires -fix") + } } - if vetTool != "" { - var err error - work.VetTool, err = filepath.Abs(vetTool) - if err != nil { - base.Fatalf("%v", err) + + // Implement legacy "go fix -fix=name,..." flag. + if *fixFixFlag != "" { + fmt.Fprintf(os.Stderr, "go %s: the -fix=%s flag is obsolete and has no effect", cmd.Name(), *fixFixFlag) + + // The buildtag fixer is now implemented by cmd/fix. 
+ if slices.Contains(strings.Split(*fixFixFlag, ","), "buildtag") { + fmt.Fprintf(os.Stderr, "go %s: to enable the buildtag check, use -buildtag", cmd.Name()) } } + work.VetFlags = toolFlags + pkgOpts := load.PackageOpts{ModResolveTests: true} - pkgs := load.PackagesAndErrors(ctx, pkgOpts, pkgArgs) + pkgs := load.PackagesAndErrors(modload.LoaderState, ctx, pkgOpts, pkgArgs) load.CheckPackageErrors(pkgs) if len(pkgs) == 0 { - base.Fatalf("no packages to vet") + base.Fatalf("no packages to %s", cmd.Name()) } b := work.NewBuilder("") @@ -98,7 +230,23 @@ func runVet(ctx context.Context, cmd *base.Command, args []string) { } }() - root := &work.Action{Mode: "go vet"} + // To avoid file corruption from duplicate application of + // fixes (in fix mode), and duplicate reporting of diagnostics + // (in vet mode), we must run the tool only once for each + // source file. We achieve that by running on ptest (below) + // instead of p. + // + // As a side benefit, this also allows analyzers to make + // "closed world" assumptions and report diagnostics (such as + // "this symbol is unused") that might be false if computed + // from just the primary package p, falsified by the + // additional declarations in test files. + // + // We needn't worry about intermediate test variants, as they + // will only be executed in VetxOnly mode, for facts but not + // diagnostics. + + root := &work.Action{Mode: "go " + cmd.Name()} for _, p := range pkgs { _, ptest, pxtest, perr := load.TestPackagesFor(ctx, pkgOpts, p, nil) if perr != nil { @@ -106,10 +254,11 @@ func runVet(ctx context.Context, cmd *base.Command, args []string) { continue } if len(ptest.GoFiles) == 0 && len(ptest.CgoFiles) == 0 && pxtest == nil { - base.Errorf("go: can't vet %s: no Go files in %s", p.ImportPath, p.Dir) + base.Errorf("go: can't %s %s: no Go files in %s", cmd.Name(), p.ImportPath, p.Dir) continue } if len(ptest.GoFiles) > 0 || len(ptest.CgoFiles) > 0 { + // The test package includes all the files of primary package. root.Deps = append(root.Deps, b.VetAction(work.ModeBuild, work.ModeBuild, ptest)) } if pxtest != nil { @@ -118,3 +267,167 @@ func runVet(ctx context.Context, cmd *base.Command, args []string) { } b.Do(ctx, root) } + +// printJSONDiagnostics parses JSON (from the tool's stdout) and +// prints it (to stderr) in "file:line: message" form. +// It also ensures that we exit nonzero if there were diagnostics. 
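// Hypothetical example of the JSON handled here, for illustration only: the
// tool's stdout is a two-level map of package ID -> analyzer -> payload. The
// file path and position below are invented; the message matches the printf
// diagnostic exercised by the vet_basic.txt test added later in this CL.
const exampleUnitcheckerJSON = `{
	"example.com/x": {
		"printf": [
			{
				"posn": "/work/x/x.go:12:21",
				"message": "non-constant format string in call to fmt.Sprintf"
			}
		]
	}
}`

// which go vet reports on stderr (after base.ShortPath shortening) roughly as:
//
//	x.go:12:21: non-constant format string in call to fmt.Sprintf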
+func printJSONDiagnostics(r io.Reader) error { + stdout, err := io.ReadAll(r) + if err != nil { + return err + } + if len(stdout) > 0 { + // unitchecker emits a JSON map of the form: + // output maps Package ID -> Analyzer.Name -> (error | []Diagnostic); + var tree jsonTree + if err := json.Unmarshal([]byte(stdout), &tree); err != nil { + return fmt.Errorf("parsing JSON: %v", err) + } + for _, units := range tree { + for analyzer, msg := range units { + if msg[0] == '[' { + // []Diagnostic + var diags []jsonDiagnostic + if err := json.Unmarshal([]byte(msg), &diags); err != nil { + return fmt.Errorf("parsing JSON diagnostics: %v", err) + } + for _, diag := range diags { + base.SetExitStatus(1) + printJSONDiagnostic(analyzer, diag) + } + } else { + // error + var e jsonError + if err := json.Unmarshal([]byte(msg), &e); err != nil { + return fmt.Errorf("parsing JSON error: %v", err) + } + + base.SetExitStatus(1) + return errors.New(e.Err) + } + } + } + } + return nil +} + +var stderrMu sync.Mutex // serializes concurrent writes to stdout + +func printJSONDiagnostic(analyzer string, diag jsonDiagnostic) { + stderrMu.Lock() + defer stderrMu.Unlock() + + type posn struct { + file string + line, col int + } + parsePosn := func(s string) (_ posn, _ bool) { + colon2 := strings.LastIndexByte(s, ':') + if colon2 < 0 { + return + } + colon1 := strings.LastIndexByte(s[:colon2], ':') + if colon1 < 0 { + return + } + line, err := strconv.Atoi(s[colon1+len(":") : colon2]) + if err != nil { + return + } + col, err := strconv.Atoi(s[colon2+len(":"):]) + if err != nil { + return + } + return posn{s[:colon1], line, col}, true + } + + print := func(start, end, message string) { + if posn, ok := parsePosn(start); ok { + // The (*work.Shell).reportCmd method relativizes the + // prefix of each line of the subprocess's stdout; + // but filenames in JSON aren't at the start of the line, + // so we need to apply ShortPath here too. + fmt.Fprintf(os.Stderr, "%s:%d:%d: %v\n", base.ShortPath(posn.file), posn.line, posn.col, message) + } else { + fmt.Fprintf(os.Stderr, "%s: %v\n", start, message) + } + + // -c=n: show offending line plus N lines of context. + // (Duplicates logic in unitchecker; see analysisflags.PrintPlain.) + if contextFlag >= 0 { + if end == "" { + end = start + } + var ( + startPosn, ok1 = parsePosn(start) + endPosn, ok2 = parsePosn(end) + ) + if ok1 && ok2 { + // TODO(adonovan): respect overlays (like unitchecker does). + data, _ := os.ReadFile(startPosn.file) + lines := strings.Split(string(data), "\n") + for i := startPosn.line - contextFlag; i <= endPosn.line+contextFlag; i++ { + if 1 <= i && i <= len(lines) { + fmt.Fprintf(os.Stderr, "%d\t%s\n", i, lines[i-1]) + } + } + } + } + } + + // TODO(adonovan): append " [analyzer]" to message. But we must first relax + // x/tools/go/analysis/internal/versiontest.TestVettool and revendor; sigh. + _ = analyzer + print(diag.Posn, diag.End, diag.Message) + for _, rel := range diag.Related { + print(rel.Posn, rel.End, "\t"+rel.Message) + } +} + +// -- JSON schema -- + +// (populated by golang.org/x/tools/go/analysis/internal/analysisflags/flags.go) + +// A jsonTree is a mapping from package ID to analysis name to result. +// Each result is either a jsonError or a list of jsonDiagnostic. +type jsonTree map[string]map[string]json.RawMessage + +type jsonError struct { + Err string `json:"error"` +} + +// A TextEdit describes the replacement of a portion of a file. 
+// Start and End are zero-based half-open indices into the original byte +// sequence of the file, and New is the new text. +type jsonTextEdit struct { + Filename string `json:"filename"` + Start int `json:"start"` + End int `json:"end"` + New string `json:"new"` +} + +// A jsonSuggestedFix describes an edit that should be applied as a whole or not +// at all. It might contain multiple TextEdits/text_edits if the SuggestedFix +// consists of multiple non-contiguous edits. +type jsonSuggestedFix struct { + Message string `json:"message"` + Edits []jsonTextEdit `json:"edits"` +} + +// A jsonDiagnostic describes the json schema of an analysis.Diagnostic. +type jsonDiagnostic struct { + Category string `json:"category,omitempty"` + Posn string `json:"posn"` // e.g. "file.go:line:column" + End string `json:"end"` + Message string `json:"message"` + SuggestedFixes []jsonSuggestedFix `json:"suggested_fixes,omitempty"` + Related []jsonRelatedInformation `json:"related,omitempty"` +} + +// A jsonRelated describes a secondary position and message related to +// a primary diagnostic. +type jsonRelatedInformation struct { + Posn string `json:"posn"` // e.g. "file.go:line:column" + End string `json:"end"` + Message string `json:"message"` +} diff --git a/src/cmd/go/internal/vet/vetflag.go b/src/cmd/go/internal/vet/vetflag.go index d0bdb58a504ae7..7342b99d6e36bb 100644 --- a/src/cmd/go/internal/vet/vetflag.go +++ b/src/cmd/go/internal/vet/vetflag.go @@ -21,70 +21,83 @@ import ( "cmd/go/internal/work" ) -// go vet flag processing -// -// We query the flags of the tool specified by -vettool and accept any -// of those flags plus any flag valid for 'go build'. The tool must -// support -flags, which prints a description of its flags in JSON to -// stdout. - -// vetTool specifies the vet command to run. -// Any tool that supports the (still unpublished) vet -// command-line protocol may be supplied; see -// golang.org/x/tools/go/analysis/unitchecker for one -// implementation. It is also used by tests. -// -// The default behavior (vetTool=="") runs 'go tool vet'. -var vetTool string // -vettool - -func init() { - // For now, we omit the -json flag for vet because we could plausibly - // support -json specific to the vet command in the future (perhaps using - // the same format as build -json). - work.AddBuildFlags(CmdVet, work.OmitJSONFlag) - CmdVet.Flag.StringVar(&vetTool, "vettool", "", "") +// go vet/fix flag processing +var ( + // We query the flags of the tool specified by -{vet,fix}tool + // and accept any of those flags plus any flag valid for 'go + // build'. The tool must support -flags, which prints a + // description of its flags in JSON to stdout. + + // toolFlag specifies the vet/fix command to run. + // Any toolFlag that supports the (unpublished) vet + // command-line protocol may be supplied; see + // golang.org/x/tools/go/analysis/unitchecker for the + // sole implementation. It is also used by tests. + // + // The default behavior ("") runs 'go tool {vet,fix}'. + // + // Do not access this flag directly; use [parseToolFlag]. + toolFlag string // -{vet,fix}tool + diffFlag bool // -diff + jsonFlag bool // -json + contextFlag = -1 // -c=n +) + +func addFlags(cmd *base.Command) { + // We run the compiler for export data. + // Suppress the build -json flag; we define our own. 
+ work.AddBuildFlags(cmd, work.OmitJSONFlag) + + cmd.Flag.StringVar(&toolFlag, cmd.Name()+"tool", "", "") // -vettool or -fixtool + cmd.Flag.BoolVar(&diffFlag, "diff", false, "print diff instead of applying it") + cmd.Flag.BoolVar(&jsonFlag, "json", false, "print diagnostics and fixes as JSON") + cmd.Flag.IntVar(&contextFlag, "c", -1, "display offending line with this many lines of context") } -func parseVettoolFlag(args []string) { - // Extract -vettool by ad hoc flag processing: +// parseToolFlag scans args for -{vet,fix}tool and returns the effective tool filename. +func parseToolFlag(cmd *base.Command, args []string) string { + toolFlagName := cmd.Name() + "tool" // vettool or fixtool + + // Extract -{vet,fix}tool by ad hoc flag processing: // its value is needed even before we can declare // the flags available during main flag processing. for i, arg := range args { - if arg == "-vettool" || arg == "--vettool" { + if arg == "-"+toolFlagName || arg == "--"+toolFlagName { if i+1 >= len(args) { log.Fatalf("%s requires a filename", arg) } - vetTool = args[i+1] - return - } else if strings.HasPrefix(arg, "-vettool=") || - strings.HasPrefix(arg, "--vettool=") { - vetTool = arg[strings.IndexByte(arg, '=')+1:] - return + toolFlag = args[i+1] + break + } else if strings.HasPrefix(arg, "-"+toolFlagName+"=") || + strings.HasPrefix(arg, "--"+toolFlagName+"=") { + toolFlag = arg[strings.IndexByte(arg, '=')+1:] + break } } -} -// vetFlags processes the command line, splitting it at the first non-flag -// into the list of flags and list of packages. -func vetFlags(args []string) (passToVet, packageNames []string) { - parseVettoolFlag(args) - - // Query the vet command for its flags. - var tool string - if vetTool == "" { - tool = base.Tool("vet") - } else { - var err error - tool, err = filepath.Abs(vetTool) + if toolFlag != "" { + tool, err := filepath.Abs(toolFlag) if err != nil { log.Fatal(err) } + return tool } + + return base.Tool(cmd.Name()) // default to 'go tool vet|fix' +} + +// toolFlags processes the command line, splitting it at the first non-flag +// into the list of flags and list of packages. +func toolFlags(cmd *base.Command, args []string) (passToTool, packageNames []string) { + tool := parseToolFlag(cmd, args) + work.VetTool = tool + + // Query the tool for its flags. out := new(bytes.Buffer) - vetcmd := exec.Command(tool, "-flags") - vetcmd.Stdout = out - if err := vetcmd.Run(); err != nil { - fmt.Fprintf(os.Stderr, "go: can't execute %s -flags: %v\n", tool, err) + toolcmd := exec.Command(tool, "-flags") + toolcmd.Stdout = out + if err := toolcmd.Run(); err != nil { + fmt.Fprintf(os.Stderr, "go: %s -flags failed: %v\n", tool, err) base.SetExitStatus(2) base.Exit() } @@ -99,15 +112,20 @@ func vetFlags(args []string) (passToVet, packageNames []string) { base.Exit() } - // Add vet's flags to CmdVet.Flag. + // Add tool's flags to cmd.Flag. // - // Some flags, in particular -tags and -v, are known to vet but + // Some flags, in particular -tags and -v, are known to the tool but // also defined as build flags. This works fine, so we omit duplicates here. - // However some, like -x, are known to the build but not to vet. - isVetFlag := make(map[string]bool, len(analysisFlags)) - cf := CmdVet.Flag + // However some, like -x, are known to the build but not to the tool. + isToolFlag := make(map[string]bool, len(analysisFlags)) + cf := cmd.Flag for _, f := range analysisFlags { - isVetFlag[f.Name] = true + // We reimplement the unitchecker's -c=n flag. 
+ // Don't allow it to be passed through. + if f.Name == "c" { + continue + } + isToolFlag[f.Name] = true if cf.Lookup(f.Name) == nil { if f.Bool { cf.Bool(f.Name, false, "") @@ -117,22 +135,22 @@ func vetFlags(args []string) (passToVet, packageNames []string) { } } - // Record the set of vet tool flags set by GOFLAGS. We want to pass them to - // the vet tool, but only if they aren't overridden by an explicit argument. - base.SetFromGOFLAGS(&CmdVet.Flag) + // Record the set of tool flags set by GOFLAGS. We want to pass them to + // the tool, but only if they aren't overridden by an explicit argument. + base.SetFromGOFLAGS(&cmd.Flag) addFromGOFLAGS := map[string]bool{} - CmdVet.Flag.Visit(func(f *flag.Flag) { - if isVetFlag[f.Name] { + cmd.Flag.Visit(func(f *flag.Flag) { + if isToolFlag[f.Name] { addFromGOFLAGS[f.Name] = true } }) explicitFlags := make([]string, 0, len(args)) for len(args) > 0 { - f, remainingArgs, err := cmdflag.ParseOne(&CmdVet.Flag, args) + f, remainingArgs, err := cmdflag.ParseOne(&cmd.Flag, args) if errors.Is(err, flag.ErrHelp) { - exitWithUsage() + exitWithUsage(cmd) } if errors.Is(err, cmdflag.ErrFlagTerminator) { @@ -142,7 +160,7 @@ func vetFlags(args []string) (passToVet, packageNames []string) { break } - if nf := (cmdflag.NonFlagError{}); errors.As(err, &nf) { + if _, ok := errors.AsType[cmdflag.NonFlagError](err); ok { // Everything from here on out — including the argument we just consumed — // must be a package name. packageNames = args @@ -151,12 +169,12 @@ func vetFlags(args []string) (passToVet, packageNames []string) { if err != nil { fmt.Fprintln(os.Stderr, err) - exitWithUsage() + exitWithUsage(cmd) } - if isVetFlag[f.Name] { + if isToolFlag[f.Name] { // Forward the raw arguments rather than cleaned equivalents, just in - // case the vet tool parses them idiosyncratically. + // case the tool parses them idiosyncratically. explicitFlags = append(explicitFlags, args[:len(args)-len(remainingArgs)]...) // This flag has been overridden explicitly, so don't forward its implicit @@ -168,26 +186,26 @@ func vetFlags(args []string) (passToVet, packageNames []string) { } // Prepend arguments from GOFLAGS before other arguments. - CmdVet.Flag.Visit(func(f *flag.Flag) { + cmd.Flag.Visit(func(f *flag.Flag) { if addFromGOFLAGS[f.Name] { - passToVet = append(passToVet, fmt.Sprintf("-%s=%s", f.Name, f.Value)) + passToTool = append(passToTool, fmt.Sprintf("-%s=%s", f.Name, f.Value)) } }) - passToVet = append(passToVet, explicitFlags...) - return passToVet, packageNames + passToTool = append(passToTool, explicitFlags...) 
+ return passToTool, packageNames } -func exitWithUsage() { - fmt.Fprintf(os.Stderr, "usage: %s\n", CmdVet.UsageLine) - fmt.Fprintf(os.Stderr, "Run 'go help %s' for details.\n", CmdVet.LongName()) +func exitWithUsage(cmd *base.Command) { + fmt.Fprintf(os.Stderr, "usage: %s\n", cmd.UsageLine) + fmt.Fprintf(os.Stderr, "Run 'go help %s' for details.\n", cmd.LongName()) // This part is additional to what (*Command).Usage does: - cmd := "go tool vet" - if vetTool != "" { - cmd = vetTool + tool := toolFlag + if tool == "" { + tool = "go tool " + cmd.Name() } - fmt.Fprintf(os.Stderr, "Run '%s help' for a full list of flags and analyzers.\n", cmd) - fmt.Fprintf(os.Stderr, "Run '%s -help' for an overview.\n", cmd) + fmt.Fprintf(os.Stderr, "Run '%s help' for a full list of flags and analyzers.\n", tool) + fmt.Fprintf(os.Stderr, "Run '%s -help' for an overview.\n", tool) base.SetExitStatus(2) base.Exit() diff --git a/src/cmd/go/internal/work/action.go b/src/cmd/go/internal/work/action.go index 3636f642e26401..f1f3bcea38d9d5 100644 --- a/src/cmd/go/internal/work/action.go +++ b/src/cmd/go/internal/work/action.go @@ -28,6 +28,7 @@ import ( "cmd/go/internal/cache" "cmd/go/internal/cfg" "cmd/go/internal/load" + "cmd/go/internal/modload" "cmd/go/internal/str" "cmd/go/internal/trace" "cmd/internal/buildid" @@ -392,7 +393,7 @@ func (b *Builder) NewObjdir() string { // at shlibpath. For the native toolchain this list is stored, newline separated, in // an ELF note with name "Go\x00\x00" and type 1. For GCCGO it is extracted from the // .go_export section. -func readpkglist(shlibpath string) (pkgs []*load.Package) { +func readpkglist(loaderstate *modload.State, shlibpath string) (pkgs []*load.Package) { var stk load.ImportStack if cfg.BuildToolchainName == "gccgo" { f, err := elf.Open(shlibpath) @@ -412,7 +413,7 @@ func readpkglist(shlibpath string) (pkgs []*load.Package) { for _, line := range bytes.Split(data, []byte{'\n'}) { if path, found := bytes.CutPrefix(line, pkgpath); found { path = bytes.TrimSuffix(path, []byte{';'}) - pkgs = append(pkgs, load.LoadPackageWithFlags(string(path), base.Cwd(), &stk, nil, 0)) + pkgs = append(pkgs, load.LoadPackageWithFlags(loaderstate, string(path), base.Cwd(), &stk, nil, 0)) } } } else { @@ -423,7 +424,7 @@ func readpkglist(shlibpath string) (pkgs []*load.Package) { scanner := bufio.NewScanner(bytes.NewBuffer(pkglistbytes)) for scanner.Scan() { t := scanner.Text() - pkgs = append(pkgs, load.LoadPackageWithFlags(t, base.Cwd(), &stk, nil, 0)) + pkgs = append(pkgs, load.LoadPackageWithFlags(loaderstate, t, base.Cwd(), &stk, nil, 0)) } } return @@ -445,7 +446,7 @@ func (b *Builder) cacheAction(mode string, p *load.Package, f func() *Action) *A // AutoAction returns the "right" action for go build or go install of p. func (b *Builder) AutoAction(mode, depMode BuildMode, p *load.Package) *Action { if p.Name == "main" { - return b.LinkAction(mode, depMode, p) + return b.LinkAction(modload.LoaderState, mode, depMode, p) } return b.CompileAction(mode, depMode, p) } @@ -913,7 +914,7 @@ func (b *Builder) vetAction(mode, depMode BuildMode, p *load.Package) *Action { // LinkAction returns the action for linking p into an executable // and possibly installing the result (according to mode). // depMode is the action (build or install) to use when compiling dependencies. -func (b *Builder) LinkAction(mode, depMode BuildMode, p *load.Package) *Action { +func (b *Builder) LinkAction(loaderstate *modload.State, mode, depMode BuildMode, p *load.Package) *Action { // Construct link action. 
a := b.cacheAction("link", p, func() *Action { a := &Action{ @@ -948,7 +949,7 @@ func (b *Builder) LinkAction(mode, depMode BuildMode, p *load.Package) *Action { } a.Target = a.Objdir + filepath.Join("exe", name) + cfg.ExeSuffix a.built = a.Target - b.addTransitiveLinkDeps(a, a1, "") + b.addTransitiveLinkDeps(loaderstate, a, a1, "") // Sequence the build of the main package (a1) strictly after the build // of all other dependencies that go into the link. It is likely to be after @@ -1034,7 +1035,7 @@ func (b *Builder) installAction(a1 *Action, mode BuildMode) *Action { // makes sure those are present in a.Deps. // If shlib is non-empty, then a corresponds to the build and installation of shlib, // so any rebuild of shlib should not be added as a dependency. -func (b *Builder) addTransitiveLinkDeps(a, a1 *Action, shlib string) { +func (b *Builder) addTransitiveLinkDeps(loaderstate *modload.State, a, a1 *Action, shlib string) { // Expand Deps to include all built packages, for the linker. // Use breadth-first search to find rebuilt-for-test packages // before the standard ones. @@ -1075,7 +1076,7 @@ func (b *Builder) addTransitiveLinkDeps(a, a1 *Action, shlib string) { // we'll end up building an overall library or executable that depends at runtime // on other libraries that are out-of-date, which is clearly not good either. // We call it ModeBuggyInstall to make clear that this is not right. - a.Deps = append(a.Deps, b.linkSharedAction(ModeBuggyInstall, ModeBuggyInstall, p1.Shlib, nil)) + a.Deps = append(a.Deps, b.linkSharedAction(loaderstate, ModeBuggyInstall, ModeBuggyInstall, p1.Shlib, nil)) } } } @@ -1116,21 +1117,21 @@ func (b *Builder) buildmodeShared(mode, depMode BuildMode, args []string, pkgs [ if err != nil { base.Fatalf("%v", err) } - return b.linkSharedAction(mode, depMode, name, a1) + return b.linkSharedAction(modload.LoaderState, mode, depMode, name, a1) } // linkSharedAction takes a grouping action a1 corresponding to a list of built packages // and returns an action that links them together into a shared library with the name shlib. // If a1 is nil, shlib should be an absolute path to an existing shared library, // and then linkSharedAction reads that library to find out the package list. -func (b *Builder) linkSharedAction(mode, depMode BuildMode, shlib string, a1 *Action) *Action { +func (b *Builder) linkSharedAction(loaderstate *modload.State, mode, depMode BuildMode, shlib string, a1 *Action) *Action { fullShlib := shlib shlib = filepath.Base(shlib) a := b.cacheAction("build-shlib "+shlib, nil, func() *Action { if a1 == nil { // TODO(rsc): Need to find some other place to store config, // not in pkg directory. See golang.org/issue/22196. 
- pkgs := readpkglist(fullShlib) + pkgs := readpkglist(loaderstate, fullShlib) a1 = &Action{ Mode: "shlib packages", } @@ -1173,7 +1174,7 @@ func (b *Builder) linkSharedAction(mode, depMode BuildMode, shlib string, a1 *Ac } } var stk load.ImportStack - p := load.LoadPackageWithFlags(pkg, base.Cwd(), &stk, nil, 0) + p := load.LoadPackageWithFlags(loaderstate, pkg, base.Cwd(), &stk, nil, 0) if p.Error != nil { base.Fatalf("load %s: %v", pkg, p.Error) } @@ -1201,7 +1202,7 @@ func (b *Builder) linkSharedAction(mode, depMode BuildMode, shlib string, a1 *Ac add(a, dep, true) } } - b.addTransitiveLinkDeps(a, a1, shlib) + b.addTransitiveLinkDeps(loaderstate, a, a1, shlib) return a }) diff --git a/src/cmd/go/internal/work/build.go b/src/cmd/go/internal/work/build.go index 6741b39f051cd6..45acbe85c2a6d0 100644 --- a/src/cmd/go/internal/work/build.go +++ b/src/cmd/go/internal/work/build.go @@ -459,8 +459,8 @@ func oneMainPkg(pkgs []*load.Package) []*load.Package { var pkgsFilter = func(pkgs []*load.Package) []*load.Package { return pkgs } func runBuild(ctx context.Context, cmd *base.Command, args []string) { - modload.InitWorkfile() - BuildInit() + modload.InitWorkfile(modload.LoaderState) + BuildInit(modload.LoaderState) b := NewBuilder("") defer func() { if err := b.Close(); err != nil { @@ -468,7 +468,7 @@ func runBuild(ctx context.Context, cmd *base.Command, args []string) { } }() - pkgs := load.PackagesAndErrors(ctx, load.PackageOpts{AutoVCS: true}, args) + pkgs := load.PackagesAndErrors(modload.LoaderState, ctx, load.PackageOpts{AutoVCS: true}, args) load.CheckPackageErrors(pkgs) explicitO := len(cfg.BuildO) > 0 @@ -503,7 +503,7 @@ func runBuild(ctx context.Context, cmd *base.Command, args []string) { } if cfg.BuildCover { - load.PrepareForCoverageBuild(pkgs) + load.PrepareForCoverageBuild(modload.LoaderState, pkgs) } if cfg.BuildO != "" { @@ -694,10 +694,10 @@ func runInstall(ctx context.Context, cmd *base.Command, args []string) { } } - modload.InitWorkfile() - BuildInit() - pkgs := load.PackagesAndErrors(ctx, load.PackageOpts{AutoVCS: true}, args) - if cfg.ModulesEnabled && !modload.HasModRoot() { + modload.InitWorkfile(modload.LoaderState) + BuildInit(modload.LoaderState) + pkgs := load.PackagesAndErrors(modload.LoaderState, ctx, load.PackageOpts{AutoVCS: true}, args) + if cfg.ModulesEnabled && !modload.HasModRoot(modload.LoaderState) { haveErrors := false allMissingErrors := true for _, pkg := range pkgs { @@ -705,7 +705,7 @@ func runInstall(ctx context.Context, cmd *base.Command, args []string) { continue } haveErrors = true - if missingErr := (*modload.ImportMissingError)(nil); !errors.As(pkg.Error, &missingErr) { + if _, ok := errors.AsType[*modload.ImportMissingError](pkg.Error); !ok { allMissingErrors = false break } @@ -722,7 +722,7 @@ func runInstall(ctx context.Context, cmd *base.Command, args []string) { load.CheckPackageErrors(pkgs) if cfg.BuildCover { - load.PrepareForCoverageBuild(pkgs) + load.PrepareForCoverageBuild(modload.LoaderState, pkgs) } InstallPackages(ctx, args, pkgs) @@ -859,11 +859,11 @@ func InstallPackages(ctx context.Context, patterns []string, pkgs []*load.Packag // // See golang.org/issue/40276 for details and rationale. 
func installOutsideModule(ctx context.Context, args []string) { - modload.ForceUseModules = true - modload.RootMode = modload.NoRoot - modload.AllowMissingModuleImports() - modload.Init() - BuildInit() + modload.LoaderState.ForceUseModules = true + modload.LoaderState.RootMode = modload.NoRoot + modload.AllowMissingModuleImports(modload.LoaderState) + modload.Init(modload.LoaderState) + BuildInit(modload.LoaderState) // Load packages. Ignore non-main packages. // Print a warning if an argument contains "..." and matches no main packages. @@ -872,7 +872,7 @@ func installOutsideModule(ctx context.Context, args []string) { // TODO(golang.org/issue/40276): don't report errors loading non-main packages // matched by a pattern. pkgOpts := load.PackageOpts{MainOnly: true} - pkgs, err := load.PackagesAndErrorsOutsideModule(ctx, pkgOpts, args) + pkgs, err := load.PackagesAndErrorsOutsideModule(modload.LoaderState, ctx, pkgOpts, args) if err != nil { base.Fatal(err) } diff --git a/src/cmd/go/internal/work/buildid.go b/src/cmd/go/internal/work/buildid.go index 88c24b11acc172..584c1ac6f41d23 100644 --- a/src/cmd/go/internal/work/buildid.go +++ b/src/cmd/go/internal/work/buildid.go @@ -148,9 +148,10 @@ func (b *Builder) toolID(name string) string { path := base.Tool(name) desc := "go tool " + name - // Special case: undocumented -vettool overrides usual vet, - // for testing vet or supplying an alternative analysis tool. - if name == "vet" && VetTool != "" { + // Special case: -{vet,fix}tool overrides usual cmd/{vet,fix} + // for testing or supplying an alternative analysis tool. + // (We use only "vet" terminology in the action graph.) + if name == "vet" { path = VetTool desc = VetTool } diff --git a/src/cmd/go/internal/work/exec.go b/src/cmd/go/internal/work/exec.go index 72b9177c9dbbeb..eb012d26109f57 100644 --- a/src/cmd/go/internal/work/exec.go +++ b/src/cmd/go/internal/work/exec.go @@ -169,9 +169,10 @@ func (b *Builder) Do(ctx context.Context, root *Action) { a.Package.Incomplete = true } } else { - var ipe load.ImportPathError - if a.Package != nil && (!errors.As(err, &ipe) || ipe.ImportPath() != a.Package.ImportPath) { - err = fmt.Errorf("%s: %v", a.Package.ImportPath, err) + if a.Package != nil { + if ipe, ok := errors.AsType[load.ImportPathError](err); !ok || ipe.ImportPath() != a.Package.ImportPath { + err = fmt.Errorf("%s: %v", a.Package.ImportPath, err) + } } sh := b.Shell(a) sh.Errorf("%s", err) @@ -1265,7 +1266,8 @@ func buildVetConfig(a *Action, srcfiles []string) { } } -// VetTool is the path to an alternate vet tool binary. +// VetTool is the path to the effective vet or fix tool binary. +// The user may specify a non-default value using -{vet,fix}tool. // The caller is expected to set it (if needed) before executing any vet actions. var VetTool string @@ -1273,7 +1275,13 @@ var VetTool string // The caller is expected to set them before executing any vet actions. var VetFlags []string -// VetExplicit records whether the vet flags were set explicitly on the command line. +// VetHandleStdout determines how the stdout output of each vet tool +// invocation should be handled. The default behavior is to copy it to +// the go command's stdout, atomically. +var VetHandleStdout = copyToStdout + +// VetExplicit records whether the vet flags (which may include +// -{vet,fix}tool) were set explicitly on the command line. 
var VetExplicit bool func (b *Builder) vet(ctx context.Context, a *Action) error { @@ -1296,6 +1304,7 @@ func (b *Builder) vet(ctx context.Context, a *Action) error { sh := b.Shell(a) + // We use "vet" terminology even when building action graphs for go fix. vcfg.VetxOnly = a.VetxOnly vcfg.VetxOutput = a.Objdir + "vet.out" vcfg.Stdout = a.Objdir + "vet.stdout" @@ -1322,7 +1331,7 @@ func (b *Builder) vet(ctx context.Context, a *Action) error { // dependency tree turn on *more* analysis, as here. // (The unsafeptr check does not write any facts for use by // later vet runs, nor does unreachable.) - if a.Package.Goroot && !VetExplicit && VetTool == "" { + if a.Package.Goroot && !VetExplicit && VetTool == base.Tool("vet") { // Turn off -unsafeptr checks. // There's too much unsafe.Pointer code // that vet doesn't like in low-level packages @@ -1359,13 +1368,29 @@ func (b *Builder) vet(ctx context.Context, a *Action) error { vcfg.PackageVetx[a1.Package.ImportPath] = a1.built } } - key := cache.ActionID(h.Sum()) + vetxKey := cache.ActionID(h.Sum()) // for .vetx file + + fmt.Fprintf(h, "stdout\n") + stdoutKey := cache.ActionID(h.Sum()) // for .stdout file - if vcfg.VetxOnly && !cfg.BuildA { + // Check the cache; -a forces a rebuild. + if !cfg.BuildA { c := cache.Default() - if file, _, err := cache.GetFile(c, key); err == nil { - a.built = file - return nil + if vcfg.VetxOnly { + if file, _, err := cache.GetFile(c, vetxKey); err == nil { + a.built = file + return nil + } + } else { + // Copy cached vet.std files to stdout. + if file, _, err := cache.GetFile(c, stdoutKey); err == nil { + f, err := os.Open(file) + if err != nil { + return err + } + defer f.Close() // ignore error (can't fail) + return VetHandleStdout(f) + } } } @@ -1387,31 +1412,46 @@ func (b *Builder) vet(ctx context.Context, a *Action) error { p := a.Package tool := VetTool if tool == "" { - tool = base.Tool("vet") + panic("VetTool unset") } - runErr := sh.run(p.Dir, p.ImportPath, env, cfg.BuildToolexec, tool, vetFlags, a.Objdir+"vet.cfg") - // If vet wrote export data, save it for input to future vets. + if err := sh.run(p.Dir, p.ImportPath, env, cfg.BuildToolexec, tool, vetFlags, a.Objdir+"vet.cfg"); err != nil { + return err + } + + // Vet tool succeeded, possibly with facts and JSON stdout. Save both in cache. + + // Save facts if f, err := os.Open(vcfg.VetxOutput); err == nil { + defer f.Close() // ignore error a.built = vcfg.VetxOutput - cache.Default().Put(key, f) // ignore error - f.Close() // ignore error + cache.Default().Put(vetxKey, f) // ignore error } - // If vet wrote to stdout, copy it to go's stdout, atomically. + // Save stdout. if f, err := os.Open(vcfg.Stdout); err == nil { - stdoutMu.Lock() - if _, err := io.Copy(os.Stdout, f); err != nil && runErr == nil { - runErr = fmt.Errorf("copying vet tool stdout: %w", err) + defer f.Close() // ignore error + if err := VetHandleStdout(f); err != nil { + return err } - f.Close() // ignore error - stdoutMu.Unlock() + f.Seek(0, io.SeekStart) // ignore error + cache.Default().Put(stdoutKey, f) // ignore error } - return runErr + return nil } -var stdoutMu sync.Mutex // serializes concurrent writes (e.g. JSON values) to stdout +var stdoutMu sync.Mutex // serializes concurrent writes (of e.g. JSON values) to stdout + +// copyToStdout copies the stream to stdout while holding the lock. 
+func copyToStdout(r io.Reader) error { + stdoutMu.Lock() + defer stdoutMu.Unlock() + if _, err := io.Copy(os.Stdout, r); err != nil { + return fmt.Errorf("copying vet tool stdout: %w", err) + } + return nil +} // linkActionID computes the action ID for a link action. func (b *Builder) linkActionID(a *Action) cache.ActionID { @@ -2153,7 +2193,7 @@ func (noToolchain) cc(b *Builder, a *Action, ofile, cfile string) error { // gcc runs the gcc C compiler to create an object from a single C file. func (b *Builder) gcc(a *Action, workdir, out string, flags []string, cfile string) error { p := a.Package - return b.ccompile(a, out, flags, cfile, b.GccCmd(p.Dir, workdir)) + return b.ccompile(modload.LoaderState, a, out, flags, cfile, b.GccCmd(p.Dir, workdir)) } // gas runs the gcc c compiler to create an object file from a single C assembly file. @@ -2167,23 +2207,23 @@ func (b *Builder) gas(a *Action, workdir, out string, flags []string, sfile stri return fmt.Errorf("package using cgo has Go assembly file %s", sfile) } } - return b.ccompile(a, out, flags, sfile, b.GccCmd(p.Dir, workdir)) + return b.ccompile(modload.LoaderState, a, out, flags, sfile, b.GccCmd(p.Dir, workdir)) } // gxx runs the g++ C++ compiler to create an object from a single C++ file. func (b *Builder) gxx(a *Action, workdir, out string, flags []string, cxxfile string) error { p := a.Package - return b.ccompile(a, out, flags, cxxfile, b.GxxCmd(p.Dir, workdir)) + return b.ccompile(modload.LoaderState, a, out, flags, cxxfile, b.GxxCmd(p.Dir, workdir)) } // gfortran runs the gfortran Fortran compiler to create an object from a single Fortran file. func (b *Builder) gfortran(a *Action, workdir, out string, flags []string, ffile string) error { p := a.Package - return b.ccompile(a, out, flags, ffile, b.gfortranCmd(p.Dir, workdir)) + return b.ccompile(modload.LoaderState, a, out, flags, ffile, b.gfortranCmd(p.Dir, workdir)) } // ccompile runs the given C or C++ compiler and creates an object from a single source file. -func (b *Builder) ccompile(a *Action, outfile string, flags []string, file string, compiler []string) error { +func (b *Builder) ccompile(loaderstate *modload.State, a *Action, outfile string, flags []string, file string, compiler []string) error { p := a.Package sh := b.Shell(a) file = mkAbs(p.Dir, file) @@ -2220,7 +2260,7 @@ func (b *Builder) ccompile(a *Action, outfile string, flags []string, file strin } else if m.Dir == "" { // The module is in the vendor directory. Replace the entire vendor // directory path, because the module's Dir is not filled in. 
- from = modload.VendorDir() + from = modload.VendorDir(loaderstate) toPath = "vendor" } else { from = m.Dir @@ -2270,7 +2310,7 @@ func (b *Builder) ccompile(a *Action, outfile string, flags []string, file strin } } if len(newFlags) < len(flags) { - return b.ccompile(a, outfile, newFlags, file, compiler) + return b.ccompile(loaderstate, a, outfile, newFlags, file, compiler) } } @@ -3343,7 +3383,7 @@ func (b *Builder) swigDoIntSize(objdir string) (intsize string, err error) { } srcs := []string{src} - p := load.GoFilesPackage(context.TODO(), load.PackageOpts{}, srcs) + p := load.GoFilesPackage(modload.LoaderState, context.TODO(), load.PackageOpts{}, srcs) if _, _, e := BuildToolchain.gc(b, &Action{Mode: "swigDoIntSize", Package: p, Objdir: objdir}, "", nil, nil, "", false, "", srcs); e != nil { return "32", nil diff --git a/src/cmd/go/internal/work/init.go b/src/cmd/go/internal/work/init.go index e4e83dc8f9853e..a2954ab91ab60a 100644 --- a/src/cmd/go/internal/work/init.go +++ b/src/cmd/go/internal/work/init.go @@ -50,14 +50,14 @@ func makeCfgChangedEnv() []string { return slices.Clip(env) } -func BuildInit() { +func BuildInit(loaderstate *modload.State) { if buildInitStarted { base.Fatalf("go: internal error: work.BuildInit called more than once") } buildInitStarted = true base.AtExit(closeBuilders) - modload.Init() + modload.Init(loaderstate) instrumentInit() buildModeInit() cfgChangedEnv = makeCfgChangedEnv() diff --git a/src/cmd/go/internal/workcmd/edit.go b/src/cmd/go/internal/workcmd/edit.go index 18730436ca8217..3778e70b687aec 100644 --- a/src/cmd/go/internal/workcmd/edit.go +++ b/src/cmd/go/internal/workcmd/edit.go @@ -143,8 +143,8 @@ func runEditwork(ctx context.Context, cmd *base.Command, args []string) { if len(args) == 1 { gowork = args[0] } else { - modload.InitWorkfile() - gowork = modload.WorkFilePath() + modload.InitWorkfile(modload.LoaderState) + gowork = modload.WorkFilePath(modload.LoaderState) } if gowork == "" { base.Fatalf("go: no go.work file found\n\t(run 'go work init' first or specify path using GOWORK environment variable)") diff --git a/src/cmd/go/internal/workcmd/init.go b/src/cmd/go/internal/workcmd/init.go index 02240b8189fab5..20fef91d5e953c 100644 --- a/src/cmd/go/internal/workcmd/init.go +++ b/src/cmd/go/internal/workcmd/init.go @@ -44,11 +44,11 @@ func init() { } func runInit(ctx context.Context, cmd *base.Command, args []string) { - modload.InitWorkfile() + modload.InitWorkfile(modload.LoaderState) - modload.ForceUseModules = true + modload.LoaderState.ForceUseModules = true - gowork := modload.WorkFilePath() + gowork := modload.WorkFilePath(modload.LoaderState) if gowork == "" { gowork = filepath.Join(base.Cwd(), "go.work") } diff --git a/src/cmd/go/internal/workcmd/sync.go b/src/cmd/go/internal/workcmd/sync.go index 719cf76c9bf12d..c58cd55ceee392 100644 --- a/src/cmd/go/internal/workcmd/sync.go +++ b/src/cmd/go/internal/workcmd/sync.go @@ -48,9 +48,9 @@ func init() { } func runSync(ctx context.Context, cmd *base.Command, args []string) { - modload.ForceUseModules = true - modload.InitWorkfile() - if modload.WorkFilePath() == "" { + modload.LoaderState.ForceUseModules = true + modload.InitWorkfile(modload.LoaderState) + if modload.WorkFilePath(modload.LoaderState) == "" { base.Fatalf("go: no go.work file found\n\t(run 'go work init' first or specify path using GOWORK environment variable)") } @@ -60,7 +60,7 @@ func runSync(ctx context.Context, cmd *base.Command, args []string) { } mustSelectFor := map[module.Version][]module.Version{} - mms := 
modload.MainModules + mms := modload.LoaderState.MainModules opts := modload.PackageOpts{ Tags: imports.AnyTags(), @@ -73,7 +73,7 @@ func runSync(ctx context.Context, cmd *base.Command, args []string) { } for _, m := range mms.Versions() { opts.MainModule = m - _, pkgs := modload.LoadPackages(ctx, opts, "all") + _, pkgs := modload.LoadPackages(modload.LoaderState, ctx, opts, "all") opts.MainModule = module.Version{} // reset var ( @@ -91,7 +91,7 @@ func runSync(ctx context.Context, cmd *base.Command, args []string) { mustSelectFor[m] = mustSelect } - workFilePath := modload.WorkFilePath() // save go.work path because EnterModule clobbers it. + workFilePath := modload.WorkFilePath(modload.LoaderState) // save go.work path because EnterModule clobbers it. var goV string for _, m := range mms.Versions() { @@ -114,12 +114,12 @@ func runSync(ctx context.Context, cmd *base.Command, args []string) { // so we don't write some go.mods with the "before" toolchain // and others with the "after" toolchain. If nothing else, that // discrepancy could show up in auto-recorded toolchain lines. - changed, err := modload.EditBuildList(ctx, nil, mustSelectFor[m]) + changed, err := modload.EditBuildList(modload.LoaderState, ctx, nil, mustSelectFor[m]) if err != nil { continue } if changed { - modload.LoadPackages(ctx, modload.PackageOpts{ + modload.LoadPackages(modload.LoaderState, ctx, modload.PackageOpts{ Tags: imports.AnyTags(), Tidy: true, VendorModulesInGOROOTSrc: true, @@ -131,7 +131,7 @@ func runSync(ctx context.Context, cmd *base.Command, args []string) { }, "all") modload.WriteGoMod(ctx, modload.WriteOpts{}) } - goV = gover.Max(goV, modload.MainModules.GoVersion()) + goV = gover.Max(goV, modload.LoaderState.MainModules.GoVersion(modload.LoaderState)) } wf, err := modload.ReadWorkFile(workFilePath) diff --git a/src/cmd/go/internal/workcmd/use.go b/src/cmd/go/internal/workcmd/use.go index afbe99d3a480db..ca8de22cca884f 100644 --- a/src/cmd/go/internal/workcmd/use.go +++ b/src/cmd/go/internal/workcmd/use.go @@ -61,9 +61,9 @@ func init() { } func runUse(ctx context.Context, cmd *base.Command, args []string) { - modload.ForceUseModules = true - modload.InitWorkfile() - gowork := modload.WorkFilePath() + modload.LoaderState.ForceUseModules = true + modload.InitWorkfile(modload.LoaderState) + gowork := modload.WorkFilePath(modload.LoaderState) if gowork == "" { base.Fatalf("go: no go.work file found\n\t(run 'go work init' first or specify path using GOWORK environment variable)") } diff --git a/src/cmd/go/internal/workcmd/vendor.go b/src/cmd/go/internal/workcmd/vendor.go index f9f0cc0898836f..36c1f7b522f9e7 100644 --- a/src/cmd/go/internal/workcmd/vendor.go +++ b/src/cmd/go/internal/workcmd/vendor.go @@ -46,8 +46,8 @@ func init() { } func runVendor(ctx context.Context, cmd *base.Command, args []string) { - modload.InitWorkfile() - if modload.WorkFilePath() == "" { + modload.InitWorkfile(modload.LoaderState) + if modload.WorkFilePath(modload.LoaderState) == "" { base.Fatalf("go: no go.work file found\n\t(run 'go work init' first or specify path using GOWORK environment variable)") } diff --git a/src/cmd/go/main.go b/src/cmd/go/main.go index e81969ca4a3144..8cdfd9196e4cb1 100644 --- a/src/cmd/go/main.go +++ b/src/cmd/go/main.go @@ -24,7 +24,6 @@ import ( "cmd/go/internal/clean" "cmd/go/internal/doc" "cmd/go/internal/envcmd" - "cmd/go/internal/fix" "cmd/go/internal/fmtcmd" "cmd/go/internal/generate" "cmd/go/internal/help" @@ -55,7 +54,7 @@ func init() { clean.CmdClean, doc.CmdDoc, envcmd.CmdEnv, - fix.CmdFix, 
+ vet.CmdFix, fmtcmd.CmdFmt, generate.CmdGenerate, modget.CmdGet, diff --git a/src/cmd/go/testdata/script/chdir.txt b/src/cmd/go/testdata/script/chdir.txt index a6feed6b45fce0..41def410d5fa37 100644 --- a/src/cmd/go/testdata/script/chdir.txt +++ b/src/cmd/go/testdata/script/chdir.txt @@ -27,6 +27,10 @@ stderr 'strings\.test' go vet -C ../strings -n stderr strings_test +# go fix +go fix -C ../strings -n +stderr strings_test + # -C must be first on command line (as of Go 1.21) ! go test -n -C ../strings stderr '^invalid value "../strings" for flag -C: -C flag must be first flag on command line$' diff --git a/src/cmd/go/testdata/script/test_fuzz_fuzztime.txt b/src/cmd/go/testdata/script/test_fuzz_fuzztime.txt index 027c434a322ec1..3cc23985a3934a 100644 --- a/src/cmd/go/testdata/script/test_fuzz_fuzztime.txt +++ b/src/cmd/go/testdata/script/test_fuzz_fuzztime.txt @@ -1,3 +1,5 @@ +skip # a 5s timeout is never going to be reliable (go.dev/issue/72140) + [!fuzz] skip [short] skip env GOCACHE=$WORK/cache diff --git a/src/cmd/go/testdata/script/vet_asm.txt b/src/cmd/go/testdata/script/vet_asm.txt index 8aa69ce1a3c999..c046773a06c0dc 100644 --- a/src/cmd/go/testdata/script/vet_asm.txt +++ b/src/cmd/go/testdata/script/vet_asm.txt @@ -1,12 +1,12 @@ -env GO111MODULE=off - # Issue 27665. Verify that "go vet" analyzes non-Go files. -[!GOARCH:amd64] skip +env GO111MODULE=off +env GOARCH=amd64 + ! go vet -asmdecl a stderr 'f: invalid MOVW of x' -# -c flag shows context +# -c=n flag shows n lines of context ! go vet -c=2 -asmdecl a stderr '...invalid MOVW...' stderr '1 .*TEXT' diff --git a/src/cmd/go/testdata/script/vet_basic.txt b/src/cmd/go/testdata/script/vet_basic.txt new file mode 100644 index 00000000000000..5ae66438ea3d81 --- /dev/null +++ b/src/cmd/go/testdata/script/vet_basic.txt @@ -0,0 +1,125 @@ +# Test basic features of "go vet"/"go fix" CLI. +# +# The example relies on two analyzers: +# - hostport (which is included in both the fix and vet suites), and +# - printf (which is only in the vet suite). +# Each reports one diagnostic with a fix. + +# vet default flags print diagnostics to stderr. Diagnostic => nonzero exit. +! go vet example.com/x +stderr 'does not work with IPv6' +stderr 'non-constant format string in call to fmt.Sprintf' + +# -hostport runs only one analyzer. Diagnostic => failure. +! go vet -hostport example.com/x +stderr 'does not work with IPv6' +! stderr 'non-constant format string' + +# -timeformat runs only one analyzer. No diagnostics => success. +go vet -timeformat example.com/x +! stderr . + +# JSON output includes diagnostics and fixes. Always success. +go vet -json example.com/x +! stderr . +stdout '"example.com/x": {' +stdout '"hostport":' +stdout '"message": "address format .* does not work with IPv6",' +stdout '"suggested_fixes":' +stdout '"message": "Replace fmt.Sprintf with net.JoinHostPort",' + +# vet -fix -diff displays a diff. +go vet -fix -diff example.com/x +stdout '\-var _ = fmt.Sprintf\(s\)' +stdout '\+var _ = fmt.Sprintf\("%s", s\)' +stdout '\-var _, _ = net.Dial\("tcp", fmt.Sprintf\("%s:%d", s, 80\)\)' +stdout '\+var _, _ = net.Dial\("tcp", net.JoinHostPort\(s, "80"\)\)' + +# vet -fix quietly applies the vet suite fixes. +cp x.go x.go.bak +go vet -fix example.com/x +grep 'fmt.Sprintf\("%s", s\)' x.go +grep 'net.JoinHostPort' x.go +! stderr . +cp x.go.bak x.go + +! go vet -diff example.com/x +stderr 'go vet -diff flag requires -fix' + +# go fix applies the fix suite fixes. +go fix example.com/x +grep 'net.JoinHostPort' x.go +! 
grep 'fmt.Sprintf\("%s", s\)' x.go +! stderr . +cp x.go.bak x.go + +# Show diff of fixes from the fix suite. +go fix -diff example.com/x +! stdout '\-var _ = fmt.Sprintf\(s\)' +stdout '\-var _, _ = net.Dial\("tcp", fmt.Sprintf\("%s:%d", s, 80\)\)' +stdout '\+var _, _ = net.Dial\("tcp", net.JoinHostPort\(s, "80"\)\)' + +# Show fix-suite fixes in JSON form. +go fix -json example.com/x +! stderr . +stdout '"example.com/x": {' +stdout '"hostport":' +stdout '"message": "address format .* does not work with IPv6",' +stdout '"suggested_fixes":' +stdout '"message": "Replace fmt.Sprintf with net.JoinHostPort",' +! stdout '"printf":' +! stdout '"message": "non-constant format string.*",' +! stdout '"message": "Insert.*%s.*format.string",' + +# Show vet-suite fixes in JSON form. +go vet -fix -json example.com/x +! stderr . +stdout '"example.com/x": {' +stdout '"hostport":' +stdout '"message": "address format .* does not work with IPv6",' +stdout '"suggested_fixes":' +stdout '"message": "Replace fmt.Sprintf with net.JoinHostPort",' +stdout '"printf":' +stdout '"message": "non-constant format string.*",' +stdout '"suggested_fixes":' +stdout '"message": "Insert.*%s.*format.string",' + +# Reject -diff + -json. +! go fix -diff -json example.com/x +stderr '-json and -diff cannot be used together' + +# Legacy way of selecting fixers is a no-op. +go fix -fix=old1,old2 example.com/x +stderr 'go fix: the -fix=old1,old2 flag is obsolete and has no effect' + +# -c=n flag shows n lines of context. +! go vet -c=2 -printf example.com/x +stderr 'x.go:12:21: non-constant format string in call to fmt.Sprintf' +! stderr '9 ' +stderr '10 ' +stderr '11 // This call...' +stderr '12 var _ = fmt.Sprintf\(s\)' +stderr '13 ' +stderr '14 ' +! stderr '15 ' + +-- go.mod -- +module example.com/x +go 1.25 + +-- x.go -- +package x + + +import ( + "fmt" + "net" +) + +var s string + +// This call yields a "non-constant format string" diagnostic, with a fix (go vet only). +var _ = fmt.Sprintf(s) + +// This call yields a hostport diagnostic, with a fix (go vet and go fix). +var _, _ = net.Dial("tcp", fmt.Sprintf("%s:%d", s, 80)) diff --git a/src/cmd/go/testdata/script/vet_cache.txt b/src/cmd/go/testdata/script/vet_cache.txt new file mode 100644 index 00000000000000..c84844000a43d0 --- /dev/null +++ b/src/cmd/go/testdata/script/vet_cache.txt @@ -0,0 +1,24 @@ +# Test that go vet's caching of vet tool actions replays +# the recorded stderr output even after a cache hit. + +# Set up fresh GOCACHE. +env GOCACHE=$WORK/gocache + +# First time is a cache miss. +! go vet example.com/a +stderr 'fmt.Sprint call has possible Printf formatting directive' + +# Second time is assumed to be a cache hit for the stdout JSON, +# but we don't bother to assert it. Same diagnostics again. +! go vet example.com/a +stderr 'fmt.Sprint call has possible Printf formatting directive' + +-- go.mod -- +module example.com + +-- a/a.go -- +package a + +import "fmt" + +var _ = fmt.Sprint("%s") // oops! diff --git a/src/cmd/gofmt/gofmt.go b/src/cmd/gofmt/gofmt.go index bbb8b4fd15c2f7..ad6ad636524479 100644 --- a/src/cmd/gofmt/gofmt.go +++ b/src/cmd/gofmt/gofmt.go @@ -41,6 +41,9 @@ var ( // debugging cpuprofile = flag.String("cpuprofile", "", "write cpu profile to this file") + + // errors + errFormattingDiffers = fmt.Errorf("formatting differs from gofmt's") ) // Keep these in sync with go/format/format.go. 
@@ -218,8 +221,12 @@ func (r *reporter) Report(err error) { panic("Report with nil error") } st := r.getState() - scanner.PrintError(st.err, err) - st.exitCode = 2 + if err == errFormattingDiffers { + st.exitCode = 1 + } else { + scanner.PrintError(st.err, err) + st.exitCode = 2 + } } func (r *reporter) ExitCode() int { @@ -281,6 +288,7 @@ func processFile(filename string, info fs.FileInfo, in io.Reader, r *reporter) e newName := filepath.ToSlash(filename) oldName := newName + ".orig" r.Write(diff.Diff(oldName, src, newName, res)) + return errFormattingDiffers } } diff --git a/src/cmd/gofmt/gofmt_test.go b/src/cmd/gofmt/gofmt_test.go index 6b80673af148f5..2aba0f03ff09e9 100644 --- a/src/cmd/gofmt/gofmt_test.go +++ b/src/cmd/gofmt/gofmt_test.go @@ -53,10 +53,19 @@ func gofmtFlags(filename string, maxLines int) string { return "" } -func runTest(t *testing.T, in, out string) { - // process flags - *simplifyAST = false +// Reset global variables for all flags to their default value. +func resetFlags() { + *list = false + *write = false *rewriteRule = "" + *simplifyAST = false + *doDiff = false + *allErrors = false + *cpuprofile = "" +} + +func runTest(t *testing.T, in, out string) { + resetFlags() info, err := os.Lstat(in) if err != nil { t.Error(err) @@ -159,6 +168,46 @@ func TestRewrite(t *testing.T) { } } +// TestDiff runs gofmt with the -d flag on the input files and checks that the +// expected exit code is set. +func TestDiff(t *testing.T) { + tests := []struct { + in string + exitCode int + }{ + {in: "testdata/exitcode.input", exitCode: 1}, + {in: "testdata/exitcode.golden", exitCode: 0}, + } + + for _, tt := range tests { + resetFlags() + *doDiff = true + + initParserMode() + initRewrite() + + info, err := os.Lstat(tt.in) + if err != nil { + t.Error(err) + return + } + + const maxWeight = 2 << 20 + var buf, errBuf bytes.Buffer + s := newSequencer(maxWeight, &buf, &errBuf) + s.Add(fileWeight(tt.in, info), func(r *reporter) error { + return processFile(tt.in, info, nil, r) + }) + if errBuf.Len() > 0 { + t.Logf("%q", errBuf.Bytes()) + } + + if s.GetExitCode() != tt.exitCode { + t.Errorf("%s: expected exit code %d, got %d", tt.in, tt.exitCode, s.GetExitCode()) + } + } +} + // Test case for issue 3961. func TestCRLF(t *testing.T) { const input = "testdata/crlf.input" // must contain CR/LF's diff --git a/src/cmd/gofmt/testdata/exitcode.golden b/src/cmd/gofmt/testdata/exitcode.golden new file mode 100644 index 00000000000000..06ab7d0f9a35a7 --- /dev/null +++ b/src/cmd/gofmt/testdata/exitcode.golden @@ -0,0 +1 @@ +package main diff --git a/src/cmd/gofmt/testdata/exitcode.input b/src/cmd/gofmt/testdata/exitcode.input new file mode 100644 index 00000000000000..4f2f092ce508de --- /dev/null +++ b/src/cmd/gofmt/testdata/exitcode.input @@ -0,0 +1 @@ + package main diff --git a/src/cmd/internal/bootstrap_test/experiment_toolid_test.go b/src/cmd/internal/bootstrap_test/experiment_toolid_test.go index ff2379c8998c76..ca292b700861a9 100644 --- a/src/cmd/internal/bootstrap_test/experiment_toolid_test.go +++ b/src/cmd/internal/bootstrap_test/experiment_toolid_test.go @@ -97,7 +97,7 @@ func runCmd(t *testing.T, dir string, env []string, path string, args ...string) cmd.Env = env out, err := cmd.Output() if err != nil { - if ee := (*exec.ExitError)(nil); errors.As(err, &ee) { + if ee, ok := errors.AsType[*exec.ExitError](err); ok { out = append(out, ee.Stderr...) 
} t.Fatalf("%s failed:\n%s\n%s", cmd, out, err) diff --git a/src/cmd/internal/obj/arm/asm5.go b/src/cmd/internal/obj/arm/asm5.go index 0ef13b81f6f3f1..1e2891de0a7ddc 100644 --- a/src/cmd/internal/obj/arm/asm5.go +++ b/src/cmd/internal/obj/arm/asm5.go @@ -579,7 +579,7 @@ func span5(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { } if int64(pc) > p.Pc { - ctxt.Diag("PC padding invalid: want %#d, has %#d: %v", p.Pc, pc, p) + ctxt.Diag("PC padding invalid: want %d, has %d: %v", p.Pc, pc, p) } for int64(pc) != p.Pc { // emit 0xe1a00000 (MOVW R0, R0) diff --git a/src/cmd/internal/obj/arm64/a.out.go b/src/cmd/internal/obj/arm64/a.out.go index 710dd64b304c12..814dba2c100b30 100644 --- a/src/cmd/internal/obj/arm64/a.out.go +++ b/src/cmd/internal/obj/arm64/a.out.go @@ -1020,6 +1020,12 @@ const ( AWORD AYIELD ABTI + APACIASP + AAUTIASP + APACIBSP + AAUTIBSP + AAUTIA1716 + AAUTIB1716 ALAST AB = obj.AJMP ABL = obj.ACALL diff --git a/src/cmd/internal/obj/arm64/anames.go b/src/cmd/internal/obj/arm64/anames.go index 379f53bab37cdb..497429d9985922 100644 --- a/src/cmd/internal/obj/arm64/anames.go +++ b/src/cmd/internal/obj/arm64/anames.go @@ -537,5 +537,11 @@ var Anames = []string{ "WORD", "YIELD", "BTI", + "PACIASP", + "AUTIASP", + "PACIBSP", + "AUTIBSP", + "AUTIA1716", + "AUTIB1716", "LAST", } diff --git a/src/cmd/internal/obj/arm64/asm7.go b/src/cmd/internal/obj/arm64/asm7.go index 743d09a319087d..3cb4be436b9b7f 100644 --- a/src/cmd/internal/obj/arm64/asm7.go +++ b/src/cmd/internal/obj/arm64/asm7.go @@ -34,6 +34,7 @@ import ( "cmd/internal/obj" "cmd/internal/objabi" "encoding/binary" + "errors" "fmt" "log" "math" @@ -3017,6 +3018,13 @@ func buildop(ctxt *obj.Link) { oprangeset(ANOOP, t) oprangeset(ADRPS, t) + oprangeset(APACIASP, t) + oprangeset(AAUTIASP, t) + oprangeset(APACIBSP, t) + oprangeset(AAUTIBSP, t) + oprangeset(AAUTIA1716, t) + oprangeset(AAUTIB1716, t) + case ACBZ: oprangeset(ACBZW, t) oprangeset(ACBNZ, t) @@ -4354,7 +4362,7 @@ func (c *ctxt7) asmout(p *obj.Prog, out []uint32) (count int) { // remove the NOTUSETMP flag in optab. op := c.opirr(p, p.As) if op&Sbit != 0 { - c.ctxt.Diag("can not break addition/subtraction when S bit is set", p) + c.ctxt.Diag("can not break addition/subtraction when S bit is set (%v)", p) } rt, r := p.To.Reg, p.Reg if r == obj.REG_NONE { @@ -7016,6 +7024,24 @@ func (c *ctxt7) op0(p *obj.Prog, a obj.As) uint32 { case ASEVL: return SYSHINT(5) + + case APACIASP: + return SYSHINT(25) + + case AAUTIASP: + return SYSHINT(29) + + case APACIBSP: + return SYSHINT(27) + + case AAUTIBSP: + return SYSHINT(31) + + case AAUTIA1716: + return SYSHINT(12) + + case AAUTIB1716: + return SYSHINT(14) } c.ctxt.Diag("%v: bad op0 %v", p, a) @@ -7830,3 +7856,146 @@ func (c *ctxt7) encRegShiftOrExt(p *obj.Prog, a *obj.Addr, r int16) uint32 { func pack(q uint32, arngA, arngB uint8) uint32 { return uint32(q)<<16 | uint32(arngA)<<8 | uint32(arngB) } + +// ARM64RegisterExtension constructs an ARM64 register with extension or arrangement. 
+func ARM64RegisterExtension(a *obj.Addr, ext string, reg, num int16, isAmount, isIndex bool) error { + Rnum := (reg & 31) + int16(num<<5) + if isAmount { + if num < 0 || num > 7 { + return errors.New("index shift amount is out of range") + } + } + if reg <= REG_R31 && reg >= REG_R0 { + if !isAmount { + return errors.New("invalid register extension") + } + switch ext { + case "UXTB": + if a.Type == obj.TYPE_MEM { + return errors.New("invalid shift for the register offset addressing mode") + } + a.Reg = REG_UXTB + Rnum + case "UXTH": + if a.Type == obj.TYPE_MEM { + return errors.New("invalid shift for the register offset addressing mode") + } + a.Reg = REG_UXTH + Rnum + case "UXTW": + // effective address of memory is a base register value and an offset register value. + if a.Type == obj.TYPE_MEM { + a.Index = REG_UXTW + Rnum + } else { + a.Reg = REG_UXTW + Rnum + } + case "UXTX": + if a.Type == obj.TYPE_MEM { + return errors.New("invalid shift for the register offset addressing mode") + } + a.Reg = REG_UXTX + Rnum + case "SXTB": + if a.Type == obj.TYPE_MEM { + return errors.New("invalid shift for the register offset addressing mode") + } + a.Reg = REG_SXTB + Rnum + case "SXTH": + if a.Type == obj.TYPE_MEM { + return errors.New("invalid shift for the register offset addressing mode") + } + a.Reg = REG_SXTH + Rnum + case "SXTW": + if a.Type == obj.TYPE_MEM { + a.Index = REG_SXTW + Rnum + } else { + a.Reg = REG_SXTW + Rnum + } + case "SXTX": + if a.Type == obj.TYPE_MEM { + a.Index = REG_SXTX + Rnum + } else { + a.Reg = REG_SXTX + Rnum + } + case "LSL": + a.Index = REG_LSL + Rnum + default: + return errors.New("unsupported general register extension type: " + ext) + + } + } else if reg <= REG_V31 && reg >= REG_V0 { + switch ext { + case "B8": + if isIndex { + return errors.New("invalid register extension") + } + a.Reg = REG_ARNG + (reg & 31) + ((ARNG_8B & 15) << 5) + case "B16": + if isIndex { + return errors.New("invalid register extension") + } + a.Reg = REG_ARNG + (reg & 31) + ((ARNG_16B & 15) << 5) + case "H4": + if isIndex { + return errors.New("invalid register extension") + } + a.Reg = REG_ARNG + (reg & 31) + ((ARNG_4H & 15) << 5) + case "H8": + if isIndex { + return errors.New("invalid register extension") + } + a.Reg = REG_ARNG + (reg & 31) + ((ARNG_8H & 15) << 5) + case "S2": + if isIndex { + return errors.New("invalid register extension") + } + a.Reg = REG_ARNG + (reg & 31) + ((ARNG_2S & 15) << 5) + case "S4": + if isIndex { + return errors.New("invalid register extension") + } + a.Reg = REG_ARNG + (reg & 31) + ((ARNG_4S & 15) << 5) + case "D1": + if isIndex { + return errors.New("invalid register extension") + } + a.Reg = REG_ARNG + (reg & 31) + ((ARNG_1D & 15) << 5) + case "D2": + if isIndex { + return errors.New("invalid register extension") + } + a.Reg = REG_ARNG + (reg & 31) + ((ARNG_2D & 15) << 5) + case "Q1": + if isIndex { + return errors.New("invalid register extension") + } + a.Reg = REG_ARNG + (reg & 31) + ((ARNG_1Q & 15) << 5) + case "B": + if !isIndex { + return nil + } + a.Reg = REG_ELEM + (reg & 31) + ((ARNG_B & 15) << 5) + a.Index = num + case "H": + if !isIndex { + return nil + } + a.Reg = REG_ELEM + (reg & 31) + ((ARNG_H & 15) << 5) + a.Index = num + case "S": + if !isIndex { + return nil + } + a.Reg = REG_ELEM + (reg & 31) + ((ARNG_S & 15) << 5) + a.Index = num + case "D": + if !isIndex { + return nil + } + a.Reg = REG_ELEM + (reg & 31) + ((ARNG_D & 15) << 5) + a.Index = num + default: + return errors.New("unsupported simd register extension type: " + ext) + } + } 
else { + return errors.New("invalid register and extension combination") + } + return nil +} diff --git a/src/cmd/internal/obj/link.go b/src/cmd/internal/obj/link.go index 6513e116872a0a..b7e116bae39dc4 100644 --- a/src/cmd/internal/obj/link.go +++ b/src/cmd/internal/obj/link.go @@ -464,7 +464,7 @@ type LSym struct { P []byte R []Reloc - Extra *interface{} // *FuncInfo, *VarInfo, *FileInfo, or *TypeInfo, if present + Extra *interface{} // *FuncInfo, *VarInfo, *FileInfo, *TypeInfo, or *ItabInfo, if present Pkg string PkgIdx int32 @@ -604,6 +604,15 @@ func (s *LSym) NewTypeInfo() *TypeInfo { return t } +// TypeInfo returns the *TypeInfo associated with s, or else nil. +func (s *LSym) TypeInfo() *TypeInfo { + if s.Extra == nil { + return nil + } + t, _ := (*s.Extra).(*TypeInfo) + return t +} + // An ItabInfo contains information for a symbol // that contains a runtime.itab. type ItabInfo struct { @@ -620,6 +629,15 @@ func (s *LSym) NewItabInfo() *ItabInfo { return t } +// ItabInfo returns the *ItabInfo associated with s, or else nil. +func (s *LSym) ItabInfo() *ItabInfo { + if s.Extra == nil { + return nil + } + i, _ := (*s.Extra).(*ItabInfo) + return i +} + // WasmImport represents a WebAssembly (WASM) imported function with // parameters and results translated into WASM types based on the Go function // declaration. @@ -1198,6 +1216,13 @@ type Link struct { Fingerprint goobj.FingerprintType // fingerprint of symbol indices, to catch index mismatch } +// Assert to vet's printf checker that Link.DiagFunc is a printf-like. +func _(ctxt *Link) { + ctxt.DiagFunc = func(format string, args ...any) { + _ = fmt.Sprintf(format, args...) + } +} + func (ctxt *Link) Diag(format string, args ...interface{}) { ctxt.Errors++ ctxt.DiagFunc(format, args...) diff --git a/src/cmd/internal/obj/loong64/asm.go b/src/cmd/internal/obj/loong64/asm.go index ca6e2be4aa9eb6..5a61acac8741d6 100644 --- a/src/cmd/internal/obj/loong64/asm.go +++ b/src/cmd/internal/obj/loong64/asm.go @@ -2057,7 +2057,7 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) { switch o.type_ { default: - c.ctxt.Diag("unknown type %d %v", o.type_) + c.ctxt.Diag("unknown type %d", o.type_) prasm(p) case 0: // pseudo ops @@ -4438,7 +4438,7 @@ func (c *ctxt0) specialFpMovInst(a obj.As, fclass int, tclass int) uint32 { } } - c.ctxt.Diag("bad class combination: %s %s,%s\n", a, fclass, tclass) + c.ctxt.Diag("bad class combination: %s %d,%d\n", a, fclass, tclass) return 0 } diff --git a/src/cmd/internal/obj/mips/asm0.go b/src/cmd/internal/obj/mips/asm0.go index 2de5a4d6c0b8fc..a55953e741432a 100644 --- a/src/cmd/internal/obj/mips/asm0.go +++ b/src/cmd/internal/obj/mips/asm0.go @@ -1172,7 +1172,7 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) { } switch o.type_ { default: - c.ctxt.Diag("unknown type %d %v", o.type_) + c.ctxt.Diag("unknown type %d", o.type_) prasm(p) case 0: /* pseudo ops */ diff --git a/src/cmd/internal/obj/plist.go b/src/cmd/internal/obj/plist.go index 698e5ace9ccca8..69914b1c1f36d5 100644 --- a/src/cmd/internal/obj/plist.go +++ b/src/cmd/internal/obj/plist.go @@ -63,12 +63,12 @@ func Flushplist(ctxt *Link, plist *Plist, newprog ProgAlloc) { switch p.To.Sym.Name { case "go_args_stackmap": if p.From.Type != TYPE_CONST || p.From.Offset != abi.FUNCDATA_ArgsPointerMaps { - ctxt.Diag("%s: FUNCDATA use of go_args_stackmap(SB) without FUNCDATA_ArgsPointerMaps", p.Pos) + ctxt.Diag("%v: FUNCDATA use of go_args_stackmap(SB) without FUNCDATA_ArgsPointerMaps", p) } p.To.Sym = ctxt.LookupDerived(curtext, 
curtext.Name+".args_stackmap") case "no_pointers_stackmap": if p.From.Type != TYPE_CONST || p.From.Offset != abi.FUNCDATA_LocalsPointerMaps { - ctxt.Diag("%s: FUNCDATA use of no_pointers_stackmap(SB) without FUNCDATA_LocalsPointerMaps", p.Pos) + ctxt.Diag("%v: FUNCDATA use of no_pointers_stackmap(SB) without FUNCDATA_LocalsPointerMaps", p) } // funcdata for functions with no local variables in frame. // Define two zero-length bitmaps, because the same index is used diff --git a/src/cmd/internal/obj/riscv/obj.go b/src/cmd/internal/obj/riscv/obj.go index 91642ffbcb0e50..74699cc398534e 100644 --- a/src/cmd/internal/obj/riscv/obj.go +++ b/src/cmd/internal/obj/riscv/obj.go @@ -2991,7 +2991,7 @@ func (ins *instruction) length() int { func (ins *instruction) validate(ctxt *obj.Link) { enc, err := encodingForAs(ins.as) if err != nil { - ctxt.Diag(err.Error()) + ctxt.Diag("%v", err) return } enc.validate(ctxt, ins) @@ -3026,7 +3026,7 @@ func instructionsForOpImmediate(p *obj.Prog, as obj.As, rs int16) []*instruction low, high, err := Split32BitImmediate(ins.imm) if err != nil { - p.Ctxt.Diag("%v: constant %d too large", p, ins.imm, err) + p.Ctxt.Diag("%v: constant %d too large: %v", p, ins.imm, err) return nil } if high == 0 { diff --git a/src/cmd/internal/robustio/robustio_darwin.go b/src/cmd/internal/robustio/robustio_darwin.go index 99fd8ebc2fff18..69ea2479308dea 100644 --- a/src/cmd/internal/robustio/robustio_darwin.go +++ b/src/cmd/internal/robustio/robustio_darwin.go @@ -13,9 +13,6 @@ const errFileNotFound = syscall.ENOENT // isEphemeralError returns true if err may be resolved by waiting. func isEphemeralError(err error) bool { - var errno syscall.Errno - if errors.As(err, &errno) { - return errno == errFileNotFound - } - return false + errno, ok := errors.AsType[syscall.Errno](err) + return ok && errno == errFileNotFound } diff --git a/src/cmd/internal/robustio/robustio_flaky.go b/src/cmd/internal/robustio/robustio_flaky.go index c56e36ca62412a..ec1a2daea65852 100644 --- a/src/cmd/internal/robustio/robustio_flaky.go +++ b/src/cmd/internal/robustio/robustio_flaky.go @@ -31,8 +31,7 @@ func retry(f func() (err error, mayRetry bool)) error { return err } - var errno syscall.Errno - if errors.As(err, &errno) && (lowestErrno == 0 || errno < lowestErrno) { + if errno, ok := errors.AsType[syscall.Errno](err); ok && (lowestErrno == 0 || errno < lowestErrno) { bestErr = err lowestErrno = errno } else if bestErr == nil { diff --git a/src/cmd/internal/robustio/robustio_windows.go b/src/cmd/internal/robustio/robustio_windows.go index 687dcb66f83d15..ad46ec5cfeb601 100644 --- a/src/cmd/internal/robustio/robustio_windows.go +++ b/src/cmd/internal/robustio/robustio_windows.go @@ -14,8 +14,7 @@ const errFileNotFound = syscall.ERROR_FILE_NOT_FOUND // isEphemeralError returns true if err may be resolved by waiting. 
func isEphemeralError(err error) bool { - var errno syscall.Errno - if errors.As(err, &errno) { + if errno, ok := errors.AsType[syscall.Errno](err); ok { switch errno { case syscall.ERROR_ACCESS_DENIED, syscall.ERROR_FILE_NOT_FOUND, diff --git a/src/cmd/internal/script/engine.go b/src/cmd/internal/script/engine.go index eb9344f6e2a1eb..4607868379488a 100644 --- a/src/cmd/internal/script/engine.go +++ b/src/cmd/internal/script/engine.go @@ -185,7 +185,7 @@ func (e *Engine) Execute(s *State, file string, script *bufio.Reader, log io.Wri var lineno int lineErr := func(err error) error { - if errors.As(err, new(*CommandError)) { + if _, ok := errors.AsType[*CommandError](err); ok { return err } return fmt.Errorf("%s:%d: %w", file, lineno, err) @@ -283,7 +283,7 @@ func (e *Engine) Execute(s *State, file string, script *bufio.Reader, log io.Wri // Run the command. err = e.runCommand(s, cmd, impl) if err != nil { - if stop := (stopError{}); errors.As(err, &stop) { + if stop, ok := errors.AsType[stopError](err); ok { // Since the 'stop' command halts execution of the entire script, // log its message separately from the section in which it appears. err = endSection(true) @@ -607,13 +607,13 @@ func checkStatus(cmd *command, err error) error { return nil } - if s := (stopError{}); errors.As(err, &s) { + if _, ok := errors.AsType[stopError](err); ok { // This error originated in the Stop command. // Propagate it as-is. return cmdError(cmd, err) } - if w := (waitError{}); errors.As(err, &w) { + if _, ok := errors.AsType[waitError](err); ok { // This error was surfaced from a background process by a call to Wait. // Add a call frame for Wait itself, but ignore its "want" field. // (Wait itself cannot fail to wait on commands or else it would leak diff --git a/src/cmd/internal/script/scripttest/scripttest.go b/src/cmd/internal/script/scripttest/scripttest.go index bace662a6722fd..349201fd188c1b 100644 --- a/src/cmd/internal/script/scripttest/scripttest.go +++ b/src/cmd/internal/script/scripttest/scripttest.go @@ -89,7 +89,7 @@ func Run(t testing.TB, e *script.Engine, s *script.State, filename string, testS return e.Execute(s, filename, bufio.NewReader(testScript), log) }() - if skip := (skipError{}); errors.As(err, &skip) { + if skip, ok := errors.AsType[skipError](err); ok { if skip.msg == "" { t.Skip("SKIP") } else { diff --git a/src/cmd/internal/test2json/test2json.go b/src/cmd/internal/test2json/test2json.go index d08ef389f82a21..f28051e1771db8 100644 --- a/src/cmd/internal/test2json/test2json.go +++ b/src/cmd/internal/test2json/test2json.go @@ -38,6 +38,7 @@ type event struct { FailedBuild string `json:",omitempty"` Key string `json:",omitempty"` Value string `json:",omitempty"` + Path string `json:",omitempty"` } // textBytes is a hack to get JSON to emit a []byte as a string @@ -180,6 +181,7 @@ var ( []byte("=== FAIL "), []byte("=== SKIP "), []byte("=== ATTR "), + []byte("=== ARTIFACTS "), } reports = [][]byte{ @@ -251,7 +253,6 @@ func (c *Converter) handleInputLine(line []byte) { // "=== RUN " // "=== PAUSE " // "=== CONT " - actionColon := false origLine := line ok := false indent := 0 @@ -273,7 +274,6 @@ func (c *Converter) handleInputLine(line []byte) { } for _, magic := range reports { if bytes.HasPrefix(line, magic) { - actionColon = true ok = true break } @@ -296,16 +296,11 @@ func (c *Converter) handleInputLine(line []byte) { return } - // Parse out action and test name. 
- i := 0 - if actionColon { - i = bytes.IndexByte(line, ':') + 1 - } - if i == 0 { - i = len(updates[0]) - } - action := strings.ToLower(strings.TrimSuffix(strings.TrimSpace(string(line[4:i])), ":")) - name := strings.TrimSpace(string(line[i:])) + // Parse out action and test name from "=== ACTION: Name". + action, name, _ := strings.Cut(string(line[len("=== "):]), " ") + action = strings.TrimSuffix(action, ":") + action = strings.ToLower(action) + name = strings.TrimSpace(name) e := &event{Action: action} if line[0] == '-' { // PASS or FAIL report @@ -336,7 +331,10 @@ func (c *Converter) handleInputLine(line []byte) { c.output.write(origLine) return } - if action == "attr" { + switch action { + case "artifacts": + name, e.Path, _ = strings.Cut(name, " ") + case "attr": var rest string name, rest, _ = strings.Cut(name, " ") e.Key, e.Value, _ = strings.Cut(rest, " ") diff --git a/src/cmd/link/dwarf_test.go b/src/cmd/link/dwarf_test.go index 6a60a746a5b48b..56a076002a92d4 100644 --- a/src/cmd/link/dwarf_test.go +++ b/src/cmd/link/dwarf_test.go @@ -364,6 +364,10 @@ func TestFlagW(t *testing.T) { if runtime.GOOS == "aix" { t.Skip("internal/xcoff cannot parse file without symbol table") } + if !platform.ExecutableHasDWARF(runtime.GOOS, runtime.GOARCH) { + t.Skipf("skipping on %s/%s: no DWARF symbol table in executables", runtime.GOOS, runtime.GOARCH) + } + t.Parallel() tmpdir := t.TempDir() @@ -382,7 +386,7 @@ func TestFlagW(t *testing.T) { {"-s", false}, // -s implies -w {"-s -w=0", true}, // -w=0 negates the implied -w } - if testenv.HasCGO() { + if testenv.HasCGO() && runtime.GOOS != "solaris" { // Solaris linker doesn't support the -S flag tests = append(tests, testCase{"-w -linkmode=external", false}, testCase{"-s -linkmode=external", false}, diff --git a/src/cmd/link/internal/arm64/asm.go b/src/cmd/link/internal/arm64/asm.go index 68474b4484f1af..8d8ea8ac542c50 100644 --- a/src/cmd/link/internal/arm64/asm.go +++ b/src/cmd/link/internal/arm64/asm.go @@ -224,6 +224,28 @@ func adddynrel(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, s loade } return true + case objabi.MachoRelocOffset + ld.MACHO_ARM64_RELOC_SUBTRACTOR*2: + // ARM64_RELOC_SUBTRACTOR must be followed by ARM64_RELOC_UNSIGNED. + // The pair of relocations resolves to the difference between two + // symbol addresses (each relocation specifies a symbol). + outer, off := ld.FoldSubSymbolOffset(ldr, targ) + if outer != s { + // TODO: support subtracted symbol in different section. 
+ ldr.Errorf(s, "unsupported ARM64_RELOC_SUBTRACTOR reloc: target %s, outer %s", ldr.SymName(targ), ldr.SymName(outer)) + break + } + su := ldr.MakeSymbolUpdater(s) + relocs := su.Relocs() + if rIdx+1 >= relocs.Count() || relocs.At(rIdx+1).Type() != objabi.MachoRelocOffset+ld.MACHO_ARM64_RELOC_UNSIGNED*2 || relocs.At(rIdx+1).Off() != r.Off() { + ldr.Errorf(s, "unexpected ARM64_RELOC_SUBTRACTOR reloc, must be followed by ARM64_RELOC_UNSIGNED at same offset") + break + } + su.SetRelocType(rIdx+1, objabi.R_PCREL) + su.SetRelocAdd(rIdx+1, r.Add()+int64(r.Off())+int64(r.Siz())-off) + // Remove the other relocation + su.SetRelocSiz(rIdx, 0) + return true + case objabi.MachoRelocOffset + ld.MACHO_ARM64_RELOC_BRANCH26*2 + pcrel: su := ldr.MakeSymbolUpdater(s) su.SetRelocType(rIdx, objabi.R_CALLARM64) @@ -277,6 +299,17 @@ func adddynrel(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, s loade su.SetRelocSym(rIdx, syms.GOT) su.SetRelocAdd(rIdx, int64(ldr.SymGot(targ))) return true + + case objabi.MachoRelocOffset + ld.MACHO_ARM64_RELOC_POINTER_TO_GOT*2 + pcrel: + if targType != sym.SDYNIMPORT { + ldr.Errorf(s, "unexpected GOT reloc for non-dynamic symbol %s", ldr.SymName(targ)) + } + ld.AddGotSym(target, ldr, syms, targ, 0) + su := ldr.MakeSymbolUpdater(s) + su.SetRelocType(rIdx, objabi.R_PCREL) + su.SetRelocSym(rIdx, syms.GOT) + su.SetRelocAdd(rIdx, r.Add()+int64(r.Siz())+int64(ldr.SymGot(targ))) + return true } // Reread the reloc to incorporate any changes in type above. diff --git a/src/cmd/link/internal/ld/fallocate_test.go b/src/cmd/link/internal/ld/fallocate_test.go index 3c6b7ef752edd3..f463b5b63b3b69 100644 --- a/src/cmd/link/internal/ld/fallocate_test.go +++ b/src/cmd/link/internal/ld/fallocate_test.go @@ -10,6 +10,7 @@ import ( "errors" "os" "path/filepath" + "runtime" "syscall" "testing" ) @@ -53,12 +54,24 @@ func TestFallocate(t *testing.T) { if got := stat.Size(); got != sz { t.Errorf("unexpected file size: got %d, want %d", got, sz) } - // The number of blocks must be enough for the requested size. - // We used to require an exact match, but it appears that - // some file systems allocate a few extra blocks in some cases. - // See issue #41127. - if got, want := stat.Sys().(*syscall.Stat_t).Blocks, (sz+511)/512; got < want { - t.Errorf("unexpected disk usage: got %d blocks, want at least %d", got, want) + if runtime.GOOS == "darwin" { + // Check the number of allocated blocks on Darwin. On Linux (and + // perhaps BSDs), stat's Blocks field may not be portable as it + // is an implementation detail of the file system. On Darwin, it + // is documented as "the actual number of blocks allocated for + // the file in 512-byte units". + // The check is introduced when fixing a Darwin-specific bug. On + // Darwin, the file allocation syscall is a bit tricky. On Linux + // and BSDs, it is more straightforward and unlikely to go wrong. + // Given these two reasons, only check it on Darwin. + // + // The number of blocks must be enough for the requested size. + // We used to require an exact match, but it appears that + // some file systems allocate a few extra blocks in some cases. + // See issue #41127. 
+ if got, want := stat.Sys().(*syscall.Stat_t).Blocks, (sz+511)/512; got < want { + t.Errorf("unexpected disk usage: got %d blocks, want at least %d", got, want) + } } out.munmap() } diff --git a/src/cmd/link/internal/ld/lib.go b/src/cmd/link/internal/ld/lib.go index 8d2763bb57f31a..2c861129b52f9a 100644 --- a/src/cmd/link/internal/ld/lib.go +++ b/src/cmd/link/internal/ld/lib.go @@ -1452,7 +1452,7 @@ func (ctxt *Link) hostlink() { argv = append(argv, "-s") } } else if *FlagW { - if !ctxt.IsAIX() { // The AIX linker's -S has different meaning + if !ctxt.IsAIX() && !ctxt.IsSolaris() { // The AIX and Solaris linkers' -S has different meaning argv = append(argv, "-Wl,-S") // suppress debugging symbols } } @@ -1772,7 +1772,8 @@ func (ctxt *Link) hostlink() { } // Force global symbols to be exported for dlopen, etc. - if ctxt.IsELF { + switch { + case ctxt.IsELF: if ctxt.DynlinkingGo() || ctxt.BuildMode == BuildModeCShared || !linkerFlagSupported(ctxt.Arch, argv[0], altLinker, "-Wl,--export-dynamic-symbol=main") { argv = append(argv, "-rdynamic") } else { @@ -1783,10 +1784,16 @@ func (ctxt *Link) hostlink() { sort.Strings(exports) argv = append(argv, exports...) } - } - if ctxt.HeadType == objabi.Haix { + case ctxt.IsAIX(): fileName := xcoffCreateExportFile(ctxt) argv = append(argv, "-Wl,-bE:"+fileName) + case ctxt.IsWindows() && !slices.Contains(flagExtldflags, wlPrefix+"export-all-symbols"): + fileName := peCreateExportFile(ctxt, filepath.Base(outopt)) + prefix := "" + if isMSVC { + prefix = "-Wl,-def:" + } + argv = append(argv, prefix+fileName) } const unusedArguments = "-Qunused-arguments" diff --git a/src/cmd/link/internal/ld/macho.go b/src/cmd/link/internal/ld/macho.go index 431dad9d6bcbaa..c26263466616f5 100644 --- a/src/cmd/link/internal/ld/macho.go +++ b/src/cmd/link/internal/ld/macho.go @@ -106,11 +106,13 @@ const ( MACHO_ARM_RELOC_SECTDIFF = 2 MACHO_ARM_RELOC_BR24 = 5 MACHO_ARM64_RELOC_UNSIGNED = 0 + MACHO_ARM64_RELOC_SUBTRACTOR = 1 MACHO_ARM64_RELOC_BRANCH26 = 2 MACHO_ARM64_RELOC_PAGE21 = 3 MACHO_ARM64_RELOC_PAGEOFF12 = 4 MACHO_ARM64_RELOC_GOT_LOAD_PAGE21 = 5 MACHO_ARM64_RELOC_GOT_LOAD_PAGEOFF12 = 6 + MACHO_ARM64_RELOC_POINTER_TO_GOT = 7 MACHO_ARM64_RELOC_ADDEND = 10 MACHO_GENERIC_RELOC_VANILLA = 0 MACHO_FAKE_GOTPCREL = 100 diff --git a/src/cmd/link/internal/ld/pe.go b/src/cmd/link/internal/ld/pe.go index 5219a98dd47cf4..e0186f46b035d9 100644 --- a/src/cmd/link/internal/ld/pe.go +++ b/src/cmd/link/internal/ld/pe.go @@ -8,6 +8,7 @@ package ld import ( + "bytes" "cmd/internal/objabi" "cmd/internal/sys" "cmd/link/internal/loader" @@ -17,6 +18,8 @@ import ( "fmt" "internal/buildcfg" "math" + "os" + "path/filepath" "slices" "sort" "strconv" @@ -1748,3 +1751,46 @@ func asmbPe(ctxt *Link) { pewrite(ctxt) } + +// peCreateExportFile creates a file with exported symbols for Windows .def files. +// ld will export all symbols, even those not marked for export, unless a .def file is provided. +func peCreateExportFile(ctxt *Link, libName string) (fname string) { + fname = filepath.Join(*flagTmpdir, "export_file.def") + var buf bytes.Buffer + + if ctxt.BuildMode == BuildModeCShared { + fmt.Fprintf(&buf, "LIBRARY %s\n", libName) + } + buf.WriteString("EXPORTS\n") + + ldr := ctxt.loader + var exports []string + for s := range ldr.ForAllCgoExportStatic() { + extname := ldr.SymExtname(s) + if !strings.HasPrefix(extname, "_cgoexp_") { + continue + } + if ldr.IsFileLocal(s) { + continue // Only export non-static symbols + } + // Retrieve the name of the initial symbol + // exported by cgo. 
+ // The corresponding Go symbol is: + // _cgoexp_hashcode_symname. + name := strings.SplitN(extname, "_", 4)[3] + exports = append(exports, name) + } + if len(exports) == 0 { + // See runtime/cgo/windows.go for details. + exports = append(exports, "_cgo_stub_export") + } + sort.Strings(exports) + buf.WriteString(strings.Join(exports, "\n")) + + err := os.WriteFile(fname, buf.Bytes(), 0666) + if err != nil { + Errorf("WriteFile %s failed: %v", fname, err) + } + + return fname +} diff --git a/src/cmd/link/internal/ld/symtab.go b/src/cmd/link/internal/ld/symtab.go index 759262286d39d8..2c999ccc4e3a19 100644 --- a/src/cmd/link/internal/ld/symtab.go +++ b/src/cmd/link/internal/ld/symtab.go @@ -645,7 +645,7 @@ func (ctxt *Link) symtab(pcln *pclntab) []sym.SymKind { sliceSym(pcln.funcnametab) // The cutab slice - sliceSym(pcln.cutab) + slice(pcln.cutab, uint64(ldr.SymSize(pcln.cutab))/4) // The filetab slice sliceSym(pcln.filetab) @@ -654,7 +654,7 @@ func (ctxt *Link) symtab(pcln *pclntab) []sym.SymKind { sliceSym(pcln.pctab) // The pclntab slice - slice(pcln.pclntab, uint64(ldr.SymSize(pcln.pclntab))) + sliceSym(pcln.pclntab) // The ftab slice slice(pcln.pclntab, uint64(pcln.nfunc+1)) diff --git a/src/cmd/link/internal/ld/xcoff.go b/src/cmd/link/internal/ld/xcoff.go index 1bce2cf9b6124d..da728e25455618 100644 --- a/src/cmd/link/internal/ld/xcoff.go +++ b/src/cmd/link/internal/ld/xcoff.go @@ -1779,10 +1779,7 @@ func xcoffCreateExportFile(ctxt *Link) (fname string) { var buf bytes.Buffer ldr := ctxt.loader - for s, nsym := loader.Sym(1), loader.Sym(ldr.NSym()); s < nsym; s++ { - if !ldr.AttrCgoExport(s) { - continue - } + for s := range ldr.ForAllCgoExportStatic() { extname := ldr.SymExtname(s) if !strings.HasPrefix(extname, "._cgoexp_") { continue diff --git a/src/cmd/link/internal/loader/loader.go b/src/cmd/link/internal/loader/loader.go index 9f3ea3e553dd7c..0ed20d1becbb03 100644 --- a/src/cmd/link/internal/loader/loader.go +++ b/src/cmd/link/internal/loader/loader.go @@ -16,6 +16,7 @@ import ( "fmt" "internal/abi" "io" + "iter" "log" "math/bits" "os" @@ -1109,6 +1110,18 @@ func (l *Loader) SetAttrCgoExportStatic(i Sym, v bool) { } } +// ForAllCgoExportStatic returns an iterator over all symbols +// marked with the "cgo_export_static" compiler directive. +func (l *Loader) ForAllCgoExportStatic() iter.Seq[Sym] { + return func(yield func(Sym) bool) { + for s := range l.attrCgoExportStatic { + if !yield(s) { + break + } + } + } +} + // IsGeneratedSym returns true if a symbol's been previously marked as a // generator symbol through the SetIsGeneratedSym. The functions for generator // symbols are kept in the Link context. 
@@ -2437,6 +2450,9 @@ var blockedLinknames = map[string][]string{ "sync_test.runtime_blockUntilEmptyCleanupQueue": {"sync_test"}, "time.runtimeIsBubbled": {"time"}, "unique.runtime_blockUntilEmptyCleanupQueue": {"unique"}, + // Experimental features + "runtime.goroutineLeakGC": {"runtime/pprof"}, + "runtime.goroutineleakcount": {"runtime/pprof"}, // Others "net.newWindowsFile": {"net"}, // pushed from os "testing/synctest.testingSynctestTest": {"testing/synctest"}, // pushed from testing diff --git a/src/cmd/link/link_test.go b/src/cmd/link/link_test.go index 0125ba8e0f56be..31822d21f39d31 100644 --- a/src/cmd/link/link_test.go +++ b/src/cmd/link/link_test.go @@ -1532,11 +1532,13 @@ func TestFlagS(t *testing.T) { } cmd = testenv.Command(t, testenv.GoToolPath(t), "tool", "nm", exe) out, err = cmd.CombinedOutput() - if err != nil && !errors.As(err, new(*exec.ExitError)) { - // Error exit is fine as it may have no symbols. - // On darwin we need to emit dynamic symbol references so it - // actually has some symbols, and nm succeeds. - t.Errorf("(mode=%s) go tool nm failed: %v\n%s", mode, err, out) + if err != nil { + if _, ok := errors.AsType[*exec.ExitError](err); !ok { + // Error exit is fine as it may have no symbols. + // On darwin we need to emit dynamic symbol references so it + // actually has some symbols, and nm succeeds. + t.Errorf("(mode=%s) go tool nm failed: %v\n%s", mode, err, out) + } } for _, s := range syms { if bytes.Contains(out, []byte(s)) { diff --git a/src/cmd/vendor/golang.org/x/sys/unix/affinity_linux.go b/src/cmd/vendor/golang.org/x/sys/unix/affinity_linux.go index 3c7a6d6e2f1d2f..3ea470387bcf08 100644 --- a/src/cmd/vendor/golang.org/x/sys/unix/affinity_linux.go +++ b/src/cmd/vendor/golang.org/x/sys/unix/affinity_linux.go @@ -41,6 +41,15 @@ func (s *CPUSet) Zero() { clear(s[:]) } +// Fill adds all possible CPU bits to the set s. On Linux, [SchedSetaffinity] +// will silently ignore any invalid CPU bits in [CPUSet] so this is an +// efficient way of resetting the CPU affinity of a process. +func (s *CPUSet) Fill() { + for i := range s { + s[i] = ^cpuMask(0) + } +} + func cpuBitsIndex(cpu int) int { return cpu / _NCPUBITS } diff --git a/src/cmd/vendor/golang.org/x/sys/unix/fdset.go b/src/cmd/vendor/golang.org/x/sys/unix/fdset.go index 9e83d18cd04215..62ed12645f48dd 100644 --- a/src/cmd/vendor/golang.org/x/sys/unix/fdset.go +++ b/src/cmd/vendor/golang.org/x/sys/unix/fdset.go @@ -23,7 +23,5 @@ func (fds *FdSet) IsSet(fd int) bool { // Zero clears the set fds. func (fds *FdSet) Zero() { - for i := range fds.Bits { - fds.Bits[i] = 0 - } + clear(fds.Bits[:]) } diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ifreq_linux.go b/src/cmd/vendor/golang.org/x/sys/unix/ifreq_linux.go index 848840ae4c7586..309f5a2b0c76ae 100644 --- a/src/cmd/vendor/golang.org/x/sys/unix/ifreq_linux.go +++ b/src/cmd/vendor/golang.org/x/sys/unix/ifreq_linux.go @@ -111,9 +111,7 @@ func (ifr *Ifreq) SetUint32(v uint32) { // clear zeroes the ifreq's union field to prevent trailing garbage data from // being sent to the kernel if an ifreq is reused. func (ifr *Ifreq) clear() { - for i := range ifr.raw.Ifru { - ifr.raw.Ifru[i] = 0 - } + clear(ifr.raw.Ifru[:]) } // TODO(mdlayher): export as IfreqData? 
For now we can provide helpers such as diff --git a/src/cmd/vendor/golang.org/x/sys/unix/mkall.sh b/src/cmd/vendor/golang.org/x/sys/unix/mkall.sh index e6f31d374df52c..d0ed6119129296 100644 --- a/src/cmd/vendor/golang.org/x/sys/unix/mkall.sh +++ b/src/cmd/vendor/golang.org/x/sys/unix/mkall.sh @@ -49,6 +49,7 @@ esac if [[ "$GOOS" = "linux" ]]; then # Use the Docker-based build system # Files generated through docker (use $cmd so you can Ctl-C the build or run) + set -e $cmd docker build --tag generate:$GOOS $GOOS $cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")/.." && pwd):/build generate:$GOOS exit diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux.go index 4958a657085bcd..9439af961d98b6 100644 --- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -801,9 +801,7 @@ func (sa *SockaddrPPPoE) sockaddr() (unsafe.Pointer, _Socklen, error) { // one. The kernel expects SID to be in network byte order. binary.BigEndian.PutUint16(sa.raw[6:8], sa.SID) copy(sa.raw[8:14], sa.Remote) - for i := 14; i < 14+IFNAMSIZ; i++ { - sa.raw[i] = 0 - } + clear(sa.raw[14 : 14+IFNAMSIZ]) copy(sa.raw[14:], sa.Dev) return unsafe.Pointer(&sa.raw), SizeofSockaddrPPPoX, nil } diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_netbsd.go index 88162099af5444..34a4676973042f 100644 --- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -248,6 +248,23 @@ func Statvfs(path string, buf *Statvfs_t) (err error) { return Statvfs1(path, buf, ST_WAIT) } +func Getvfsstat(buf []Statvfs_t, flags int) (n int, err error) { + var ( + _p0 unsafe.Pointer + bufsize uintptr + ) + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + bufsize = unsafe.Sizeof(Statvfs_t{}) * uintptr(len(buf)) + } + r0, _, e1 := Syscall(SYS_GETVFSSTAT, uintptr(_p0), bufsize, uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + /* * Exposed directly */ diff --git a/src/cmd/vendor/golang.org/x/sys/windows/syscall_windows.go b/src/cmd/vendor/golang.org/x/sys/windows/syscall_windows.go index 640f6b153f0045..bd51337306019a 100644 --- a/src/cmd/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/src/cmd/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -321,6 +321,8 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetConsoleOutputCP(cp uint32) (err error) = kernel32.SetConsoleOutputCP //sys WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW //sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW +//sys GetNumberOfConsoleInputEvents(console Handle, numevents *uint32) (err error) = kernel32.GetNumberOfConsoleInputEvents +//sys FlushConsoleInputBuffer(console Handle) (err error) = kernel32.FlushConsoleInputBuffer //sys resizePseudoConsole(pconsole Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole //sys CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) [failretval==InvalidHandle] = kernel32.CreateToolhelp32Snapshot //sys Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) = kernel32.Module32FirstW diff --git a/src/cmd/vendor/golang.org/x/sys/windows/types_windows.go b/src/cmd/vendor/golang.org/x/sys/windows/types_windows.go index 
993a2297dbe1a5..358be3c7f5eece 100644 --- a/src/cmd/vendor/golang.org/x/sys/windows/types_windows.go +++ b/src/cmd/vendor/golang.org/x/sys/windows/types_windows.go @@ -65,6 +65,22 @@ var signals = [...]string{ 15: "terminated", } +// File flags for [os.OpenFile]. The O_ prefix is used to indicate +// that these flags are specific to the OpenFile function. +const ( + O_FILE_FLAG_OPEN_NO_RECALL = FILE_FLAG_OPEN_NO_RECALL + O_FILE_FLAG_OPEN_REPARSE_POINT = FILE_FLAG_OPEN_REPARSE_POINT + O_FILE_FLAG_SESSION_AWARE = FILE_FLAG_SESSION_AWARE + O_FILE_FLAG_POSIX_SEMANTICS = FILE_FLAG_POSIX_SEMANTICS + O_FILE_FLAG_BACKUP_SEMANTICS = FILE_FLAG_BACKUP_SEMANTICS + O_FILE_FLAG_DELETE_ON_CLOSE = FILE_FLAG_DELETE_ON_CLOSE + O_FILE_FLAG_SEQUENTIAL_SCAN = FILE_FLAG_SEQUENTIAL_SCAN + O_FILE_FLAG_RANDOM_ACCESS = FILE_FLAG_RANDOM_ACCESS + O_FILE_FLAG_NO_BUFFERING = FILE_FLAG_NO_BUFFERING + O_FILE_FLAG_OVERLAPPED = FILE_FLAG_OVERLAPPED + O_FILE_FLAG_WRITE_THROUGH = FILE_FLAG_WRITE_THROUGH +) + const ( FILE_READ_DATA = 0x00000001 FILE_READ_ATTRIBUTES = 0x00000080 diff --git a/src/cmd/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/src/cmd/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 641a5f4b775aa1..426151a0193d35 100644 --- a/src/cmd/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/src/cmd/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -238,6 +238,7 @@ var ( procFindResourceW = modkernel32.NewProc("FindResourceW") procFindVolumeClose = modkernel32.NewProc("FindVolumeClose") procFindVolumeMountPointClose = modkernel32.NewProc("FindVolumeMountPointClose") + procFlushConsoleInputBuffer = modkernel32.NewProc("FlushConsoleInputBuffer") procFlushFileBuffers = modkernel32.NewProc("FlushFileBuffers") procFlushViewOfFile = modkernel32.NewProc("FlushViewOfFile") procFormatMessageW = modkernel32.NewProc("FormatMessageW") @@ -284,6 +285,7 @@ var ( procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") procGetNamedPipeServerProcessId = modkernel32.NewProc("GetNamedPipeServerProcessId") + procGetNumberOfConsoleInputEvents = modkernel32.NewProc("GetNumberOfConsoleInputEvents") procGetOverlappedResult = modkernel32.NewProc("GetOverlappedResult") procGetPriorityClass = modkernel32.NewProc("GetPriorityClass") procGetProcAddress = modkernel32.NewProc("GetProcAddress") @@ -2111,6 +2113,14 @@ func FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) { return } +func FlushConsoleInputBuffer(console Handle) (err error) { + r1, _, e1 := syscall.SyscallN(procFlushConsoleInputBuffer.Addr(), uintptr(console)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func FlushFileBuffers(handle Handle) (err error) { r1, _, e1 := syscall.SyscallN(procFlushFileBuffers.Addr(), uintptr(handle)) if r1 == 0 { @@ -2481,6 +2491,14 @@ func GetNamedPipeServerProcessId(pipe Handle, serverProcessID *uint32) (err erro return } +func GetNumberOfConsoleInputEvents(console Handle, numevents *uint32) (err error) { + r1, _, e1 := syscall.SyscallN(procGetNumberOfConsoleInputEvents.Addr(), uintptr(console), uintptr(unsafe.Pointer(numevents))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wait bool) (err error) { var _p0 uint32 if wait { diff --git a/src/cmd/vendor/golang.org/x/telemetry/internal/crashmonitor/monitor.go b/src/cmd/vendor/golang.org/x/telemetry/internal/crashmonitor/monitor.go index 53966ad2bcb362..ac22f68c34ccf1 100644 --- 
a/src/cmd/vendor/golang.org/x/telemetry/internal/crashmonitor/monitor.go +++ b/src/cmd/vendor/golang.org/x/telemetry/internal/crashmonitor/monitor.go @@ -326,11 +326,3 @@ func parseStackPCs(crash string) ([]uintptr, error) { } return pcs, nil } - -func min(x, y int) int { - if x < y { - return x - } else { - return y - } -} diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/help.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/help.go index ce92892c817690..0ca27316e6236b 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/help.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/help.go @@ -17,10 +17,26 @@ import ( const help = `PROGNAME is a tool for static analysis of Go programs. -PROGNAME examines Go source code and reports suspicious constructs, -such as Printf calls whose arguments do not align with the format -string. It uses heuristics that do not guarantee all reports are -genuine problems, but it can find errors not caught by the compilers. +PROGNAME examines Go source code and reports diagnostics for +suspicious constructs or opportunities for improvement. +Diagnostics may include suggested fixes. + +An example of a suspicious construct is a Printf call whose arguments +do not align with the format string. Analyzers may use heuristics that +do not guarantee all reports are genuine problems, but can find +mistakes not caught by the compiler. + +An example of an opportunity for improvement is a loop over +strings.Split(doc, "\n"), which may be replaced by a loop over the +strings.SplitSeq iterator, avoiding an array allocation. +Diagnostics in such cases may report non-problems, +but should carry fixes that may be safely applied. + +For analyzers of the first kind, use "go vet -vettool=PROGRAM" +to run the tool and report diagnostics. + +For analyzers of the second kind, use "go fix -fixtool=PROGRAM" +to run the tool and apply the fixes it suggests. ` // Help implements the help subcommand for a multichecker or unitchecker @@ -29,7 +45,7 @@ genuine problems, but it can find errors not caught by the compilers. func Help(progname string, analyzers []*analysis.Analyzer, args []string) { // No args: show summary of all analyzers. 
if len(args) == 0 { - fmt.Println(strings.Replace(help, "PROGNAME", progname, -1)) + fmt.Println(strings.ReplaceAll(help, "PROGNAME", progname)) fmt.Println("Registered analyzers:") fmt.Println() sort.Slice(analyzers, func(i, j int) bool { diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/appends/appends.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/appends/appends.go index e554c3cc90301e..b4e91edce3b88a 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/appends/appends.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/appends/appends.go @@ -13,9 +13,9 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" + "golang.org/x/tools/internal/analysisinternal" ) //go:embed doc.go @@ -23,7 +23,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "appends", - Doc: analysisutil.MustExtractDoc(doc, "appends"), + Doc: analysisinternal.MustExtractDoc(doc, "appends"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/appends", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go index efbf05d596a6d2..e9c08798449386 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go @@ -19,7 +19,7 @@ import ( "strings" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" + "golang.org/x/tools/internal/analysisinternal" ) const Doc = "report mismatches between assembly files and Go declarations" @@ -175,7 +175,7 @@ func run(pass *analysis.Pass) (any, error) { Files: for _, fname := range sfiles { - content, tf, err := analysisutil.ReadFile(pass, fname) + content, tf, err := analysisinternal.ReadFile(pass, fname) if err != nil { return nil, err } @@ -211,7 +211,7 @@ Files: resultStr = "result register" } for _, line := range retLine { - pass.Reportf(analysisutil.LineStart(tf, line), "[%s] %s: RET without writing to %s", arch, fnName, resultStr) + pass.Reportf(tf.LineStart(line), "[%s] %s: RET without writing to %s", arch, fnName, resultStr) } } retLine = nil @@ -227,7 +227,7 @@ Files: lineno++ badf := func(format string, args ...any) { - pass.Reportf(analysisutil.LineStart(tf, lineno), "[%s] %s: %s", arch, fnName, fmt.Sprintf(format, args...)) + pass.Reportf(tf.LineStart(lineno), "[%s] %s: %s", arch, fnName, fmt.Sprintf(format, args...)) } if arch == "" { diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go index 1914bb476168ba..8080aed020e5f9 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go @@ -17,9 +17,11 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/astutil" + "golang.org/x/tools/internal/refactor" + "golang.org/x/tools/internal/typesinternal" ) //go:embed doc.go @@ -27,26 +29,26 @@ var doc string var Analyzer = 
&analysis.Analyzer{ Name: "assign", - Doc: analysisutil.MustExtractDoc(doc, "assign"), + Doc: analysisinternal.MustExtractDoc(doc, "assign"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/assign", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, } func run(pass *analysis.Pass) (any, error) { - inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + var ( + inspect = pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + info = pass.TypesInfo + ) - nodeFilter := []ast.Node{ - (*ast.AssignStmt)(nil), - } - inspect.Preorder(nodeFilter, func(n ast.Node) { - stmt := n.(*ast.AssignStmt) + for curAssign := range inspect.Root().Preorder((*ast.AssignStmt)(nil)) { + stmt := curAssign.Node().(*ast.AssignStmt) if stmt.Tok != token.ASSIGN { - return // ignore := + continue // ignore := } if len(stmt.Lhs) != len(stmt.Rhs) { // If LHS and RHS have different cardinality, they can't be the same. - return + continue } // Delete redundant LHS, RHS pairs, taking care @@ -61,13 +63,13 @@ func run(pass *analysis.Pass) (any, error) { isSelfAssign := false var le string - if !analysisutil.HasSideEffects(pass.TypesInfo, lhs) && - !analysisutil.HasSideEffects(pass.TypesInfo, rhs) && - !isMapIndex(pass.TypesInfo, lhs) && + if typesinternal.NoEffects(info, lhs) && + typesinternal.NoEffects(info, rhs) && + !isMapIndex(info, lhs) && reflect.TypeOf(lhs) == reflect.TypeOf(rhs) { // short-circuit the heavy-weight gofmt check - le = analysisinternal.Format(pass.Fset, lhs) - re := analysisinternal.Format(pass.Fset, rhs) + le = astutil.Format(pass.Fset, lhs) + re := astutil.Format(pass.Fset, rhs) if le == re { isSelfAssign = true } @@ -109,13 +111,14 @@ func run(pass *analysis.Pass) (any, error) { } if len(exprs) == 0 { - return + continue } if len(exprs) == len(stmt.Lhs) { // If every part of the statement is a self-assignment, // remove the whole statement. 
- edits = []analysis.TextEdit{{Pos: stmt.Pos(), End: stmt.End()}} + tokFile := pass.Fset.File(stmt.Pos()) + edits = refactor.DeleteStmt(tokFile, curAssign) } pass.Report(analysis.Diagnostic{ @@ -126,7 +129,7 @@ func run(pass *analysis.Pass) (any, error) { TextEdits: edits, }}, }) - }) + } return nil, nil } diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/atomic/atomic.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/atomic/atomic.go index 82d5439ce571e9..9faa3f67c1deae 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/atomic/atomic.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/atomic/atomic.go @@ -11,10 +11,11 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/astutil" + "golang.org/x/tools/internal/typesinternal" ) //go:embed doc.go @@ -22,7 +23,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "atomic", - Doc: analysisutil.MustExtractDoc(doc, "atomic"), + Doc: analysisinternal.MustExtractDoc(doc, "atomic"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/atomic", Requires: []*analysis.Analyzer{inspect.Analyzer}, RunDespiteErrors: true, @@ -30,7 +31,7 @@ var Analyzer = &analysis.Analyzer{ } func run(pass *analysis.Pass) (any, error) { - if !analysisinternal.Imports(pass.Pkg, "sync/atomic") { + if !typesinternal.Imports(pass.Pkg, "sync/atomic") { return nil, nil // doesn't directly import sync/atomic } @@ -54,7 +55,7 @@ func run(pass *analysis.Pass) (any, error) { continue } obj := typeutil.Callee(pass.TypesInfo, call) - if analysisinternal.IsFunctionNamed(obj, "sync/atomic", "AddInt32", "AddInt64", "AddUint32", "AddUint64", "AddUintptr") { + if typesinternal.IsFunctionNamed(obj, "sync/atomic", "AddInt32", "AddInt64", "AddUint32", "AddUint64", "AddUintptr") { checkAtomicAddAssignment(pass, n.Lhs[i], call) } } @@ -72,7 +73,7 @@ func checkAtomicAddAssignment(pass *analysis.Pass, left ast.Expr, call *ast.Call arg := call.Args[0] broken := false - gofmt := func(e ast.Expr) string { return analysisinternal.Format(pass.Fset, e) } + gofmt := func(e ast.Expr) string { return astutil.Format(pass.Fset, e) } if uarg, ok := arg.(*ast.UnaryExpr); ok && uarg.Op == token.AND { broken = gofmt(left) == gofmt(uarg.X) diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/bools/bools.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/bools/bools.go index e1cf9f9b7ade10..574fafaa95dc98 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/bools/bools.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/bools/bools.go @@ -13,9 +13,9 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/astutil" + "golang.org/x/tools/internal/typesinternal" ) const Doc = "check for common mistakes involving boolean operators" @@ -84,7 +84,7 @@ func (op boolOp) commutativeSets(info *types.Info, e *ast.BinaryExpr, seen map[* i := 0 var sets [][]ast.Expr for j := 0; j <= len(exprs); j++ { - if j == len(exprs) || analysisutil.HasSideEffects(info, exprs[j]) { + if j == len(exprs) || !typesinternal.NoEffects(info, exprs[j]) { if i < j { 
sets = append(sets, exprs[i:j]) } @@ -104,7 +104,7 @@ func (op boolOp) commutativeSets(info *types.Info, e *ast.BinaryExpr, seen map[* func (op boolOp) checkRedundant(pass *analysis.Pass, exprs []ast.Expr) { seen := make(map[string]bool) for _, e := range exprs { - efmt := analysisinternal.Format(pass.Fset, e) + efmt := astutil.Format(pass.Fset, e) if seen[efmt] { pass.ReportRangef(e, "redundant %s: %s %s %s", op.name, efmt, op.tok, efmt) } else { @@ -150,8 +150,8 @@ func (op boolOp) checkSuspect(pass *analysis.Pass, exprs []ast.Expr) { } // e is of the form 'x != c' or 'x == c'. - xfmt := analysisinternal.Format(pass.Fset, x) - efmt := analysisinternal.Format(pass.Fset, e) + xfmt := astutil.Format(pass.Fset, x) + efmt := astutil.Format(pass.Fset, e) if prev, found := seen[xfmt]; found { // checkRedundant handles the case in which efmt == prev. if efmt != prev { diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go index 6e32f298dc25e3..7dd4f249e258cf 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go @@ -14,7 +14,7 @@ import ( "unicode" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" + "golang.org/x/tools/internal/analysisinternal" ) const Doc = "check //go:build and // +build directives" @@ -86,7 +86,7 @@ func checkOtherFile(pass *analysis.Pass, filename string) error { // We cannot use the Go parser, since this may not be a Go source file. // Read the raw bytes instead. - content, tf, err := analysisutil.ReadFile(pass, filename) + content, tf, err := analysisinternal.ReadFile(pass, filename) if err != nil { return err } diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall.go index d9189b5b69608f..bf1202b92b73d4 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall.go @@ -18,7 +18,7 @@ import ( "strconv" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/typesinternal" ) const debug = false @@ -41,7 +41,7 @@ var Analyzer = &analysis.Analyzer{ } func run(pass *analysis.Pass) (any, error) { - if !analysisinternal.Imports(pass.Pkg, "runtime/cgo") { + if !typesinternal.Imports(pass.Pkg, "runtime/cgo") { return nil, nil // doesn't use cgo } diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go index d35b85f03a721b..4190cc5900f682 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go @@ -16,8 +16,9 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/typeparams" + "golang.org/x/tools/internal/typesinternal" "golang.org/x/tools/internal/versions" ) @@ -86,7 +87,7 @@ func checkCopyLocksAssign(pass *analysis.Pass, assign *ast.AssignStmt, goversion lhs := assign.Lhs for i, x := range assign.Rhs { if path := lockPathRhs(pass, x); path != nil { - 
pass.ReportRangef(x, "assignment copies lock value to %v: %v", analysisinternal.Format(pass.Fset, assign.Lhs[i]), path) + pass.ReportRangef(x, "assignment copies lock value to %v: %v", astutil.Format(pass.Fset, assign.Lhs[i]), path) lhs = nil // An lhs has been reported. We prefer the assignment warning and do not report twice. } } @@ -100,7 +101,7 @@ func checkCopyLocksAssign(pass *analysis.Pass, assign *ast.AssignStmt, goversion if id, ok := l.(*ast.Ident); ok && id.Name != "_" { if obj := pass.TypesInfo.Defs[id]; obj != nil && obj.Type() != nil { if path := lockPath(pass.Pkg, obj.Type(), nil); path != nil { - pass.ReportRangef(l, "for loop iteration copies lock value to %v: %v", analysisinternal.Format(pass.Fset, l), path) + pass.ReportRangef(l, "for loop iteration copies lock value to %v: %v", astutil.Format(pass.Fset, l), path) } } } @@ -132,7 +133,7 @@ func checkCopyLocksCompositeLit(pass *analysis.Pass, cl *ast.CompositeLit) { x = node.Value } if path := lockPathRhs(pass, x); path != nil { - pass.ReportRangef(x, "literal copies lock value from %v: %v", analysisinternal.Format(pass.Fset, x), path) + pass.ReportRangef(x, "literal copies lock value from %v: %v", astutil.Format(pass.Fset, x), path) } } } @@ -166,7 +167,7 @@ func checkCopyLocksCallExpr(pass *analysis.Pass, ce *ast.CallExpr) { } for _, x := range ce.Args { if path := lockPathRhs(pass, x); path != nil { - pass.ReportRangef(x, "call of %s copies lock value: %v", analysisinternal.Format(pass.Fset, ce.Fun), path) + pass.ReportRangef(x, "call of %s copies lock value: %v", astutil.Format(pass.Fset, ce.Fun), path) } } } @@ -233,7 +234,7 @@ func checkCopyLocksRangeVar(pass *analysis.Pass, rtok token.Token, e ast.Expr) { return } if path := lockPath(pass.Pkg, typ, nil); path != nil { - pass.Reportf(e.Pos(), "range var %s copies lock: %v", analysisinternal.Format(pass.Fset, e), path) + pass.Reportf(e.Pos(), "range var %s copies lock: %v", astutil.Format(pass.Fset, e), path) } } @@ -353,7 +354,7 @@ func lockPath(tpkg *types.Package, typ types.Type, seen map[types.Type]bool) typ // In go1.10, sync.noCopy did not implement Locker. // (The Unlock method was added only in CL 121876.) // TODO(adonovan): remove workaround when we drop go1.10. 
- if analysisinternal.IsTypeNamed(typ, "sync", "noCopy") { + if typesinternal.IsTypeNamed(typ, "sync", "noCopy") { return []string{typ.String()} } diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/defers/defers.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/defers/defers.go index e11957f2d099d1..3069ee9fecd847 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/defers/defers.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/defers/defers.go @@ -10,10 +10,10 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/typesinternal" ) //go:embed doc.go @@ -23,20 +23,20 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "defers", Requires: []*analysis.Analyzer{inspect.Analyzer}, + Doc: analysisinternal.MustExtractDoc(doc, "defers"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/defers", - Doc: analysisutil.MustExtractDoc(doc, "defers"), Run: run, } func run(pass *analysis.Pass) (any, error) { - if !analysisinternal.Imports(pass.Pkg, "time") { + if !typesinternal.Imports(pass.Pkg, "time") { return nil, nil } checkDeferCall := func(node ast.Node) bool { switch v := node.(type) { case *ast.CallExpr: - if analysisinternal.IsFunctionNamed(typeutil.Callee(pass.TypesInfo, v), "time", "Since") { + if typesinternal.IsFunctionNamed(typeutil.Callee(pass.TypesInfo, v), "time", "Since") { pass.Reportf(v.Pos(), "call to time.Since is not deferred") } case *ast.FuncLit: diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/directive/directive.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/directive/directive.go index bebec891408fef..c84d25842e3018 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/directive/directive.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/directive/directive.go @@ -14,7 +14,7 @@ import ( "unicode/utf8" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" + "golang.org/x/tools/internal/analysisinternal" ) const Doc = `check Go toolchain directives such as //go:debug @@ -86,7 +86,7 @@ func checkGoFile(pass *analysis.Pass, f *ast.File) { func checkOtherFile(pass *analysis.Pass, filename string) error { // We cannot use the Go parser, since is not a Go source file. // Read the raw bytes instead. 
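The defers pass above reports "call to time.Since is not deferred". A hedged sketch of the mistake it targets; the logging is illustrative:

package example

import (
	"log"
	"time"
)

func work() {
	start := time.Now()

	// flagged: time.Since(start) is evaluated when the defer statement
	// executes, i.e. immediately, not when work returns.
	defer log.Println("took", time.Since(start))

	// correct: defer a closure so time.Since runs at return time.
	defer func() { log.Println("took", time.Since(start)) }()
}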
- content, tf, err := analysisutil.ReadFile(pass, filename) + content, tf, err := analysisinternal.ReadFile(pass, filename) if err != nil { return err } diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/errorsas/errorsas.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/errorsas/errorsas.go index b8d29d019db0b0..b3df99929dc6fc 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/errorsas/errorsas.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/errorsas/errorsas.go @@ -12,22 +12,20 @@ import ( "go/types" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/analysisinternal" + typeindexanalyzer "golang.org/x/tools/internal/analysisinternal/typeindex" + "golang.org/x/tools/internal/typesinternal/typeindex" ) const Doc = `report passing non-pointer or non-error values to errors.As -The errorsas analysis reports calls to errors.As where the type +The errorsas analyzer reports calls to errors.As where the type of the second argument is not a pointer to a type implementing error.` var Analyzer = &analysis.Analyzer{ Name: "errorsas", Doc: Doc, URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/errorsas", - Requires: []*analysis.Analyzer{inspect.Analyzer}, + Requires: []*analysis.Analyzer{typeindexanalyzer.Analyzer}, Run: run, } @@ -39,38 +37,31 @@ func run(pass *analysis.Pass) (any, error) { return nil, nil } - if !analysisinternal.Imports(pass.Pkg, "errors") { - return nil, nil // doesn't directly import errors - } - - inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + var ( + index = pass.ResultOf[typeindexanalyzer.Analyzer].(*typeindex.Index) + info = pass.TypesInfo + ) - nodeFilter := []ast.Node{ - (*ast.CallExpr)(nil), - } - inspect.Preorder(nodeFilter, func(n ast.Node) { - call := n.(*ast.CallExpr) - obj := typeutil.Callee(pass.TypesInfo, call) - if !analysisinternal.IsFunctionNamed(obj, "errors", "As") { - return - } + for curCall := range index.Calls(index.Object("errors", "As")) { + call := curCall.Node().(*ast.CallExpr) if len(call.Args) < 2 { - return // not enough arguments, e.g. called with return values of another function + continue // spread call: errors.As(pair()) } - if err := checkAsTarget(pass, call.Args[1]); err != nil { + + // Check for incorrect arguments. + if err := checkAsTarget(info, call.Args[1]); err != nil { pass.ReportRangef(call, "%v", err) + continue } - }) + } return nil, nil } -var errorType = types.Universe.Lookup("error").Type() - // checkAsTarget reports an error if the second argument to errors.As is invalid. -func checkAsTarget(pass *analysis.Pass, e ast.Expr) error { - t := pass.TypesInfo.Types[e].Type - if it, ok := t.Underlying().(*types.Interface); ok && it.NumMethods() == 0 { - // A target of interface{} is always allowed, since it often indicates +func checkAsTarget(info *types.Info, e ast.Expr) error { + t := info.Types[e].Type + if types.Identical(t.Underlying(), anyType) { + // A target of any is always allowed, since it often indicates // a value forwarded from another source. 
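The rewritten checkAsTarget above accepts a pointer to any interface type or to a concrete error implementation, and rejects *error and other targets. A hedged sketch of calls it does and does not report; MyErr is illustrative:

package example

import "errors"

type MyErr struct{ msg string }

func (e *MyErr) Error() string { return e.msg }

func classify(err error) {
	var target *MyErr
	_ = errors.As(err, &target) // ok: pointer to a type implementing error

	var anyTarget any
	_ = errors.As(err, &anyTarget) // ok: a target of any is always allowed

	var e error
	_ = errors.As(err, &e) // flagged: second argument should not be *error

	// flagged (and a run-time panic): *string points neither to an error
	// implementation nor to an interface type.
	var s string
	_ = errors.As(err, &s)
}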
return nil } @@ -78,12 +69,16 @@ func checkAsTarget(pass *analysis.Pass, e ast.Expr) error { if !ok { return errors.New("second argument to errors.As must be a non-nil pointer to either a type that implements error, or to any interface type") } - if pt.Elem() == errorType { + if types.Identical(pt.Elem(), errorType) { return errors.New("second argument to errors.As should not be *error") } - _, ok = pt.Elem().Underlying().(*types.Interface) - if ok || types.Implements(pt.Elem(), errorType.Underlying().(*types.Interface)) { - return nil + if !types.IsInterface(pt.Elem()) && !types.AssignableTo(pt.Elem(), errorType) { + return errors.New("second argument to errors.As must be a non-nil pointer to either a type that implements error, or to any interface type") } - return errors.New("second argument to errors.As must be a non-nil pointer to either a type that implements error, or to any interface type") + return nil } + +var ( + anyType = types.Universe.Lookup("any").Type() + errorType = types.Universe.Lookup("error").Type() +) diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/framepointer/framepointer.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/framepointer/framepointer.go index ff9c8b4f818b10..809095d40a5456 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/framepointer/framepointer.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/framepointer/framepointer.go @@ -13,7 +13,7 @@ import ( "unicode" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" + "golang.org/x/tools/internal/analysisinternal" ) const Doc = "report assembly that clobbers the frame pointer before saving it" @@ -98,7 +98,7 @@ func run(pass *analysis.Pass) (any, error) { } for _, fname := range sfiles { - content, tf, err := analysisutil.ReadFile(pass, fname) + content, tf, err := analysisinternal.ReadFile(pass, fname) if err != nil { return nil, err } @@ -127,7 +127,7 @@ func run(pass *analysis.Pass) (any, error) { } if arch.isFPWrite(line) { - pass.Reportf(analysisutil.LineStart(tf, lineno), "frame pointer is clobbered before saving") + pass.Reportf(tf.LineStart(lineno), "frame pointer is clobbered before saving") active = false continue } diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/httpresponse/httpresponse.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/httpresponse/httpresponse.go index e9acd96547e1a7..37ecb6523bd1ca 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/httpresponse/httpresponse.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/httpresponse/httpresponse.go @@ -13,7 +13,6 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/analysisinternal" "golang.org/x/tools/internal/typesinternal" ) @@ -46,7 +45,7 @@ func run(pass *analysis.Pass) (any, error) { // Fast path: if the package doesn't import net/http, // skip the traversal. - if !analysisinternal.Imports(pass.Pkg, "net/http") { + if !typesinternal.Imports(pass.Pkg, "net/http") { return nil, nil } @@ -118,7 +117,7 @@ func isHTTPFuncOrMethodOnClient(info *types.Info, expr *ast.CallExpr) bool { return false // the function called does not return two values. 
} isPtr, named := typesinternal.ReceiverNamed(res.At(0)) - if !isPtr || named == nil || !analysisinternal.IsTypeNamed(named, "net/http", "Response") { + if !isPtr || named == nil || !typesinternal.IsTypeNamed(named, "net/http", "Response") { return false // the first return type is not *http.Response. } @@ -133,11 +132,11 @@ func isHTTPFuncOrMethodOnClient(info *types.Info, expr *ast.CallExpr) bool { return ok && id.Name == "http" // function in net/http package. } - if analysisinternal.IsTypeNamed(typ, "net/http", "Client") { + if typesinternal.IsTypeNamed(typ, "net/http", "Client") { return true // method on http.Client. } ptr, ok := types.Unalias(typ).(*types.Pointer) - return ok && analysisinternal.IsTypeNamed(ptr.Elem(), "net/http", "Client") // method on *http.Client. + return ok && typesinternal.IsTypeNamed(ptr.Elem(), "net/http", "Client") // method on *http.Client. } // restOfBlock, given a traversal stack, finds the innermost containing diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/ifaceassert.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/ifaceassert.go index 4022dbe7c22cce..a6dcf1cf8e8e1d 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/ifaceassert.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/ifaceassert.go @@ -11,8 +11,8 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/internal/analysisinternal" "golang.org/x/tools/internal/typeparams" ) @@ -21,7 +21,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "ifaceassert", - Doc: analysisutil.MustExtractDoc(doc, "ifaceassert"), + Doc: analysisinternal.MustExtractDoc(doc, "ifaceassert"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/ifaceassert", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/internal/analysisutil/util.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/internal/analysisutil/util.go deleted file mode 100644 index d3df898d3011cd..00000000000000 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/internal/analysisutil/util.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package analysisutil defines various helper functions -// used by two or more packages beneath go/analysis. -package analysisutil - -import ( - "go/ast" - "go/token" - "go/types" - "os" - - "golang.org/x/tools/go/analysis" - "golang.org/x/tools/internal/analysisinternal" -) - -// HasSideEffects reports whether evaluation of e has side effects. -func HasSideEffects(info *types.Info, e ast.Expr) bool { - safe := true - ast.Inspect(e, func(node ast.Node) bool { - switch n := node.(type) { - case *ast.CallExpr: - typVal := info.Types[n.Fun] - switch { - case typVal.IsType(): - // Type conversion, which is safe. - case typVal.IsBuiltin(): - // Builtin func, conservatively assumed to not - // be safe for now. - safe = false - return false - default: - // A non-builtin func or method call. - // Conservatively assume that all of them have - // side effects for now. 
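The httpresponse hunks above only switch helper packages; the pass still reports deferring resp.Body.Close (or otherwise using resp) before the error from a net/http call has been checked. A hedged sketch; fetch and url are illustrative:

package example

import "net/http"

func fetch(url string) error {
	resp, err := http.Get(url)
	defer resp.Body.Close() // flagged: resp may be nil when err != nil
	if err != nil {
		return err
	}
	// correct order: check err first, then defer resp.Body.Close().
	return nil
}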
- safe = false - return false - } - case *ast.UnaryExpr: - if n.Op == token.ARROW { - safe = false - return false - } - } - return true - }) - return !safe -} - -// ReadFile reads a file and adds it to the FileSet -// so that we can report errors against it using lineStart. -func ReadFile(pass *analysis.Pass, filename string) ([]byte, *token.File, error) { - readFile := pass.ReadFile - if readFile == nil { - readFile = os.ReadFile - } - content, err := readFile(filename) - if err != nil { - return nil, nil, err - } - tf := pass.Fset.AddFile(filename, -1, len(content)) - tf.SetLinesForContent(content) - return content, tf, nil -} - -// LineStart returns the position of the start of the specified line -// within file f, or NoPos if there is no line of that number. -func LineStart(f *token.File, line int) token.Pos { - // Use binary search to find the start offset of this line. - // - // TODO(adonovan): eventually replace this function with the - // simpler and more efficient (*go/token.File).LineStart, added - // in go1.12. - - min := 0 // inclusive - max := f.Size() // exclusive - for { - offset := (min + max) / 2 - pos := f.Pos(offset) - posn := f.Position(pos) - if posn.Line == line { - return pos - (token.Pos(posn.Column) - 1) - } - - if min+1 >= max { - return token.NoPos - } - - if posn.Line < line { - min = offset - } else { - max = offset - } - } -} - -var MustExtractDoc = analysisinternal.MustExtractDoc diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/loopclosure/loopclosure.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/loopclosure/loopclosure.go index 2580a0ac21f1f9..868226328fc5e1 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/loopclosure/loopclosure.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/loopclosure/loopclosure.go @@ -11,7 +11,6 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" "golang.org/x/tools/internal/analysisinternal" @@ -24,7 +23,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "loopclosure", - Doc: analysisutil.MustExtractDoc(doc, "loopclosure"), + Doc: analysisinternal.MustExtractDoc(doc, "loopclosure"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/loopclosure", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, @@ -369,5 +368,5 @@ func isMethodCall(info *types.Info, expr ast.Expr, pkgPath, typeName, method str // Check that the receiver is a . or // *.. 
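The loopclosure hunks here only change how the doc string and receiver check are obtained. As a reminder, and hedged because the report depends on the file's effective Go version (pre-1.22 loop-variable semantics), this is the kind of capture it reports; names are illustrative:

package example

import "fmt"

func launch(items []string) {
	for _, item := range items {
		go func() {
			// flagged for pre-1.22 language versions: the func literal
			// captures the loop variable item, which those versions
			// reuse across iterations.
			fmt.Println(item)
		}()
	}
}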
_, named := typesinternal.ReceiverNamed(recv) - return analysisinternal.IsTypeNamed(named, pkgPath, typeName) + return typesinternal.IsTypeNamed(named, pkgPath, typeName) } diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/lostcancel/lostcancel.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/lostcancel/lostcancel.go index c0746789e9cb2a..dfaecf51e25a2b 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/lostcancel/lostcancel.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/lostcancel/lostcancel.go @@ -13,11 +13,11 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/ctrlflow" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/cfg" "golang.org/x/tools/internal/analysisinternal" "golang.org/x/tools/internal/astutil" + "golang.org/x/tools/internal/typesinternal" ) //go:embed doc.go @@ -25,7 +25,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "lostcancel", - Doc: analysisutil.MustExtractDoc(doc, "lostcancel"), + Doc: analysisinternal.MustExtractDoc(doc, "lostcancel"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/lostcancel", Run: run, Requires: []*analysis.Analyzer{ @@ -50,7 +50,7 @@ var contextPackage = "context" // checkLostCancel analyzes a single named or literal function. func run(pass *analysis.Pass) (any, error) { // Fast path: bypass check if file doesn't use context.WithCancel. - if !analysisinternal.Imports(pass.Pkg, contextPackage) { + if !typesinternal.Imports(pass.Pkg, contextPackage) { return nil, nil } diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/nilfunc/nilfunc.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/nilfunc/nilfunc.go index fa1883b0c3402c..2b5a7c80378828 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/nilfunc/nilfunc.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/nilfunc/nilfunc.go @@ -14,8 +14,8 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/internal/analysisinternal" "golang.org/x/tools/internal/typesinternal" ) @@ -24,7 +24,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "nilfunc", - Doc: analysisutil.MustExtractDoc(doc, "nilfunc"), + Doc: analysisinternal.MustExtractDoc(doc, "nilfunc"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/nilfunc", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/printf/doc.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/printf/doc.go index eebf40208d1816..f04e44143412a4 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/printf/doc.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/printf/doc.go @@ -82,6 +82,16 @@ // ... // } // +// A local function may also be inferred as a printf wrapper. If it +// is assigned to a variable, each call made through that variable will +// be checked just like a call to a function: +// +// logf := func(format string, args ...any) { +// message := fmt.Sprintf(format, args...) 
+// log.Printf("%s: %s", prefix, message) +// } +// logf("%s", 123) // logf format %s has arg 123 of wrong type int +// // # Specifying printf wrappers by flag // // The -funcs flag specifies a comma-separated list of names of diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go index f008eca36fe3c1..910ffe70d7e33a 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go @@ -18,13 +18,14 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" + "golang.org/x/tools/go/ast/edge" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" "golang.org/x/tools/internal/analysisinternal" "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/fmtstr" "golang.org/x/tools/internal/typeparams" + "golang.org/x/tools/internal/typesinternal" "golang.org/x/tools/internal/versions" ) @@ -37,11 +38,11 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "printf", - Doc: analysisutil.MustExtractDoc(doc, "printf"), + Doc: analysisinternal.MustExtractDoc(doc, "printf"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/printf", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, - ResultType: reflect.TypeOf((*Result)(nil)), + ResultType: reflect.TypeFor[*Result](), FactTypes: []analysis.Fact{new(isWrapper)}, } @@ -70,7 +71,7 @@ func (kind Kind) String() string { // Result is the printf analyzer's result type. Clients may query the result // to learn whether a function behaves like fmt.Print or fmt.Printf. type Result struct { - funcs map[*types.Func]Kind + funcs map[types.Object]Kind } // Kind reports whether fn behaves like fmt.Print or fmt.Printf. @@ -111,149 +112,210 @@ func (f *isWrapper) String() string { func run(pass *analysis.Pass) (any, error) { res := &Result{ - funcs: make(map[*types.Func]Kind), + funcs: make(map[types.Object]Kind), } - findPrintfLike(pass, res) - checkCalls(pass) + findPrintLike(pass, res) + checkCalls(pass, res) return res, nil } -type printfWrapper struct { - obj *types.Func - fdecl *ast.FuncDecl - format *types.Var - args *types.Var +// A wrapper is a candidate print/printf wrapper function. +// +// We represent functions generally as types.Object, not *Func, so +// that we can analyze anonymous functions such as +// +// printf := func(format string, args ...any) {...}, +// +// representing them by the *types.Var symbol for the local variable +// 'printf'. +type wrapper struct { + obj types.Object // *Func or *Var + curBody inspector.Cursor // for *ast.BlockStmt + format *types.Var // optional "format string" parameter in the Func{Decl,Lit} + args *types.Var // "args ...any" parameter in the Func{Decl,Lit} callers []printfCaller - failed bool // if true, not a printf wrapper } type printfCaller struct { - w *printfWrapper + w *wrapper call *ast.CallExpr } -// maybePrintfWrapper decides whether decl (a declared function) may be a wrapper -// around a fmt.Printf or fmt.Print function. If so it returns a printfWrapper -// function describing the declaration. Later processing will analyze the -// graph of potential printf wrappers to pick out the ones that are true wrappers. -// A function may be a Printf or Print wrapper if its last argument is ...interface{}. 
-// If the next-to-last argument is a string, then this may be a Printf wrapper. -// Otherwise it may be a Print wrapper. -func maybePrintfWrapper(info *types.Info, decl ast.Decl) *printfWrapper { - // Look for functions with final argument type ...interface{}. - fdecl, ok := decl.(*ast.FuncDecl) - if !ok || fdecl.Body == nil { - return nil - } - fn, ok := info.Defs[fdecl.Name].(*types.Func) - // Type information may be incomplete. - if !ok { - return nil - } - - sig := fn.Type().(*types.Signature) +// formatArgsParams returns the "format string" and "args ...any" +// parameters of a potential print or printf wrapper function. +// (The format is nil in the print-like case.) +func formatArgsParams(sig *types.Signature) (format, args *types.Var) { if !sig.Variadic() { - return nil // not variadic + return nil, nil // not variadic } params := sig.Params() nparams := params.Len() // variadic => nonzero - // Check final parameter is "args ...interface{}". - args := params.At(nparams - 1) - iface, ok := types.Unalias(args.Type().(*types.Slice).Elem()).(*types.Interface) - if !ok || !iface.Empty() { - return nil - } - // Is second last param 'format string'? - var format *types.Var if nparams >= 2 { if p := params.At(nparams - 2); p.Type() == types.Typ[types.String] { format = p } } - return &printfWrapper{ - obj: fn, - fdecl: fdecl, - format: format, - args: args, + // Check final parameter is "args ...any". + // (variadic => slice) + args = params.At(nparams - 1) + iface, ok := types.Unalias(args.Type().(*types.Slice).Elem()).(*types.Interface) + if !ok || !iface.Empty() { + return nil, nil } + + return format, args } -// findPrintfLike scans the entire package to find printf-like functions. -func findPrintfLike(pass *analysis.Pass, res *Result) (any, error) { - // Gather potential wrappers and call graph between them. - byObj := make(map[*types.Func]*printfWrapper) - var wrappers []*printfWrapper - for _, file := range pass.Files { - for _, decl := range file.Decls { - w := maybePrintfWrapper(pass.TypesInfo, decl) - if w == nil { - continue +// findPrintLike scans the entire package to find print or printf-like functions. +// When it returns, all such functions have been identified. +func findPrintLike(pass *analysis.Pass, res *Result) { + var ( + inspect = pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + info = pass.TypesInfo + ) + + // Pass 1: gather candidate wrapper functions (and populate wrappers). + var ( + wrappers []*wrapper + byObj = make(map[types.Object]*wrapper) + ) + for cur := range inspect.Root().Preorder((*ast.FuncDecl)(nil), (*ast.FuncLit)(nil)) { + var ( + curBody inspector.Cursor // for *ast.BlockStmt + sig *types.Signature + obj types.Object + ) + switch f := cur.Node().(type) { + case *ast.FuncDecl: + // named function or method: + // + // func wrapf(format string, args ...any) {...} + if f.Body != nil { + curBody = cur.ChildAt(edge.FuncDecl_Body, -1) + obj = info.Defs[f.Name] + sig = obj.Type().(*types.Signature) } - byObj[w.obj] = w - wrappers = append(wrappers, w) - } - } - // Walk the graph to figure out which are really printf wrappers. - for _, w := range wrappers { - // Scan function for calls that could be to other printf-like functions. 
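The new wrapper representation above covers anonymous functions bound to variables (printf := func(format string, args ...any) {...}), so calls through such variables are now checked like ordinary printf wrappers. A hedged sketch; warnf is illustrative:

package example

import (
	"fmt"
	"os"
)

// warnf forwards format and args... to fmt.Fprintf, so it is inferred to
// be a printf wrapper even though it is a variable, not a declared function.
var warnf = func(format string, args ...any) {
	fmt.Fprintf(os.Stderr, format, args...)
}

func demo(n int) {
	warnf("%d retries left", n) // ok
	warnf("%s retries left", n) // flagged: %s has arg n of wrong type int
}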
- ast.Inspect(w.fdecl.Body, func(n ast.Node) bool { - if w.failed { - return false + case *ast.FuncLit: + // anonymous function directly assigned to a variable: + // + // var wrapf = func(format string, args ...any) {...} + // wrapf := func(format string, args ...any) {...} + // wrapf = func(format string, args ...any) {...} + // + // The LHS may also be a struct field x.wrapf or + // an imported var pkg.Wrapf. + // + sig = info.TypeOf(f).(*types.Signature) + curBody = cur.ChildAt(edge.FuncLit_Body, -1) + var lhs ast.Expr + switch ek, idx := cur.ParentEdge(); ek { + case edge.ValueSpec_Values: + curName := cur.Parent().ChildAt(edge.ValueSpec_Names, idx) + lhs = curName.Node().(*ast.Ident) + case edge.AssignStmt_Rhs: + curLhs := cur.Parent().ChildAt(edge.AssignStmt_Lhs, idx) + lhs = curLhs.Node().(ast.Expr) } - // TODO: Relax these checks; issue 26555. - if assign, ok := n.(*ast.AssignStmt); ok { - for _, lhs := range assign.Lhs { - if match(pass.TypesInfo, lhs, w.format) || - match(pass.TypesInfo, lhs, w.args) { - // Modifies the format - // string or args in - // some way, so not a - // simple wrapper. - w.failed = true - return false - } + switch lhs := lhs.(type) { + case *ast.Ident: + // variable: wrapf = func(...) + obj = info.ObjectOf(lhs).(*types.Var) + case *ast.SelectorExpr: + if sel, ok := info.Selections[lhs]; ok { + // struct field: x.wrapf = func(...) + obj = sel.Obj().(*types.Var) + } else { + // imported var: pkg.Wrapf = func(...) + obj = info.Uses[lhs.Sel].(*types.Var) } } - if un, ok := n.(*ast.UnaryExpr); ok && un.Op == token.AND { - if match(pass.TypesInfo, un.X, w.format) || - match(pass.TypesInfo, un.X, w.args) { - // Taking the address of the - // format string or args, - // so not a simple wrapper. - w.failed = true - return false + } + if obj != nil { + format, args := formatArgsParams(sig) + if args != nil { + // obj (the symbol for a function/method, or variable + // assigned to an anonymous function) is a potential + // print or printf wrapper. + // + // Later processing will analyze the graph of potential + // wrappers and their function bodies to pick out the + // ones that are true wrappers. + w := &wrapper{ + obj: obj, + curBody: curBody, + format: format, // non-nil => printf + args: args, } + byObj[w.obj] = w + wrappers = append(wrappers, w) } + } + } - call, ok := n.(*ast.CallExpr) - if !ok || len(call.Args) == 0 || !match(pass.TypesInfo, call.Args[len(call.Args)-1], w.args) { - return true - } + // Pass 2: scan the body of each wrapper function + // for calls to other printf-like functions. + // + // Also, reject tricky cases where the parameters + // are potentially mutated by AssignStmt or UnaryExpr. + // TODO: Relax these checks; issue 26555. + for _, w := range wrappers { + scan: + for cur := range w.curBody.Preorder( + (*ast.AssignStmt)(nil), + (*ast.UnaryExpr)(nil), + (*ast.CallExpr)(nil), + ) { + switch n := cur.Node().(type) { + case *ast.AssignStmt: + // If the wrapper updates format or args + // it is not a simple wrapper. + for _, lhs := range n.Lhs { + if w.format != nil && match(info, lhs, w.format) || + match(info, lhs, w.args) { + break scan + } + } - fn, kind := printfNameAndKind(pass, call) - if kind != 0 { - checkPrintfFwd(pass, w, call, kind, res) - return true - } + case *ast.UnaryExpr: + // If the wrapper computes &format or &args, + // it is not a simple wrapper. 
+ if n.Op == token.AND && + (w.format != nil && match(info, n.X, w.format) || + match(info, n.X, w.args)) { + break scan + } - // If the call is to another function in this package, - // maybe we will find out it is printf-like later. - // Remember this call for later checking. - if fn != nil && fn.Pkg() == pass.Pkg && byObj[fn] != nil { - callee := byObj[fn] - callee.callers = append(callee.callers, printfCaller{w, call}) + case *ast.CallExpr: + if len(n.Args) > 0 && match(info, n.Args[len(n.Args)-1], w.args) { + if callee := typeutil.Callee(pass.TypesInfo, n); callee != nil { + + // Call from one wrapper candidate to another? + // Record the edge so that if callee is found to be + // a true wrapper, w will be too. + if w2, ok := byObj[callee]; ok { + w2.callers = append(w2.callers, printfCaller{w, n}) + } + + // Is the candidate a true wrapper, because it calls + // a known print{,f}-like function from the allowlist + // or an imported fact, or another wrapper found + // to be a true wrapper? + // If so, convert all w's callers to kind. + kind := callKind(pass, callee, res) + if kind != KindNone { + checkForward(pass, w, n, kind, res) + } + } + } } - - return true - }) + } } - return nil, nil } func match(info *types.Info, arg ast.Expr, param *types.Var) bool { @@ -261,9 +323,9 @@ func match(info *types.Info, arg ast.Expr, param *types.Var) bool { return ok && info.ObjectOf(id) == param } -// checkPrintfFwd checks that a printf-forwarding wrapper is forwarding correctly. +// checkForward checks that a forwarding wrapper is forwarding correctly. // It diagnoses writing fmt.Printf(format, args) instead of fmt.Printf(format, args...). -func checkPrintfFwd(pass *analysis.Pass, w *printfWrapper, call *ast.CallExpr, kind Kind, res *Result) { +func checkForward(pass *analysis.Pass, w *wrapper, call *ast.CallExpr, kind Kind, res *Result) { matched := kind == KindPrint || kind != KindNone && len(call.Args) >= 2 && match(pass.TypesInfo, call.Args[len(call.Args)-2], w.format) if !matched { @@ -292,18 +354,39 @@ func checkPrintfFwd(pass *analysis.Pass, w *printfWrapper, call *ast.CallExpr, k pass.ReportRangef(call, "missing ... in args forwarded to %s-like function", desc) return } - fn := w.obj - var fact isWrapper - if !pass.ImportObjectFact(fn, &fact) { - fact.Kind = kind - pass.ExportObjectFact(fn, &fact) - res.funcs[fn] = kind + + // If the candidate's print{,f} status becomes known, + // propagate it back to all its so-far known callers. + if res.funcs[w.obj] != kind { + res.funcs[w.obj] = kind + + // Export a fact. + // (This is a no-op for local symbols.) + // We can't export facts on a symbol of another package, + // but we can treat the symbol as a wrapper within + // the current analysis unit. + if w.obj.Pkg() == pass.Pkg { + // Facts are associated with origins. + pass.ExportObjectFact(origin(w.obj), &isWrapper{Kind: kind}) + } + + // Propagate kind back to known callers. for _, caller := range w.callers { - checkPrintfFwd(pass, caller.w, caller.call, kind, res) + checkForward(pass, caller.w, caller.call, kind, res) } } } +func origin(obj types.Object) types.Object { + switch obj := obj.(type) { + case *types.Func: + return obj.Origin() + case *types.Var: + return obj.Origin() + } + return obj +} + // isPrint records the print functions. // If a key ends in 'f' then it is assumed to be a formatted print. 
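checkForward above keeps the classic forwarding diagnostic: a wrapper that passes its args slice without "...". A hedged sketch; infof is illustrative:

package example

import "fmt"

func infof(format string, args ...any) {
	// flagged: missing ... in args forwarded to printf-like function;
	// as written, the whole []any slice is printed as one operand.
	fmt.Printf(format, args)

	// correct: fmt.Printf(format, args...)
}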
// @@ -412,7 +495,7 @@ func stringConstantExpr(pass *analysis.Pass, expr ast.Expr) (string, bool) { // checkCalls triggers the print-specific checks for calls that invoke a print // function. -func checkCalls(pass *analysis.Pass) { +func checkCalls(pass *analysis.Pass, res *Result) { inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) nodeFilter := []ast.Node{ (*ast.File)(nil), @@ -426,48 +509,60 @@ func checkCalls(pass *analysis.Pass) { fileVersion = versions.Lang(versions.FileVersion(pass.TypesInfo, n)) case *ast.CallExpr: - fn, kind := printfNameAndKind(pass, n) - switch kind { - case KindPrintf, KindErrorf: - checkPrintf(pass, fileVersion, kind, n, fn.FullName()) - case KindPrint: - checkPrint(pass, n, fn.FullName()) + if callee := typeutil.Callee(pass.TypesInfo, n); callee != nil { + kind := callKind(pass, callee, res) + switch kind { + case KindPrintf, KindErrorf: + checkPrintf(pass, fileVersion, kind, n, fullname(callee)) + case KindPrint: + checkPrint(pass, n, fullname(callee)) + } } } }) } -func printfNameAndKind(pass *analysis.Pass, call *ast.CallExpr) (fn *types.Func, kind Kind) { - fn, _ = typeutil.Callee(pass.TypesInfo, call).(*types.Func) - if fn == nil { - return nil, 0 +func fullname(obj types.Object) string { + if fn, ok := obj.(*types.Func); ok { + return fn.FullName() } + return obj.Name() +} - // Facts are associated with generic declarations, not instantiations. - fn = fn.Origin() - - _, ok := isPrint[fn.FullName()] +// callKind returns the symbol of the called function +// and its print/printf kind, if any. +// (The symbol may be a var for an anonymous function.) +// The result is memoized in res.funcs. +func callKind(pass *analysis.Pass, obj types.Object, res *Result) Kind { + kind, ok := res.funcs[obj] if !ok { - // Next look up just "printf", for use with -printf.funcs. - _, ok = isPrint[strings.ToLower(fn.Name())] - } - if ok { - if fn.FullName() == "fmt.Errorf" { - kind = KindErrorf - } else if strings.HasSuffix(fn.Name(), "f") { - kind = KindPrintf + // cache miss + _, ok := isPrint[fullname(obj)] + if !ok { + // Next look up just "printf", for use with -printf.funcs. + _, ok = isPrint[strings.ToLower(obj.Name())] + } + if ok { + // well-known printf functions + if fullname(obj) == "fmt.Errorf" { + kind = KindErrorf + } else if strings.HasSuffix(obj.Name(), "f") { + kind = KindPrintf + } else { + kind = KindPrint + } } else { - kind = KindPrint + // imported wrappers + // Facts are associated with generic declarations, not instantiations. + obj = origin(obj) + var fact isWrapper + if pass.ImportObjectFact(obj, &fact) { + kind = fact.Kind + } } - return fn, kind + res.funcs[obj] = kind // cache } - - var fact isWrapper - if pass.ImportObjectFact(fn, &fact) { - return fn, fact.Kind - } - - return fn, KindNone + return kind } // isFormatter reports whether t could satisfy fmt.Formatter. 
@@ -490,7 +585,7 @@ func isFormatter(typ types.Type) bool { sig := fn.Type().(*types.Signature) return sig.Params().Len() == 2 && sig.Results().Len() == 0 && - analysisinternal.IsTypeNamed(sig.Params().At(0).Type(), "fmt", "State") && + typesinternal.IsTypeNamed(sig.Params().At(0).Type(), "fmt", "State") && types.Identical(sig.Params().At(1).Type(), types.Typ[types.Rune]) } @@ -729,7 +824,7 @@ func okPrintfArg(pass *analysis.Pass, call *ast.CallExpr, rng analysis.Range, ma if reason != "" { details = " (" + reason + ")" } - pass.ReportRangef(rng, "%s format %s uses non-int %s%s as argument of *", name, operation.Text, analysisinternal.Format(pass.Fset, arg), details) + pass.ReportRangef(rng, "%s format %s uses non-int %s%s as argument of *", name, operation.Text, astutil.Format(pass.Fset, arg), details) return false } } @@ -756,7 +851,7 @@ func okPrintfArg(pass *analysis.Pass, call *ast.CallExpr, rng analysis.Range, ma } arg := call.Args[verbArgIndex] if isFunctionValue(pass, arg) && verb != 'p' && verb != 'T' { - pass.ReportRangef(rng, "%s format %s arg %s is a func value, not called", name, operation.Text, analysisinternal.Format(pass.Fset, arg)) + pass.ReportRangef(rng, "%s format %s arg %s is a func value, not called", name, operation.Text, astutil.Format(pass.Fset, arg)) return false } if reason, ok := matchArgType(pass, v.typ, arg); !ok { @@ -768,14 +863,14 @@ func okPrintfArg(pass *analysis.Pass, call *ast.CallExpr, rng analysis.Range, ma if reason != "" { details = " (" + reason + ")" } - pass.ReportRangef(rng, "%s format %s has arg %s of wrong type %s%s", name, operation.Text, analysisinternal.Format(pass.Fset, arg), typeString, details) + pass.ReportRangef(rng, "%s format %s has arg %s of wrong type %s%s", name, operation.Text, astutil.Format(pass.Fset, arg), typeString, details) return false } // Detect recursive formatting via value's String/Error methods. // The '#' flag suppresses the methods, except with %x, %X, and %q. 
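The okPrintfArg diagnostics in the hunk above (non-int width for *, func value not called, wrong argument type) correspond to calls like this hedged sketch; the arguments are illustrative:

package example

import "fmt"

func report(width string, now func() int64) {
	fmt.Printf("%*d\n", width, 7)  // flagged: uses non-int width as argument of *
	fmt.Printf("clock: %d\n", now) // flagged: arg now is a func value, not called
	fmt.Printf("%s\n", 42)         // flagged: %s has arg 42 of wrong type int
}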
if v.typ&argString != 0 && v.verb != 'T' && (!strings.Contains(operation.Flags, "#") || strings.ContainsRune("qxX", v.verb)) { if methodName, ok := recursiveStringer(pass, arg); ok { - pass.ReportRangef(rng, "%s format %s with arg %s causes recursive %s method call", name, operation.Text, analysisinternal.Format(pass.Fset, arg), methodName) + pass.ReportRangef(rng, "%s format %s with arg %s causes recursive %s method call", name, operation.Text, astutil.Format(pass.Fset, arg), methodName) return false } } @@ -927,7 +1022,7 @@ func checkPrint(pass *analysis.Pass, call *ast.CallExpr, name string) { if sel, ok := call.Args[0].(*ast.SelectorExpr); ok { if x, ok := sel.X.(*ast.Ident); ok { if x.Name == "os" && strings.HasPrefix(sel.Sel.Name, "Std") { - pass.ReportRangef(call, "%s does not take io.Writer but has first arg %s", name, analysisinternal.Format(pass.Fset, call.Args[0])) + pass.ReportRangef(call, "%s does not take io.Writer but has first arg %s", name, astutil.Format(pass.Fset, call.Args[0])) } } } @@ -961,10 +1056,10 @@ func checkPrint(pass *analysis.Pass, call *ast.CallExpr, name string) { } for _, arg := range args { if isFunctionValue(pass, arg) { - pass.ReportRangef(call, "%s arg %s is a func value, not called", name, analysisinternal.Format(pass.Fset, arg)) + pass.ReportRangef(call, "%s arg %s is a func value, not called", name, astutil.Format(pass.Fset, arg)) } if methodName, ok := recursiveStringer(pass, arg); ok { - pass.ReportRangef(call, "%s arg %s causes recursive call to %s method", name, analysisinternal.Format(pass.Fset, arg), methodName) + pass.ReportRangef(call, "%s arg %s causes recursive call to %s method", name, astutil.Format(pass.Fset, arg), methodName) } } } diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/shift/shift.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/shift/shift.go index 57987b3d203a75..366927326fcede 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/shift/shift.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/shift/shift.go @@ -20,7 +20,7 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/typeparams" ) @@ -123,7 +123,7 @@ func checkLongShift(pass *analysis.Pass, node ast.Node, x, y ast.Expr) { } } if amt >= minSize { - ident := analysisinternal.Format(pass.Fset, x) + ident := astutil.Format(pass.Fset, x) qualifier := "" if len(sizes) > 1 { qualifier = "may be " diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/sigchanyzer/sigchanyzer.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/sigchanyzer/sigchanyzer.go index 78a2fa5ea3bd34..934f3913c2763b 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/sigchanyzer/sigchanyzer.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/sigchanyzer/sigchanyzer.go @@ -18,9 +18,9 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/typesinternal" ) //go:embed doc.go @@ -29,14 +29,14 @@ var doc string // Analyzer describes sigchanyzer analysis function detector. 
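The recursive-formatting check above reports String or Error methods that format their own receiver with a string verb. A hedged sketch; Temperature is illustrative:

package example

import "fmt"

type Temperature float64

func (t Temperature) String() string {
	// flagged: %v re-invokes String on t, causing a recursive call.
	return fmt.Sprintf("%v degrees", t)

	// correct: fmt.Sprintf("%v degrees", float64(t))
}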
var Analyzer = &analysis.Analyzer{ Name: "sigchanyzer", - Doc: analysisutil.MustExtractDoc(doc, "sigchanyzer"), + Doc: analysisinternal.MustExtractDoc(doc, "sigchanyzer"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/sigchanyzer", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, } func run(pass *analysis.Pass) (any, error) { - if !analysisinternal.Imports(pass.Pkg, "os/signal") { + if !typesinternal.Imports(pass.Pkg, "os/signal") { return nil, nil // doesn't directly import signal } diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/slog/slog.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/slog/slog.go index c1ac960435d41f..2cb91c73299374 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/slog/slog.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/slog/slog.go @@ -17,10 +17,10 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/typesinternal" ) @@ -29,7 +29,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "slog", - Doc: analysisutil.MustExtractDoc(doc, "slog"), + Doc: analysisinternal.MustExtractDoc(doc, "slog"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/slog", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, @@ -115,10 +115,10 @@ func run(pass *analysis.Pass) (any, error) { default: if unknownArg == nil { pass.ReportRangef(arg, "%s arg %q should be a string or a slog.Attr (possible missing key or value)", - shortName(fn), analysisinternal.Format(pass.Fset, arg)) + shortName(fn), astutil.Format(pass.Fset, arg)) } else { pass.ReportRangef(arg, "%s arg %q should probably be a string or a slog.Attr (previous arg %q cannot be a key)", - shortName(fn), analysisinternal.Format(pass.Fset, arg), analysisinternal.Format(pass.Fset, unknownArg)) + shortName(fn), astutil.Format(pass.Fset, arg), astutil.Format(pass.Fset, unknownArg)) } // Stop here so we report at most one missing key per call. return @@ -158,7 +158,7 @@ func run(pass *analysis.Pass) (any, error) { } func isAttr(t types.Type) bool { - return analysisinternal.IsTypeNamed(t, "log/slog", "Attr") + return typesinternal.IsTypeNamed(t, "log/slog", "Attr") } // shortName returns a name for the function that is shorter than FullName. 
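The slog diagnostics above check the alternating key/value list passed after the message. A hedged sketch; the message text and keys are illustrative, and the final call assumes the pass's usual report for an odd number of arguments:

package example

import "log/slog"

func logEvents(n int) {
	slog.Info("processed", "count", n) // ok: string key followed by a value

	// flagged: n should be a string or a slog.Attr
	// (possible missing key or value).
	slog.Info("processed", n)

	slog.Info("processed", "count") // flagged: the key "count" has no value
}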
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/stdmethods/stdmethods.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/stdmethods/stdmethods.go index a0bdf001abd602..ca303ae5c15c6d 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/stdmethods/stdmethods.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/stdmethods/stdmethods.go @@ -12,8 +12,8 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/internal/analysisinternal" ) //go:embed doc.go @@ -21,7 +21,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "stdmethods", - Doc: analysisutil.MustExtractDoc(doc, "stdmethods"), + Doc: analysisinternal.MustExtractDoc(doc, "stdmethods"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/stdmethods", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/stringintconv/string.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/stringintconv/string.go index 7dbff1e4d8d4ee..19c72d2cf938ae 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/stringintconv/string.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/stringintconv/string.go @@ -13,9 +13,9 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/refactor" "golang.org/x/tools/internal/typeparams" "golang.org/x/tools/internal/typesinternal" ) @@ -25,7 +25,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "stringintconv", - Doc: analysisutil.MustExtractDoc(doc, "stringintconv"), + Doc: analysisinternal.MustExtractDoc(doc, "stringintconv"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/stringintconv", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, @@ -198,7 +198,7 @@ func run(pass *analysis.Pass) (any, error) { // the type has methods, as some {String,GoString,Format} // may change the behavior of fmt.Sprint. 
if len(ttypes) == 1 && len(vtypes) == 1 && types.NewMethodSet(V0).Len() == 0 { - _, prefix, importEdits := analysisinternal.AddImport(pass.TypesInfo, file, "fmt", "fmt", "Sprint", arg.Pos()) + prefix, importEdits := refactor.AddImport(pass.TypesInfo, file, "fmt", "fmt", "Sprint", arg.Pos()) if types.Identical(T0, types.Typ[types.String]) { // string(x) -> fmt.Sprint(x) addFix("Format the number as a decimal", append(importEdits, diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go index 360ba0e74d89d3..eba4e56bb0532b 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go @@ -13,7 +13,6 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" "golang.org/x/tools/internal/analysisinternal" @@ -31,7 +30,7 @@ func init() { var Analyzer = &analysis.Analyzer{ Name: "testinggoroutine", - Doc: analysisutil.MustExtractDoc(doc, "testinggoroutine"), + Doc: analysisinternal.MustExtractDoc(doc, "testinggoroutine"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/testinggoroutine", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, @@ -40,7 +39,7 @@ var Analyzer = &analysis.Analyzer{ func run(pass *analysis.Pass) (any, error) { inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) - if !analysisinternal.Imports(pass.Pkg, "testing") { + if !typesinternal.Imports(pass.Pkg, "testing") { return nil, nil } diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/tests/tests.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/tests/tests.go index d4e9b025324a18..a0ed5ab14e8e14 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/tests/tests.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/tests/tests.go @@ -15,8 +15,8 @@ import ( "unicode/utf8" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/typesinternal" ) //go:embed doc.go @@ -24,7 +24,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "tests", - Doc: analysisutil.MustExtractDoc(doc, "tests"), + Doc: analysisinternal.MustExtractDoc(doc, "tests"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/tests", Run: run, } @@ -258,7 +258,7 @@ func isTestingType(typ types.Type, testingType string) bool { if !ok { return false } - return analysisinternal.IsTypeNamed(ptr.Elem(), "testing", testingType) + return typesinternal.IsTypeNamed(ptr.Elem(), "testing", testingType) } // Validate that fuzz target function's arguments are of accepted types. 
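The stringintconv fix above now uses refactor.AddImport to bring in fmt and rewrite string(x) as fmt.Sprint(x). A hedged sketch of the conversion it targets; describe is illustrative:

package example

import "fmt"

func describe(n int) (string, string) {
	bad := string(n)      // flagged: yields the rune for code point n, not its decimal digits
	good := fmt.Sprint(n) // the suggested fix: format the number as a decimal
	return bad, good
}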
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/timeformat/timeformat.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/timeformat/timeformat.go index 4fdbb2b5415eff..45b6822c17606e 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/timeformat/timeformat.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/timeformat/timeformat.go @@ -16,10 +16,10 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/typesinternal" ) const badFormat = "2006-02-01" @@ -30,7 +30,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "timeformat", - Doc: analysisutil.MustExtractDoc(doc, "timeformat"), + Doc: analysisinternal.MustExtractDoc(doc, "timeformat"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/timeformat", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, @@ -39,7 +39,7 @@ var Analyzer = &analysis.Analyzer{ func run(pass *analysis.Pass) (any, error) { // Note: (time.Time).Format is a method and can be a typeutil.Callee // without directly importing "time". So we cannot just skip this package - // when !analysisutil.Imports(pass.Pkg, "time"). + // when !analysisinternal.Imports(pass.Pkg, "time"). // TODO(taking): Consider using a prepass to collect typeutil.Callees. inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) @@ -50,8 +50,8 @@ func run(pass *analysis.Pass) (any, error) { inspect.Preorder(nodeFilter, func(n ast.Node) { call := n.(*ast.CallExpr) obj := typeutil.Callee(pass.TypesInfo, call) - if !analysisinternal.IsMethodNamed(obj, "time", "Time", "Format") && - !analysisinternal.IsFunctionNamed(obj, "time", "Parse") { + if !typesinternal.IsMethodNamed(obj, "time", "Time", "Format") && + !typesinternal.IsFunctionNamed(obj, "time", "Parse") { return } if len(call.Args) > 0 { diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go index 26e894bd4000eb..4de48c83930648 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go @@ -11,9 +11,9 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" + "golang.org/x/tools/internal/analysisinternal" "golang.org/x/tools/internal/typesinternal" ) @@ -22,7 +22,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "unmarshal", - Doc: analysisutil.MustExtractDoc(doc, "unmarshal"), + Doc: analysisinternal.MustExtractDoc(doc, "unmarshal"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/unmarshal", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, @@ -39,7 +39,7 @@ func run(pass *analysis.Pass) (any, error) { // Note: (*"encoding/json".Decoder).Decode, (* "encoding/gob".Decoder).Decode // and (* "encoding/xml".Decoder).Decode are methods and can be a typeutil.Callee // without directly importing their packages. So we cannot just skip this package - // when !analysisutil.Imports(pass.Pkg, "encoding/..."). + // when !analysisinternal.Imports(pass.Pkg, "encoding/..."). 
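The timeformat hunks keep the badFormat check above: Time.Format or time.Parse called with the yyyy-dd-mm layout 2006-02-01 instead of 2006-01-02. A hedged sketch:

package example

import "time"

func stamp(t time.Time) (string, string) {
	wrong := t.Format("2006-02-01") // flagged: 2006-02-01 should likely be 2006-01-02
	right := t.Format("2006-01-02")
	return wrong, right
}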
// TODO(taking): Consider using a prepass to collect typeutil.Callees. inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unreachable/unreachable.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unreachable/unreachable.go index 317f034992bb5d..668a33529989d1 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unreachable/unreachable.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unreachable/unreachable.go @@ -14,8 +14,9 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/refactor" ) //go:embed doc.go @@ -23,7 +24,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "unreachable", - Doc: analysisutil.MustExtractDoc(doc, "unreachable"), + Doc: analysisinternal.MustExtractDoc(doc, "unreachable"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/unreachable", Requires: []*analysis.Analyzer{inspect.Analyzer}, RunDespiteErrors: true, @@ -188,6 +189,11 @@ func (d *deadState) findDead(stmt ast.Stmt) { case *ast.EmptyStmt: // do not warn about unreachable empty statements default: + var ( + inspect = d.pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + curStmt, _ = inspect.Root().FindNode(stmt) + tokFile = d.pass.Fset.File(stmt.Pos()) + ) // (This call to pass.Report is a frequent source // of diagnostics beyond EOF in a truncated file; // see #71659.) @@ -196,11 +202,8 @@ func (d *deadState) findDead(stmt ast.Stmt) { End: stmt.End(), Message: "unreachable code", SuggestedFixes: []analysis.SuggestedFix{{ - Message: "Remove", - TextEdits: []analysis.TextEdit{{ - Pos: stmt.Pos(), - End: stmt.End(), - }}, + Message: "Remove", + TextEdits: refactor.DeleteStmt(tokFile, curStmt), }}, }) d.reachable = true // silence error about next statement diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go index 57c6da64ff30e3..24ff723390f597 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go @@ -14,9 +14,9 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/typesinternal" ) //go:embed doc.go @@ -24,7 +24,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "unsafeptr", - Doc: analysisutil.MustExtractDoc(doc, "unsafeptr"), + Doc: analysisinternal.MustExtractDoc(doc, "unsafeptr"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/unsafeptr", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, @@ -105,7 +105,7 @@ func isSafeUintptr(info *types.Info, x ast.Expr) bool { } switch sel.Sel.Name { case "Pointer", "UnsafeAddr": - if analysisinternal.IsTypeNamed(info.Types[sel.X].Type, "reflect", "Value") { + if typesinternal.IsTypeNamed(info.Types[sel.X].Type, "reflect", "Value") { return true } } @@ -153,5 +153,5 @@ func hasBasicType(info *types.Info, x ast.Expr, kind types.BasicKind) bool { // isReflectHeader reports whether t is reflect.SliceHeader or reflect.StringHeader. 
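The unreachable hunk above changes only the suggested fix, which now uses refactor.DeleteStmt to remove the whole dead statement. The diagnostic itself is unchanged; a hedged sketch:

package example

import "errors"

func parse(s string) (int, error) {
	if s == "" {
		return 0, errors.New("empty input")
	}
	return len(s), nil
	return -1, nil // flagged: unreachable code; the "Remove" fix deletes this statement
}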
func isReflectHeader(t types.Type) bool { - return analysisinternal.IsTypeNamed(t, "reflect", "SliceHeader", "StringHeader") + return typesinternal.IsTypeNamed(t, "reflect", "SliceHeader", "StringHeader") } diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go index ed4cf7ae0be35b..57ad4f0769924c 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go @@ -23,7 +23,6 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" "golang.org/x/tools/internal/analysisinternal" @@ -34,7 +33,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "unusedresult", - Doc: analysisutil.MustExtractDoc(doc, "unusedresult"), + Doc: analysisinternal.MustExtractDoc(doc, "unusedresult"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/unusedresult", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/waitgroup/waitgroup.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/waitgroup/waitgroup.go index 14c6986eabab3b..88e4cc8677691e 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/waitgroup/waitgroup.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/waitgroup/waitgroup.go @@ -13,10 +13,10 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/typesinternal" ) //go:embed doc.go @@ -24,14 +24,14 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "waitgroup", - Doc: analysisutil.MustExtractDoc(doc, "waitgroup"), + Doc: analysisinternal.MustExtractDoc(doc, "waitgroup"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/waitgroup", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, } func run(pass *analysis.Pass) (any, error) { - if !analysisinternal.Imports(pass.Pkg, "sync") { + if !typesinternal.Imports(pass.Pkg, "sync") { return nil, nil // doesn't directly import sync } @@ -44,7 +44,7 @@ func run(pass *analysis.Pass) (any, error) { if push { call := n.(*ast.CallExpr) obj := typeutil.Callee(pass.TypesInfo, call) - if analysisinternal.IsMethodNamed(obj, "sync", "WaitGroup", "Add") && + if typesinternal.IsMethodNamed(obj, "sync", "WaitGroup", "Add") && hasSuffix(stack, wantSuffix) && backindex(stack, 1) == backindex(stack, 2).(*ast.BlockStmt).List[0] { // ExprStmt must be Block's first stmt diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go index 7b805b882bf2df..b407bc7791547f 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go @@ -75,7 +75,6 @@ type Config struct { VetxOutput string // where to write file of fact information Stdout string // write stdout (e.g. 
JSON, unified diff) to this file SucceedOnTypecheckFailure bool // obsolete awful hack; see #18395 and below - WarnDiagnostics bool // printing diagnostics should not cause a non-zero exit } // Main is the main function of a vet-like analysis tool that must be @@ -87,18 +86,9 @@ type Config struct { // -V=full describe executable for build caching // foo.cfg perform separate modular analyze on the single // unit described by a JSON config file foo.cfg. -// -// Also, subject to approval of proposal #71859: -// // -fix don't print each diagnostic, apply its first fix // -diff don't apply a fix, print the diff (requires -fix) -// -// Additionally, the environment variable GOVET has the value "vet" or -// "fix" depending on whether the command is being invoked by "go vet", -// to report diagnostics, or "go fix", to apply fixes. This is -// necessary so that callers of Main can select their analyzer suite -// before flag parsing. (Vet analyzers must report real code problems, -// whereas Fix analyzers may fix non-problems such as style issues.) +// -json print diagnostics and fixes in JSON form func Main(analyzers ...*analysis.Analyzer) { progname := filepath.Base(os.Args[0]) log.SetFlags(0) @@ -163,7 +153,7 @@ func Run(configFile string, analyzers []*analysis.Analyzer) { // In VetxOnly mode, the analysis is run only for facts. if !cfg.VetxOnly { - code = processResults(fset, cfg.ID, results, cfg.WarnDiagnostics) + code = processResults(fset, cfg.ID, results) } os.Exit(code) @@ -187,7 +177,7 @@ func readConfig(filename string) (*Config, error) { return cfg, nil } -func processResults(fset *token.FileSet, id string, results []result, warnDiagnostics bool) (exit int) { +func processResults(fset *token.FileSet, id string, results []result) (exit int) { if analysisflags.Fix { // Don't print the diagnostics, // but apply all fixes from the root actions. @@ -236,9 +226,7 @@ func processResults(fset *token.FileSet, id string, results []result, warnDiagno for _, res := range results { for _, diag := range res.diagnostics { analysisflags.PrintPlain(os.Stderr, fset, analysisflags.Context, diag) - if !warnDiagnostics { - exit = 1 - } + exit = 1 } } } diff --git a/src/cmd/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go b/src/cmd/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go index cea89d34dac45b..970d7507f02d0a 100644 --- a/src/cmd/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go +++ b/src/cmd/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go @@ -2,166 +2,39 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package analysisinternal provides gopls' internal analyses with a -// number of helper functions that operate on typed syntax trees. +// Package analysisinternal provides helper functions for use in both +// the analysis drivers in go/analysis and gopls, and in various +// analyzers. +// +// TODO(adonovan): this is not ideal as it may lead to unnecessary +// dependencies between drivers and analyzers. Split into analyzerlib +// and driverlib? package analysisinternal import ( - "bytes" "cmp" "fmt" - "go/ast" - "go/printer" - "go/scanner" "go/token" - "go/types" - "iter" - pathpkg "path" + "os" "slices" - "strings" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/moreiters" - "golang.org/x/tools/internal/typesinternal" ) -// Deprecated: this heuristic is ill-defined. -// TODO(adonovan): move to sole use in gopls/internal/cache. 
-func TypeErrorEndPos(fset *token.FileSet, src []byte, start token.Pos) token.Pos { - // Get the end position for the type error. - file := fset.File(start) - if file == nil { - return start - } - if offset := file.PositionFor(start, false).Offset; offset > len(src) { - return start - } else { - src = src[offset:] - } - - // Attempt to find a reasonable end position for the type error. - // - // TODO(rfindley): the heuristic implemented here is unclear. It looks like - // it seeks the end of the primary operand starting at start, but that is not - // quite implemented (for example, given a func literal this heuristic will - // return the range of the func keyword). - // - // We should formalize this heuristic, or deprecate it by finally proposing - // to add end position to all type checker errors. - // - // Nevertheless, ensure that the end position at least spans the current - // token at the cursor (this was golang/go#69505). - end := start - { - var s scanner.Scanner - fset := token.NewFileSet() - f := fset.AddFile("", fset.Base(), len(src)) - s.Init(f, src, nil /* no error handler */, scanner.ScanComments) - pos, tok, lit := s.Scan() - if tok != token.SEMICOLON && token.Pos(f.Base()) <= pos && pos <= token.Pos(f.Base()+f.Size()) { - off := file.Offset(pos) + len(lit) - src = src[off:] - end += token.Pos(off) - } +// ReadFile reads a file and adds it to the FileSet in pass +// so that we can report errors against it using lineStart. +func ReadFile(pass *analysis.Pass, filename string) ([]byte, *token.File, error) { + readFile := pass.ReadFile + if readFile == nil { + readFile = os.ReadFile } - - // Look for bytes that might terminate the current operand. See note above: - // this is imprecise. - if width := bytes.IndexAny(src, " \n,():;[]+-*/"); width > 0 { - end += token.Pos(width) + content, err := readFile(filename) + if err != nil { + return nil, nil, err } - return end -} - -// MatchingIdents finds the names of all identifiers in 'node' that match any of the given types. -// 'pos' represents the position at which the identifiers may be inserted. 'pos' must be within -// the scope of each of identifier we select. Otherwise, we will insert a variable at 'pos' that -// is unrecognized. -func MatchingIdents(typs []types.Type, node ast.Node, pos token.Pos, info *types.Info, pkg *types.Package) map[types.Type][]string { - - // Initialize matches to contain the variable types we are searching for. - matches := make(map[types.Type][]string) - for _, typ := range typs { - if typ == nil { - continue // TODO(adonovan): is this reachable? - } - matches[typ] = nil // create entry - } - - seen := map[types.Object]struct{}{} - ast.Inspect(node, func(n ast.Node) bool { - if n == nil { - return false - } - // Prevent circular definitions. If 'pos' is within an assignment statement, do not - // allow any identifiers in that assignment statement to be selected. Otherwise, - // we could do the following, where 'x' satisfies the type of 'f0': - // - // x := fakeStruct{f0: x} - // - if assign, ok := n.(*ast.AssignStmt); ok && pos > assign.Pos() && pos <= assign.End() { - return false - } - if n.End() > pos { - return n.Pos() <= pos - } - ident, ok := n.(*ast.Ident) - if !ok || ident.Name == "_" { - return true - } - obj := info.Defs[ident] - if obj == nil || obj.Type() == nil { - return true - } - if _, ok := obj.(*types.TypeName); ok { - return true - } - // Prevent duplicates in matches' values. 
- if _, ok = seen[obj]; ok { - return true - } - seen[obj] = struct{}{} - // Find the scope for the given position. Then, check whether the object - // exists within the scope. - innerScope := pkg.Scope().Innermost(pos) - if innerScope == nil { - return true - } - _, foundObj := innerScope.LookupParent(ident.Name, pos) - if foundObj != obj { - return true - } - // The object must match one of the types that we are searching for. - // TODO(adonovan): opt: use typeutil.Map? - if names, ok := matches[obj.Type()]; ok { - matches[obj.Type()] = append(names, ident.Name) - } else { - // If the object type does not exactly match - // any of the target types, greedily find the first - // target type that the object type can satisfy. - for typ := range matches { - if equivalentTypes(obj.Type(), typ) { - matches[typ] = append(matches[typ], ident.Name) - } - } - } - return true - }) - return matches -} - -func equivalentTypes(want, got types.Type) bool { - if types.Identical(want, got) { - return true - } - // Code segment to help check for untyped equality from (golang/go#32146). - if rhs, ok := want.(*types.Basic); ok && rhs.Info()&types.IsUntyped > 0 { - if lhs, ok := got.Underlying().(*types.Basic); ok { - return rhs.Info()&types.IsConstType == lhs.Info()&types.IsConstType - } - } - return types.AssignableTo(want, got) + tf := pass.Fset.AddFile(filename, -1, len(content)) + tf.SetLinesForContent(content) + return content, tf, nil } // A ReadFileFunc is a function that returns the @@ -193,207 +66,6 @@ func CheckReadable(pass *analysis.Pass, filename string) error { return fmt.Errorf("Pass.ReadFile: %s is not among OtherFiles, IgnoredFiles, or names of Files", filename) } -// AddImport checks whether this file already imports pkgpath and that -// the import is in scope at pos. If so, it returns the name under -// which it was imported and no edits. Otherwise, it adds a new import -// of pkgpath, using a name derived from the preferred name, and -// returns the chosen name, a prefix to be concatenated with member to -// form a qualified name, and the edit for the new import. -// -// The member argument indicates the name of the desired symbol within -// the imported package. This is needed in the case when the existing -// import is a dot import, because then it is possible that the -// desired symbol is shadowed by other declarations in the current -// package. If member is not shadowed at pos, AddImport returns (".", -// "", nil). (AddImport accepts the caller's implicit claim that the -// imported package declares member.) -// -// Use a preferredName of "_" to request a blank import; -// member is ignored in this case. -// -// It does not mutate its arguments. -func AddImport(info *types.Info, file *ast.File, preferredName, pkgpath, member string, pos token.Pos) (name, prefix string, newImport []analysis.TextEdit) { - // Find innermost enclosing lexical block. - scope := info.Scopes[file].Innermost(pos) - if scope == nil { - panic("no enclosing lexical block") - } - - // Is there an existing import of this package? - // If so, are we in its scope? (not shadowed) - for _, spec := range file.Imports { - pkgname := info.PkgNameOf(spec) - if pkgname != nil && pkgname.Imported().Path() == pkgpath { - name = pkgname.Name() - if preferredName == "_" { - // Request for blank import; any existing import will do. - return name, "", nil - } - if name == "." { - // The scope of ident must be the file scope. 
- if s, _ := scope.LookupParent(member, pos); s == info.Scopes[file] { - return name, "", nil - } - } else if _, obj := scope.LookupParent(name, pos); obj == pkgname { - return name, name + ".", nil - } - } - } - - // We must add a new import. - - // Ensure we have a fresh name. - newName := preferredName - if preferredName != "_" { - newName = FreshName(scope, pos, preferredName) - } - - // Create a new import declaration either before the first existing - // declaration (which must exist), including its comments; or - // inside the declaration, if it is an import group. - // - // Use a renaming import whenever the preferred name is not - // available, or the chosen name does not match the last - // segment of its path. - newText := fmt.Sprintf("%q", pkgpath) - if newName != preferredName || newName != pathpkg.Base(pkgpath) { - newText = fmt.Sprintf("%s %q", newName, pkgpath) - } - - decl0 := file.Decls[0] - var before ast.Node = decl0 - switch decl0 := decl0.(type) { - case *ast.GenDecl: - if decl0.Doc != nil { - before = decl0.Doc - } - case *ast.FuncDecl: - if decl0.Doc != nil { - before = decl0.Doc - } - } - if gd, ok := before.(*ast.GenDecl); ok && gd.Tok == token.IMPORT && gd.Rparen.IsValid() { - // Have existing grouped import ( ... ) decl. - if IsStdPackage(pkgpath) && len(gd.Specs) > 0 { - // Add spec for a std package before - // first existing spec, followed by - // a blank line if the next one is non-std. - first := gd.Specs[0].(*ast.ImportSpec) - pos = first.Pos() - if !IsStdPackage(first.Path.Value) { - newText += "\n" - } - newText += "\n\t" - } else { - // Add spec at end of group. - pos = gd.Rparen - newText = "\t" + newText + "\n" - } - } else { - // No import decl, or non-grouped import. - // Add a new import decl before first decl. - // (gofmt will merge multiple import decls.) - pos = before.Pos() - newText = "import " + newText + "\n\n" - } - return newName, newName + ".", []analysis.TextEdit{{ - Pos: pos, - End: pos, - NewText: []byte(newText), - }} -} - -// FreshName returns the name of an identifier that is undefined -// at the specified position, based on the preferred name. -func FreshName(scope *types.Scope, pos token.Pos, preferred string) string { - newName := preferred - for i := 0; ; i++ { - if _, obj := scope.LookupParent(newName, pos); obj == nil { - break // fresh - } - newName = fmt.Sprintf("%s%d", preferred, i) - } - return newName -} - -// Format returns a string representation of the node n. -func Format(fset *token.FileSet, n ast.Node) string { - var buf strings.Builder - printer.Fprint(&buf, fset, n) // ignore errors - return buf.String() -} - -// Imports returns true if path is imported by pkg. -func Imports(pkg *types.Package, path string) bool { - for _, imp := range pkg.Imports() { - if imp.Path() == path { - return true - } - } - return false -} - -// IsTypeNamed reports whether t is (or is an alias for) a -// package-level defined type with the given package path and one of -// the given names. It returns false if t is nil. -// -// This function avoids allocating the concatenation of "pkg.Name", -// which is important for the performance of syntax matching. 
-func IsTypeNamed(t types.Type, pkgPath string, names ...string) bool { - if named, ok := types.Unalias(t).(*types.Named); ok { - tname := named.Obj() - return tname != nil && - typesinternal.IsPackageLevel(tname) && - tname.Pkg().Path() == pkgPath && - slices.Contains(names, tname.Name()) - } - return false -} - -// IsPointerToNamed reports whether t is (or is an alias for) a pointer to a -// package-level defined type with the given package path and one of the given -// names. It returns false if t is not a pointer type. -func IsPointerToNamed(t types.Type, pkgPath string, names ...string) bool { - r := typesinternal.Unpointer(t) - if r == t { - return false - } - return IsTypeNamed(r, pkgPath, names...) -} - -// IsFunctionNamed reports whether obj is a package-level function -// defined in the given package and has one of the given names. -// It returns false if obj is nil. -// -// This function avoids allocating the concatenation of "pkg.Name", -// which is important for the performance of syntax matching. -func IsFunctionNamed(obj types.Object, pkgPath string, names ...string) bool { - f, ok := obj.(*types.Func) - return ok && - typesinternal.IsPackageLevel(obj) && - f.Pkg().Path() == pkgPath && - f.Type().(*types.Signature).Recv() == nil && - slices.Contains(names, f.Name()) -} - -// IsMethodNamed reports whether obj is a method defined on a -// package-level type with the given package and type name, and has -// one of the given names. It returns false if obj is nil. -// -// This function avoids allocating the concatenation of "pkg.TypeName.Name", -// which is important for the performance of syntax matching. -func IsMethodNamed(obj types.Object, pkgPath string, typeName string, names ...string) bool { - if fn, ok := obj.(*types.Func); ok { - if recv := fn.Type().(*types.Signature).Recv(); recv != nil { - _, T := typesinternal.ReceiverNamed(recv) - return T != nil && - IsTypeNamed(T, pkgPath, typeName) && - slices.Contains(names, fn.Name()) - } - } - return false -} - // ValidateFixes validates the set of fixes for a single diagnostic. // Any error indicates a bug in the originating analyzer. // @@ -496,172 +168,6 @@ func validateFix(fset *token.FileSet, fix *analysis.SuggestedFix) error { return nil } -// CanImport reports whether one package is allowed to import another. -// -// TODO(adonovan): allow customization of the accessibility relation -// (e.g. for Bazel). -func CanImport(from, to string) bool { - // TODO(adonovan): better segment hygiene. - if to == "internal" || strings.HasPrefix(to, "internal/") { - // Special case: only std packages may import internal/... - // We can't reliably know whether we're in std, so we - // use a heuristic on the first segment. - first, _, _ := strings.Cut(from, "/") - if strings.Contains(first, ".") { - return false // example.com/foo ∉ std - } - if first == "testdata" { - return false // testdata/foo ∉ std - } - } - if strings.HasSuffix(to, "/internal") { - return strings.HasPrefix(from, to[:len(to)-len("/internal")]) - } - if i := strings.LastIndex(to, "/internal/"); i >= 0 { - return strings.HasPrefix(from, to[:i]) - } - return true -} - -// DeleteStmt returns the edits to remove the [ast.Stmt] identified by -// curStmt, if it is contained within a BlockStmt, CaseClause, -// CommClause, or is the STMT in switch STMT; ... {...}. It returns nil otherwise. 
-func DeleteStmt(fset *token.FileSet, curStmt inspector.Cursor) []analysis.TextEdit { - stmt := curStmt.Node().(ast.Stmt) - // if the stmt is on a line by itself delete the whole line - // otherwise just delete the statement. - - // this logic would be a lot simpler with the file contents, and somewhat simpler - // if the cursors included the comments. - - tokFile := fset.File(stmt.Pos()) - lineOf := tokFile.Line - stmtStartLine, stmtEndLine := lineOf(stmt.Pos()), lineOf(stmt.End()) - - var from, to token.Pos - // bounds of adjacent syntax/comments on same line, if any - limits := func(left, right token.Pos) { - if lineOf(left) == stmtStartLine { - from = left - } - if lineOf(right) == stmtEndLine { - to = right - } - } - // TODO(pjw): there are other places a statement might be removed: - // IfStmt = "if" [ SimpleStmt ";" ] Expression Block [ "else" ( IfStmt | Block ) ] . - // (removing the blocks requires more rewriting than this routine would do) - // CommCase = "case" ( SendStmt | RecvStmt ) | "default" . - // (removing the stmt requires more rewriting, and it's unclear what the user means) - switch parent := curStmt.Parent().Node().(type) { - case *ast.SwitchStmt: - limits(parent.Switch, parent.Body.Lbrace) - case *ast.TypeSwitchStmt: - limits(parent.Switch, parent.Body.Lbrace) - if parent.Assign == stmt { - return nil // don't let the user break the type switch - } - case *ast.BlockStmt: - limits(parent.Lbrace, parent.Rbrace) - case *ast.CommClause: - limits(parent.Colon, curStmt.Parent().Parent().Node().(*ast.BlockStmt).Rbrace) - if parent.Comm == stmt { - return nil // maybe the user meant to remove the entire CommClause? - } - case *ast.CaseClause: - limits(parent.Colon, curStmt.Parent().Parent().Node().(*ast.BlockStmt).Rbrace) - case *ast.ForStmt: - limits(parent.For, parent.Body.Lbrace) - - default: - return nil // not one of ours - } - - if prev, found := curStmt.PrevSibling(); found && lineOf(prev.Node().End()) == stmtStartLine { - from = prev.Node().End() // preceding statement ends on same line - } - if next, found := curStmt.NextSibling(); found && lineOf(next.Node().Pos()) == stmtEndLine { - to = next.Node().Pos() // following statement begins on same line - } - // and now for the comments -Outer: - for _, cg := range enclosingFile(curStmt).Comments { - for _, co := range cg.List { - if lineOf(co.End()) < stmtStartLine { - continue - } else if lineOf(co.Pos()) > stmtEndLine { - break Outer // no more are possible - } - if lineOf(co.End()) == stmtStartLine && co.End() < stmt.Pos() { - if !from.IsValid() || co.End() > from { - from = co.End() - continue // maybe there are more - } - } - if lineOf(co.Pos()) == stmtEndLine && co.Pos() > stmt.End() { - if !to.IsValid() || co.Pos() < to { - to = co.Pos() - continue // maybe there are more - } - } - } - } - // if either from or to is valid, just remove the statement - // otherwise remove the line - edit := analysis.TextEdit{Pos: stmt.Pos(), End: stmt.End()} - if from.IsValid() || to.IsValid() { - // remove just the statement. - // we can't tell if there is a ; or whitespace right after the statement - // ideally we'd like to remove the former and leave the latter - // (if gofmt has run, there likely won't be a ;) - // In type switches we know there's a semicolon somewhere after the statement, - // but the extra work for this special case is not worth it, as gofmt will fix it. 
- return []analysis.TextEdit{edit} - } - // remove the whole line - for lineOf(edit.Pos) == stmtStartLine { - edit.Pos-- - } - edit.Pos++ // get back tostmtStartLine - for lineOf(edit.End) == stmtEndLine { - edit.End++ - } - return []analysis.TextEdit{edit} -} - -// Comments returns an iterator over the comments overlapping the specified interval. -func Comments(file *ast.File, start, end token.Pos) iter.Seq[*ast.Comment] { - // TODO(adonovan): optimize use binary O(log n) instead of linear O(n) search. - return func(yield func(*ast.Comment) bool) { - for _, cg := range file.Comments { - for _, co := range cg.List { - if co.Pos() > end { - return - } - if co.End() < start { - continue - } - - if !yield(co) { - return - } - } - } - } -} - -// IsStdPackage reports whether the specified package path belongs to a -// package in the standard library (including internal dependencies). -func IsStdPackage(path string) bool { - // A standard package has no dot in its first segment. - // (It may yet have a dot, e.g. "vendor/golang.org/x/foo".) - slash := strings.IndexByte(path, '/') - if slash < 0 { - slash = len(path) - } - return !strings.Contains(path[:slash], ".") && path != "testdata" -} - // Range returns an [analysis.Range] for the specified start and end positions. func Range(pos, end token.Pos) analysis.Range { return tokenRange{pos, end} @@ -672,9 +178,3 @@ type tokenRange struct{ StartPos, EndPos token.Pos } func (r tokenRange) Pos() token.Pos { return r.StartPos } func (r tokenRange) End() token.Pos { return r.EndPos } - -// enclosingFile returns the syntax tree for the file enclosing c. -func enclosingFile(c inspector.Cursor) *ast.File { - c, _ = moreiters.First(c.Enclosing((*ast.File)(nil))) - return c.Node().(*ast.File) -} diff --git a/src/cmd/vendor/golang.org/x/tools/internal/analysisinternal/extractdoc.go b/src/cmd/vendor/golang.org/x/tools/internal/analysisinternal/extractdoc.go index bfb5900f1b3195..c6cdf5997e2194 100644 --- a/src/cmd/vendor/golang.org/x/tools/internal/analysisinternal/extractdoc.go +++ b/src/cmd/vendor/golang.org/x/tools/internal/analysisinternal/extractdoc.go @@ -35,7 +35,7 @@ import ( // // var Analyzer = &analysis.Analyzer{ // Name: "halting", -// Doc: analysisutil.MustExtractDoc(doc, "halting"), +// Doc: analysisinternal.MustExtractDoc(doc, "halting"), // ... // } func MustExtractDoc(content, name string) string { diff --git a/src/cmd/vendor/golang.org/x/tools/internal/astutil/comment.go b/src/cmd/vendor/golang.org/x/tools/internal/astutil/comment.go index c3a256c987cbc3..7e52aeaaac590a 100644 --- a/src/cmd/vendor/golang.org/x/tools/internal/astutil/comment.go +++ b/src/cmd/vendor/golang.org/x/tools/internal/astutil/comment.go @@ -7,6 +7,7 @@ package astutil import ( "go/ast" "go/token" + "iter" "strings" ) @@ -111,3 +112,24 @@ func Directives(g *ast.CommentGroup) (res []*Directive) { } return } + +// Comments returns an iterator over the comments overlapping the specified interval. +func Comments(file *ast.File, start, end token.Pos) iter.Seq[*ast.Comment] { + // TODO(adonovan): optimize use binary O(log n) instead of linear O(n) search. 
+ return func(yield func(*ast.Comment) bool) { + for _, cg := range file.Comments { + for _, co := range cg.List { + if co.Pos() > end { + return + } + if co.End() < start { + continue + } + + if !yield(co) { + return + } + } + } + } +} diff --git a/src/cmd/vendor/golang.org/x/tools/internal/astutil/equal.go b/src/cmd/vendor/golang.org/x/tools/internal/astutil/equal.go index c945de02d4a130..210f392387b426 100644 --- a/src/cmd/vendor/golang.org/x/tools/internal/astutil/equal.go +++ b/src/cmd/vendor/golang.org/x/tools/internal/astutil/equal.go @@ -26,6 +26,14 @@ func Equal(x, y ast.Node, identical func(x, y *ast.Ident) bool) bool { return equal(reflect.ValueOf(x), reflect.ValueOf(y), identical) } +// EqualSyntax reports whether x and y are equal. +// Identifiers are considered equal if they are spelled the same. +// Comments are ignored. +func EqualSyntax(x, y ast.Expr) bool { + sameName := func(x, y *ast.Ident) bool { return x.Name == y.Name } + return Equal(x, y, sameName) +} + func equal(x, y reflect.Value, identical func(x, y *ast.Ident) bool) bool { // Ensure types are the same if x.Type() != y.Type() { diff --git a/src/cmd/vendor/golang.org/x/tools/internal/astutil/util.go b/src/cmd/vendor/golang.org/x/tools/internal/astutil/util.go index 14189155e4e895..a1c09835041bf0 100644 --- a/src/cmd/vendor/golang.org/x/tools/internal/astutil/util.go +++ b/src/cmd/vendor/golang.org/x/tools/internal/astutil/util.go @@ -6,7 +6,13 @@ package astutil import ( "go/ast" + "go/printer" "go/token" + "strings" + + "golang.org/x/tools/go/ast/edge" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/internal/moreiters" ) // PreorderStack traverses the tree rooted at root, @@ -67,3 +73,47 @@ func NodeContains(n ast.Node, pos token.Pos) bool { } return start <= pos && pos <= end } + +// IsChildOf reports whether cur.ParentEdge is ek. +// +// TODO(adonovan): promote to a method of Cursor. +func IsChildOf(cur inspector.Cursor, ek edge.Kind) bool { + got, _ := cur.ParentEdge() + return got == ek +} + +// EnclosingFile returns the syntax tree for the file enclosing c. +// +// TODO(adonovan): promote this to a method of Cursor. +func EnclosingFile(c inspector.Cursor) *ast.File { + c, _ = moreiters.First(c.Enclosing((*ast.File)(nil))) + return c.Node().(*ast.File) +} + +// DocComment returns the doc comment for a node, if any. +func DocComment(n ast.Node) *ast.CommentGroup { + switch n := n.(type) { + case *ast.FuncDecl: + return n.Doc + case *ast.GenDecl: + return n.Doc + case *ast.ValueSpec: + return n.Doc + case *ast.TypeSpec: + return n.Doc + case *ast.File: + return n.Doc + case *ast.ImportSpec: + return n.Doc + case *ast.Field: + return n.Doc + } + return nil +} + +// Format returns a string representation of the node n. 
+func Format(fset *token.FileSet, n ast.Node) string { + var buf strings.Builder + printer.Fprint(&buf, fset, n) // ignore errors + return buf.String() +} diff --git a/src/cmd/vendor/golang.org/x/tools/internal/diff/lcs/old.go b/src/cmd/vendor/golang.org/x/tools/internal/diff/lcs/old.go index 4c346706a7566c..7b7c5cc677beb4 100644 --- a/src/cmd/vendor/golang.org/x/tools/internal/diff/lcs/old.go +++ b/src/cmd/vendor/golang.org/x/tools/internal/diff/lcs/old.go @@ -378,10 +378,7 @@ func (e *editGraph) twoDone(df, db int) (int, bool) { return 0, false // diagonals cannot overlap } kmin := max(-df, -db+e.delta) - kmax := db + e.delta - if df < kmax { - kmax = df - } + kmax := min(df, db+e.delta) for k := kmin; k <= kmax; k += 2 { x := e.vf.get(df, k) u := e.vb.get(db, k-e.delta) diff --git a/src/cmd/vendor/golang.org/x/tools/internal/diff/lcs/sequence.go b/src/cmd/vendor/golang.org/x/tools/internal/diff/lcs/sequence.go index 2d72d2630435b8..811bb216ea2f24 100644 --- a/src/cmd/vendor/golang.org/x/tools/internal/diff/lcs/sequence.go +++ b/src/cmd/vendor/golang.org/x/tools/internal/diff/lcs/sequence.go @@ -103,11 +103,3 @@ func commonSuffixLenString(a, b string) int { } return i } - -func min(x, y int) int { - if x < y { - return x - } else { - return y - } -} diff --git a/src/cmd/vendor/golang.org/x/tools/internal/packagepath/packagepath.go b/src/cmd/vendor/golang.org/x/tools/internal/packagepath/packagepath.go new file mode 100644 index 00000000000000..fa39a13f9eae51 --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/internal/packagepath/packagepath.go @@ -0,0 +1,49 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package packagepath provides metadata operations on package path +// strings. +package packagepath + +// (This package should not depend on go/ast.) +import "strings" + +// CanImport reports whether one package is allowed to import another. +// +// TODO(adonovan): allow customization of the accessibility relation +// (e.g. for Bazel). +func CanImport(from, to string) bool { + // TODO(adonovan): better segment hygiene. + if to == "internal" || strings.HasPrefix(to, "internal/") { + // Special case: only std packages may import internal/... + // We can't reliably know whether we're in std, so we + // use a heuristic on the first segment. + first, _, _ := strings.Cut(from, "/") + if strings.Contains(first, ".") { + return false // example.com/foo ∉ std + } + if first == "testdata" { + return false // testdata/foo ∉ std + } + } + if strings.HasSuffix(to, "/internal") { + return strings.HasPrefix(from, to[:len(to)-len("/internal")]) + } + if i := strings.LastIndex(to, "/internal/"); i >= 0 { + return strings.HasPrefix(from, to[:i]) + } + return true +} + +// IsStdPackage reports whether the specified package path belongs to a +// package in the standard library (including internal dependencies). +func IsStdPackage(path string) bool { + // A standard package has no dot in its first segment. + // (It may yet have a dot, e.g. "vendor/golang.org/x/foo".) 
+ slash := strings.IndexByte(path, '/') + if slash < 0 { + slash = len(path) + } + return !strings.Contains(path[:slash], ".") && path != "testdata" +} diff --git a/src/cmd/vendor/golang.org/x/tools/internal/refactor/delete.go b/src/cmd/vendor/golang.org/x/tools/internal/refactor/delete.go new file mode 100644 index 00000000000000..6df01d8ef9c779 --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/internal/refactor/delete.go @@ -0,0 +1,484 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package refactor + +// This file defines operations for computing deletion edits. + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + "slices" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/ast/edge" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/internal/astutil" + "golang.org/x/tools/internal/typesinternal" + "golang.org/x/tools/internal/typesinternal/typeindex" +) + +// DeleteVar returns edits to delete the declaration of a variable or +// constant whose defining identifier is curId. +// +// It handles variants including: +// - GenDecl > ValueSpec versus AssignStmt; +// - RHS expression has effects, or not; +// - entire statement/declaration may be eliminated; +// and removes associated comments. +// +// If it cannot make the necessary edits, such as for a function +// parameter or result, it returns nil. +func DeleteVar(tokFile *token.File, info *types.Info, curId inspector.Cursor) []analysis.TextEdit { + switch ek, _ := curId.ParentEdge(); ek { + case edge.ValueSpec_Names: + return deleteVarFromValueSpec(tokFile, info, curId) + + case edge.AssignStmt_Lhs: + return deleteVarFromAssignStmt(tokFile, info, curId) + } + + // e.g. function receiver, parameter, or result, + // or "switch v := expr.(T) {}" (which has no object). + return nil +} + +// deleteVarFromValueSpec returns edits to delete the declaration of a +// variable or constant within a ValueSpec. +// +// Precondition: curId is Ident beneath ValueSpec.Names beneath GenDecl. +// +// See also [deleteVarFromAssignStmt], which has parallel structure. +func deleteVarFromValueSpec(tokFile *token.File, info *types.Info, curIdent inspector.Cursor) []analysis.TextEdit { + var ( + id = curIdent.Node().(*ast.Ident) + curSpec = curIdent.Parent() + spec = curSpec.Node().(*ast.ValueSpec) + ) + + declaresOtherNames := slices.ContainsFunc(spec.Names, func(name *ast.Ident) bool { + return name != id && name.Name != "_" + }) + noRHSEffects := !slices.ContainsFunc(spec.Values, func(rhs ast.Expr) bool { + return !typesinternal.NoEffects(info, rhs) + }) + if !declaresOtherNames && noRHSEffects { + // The spec is no longer needed, either to declare + // other variables, or for its side effects. + return DeleteSpec(tokFile, curSpec) + } + + // The spec is still needed, either for + // at least one LHS, or for effects on RHS. + // Blank out or delete just one LHS. + + _, index := curIdent.ParentEdge() // index of LHS within ValueSpec.Names + + // If there is no RHS, we can delete the LHS. + if len(spec.Values) == 0 { + var pos, end token.Pos + if index == len(spec.Names)-1 { + // Delete final name. + // + // var _, lhs1 T + // ------ + pos = spec.Names[index-1].End() + end = spec.Names[index].End() + } else { + // Delete non-final name. 
+ // + // var lhs0, _ T + // ------ + pos = spec.Names[index].Pos() + end = spec.Names[index+1].Pos() + } + return []analysis.TextEdit{{ + Pos: pos, + End: end, + }} + } + + // If the assignment is n:n and the RHS has no effects, + // we can delete the LHS and its corresponding RHS. + if len(spec.Names) == len(spec.Values) && + typesinternal.NoEffects(info, spec.Values[index]) { + + if index == len(spec.Names)-1 { + // Delete final items. + // + // var _, lhs1 = rhs0, rhs1 + // ------ ------ + return []analysis.TextEdit{ + { + Pos: spec.Names[index-1].End(), + End: spec.Names[index].End(), + }, + { + Pos: spec.Values[index-1].End(), + End: spec.Values[index].End(), + }, + } + } else { + // Delete non-final items. + // + // var lhs0, _ = rhs0, rhs1 + // ------ ------ + return []analysis.TextEdit{ + { + Pos: spec.Names[index].Pos(), + End: spec.Names[index+1].Pos(), + }, + { + Pos: spec.Values[index].Pos(), + End: spec.Values[index+1].Pos(), + }, + } + } + } + + // We cannot delete the RHS. + // Blank out the LHS. + return []analysis.TextEdit{{ + Pos: id.Pos(), + End: id.End(), + NewText: []byte("_"), + }} +} + +// Precondition: curId is Ident beneath AssignStmt.Lhs. +// +// See also [deleteVarFromValueSpec], which has parallel structure. +func deleteVarFromAssignStmt(tokFile *token.File, info *types.Info, curIdent inspector.Cursor) []analysis.TextEdit { + var ( + id = curIdent.Node().(*ast.Ident) + curStmt = curIdent.Parent() + assign = curStmt.Node().(*ast.AssignStmt) + ) + + declaresOtherNames := slices.ContainsFunc(assign.Lhs, func(lhs ast.Expr) bool { + lhsId, ok := lhs.(*ast.Ident) + return ok && lhsId != id && lhsId.Name != "_" + }) + noRHSEffects := !slices.ContainsFunc(assign.Rhs, func(rhs ast.Expr) bool { + return !typesinternal.NoEffects(info, rhs) + }) + if !declaresOtherNames && noRHSEffects { + // The assignment is no longer needed, either to + // declare other variables, or for its side effects. + if edits := DeleteStmt(tokFile, curStmt); edits != nil { + return edits + } + // Statement could not not be deleted in this context. + // Fall back to conservative deletion. + } + + // The assign is still needed, either for + // at least one LHS, or for effects on RHS, + // or because it cannot deleted because of its context. + // Blank out or delete just one LHS. + + // If the assignment is 1:1 and the RHS has no effects, + // we can delete the LHS and its corresponding RHS. + _, index := curIdent.ParentEdge() + if len(assign.Lhs) > 1 && + len(assign.Lhs) == len(assign.Rhs) && + typesinternal.NoEffects(info, assign.Rhs[index]) { + + if index == len(assign.Lhs)-1 { + // Delete final items. + // + // _, lhs1 := rhs0, rhs1 + // ------ ------ + return []analysis.TextEdit{ + { + Pos: assign.Lhs[index-1].End(), + End: assign.Lhs[index].End(), + }, + { + Pos: assign.Rhs[index-1].End(), + End: assign.Rhs[index].End(), + }, + } + } else { + // Delete non-final items. + // + // lhs0, _ := rhs0, rhs1 + // ------ ------ + return []analysis.TextEdit{ + { + Pos: assign.Lhs[index].Pos(), + End: assign.Lhs[index+1].Pos(), + }, + { + Pos: assign.Rhs[index].Pos(), + End: assign.Rhs[index+1].Pos(), + }, + } + } + } + + // We cannot delete the RHS. + // Blank out the LHS. + edits := []analysis.TextEdit{{ + Pos: id.Pos(), + End: id.End(), + NewText: []byte("_"), + }} + + // If this eliminates the final variable declared by + // an := statement, we need to turn it into an = + // assignment to avoid a "no new variables on left + // side of :=" error. 
+ if !declaresOtherNames { + edits = append(edits, analysis.TextEdit{ + Pos: assign.TokPos, + End: assign.TokPos + token.Pos(len(":=")), + NewText: []byte("="), + }) + } + + return edits +} + +// DeleteSpec returns edits to delete the {Type,Value}Spec identified by curSpec. +// +// TODO(adonovan): add test suite. Test for consts as well. +func DeleteSpec(tokFile *token.File, curSpec inspector.Cursor) []analysis.TextEdit { + var ( + spec = curSpec.Node().(ast.Spec) + curDecl = curSpec.Parent() + decl = curDecl.Node().(*ast.GenDecl) + ) + + // If it is the sole spec in the decl, + // delete the entire decl. + if len(decl.Specs) == 1 { + return DeleteDecl(tokFile, curDecl) + } + + // Delete the spec and its comments. + _, index := curSpec.ParentEdge() // index of ValueSpec within GenDecl.Specs + pos, end := spec.Pos(), spec.End() + if doc := astutil.DocComment(spec); doc != nil { + pos = doc.Pos() // leading comment + } + if index == len(decl.Specs)-1 { + // Delete final spec. + if c := eolComment(spec); c != nil { + // var (v int // comment \n) + end = c.End() + } + } else { + // Delete non-final spec. + // var ( a T; b T ) + // ----- + end = decl.Specs[index+1].Pos() + } + return []analysis.TextEdit{{ + Pos: pos, + End: end, + }} +} + +// DeleteDecl returns edits to delete the ast.Decl identified by curDecl. +// +// TODO(adonovan): add test suite. +func DeleteDecl(tokFile *token.File, curDecl inspector.Cursor) []analysis.TextEdit { + decl := curDecl.Node().(ast.Decl) + + ek, _ := curDecl.ParentEdge() + switch ek { + case edge.DeclStmt_Decl: + return DeleteStmt(tokFile, curDecl.Parent()) + + case edge.File_Decls: + pos, end := decl.Pos(), decl.End() + if doc := astutil.DocComment(decl); doc != nil { + pos = doc.Pos() + } + + // Delete free-floating comments on same line as rparen. + // var (...) // comment + var ( + file = curDecl.Parent().Node().(*ast.File) + lineOf = tokFile.Line + declEndLine = lineOf(decl.End()) + ) + for _, cg := range file.Comments { + for _, c := range cg.List { + if c.Pos() < end { + continue // too early + } + commentEndLine := lineOf(c.End()) + if commentEndLine > declEndLine { + break // too late + } else if lineOf(c.Pos()) == declEndLine && commentEndLine == declEndLine { + end = c.End() + } + } + } + + return []analysis.TextEdit{{ + Pos: pos, + End: end, + }} + + default: + panic(fmt.Sprintf("Decl parent is %v, want DeclStmt or File", ek)) + } +} + +// DeleteStmt returns the edits to remove the [ast.Stmt] identified by +// curStmt, if it is contained within a BlockStmt, CaseClause, +// CommClause, or is the STMT in switch STMT; ... {...}. It returns nil otherwise. +func DeleteStmt(tokFile *token.File, curStmt inspector.Cursor) []analysis.TextEdit { + stmt := curStmt.Node().(ast.Stmt) + // if the stmt is on a line by itself delete the whole line + // otherwise just delete the statement. + + // this logic would be a lot simpler with the file contents, and somewhat simpler + // if the cursors included the comments. + + lineOf := tokFile.Line + stmtStartLine, stmtEndLine := lineOf(stmt.Pos()), lineOf(stmt.End()) + + var from, to token.Pos + // bounds of adjacent syntax/comments on same line, if any + limits := func(left, right token.Pos) { + if lineOf(left) == stmtStartLine { + from = left + } + if lineOf(right) == stmtEndLine { + to = right + } + } + // TODO(pjw): there are other places a statement might be removed: + // IfStmt = "if" [ SimpleStmt ";" ] Expression Block [ "else" ( IfStmt | Block ) ] . 
+ // (removing the blocks requires more rewriting than this routine would do) + // CommCase = "case" ( SendStmt | RecvStmt ) | "default" . + // (removing the stmt requires more rewriting, and it's unclear what the user means) + switch parent := curStmt.Parent().Node().(type) { + case *ast.SwitchStmt: + limits(parent.Switch, parent.Body.Lbrace) + case *ast.TypeSwitchStmt: + limits(parent.Switch, parent.Body.Lbrace) + if parent.Assign == stmt { + return nil // don't let the user break the type switch + } + case *ast.BlockStmt: + limits(parent.Lbrace, parent.Rbrace) + case *ast.CommClause: + limits(parent.Colon, curStmt.Parent().Parent().Node().(*ast.BlockStmt).Rbrace) + if parent.Comm == stmt { + return nil // maybe the user meant to remove the entire CommClause? + } + case *ast.CaseClause: + limits(parent.Colon, curStmt.Parent().Parent().Node().(*ast.BlockStmt).Rbrace) + case *ast.ForStmt: + limits(parent.For, parent.Body.Lbrace) + + default: + return nil // not one of ours + } + + if prev, found := curStmt.PrevSibling(); found && lineOf(prev.Node().End()) == stmtStartLine { + from = prev.Node().End() // preceding statement ends on same line + } + if next, found := curStmt.NextSibling(); found && lineOf(next.Node().Pos()) == stmtEndLine { + to = next.Node().Pos() // following statement begins on same line + } + // and now for the comments +Outer: + for _, cg := range astutil.EnclosingFile(curStmt).Comments { + for _, co := range cg.List { + if lineOf(co.End()) < stmtStartLine { + continue + } else if lineOf(co.Pos()) > stmtEndLine { + break Outer // no more are possible + } + if lineOf(co.End()) == stmtStartLine && co.End() < stmt.Pos() { + if !from.IsValid() || co.End() > from { + from = co.End() + continue // maybe there are more + } + } + if lineOf(co.Pos()) == stmtEndLine && co.Pos() > stmt.End() { + if !to.IsValid() || co.Pos() < to { + to = co.Pos() + continue // maybe there are more + } + } + } + } + // if either from or to is valid, just remove the statement + // otherwise remove the line + edit := analysis.TextEdit{Pos: stmt.Pos(), End: stmt.End()} + if from.IsValid() || to.IsValid() { + // remove just the statement. + // we can't tell if there is a ; or whitespace right after the statement + // ideally we'd like to remove the former and leave the latter + // (if gofmt has run, there likely won't be a ;) + // In type switches we know there's a semicolon somewhere after the statement, + // but the extra work for this special case is not worth it, as gofmt will fix it. + return []analysis.TextEdit{edit} + } + // remove the whole line + for lineOf(edit.Pos) == stmtStartLine { + edit.Pos-- + } + edit.Pos++ // get back tostmtStartLine + for lineOf(edit.End) == stmtEndLine { + edit.End++ + } + return []analysis.TextEdit{edit} +} + +// DeleteUnusedVars computes the edits required to delete the +// declarations of any local variables whose last uses are in the +// curDelend subtree, which is about to be deleted. +func DeleteUnusedVars(index *typeindex.Index, info *types.Info, tokFile *token.File, curDelend inspector.Cursor) []analysis.TextEdit { + // TODO(adonovan): we might want to generalize this by + // splitting the two phases below, so that we can gather + // across a whole sequence of deletions then finally compute the + // set of variables that are no longer wanted. + + // Count number of deletions of each var. 
+ delcount := make(map[*types.Var]int) + for curId := range curDelend.Preorder((*ast.Ident)(nil)) { + id := curId.Node().(*ast.Ident) + if v, ok := info.Uses[id].(*types.Var); ok && + typesinternal.GetVarKind(v) == typesinternal.LocalVar { // always false before go1.25 + delcount[v]++ + } + } + + // Delete declaration of each var that became unused. + var edits []analysis.TextEdit + for v, count := range delcount { + if len(slices.Collect(index.Uses(v))) == count { + if curDefId, ok := index.Def(v); ok { + edits = append(edits, DeleteVar(tokFile, info, curDefId)...) + } + } + } + return edits +} + +func eolComment(n ast.Node) *ast.CommentGroup { + // TODO(adonovan): support: + // func f() {...} // comment + switch n := n.(type) { + case *ast.GenDecl: + if !n.TokPos.IsValid() && len(n.Specs) == 1 { + return eolComment(n.Specs[0]) + } + case *ast.ValueSpec: + return n.Comment + case *ast.TypeSpec: + return n.Comment + } + return nil +} diff --git a/src/cmd/vendor/golang.org/x/tools/internal/refactor/imports.go b/src/cmd/vendor/golang.org/x/tools/internal/refactor/imports.go new file mode 100644 index 00000000000000..b5440d896b9bfc --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/internal/refactor/imports.go @@ -0,0 +1,127 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package refactor + +// This file defines operations for computing edits to imports. + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + pathpkg "path" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/internal/packagepath" +) + +// AddImport returns the prefix (either "pkg." or "") that should be +// used to qualify references to the desired symbol (member) imported +// from the specified package, plus any necessary edits to the file's +// import declaration to add a new import. +// +// If the import already exists, and is accessible at pos, AddImport +// returns the existing name and no edits. (If the existing import is +// a dot import, the prefix is "".) +// +// Otherwise, it adds a new import, using a local name derived from +// the preferred name. To request a blank import, use a preferredName +// of "_", and discard the prefix result; member is ignored in this +// case. +// +// AddImport accepts the caller's implicit claim that the imported +// package declares member. +// +// AddImport does not mutate its arguments. +func AddImport(info *types.Info, file *ast.File, preferredName, pkgpath, member string, pos token.Pos) (prefix string, edits []analysis.TextEdit) { + // Find innermost enclosing lexical block. + scope := info.Scopes[file].Innermost(pos) + if scope == nil { + panic("no enclosing lexical block") + } + + // Is there an existing import of this package? + // If so, are we in its scope? (not shadowed) + for _, spec := range file.Imports { + pkgname := info.PkgNameOf(spec) + if pkgname != nil && pkgname.Imported().Path() == pkgpath { + name := pkgname.Name() + if preferredName == "_" { + // Request for blank import; any existing import will do. + return "", nil + } + if name == "." { + // The scope of ident must be the file scope. + if s, _ := scope.LookupParent(member, pos); s == info.Scopes[file] { + return "", nil + } + } else if _, obj := scope.LookupParent(name, pos); obj == pkgname { + return name + ".", nil + } + } + } + + // We must add a new import. + + // Ensure we have a fresh name. 
+ newName := preferredName + if preferredName != "_" { + newName = FreshName(scope, pos, preferredName) + } + + // Create a new import declaration either before the first existing + // declaration (which must exist), including its comments; or + // inside the declaration, if it is an import group. + // + // Use a renaming import whenever the preferred name is not + // available, or the chosen name does not match the last + // segment of its path. + newText := fmt.Sprintf("%q", pkgpath) + if newName != preferredName || newName != pathpkg.Base(pkgpath) { + newText = fmt.Sprintf("%s %q", newName, pkgpath) + } + + decl0 := file.Decls[0] + var before ast.Node = decl0 + switch decl0 := decl0.(type) { + case *ast.GenDecl: + if decl0.Doc != nil { + before = decl0.Doc + } + case *ast.FuncDecl: + if decl0.Doc != nil { + before = decl0.Doc + } + } + if gd, ok := before.(*ast.GenDecl); ok && gd.Tok == token.IMPORT && gd.Rparen.IsValid() { + // Have existing grouped import ( ... ) decl. + if packagepath.IsStdPackage(pkgpath) && len(gd.Specs) > 0 { + // Add spec for a std package before + // first existing spec, followed by + // a blank line if the next one is non-std. + first := gd.Specs[0].(*ast.ImportSpec) + pos = first.Pos() + if !packagepath.IsStdPackage(first.Path.Value) { + newText += "\n" + } + newText += "\n\t" + } else { + // Add spec at end of group. + pos = gd.Rparen + newText = "\t" + newText + "\n" + } + } else { + // No import decl, or non-grouped import. + // Add a new import decl before first decl. + // (gofmt will merge multiple import decls.) + pos = before.Pos() + newText = "import " + newText + "\n\n" + } + return newName + ".", []analysis.TextEdit{{ + Pos: pos, + End: pos, + NewText: []byte(newText), + }} +} diff --git a/src/cmd/vendor/golang.org/x/tools/internal/refactor/refactor.go b/src/cmd/vendor/golang.org/x/tools/internal/refactor/refactor.go new file mode 100644 index 00000000000000..27b975089618b5 --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/internal/refactor/refactor.go @@ -0,0 +1,29 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package refactor provides operators to compute common textual edits +// for refactoring tools. +// +// This package should not use features of the analysis API +// other than [analysis.TextEdit]. +package refactor + +import ( + "fmt" + "go/token" + "go/types" +) + +// FreshName returns the name of an identifier that is undefined +// at the specified position, based on the preferred name. +func FreshName(scope *types.Scope, pos token.Pos, preferred string) string { + newName := preferred + for i := 0; ; i++ { + if _, obj := scope.LookupParent(newName, pos); obj == nil { + break // fresh + } + newName = fmt.Sprintf("%s%d", preferred, i) + } + return newName +} diff --git a/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/fx.go b/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/fx.go new file mode 100644 index 00000000000000..c846a53d5fe7f5 --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/fx.go @@ -0,0 +1,88 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typesinternal + +import ( + "go/ast" + "go/token" + "go/types" +) + +// NoEffects reports whether the expression has no side effects, i.e., it +// does not modify the memory state. 
This function is conservative: it may +// return false even when the expression has no effect. +func NoEffects(info *types.Info, expr ast.Expr) bool { + noEffects := true + ast.Inspect(expr, func(n ast.Node) bool { + switch v := n.(type) { + case nil, *ast.Ident, *ast.BasicLit, *ast.BinaryExpr, *ast.ParenExpr, + *ast.SelectorExpr, *ast.IndexExpr, *ast.SliceExpr, *ast.TypeAssertExpr, + *ast.StarExpr, *ast.CompositeLit, + // non-expressions that may appear within expressions + *ast.KeyValueExpr, + *ast.FieldList, + *ast.Field, + *ast.Ellipsis, + *ast.IndexListExpr: + // No effect. + + case *ast.ArrayType, + *ast.StructType, + *ast.ChanType, + *ast.FuncType, + *ast.MapType, + *ast.InterfaceType: + // Type syntax: no effects, recursively. + // Prune descent. + return false + + case *ast.UnaryExpr: + // Channel send <-ch has effects. + if v.Op == token.ARROW { + noEffects = false + } + + case *ast.CallExpr: + // Type conversion has no effects. + if !info.Types[v.Fun].IsType() { + if CallsPureBuiltin(info, v) { + // A call such as len(e) has no effects of its + // own, though the subexpression e might. + } else { + noEffects = false + } + } + + case *ast.FuncLit: + // A FuncLit has no effects, but do not descend into it. + return false + + default: + // All other expressions have effects + noEffects = false + } + + return noEffects + }) + return noEffects +} + +// CallsPureBuiltin reports whether call is a call of a built-in +// function that is a pure computation over its operands (analogous to +// a + operator). Because it does not depend on program state, it may +// be evaluated at any point--though not necessarily at multiple +// points (consider new, make). +func CallsPureBuiltin(info *types.Info, call *ast.CallExpr) bool { + if id, ok := ast.Unparen(call.Fun).(*ast.Ident); ok { + if b, ok := info.ObjectOf(id).(*types.Builtin); ok { + switch b.Name() { + case "len", "cap", "complex", "imag", "real", "make", "new", "max", "min": + return true + } + // Not: append clear close copy delete panic print println recover + } + } + return false +} diff --git a/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/isnamed.go b/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/isnamed.go new file mode 100644 index 00000000000000..f2affec4fba929 --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/isnamed.go @@ -0,0 +1,71 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typesinternal + +import ( + "go/types" + "slices" +) + +// IsTypeNamed reports whether t is (or is an alias for) a +// package-level defined type with the given package path and one of +// the given names. It returns false if t is nil. +// +// This function avoids allocating the concatenation of "pkg.Name", +// which is important for the performance of syntax matching. +func IsTypeNamed(t types.Type, pkgPath string, names ...string) bool { + if named, ok := types.Unalias(t).(*types.Named); ok { + tname := named.Obj() + return tname != nil && + IsPackageLevel(tname) && + tname.Pkg().Path() == pkgPath && + slices.Contains(names, tname.Name()) + } + return false +} + +// IsPointerToNamed reports whether t is (or is an alias for) a pointer to a +// package-level defined type with the given package path and one of the given +// names. It returns false if t is not a pointer type. 
+func IsPointerToNamed(t types.Type, pkgPath string, names ...string) bool { + r := Unpointer(t) + if r == t { + return false + } + return IsTypeNamed(r, pkgPath, names...) +} + +// IsFunctionNamed reports whether obj is a package-level function +// defined in the given package and has one of the given names. +// It returns false if obj is nil. +// +// This function avoids allocating the concatenation of "pkg.Name", +// which is important for the performance of syntax matching. +func IsFunctionNamed(obj types.Object, pkgPath string, names ...string) bool { + f, ok := obj.(*types.Func) + return ok && + IsPackageLevel(obj) && + f.Pkg().Path() == pkgPath && + f.Type().(*types.Signature).Recv() == nil && + slices.Contains(names, f.Name()) +} + +// IsMethodNamed reports whether obj is a method defined on a +// package-level type with the given package and type name, and has +// one of the given names. It returns false if obj is nil. +// +// This function avoids allocating the concatenation of "pkg.TypeName.Name", +// which is important for the performance of syntax matching. +func IsMethodNamed(obj types.Object, pkgPath string, typeName string, names ...string) bool { + if fn, ok := obj.(*types.Func); ok { + if recv := fn.Type().(*types.Signature).Recv(); recv != nil { + _, T := ReceiverNamed(recv) + return T != nil && + IsTypeNamed(T, pkgPath, typeName) && + slices.Contains(names, fn.Name()) + } + } + return false +} diff --git a/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/types.go b/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/types.go index a5cd7e8dbfcb9d..fef74a78560480 100644 --- a/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/types.go +++ b/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/types.go @@ -2,8 +2,20 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package typesinternal provides access to internal go/types APIs that are not -// yet exported. +// Package typesinternal provides helpful operators for dealing with +// go/types: +// +// - operators for querying typed syntax trees (e.g. [Imports], [IsFunctionNamed]); +// - functions for converting types to strings or syntax (e.g. [TypeExpr], FileQualifier]); +// - helpers for working with the [go/types] API (e.g. [NewTypesInfo]); +// - access to internal go/types APIs that are not yet +// exported (e.g. [SetUsesCgo], [ErrorCodeStartEnd], [VarKind]); and +// - common algorithms related to types (e.g. [TooNewStdSymbols]). +// +// See also: +// - [golang.org/x/tools/internal/astutil], for operations on untyped syntax; +// - [golang.org/x/tools/internal/analysisinernal], for helpers for analyzers; +// - [golang.org/x/tools/internal/refactor], for operators to compute text edits. package typesinternal import ( @@ -13,6 +25,7 @@ import ( "reflect" "unsafe" + "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/internal/aliases" ) @@ -60,6 +73,9 @@ func ErrorCodeStartEnd(err types.Error) (code ErrorCode, start, end token.Pos, o // which is often excessive.) // // If pkg is nil, it is equivalent to [*types.Package.Name]. +// +// TODO(adonovan): all uses of this with TypeString should be +// eliminated when https://go.dev/issues/75604 is resolved. 
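+//
+// A typical use is types.TypeString(t, NameRelativeTo(pkg)), which prints
+// names declared in pkg itself without a package qualifier.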
func NameRelativeTo(pkg *types.Package) types.Qualifier { return func(other *types.Package) string { if pkg != nil && pkg == other { @@ -153,3 +169,31 @@ func NewTypesInfo() *types.Info { FileVersions: map[*ast.File]string{}, } } + +// EnclosingScope returns the innermost block logically enclosing the cursor. +func EnclosingScope(info *types.Info, cur inspector.Cursor) *types.Scope { + for cur := range cur.Enclosing() { + n := cur.Node() + // A function's Scope is associated with its FuncType. + switch f := n.(type) { + case *ast.FuncDecl: + n = f.Type + case *ast.FuncLit: + n = f.Type + } + if b := info.Scopes[n]; b != nil { + return b + } + } + panic("no Scope for *ast.File") +} + +// Imports reports whether path is imported by pkg. +func Imports(pkg *types.Package, path string) bool { + for _, imp := range pkg.Imports() { + if imp.Path() == path { + return true + } + } + return false +} diff --git a/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/varkind.go b/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/varkind.go index e5da0495111ba1..26499cdd2e70fb 100644 --- a/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/varkind.go +++ b/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/varkind.go @@ -2,39 +2,22 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package typesinternal +//go:build go1.25 -// TODO(adonovan): when CL 645115 lands, define the go1.25 version of -// this API that actually does something. +package typesinternal import "go/types" -type VarKind uint8 +type VarKind = types.VarKind const ( - _ VarKind = iota // (not meaningful) - PackageVar // a package-level variable - LocalVar // a local variable - RecvVar // a method receiver variable - ParamVar // a function parameter variable - ResultVar // a function result variable - FieldVar // a struct field + PackageVar = types.PackageVar + LocalVar = types.LocalVar + RecvVar = types.RecvVar + ParamVar = types.ParamVar + ResultVar = types.ResultVar + FieldVar = types.FieldVar ) -func (kind VarKind) String() string { - return [...]string{ - 0: "VarKind(0)", - PackageVar: "PackageVar", - LocalVar: "LocalVar", - RecvVar: "RecvVar", - ParamVar: "ParamVar", - ResultVar: "ResultVar", - FieldVar: "FieldVar", - }[kind] -} - -// GetVarKind returns an invalid VarKind. -func GetVarKind(v *types.Var) VarKind { return 0 } - -// SetVarKind has no effect. -func SetVarKind(v *types.Var, kind VarKind) {} +func GetVarKind(v *types.Var) VarKind { return v.Kind() } +func SetVarKind(v *types.Var, kind VarKind) { v.SetKind(kind) } diff --git a/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/varkind_go124.go b/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/varkind_go124.go new file mode 100644 index 00000000000000..17b1804b4e85e1 --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/varkind_go124.go @@ -0,0 +1,39 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !go1.25 + +package typesinternal + +import "go/types" + +type VarKind uint8 + +const ( + _ VarKind = iota // (not meaningful) + PackageVar // a package-level variable + LocalVar // a local variable + RecvVar // a method receiver variable + ParamVar // a function parameter variable + ResultVar // a function result variable + FieldVar // a struct field +) + +func (kind VarKind) String() string { + return [...]string{ + 0: "VarKind(0)", + PackageVar: "PackageVar", + LocalVar: "LocalVar", + RecvVar: "RecvVar", + ParamVar: "ParamVar", + ResultVar: "ResultVar", + FieldVar: "FieldVar", + }[kind] +} + +// GetVarKind returns an invalid VarKind. +func GetVarKind(v *types.Var) VarKind { return 0 } + +// SetVarKind has no effect. +func SetVarKind(v *types.Var, kind VarKind) {} diff --git a/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go b/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go index d272949c177189..453bba2ad5e840 100644 --- a/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go +++ b/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go @@ -204,23 +204,12 @@ func ZeroExpr(t types.Type, qual types.Qualifier) (_ ast.Expr, isValid bool) { } } -// IsZeroExpr uses simple syntactic heuristics to report whether expr -// is a obvious zero value, such as 0, "", nil, or false. -// It cannot do better without type information. -func IsZeroExpr(expr ast.Expr) bool { - switch e := expr.(type) { - case *ast.BasicLit: - return e.Value == "0" || e.Value == `""` - case *ast.Ident: - return e.Name == "nil" || e.Name == "false" - default: - return false - } -} - // TypeExpr returns syntax for the specified type. References to named types // are qualified by an appropriate (optional) qualifier function. // It may panic for types such as Tuple or Union. +// +// See also https://go.dev/issues/75604, which will provide a robust +// Type-to-valid-Go-syntax formatter. 
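+//
+// For example, for the type *[]int it returns syntax that prints as *[]int,
+// with any named types rendered through qual.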
func TypeExpr(t types.Type, qual types.Qualifier) ast.Expr { switch t := t.(type) { case *types.Basic: diff --git a/src/cmd/vendor/modules.txt b/src/cmd/vendor/modules.txt index 133271355f2caf..f4c60cc3281b78 100644 --- a/src/cmd/vendor/modules.txt +++ b/src/cmd/vendor/modules.txt @@ -28,7 +28,7 @@ golang.org/x/arch/x86/x86asm # golang.org/x/build v0.0.0-20250806225920-b7c66c047964 ## explicit; go 1.23.0 golang.org/x/build/relnote -# golang.org/x/mod v0.28.0 +# golang.org/x/mod v0.29.0 ## explicit; go 1.24.0 golang.org/x/mod/internal/lazyregexp golang.org/x/mod/modfile @@ -43,12 +43,12 @@ golang.org/x/mod/zip ## explicit; go 1.24.0 golang.org/x/sync/errgroup golang.org/x/sync/semaphore -# golang.org/x/sys v0.36.0 +# golang.org/x/sys v0.37.0 ## explicit; go 1.24.0 golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/telemetry v0.0.0-20250908211612-aef8a434d053 +# golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8 ## explicit; go 1.24.0 golang.org/x/telemetry golang.org/x/telemetry/counter @@ -63,7 +63,7 @@ golang.org/x/telemetry/internal/upload # golang.org/x/term v0.34.0 ## explicit; go 1.23.0 golang.org/x/term -# golang.org/x/text v0.29.0 +# golang.org/x/text v0.30.0 ## explicit; go 1.24.0 golang.org/x/text/cases golang.org/x/text/internal @@ -73,7 +73,7 @@ golang.org/x/text/internal/tag golang.org/x/text/language golang.org/x/text/transform golang.org/x/text/unicode/norm -# golang.org/x/tools v0.37.1-0.20250924232827-4df13e317ce4 +# golang.org/x/tools v0.38.1-0.20251015192825-7d9453ccc0f5 ## explicit; go 1.24.0 golang.org/x/tools/cmd/bisect golang.org/x/tools/cover @@ -97,7 +97,6 @@ golang.org/x/tools/go/analysis/passes/hostport golang.org/x/tools/go/analysis/passes/httpresponse golang.org/x/tools/go/analysis/passes/ifaceassert golang.org/x/tools/go/analysis/passes/inspect -golang.org/x/tools/go/analysis/passes/internal/analysisutil golang.org/x/tools/go/analysis/passes/loopclosure golang.org/x/tools/go/analysis/passes/lostcancel golang.org/x/tools/go/analysis/passes/nilfunc @@ -133,6 +132,8 @@ golang.org/x/tools/internal/diff/lcs golang.org/x/tools/internal/facts golang.org/x/tools/internal/fmtstr golang.org/x/tools/internal/moreiters +golang.org/x/tools/internal/packagepath +golang.org/x/tools/internal/refactor golang.org/x/tools/internal/stdlib golang.org/x/tools/internal/typeparams golang.org/x/tools/internal/typesinternal diff --git a/src/cmd/vet/doc.go b/src/cmd/vet/doc.go index 8e72c252ed9984..ca208845615c8b 100644 --- a/src/cmd/vet/doc.go +++ b/src/cmd/vet/doc.go @@ -40,6 +40,7 @@ To list the available checks, run "go tool vet help": directive check Go toolchain directives such as //go:debug errorsas report passing non-pointer or non-error values to errors.As framepointer report assembly that clobbers the frame pointer before saving it + hostport check format of addresses passed to net.Dial httpresponse check for mistakes using HTTP responses ifaceassert detect impossible interface-to-interface type assertions loopclosure check references to loop variables from within nested functions @@ -50,6 +51,7 @@ To list the available checks, run "go tool vet help": sigchanyzer check for unbuffered channel of os.Signal slog check for invalid structured logging calls stdmethods check signature of methods of well-known interfaces + stdversion report uses of too-new standard library symbols stringintconv check for string(int) conversions structtag check that struct field tags conform to reflect.StructTag.Get testinggoroutine report calls to 
(*testing.T).Fatal from goroutines started by a test diff --git a/src/cmd/vet/main.go b/src/cmd/vet/main.go index 49f4e2f3425694..e7164a46b0a323 100644 --- a/src/cmd/vet/main.go +++ b/src/cmd/vet/main.go @@ -7,10 +7,8 @@ package main import ( "cmd/internal/objabi" "cmd/internal/telemetry/counter" - "flag" - - "golang.org/x/tools/go/analysis/unitchecker" + "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/appends" "golang.org/x/tools/go/analysis/passes/asmdecl" "golang.org/x/tools/go/analysis/passes/assign" @@ -46,52 +44,57 @@ import ( "golang.org/x/tools/go/analysis/passes/unsafeptr" "golang.org/x/tools/go/analysis/passes/unusedresult" "golang.org/x/tools/go/analysis/passes/waitgroup" + "golang.org/x/tools/go/analysis/unitchecker" ) func main() { + // Keep consistent with cmd/fix/main.go! counter.Open() objabi.AddVersionFlag() - counter.Inc("vet/invocations") - unitchecker.Main( - appends.Analyzer, - asmdecl.Analyzer, - assign.Analyzer, - atomic.Analyzer, - bools.Analyzer, - buildtag.Analyzer, - cgocall.Analyzer, - composite.Analyzer, - copylock.Analyzer, - defers.Analyzer, - directive.Analyzer, - errorsas.Analyzer, - framepointer.Analyzer, - httpresponse.Analyzer, - hostport.Analyzer, - ifaceassert.Analyzer, - loopclosure.Analyzer, - lostcancel.Analyzer, - nilfunc.Analyzer, - printf.Analyzer, - shift.Analyzer, - sigchanyzer.Analyzer, - slog.Analyzer, - stdmethods.Analyzer, - stdversion.Analyzer, - stringintconv.Analyzer, - structtag.Analyzer, - tests.Analyzer, - testinggoroutine.Analyzer, - timeformat.Analyzer, - unmarshal.Analyzer, - unreachable.Analyzer, - unsafeptr.Analyzer, - unusedresult.Analyzer, - waitgroup.Analyzer, - ) - // It's possible that unitchecker will exit early. In - // those cases the flags won't be counted. - counter.CountFlags("vet/flag:", *flag.CommandLine) + unitchecker.Main(suite...) // (never returns) +} + +// The vet suite analyzers report diagnostics. +// (Diagnostics must describe real problems, but need not +// suggest fixes, and fixes are not necessarily safe to apply.) 
+var suite = []*analysis.Analyzer{ + appends.Analyzer, + asmdecl.Analyzer, + assign.Analyzer, + atomic.Analyzer, + bools.Analyzer, + buildtag.Analyzer, + cgocall.Analyzer, + composite.Analyzer, + copylock.Analyzer, + defers.Analyzer, + directive.Analyzer, + errorsas.Analyzer, + // fieldalignment.Analyzer omitted: too noisy + framepointer.Analyzer, + httpresponse.Analyzer, + hostport.Analyzer, + ifaceassert.Analyzer, + loopclosure.Analyzer, + lostcancel.Analyzer, + nilfunc.Analyzer, + printf.Analyzer, + // shadow.Analyzer omitted: too noisy + shift.Analyzer, + sigchanyzer.Analyzer, + slog.Analyzer, + stdmethods.Analyzer, + stdversion.Analyzer, + stringintconv.Analyzer, + structtag.Analyzer, + tests.Analyzer, + testinggoroutine.Analyzer, + timeformat.Analyzer, + unmarshal.Analyzer, + unreachable.Analyzer, + unsafeptr.Analyzer, + unusedresult.Analyzer, + waitgroup.Analyzer, } diff --git a/src/cmd/vet/testdata/print/print.go b/src/cmd/vet/testdata/print/print.go index e00222c42b5aef..3761da420bea3c 100644 --- a/src/cmd/vet/testdata/print/print.go +++ b/src/cmd/vet/testdata/print/print.go @@ -162,7 +162,7 @@ func PrintfTests() { Printf("hi") // ok const format = "%s %s\n" Printf(format, "hi", "there") - Printf(format, "hi") // ERROR "Printf format %s reads arg #2, but call has 1 arg$" + Printf(format, "hi") // ERROR "Printf format %s reads arg #2, but call has 1 arg" Printf("%s %d %.3v %q", "str", 4) // ERROR "Printf format %.3v reads arg #3, but call has 2 args" f := new(ptrStringer) f.Warn(0, "%s", "hello", 3) // ERROR "Warn call has possible Printf formatting directive %s" diff --git a/src/cmd/vet/vet_test.go b/src/cmd/vet/vet_test.go index 54eabca938c3a9..0d509de528f247 100644 --- a/src/cmd/vet/vet_test.go +++ b/src/cmd/vet/vet_test.go @@ -28,7 +28,8 @@ func TestMain(m *testing.M) { os.Exit(0) } - os.Setenv("GO_VETTEST_IS_VET", "1") // Set for subprocesses to inherit. + // Set for subprocesses to inherit. 
+ os.Setenv("GO_VETTEST_IS_VET", "1") // ignore error os.Exit(m.Run()) } @@ -115,7 +116,7 @@ func TestVet(t *testing.T) { cmd.Env = append(os.Environ(), "GOWORK=off") cmd.Dir = "testdata/rangeloop" cmd.Stderr = new(strings.Builder) // all vet output goes to stderr - cmd.Run() + cmd.Run() // ignore error stderr := cmd.Stderr.(fmt.Stringer).String() filename := filepath.FromSlash("testdata/rangeloop/rangeloop.go") @@ -134,7 +135,7 @@ func TestVet(t *testing.T) { if err := errorCheck(stderr, false, filename, filepath.Base(filename)); err != nil { t.Errorf("error check failed: %s", err) - t.Log("vet stderr:\n", cmd.Stderr) + t.Logf("vet stderr:\n<<%s>>", cmd.Stderr) } }) @@ -146,7 +147,7 @@ func TestVet(t *testing.T) { cmd.Env = append(os.Environ(), "GOWORK=off") cmd.Dir = "testdata/stdversion" cmd.Stderr = new(strings.Builder) // all vet output goes to stderr - cmd.Run() + cmd.Run() // ignore error stderr := cmd.Stderr.(fmt.Stringer).String() filename := filepath.FromSlash("testdata/stdversion/stdversion.go") @@ -165,7 +166,7 @@ func TestVet(t *testing.T) { if err := errorCheck(stderr, false, filename, filepath.Base(filename)); err != nil { t.Errorf("error check failed: %s", err) - t.Log("vet stderr:\n", cmd.Stderr) + t.Logf("vet stderr:\n<<%s>>", cmd.Stderr) } }) } @@ -184,7 +185,7 @@ func cgoEnabled(t *testing.T) bool { func errchk(c *exec.Cmd, files []string, t *testing.T) { output, err := c.CombinedOutput() if _, ok := err.(*exec.ExitError); !ok { - t.Logf("vet output:\n%s", output) + t.Logf("vet output:\n<<%s>>", output) t.Fatal(err) } fullshort := make([]string, 0, len(files)*2) @@ -205,7 +206,6 @@ func TestTags(t *testing.T) { "x testtag y": 1, "othertag": 2, } { - tag, wantFile := tag, wantFile t.Run(tag, func(t *testing.T) { t.Parallel() t.Logf("-tags=%s", tag) @@ -266,7 +266,7 @@ func errorCheck(outStr string, wantAuto bool, fullshort ...string) (err error) { errmsgs, out = partitionStrings(we.prefix, out) } if len(errmsgs) == 0 { - errs = append(errs, fmt.Errorf("%s:%d: missing error %q", we.file, we.lineNum, we.reStr)) + errs = append(errs, fmt.Errorf("%s:%d: missing error %q (prefix: %s)", we.file, we.lineNum, we.reStr, we.prefix)) continue } matched := false diff --git a/src/crypto/internal/entropy/entropy.go b/src/crypto/internal/entropy/entropy.go index 5319e9e47a7455..73fd5298007a11 100644 --- a/src/crypto/internal/entropy/entropy.go +++ b/src/crypto/internal/entropy/entropy.go @@ -3,9 +3,11 @@ // license that can be found in the LICENSE file. // Package entropy provides the passive entropy source for the FIPS 140-3 -// module. It is only used in FIPS mode by [crypto/internal/fips140/drbg.Read]. +// module. It is only used in FIPS mode by [crypto/internal/fips140/drbg.Read] +// from the FIPS 140-3 Go Cryptographic Module v1.0.0. Later versions of the +// module have an internal CPU jitter-based entropy source. // -// This complies with IG 9.3.A, Additional Comment 12, which until January 1, +// This complied with IG 9.3.A, Additional Comment 12, which until January 1, // 2026 allows new modules to meet an [earlier version] of Resolution 2(b): // "A software module that contains an approved DRBG that receives a LOAD // command (or its logical equivalent) with entropy obtained from [...] 
inside diff --git a/src/crypto/internal/fips140/drbg/rand.go b/src/crypto/internal/fips140/drbg/rand.go index c1a3ea0ae658ff..3ccb018e326047 100644 --- a/src/crypto/internal/fips140/drbg/rand.go +++ b/src/crypto/internal/fips140/drbg/rand.go @@ -9,21 +9,53 @@ package drbg import ( - "crypto/internal/entropy" "crypto/internal/fips140" + "crypto/internal/fips140/entropy" "crypto/internal/randutil" "crypto/internal/sysrand" "io" "sync" + "sync/atomic" ) -var drbgs = sync.Pool{ +// memory is a scratch buffer that is accessed between samples by the entropy +// source to expose it to memory access timings. +// +// We reuse it and share it between Seed calls to avoid the significant (~500µs) +// cost of zeroing a new allocation every time. The entropy source accesses it +// using atomics (and doesn't care about its contents). +// +// It should end up in the .noptrbss section, and become backed by physical pages +// at first use. This ensures that programs that do not use the FIPS 140-3 module +// do not incur any memory use or initialization penalties. +var memory entropy.ScratchBuffer + +func getEntropy() *[SeedSize]byte { + var retries int + seed, err := entropy.Seed(&memory) + for err != nil { + // The CPU jitter-based SP 800-90B entropy source has a non-negligible + // chance of failing the startup health tests. + // + // Each time it does, it enters a permanent failure state, and we + // restart it anew. This is not expected to happen more than a few times + // in a row. + if retries++; retries > 100 { + panic("fips140/drbg: failed to obtain initial entropy") + } + seed, err = entropy.Seed(&memory) + } + return &seed +} + +// getEntropy is very slow (~500µs), so we don't want it on the hot path. +// We keep both a persistent DRBG instance and a pool of additional instances. +// Occasional uses will use drbgInstance, even if the pool was emptied since the +// last use. Frequent concurrent uses will fill the pool and use it. +var drbgInstance atomic.Pointer[Counter] +var drbgPool = sync.Pool{ New: func() any { - var c *Counter - entropy.Depleted(func(seed *[48]byte) { - c = NewCounter(seed) - }) - return c + return NewCounter(getEntropy()) }, } @@ -44,8 +76,15 @@ func Read(b []byte) { additionalInput := new([SeedSize]byte) sysrand.Read(additionalInput[:16]) - drbg := drbgs.Get().(*Counter) - defer drbgs.Put(drbg) + drbg := drbgInstance.Swap(nil) + if drbg == nil { + drbg = drbgPool.Get().(*Counter) + } + defer func() { + if !drbgInstance.CompareAndSwap(nil, drbg) { + drbgPool.Put(drbg) + } + }() for len(b) > 0 { size := min(len(b), maxRequestSize) @@ -54,9 +93,7 @@ func Read(b []byte) { // Section 9.3.2: if Generate reports a reseed is required, the // additional input is passed to Reseed along with the entropy and // then nulled before the next Generate call. - entropy.Depleted(func(seed *[48]byte) { - drbg.Reseed(seed, additionalInput) - }) + drbg.Reseed(getEntropy(), additionalInput) additionalInput = nil continue } diff --git a/src/crypto/internal/fips140/entropy/entropy.go b/src/crypto/internal/fips140/entropy/entropy.go new file mode 100644 index 00000000000000..f5b2f53752a028 --- /dev/null +++ b/src/crypto/internal/fips140/entropy/entropy.go @@ -0,0 +1,204 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package entropy implements a CPU jitter-based SP 800-90B entropy source. 
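+//
+// The source measures timing jitter between batches of pseudo-randomly
+// addressed memory writes, applies the SP 800-90B repetition count and
+// adaptive proportion startup health tests, and conditions 1024 raw samples
+// into a 384-bit full-entropy seed with SHA-384.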
+package entropy + +import ( + "crypto/internal/fips140deps/time" + "errors" + "sync/atomic" + "unsafe" +) + +// Version returns the version of the entropy source. +// +// This is independent of the FIPS 140-3 module version, in order to reuse the +// ESV certificate across module versions. +func Version() string { + return "v1.0.0" +} + +// ScratchBuffer is a large buffer that will be written to using atomics, to +// generate noise from memory access timings. Its contents do not matter. +type ScratchBuffer [1 << 25]byte + +// Seed returns a 384-bit seed with full entropy. +// +// memory is passed in to allow changing the allocation strategy without +// modifying the frozen and certified entropy source in this package. +// +// Seed returns an error if the entropy source startup health tests fail, which +// has a non-negligible chance of happening. +func Seed(memory *ScratchBuffer) ([48]byte, error) { + // Collect w = 1024 samples, each certified to provide no less than h = 0.5 + // bits of entropy, for a total of hᵢₙ = w × h = 512 bits of entropy, over + // nᵢₙ = w × n = 8192 bits of input data. + var samples [1024]byte + if err := Samples(samples[:], memory); err != nil { + return [48]byte{}, err + } + + // Use a vetted unkeyed conditioning component, SHA-384, with nw = 384 and + // nₒᵤₜ = 384. Per the formula in SP 800-90B Section 3.1.5.1.2, the output + // entropy hₒᵤₜ is: + // + // sage: n_in = 8192 + // sage: n_out = 384 + // sage: nw = 384 + // sage: h_in = 512 + // sage: P_high = 2^(-h_in) + // sage: P_low = (1 - P_high) / (2^n_in - 1) + // sage: n = min(n_out, nw) + // sage: ψ = 2^(n_in - n) * P_low + P_high + // sage: U = 2^(n_in - n) + sqrt(2 * n * 2^(n_in - n) * ln(2)) + // sage: ω = U * P_low + // sage: h_out = -log(max(ψ, ω), 2) + // sage: h_out.n() + // 384.000000000000 + // + // According to Implementation Guidance D.K, Resolution 19, since + // + // - the conditioning component is vetted, + // - hᵢₙ = 512 ≥ nₒᵤₜ + 64 = 448, and + // - nₒᵤₜ ≤ security strength of SHA-384 = 384 (per SP 800-107 Rev. 1, Table 1), + // + // we can claim the output has full entropy. + return SHA384(&samples), nil +} + +// Samples starts a new entropy source, collects the requested number of +// samples, conducts startup health tests, and returns the samples or an error +// if the health tests fail. +// +// The health tests have a non-negligible chance of failing. +func Samples(samples []uint8, memory *ScratchBuffer) error { + if len(samples) < 1024 { + return errors.New("entropy: at least 1024 samples are required for startup health tests") + } + s := newSource(memory) + for range 4 { + // Warm up the source to avoid any initial bias. + _ = s.Sample() + } + for i := range samples { + samples[i] = s.Sample() + } + if err := RepetitionCountTest(samples); err != nil { + return err + } + if err := AdaptiveProportionTest(samples); err != nil { + return err + } + return nil +} + +type source struct { + memory *ScratchBuffer + lcgState uint32 + previous int64 +} + +func newSource(memory *ScratchBuffer) *source { + return &source{ + memory: memory, + lcgState: uint32(time.HighPrecisionNow()), + previous: time.HighPrecisionNow(), + } +} + +// touchMemory performs a write to memory at the given index. +// +// The memory slice is passed in and may be shared across sources e.g. to avoid +// the significant (~500µs) cost of zeroing a new allocation on every [Seed] call. 
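+//
+// Accesses go through atomic loads and swaps, so sources sharing a buffer do
+// not race; the values written are irrelevant.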
+func touchMemory(memory *ScratchBuffer, idx uint32) { + idx = idx / 4 * 4 // align to 32 bits + u32 := (*uint32)(unsafe.Pointer(&memory[idx])) + last := atomic.LoadUint32(u32) + atomic.SwapUint32(u32, last+13) +} + +func (s *source) Sample() uint8 { + // Perform a few memory accesses in an unpredictable pattern to expose the + // next measurement to as much system noise as possible. + memory, lcgState := s.memory, s.lcgState + if memory == nil { // remove the nil check from the inlined touchMemory calls + panic("entropy: nil memory buffer") + } + for range 64 { + lcgState = 1664525*lcgState + 1013904223 + // Discard the lower bits, which tend to fall into short cycles. + idx := (lcgState >> 6) & (1<<25 - 1) + touchMemory(memory, idx) + } + s.lcgState = lcgState + + t := time.HighPrecisionNow() + sample := t - s.previous + s.previous = t + + // Reduce the symbol space to 256 values, assuming most of the entropy is in + // the least-significant bits, which represent the highest-resolution timing + // differences. + return uint8(sample) +} + +// RepetitionCountTest implements the repetition count test from SP 800-90B +// Section 4.4.1. It returns an error if any symbol is repeated C = 41 or more +// times in a row. +// +// This C value is calculated from a target failure probability α = 2⁻²⁰ and a +// claimed min-entropy per symbol h = 0.5 bits, using the formula in SP 800-90B +// Section 4.4.1. +// +// sage: α = 2^-20 +// sage: H = 0.5 +// sage: 1 + ceil(-log(α, 2) / H) +// 41 +func RepetitionCountTest(samples []uint8) error { + x := samples[0] + count := 1 + for _, y := range samples[1:] { + if y == x { + count++ + if count >= 41 { + return errors.New("entropy: repetition count health test failed") + } + } else { + x = y + count = 1 + } + } + return nil +} + +// AdaptiveProportionTest implements the adaptive proportion test from SP 800-90B +// Section 4.4.2. It returns an error if any symbol appears C = 410 or more +// times in the last W = 512 samples. +// +// This C value is calculated from a target failure probability α = 2⁻²⁰, a +// window size W = 512, and a claimed min-entropy per symbol h = 0.5 bits, using +// the formula in SP 800-90B Section 4.4.2, equivalent to the Microsoft Excel +// formula 1+CRITBINOM(W, power(2,(−H)),1−α). +// +// sage: from scipy.stats import binom +// sage: α = 2^-20 +// sage: H = 0.5 +// sage: W = 512 +// sage: C = 1 + binom.ppf(1 - α, W, 2**(-H)) +// sage: ceil(C) +// 410 +func AdaptiveProportionTest(samples []uint8) error { + var counts [256]int + for i, x := range samples { + counts[x]++ + if i >= 512 { + counts[samples[i-512]]-- + } + if counts[x] >= 410 { + return errors.New("entropy: adaptive proportion health test failed") + } + } + return nil +} diff --git a/src/crypto/internal/fips140/entropy/sha384.go b/src/crypto/internal/fips140/entropy/sha384.go new file mode 100644 index 00000000000000..c20f76b57979af --- /dev/null +++ b/src/crypto/internal/fips140/entropy/sha384.go @@ -0,0 +1,226 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package entropy + +import "math/bits" + +// This file includes a SHA-384 implementation to insulate the entropy source +// from any changes in the FIPS 140-3 module's crypto/internal/fips140/sha512 +// package. We support 1024-byte inputs for the entropy source, and arbitrary +// length inputs for ACVP testing. 
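+//
+// The implementation is a plain, unoptimized Go translation of SHA-512
+// truncated to 384 bits, using the SHA-384 initial hash values (FIPS 180-4).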
+ +var initState = [8]uint64{ + 0xcbbb9d5dc1059ed8, + 0x629a292a367cd507, + 0x9159015a3070dd17, + 0x152fecd8f70e5939, + 0x67332667ffc00b31, + 0x8eb44a8768581511, + 0xdb0c2e0d64f98fa7, + 0x47b5481dbefa4fa4, +} + +func SHA384(p *[1024]byte) [48]byte { + h := initState + + sha384Block(&h, (*[128]byte)(p[0:128])) + sha384Block(&h, (*[128]byte)(p[128:256])) + sha384Block(&h, (*[128]byte)(p[256:384])) + sha384Block(&h, (*[128]byte)(p[384:512])) + sha384Block(&h, (*[128]byte)(p[512:640])) + sha384Block(&h, (*[128]byte)(p[640:768])) + sha384Block(&h, (*[128]byte)(p[768:896])) + sha384Block(&h, (*[128]byte)(p[896:1024])) + + var padlen [128]byte + padlen[0] = 0x80 + bePutUint64(padlen[112+8:], 1024*8) + sha384Block(&h, &padlen) + + return digestBytes(&h) +} + +func TestingOnlySHA384(p []byte) [48]byte { + if len(p) == 1024 { + return SHA384((*[1024]byte)(p)) + } + + h := initState + bitLen := uint64(len(p)) * 8 + + // Process full 128-byte blocks. + for len(p) >= 128 { + sha384Block(&h, (*[128]byte)(p[:128])) + p = p[128:] + } + + // Process final block and padding. + var finalBlock [128]byte + copy(finalBlock[:], p) + finalBlock[len(p)] = 0x80 + if len(p) >= 112 { + sha384Block(&h, &finalBlock) + finalBlock = [128]byte{} + } + bePutUint64(finalBlock[112+8:], bitLen) + sha384Block(&h, &finalBlock) + + return digestBytes(&h) +} + +func digestBytes(h *[8]uint64) [48]byte { + var digest [48]byte + bePutUint64(digest[0:], h[0]) + bePutUint64(digest[8:], h[1]) + bePutUint64(digest[16:], h[2]) + bePutUint64(digest[24:], h[3]) + bePutUint64(digest[32:], h[4]) + bePutUint64(digest[40:], h[5]) + return digest +} + +var _K = [...]uint64{ + 0x428a2f98d728ae22, + 0x7137449123ef65cd, + 0xb5c0fbcfec4d3b2f, + 0xe9b5dba58189dbbc, + 0x3956c25bf348b538, + 0x59f111f1b605d019, + 0x923f82a4af194f9b, + 0xab1c5ed5da6d8118, + 0xd807aa98a3030242, + 0x12835b0145706fbe, + 0x243185be4ee4b28c, + 0x550c7dc3d5ffb4e2, + 0x72be5d74f27b896f, + 0x80deb1fe3b1696b1, + 0x9bdc06a725c71235, + 0xc19bf174cf692694, + 0xe49b69c19ef14ad2, + 0xefbe4786384f25e3, + 0x0fc19dc68b8cd5b5, + 0x240ca1cc77ac9c65, + 0x2de92c6f592b0275, + 0x4a7484aa6ea6e483, + 0x5cb0a9dcbd41fbd4, + 0x76f988da831153b5, + 0x983e5152ee66dfab, + 0xa831c66d2db43210, + 0xb00327c898fb213f, + 0xbf597fc7beef0ee4, + 0xc6e00bf33da88fc2, + 0xd5a79147930aa725, + 0x06ca6351e003826f, + 0x142929670a0e6e70, + 0x27b70a8546d22ffc, + 0x2e1b21385c26c926, + 0x4d2c6dfc5ac42aed, + 0x53380d139d95b3df, + 0x650a73548baf63de, + 0x766a0abb3c77b2a8, + 0x81c2c92e47edaee6, + 0x92722c851482353b, + 0xa2bfe8a14cf10364, + 0xa81a664bbc423001, + 0xc24b8b70d0f89791, + 0xc76c51a30654be30, + 0xd192e819d6ef5218, + 0xd69906245565a910, + 0xf40e35855771202a, + 0x106aa07032bbd1b8, + 0x19a4c116b8d2d0c8, + 0x1e376c085141ab53, + 0x2748774cdf8eeb99, + 0x34b0bcb5e19b48a8, + 0x391c0cb3c5c95a63, + 0x4ed8aa4ae3418acb, + 0x5b9cca4f7763e373, + 0x682e6ff3d6b2b8a3, + 0x748f82ee5defb2fc, + 0x78a5636f43172f60, + 0x84c87814a1f0ab72, + 0x8cc702081a6439ec, + 0x90befffa23631e28, + 0xa4506cebde82bde9, + 0xbef9a3f7b2c67915, + 0xc67178f2e372532b, + 0xca273eceea26619c, + 0xd186b8c721c0c207, + 0xeada7dd6cde0eb1e, + 0xf57d4f7fee6ed178, + 0x06f067aa72176fba, + 0x0a637dc5a2c898a6, + 0x113f9804bef90dae, + 0x1b710b35131c471b, + 0x28db77f523047d84, + 0x32caab7b40c72493, + 0x3c9ebe0a15c9bebc, + 0x431d67c49c100d4c, + 0x4cc5d4becb3e42b6, + 0x597f299cfc657e2a, + 0x5fcb6fab3ad6faec, + 0x6c44198c4a475817, +} + +func sha384Block(dh *[8]uint64, p *[128]byte) { + var w [80]uint64 + for i := range 80 { + if i < 16 { + w[i] = beUint64(p[i*8:]) + } else { 
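+			// Message schedule expansion (FIPS 180-4, Section 6.4.2):
+			//   w[i] = σ1(w[i-2]) + w[i-7] + σ0(w[i-15]) + w[i-16]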
+ v1 := w[i-2] + t1 := bits.RotateLeft64(v1, -19) ^ bits.RotateLeft64(v1, -61) ^ (v1 >> 6) + v2 := w[i-15] + t2 := bits.RotateLeft64(v2, -1) ^ bits.RotateLeft64(v2, -8) ^ (v2 >> 7) + + w[i] = t1 + w[i-7] + t2 + w[i-16] + } + } + + a, b, c, d, e, f, g, h := dh[0], dh[1], dh[2], dh[3], dh[4], dh[5], dh[6], dh[7] + + for i := range 80 { + t1 := h + (bits.RotateLeft64(e, -14) ^ bits.RotateLeft64(e, -18) ^ + bits.RotateLeft64(e, -41)) + ((e & f) ^ (^e & g)) + _K[i] + w[i] + t2 := (bits.RotateLeft64(a, -28) ^ bits.RotateLeft64(a, -34) ^ + bits.RotateLeft64(a, -39)) + ((a & b) ^ (a & c) ^ (b & c)) + + h = g + g = f + f = e + e = d + t1 + d = c + c = b + b = a + a = t1 + t2 + } + + dh[0] += a + dh[1] += b + dh[2] += c + dh[3] += d + dh[4] += e + dh[5] += f + dh[6] += g + dh[7] += h +} + +func beUint64(b []byte) uint64 { + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | + uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56 +} + +func bePutUint64(b []byte, v uint64) { + _ = b[7] // early bounds check to guarantee safety of writes below + b[0] = byte(v >> 56) + b[1] = byte(v >> 48) + b[2] = byte(v >> 40) + b[3] = byte(v >> 32) + b[4] = byte(v >> 24) + b[5] = byte(v >> 16) + b[6] = byte(v >> 8) + b[7] = byte(v) +} diff --git a/src/crypto/internal/fips140/fips140.go b/src/crypto/internal/fips140/fips140.go index d03219b540e27f..76054b00684e2b 100644 --- a/src/crypto/internal/fips140/fips140.go +++ b/src/crypto/internal/fips140/fips140.go @@ -48,6 +48,8 @@ func Supported() error { } // See EnableFIPS in cmd/internal/obj/fips.go for commentary. + // Also, js/wasm and windows/386 don't have good enough timers + // for the CPU jitter entropy source. switch { case runtime.GOARCH == "wasm", runtime.GOOS == "windows" && runtime.GOARCH == "386", diff --git a/src/crypto/internal/fips140/mlkem/cast.go b/src/crypto/internal/fips140/mlkem/cast.go index a432d1fdab0e2b..ea089c1b76c0c0 100644 --- a/src/crypto/internal/fips140/mlkem/cast.go +++ b/src/crypto/internal/fips140/mlkem/cast.go @@ -9,9 +9,10 @@ import ( "crypto/internal/fips140" _ "crypto/internal/fips140/check" "errors" + "sync" ) -func init() { +var fipsSelfTest = sync.OnceFunc(func() { fips140.CAST("ML-KEM-768", func() error { var d = &[32]byte{ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, @@ -40,14 +41,12 @@ func init() { dk := &DecapsulationKey768{} kemKeyGen(dk, d, z) ek := dk.EncapsulationKey() - Ke, c := ek.EncapsulateInternal(m) - Kd, err := dk.Decapsulate(c) - if err != nil { - return err - } + var cc [CiphertextSize768]byte + Ke, _ := kemEncaps(&cc, ek, m) + Kd := kemDecaps(dk, &cc) if !bytes.Equal(Ke, K) || !bytes.Equal(Kd, K) { return errors.New("unexpected result") } return nil }) -} +}) diff --git a/src/crypto/internal/fips140/mlkem/mlkem1024.go b/src/crypto/internal/fips140/mlkem/mlkem1024.go index 1419cf20fa9c67..edde161422cb6f 100644 --- a/src/crypto/internal/fips140/mlkem/mlkem1024.go +++ b/src/crypto/internal/fips140/mlkem/mlkem1024.go @@ -113,6 +113,7 @@ func GenerateKey1024() (*DecapsulationKey1024, error) { } func generateKey1024(dk *DecapsulationKey1024) (*DecapsulationKey1024, error) { + fipsSelfTest() var d [32]byte drbg.Read(d[:]) var z [32]byte @@ -126,6 +127,7 @@ func generateKey1024(dk *DecapsulationKey1024) (*DecapsulationKey1024, error) { // GenerateKeyInternal1024 is a derandomized version of GenerateKey1024, // exclusively for use in tests. 
func GenerateKeyInternal1024(d, z *[32]byte) *DecapsulationKey1024 { + fipsSelfTest() dk := &DecapsulationKey1024{} kemKeyGen1024(dk, d, z) return dk @@ -278,6 +280,7 @@ func (ek *EncapsulationKey1024) Encapsulate() (sharedKey, ciphertext []byte) { } func (ek *EncapsulationKey1024) encapsulate(cc *[CiphertextSize1024]byte) (sharedKey, ciphertext []byte) { + fipsSelfTest() var m [messageSize]byte drbg.Read(m[:]) // Note that the modulus check (step 2 of the encapsulation key check from @@ -289,6 +292,7 @@ func (ek *EncapsulationKey1024) encapsulate(cc *[CiphertextSize1024]byte) (share // EncapsulateInternal is a derandomized version of Encapsulate, exclusively for // use in tests. func (ek *EncapsulationKey1024) EncapsulateInternal(m *[32]byte) (sharedKey, ciphertext []byte) { + fipsSelfTest() cc := &[CiphertextSize1024]byte{} return kemEncaps1024(cc, ek, m) } @@ -394,6 +398,7 @@ func pkeEncrypt1024(cc *[CiphertextSize1024]byte, ex *encryptionKey1024, m *[mes // // The shared key must be kept secret. func (dk *DecapsulationKey1024) Decapsulate(ciphertext []byte) (sharedKey []byte, err error) { + fipsSelfTest() if len(ciphertext) != CiphertextSize1024 { return nil, errors.New("mlkem: invalid ciphertext length") } diff --git a/src/crypto/internal/fips140/mlkem/mlkem768.go b/src/crypto/internal/fips140/mlkem/mlkem768.go index 298660e4e977dd..088c2954de6a5c 100644 --- a/src/crypto/internal/fips140/mlkem/mlkem768.go +++ b/src/crypto/internal/fips140/mlkem/mlkem768.go @@ -172,6 +172,7 @@ func GenerateKey768() (*DecapsulationKey768, error) { } func generateKey(dk *DecapsulationKey768) (*DecapsulationKey768, error) { + fipsSelfTest() var d [32]byte drbg.Read(d[:]) var z [32]byte @@ -185,6 +186,7 @@ func generateKey(dk *DecapsulationKey768) (*DecapsulationKey768, error) { // GenerateKeyInternal768 is a derandomized version of GenerateKey768, // exclusively for use in tests. func GenerateKeyInternal768(d, z *[32]byte) *DecapsulationKey768 { + fipsSelfTest() dk := &DecapsulationKey768{} kemKeyGen(dk, d, z) return dk @@ -337,6 +339,7 @@ func (ek *EncapsulationKey768) Encapsulate() (sharedKey, ciphertext []byte) { } func (ek *EncapsulationKey768) encapsulate(cc *[CiphertextSize768]byte) (sharedKey, ciphertext []byte) { + fipsSelfTest() var m [messageSize]byte drbg.Read(m[:]) // Note that the modulus check (step 2 of the encapsulation key check from @@ -348,6 +351,7 @@ func (ek *EncapsulationKey768) encapsulate(cc *[CiphertextSize768]byte) (sharedK // EncapsulateInternal is a derandomized version of Encapsulate, exclusively for // use in tests. func (ek *EncapsulationKey768) EncapsulateInternal(m *[32]byte) (sharedKey, ciphertext []byte) { + fipsSelfTest() cc := &[CiphertextSize768]byte{} return kemEncaps(cc, ek, m) } @@ -453,6 +457,7 @@ func pkeEncrypt(cc *[CiphertextSize768]byte, ex *encryptionKey, m *[messageSize] // // The shared key must be kept secret. func (dk *DecapsulationKey768) Decapsulate(ciphertext []byte) (sharedKey []byte, err error) { + fipsSelfTest() if len(ciphertext) != CiphertextSize768 { return nil, errors.New("mlkem: invalid ciphertext length") } diff --git a/src/crypto/internal/fips140/subtle/xor_asm.go b/src/crypto/internal/fips140/subtle/xor_asm.go index b07239da3e31c1..bb85aefef4013e 100644 --- a/src/crypto/internal/fips140/subtle/xor_asm.go +++ b/src/crypto/internal/fips140/subtle/xor_asm.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build (amd64 || arm64 || mips || mipsle || mips64 || mips64le || ppc64 || ppc64le || riscv64) && !purego +//go:build (amd64 || arm64 || ppc64 || ppc64le || riscv64) && !purego package subtle diff --git a/src/crypto/internal/fips140/subtle/xor_generic.go b/src/crypto/internal/fips140/subtle/xor_generic.go index ed484bc630e98d..0b31eec60197d3 100644 --- a/src/crypto/internal/fips140/subtle/xor_generic.go +++ b/src/crypto/internal/fips140/subtle/xor_generic.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build (!amd64 && !arm64 && !loong64 && !mips && !mipsle && !mips64 && !mips64le && !ppc64 && !ppc64le && !riscv64) || purego +//go:build (!amd64 && !arm64 && !loong64 && !ppc64 && !ppc64le && !riscv64) || purego package subtle diff --git a/src/crypto/internal/fips140/subtle/xor_mips64x.s b/src/crypto/internal/fips140/subtle/xor_mips64x.s deleted file mode 100644 index e580235914aeaf..00000000000000 --- a/src/crypto/internal/fips140/subtle/xor_mips64x.s +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright 2025 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build (mips64 || mips64le) && !purego - -#include "textflag.h" - -// func xorBytes(dst, a, b *byte, n int) -TEXT ·xorBytes(SB), NOSPLIT|NOFRAME, $0 - MOVV dst+0(FP), R1 - MOVV a+8(FP), R2 - MOVV b+16(FP), R3 - MOVV n+24(FP), R4 - -xor_64_check: - SGTU $64, R4, R5 // R5 = 1 if (64 > R4) - BNE R5, xor_32_check -xor_64: - MOVV (R2), R6 - MOVV 8(R2), R7 - MOVV 16(R2), R8 - MOVV 24(R2), R9 - MOVV (R3), R10 - MOVV 8(R3), R11 - MOVV 16(R3), R12 - MOVV 24(R3), R13 - XOR R6, R10 - XOR R7, R11 - XOR R8, R12 - XOR R9, R13 - MOVV R10, (R1) - MOVV R11, 8(R1) - MOVV R12, 16(R1) - MOVV R13, 24(R1) - MOVV 32(R2), R6 - MOVV 40(R2), R7 - MOVV 48(R2), R8 - MOVV 56(R2), R9 - MOVV 32(R3), R10 - MOVV 40(R3), R11 - MOVV 48(R3), R12 - MOVV 56(R3), R13 - XOR R6, R10 - XOR R7, R11 - XOR R8, R12 - XOR R9, R13 - MOVV R10, 32(R1) - MOVV R11, 40(R1) - MOVV R12, 48(R1) - MOVV R13, 56(R1) - ADDV $64, R2 - ADDV $64, R3 - ADDV $64, R1 - SUBV $64, R4 - SGTU $64, R4, R5 - BEQ R0, R5, xor_64 - BEQ R0, R4, end - -xor_32_check: - SGTU $32, R4, R5 - BNE R5, xor_16_check -xor_32: - MOVV (R2), R6 - MOVV 8(R2), R7 - MOVV 16(R2), R8 - MOVV 24(R2), R9 - MOVV (R3), R10 - MOVV 8(R3), R11 - MOVV 16(R3), R12 - MOVV 24(R3), R13 - XOR R6, R10 - XOR R7, R11 - XOR R8, R12 - XOR R9, R13 - MOVV R10, (R1) - MOVV R11, 8(R1) - MOVV R12, 16(R1) - MOVV R13, 24(R1) - ADDV $32, R2 - ADDV $32, R3 - ADDV $32, R1 - SUBV $32, R4 - BEQ R0, R4, end - -xor_16_check: - SGTU $16, R4, R5 - BNE R5, xor_8_check -xor_16: - MOVV (R2), R6 - MOVV 8(R2), R7 - MOVV (R3), R8 - MOVV 8(R3), R9 - XOR R6, R8 - XOR R7, R9 - MOVV R8, (R1) - MOVV R9, 8(R1) - ADDV $16, R2 - ADDV $16, R3 - ADDV $16, R1 - SUBV $16, R4 - BEQ R0, R4, end - -xor_8_check: - SGTU $8, R4, R5 - BNE R5, xor_4_check -xor_8: - MOVV (R2), R6 - MOVV (R3), R7 - XOR R6, R7 - MOVV R7, (R1) - ADDV $8, R1 - ADDV $8, R2 - ADDV $8, R3 - SUBV $8, R4 - BEQ R0, R4, end - -xor_4_check: - SGTU $4, R4, R5 - BNE R5, xor_2_check -xor_4: - MOVW (R2), R6 - MOVW (R3), R7 - XOR R6, R7 - MOVW R7, (R1) - ADDV $4, R2 - ADDV $4, R3 - ADDV $4, R1 - SUBV $4, R4 - BEQ R0, R4, end - -xor_2_check: - SGTU $2, R4, R5 - BNE R5, xor_1 -xor_2: - MOVH (R2), R6 - MOVH (R3), R7 - XOR R6, R7 - MOVH R7, (R1) - ADDV $2, R2 - ADDV $2, R3 - ADDV $2, R1 - SUBV $2, R4 - BEQ R0, R4, end - -xor_1: - MOVB (R2), R6 - MOVB 
(R3), R7 - XOR R6, R7 - MOVB R7, (R1) - -end: - RET diff --git a/src/crypto/internal/fips140/subtle/xor_mipsx.s b/src/crypto/internal/fips140/subtle/xor_mipsx.s deleted file mode 100644 index 1a6b3f409dddc9..00000000000000 --- a/src/crypto/internal/fips140/subtle/xor_mipsx.s +++ /dev/null @@ -1,212 +0,0 @@ -// Copyright 2025 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build (mips || mipsle) && !purego - -#include "textflag.h" - -// func xorBytes(dst, a, b *byte, n int) -TEXT ·xorBytes(SB), NOSPLIT|NOFRAME, $0 - MOVW dst+0(FP), R1 - MOVW a+4(FP), R2 - MOVW b+8(FP), R3 - MOVW n+12(FP), R4 - - SGTU $64, R4, R5 // R5 = 1 if (64 > R4) - BNE R5, xor_32_check -xor_64: - MOVW (R2), R6 - MOVW 4(R2), R7 - MOVW 8(R2), R8 - MOVW 12(R2), R9 - MOVW (R3), R10 - MOVW 4(R3), R11 - MOVW 8(R3), R12 - MOVW 12(R3), R13 - XOR R6, R10 - XOR R7, R11 - XOR R8, R12 - XOR R9, R13 - MOVW R10, (R1) - MOVW R11, 4(R1) - MOVW R12, 8(R1) - MOVW R13, 12(R1) - MOVW 16(R2), R6 - MOVW 20(R2), R7 - MOVW 24(R2), R8 - MOVW 28(R2), R9 - MOVW 16(R3), R10 - MOVW 20(R3), R11 - MOVW 24(R3), R12 - MOVW 28(R3), R13 - XOR R6, R10 - XOR R7, R11 - XOR R8, R12 - XOR R9, R13 - MOVW R10, 16(R1) - MOVW R11, 20(R1) - MOVW R12, 24(R1) - MOVW R13, 28(R1) - MOVW 32(R2), R6 - MOVW 36(R2), R7 - MOVW 40(R2), R8 - MOVW 44(R2), R9 - MOVW 32(R3), R10 - MOVW 36(R3), R11 - MOVW 40(R3), R12 - MOVW 44(R3), R13 - XOR R6, R10 - XOR R7, R11 - XOR R8, R12 - XOR R9, R13 - MOVW R10, 32(R1) - MOVW R11, 36(R1) - MOVW R12, 40(R1) - MOVW R13, 44(R1) - MOVW 48(R2), R6 - MOVW 52(R2), R7 - MOVW 56(R2), R8 - MOVW 60(R2), R9 - MOVW 48(R3), R10 - MOVW 52(R3), R11 - MOVW 56(R3), R12 - MOVW 60(R3), R13 - XOR R6, R10 - XOR R7, R11 - XOR R8, R12 - XOR R9, R13 - MOVW R10, 48(R1) - MOVW R11, 52(R1) - MOVW R12, 56(R1) - MOVW R13, 60(R1) - ADD $64, R2 - ADD $64, R3 - ADD $64, R1 - SUB $64, R4 - SGTU $64, R4, R5 - BEQ R0, R5, xor_64 - BEQ R0, R4, end - -xor_32_check: - SGTU $32, R4, R5 - BNE R5, xor_16_check -xor_32: - MOVW (R2), R6 - MOVW 4(R2), R7 - MOVW 8(R2), R8 - MOVW 12(R2), R9 - MOVW (R3), R10 - MOVW 4(R3), R11 - MOVW 8(R3), R12 - MOVW 12(R3), R13 - XOR R6, R10 - XOR R7, R11 - XOR R8, R12 - XOR R9, R13 - MOVW R10, (R1) - MOVW R11, 4(R1) - MOVW R12, 8(R1) - MOVW R13, 12(R1) - MOVW 16(R2), R6 - MOVW 20(R2), R7 - MOVW 24(R2), R8 - MOVW 28(R2), R9 - MOVW 16(R3), R10 - MOVW 20(R3), R11 - MOVW 24(R3), R12 - MOVW 28(R3), R13 - XOR R6, R10 - XOR R7, R11 - XOR R8, R12 - XOR R9, R13 - MOVW R10, 16(R1) - MOVW R11, 20(R1) - MOVW R12, 24(R1) - MOVW R13, 28(R1) - ADD $32, R2 - ADD $32, R3 - ADD $32, R1 - SUB $32, R4 - BEQ R0, R4, end - -xor_16_check: - SGTU $16, R4, R5 - BNE R5, xor_8_check -xor_16: - MOVW (R2), R6 - MOVW 4(R2), R7 - MOVW 8(R2), R8 - MOVW 12(R2), R9 - MOVW (R3), R10 - MOVW 4(R3), R11 - MOVW 8(R3), R12 - MOVW 12(R3), R13 - XOR R6, R10 - XOR R7, R11 - XOR R8, R12 - XOR R9, R13 - MOVW R10, (R1) - MOVW R11, 4(R1) - MOVW R12, 8(R1) - MOVW R13, 12(R1) - ADD $16, R2 - ADD $16, R3 - ADD $16, R1 - SUB $16, R4 - BEQ R0, R4, end - -xor_8_check: - SGTU $8, R4, R5 - BNE R5, xor_4_check -xor_8: - MOVW (R2), R6 - MOVW 4(R2), R7 - MOVW (R3), R8 - MOVW 4(R3), R9 - XOR R6, R8 - XOR R7, R9 - MOVW R8, (R1) - MOVW R9, 4(R1) - ADD $8, R1 - ADD $8, R2 - ADD $8, R3 - SUB $8, R4 - BEQ R0, R4, end - -xor_4_check: - SGTU $4, R4, R5 - BNE R5, xor_2_check -xor_4: - MOVW (R2), R6 - MOVW (R3), R7 - XOR R6, R7 - MOVW R7, (R1) - ADD $4, R2 - ADD $4, R3 - ADD $4, R1 - SUB $4, R4 - BEQ R0, R4, end - 
-xor_2_check: - SGTU $2, R4, R5 - BNE R5, xor_1 -xor_2: - MOVH (R2), R6 - MOVH (R3), R7 - XOR R6, R7 - MOVH R7, (R1) - ADD $2, R2 - ADD $2, R3 - ADD $2, R1 - SUB $2, R4 - BEQ R0, R4, end - -xor_1: - MOVB (R2), R6 - MOVB (R3), R7 - XOR R6, R7 - MOVB R7, (R1) - -end: - RET diff --git a/src/crypto/internal/fips140deps/fipsdeps_test.go b/src/crypto/internal/fips140deps/fipsdeps_test.go index 2c3bc8184e71bc..97552dc1ce10f1 100644 --- a/src/crypto/internal/fips140deps/fipsdeps_test.go +++ b/src/crypto/internal/fips140deps/fipsdeps_test.go @@ -88,7 +88,8 @@ func TestImports(t *testing.T) { } } - // Ensure that all packages except check and check's dependencies import check. + // Ensure that all packages except check, check's dependencies, and the + // entropy source (which is used only from .../fips140/drbg) import check. for pkg := range allPackages { switch pkg { case "crypto/internal/fips140/check": @@ -99,6 +100,7 @@ func TestImports(t *testing.T) { case "crypto/internal/fips140/sha3": case "crypto/internal/fips140/sha256": case "crypto/internal/fips140/sha512": + case "crypto/internal/fips140/entropy": default: if !importCheck[pkg] { t.Errorf("package %s does not import crypto/internal/fips140/check", pkg) diff --git a/src/crypto/internal/fips140deps/time/time.go b/src/crypto/internal/fips140deps/time/time.go new file mode 100644 index 00000000000000..eea37b772e4351 --- /dev/null +++ b/src/crypto/internal/fips140deps/time/time.go @@ -0,0 +1,21 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !windows + +package time + +import "time" + +var start = time.Now() + +// HighPrecisionNow returns a high-resolution timestamp suitable for measuring +// small time differences. It uses the time package's monotonic clock. +// +// Its unit, epoch, and resolution are unspecified, and may change, but can be +// assumed to be sufficiently precise to measure time differences on the order +// of tens to hundreds of nanoseconds. +func HighPrecisionNow() int64 { + return int64(time.Since(start)) +} diff --git a/src/crypto/internal/fips140deps/time/time_windows.go b/src/crypto/internal/fips140deps/time/time_windows.go new file mode 100644 index 00000000000000..410ede4ee91705 --- /dev/null +++ b/src/crypto/internal/fips140deps/time/time_windows.go @@ -0,0 +1,17 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package time + +import "internal/syscall/windows" + +// HighPrecisionNow returns a high-resolution timestamp suitable for measuring +// small time differences. It uses Windows' QueryPerformanceCounter. +// +// Its unit, epoch, and resolution are unspecified, and may change, but can be +// assumed to be sufficiently precise to measure time differences on the order +// of tens to hundreds of nanoseconds. 
+func HighPrecisionNow() int64 { + return windows.QueryPerformanceCounter() +} diff --git a/src/crypto/internal/fips140test/cast_test.go b/src/crypto/internal/fips140test/cast_test.go index b043a71f04effa..5bbc964b617b2b 100644 --- a/src/crypto/internal/fips140test/cast_test.go +++ b/src/crypto/internal/fips140test/cast_test.go @@ -48,8 +48,8 @@ var allCASTs = []string{ "HKDF-SHA2-256", "HMAC-SHA2-256", "KAS-ECC-SSC P-256", - "ML-KEM PCT", - "ML-KEM PCT", + "ML-KEM PCT", // -768 + "ML-KEM PCT", // -1024 "ML-KEM-768", "PBKDF2", "RSA sign and verify PCT", @@ -104,29 +104,44 @@ func TestAllCASTs(t *testing.T) { // TestConditionals causes the conditional CASTs and PCTs to be invoked. func TestConditionals(t *testing.T) { - mlkem.GenerateKey768() + // ML-KEM PCT + kMLKEM, err := mlkem.GenerateKey768() + if err != nil { + t.Error(err) + } else { + // ML-KEM-768 + kMLKEM.EncapsulationKey().Encapsulate() + } + // ECDH PCT kDH, err := ecdh.GenerateKey(ecdh.P256(), rand.Reader) if err != nil { t.Error(err) } else { + // KAS-ECC-SSC P-256 ecdh.ECDH(ecdh.P256(), kDH, kDH.PublicKey()) } + // ECDSA PCT kDSA, err := ecdsa.GenerateKey(ecdsa.P256(), rand.Reader) if err != nil { t.Error(err) } else { + // ECDSA P-256 SHA2-512 sign and verify ecdsa.SignDeterministic(ecdsa.P256(), sha256.New, kDSA, make([]byte, 32)) } + // Ed25519 sign and verify PCT k25519, err := ed25519.GenerateKey() if err != nil { t.Error(err) } else { + // Ed25519 sign and verify ed25519.Sign(k25519, make([]byte, 32)) } + // RSA sign and verify PCT kRSA, err := rsa.GenerateKey(rand.Reader, 2048) if err != nil { t.Error(err) } else { + // RSASSA-PKCS-v1.5 2048-bit sign and verify rsa.SignPKCS1v15(kRSA, crypto.SHA256.String(), make([]byte, 32)) } t.Log("completed successfully") diff --git a/src/crypto/internal/fips140test/entropy_test.go b/src/crypto/internal/fips140test/entropy_test.go new file mode 100644 index 00000000000000..a33e2e7bbc907d --- /dev/null +++ b/src/crypto/internal/fips140test/entropy_test.go @@ -0,0 +1,276 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !fips140v1.0 + +package fipstest + +import ( + "bytes" + "crypto/internal/cryptotest" + "crypto/internal/fips140/drbg" + "crypto/internal/fips140/entropy" + "crypto/rand" + "crypto/sha256" + "crypto/sha512" + "encoding/hex" + "flag" + "fmt" + "internal/testenv" + "io/fs" + "os" + "path/filepath" + "runtime" + "strings" + "testing" + "time" +) + +var flagEntropySamples = flag.String("entropy-samples", "", "store entropy samples with the provided `suffix`") +var flagNISTSP80090B = flag.Bool("nist-sp800-90b", false, "run NIST SP 800-90B tests (requires docker)") + +func TestEntropySamples(t *testing.T) { + cryptotest.MustSupportFIPS140(t) + now := time.Now().UTC() + + var seqSamples [1_000_000]uint8 + samplesOrTryAgain(t, seqSamples[:]) + seqSamplesName := fmt.Sprintf("entropy_samples_sequential_%s_%s_%s_%s_%s.bin", entropy.Version(), + runtime.GOOS, runtime.GOARCH, *flagEntropySamples, now.Format("20060102T150405Z")) + if *flagEntropySamples != "" { + if err := os.WriteFile(seqSamplesName, seqSamples[:], 0644); err != nil { + t.Fatalf("failed to write samples to %q: %v", seqSamplesName, err) + } + t.Logf("wrote %s", seqSamplesName) + } + + var restartSamples [1000][1000]uint8 + for i := range restartSamples { + var samples [1024]uint8 + samplesOrTryAgain(t, samples[:]) + copy(restartSamples[i][:], samples[:]) + } + restartSamplesName := fmt.Sprintf("entropy_samples_restart_%s_%s_%s_%s_%s.bin", entropy.Version(), + runtime.GOOS, runtime.GOARCH, *flagEntropySamples, now.Format("20060102T150405Z")) + if *flagEntropySamples != "" { + f, err := os.Create(restartSamplesName) + if err != nil { + t.Fatalf("failed to create %q: %v", restartSamplesName, err) + } + for i := range restartSamples { + if _, err := f.Write(restartSamples[i][:]); err != nil { + t.Fatalf("failed to write samples to %q: %v", restartSamplesName, err) + } + } + if err := f.Close(); err != nil { + t.Fatalf("failed to close %q: %v", restartSamplesName, err) + } + t.Logf("wrote %s", restartSamplesName) + } + + if *flagNISTSP80090B { + if *flagEntropySamples == "" { + t.Fatalf("-nist-sp800-90b requires -entropy-samples to be set too") + } + + // Check if the nist-sp800-90b docker image is already present, + // and build it otherwise. 
+ if err := testenv.Command(t, + "docker", "image", "inspect", "nist-sp800-90b", + ).Run(); err != nil { + t.Logf("building nist-sp800-90b docker image") + dockerfile := filepath.Join(t.TempDir(), "Dockerfile.SP800-90B_EntropyAssessment") + if err := os.WriteFile(dockerfile, []byte(NISTSP80090BDockerfile), 0644); err != nil { + t.Fatalf("failed to write Dockerfile: %v", err) + } + out, err := testenv.Command(t, + "docker", "build", "-t", "nist-sp800-90b", "-f", dockerfile, "/var/empty", + ).CombinedOutput() + if err != nil { + t.Fatalf("failed to build nist-sp800-90b docker image: %v\n%s", err, out) + } + } + + pwd, err := os.Getwd() + if err != nil { + t.Fatalf("failed to get current working directory: %v", err) + } + t.Logf("running ea_non_iid analysis") + out, err := testenv.Command(t, + "docker", "run", "--rm", "-v", fmt.Sprintf("%s:%s", pwd, pwd), "-w", pwd, + "nist-sp800-90b", "ea_non_iid", seqSamplesName, "8", + ).CombinedOutput() + if err != nil { + t.Fatalf("ea_non_iid failed: %v\n%s", err, out) + } + t.Logf("\n%s", out) + + H_I := string(out) + H_I = strings.TrimSpace(H_I[strings.LastIndexByte(H_I, ' ')+1:]) + t.Logf("running ea_restart analysis with H_I = %s", H_I) + out, err = testenv.Command(t, + "docker", "run", "--rm", "-v", fmt.Sprintf("%s:%s", pwd, pwd), "-w", pwd, + "nist-sp800-90b", "ea_restart", restartSamplesName, "8", H_I, + ).CombinedOutput() + if err != nil { + t.Fatalf("ea_restart failed: %v\n%s", err, out) + } + t.Logf("\n%s", out) + } +} + +var NISTSP80090BDockerfile = ` +FROM ubuntu:24.04 +RUN apt-get update && apt-get install -y build-essential git \ + libbz2-dev libdivsufsort-dev libjsoncpp-dev libgmp-dev libmpfr-dev libssl-dev \ + && rm -rf /var/lib/apt/lists/* +RUN git clone --depth 1 https://github.com/usnistgov/SP800-90B_EntropyAssessment.git +RUN cd SP800-90B_EntropyAssessment && git checkout 8924f158c97e7b805e0f95247403ad4c44b9cd6f +WORKDIR ./SP800-90B_EntropyAssessment/cpp/ +RUN make all +RUN cd selftest && ./selftest +RUN cp ea_non_iid ea_restart /usr/local/bin/ +` + +var memory entropy.ScratchBuffer + +// samplesOrTryAgain calls entropy.Samples up to 10 times until it succeeds. +// Samples has a non-negligible chance of failing the health tests, as required +// by SP 800-90B. +func samplesOrTryAgain(t *testing.T, samples []uint8) { + t.Helper() + for range 10 { + if err := entropy.Samples(samples, &memory); err != nil { + t.Logf("entropy.Samples() failed: %v", err) + continue + } + return + } + t.Fatal("entropy.Samples() failed 10 times in a row") +} + +func TestEntropySHA384(t *testing.T) { + var input [1024]uint8 + for i := range input { + input[i] = uint8(i) + } + want := sha512.Sum384(input[:]) + got := entropy.SHA384(&input) + if got != want { + t.Errorf("SHA384() = %x, want %x", got, want) + } + + for l := range 1024*3 + 1 { + input := make([]byte, l) + rand.Read(input) + want := sha512.Sum384(input) + got := entropy.TestingOnlySHA384(input) + if got != want { + t.Errorf("TestingOnlySHA384(%d bytes) = %x, want %x", l, got, want) + } + } +} + +func TestEntropyRepetitionCountTest(t *testing.T) { + good := bytes.Repeat(append(bytes.Repeat([]uint8{42}, 40), 1), 100) + if err := entropy.RepetitionCountTest(good); err != nil { + t.Errorf("RepetitionCountTest(good) = %v, want nil", err) + } + + bad := bytes.Repeat([]uint8{0}, 40) + bad = append(bad, bytes.Repeat([]uint8{1}, 40)...) + bad = append(bad, bytes.Repeat([]uint8{42}, 41)...) + bad = append(bad, bytes.Repeat([]uint8{2}, 40)...) 
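+	// bad contains a run of 41 identical samples (the value 42), which
+	// reaches the C = 41 cutoff.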
+ if err := entropy.RepetitionCountTest(bad); err == nil { + t.Error("RepetitionCountTest(bad) = nil, want error") + } + + bad = bytes.Repeat([]uint8{42}, 41) + if err := entropy.RepetitionCountTest(bad); err == nil { + t.Error("RepetitionCountTest(bad) = nil, want error") + } +} + +func TestEntropyAdaptiveProportionTest(t *testing.T) { + good := bytes.Repeat([]uint8{0}, 409) + good = append(good, bytes.Repeat([]uint8{1}, 512-409)...) + good = append(good, bytes.Repeat([]uint8{0}, 409)...) + if err := entropy.AdaptiveProportionTest(good); err != nil { + t.Errorf("AdaptiveProportionTest(good) = %v, want nil", err) + } + + // These fall out of the window. + bad := bytes.Repeat([]uint8{1}, 100) + bad = append(bad, bytes.Repeat([]uint8{1, 2, 3, 4, 5, 6}, 100)...) + // These are in the window. + bad = append(bad, bytes.Repeat([]uint8{42}, 410)...) + if err := entropy.AdaptiveProportionTest(bad[:len(bad)-1]); err != nil { + t.Errorf("AdaptiveProportionTest(bad[:len(bad)-1]) = %v, want nil", err) + } + if err := entropy.AdaptiveProportionTest(bad); err == nil { + t.Error("AdaptiveProportionTest(bad) = nil, want error") + } +} + +func TestEntropyUnchanged(t *testing.T) { + testenv.MustHaveSource(t) + + h := sha256.New() + root := os.DirFS("../fips140/entropy") + if err := fs.WalkDir(root, ".", func(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if d.IsDir() { + return nil + } + data, err := fs.ReadFile(root, path) + if err != nil { + return err + } + t.Logf("Hashing %s (%d bytes)", path, len(data)) + fmt.Fprintf(h, "%s %d\n", path, len(data)) + h.Write(data) + return nil + }); err != nil { + t.Fatalf("WalkDir: %v", err) + } + + // The crypto/internal/fips140/entropy package is certified as a FIPS 140-3 + // entropy source through the Entropy Source Validation program, + // independently of the FIPS 140-3 module. It must not change even across + // FIPS 140-3 module versions, in order to reuse the ESV certificate. + exp := "2541273241ae8aafe55026328354ed3799df1e2fb308b2097833203a42911b53" + if got := hex.EncodeToString(h.Sum(nil)); got != exp { + t.Errorf("hash of crypto/internal/fips140/entropy = %s, want %s", got, exp) + } +} + +func TestEntropyRace(t *testing.T) { + // Check that concurrent calls to Seed don't trigger the race detector. + for range 16 { + go func() { + _, _ = entropy.Seed(&memory) + }() + } + // Same, with the higher-level DRBG. 
+ for range 16 { + go func() { + var b [64]byte + drbg.Read(b[:]) + }() + } +} + +var sink byte + +func BenchmarkEntropySeed(b *testing.B) { + for b.Loop() { + seed, err := entropy.Seed(&memory) + if err != nil { + b.Fatalf("entropy.Seed() failed: %v", err) + } + sink ^= seed[0] + } +} diff --git a/src/crypto/sha256/sha256_test.go b/src/crypto/sha256/sha256_test.go index 11b24db7d6b0a0..a18a536ba2896f 100644 --- a/src/crypto/sha256/sha256_test.go +++ b/src/crypto/sha256/sha256_test.go @@ -471,3 +471,17 @@ func BenchmarkHash256K(b *testing.B) { func BenchmarkHash1M(b *testing.B) { benchmarkSize(b, 1024*1024) } + +func TestAllocatonsWithTypeAsserts(t *testing.T) { + cryptotest.SkipTestAllocations(t) + allocs := testing.AllocsPerRun(100, func() { + h := New() + h.Write([]byte{1, 2, 3}) + marshaled, _ := h.(encoding.BinaryMarshaler).MarshalBinary() + marshaled, _ = h.(encoding.BinaryAppender).AppendBinary(marshaled[:0]) + h.(encoding.BinaryUnmarshaler).UnmarshalBinary(marshaled) + }) + if allocs != 0 { + t.Fatalf("allocs = %v; want = 0", allocs) + } +} diff --git a/src/crypto/tls/bogo_shim_test.go b/src/crypto/tls/bogo_shim_test.go index 7cab568db80953..8f171d925959c8 100644 --- a/src/crypto/tls/bogo_shim_test.go +++ b/src/crypto/tls/bogo_shim_test.go @@ -11,8 +11,10 @@ import ( "encoding/base64" "encoding/json" "encoding/pem" + "errors" "flag" "fmt" + "html/template" "internal/byteorder" "internal/testenv" "io" @@ -25,10 +27,13 @@ import ( "strconv" "strings" "testing" + "time" "golang.org/x/crypto/cryptobyte" ) +const boringsslModVer = "v0.0.0-20250620172916-f51d8b099832" + var ( port = flag.String("port", "", "") server = flag.Bool("server", false, "") @@ -537,6 +542,7 @@ func orderlyShutdown(tlsConn *Conn) { } func TestBogoSuite(t *testing.T) { + testenv.MustHaveGoBuild(t) if testing.Short() { t.Skip("skipping in short mode") } @@ -555,9 +561,9 @@ func TestBogoSuite(t *testing.T) { var bogoDir string if *bogoLocalDir != "" { + ensureLocalBogo(t, *bogoLocalDir) bogoDir = *bogoLocalDir } else { - const boringsslModVer = "v0.0.0-20250620172916-f51d8b099832" bogoDir = cryptotest.FetchModule(t, "boringssl.googlesource.com/boringssl.git", boringsslModVer) } @@ -606,6 +612,12 @@ func TestBogoSuite(t *testing.T) { t.Fatalf("failed to parse results JSON: %s", err) } + if *bogoReport != "" { + if err := generateReport(results, *bogoReport); err != nil { + t.Fatalf("failed to generate report: %v", err) + } + } + // assertResults contains test results we want to make sure // are present in the output. They are only checked if -bogo-filter // was not passed. @@ -655,6 +667,66 @@ func TestBogoSuite(t *testing.T) { } } +// ensureLocalBogo fetches BoringSSL to localBogoDir at the correct revision +// (from boringsslModVer) if localBogoDir doesn't already exist. +// +// If localBogoDir does exist, ensureLocalBogo fails the test if it isn't +// a directory. 
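A rough usage sketch (the directory and file names here are illustrative, not taken from the patch): from src/crypto/tls, an invocation along the lines of

    go test -run TestBogoSuite -bogo-local-dir=/tmp/bogo -bogo-html-report=bogo-report.html

exercises both new flags. If /tmp/bogo does not exist, ensureLocalBogo clones BoringSSL and checks out the commit encoded in boringsslModVer there; on later runs the existing checkout is reused, and generateReport renders the parsed bogoResults into the named HTML file.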
+func ensureLocalBogo(t *testing.T, localBogoDir string) { + t.Helper() + + if stat, err := os.Stat(localBogoDir); err == nil { + if !stat.IsDir() { + t.Fatalf("local bogo dir (%q) exists but is not a directory", localBogoDir) + } + + t.Logf("using local bogo checkout from %q", localBogoDir) + return + } else if !errors.Is(err, os.ErrNotExist) { + t.Fatalf("failed to stat local bogo dir (%q): %v", localBogoDir, err) + } + + testenv.MustHaveExecPath(t, "git") + + idx := strings.LastIndex(boringsslModVer, "-") + if idx == -1 || idx == len(boringsslModVer)-1 { + t.Fatalf("invalid boringsslModVer format: %q", boringsslModVer) + } + commitSHA := boringsslModVer[idx+1:] + + t.Logf("cloning boringssl@%s to %q", commitSHA, localBogoDir) + cloneCmd := testenv.Command(t, "git", "clone", "--no-checkout", "https://boringssl.googlesource.com/boringssl", localBogoDir) + if err := cloneCmd.Run(); err != nil { + t.Fatalf("git clone failed: %v", err) + } + + checkoutCmd := testenv.Command(t, "git", "checkout", commitSHA) + checkoutCmd.Dir = localBogoDir + if err := checkoutCmd.Run(); err != nil { + t.Fatalf("git checkout failed: %v", err) + } + + t.Logf("using fresh local bogo checkout from %q", localBogoDir) + return +} + +func generateReport(results bogoResults, outPath string) error { + data := reportData{ + Results: results, + Timestamp: time.Unix(int64(results.SecondsSinceEpoch), 0).Format("2006-01-02 15:04:05"), + Revision: boringsslModVer, + } + + tmpl := template.Must(template.New("report").Parse(reportTemplate)) + file, err := os.Create(outPath) + if err != nil { + return err + } + defer file.Close() + + return tmpl.Execute(file, data) +} + // bogoResults is a copy of boringssl.googlesource.com/boringssl/testresults.Results type bogoResults struct { Version int `json:"version"` @@ -669,3 +741,127 @@ type bogoResults struct { Error string `json:"error,omitempty"` } `json:"tests"` } + +type reportData struct { + Results bogoResults + SkipReasons map[string]string + Timestamp string + Revision string +} + +const reportTemplate = ` + + + + BoGo Results Report + + + +

+<h1>BoGo Results Report</h1>
+
+<p>
+Generated: {{.Timestamp}} | BoGo Revision: {{.Revision}}
+{{range $status, $count := .Results.NumFailuresByType}}
+{{$status}}: {{$count}} |
+{{end}}
+</p>
+
+<table>
+<thead>
+<tr>
+<th>Test Name</th><th>Status</th><th>Actual</th><th>Expected</th><th>Error</th>
+</tr>
+</thead>
+<tbody>
+{{range $name, $test := .Results.Tests}}
+<tr>
+<td>{{$name}}</td><td>{{$test.Actual}}</td><td>{{$test.Actual}}</td><td>{{$test.Expected}}</td><td>{{$test.Error}}</td>
+</tr>
+{{end}}
    + + + + +` diff --git a/src/crypto/tls/conn.go b/src/crypto/tls/conn.go index 09dc9ea94c939f..2de120a1329f87 100644 --- a/src/crypto/tls/conn.go +++ b/src/crypto/tls/conn.go @@ -1578,9 +1578,9 @@ func (c *Conn) handshakeContext(ctx context.Context) (ret error) { // the handshake (RFC 9001, Section 5.7). c.quicSetReadSecret(QUICEncryptionLevelApplication, c.cipherSuite, c.in.trafficSecret) } else { - var a alert c.out.Lock() - if !errors.As(c.out.err, &a) { + a, ok := errors.AsType[alert](c.out.err) + if !ok { a = alertInternalError } c.out.Unlock() diff --git a/src/crypto/tls/ech.go b/src/crypto/tls/ech.go index 76727a890896a0..d3472d8dc4aafb 100644 --- a/src/crypto/tls/ech.go +++ b/src/crypto/tls/ech.go @@ -568,16 +568,6 @@ func parseECHExt(ext []byte) (echType echExtType, cs echCipher, configID uint8, return echType, cs, configID, bytes.Clone(encap), bytes.Clone(payload), nil } -func marshalEncryptedClientHelloConfigList(configs []EncryptedClientHelloKey) ([]byte, error) { - builder := cryptobyte.NewBuilder(nil) - builder.AddUint16LengthPrefixed(func(builder *cryptobyte.Builder) { - for _, c := range configs { - builder.AddBytes(c.Config) - } - }) - return builder.Bytes() -} - func (c *Conn) processECHClientHello(outer *clientHelloMsg, echKeys []EncryptedClientHelloKey) (*clientHelloMsg, *echServerContext, error) { echType, echCiphersuite, configID, encap, payload, err := parseECHExt(outer.encryptedClientHello) if err != nil { diff --git a/src/crypto/tls/handshake_server.go b/src/crypto/tls/handshake_server.go index 1e0b5f06672d15..a2cf176a86c0d8 100644 --- a/src/crypto/tls/handshake_server.go +++ b/src/crypto/tls/handshake_server.go @@ -357,7 +357,7 @@ func negotiateALPN(serverProtos, clientProtos []string, quic bool) (string, erro if http11fallback { return "", nil } - return "", fmt.Errorf("tls: client requested unsupported application protocols (%s)", clientProtos) + return "", fmt.Errorf("tls: client requested unsupported application protocols (%q)", clientProtos) } // supportsECDHE returns whether ECDHE key exchanges can be used with this @@ -965,10 +965,9 @@ func (c *Conn) processCertsFromClient(certificate Certificate) error { chains, err := certs[0].Verify(opts) if err != nil { - var errCertificateInvalid x509.CertificateInvalidError - if errors.As(err, &x509.UnknownAuthorityError{}) { + if _, ok := errors.AsType[x509.UnknownAuthorityError](err); ok { c.sendAlert(alertUnknownCA) - } else if errors.As(err, &errCertificateInvalid) && errCertificateInvalid.Reason == x509.Expired { + } else if errCertificateInvalid, ok := errors.AsType[x509.CertificateInvalidError](err); ok && errCertificateInvalid.Reason == x509.Expired { c.sendAlert(alertCertificateExpired) } else { c.sendAlert(alertBadCertificate) diff --git a/src/crypto/tls/handshake_server_test.go b/src/crypto/tls/handshake_server_test.go index 941f2a3373feb9..43183db2a19770 100644 --- a/src/crypto/tls/handshake_server_test.go +++ b/src/crypto/tls/handshake_server_test.go @@ -403,8 +403,7 @@ func TestAlertForwarding(t *testing.T) { err := Server(s, testConfig).Handshake() s.Close() - var opErr *net.OpError - if !errors.As(err, &opErr) || opErr.Err != error(alertUnknownCA) { + if opErr, ok := errors.AsType[*net.OpError](err); !ok || opErr.Err != error(alertUnknownCA) { t.Errorf("Got error: %s; expected: %s", err, error(alertUnknownCA)) } } diff --git a/src/crypto/tls/handshake_test.go b/src/crypto/tls/handshake_test.go index ea8ac6fc837ae7..3e2c5663087828 100644 --- a/src/crypto/tls/handshake_test.go +++ 
b/src/crypto/tls/handshake_test.go @@ -46,7 +46,9 @@ var ( keyFile = flag.String("keylog", "", "destination file for KeyLogWriter") bogoMode = flag.Bool("bogo-mode", false, "Enabled bogo shim mode, ignore everything else") bogoFilter = flag.String("bogo-filter", "", "BoGo test filter") - bogoLocalDir = flag.String("bogo-local-dir", "", "Local BoGo to use, instead of fetching from source") + bogoLocalDir = flag.String("bogo-local-dir", "", + "If not-present, checkout BoGo into this dir, or otherwise use it as a pre-existing checkout") + bogoReport = flag.String("bogo-html-report", "", "File path to render an HTML report with BoGo results") ) func runTestAndUpdateIfNeeded(t *testing.T, name string, run func(t *testing.T, update bool), wait bool) { diff --git a/src/crypto/tls/quic.go b/src/crypto/tls/quic.go index 3be479eb12f085..2ba2242b2d93d2 100644 --- a/src/crypto/tls/quic.go +++ b/src/crypto/tls/quic.go @@ -362,12 +362,11 @@ func quicError(err error) error { if err == nil { return nil } - var ae AlertError - if errors.As(err, &ae) { + if _, ok := errors.AsType[AlertError](err); ok { return err } - var a alert - if !errors.As(err, &a) { + a, ok := errors.AsType[alert](err) + if !ok { a = alertInternalError } // Return an error wrapping the original error and an AlertError. diff --git a/src/crypto/tls/quic_test.go b/src/crypto/tls/quic_test.go index f6e8c55d9d63e4..5f4b2b7707d01e 100644 --- a/src/crypto/tls/quic_test.go +++ b/src/crypto/tls/quic_test.go @@ -368,8 +368,7 @@ func TestQUICHandshakeError(t *testing.T) { if !errors.Is(err, AlertError(alertBadCertificate)) { t.Errorf("connection handshake terminated with error %q, want alertBadCertificate", err) } - var e *CertificateVerificationError - if !errors.As(err, &e) { + if _, ok := errors.AsType[*CertificateVerificationError](err); !ok { t.Errorf("connection handshake terminated with error %q, want CertificateVerificationError", err) } } diff --git a/src/crypto/x509/parser.go b/src/crypto/x509/parser.go index 4abcc1b7b590e2..680dcee203a828 100644 --- a/src/crypto/x509/parser.go +++ b/src/crypto/x509/parser.go @@ -429,10 +429,8 @@ func parseSANExtension(der cryptobyte.String) (dnsNames, emailAddresses []string if err != nil { return fmt.Errorf("x509: cannot parse URI %q: %s", uriStr, err) } - if len(uri.Host) > 0 { - if _, ok := domainToReverseLabels(uri.Host); !ok { - return fmt.Errorf("x509: cannot parse URI %q: invalid domain", uriStr) - } + if len(uri.Host) > 0 && !domainNameValid(uri.Host, false) { + return fmt.Errorf("x509: cannot parse URI %q: invalid domain", uriStr) } uris = append(uris, uri) case nameTypeIP: @@ -598,15 +596,7 @@ func parseNameConstraintsExtension(out *Certificate, e pkix.Extension) (unhandle return nil, nil, nil, nil, errors.New("x509: invalid constraint value: " + err.Error()) } - trimmedDomain := domain - if len(trimmedDomain) > 0 && trimmedDomain[0] == '.' { - // constraints can have a leading - // period to exclude the domain - // itself, but that's not valid in a - // normal domain name. - trimmedDomain = trimmedDomain[1:] - } - if _, ok := domainToReverseLabels(trimmedDomain); !ok { + if !domainNameValid(domain, true) { return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse dnsName constraint %q", domain) } dnsNames = append(dnsNames, domain) @@ -647,12 +637,7 @@ func parseNameConstraintsExtension(out *Certificate, e pkix.Extension) (unhandle return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse rfc822Name constraint %q", constraint) } } else { - // Otherwise it's a domain name. 
- domain := constraint - if len(domain) > 0 && domain[0] == '.' { - domain = domain[1:] - } - if _, ok := domainToReverseLabels(domain); !ok { + if !domainNameValid(constraint, true) { return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse rfc822Name constraint %q", constraint) } } @@ -668,15 +653,7 @@ func parseNameConstraintsExtension(out *Certificate, e pkix.Extension) (unhandle return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse URI constraint %q: cannot be IP address", domain) } - trimmedDomain := domain - if len(trimmedDomain) > 0 && trimmedDomain[0] == '.' { - // constraints can have a leading - // period to exclude the domain itself, - // but that's not valid in a normal - // domain name. - trimmedDomain = trimmedDomain[1:] - } - if _, ok := domainToReverseLabels(trimmedDomain); !ok { + if !domainNameValid(domain, true) { return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse URI constraint %q", domain) } uriDomains = append(uriDomains, domain) @@ -1317,3 +1294,62 @@ func ParseRevocationList(der []byte) (*RevocationList, error) { return rl, nil } + +// domainNameValid is an alloc-less version of the checks that +// domainToReverseLabels does. +func domainNameValid(s string, constraint bool) bool { + // TODO(#75835): This function omits a number of checks which we + // really should be doing to enforce that domain names are valid names per + // RFC 1034. We previously enabled these checks, but this broke a + // significant number of certificates we previously considered valid, and we + // happily create via CreateCertificate (et al). We should enable these + // checks, but will need to gate them behind a GODEBUG. + // + // I have left the checks we previously enabled, noted with "TODO(#75835)" so + // that we can easily re-enable them once we unbreak everyone. + + // TODO(#75835): this should only be true for constraints. + if len(s) == 0 { + return true + } + + // Do not allow trailing period (FQDN format is not allowed in SANs or + // constraints). + if s[len(s)-1] == '.' { + return false + } + + // TODO(#75835): domains must have at least one label, cannot have + // a leading empty label, and cannot be longer than 253 characters. + // if len(s) == 0 || (!constraint && s[0] == '.') || len(s) > 253 { + // return false + // } + + lastDot := -1 + if constraint && s[0] == '.' { + s = s[1:] + } + + for i := 0; i <= len(s); i++ { + if i < len(s) && (s[i] < 33 || s[i] > 126) { + // Invalid character. + return false + } + if i == len(s) || s[i] == '.' { + labelLen := i + if lastDot >= 0 { + labelLen -= lastDot + 1 + } + if labelLen == 0 { + return false + } + // TODO(#75835): labels cannot be longer than 63 characters. + // if labelLen > 63 { + // return false + // } + lastDot = i + } + } + + return true +} diff --git a/src/crypto/x509/parser_test.go b/src/crypto/x509/parser_test.go index 3b9d9aed826b01..d53b805b786990 100644 --- a/src/crypto/x509/parser_test.go +++ b/src/crypto/x509/parser_test.go @@ -5,9 +5,13 @@ package x509 import ( + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" "encoding/asn1" "encoding/pem" "os" + "strings" "testing" cryptobyte_asn1 "golang.org/x/crypto/cryptobyte/asn1" @@ -251,3 +255,106 @@ d5l1tRhScKu2NBgm74nYmJxJYgvuTA38wGhRrGU= } } } + +func TestDomainNameValid(t *testing.T) { + for _, tc := range []struct { + name string + dnsName string + constraint bool + valid bool + }{ + // TODO(#75835): these tests are for stricter name validation, which we + // had to disable. 
Once we reenable these strict checks, behind a + // GODEBUG, we should add them back in. + // {"empty name, name", "", false, false}, + // {"254 char label, name", strings.Repeat("a.a", 84) + "aaa", false, false}, + // {"254 char label, constraint", strings.Repeat("a.a", 84) + "aaa", true, false}, + // {"253 char label, name", strings.Repeat("a.a", 84) + "aa", false, false}, + // {"253 char label, constraint", strings.Repeat("a.a", 84) + "aa", true, false}, + // {"64 char single label, name", strings.Repeat("a", 64), false, false}, + // {"64 char single label, constraint", strings.Repeat("a", 64), true, false}, + // {"64 char label, name", "a." + strings.Repeat("a", 64), false, false}, + // {"64 char label, constraint", "a." + strings.Repeat("a", 64), true, false}, + + // TODO(#75835): these are the inverse of the tests above, they should be removed + // once the strict checking is enabled. + {"254 char label, name", strings.Repeat("a.a", 84) + "aaa", false, true}, + {"254 char label, constraint", strings.Repeat("a.a", 84) + "aaa", true, true}, + {"253 char label, name", strings.Repeat("a.a", 84) + "aa", false, true}, + {"253 char label, constraint", strings.Repeat("a.a", 84) + "aa", true, true}, + {"64 char single label, name", strings.Repeat("a", 64), false, true}, + {"64 char single label, constraint", strings.Repeat("a", 64), true, true}, + {"64 char label, name", "a." + strings.Repeat("a", 64), false, true}, + {"64 char label, constraint", "a." + strings.Repeat("a", 64), true, true}, + + // Check we properly enforce properties of domain names. + {"empty name, constraint", "", true, true}, + {"empty label, name", "a..a", false, false}, + {"empty label, constraint", "a..a", true, false}, + {"period, name", ".", false, false}, + {"period, constraint", ".", true, false}, // TODO(roland): not entirely clear if this is a valid constraint (require at least one label?) + {"valid, name", "a.b.c", false, true}, + {"valid, constraint", "a.b.c", true, true}, + {"leading period, name", ".a.b.c", false, false}, + {"leading period, constraint", ".a.b.c", true, true}, + {"trailing period, name", "a.", false, false}, + {"trailing period, constraint", "a.", true, false}, + {"bare label, name", "a", false, true}, + {"bare label, constraint", "a", true, true}, + {"63 char single label, name", strings.Repeat("a", 63), false, true}, + {"63 char single label, constraint", strings.Repeat("a", 63), true, true}, + {"63 char label, name", "a." + strings.Repeat("a", 63), false, true}, + {"63 char label, constraint", "a." + strings.Repeat("a", 63), true, true}, + } { + t.Run(tc.name, func(t *testing.T) { + valid := domainNameValid(tc.dnsName, tc.constraint) + if tc.valid != valid { + t.Errorf("domainNameValid(%q, %t) = %v; want %v", tc.dnsName, tc.constraint, !tc.valid, tc.valid) + } + // Also check that we enforce the same properties as domainToReverseLabels + trimmedName := tc.dnsName + if tc.constraint && len(trimmedName) > 1 && trimmedName[0] == '.' { + trimmedName = trimmedName[1:] + } + _, revValid := domainToReverseLabels(trimmedName) + if valid != revValid { + t.Errorf("domainNameValid(%q, %t) = %t != domainToReverseLabels(%q) = %t", tc.dnsName, tc.constraint, valid, trimmedName, revValid) + } + }) + } +} + +func TestRoundtripWeirdSANs(t *testing.T) { + // TODO(#75835): check that certificates we create with CreateCertificate that have malformed SAN values + // can be parsed by ParseCertificate. 
We should eventually restrict this, but for now we have to maintain + // this property as people have been relying on it. + k, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatal(err) + } + badNames := []string{ + "baredomain", + "baredomain.", + strings.Repeat("a", 255), + strings.Repeat("a", 65) + ".com", + } + tmpl := &Certificate{ + EmailAddresses: badNames, + DNSNames: badNames, + } + b, err := CreateCertificate(rand.Reader, tmpl, tmpl, &k.PublicKey, k) + if err != nil { + t.Fatal(err) + } + _, err = ParseCertificate(b) + if err != nil { + t.Fatalf("Couldn't roundtrip certificate: %v", err) + } +} + +func FuzzDomainNameValid(f *testing.F) { + f.Fuzz(func(t *testing.T, data string) { + domainNameValid(data, false) + domainNameValid(data, true) + }) +} diff --git a/src/crypto/x509/verify.go b/src/crypto/x509/verify.go index 7cc0fb2e3e0385..bf7e7ec058db2b 100644 --- a/src/crypto/x509/verify.go +++ b/src/crypto/x509/verify.go @@ -391,6 +391,7 @@ func parseRFC2821Mailbox(in string) (mailbox rfc2821Mailbox, ok bool) { // domainToReverseLabels converts a textual domain name like foo.example.com to // the list of labels in reverse order, e.g. ["com", "example", "foo"]. func domainToReverseLabels(domain string) (reverseLabels []string, ok bool) { + reverseLabels = make([]string, 0, strings.Count(domain, ".")+1) for len(domain) > 0 { if i := strings.LastIndexByte(domain, '.'); i == -1 { reverseLabels = append(reverseLabels, domain) @@ -428,7 +429,7 @@ func domainToReverseLabels(domain string) (reverseLabels []string, ok bool) { return reverseLabels, true } -func matchEmailConstraint(mailbox rfc2821Mailbox, constraint string) (bool, error) { +func matchEmailConstraint(mailbox rfc2821Mailbox, constraint string, reversedDomainsCache map[string][]string, reversedConstraintsCache map[string][]string) (bool, error) { // If the constraint contains an @, then it specifies an exact mailbox // name. if strings.Contains(constraint, "@") { @@ -441,10 +442,10 @@ func matchEmailConstraint(mailbox rfc2821Mailbox, constraint string) (bool, erro // Otherwise the constraint is like a DNS constraint of the domain part // of the mailbox. 
- return matchDomainConstraint(mailbox.domain, constraint) + return matchDomainConstraint(mailbox.domain, constraint, reversedDomainsCache, reversedConstraintsCache) } -func matchURIConstraint(uri *url.URL, constraint string) (bool, error) { +func matchURIConstraint(uri *url.URL, constraint string, reversedDomainsCache map[string][]string, reversedConstraintsCache map[string][]string) (bool, error) { // From RFC 5280, Section 4.2.1.10: // “a uniformResourceIdentifier that does not include an authority // component with a host name specified as a fully qualified domain @@ -473,7 +474,7 @@ func matchURIConstraint(uri *url.URL, constraint string) (bool, error) { return false, fmt.Errorf("URI with IP (%q) cannot be matched against constraints", uri.String()) } - return matchDomainConstraint(host, constraint) + return matchDomainConstraint(host, constraint, reversedDomainsCache, reversedConstraintsCache) } func matchIPConstraint(ip net.IP, constraint *net.IPNet) (bool, error) { @@ -490,16 +491,21 @@ func matchIPConstraint(ip net.IP, constraint *net.IPNet) (bool, error) { return true, nil } -func matchDomainConstraint(domain, constraint string) (bool, error) { +func matchDomainConstraint(domain, constraint string, reversedDomainsCache map[string][]string, reversedConstraintsCache map[string][]string) (bool, error) { // The meaning of zero length constraints is not specified, but this // code follows NSS and accepts them as matching everything. if len(constraint) == 0 { return true, nil } - domainLabels, ok := domainToReverseLabels(domain) - if !ok { - return false, fmt.Errorf("x509: internal error: cannot parse domain %q", domain) + domainLabels, found := reversedDomainsCache[domain] + if !found { + var ok bool + domainLabels, ok = domainToReverseLabels(domain) + if !ok { + return false, fmt.Errorf("x509: internal error: cannot parse domain %q", domain) + } + reversedDomainsCache[domain] = domainLabels } // RFC 5280 says that a leading period in a domain name means that at @@ -513,9 +519,14 @@ func matchDomainConstraint(domain, constraint string) (bool, error) { constraint = constraint[1:] } - constraintLabels, ok := domainToReverseLabels(constraint) - if !ok { - return false, fmt.Errorf("x509: internal error: cannot parse domain %q", constraint) + constraintLabels, found := reversedConstraintsCache[constraint] + if !found { + var ok bool + constraintLabels, ok = domainToReverseLabels(constraint) + if !ok { + return false, fmt.Errorf("x509: internal error: cannot parse domain %q", constraint) + } + reversedConstraintsCache[constraint] = constraintLabels } if len(domainLabels) < len(constraintLabels) || @@ -636,6 +647,19 @@ func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *V } } + // Each time we do constraint checking, we need to check the constraints in + // the current certificate against all of the names that preceded it. We + // reverse these names using domainToReverseLabels, which is a relatively + // expensive operation. Since we check each name against each constraint, + // this requires us to do N*C calls to domainToReverseLabels (where N is the + // total number of names that preceed the certificate, and C is the total + // number of constraints in the certificate). By caching the results of + // calling domainToReverseLabels, we can reduce that to N+C calls at the + // cost of keeping all of the parsed names and constraints in memory until + // we return from isValid. 
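To make the caching idea in the comment above concrete, here is a minimal, self-contained sketch of the same memoization idiom. reverseLabels is a simplified stand-in for the unexported domainToReverseLabels, not the real parser; with N names and C constraints, the uncached form performs on the order of N*C parses, while the cached form performs at most about N+C.

package main

import (
	"fmt"
	"slices"
	"strings"
)

// reverseLabels is a simplified stand-in for x509's unexported
// domainToReverseLabels: split a domain into labels and reverse them.
func reverseLabels(domain string) []string {
	labels := strings.Split(domain, ".")
	slices.Reverse(labels)
	return labels
}

// cachedReverseLabels memoizes reverseLabels so that repeated constraint
// checks against the same domain reuse the parsed form.
func cachedReverseLabels(cache map[string][]string, domain string) []string {
	if labels, ok := cache[domain]; ok {
		return labels
	}
	labels := reverseLabels(domain)
	cache[domain] = labels
	return labels
}

func main() {
	cache := map[string][]string{}
	for range 3 { // the second and third calls hit the cache
		fmt.Println(cachedReverseLabels(cache, "foo.example.com"))
	}
}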
+ reversedDomainsCache := map[string][]string{} + reversedConstraintsCache := map[string][]string{} + if (certType == intermediateCertificate || certType == rootCertificate) && c.hasNameConstraints() { toCheck := []*Certificate{} @@ -656,20 +680,20 @@ func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *V if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "email address", name, mailbox, func(parsedName, constraint any) (bool, error) { - return matchEmailConstraint(parsedName.(rfc2821Mailbox), constraint.(string)) + return matchEmailConstraint(parsedName.(rfc2821Mailbox), constraint.(string), reversedDomainsCache, reversedConstraintsCache) }, c.PermittedEmailAddresses, c.ExcludedEmailAddresses); err != nil { return err } case nameTypeDNS: name := string(data) - if _, ok := domainToReverseLabels(name); !ok { + if !domainNameValid(name, false) { return fmt.Errorf("x509: cannot parse dnsName %q", name) } if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "DNS name", name, name, func(parsedName, constraint any) (bool, error) { - return matchDomainConstraint(parsedName.(string), constraint.(string)) + return matchDomainConstraint(parsedName.(string), constraint.(string), reversedDomainsCache, reversedConstraintsCache) }, c.PermittedDNSDomains, c.ExcludedDNSDomains); err != nil { return err } @@ -683,7 +707,7 @@ func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *V if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "URI", name, uri, func(parsedName, constraint any) (bool, error) { - return matchURIConstraint(parsedName.(*url.URL), constraint.(string)) + return matchURIConstraint(parsedName.(*url.URL), constraint.(string), reversedDomainsCache, reversedConstraintsCache) }, c.PermittedURIDomains, c.ExcludedURIDomains); err != nil { return err } @@ -927,7 +951,10 @@ func alreadyInChain(candidate *Certificate, chain []*Certificate) bool { if !bytes.Equal(candidate.RawSubject, cert.RawSubject) { continue } - if !candidate.PublicKey.(pubKeyEqual).Equal(cert.PublicKey) { + // We enforce the canonical encoding of SPKI (by only allowing the + // correct AI paremeter encodings in parseCertificate), so it's safe to + // directly compare the raw bytes. 
+ if !bytes.Equal(candidate.RawSubjectPublicKeyInfo, cert.RawSubjectPublicKeyInfo) { continue } var certSAN *pkix.Extension diff --git a/src/crypto/x509/verify_test.go b/src/crypto/x509/verify_test.go index 7991f49946d587..60a4cea9146adf 100644 --- a/src/crypto/x509/verify_test.go +++ b/src/crypto/x509/verify_test.go @@ -6,6 +6,7 @@ package x509 import ( "crypto" + "crypto/dsa" "crypto/ecdsa" "crypto/elliptic" "crypto/rand" @@ -1351,7 +1352,7 @@ var nameConstraintTests = []struct { func TestNameConstraints(t *testing.T) { for i, test := range nameConstraintTests { - result, err := matchDomainConstraint(test.domain, test.constraint) + result, err := matchDomainConstraint(test.domain, test.constraint, map[string][]string{}, map[string][]string{}) if err != nil && !test.expectError { t.Errorf("unexpected error for test #%d: domain=%s, constraint=%s, err=%s", i, test.domain, test.constraint, err) @@ -3048,3 +3049,129 @@ func TestInvalidPolicyWithAnyKeyUsage(t *testing.T) { t.Fatalf("unexpected error, got %q, want %q", err, expectedErr) } } + +func TestCertificateChainSignedByECDSA(t *testing.T) { + caKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatal(err) + } + root := &Certificate{ + SerialNumber: big.NewInt(1), + Subject: pkix.Name{CommonName: "X"}, + NotBefore: time.Now().Add(-time.Hour), + NotAfter: time.Now().Add(365 * 24 * time.Hour), + IsCA: true, + KeyUsage: KeyUsageCertSign | KeyUsageCRLSign, + BasicConstraintsValid: true, + } + caDER, err := CreateCertificate(rand.Reader, root, root, &caKey.PublicKey, caKey) + if err != nil { + t.Fatal(err) + } + root, err = ParseCertificate(caDER) + if err != nil { + t.Fatal(err) + } + + leafKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + leaf := &Certificate{ + SerialNumber: big.NewInt(42), + Subject: pkix.Name{CommonName: "leaf"}, + NotBefore: time.Now().Add(-10 * time.Minute), + NotAfter: time.Now().Add(24 * time.Hour), + KeyUsage: KeyUsageDigitalSignature, + ExtKeyUsage: []ExtKeyUsage{ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + } + leafDER, err := CreateCertificate(rand.Reader, leaf, root, &leafKey.PublicKey, caKey) + if err != nil { + t.Fatal(err) + } + leaf, err = ParseCertificate(leafDER) + if err != nil { + t.Fatal(err) + } + + inter, err := ParseCertificate(dsaSelfSignedCNX(t)) + if err != nil { + t.Fatal(err) + } + + inters := NewCertPool() + inters.AddCert(root) + inters.AddCert(inter) + + wantErr := "certificate signed by unknown authority" + _, err = leaf.Verify(VerifyOptions{Intermediates: inters, Roots: NewCertPool()}) + if !strings.Contains(err.Error(), wantErr) { + t.Errorf("got %v, want %q", err, wantErr) + } +} + +// dsaSelfSignedCNX produces DER-encoded +// certificate with the properties: +// +// Subject=Issuer=CN=X +// DSA SPKI +// Matching inner/outer signature OIDs +// Dummy ECDSA signature +func dsaSelfSignedCNX(t *testing.T) []byte { + t.Helper() + var params dsa.Parameters + if err := dsa.GenerateParameters(¶ms, rand.Reader, dsa.L1024N160); err != nil { + t.Fatal(err) + } + + var dsaPriv dsa.PrivateKey + dsaPriv.Parameters = params + if err := dsa.GenerateKey(&dsaPriv, rand.Reader); err != nil { + t.Fatal(err) + } + dsaPub := &dsaPriv.PublicKey + + type dsaParams struct{ P, Q, G *big.Int } + paramDER, err := asn1.Marshal(dsaParams{dsaPub.P, dsaPub.Q, dsaPub.G}) + if err != nil { + t.Fatal(err) + } + yDER, err := asn1.Marshal(dsaPub.Y) + if err != nil { + t.Fatal(err) + } + + spki := publicKeyInfo{ + Algorithm: pkix.AlgorithmIdentifier{ + Algorithm: 
oidPublicKeyDSA, + Parameters: asn1.RawValue{FullBytes: paramDER}, + }, + PublicKey: asn1.BitString{Bytes: yDER, BitLength: 8 * len(yDER)}, + } + + rdn := pkix.Name{CommonName: "X"}.ToRDNSequence() + b, err := asn1.Marshal(rdn) + if err != nil { + t.Fatal(err) + } + rawName := asn1.RawValue{FullBytes: b} + + algoIdent := pkix.AlgorithmIdentifier{Algorithm: oidSignatureDSAWithSHA256} + tbs := tbsCertificate{ + Version: 0, + SerialNumber: big.NewInt(1002), + SignatureAlgorithm: algoIdent, + Issuer: rawName, + Validity: validity{NotBefore: time.Now().Add(-time.Hour), NotAfter: time.Now().Add(24 * time.Hour)}, + Subject: rawName, + PublicKey: spki, + } + c := certificate{ + TBSCertificate: tbs, + SignatureAlgorithm: algoIdent, + SignatureValue: asn1.BitString{Bytes: []byte{0}, BitLength: 8}, + } + dsaDER, err := asn1.Marshal(c) + if err != nil { + t.Fatal(err) + } + return dsaDER +} diff --git a/src/debug/buildinfo/buildinfo.go b/src/debug/buildinfo/buildinfo.go index 12e3b750d233a7..d202d5050a2786 100644 --- a/src/debug/buildinfo/buildinfo.go +++ b/src/debug/buildinfo/buildinfo.go @@ -67,7 +67,7 @@ const ( // with module support. func ReadFile(name string) (info *BuildInfo, err error) { defer func() { - if pathErr := (*fs.PathError)(nil); errors.As(err, &pathErr) { + if _, ok := errors.AsType[*fs.PathError](err); ok { err = fmt.Errorf("could not read Go build info: %w", err) } else if err != nil { err = fmt.Errorf("could not read Go build info from %s: %w", name, err) diff --git a/src/debug/elf/file.go b/src/debug/elf/file.go index 50452b5bef45f4..1d56a06c3fb221 100644 --- a/src/debug/elf/file.go +++ b/src/debug/elf/file.go @@ -25,6 +25,7 @@ import ( "internal/saferio" "internal/zstd" "io" + "math" "os" "strings" "unsafe" @@ -830,17 +831,9 @@ func (f *File) applyRelocationsAMD64(dst []byte, rels []byte) error { switch t { case R_X86_64_64: - if rela.Off+8 >= uint64(len(dst)) || rela.Addend < 0 { - continue - } - val64 := sym.Value + uint64(rela.Addend) - f.ByteOrder.PutUint64(dst[rela.Off:rela.Off+8], val64) + putUint(f.ByteOrder, dst, rela.Off, 8, sym.Value, rela.Addend, false) case R_X86_64_32: - if rela.Off+4 >= uint64(len(dst)) || rela.Addend < 0 { - continue - } - val32 := uint32(sym.Value) + uint32(rela.Addend) - f.ByteOrder.PutUint32(dst[rela.Off:rela.Off+4], val32) + putUint(f.ByteOrder, dst, rela.Off, 4, sym.Value, rela.Addend, false) } } @@ -872,12 +865,7 @@ func (f *File) applyRelocations386(dst []byte, rels []byte) error { sym := &symbols[symNo-1] if t == R_386_32 { - if rel.Off+4 >= uint32(len(dst)) { - continue - } - val := f.ByteOrder.Uint32(dst[rel.Off : rel.Off+4]) - val += uint32(sym.Value) - f.ByteOrder.PutUint32(dst[rel.Off:rel.Off+4], val) + putUint(f.ByteOrder, dst, uint64(rel.Off), 4, sym.Value, 0, true) } } @@ -910,12 +898,7 @@ func (f *File) applyRelocationsARM(dst []byte, rels []byte) error { switch t { case R_ARM_ABS32: - if rel.Off+4 >= uint32(len(dst)) { - continue - } - val := f.ByteOrder.Uint32(dst[rel.Off : rel.Off+4]) - val += uint32(sym.Value) - f.ByteOrder.PutUint32(dst[rel.Off:rel.Off+4], val) + putUint(f.ByteOrder, dst, uint64(rel.Off), 4, sym.Value, 0, true) } } @@ -955,17 +938,9 @@ func (f *File) applyRelocationsARM64(dst []byte, rels []byte) error { switch t { case R_AARCH64_ABS64: - if rela.Off+8 >= uint64(len(dst)) || rela.Addend < 0 { - continue - } - val64 := sym.Value + uint64(rela.Addend) - f.ByteOrder.PutUint64(dst[rela.Off:rela.Off+8], val64) + putUint(f.ByteOrder, dst, rela.Off, 8, sym.Value, rela.Addend, false) case R_AARCH64_ABS32: - if 
rela.Off+4 >= uint64(len(dst)) || rela.Addend < 0 { - continue - } - val32 := uint32(sym.Value) + uint32(rela.Addend) - f.ByteOrder.PutUint32(dst[rela.Off:rela.Off+4], val32) + putUint(f.ByteOrder, dst, rela.Off, 4, sym.Value, rela.Addend, false) } } @@ -1001,11 +976,7 @@ func (f *File) applyRelocationsPPC(dst []byte, rels []byte) error { switch t { case R_PPC_ADDR32: - if rela.Off+4 >= uint32(len(dst)) || rela.Addend < 0 { - continue - } - val32 := uint32(sym.Value) + uint32(rela.Addend) - f.ByteOrder.PutUint32(dst[rela.Off:rela.Off+4], val32) + putUint(f.ByteOrder, dst, uint64(rela.Off), 4, sym.Value, 0, false) } } @@ -1041,17 +1012,9 @@ func (f *File) applyRelocationsPPC64(dst []byte, rels []byte) error { switch t { case R_PPC64_ADDR64: - if rela.Off+8 >= uint64(len(dst)) || rela.Addend < 0 { - continue - } - val64 := sym.Value + uint64(rela.Addend) - f.ByteOrder.PutUint64(dst[rela.Off:rela.Off+8], val64) + putUint(f.ByteOrder, dst, rela.Off, 8, sym.Value, rela.Addend, false) case R_PPC64_ADDR32: - if rela.Off+4 >= uint64(len(dst)) || rela.Addend < 0 { - continue - } - val32 := uint32(sym.Value) + uint32(rela.Addend) - f.ByteOrder.PutUint32(dst[rela.Off:rela.Off+4], val32) + putUint(f.ByteOrder, dst, rela.Off, 4, sym.Value, rela.Addend, false) } } @@ -1084,12 +1047,7 @@ func (f *File) applyRelocationsMIPS(dst []byte, rels []byte) error { switch t { case R_MIPS_32: - if rel.Off+4 >= uint32(len(dst)) { - continue - } - val := f.ByteOrder.Uint32(dst[rel.Off : rel.Off+4]) - val += uint32(sym.Value) - f.ByteOrder.PutUint32(dst[rel.Off:rel.Off+4], val) + putUint(f.ByteOrder, dst, uint64(rel.Off), 4, sym.Value, 0, true) } } @@ -1132,17 +1090,9 @@ func (f *File) applyRelocationsMIPS64(dst []byte, rels []byte) error { switch t { case R_MIPS_64: - if rela.Off+8 >= uint64(len(dst)) || rela.Addend < 0 { - continue - } - val64 := sym.Value + uint64(rela.Addend) - f.ByteOrder.PutUint64(dst[rela.Off:rela.Off+8], val64) + putUint(f.ByteOrder, dst, rela.Off, 8, sym.Value, rela.Addend, false) case R_MIPS_32: - if rela.Off+4 >= uint64(len(dst)) || rela.Addend < 0 { - continue - } - val32 := uint32(sym.Value) + uint32(rela.Addend) - f.ByteOrder.PutUint32(dst[rela.Off:rela.Off+4], val32) + putUint(f.ByteOrder, dst, rela.Off, 4, sym.Value, rela.Addend, false) } } @@ -1180,17 +1130,9 @@ func (f *File) applyRelocationsLOONG64(dst []byte, rels []byte) error { switch t { case R_LARCH_64: - if rela.Off+8 >= uint64(len(dst)) || rela.Addend < 0 { - continue - } - val64 := sym.Value + uint64(rela.Addend) - f.ByteOrder.PutUint64(dst[rela.Off:rela.Off+8], val64) + putUint(f.ByteOrder, dst, rela.Off, 8, sym.Value, rela.Addend, false) case R_LARCH_32: - if rela.Off+4 >= uint64(len(dst)) || rela.Addend < 0 { - continue - } - val32 := uint32(sym.Value) + uint32(rela.Addend) - f.ByteOrder.PutUint32(dst[rela.Off:rela.Off+4], val32) + putUint(f.ByteOrder, dst, rela.Off, 4, sym.Value, rela.Addend, false) } } @@ -1226,17 +1168,9 @@ func (f *File) applyRelocationsRISCV64(dst []byte, rels []byte) error { switch t { case R_RISCV_64: - if rela.Off+8 >= uint64(len(dst)) || rela.Addend < 0 { - continue - } - val64 := sym.Value + uint64(rela.Addend) - f.ByteOrder.PutUint64(dst[rela.Off:rela.Off+8], val64) + putUint(f.ByteOrder, dst, rela.Off, 8, sym.Value, rela.Addend, false) case R_RISCV_32: - if rela.Off+4 >= uint64(len(dst)) || rela.Addend < 0 { - continue - } - val32 := uint32(sym.Value) + uint32(rela.Addend) - f.ByteOrder.PutUint32(dst[rela.Off:rela.Off+4], val32) + putUint(f.ByteOrder, dst, rela.Off, 4, sym.Value, rela.Addend, 
false) } } @@ -1272,17 +1206,9 @@ func (f *File) applyRelocationss390x(dst []byte, rels []byte) error { switch t { case R_390_64: - if rela.Off+8 >= uint64(len(dst)) || rela.Addend < 0 { - continue - } - val64 := sym.Value + uint64(rela.Addend) - f.ByteOrder.PutUint64(dst[rela.Off:rela.Off+8], val64) + putUint(f.ByteOrder, dst, rela.Off, 8, sym.Value, rela.Addend, false) case R_390_32: - if rela.Off+4 >= uint64(len(dst)) || rela.Addend < 0 { - continue - } - val32 := uint32(sym.Value) + uint32(rela.Addend) - f.ByteOrder.PutUint32(dst[rela.Off:rela.Off+4], val32) + putUint(f.ByteOrder, dst, rela.Off, 4, sym.Value, rela.Addend, false) } } @@ -1318,17 +1244,10 @@ func (f *File) applyRelocationsSPARC64(dst []byte, rels []byte) error { switch t { case R_SPARC_64, R_SPARC_UA64: - if rela.Off+8 >= uint64(len(dst)) || rela.Addend < 0 { - continue - } - val64 := sym.Value + uint64(rela.Addend) - f.ByteOrder.PutUint64(dst[rela.Off:rela.Off+8], val64) + putUint(f.ByteOrder, dst, rela.Off, 8, sym.Value, rela.Addend, false) + case R_SPARC_32, R_SPARC_UA32: - if rela.Off+4 >= uint64(len(dst)) || rela.Addend < 0 { - continue - } - val32 := uint32(sym.Value) + uint32(rela.Addend) - f.ByteOrder.PutUint32(dst[rela.Off:rela.Off+4], val32) + putUint(f.ByteOrder, dst, rela.Off, 4, sym.Value, rela.Addend, false) } } @@ -1903,3 +1822,38 @@ type nobitsSectionReader struct{} func (*nobitsSectionReader) ReadAt(p []byte, off int64) (n int, err error) { return 0, errors.New("unexpected read from SHT_NOBITS section") } + +// putUint writes a relocation to slice +// at offset start of length length (4 or 8 bytes), +// adding sym+addend to the existing value if readUint is true, +// or just writing sym+addend if readUint is false. +// If the write would extend beyond the end of slice, putUint does nothing. +// If the addend is negative, putUint does nothing. +// If the addition would overflow, putUint does nothing. 
+func putUint(byteOrder binary.ByteOrder, slice []byte, start, length, sym uint64, addend int64, readUint bool) { + if start+length > uint64(len(slice)) || math.MaxUint64-start < length { + return + } + if addend < 0 { + return + } + + s := slice[start : start+length] + + switch length { + case 4: + ae := uint32(addend) + if readUint { + ae += byteOrder.Uint32(s) + } + byteOrder.PutUint32(s, uint32(sym)+ae) + case 8: + ae := uint64(addend) + if readUint { + ae += byteOrder.Uint64(s) + } + byteOrder.PutUint64(s, sym+ae) + default: + panic("can't happen") + } +} diff --git a/src/encoding/asn1/asn1.go b/src/encoding/asn1/asn1.go index 0b64f06d368b53..f4be515b98ef1c 100644 --- a/src/encoding/asn1/asn1.go +++ b/src/encoding/asn1/asn1.go @@ -22,6 +22,7 @@ package asn1 import ( "errors" "fmt" + "internal/saferio" "math" "math/big" "reflect" @@ -666,10 +667,17 @@ func parseSequenceOf(bytes []byte, sliceType reflect.Type, elemType reflect.Type offset += t.length numElements++ } - ret = reflect.MakeSlice(sliceType, numElements, numElements) + elemSize := uint64(elemType.Size()) + safeCap := saferio.SliceCapWithSize(elemSize, uint64(numElements)) + if safeCap < 0 { + err = SyntaxError{fmt.Sprintf("%s slice too big: %d elements of %d bytes", elemType.Kind(), numElements, elemSize)} + return + } + ret = reflect.MakeSlice(sliceType, 0, safeCap) params := fieldParameters{} offset := 0 for i := 0; i < numElements; i++ { + ret = reflect.Append(ret, reflect.Zero(elemType)) offset, err = parseField(ret.Index(i), bytes, offset, params) if err != nil { return diff --git a/src/encoding/asn1/asn1_test.go b/src/encoding/asn1/asn1_test.go index 0597740bd5e5ec..41cc0ba50ec304 100644 --- a/src/encoding/asn1/asn1_test.go +++ b/src/encoding/asn1/asn1_test.go @@ -7,10 +7,12 @@ package asn1 import ( "bytes" "encoding/hex" + "errors" "fmt" "math" "math/big" "reflect" + "runtime" "strings" "testing" "time" @@ -1216,3 +1218,39 @@ func TestImplicitTypeRoundtrip(t *testing.T) { t.Fatalf("Unexpected diff after roundtripping struct\na: %#v\nb: %#v", a, b) } } + +func TestParsingMemoryConsumption(t *testing.T) { + // Craft a syntatically valid, but empty, ~10 MB DER bomb. A successful + // unmarshal of this bomb should yield ~280 MB. However, the parsing should + // fail due to the empty content; and, in such cases, we want to make sure + // that we do not unnecessarily allocate memories. + derBomb := make([]byte, 10_000_000) + for i := range derBomb { + derBomb[i] = 0x30 + } + derBomb = append([]byte{0x30, 0x83, 0x98, 0x96, 0x80}, derBomb...) + + var m runtime.MemStats + runtime.GC() + runtime.ReadMemStats(&m) + memBefore := m.TotalAlloc + + var out []struct { + Id []int + Critical bool `asn1:"optional"` + Value []byte + } + _, err := Unmarshal(derBomb, &out) + if !errors.As(err, &SyntaxError{}) { + t.Fatalf("Incorrect error result: want (%v), but got (%v) instead", &SyntaxError{}, err) + } + + runtime.ReadMemStats(&m) + memDiff := m.TotalAlloc - memBefore + + // Ensure that the memory allocated does not exceed 10<<21 (~20 MB) when + // the parsing fails. 
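// For reference, 10<<21 is 10 * 2^21 = 20,971,520 bytes, i.e. roughly 20 MiB.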
+ if memDiff > 10<<21 { + t.Errorf("Too much memory allocated while parsing DER: %v MiB", memDiff/1024/1024) + } +} diff --git a/src/encoding/base32/base32_test.go b/src/encoding/base32/base32_test.go index f5d3c49e38fbef..6f8d564def3c86 100644 --- a/src/encoding/base32/base32_test.go +++ b/src/encoding/base32/base32_test.go @@ -709,7 +709,6 @@ func TestBufferedDecodingPadding(t *testing.T) { } for _, testcase := range testcases { - testcase := testcase pr, pw := io.Pipe() go func() { for _, chunk := range testcase.chunks { diff --git a/src/encoding/json/decode_test.go b/src/encoding/json/decode_test.go index d12495f90b7141..0b26b8eb91813b 100644 --- a/src/encoding/json/decode_test.go +++ b/src/encoding/json/decode_test.go @@ -1237,6 +1237,62 @@ var unmarshalTests = []struct { out: (chan int)(nil), err: &UnmarshalTypeError{Value: "number", Type: reflect.TypeFor[chan int](), Offset: 1}, }, + + // #75619 + { + CaseName: Name("QuotedInt/GoSyntax"), + in: `{"X": "-0000123"}`, + ptr: new(struct { + X int64 `json:",string"` + }), + out: struct { + X int64 `json:",string"` + }{-123}, + }, + { + CaseName: Name("QuotedInt/Invalid"), + in: `{"X": "123 "}`, + ptr: new(struct { + X int64 `json:",string"` + }), + err: &UnmarshalTypeError{Value: "number 123 ", Type: reflect.TypeFor[int64](), Field: "X", Offset: int64(len(`{"X": "123 "`))}, + }, + { + CaseName: Name("QuotedUint/GoSyntax"), + in: `{"X": "0000123"}`, + ptr: new(struct { + X uint64 `json:",string"` + }), + out: struct { + X uint64 `json:",string"` + }{123}, + }, + { + CaseName: Name("QuotedUint/Invalid"), + in: `{"X": "0x123"}`, + ptr: new(struct { + X uint64 `json:",string"` + }), + err: &UnmarshalTypeError{Value: "number 0x123", Type: reflect.TypeFor[uint64](), Field: "X", Offset: int64(len(`{"X": "0x123"`))}, + }, + { + CaseName: Name("QuotedFloat/GoSyntax"), + in: `{"X": "0x1_4p-2"}`, + ptr: new(struct { + X float64 `json:",string"` + }), + out: struct { + X float64 `json:",string"` + }{0x1_4p-2}, + }, + { + CaseName: Name("QuotedFloat/Invalid"), + in: `{"X": "1.5e1_"}`, + ptr: new(struct { + X float64 `json:",string"` + }), + err: &UnmarshalTypeError{Value: "number 1.5e1_", Type: reflect.TypeFor[float64](), Field: "X", Offset: int64(len(`{"X": "1.5e1_"`))}, + }, } func TestMarshal(t *testing.T) { diff --git a/src/encoding/json/internal/jsonopts/options.go b/src/encoding/json/internal/jsonopts/options.go index e4c3f47d36adc8..39da81b34549ec 100644 --- a/src/encoding/json/internal/jsonopts/options.go +++ b/src/encoding/json/internal/jsonopts/options.go @@ -48,16 +48,16 @@ type ArshalValues struct { // DefaultOptionsV2 is the set of all options that define default v2 behavior. var DefaultOptionsV2 = Struct{ Flags: jsonflags.Flags{ - Presence: uint64(jsonflags.AllFlags & ^jsonflags.WhitespaceFlags), - Values: uint64(0), + Presence: uint64(jsonflags.DefaultV1Flags), + Values: uint64(0), // all flags in DefaultV1Flags are false }, } // DefaultOptionsV1 is the set of all options that define default v1 behavior. 
var DefaultOptionsV1 = Struct{ Flags: jsonflags.Flags{ - Presence: uint64(jsonflags.AllFlags & ^jsonflags.WhitespaceFlags), - Values: uint64(jsonflags.DefaultV1Flags), + Presence: uint64(jsonflags.DefaultV1Flags), + Values: uint64(jsonflags.DefaultV1Flags), // all flags in DefaultV1Flags are true }, } diff --git a/src/encoding/json/internal/jsonopts/options_test.go b/src/encoding/json/internal/jsonopts/options_test.go index ebfaf05c833e05..caa686e4f0d579 100644 --- a/src/encoding/json/internal/jsonopts/options_test.go +++ b/src/encoding/json/internal/jsonopts/options_test.go @@ -200,6 +200,9 @@ func TestGet(t *testing.T) { if v, ok := json.GetOption(opts, json.WithUnmarshalers); v != nil || ok { t.Errorf(`GetOption(..., WithUnmarshalers) = (%v, %v), want (nil, false)`, v, ok) } + if v, ok := json.GetOption(json.DefaultOptionsV2(), json.WithMarshalers); v != nil || ok { + t.Errorf(`GetOption(..., WithMarshalers) = (%v, %v), want (nil, false)`, v, ok) + } } var sink struct { diff --git a/src/encoding/json/jsontext/coder_test.go b/src/encoding/json/jsontext/coder_test.go index 4a9efb3b8f97a5..8602e3e7fff286 100644 --- a/src/encoding/json/jsontext/coder_test.go +++ b/src/encoding/json/jsontext/coder_test.go @@ -486,7 +486,7 @@ func testCoderInterleaved(t *testing.T, where jsontest.CasePos, modeName string, // Retry as a ReadToken call. expectError := dec.PeekKind() == '}' || dec.PeekKind() == ']' if expectError { - if !errors.As(err, new(*SyntacticError)) { + if _, ok := errors.AsType[*SyntacticError](err); !ok { t.Fatalf("%s: Decoder.ReadToken error is %T, want %T", where, err, new(SyntacticError)) } tickTock = !tickTock diff --git a/src/encoding/json/jsontext/decode.go b/src/encoding/json/jsontext/decode.go index f505de44684a51..511832f2ae0879 100644 --- a/src/encoding/json/jsontext/decode.go +++ b/src/encoding/json/jsontext/decode.go @@ -792,6 +792,12 @@ func (d *decoderState) CheckNextValue(last bool) error { return nil } +// AtEOF reports whether the decoder is at EOF. +func (d *decoderState) AtEOF() bool { + _, err := d.consumeWhitespace(d.prevEnd) + return err == io.ErrUnexpectedEOF +} + // CheckEOF verifies that the input has no more data. func (d *decoderState) CheckEOF() error { return d.checkEOF(d.prevEnd) diff --git a/src/encoding/json/jsontext/fuzz_test.go b/src/encoding/json/jsontext/fuzz_test.go index 60d16b9e27805c..3ad181d43416b8 100644 --- a/src/encoding/json/jsontext/fuzz_test.go +++ b/src/encoding/json/jsontext/fuzz_test.go @@ -53,9 +53,10 @@ func FuzzCoder(f *testing.F) { } else { val, err := dec.ReadValue() if err != nil { - expectError := dec.PeekKind() == '}' || dec.PeekKind() == ']' - if expectError && errors.As(err, new(*SyntacticError)) { - continue + if expectError := dec.PeekKind() == '}' || dec.PeekKind() == ']'; expectError { + if _, ok := errors.AsType[*SyntacticError](err); ok { + continue + } } if err == io.EOF { break diff --git a/src/encoding/json/jsontext/state.go b/src/encoding/json/jsontext/state.go index d214fd5190325e..538dfe32bfa9d2 100644 --- a/src/encoding/json/jsontext/state.go +++ b/src/encoding/json/jsontext/state.go @@ -24,8 +24,7 @@ import ( // The name of a duplicate JSON object member can be extracted as: // // err := ... 
-// var serr jsontext.SyntacticError -// if errors.As(err, &serr) && serr.Err == jsontext.ErrDuplicateName { +// if serr, ok := errors.AsType[jsontext.SyntacticError](err); ok && serr.Err == jsontext.ErrDuplicateName { // ptr := serr.JSONPointer // JSON pointer to duplicate name // name := ptr.LastToken() // duplicate name itself // ... diff --git a/src/encoding/json/stream_test.go b/src/encoding/json/stream_test.go index 9e5d48d39d2fcf..0e937cfaa13c78 100644 --- a/src/encoding/json/stream_test.go +++ b/src/encoding/json/stream_test.go @@ -557,3 +557,67 @@ func TestTokenTruncation(t *testing.T) { } } } + +func TestDecoderInputOffset(t *testing.T) { + const input = ` [ + [ ] , [ "one" ] , [ "one" , "two" ] , + { } , { "alpha" : "bravo" } , { "alpha" : "bravo" , "fizz" : "buzz" } + ] ` + wantOffsets := []int64{ + 0, 1, 2, 5, 6, 7, 8, 9, 12, 13, 18, 19, 20, 21, 24, 25, 30, 31, + 38, 39, 40, 41, 46, 47, 48, 49, 52, 53, 60, 61, 70, 71, 72, 73, + 76, 77, 84, 85, 94, 95, 103, 104, 112, 113, 114, 116, 117, 117, + 117, 117, + } + wantMores := []bool{ + true, true, false, true, true, false, true, true, true, false, + true, false, true, true, true, false, true, true, true, true, + true, false, false, false, false, + } + + d := NewDecoder(strings.NewReader(input)) + checkOffset := func() { + t.Helper() + got := d.InputOffset() + if len(wantOffsets) == 0 { + t.Fatalf("InputOffset = %d, want nil", got) + } + want := wantOffsets[0] + if got != want { + t.Fatalf("InputOffset = %d, want %d", got, want) + } + wantOffsets = wantOffsets[1:] + } + checkMore := func() { + t.Helper() + got := d.More() + if len(wantMores) == 0 { + t.Fatalf("More = %v, want nil", got) + } + want := wantMores[0] + if got != want { + t.Fatalf("More = %v, want %v", got, want) + } + wantMores = wantMores[1:] + } + checkOffset() + checkMore() + checkOffset() + for { + if _, err := d.Token(); err == io.EOF { + break + } else if err != nil { + t.Fatalf("Token error: %v", err) + } + checkOffset() + checkMore() + checkOffset() + } + checkOffset() + checkMore() + checkOffset() + + if len(wantOffsets)+len(wantMores) > 0 { + t.Fatal("unconsumed testdata") + } +} diff --git a/src/encoding/json/v2/arshal.go b/src/encoding/json/v2/arshal.go index 6b4bcb0c74cf7c..573d26567f3368 100644 --- a/src/encoding/json/v2/arshal.go +++ b/src/encoding/json/v2/arshal.go @@ -440,8 +440,9 @@ func UnmarshalRead(in io.Reader, out any, opts ...Options) (err error) { // Unlike [Unmarshal] and [UnmarshalRead], decode options are ignored because // they must have already been specified on the provided [jsontext.Decoder]. // -// The input may be a stream of one or more JSON values, +// The input may be a stream of zero or more JSON values, // where this only unmarshals the next JSON value in the stream. +// If there are no more top-level JSON values, it reports [io.EOF]. // The output must be a non-nil pointer. // See [Unmarshal] for details about the conversion of JSON into a Go value. 
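Since the doc comment above now spells out the io.EOF behavior, a short, hedged sketch of the intended calling pattern for a stream of values may help. It assumes the in-tree encoding/json/v2 and encoding/json/jsontext import paths used throughout this patch (outside this tree the packages may live elsewhere or require a GOEXPERIMENT).

package main

import (
	"encoding/json/jsontext"
	"encoding/json/v2"
	"fmt"
	"io"
	"log"
	"strings"
)

func main() {
	// A stream of zero or more top-level JSON values.
	dec := jsontext.NewDecoder(strings.NewReader(`{"Name":"a"} {"Name":"b"}`))
	for {
		var v struct{ Name string }
		err := json.UnmarshalDecode(dec, &v)
		if err == io.EOF {
			return // no more top-level values in the stream
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(v.Name)
	}
}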
func UnmarshalDecode(in *jsontext.Decoder, out any, opts ...Options) (err error) { diff --git a/src/encoding/json/v2/arshal_default.go b/src/encoding/json/v2/arshal_default.go index c2307fa31d7fcc..078d345e1439c2 100644 --- a/src/encoding/json/v2/arshal_default.go +++ b/src/encoding/json/v2/arshal_default.go @@ -474,10 +474,21 @@ func makeIntArshaler(t reflect.Type) *arshaler { break } val = jsonwire.UnquoteMayCopy(val, flags.IsVerbatim()) - if uo.Flags.Get(jsonflags.StringifyWithLegacySemantics) && string(val) == "null" { - if !uo.Flags.Get(jsonflags.MergeWithLegacySemantics) { - va.SetInt(0) + if uo.Flags.Get(jsonflags.StringifyWithLegacySemantics) { + // For historical reasons, v1 parsed a quoted number + // according to the Go syntax and permitted a quoted null. + // See https://go.dev/issue/75619 + n, err := strconv.ParseInt(string(val), 10, bits) + if err != nil { + if string(val) == "null" { + if !uo.Flags.Get(jsonflags.MergeWithLegacySemantics) { + va.SetInt(0) + } + return nil + } + return newUnmarshalErrorAfterWithValue(dec, t, errors.Unwrap(err)) } + va.SetInt(n) return nil } fallthrough @@ -561,10 +572,21 @@ func makeUintArshaler(t reflect.Type) *arshaler { break } val = jsonwire.UnquoteMayCopy(val, flags.IsVerbatim()) - if uo.Flags.Get(jsonflags.StringifyWithLegacySemantics) && string(val) == "null" { - if !uo.Flags.Get(jsonflags.MergeWithLegacySemantics) { - va.SetUint(0) + if uo.Flags.Get(jsonflags.StringifyWithLegacySemantics) { + // For historical reasons, v1 parsed a quoted number + // according to the Go syntax and permitted a quoted null. + // See https://go.dev/issue/75619 + n, err := strconv.ParseUint(string(val), 10, bits) + if err != nil { + if string(val) == "null" { + if !uo.Flags.Get(jsonflags.MergeWithLegacySemantics) { + va.SetUint(0) + } + return nil + } + return newUnmarshalErrorAfterWithValue(dec, t, errors.Unwrap(err)) } + va.SetUint(n) return nil } fallthrough @@ -671,10 +693,21 @@ func makeFloatArshaler(t reflect.Type) *arshaler { if !stringify { break } - if uo.Flags.Get(jsonflags.StringifyWithLegacySemantics) && string(val) == "null" { - if !uo.Flags.Get(jsonflags.MergeWithLegacySemantics) { - va.SetFloat(0) + if uo.Flags.Get(jsonflags.StringifyWithLegacySemantics) { + // For historical reasons, v1 parsed a quoted number + // according to the Go syntax and permitted a quoted null. 
+ // See https://go.dev/issue/75619 + n, err := strconv.ParseFloat(string(val), bits) + if err != nil { + if string(val) == "null" { + if !uo.Flags.Get(jsonflags.MergeWithLegacySemantics) { + va.SetFloat(0) + } + return nil + } + return newUnmarshalErrorAfterWithValue(dec, t, errors.Unwrap(err)) } + va.SetFloat(n) return nil } if n, err := jsonwire.ConsumeNumber(val); n != len(val) || err != nil { diff --git a/src/encoding/json/v2/arshal_funcs.go b/src/encoding/json/v2/arshal_funcs.go index 673caf3c37693d..28916af948db6e 100644 --- a/src/encoding/json/v2/arshal_funcs.go +++ b/src/encoding/json/v2/arshal_funcs.go @@ -9,6 +9,7 @@ package json import ( "errors" "fmt" + "io" "reflect" "sync" @@ -306,6 +307,9 @@ func UnmarshalFromFunc[T any](fn func(*jsontext.Decoder, T) error) *Unmarshalers fnc: func(dec *jsontext.Decoder, va addressableValue, uo *jsonopts.Struct) error { xd := export.Decoder(dec) prevDepth, prevLength := xd.Tokens.DepthLength() + if prevDepth == 1 && xd.AtEOF() { + return io.EOF // check EOF early to avoid fn reporting an EOF + } xd.Flags.Set(jsonflags.WithinArshalCall | 1) v, _ := reflect.TypeAssert[T](va.castTo(t)) err := fn(dec, v) diff --git a/src/encoding/json/v2/arshal_methods.go b/src/encoding/json/v2/arshal_methods.go index 2decd144dbeaf9..1621eadc080763 100644 --- a/src/encoding/json/v2/arshal_methods.go +++ b/src/encoding/json/v2/arshal_methods.go @@ -9,6 +9,7 @@ package json import ( "encoding" "errors" + "io" "reflect" "encoding/json/internal" @@ -302,6 +303,9 @@ func makeMethodArshaler(fncs *arshaler, t reflect.Type) *arshaler { } xd := export.Decoder(dec) prevDepth, prevLength := xd.Tokens.DepthLength() + if prevDepth == 1 && xd.AtEOF() { + return io.EOF // check EOF early to avoid fn reporting an EOF + } xd.Flags.Set(jsonflags.WithinArshalCall | 1) unmarshaler, _ := reflect.TypeAssert[UnmarshalerFrom](va.Addr()) err := unmarshaler.UnmarshalJSONFrom(dec) diff --git a/src/encoding/json/v2/arshal_test.go b/src/encoding/json/v2/arshal_test.go index 75093345a3b93e..dc15c5a5f531b5 100644 --- a/src/encoding/json/v2/arshal_test.go +++ b/src/encoding/json/v2/arshal_test.go @@ -7833,7 +7833,8 @@ func TestUnmarshal(t *testing.T) { })), wantErr: EU(errSomeError).withType(0, T[unmarshalJSONv2Func]()), }, { - name: jsontest.Name("Methods/Invalid/JSONv2/TooFew"), + name: jsontest.Name("Methods/Invalid/JSONv2/TooFew"), + inBuf: `{}`, inVal: addr(unmarshalJSONv2Func(func(*jsontext.Decoder) error { return nil // do nothing })), @@ -9234,6 +9235,43 @@ func TestUnmarshalReuse(t *testing.T) { }) } +type unmarshalerEOF struct{} + +func (unmarshalerEOF) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + return io.EOF // should be wrapped and converted by Unmarshal to io.ErrUnexpectedEOF +} + +// TestUnmarshalEOF verifies that io.EOF is only ever returned by +// UnmarshalDecode for a top-level value. 
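A small sketch (not part of the patch, assuming GOEXPERIMENT=jsonv2) of the contract that the test below verifies: only UnmarshalDecode ever reports a bare io.EOF, and only when the stream has no further top-level value; Unmarshal and UnmarshalRead surface truncation as io.ErrUnexpectedEOF instead.

	package main

	import (
		"errors"
		"fmt"
		"io"
		"strings"

		"encoding/json/jsontext"
		json "encoding/json/v2"
	)

	func main() {
		var v struct{}

		// Unmarshal never returns a bare io.EOF; empty or truncated input
		// is reported as (wrapping) io.ErrUnexpectedEOF.
		err := json.Unmarshal([]byte(``), &v)
		fmt.Println(errors.Is(err, io.ErrUnexpectedEOF)) // expected: true

		// UnmarshalDecode reports io.EOF, but only when the stream has
		// no further top-level values.
		dec := jsontext.NewDecoder(strings.NewReader(``))
		fmt.Println(json.UnmarshalDecode(dec, &v) == io.EOF) // expected: true
	}
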
+func TestUnmarshalEOF(t *testing.T) { + opts := WithUnmarshalers(UnmarshalFromFunc(func(dec *jsontext.Decoder, _ *struct{}) error { + return io.EOF // should be wrapped and converted by Unmarshal to io.ErrUnexpectedEOF + })) + + for _, in := range []string{"", "[", "[null", "[null]"} { + for _, newOut := range []func() any{ + func() any { return new(unmarshalerEOF) }, + func() any { return new([]unmarshalerEOF) }, + func() any { return new(struct{}) }, + func() any { return new([]struct{}) }, + } { + wantErr := io.ErrUnexpectedEOF + if gotErr := Unmarshal([]byte(in), newOut(), opts); !errors.Is(gotErr, wantErr) { + t.Errorf("Unmarshal = %v, want %v", gotErr, wantErr) + } + if gotErr := UnmarshalRead(strings.NewReader(in), newOut(), opts); !errors.Is(gotErr, wantErr) { + t.Errorf("Unmarshal = %v, want %v", gotErr, wantErr) + } + switch gotErr := UnmarshalDecode(jsontext.NewDecoder(strings.NewReader(in)), newOut(), opts); { + case in != "" && !errors.Is(gotErr, wantErr): + t.Errorf("Unmarshal = %v, want %v", gotErr, wantErr) + case in == "" && gotErr != io.EOF: + t.Errorf("Unmarshal = %v, want %v", gotErr, io.EOF) + } + } + } +} + type ReaderFunc func([]byte) (int, error) func (f ReaderFunc) Read(b []byte) (int, error) { return f(b) } diff --git a/src/encoding/json/v2/errors.go b/src/encoding/json/v2/errors.go index 9485d7b527793c..4421f8b821cb5c 100644 --- a/src/encoding/json/v2/errors.go +++ b/src/encoding/json/v2/errors.go @@ -10,6 +10,7 @@ import ( "cmp" "errors" "fmt" + "io" "reflect" "strconv" "strings" @@ -28,8 +29,7 @@ import ( // The name of an unknown JSON object member can be extracted as: // // err := ... -// var serr json.SemanticError -// if errors.As(err, &serr) && serr.Err == json.ErrUnknownName { +// if serr, ok := errors.AsType[json.SemanticError](err); ok && serr.Err == json.ErrUnknownName { // ptr := serr.JSONPointer // JSON pointer to unknown name // name := ptr.LastToken() // unknown name itself // ... @@ -119,7 +119,7 @@ func newInvalidFormatError(c coder, t reflect.Type) error { // newMarshalErrorBefore wraps err in a SemanticError assuming that e // is positioned right before the next token or value, which causes an error. func newMarshalErrorBefore(e *jsontext.Encoder, t reflect.Type, err error) error { - return &SemanticError{action: "marshal", GoType: t, Err: err, + return &SemanticError{action: "marshal", GoType: t, Err: toUnexpectedEOF(err), ByteOffset: e.OutputOffset() + int64(export.Encoder(e).CountNextDelimWhitespace()), JSONPointer: jsontext.Pointer(export.Encoder(e).AppendStackPointer(nil, +1))} } @@ -135,7 +135,7 @@ func newUnmarshalErrorBefore(d *jsontext.Decoder, t reflect.Type, err error) err if export.Decoder(d).Flags.Get(jsonflags.ReportErrorsWithLegacySemantics) { k = d.PeekKind() } - return &SemanticError{action: "unmarshal", GoType: t, Err: err, + return &SemanticError{action: "unmarshal", GoType: t, Err: toUnexpectedEOF(err), ByteOffset: d.InputOffset() + int64(export.Decoder(d).CountNextDelimWhitespace()), JSONPointer: jsontext.Pointer(export.Decoder(d).AppendStackPointer(nil, +1)), JSONKind: k} @@ -158,7 +158,7 @@ func newUnmarshalErrorBeforeWithSkipping(d *jsontext.Decoder, t reflect.Type, er // is positioned right after the previous token or value, which caused an error. 
func newUnmarshalErrorAfter(d *jsontext.Decoder, t reflect.Type, err error) error { tokOrVal := export.Decoder(d).PreviousTokenOrValue() - return &SemanticError{action: "unmarshal", GoType: t, Err: err, + return &SemanticError{action: "unmarshal", GoType: t, Err: toUnexpectedEOF(err), ByteOffset: d.InputOffset() - int64(len(tokOrVal)), JSONPointer: jsontext.Pointer(export.Decoder(d).AppendStackPointer(nil, -1)), JSONKind: jsontext.Value(tokOrVal).Kind()} @@ -207,6 +207,7 @@ func newSemanticErrorWithPosition(c coder, t reflect.Type, prevDepth int, prevLe if serr == nil { serr = &SemanticError{Err: err} } + serr.Err = toUnexpectedEOF(serr.Err) var currDepth int var currLength int64 var coderState interface{ AppendStackPointer([]byte, int) []byte } @@ -433,3 +434,11 @@ func newDuplicateNameError(ptr jsontext.Pointer, quotedName []byte, offset int64 Err: jsontext.ErrDuplicateName, } } + +// toUnexpectedEOF converts [io.EOF] to [io.ErrUnexpectedEOF]. +func toUnexpectedEOF(err error) error { + if err == io.EOF { + return io.ErrUnexpectedEOF + } + return err +} diff --git a/src/encoding/json/v2/example_test.go b/src/encoding/json/v2/example_test.go index c6bf0a864d8385..6d539bbd36b529 100644 --- a/src/encoding/json/v2/example_test.go +++ b/src/encoding/json/v2/example_test.go @@ -371,8 +371,7 @@ func Example_unknownMembers() { // Specifying RejectUnknownMembers causes Unmarshal // to reject the presence of any unknown members. err = json.Unmarshal([]byte(input), new(Color), json.RejectUnknownMembers(true)) - var serr *json.SemanticError - if errors.As(err, &serr) && serr.Err == json.ErrUnknownName { + if serr, ok := errors.AsType[*json.SemanticError](err); ok && serr.Err == json.ErrUnknownName { fmt.Println("Unmarshal error:", serr.Err, strconv.Quote(serr.JSONPointer.LastToken())) } diff --git a/src/encoding/json/v2/options.go b/src/encoding/json/v2/options.go index 0942d2d30784f9..9685f20f9f805f 100644 --- a/src/encoding/json/v2/options.go +++ b/src/encoding/json/v2/options.go @@ -97,9 +97,8 @@ func GetOption[T any](opts Options, setter func(T) Options) (T, bool) { } // DefaultOptionsV2 is the full set of all options that define v2 semantics. -// It is equivalent to all options under [Options], [encoding/json.Options], -// and [encoding/json/jsontext.Options] being set to false or the zero value, -// except for the options related to whitespace formatting. +// It is equivalent to the set of options in [encoding/json.DefaultOptionsV1] +// all being set to false. All other options are not present. func DefaultOptionsV2() Options { return &jsonopts.DefaultOptionsV2 } diff --git a/src/encoding/json/v2_decode.go b/src/encoding/json/v2_decode.go index 1041ec7ee402d4..f17d7ebccada0a 100644 --- a/src/encoding/json/v2_decode.go +++ b/src/encoding/json/v2_decode.go @@ -14,6 +14,7 @@ import ( "fmt" "reflect" "strconv" + "strings" "encoding/json/internal/jsonwire" "encoding/json/jsontext" @@ -119,7 +120,20 @@ type UnmarshalTypeError struct { func (e *UnmarshalTypeError) Error() string { var s string if e.Struct != "" || e.Field != "" { - s = "json: cannot unmarshal " + e.Value + " into Go struct field " + e.Struct + "." + e.Field + " of type " + e.Type.String() + // The design of UnmarshalTypeError overly assumes a struct-based + // Go representation for the JSON value. + // The logic in jsontext represents paths using a JSON Pointer, + // which is agnostic to the Go type system. + // Trying to convert a JSON Pointer into a UnmarshalTypeError.Field + // is difficult. 
As a heuristic, if the last path token looks like + // an index into a JSON array (e.g., ".foo.bar.0"), + // avoid the phrase "Go struct field ". + intoWhat := "Go struct field " + i := strings.LastIndexByte(e.Field, '.') + len(".") + if len(e.Field[i:]) > 0 && strings.TrimRight(e.Field[i:], "0123456789") == "" { + intoWhat = "" // likely a Go slice or array + } + s = "json: cannot unmarshal " + e.Value + " into " + intoWhat + e.Struct + "." + e.Field + " of type " + e.Type.String() } else { s = "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String() } diff --git a/src/encoding/json/v2_decode_test.go b/src/encoding/json/v2_decode_test.go index f9b0a60f47cfd5..26b4448721e4ec 100644 --- a/src/encoding/json/v2_decode_test.go +++ b/src/encoding/json/v2_decode_test.go @@ -1243,6 +1243,62 @@ var unmarshalTests = []struct { out: (chan int)(nil), err: &UnmarshalTypeError{Value: "number", Type: reflect.TypeFor[chan int]()}, }, + + // #75619 + { + CaseName: Name("QuotedInt/GoSyntax"), + in: `{"X": "-0000123"}`, + ptr: new(struct { + X int64 `json:",string"` + }), + out: struct { + X int64 `json:",string"` + }{-123}, + }, + { + CaseName: Name("QuotedInt/Invalid"), + in: `{"X": "123 "}`, + ptr: new(struct { + X int64 `json:",string"` + }), + err: &UnmarshalTypeError{Value: "number 123 ", Type: reflect.TypeFor[int64](), Field: "X", Offset: int64(len(`{"X": `))}, + }, + { + CaseName: Name("QuotedUint/GoSyntax"), + in: `{"X": "0000123"}`, + ptr: new(struct { + X uint64 `json:",string"` + }), + out: struct { + X uint64 `json:",string"` + }{123}, + }, + { + CaseName: Name("QuotedUint/Invalid"), + in: `{"X": "0x123"}`, + ptr: new(struct { + X uint64 `json:",string"` + }), + err: &UnmarshalTypeError{Value: "number 0x123", Type: reflect.TypeFor[uint64](), Field: "X", Offset: int64(len(`{"X": `))}, + }, + { + CaseName: Name("QuotedFloat/GoSyntax"), + in: `{"X": "0x1_4p-2"}`, + ptr: new(struct { + X float64 `json:",string"` + }), + out: struct { + X float64 `json:",string"` + }{0x1_4p-2}, + }, + { + CaseName: Name("QuotedFloat/Invalid"), + in: `{"X": "1.5e1_"}`, + ptr: new(struct { + X float64 `json:",string"` + }), + err: &UnmarshalTypeError{Value: "number 1.5e1_", Type: reflect.TypeFor[float64](), Field: "X", Offset: int64(len(`{"X": `))}, + }, } func TestMarshal(t *testing.T) { @@ -2307,6 +2363,34 @@ func TestUnmarshalTypeError(t *testing.T) { } } +func TestUnmarshalTypeErrorMessage(t *testing.T) { + err := &UnmarshalTypeError{ + Value: "number 5", + Type: reflect.TypeFor[int](), + Offset: 1234, + Struct: "Root", + } + + for _, tt := range []struct { + field string + want string + }{ + {"", "json: cannot unmarshal number 5 into Go struct field Root. 
of type int"}, + {"1", "json: cannot unmarshal number 5 into Root.1 of type int"}, + {"foo", "json: cannot unmarshal number 5 into Go struct field Root.foo of type int"}, + {"foo.1", "json: cannot unmarshal number 5 into Root.foo.1 of type int"}, + {"foo.bar", "json: cannot unmarshal number 5 into Go struct field Root.foo.bar of type int"}, + {"foo.bar.1", "json: cannot unmarshal number 5 into Root.foo.bar.1 of type int"}, + {"foo.bar.baz", "json: cannot unmarshal number 5 into Go struct field Root.foo.bar.baz of type int"}, + } { + err.Field = tt.field + got := err.Error() + if got != tt.want { + t.Errorf("Error:\n\tgot: %v\n\twant: %v", got, tt.want) + } + } +} + func TestUnmarshalSyntax(t *testing.T) { var x any tests := []struct { diff --git a/src/encoding/json/v2_options.go b/src/encoding/json/v2_options.go index 4dea88ad7edaf6..2bdec86fdeab5b 100644 --- a/src/encoding/json/v2_options.go +++ b/src/encoding/json/v2_options.go @@ -227,9 +227,7 @@ type Options = jsonopts.Options // - [jsontext.EscapeForJS] // - [jsontext.PreserveRawStrings] // -// All other boolean options are set to false. -// All non-boolean options are set to the zero value, -// except for [jsontext.WithIndent], which defaults to "\t". +// All other options are not present. // // The [Marshal] and [Unmarshal] functions in this package are // semantically identical to calling the v2 equivalents with this option: @@ -506,7 +504,9 @@ func ReportErrorsWithLegacySemantics(v bool) Options { // When marshaling, such Go values are serialized as their usual // JSON representation, but quoted within a JSON string. // When unmarshaling, such Go values must be deserialized from -// a JSON string containing their usual JSON representation. +// a JSON string containing their usual JSON representation or +// Go number representation for that numeric kind. +// Note that the Go number grammar is a superset of the JSON number grammar. // A JSON null quoted in a JSON string is a valid substitute for JSON null // while unmarshaling into a Go value that `string` takes effect on. // diff --git a/src/encoding/json/v2_stream.go b/src/encoding/json/v2_stream.go index 28e72c0a529e15..dcee553ee13336 100644 --- a/src/encoding/json/v2_stream.go +++ b/src/encoding/json/v2_stream.go @@ -20,6 +20,10 @@ type Decoder struct { dec *jsontext.Decoder opts jsonv2.Options err error + + // hadPeeked reports whether [Decoder.More] was called. + // It is reset by [Decoder.Decode] and [Decoder.Token]. + hadPeeked bool } // NewDecoder returns a new decoder that reads from r. @@ -76,6 +80,7 @@ func (dec *Decoder) Decode(v any) error { } return dec.err } + dec.hadPeeked = false return jsonv2.Unmarshal(b, v, dec.opts) } @@ -206,6 +211,7 @@ func (dec *Decoder) Token() (Token, error) { } return nil, transformSyntacticError(err) } + dec.hadPeeked = false switch k := tok.Kind(); k { case 'n': return nil, nil @@ -230,6 +236,7 @@ func (dec *Decoder) Token() (Token, error) { // More reports whether there is another element in the // current array or object being parsed. func (dec *Decoder) More() bool { + dec.hadPeeked = true k := dec.dec.PeekKind() return k > 0 && k != ']' && k != '}' } @@ -238,5 +245,18 @@ func (dec *Decoder) More() bool { // The offset gives the location of the end of the most recently returned token // and the beginning of the next token. 
func (dec *Decoder) InputOffset() int64 { - return dec.dec.InputOffset() + offset := dec.dec.InputOffset() + if dec.hadPeeked { + // Historically, InputOffset reported the location of + // the end of the most recently returned token + // unless [Decoder.More] is called, in which case, it reported + // the beginning of the next token. + unreadBuffer := dec.dec.UnreadBuffer() + trailingTokens := bytes.TrimLeft(unreadBuffer, " \n\r\t") + if len(trailingTokens) > 0 { + leadingWhitespace := len(unreadBuffer) - len(trailingTokens) + offset += int64(leadingWhitespace) + } + } + return offset } diff --git a/src/encoding/json/v2_stream_test.go b/src/encoding/json/v2_stream_test.go index b8951f82054e96..0885631fb5937f 100644 --- a/src/encoding/json/v2_stream_test.go +++ b/src/encoding/json/v2_stream_test.go @@ -537,3 +537,67 @@ func TestTokenTruncation(t *testing.T) { } } } + +func TestDecoderInputOffset(t *testing.T) { + const input = ` [ + [ ] , [ "one" ] , [ "one" , "two" ] , + { } , { "alpha" : "bravo" } , { "alpha" : "bravo" , "fizz" : "buzz" } + ] ` + wantOffsets := []int64{ + 0, 1, 2, 5, 6, 7, 8, 9, 12, 13, 18, 19, 20, 21, 24, 25, 30, 31, + 38, 39, 40, 41, 46, 47, 48, 49, 52, 53, 60, 61, 70, 71, 72, 73, + 76, 77, 84, 85, 94, 95, 103, 104, 112, 113, 114, 116, 117, 117, + 117, 117, + } + wantMores := []bool{ + true, true, false, true, true, false, true, true, true, false, + true, false, true, true, true, false, true, true, true, true, + true, false, false, false, false, + } + + d := NewDecoder(strings.NewReader(input)) + checkOffset := func() { + t.Helper() + got := d.InputOffset() + if len(wantOffsets) == 0 { + t.Fatalf("InputOffset = %d, want nil", got) + } + want := wantOffsets[0] + if got != want { + t.Fatalf("InputOffset = %d, want %d", got, want) + } + wantOffsets = wantOffsets[1:] + } + checkMore := func() { + t.Helper() + got := d.More() + if len(wantMores) == 0 { + t.Fatalf("More = %v, want nil", got) + } + want := wantMores[0] + if got != want { + t.Fatalf("More = %v, want %v", got, want) + } + wantMores = wantMores[1:] + } + checkOffset() + checkMore() + checkOffset() + for { + if _, err := d.Token(); err == io.EOF { + break + } else if err != nil { + t.Fatalf("Token error: %v", err) + } + checkOffset() + checkMore() + checkOffset() + } + checkOffset() + checkMore() + checkOffset() + + if len(wantOffsets)+len(wantMores) > 0 { + t.Fatal("unconsumed testdata") + } +} diff --git a/src/encoding/pem/pem.go b/src/encoding/pem/pem.go index dcc7416ee21ffe..21887008ca2182 100644 --- a/src/encoding/pem/pem.go +++ b/src/encoding/pem/pem.go @@ -37,7 +37,7 @@ type Block struct { // line bytes. The remainder of the byte array (also not including the new line // bytes) is also returned and this will always be smaller than the original // argument. -func getLine(data []byte) (line, rest []byte) { +func getLine(data []byte) (line, rest []byte, consumed int) { i := bytes.IndexByte(data, '\n') var j int if i < 0 { @@ -49,7 +49,7 @@ func getLine(data []byte) (line, rest []byte) { i-- } } - return bytes.TrimRight(data[0:i], " \t"), data[j:] + return bytes.TrimRight(data[0:i], " \t"), data[j:], j } // removeSpacesAndTabs returns a copy of its input with all spaces and tabs @@ -90,20 +90,32 @@ func Decode(data []byte) (p *Block, rest []byte) { // pemStart begins with a newline. However, at the very beginning of // the byte array, we'll accept the start string without it. 
rest = data + for { - if bytes.HasPrefix(rest, pemStart[1:]) { - rest = rest[len(pemStart)-1:] - } else if _, after, ok := bytes.Cut(rest, pemStart); ok { - rest = after - } else { + // Find the first END line, and then find the last BEGIN line before + // the end line. This lets us skip any repeated BEGIN lines that don't + // have a matching END. + endIndex := bytes.Index(rest, pemEnd) + if endIndex < 0 { + return nil, data + } + endTrailerIndex := endIndex + len(pemEnd) + beginIndex := bytes.LastIndex(rest[:endIndex], pemStart[1:]) + if beginIndex < 0 || beginIndex > 0 && rest[beginIndex-1] != '\n' { return nil, data } + rest = rest[beginIndex+len(pemStart)-1:] + endIndex -= beginIndex + len(pemStart) - 1 + endTrailerIndex -= beginIndex + len(pemStart) - 1 var typeLine []byte - typeLine, rest = getLine(rest) + var consumed int + typeLine, rest, consumed = getLine(rest) if !bytes.HasSuffix(typeLine, pemEndOfLine) { continue } + endIndex -= consumed + endTrailerIndex -= consumed typeLine = typeLine[0 : len(typeLine)-len(pemEndOfLine)] p = &Block{ @@ -117,7 +129,7 @@ func Decode(data []byte) (p *Block, rest []byte) { if len(rest) == 0 { return nil, data } - line, next := getLine(rest) + line, next, consumed := getLine(rest) key, val, ok := bytes.Cut(line, colon) if !ok { @@ -129,21 +141,13 @@ func Decode(data []byte) (p *Block, rest []byte) { val = bytes.TrimSpace(val) p.Headers[string(key)] = string(val) rest = next + endIndex -= consumed + endTrailerIndex -= consumed } - var endIndex, endTrailerIndex int - - // If there were no headers, the END line might occur - // immediately, without a leading newline. - if len(p.Headers) == 0 && bytes.HasPrefix(rest, pemEnd[1:]) { - endIndex = 0 - endTrailerIndex = len(pemEnd) - 1 - } else { - endIndex = bytes.Index(rest, pemEnd) - endTrailerIndex = endIndex + len(pemEnd) - } - - if endIndex < 0 { + // If there were headers, there must be a newline between the headers + // and the END line, so endIndex should be >= 0. + if len(p.Headers) > 0 && endIndex < 0 { continue } @@ -163,21 +167,24 @@ func Decode(data []byte) (p *Block, rest []byte) { } // The line must end with only whitespace. - if s, _ := getLine(restOfEndLine); len(s) != 0 { + if s, _, _ := getLine(restOfEndLine); len(s) != 0 { continue } - base64Data := removeSpacesAndTabs(rest[:endIndex]) - p.Bytes = make([]byte, base64.StdEncoding.DecodedLen(len(base64Data))) - n, err := base64.StdEncoding.Decode(p.Bytes, base64Data) - if err != nil { - continue + p.Bytes = []byte{} + if endIndex > 0 { + base64Data := removeSpacesAndTabs(rest[:endIndex]) + p.Bytes = make([]byte, base64.StdEncoding.DecodedLen(len(base64Data))) + n, err := base64.StdEncoding.Decode(p.Bytes, base64Data) + if err != nil { + continue + } + p.Bytes = p.Bytes[:n] } - p.Bytes = p.Bytes[:n] // the -1 is because we might have only matched pemEnd without the // leading newline if the PEM block was empty. 
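A hedged, standalone sketch (not part of the patch) of the behavior the new search order above is meant to provide: a repeated BEGIN line without a matching END is skipped, and the innermost complete block is decoded. The block type and payload are invented for illustration.

	package main

	import (
		"encoding/pem"
		"fmt"
	)

	func main() {
		data := []byte("" +
			"-----BEGIN STRAY-----\n" + // stray BEGIN with no matching END
			"-----BEGIN MESSAGE-----\n" +
			"aGVsbG8=\n" +
			"-----END MESSAGE-----\n")

		block, rest := pem.Decode(data)
		if block != nil {
			fmt.Printf("type=%q bytes=%q rest=%q\n", block.Type, block.Bytes, rest)
		}
	}
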
- _, rest = getLine(rest[endIndex+len(pemEnd)-1:]) + _, rest, _ = getLine(rest[endIndex+len(pemEnd)-1:]) return p, rest } } diff --git a/src/encoding/pem/pem_test.go b/src/encoding/pem/pem_test.go index e252ffd8ed1613..2c9b3eabcd1c12 100644 --- a/src/encoding/pem/pem_test.go +++ b/src/encoding/pem/pem_test.go @@ -34,7 +34,7 @@ var getLineTests = []GetLineTest{ func TestGetLine(t *testing.T) { for i, test := range getLineTests { - x, y := getLine([]byte(test.in)) + x, y, _ := getLine([]byte(test.in)) if string(x) != test.out1 || string(y) != test.out2 { t.Errorf("#%d got:%+v,%+v want:%s,%s", i, x, y, test.out1, test.out2) } @@ -46,6 +46,7 @@ func TestDecode(t *testing.T) { if !reflect.DeepEqual(result, certificate) { t.Errorf("#0 got:%#v want:%#v", result, certificate) } + result, remainder = Decode(remainder) if !reflect.DeepEqual(result, privateKey) { t.Errorf("#1 got:%#v want:%#v", result, privateKey) @@ -68,7 +69,7 @@ func TestDecode(t *testing.T) { } result, remainder = Decode(remainder) - if result == nil || result.Type != "HEADERS" || len(result.Headers) != 1 { + if result == nil || result.Type != "VALID HEADERS" || len(result.Headers) != 1 { t.Errorf("#5 expected single header block but got :%v", result) } @@ -381,15 +382,15 @@ ZWAaUoVtWIQ52aKS0p19G99hhb+IVANC4akkdHV4SP8i7MVNZhfUmg== # This shouldn't be recognised because of the missing newline after the headers. ------BEGIN HEADERS----- +-----BEGIN INVALID HEADERS----- Header: 1 ------END HEADERS----- +-----END INVALID HEADERS----- # This should be valid, however. ------BEGIN HEADERS----- +-----BEGIN VALID HEADERS----- Header: 1 ------END HEADERS-----`) +-----END VALID HEADERS-----`) var certificate = &Block{Type: "CERTIFICATE", Headers: map[string]string{}, diff --git a/src/encoding/xml/marshal.go b/src/encoding/xml/marshal.go index 133503fa2de41c..13fbeeeedc75ce 100644 --- a/src/encoding/xml/marshal.go +++ b/src/encoding/xml/marshal.go @@ -416,12 +416,6 @@ func (p *printer) popPrefix() { } } -var ( - marshalerType = reflect.TypeFor[Marshaler]() - marshalerAttrType = reflect.TypeFor[MarshalerAttr]() - textMarshalerType = reflect.TypeFor[encoding.TextMarshaler]() -) - // marshalValue writes one or more XML elements representing val. // If val was obtained from a struct field, finfo must have its details. func (p *printer) marshalValue(val reflect.Value, finfo *fieldInfo, startTemplate *StartElement) error { @@ -450,24 +444,32 @@ func (p *printer) marshalValue(val reflect.Value, finfo *fieldInfo, startTemplat typ := val.Type() // Check for marshaler. - if val.CanInterface() && typ.Implements(marshalerType) { - return p.marshalInterface(val.Interface().(Marshaler), defaultStart(typ, finfo, startTemplate)) + if val.CanInterface() { + if marshaler, ok := reflect.TypeAssert[Marshaler](val); ok { + return p.marshalInterface(marshaler, defaultStart(typ, finfo, startTemplate)) + } } if val.CanAddr() { pv := val.Addr() - if pv.CanInterface() && pv.Type().Implements(marshalerType) { - return p.marshalInterface(pv.Interface().(Marshaler), defaultStart(pv.Type(), finfo, startTemplate)) + if pv.CanInterface() { + if marshaler, ok := reflect.TypeAssert[Marshaler](pv); ok { + return p.marshalInterface(marshaler, defaultStart(pv.Type(), finfo, startTemplate)) + } } } // Check for text marshaler. 
- if val.CanInterface() && typ.Implements(textMarshalerType) { - return p.marshalTextInterface(val.Interface().(encoding.TextMarshaler), defaultStart(typ, finfo, startTemplate)) + if val.CanInterface() { + if textMarshaler, ok := reflect.TypeAssert[encoding.TextMarshaler](val); ok { + return p.marshalTextInterface(textMarshaler, defaultStart(typ, finfo, startTemplate)) + } } if val.CanAddr() { pv := val.Addr() - if pv.CanInterface() && pv.Type().Implements(textMarshalerType) { - return p.marshalTextInterface(pv.Interface().(encoding.TextMarshaler), defaultStart(pv.Type(), finfo, startTemplate)) + if pv.CanInterface() { + if textMarshaler, ok := reflect.TypeAssert[encoding.TextMarshaler](pv); ok { + return p.marshalTextInterface(textMarshaler, defaultStart(pv.Type(), finfo, startTemplate)) + } } } @@ -503,7 +505,7 @@ func (p *printer) marshalValue(val reflect.Value, finfo *fieldInfo, startTemplat start.Name.Space, start.Name.Local = xmlname.xmlns, xmlname.name } else { fv := xmlname.value(val, dontInitNilPointers) - if v, ok := fv.Interface().(Name); ok && v.Local != "" { + if v, ok := reflect.TypeAssert[Name](fv); ok && v.Local != "" { start.Name = v } } @@ -580,21 +582,9 @@ func (p *printer) marshalValue(val reflect.Value, finfo *fieldInfo, startTemplat // marshalAttr marshals an attribute with the given name and value, adding to start.Attr. func (p *printer) marshalAttr(start *StartElement, name Name, val reflect.Value) error { - if val.CanInterface() && val.Type().Implements(marshalerAttrType) { - attr, err := val.Interface().(MarshalerAttr).MarshalXMLAttr(name) - if err != nil { - return err - } - if attr.Name.Local != "" { - start.Attr = append(start.Attr, attr) - } - return nil - } - - if val.CanAddr() { - pv := val.Addr() - if pv.CanInterface() && pv.Type().Implements(marshalerAttrType) { - attr, err := pv.Interface().(MarshalerAttr).MarshalXMLAttr(name) + if val.CanInterface() { + if marshaler, ok := reflect.TypeAssert[MarshalerAttr](val); ok { + attr, err := marshaler.MarshalXMLAttr(name) if err != nil { return err } @@ -605,19 +595,25 @@ func (p *printer) marshalAttr(start *StartElement, name Name, val reflect.Value) } } - if val.CanInterface() && val.Type().Implements(textMarshalerType) { - text, err := val.Interface().(encoding.TextMarshaler).MarshalText() - if err != nil { - return err + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() { + if marshaler, ok := reflect.TypeAssert[MarshalerAttr](pv); ok { + attr, err := marshaler.MarshalXMLAttr(name) + if err != nil { + return err + } + if attr.Name.Local != "" { + start.Attr = append(start.Attr, attr) + } + return nil + } } - start.Attr = append(start.Attr, Attr{name, string(text)}) - return nil } - if val.CanAddr() { - pv := val.Addr() - if pv.CanInterface() && pv.Type().Implements(textMarshalerType) { - text, err := pv.Interface().(encoding.TextMarshaler).MarshalText() + if val.CanInterface() { + if textMarshaler, ok := reflect.TypeAssert[encoding.TextMarshaler](val); ok { + text, err := textMarshaler.MarshalText() if err != nil { return err } @@ -626,6 +622,20 @@ func (p *printer) marshalAttr(start *StartElement, name Name, val reflect.Value) } } + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() { + if textMarshaler, ok := reflect.TypeAssert[encoding.TextMarshaler](pv); ok { + text, err := textMarshaler.MarshalText() + if err != nil { + return err + } + start.Attr = append(start.Attr, Attr{name, string(text)}) + return nil + } + } + } + // Dereference or skip nil pointer, interface values. 
switch val.Kind() { case reflect.Pointer, reflect.Interface: @@ -647,7 +657,8 @@ func (p *printer) marshalAttr(start *StartElement, name Name, val reflect.Value) } if val.Type() == attrType { - start.Attr = append(start.Attr, val.Interface().(Attr)) + attr, _ := reflect.TypeAssert[Attr](val) + start.Attr = append(start.Attr, attr) return nil } @@ -854,20 +865,9 @@ func (p *printer) marshalStruct(tinfo *typeInfo, val reflect.Value) error { if err := s.trim(finfo.parents); err != nil { return err } - if vf.CanInterface() && vf.Type().Implements(textMarshalerType) { - data, err := vf.Interface().(encoding.TextMarshaler).MarshalText() - if err != nil { - return err - } - if err := emit(p, data); err != nil { - return err - } - continue - } - if vf.CanAddr() { - pv := vf.Addr() - if pv.CanInterface() && pv.Type().Implements(textMarshalerType) { - data, err := pv.Interface().(encoding.TextMarshaler).MarshalText() + if vf.CanInterface() { + if textMarshaler, ok := reflect.TypeAssert[encoding.TextMarshaler](vf); ok { + data, err := textMarshaler.MarshalText() if err != nil { return err } @@ -877,6 +877,21 @@ func (p *printer) marshalStruct(tinfo *typeInfo, val reflect.Value) error { continue } } + if vf.CanAddr() { + pv := vf.Addr() + if pv.CanInterface() { + if textMarshaler, ok := reflect.TypeAssert[encoding.TextMarshaler](pv); ok { + data, err := textMarshaler.MarshalText() + if err != nil { + return err + } + if err := emit(p, data); err != nil { + return err + } + continue + } + } + } var scratch [64]byte vf = indirect(vf) @@ -902,7 +917,7 @@ func (p *printer) marshalStruct(tinfo *typeInfo, val reflect.Value) error { return err } case reflect.Slice: - if elem, ok := vf.Interface().([]byte); ok { + if elem, ok := reflect.TypeAssert[[]byte](vf); ok { if err := emit(p, elem); err != nil { return err } diff --git a/src/encoding/xml/marshal_test.go b/src/encoding/xml/marshal_test.go index b8bce7170a60b6..6c7e711aac0566 100644 --- a/src/encoding/xml/marshal_test.go +++ b/src/encoding/xml/marshal_test.go @@ -2561,7 +2561,6 @@ var closeTests = []struct { func TestClose(t *testing.T) { for _, tt := range closeTests { - tt := tt t.Run(tt.desc, func(t *testing.T) { var out strings.Builder enc := NewEncoder(&out) diff --git a/src/encoding/xml/read.go b/src/encoding/xml/read.go index af25c20f0618dc..d3cb74b2c4311a 100644 --- a/src/encoding/xml/read.go +++ b/src/encoding/xml/read.go @@ -255,28 +255,36 @@ func (d *Decoder) unmarshalAttr(val reflect.Value, attr Attr) error { } val = val.Elem() } - if val.CanInterface() && val.Type().Implements(unmarshalerAttrType) { + if val.CanInterface() { // This is an unmarshaler with a non-pointer receiver, // so it's likely to be incorrect, but we do what we're told. - return val.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr) + if unmarshaler, ok := reflect.TypeAssert[UnmarshalerAttr](val); ok { + return unmarshaler.UnmarshalXMLAttr(attr) + } } if val.CanAddr() { pv := val.Addr() - if pv.CanInterface() && pv.Type().Implements(unmarshalerAttrType) { - return pv.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr) + if pv.CanInterface() { + if unmarshaler, ok := reflect.TypeAssert[UnmarshalerAttr](pv); ok { + return unmarshaler.UnmarshalXMLAttr(attr) + } } } // Not an UnmarshalerAttr; try encoding.TextUnmarshaler. - if val.CanInterface() && val.Type().Implements(textUnmarshalerType) { + if val.CanInterface() { // This is an unmarshaler with a non-pointer receiver, // so it's likely to be incorrect, but we do what we're told. 
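The hunks in these files replace Implements/Interface().(T) pairs with the generic reflect.TypeAssert helper. As a hedged aside, a minimal standalone use of that helper looks roughly like this (the values are invented for illustration):

	package main

	import (
		"fmt"
		"reflect"
	)

	func main() {
		v := reflect.ValueOf(fmt.Errorf("boom"))

		// Roughly equivalent to err, ok := v.Interface().(error),
		// without constructing the intermediate interface value.
		if err, ok := reflect.TypeAssert[error](v); ok {
			fmt.Println("error:", err)
		}
		if _, ok := reflect.TypeAssert[fmt.Stringer](v); !ok {
			fmt.Println("not a fmt.Stringer")
		}
	}
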
- return val.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value)) + if textUnmarshaler, ok := reflect.TypeAssert[encoding.TextUnmarshaler](val); ok { + return textUnmarshaler.UnmarshalText([]byte(attr.Value)) + } } if val.CanAddr() { pv := val.Addr() - if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { - return pv.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value)) + if pv.CanInterface() { + if textUnmarshaler, ok := reflect.TypeAssert[encoding.TextUnmarshaler](pv); ok { + return textUnmarshaler.UnmarshalText([]byte(attr.Value)) + } } } @@ -303,12 +311,7 @@ func (d *Decoder) unmarshalAttr(val reflect.Value, attr Attr) error { return copyValue(val, []byte(attr.Value)) } -var ( - attrType = reflect.TypeFor[Attr]() - unmarshalerType = reflect.TypeFor[Unmarshaler]() - unmarshalerAttrType = reflect.TypeFor[UnmarshalerAttr]() - textUnmarshalerType = reflect.TypeFor[encoding.TextUnmarshaler]() -) +var attrType = reflect.TypeFor[Attr]() const ( maxUnmarshalDepth = 10000 @@ -352,27 +355,35 @@ func (d *Decoder) unmarshal(val reflect.Value, start *StartElement, depth int) e val = val.Elem() } - if val.CanInterface() && val.Type().Implements(unmarshalerType) { + if val.CanInterface() { // This is an unmarshaler with a non-pointer receiver, // so it's likely to be incorrect, but we do what we're told. - return d.unmarshalInterface(val.Interface().(Unmarshaler), start) + if unmarshaler, ok := reflect.TypeAssert[Unmarshaler](val); ok { + return d.unmarshalInterface(unmarshaler, start) + } } if val.CanAddr() { pv := val.Addr() - if pv.CanInterface() && pv.Type().Implements(unmarshalerType) { - return d.unmarshalInterface(pv.Interface().(Unmarshaler), start) + if pv.CanInterface() { + if unmarshaler, ok := reflect.TypeAssert[Unmarshaler](pv); ok { + return d.unmarshalInterface(unmarshaler, start) + } } } - if val.CanInterface() && val.Type().Implements(textUnmarshalerType) { - return d.unmarshalTextInterface(val.Interface().(encoding.TextUnmarshaler)) + if val.CanInterface() { + if textUnmarshaler, ok := reflect.TypeAssert[encoding.TextUnmarshaler](val); ok { + return d.unmarshalTextInterface(textUnmarshaler) + } } if val.CanAddr() { pv := val.Addr() - if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { - return d.unmarshalTextInterface(pv.Interface().(encoding.TextUnmarshaler)) + if pv.CanInterface() { + if textUnmarshaler, ok := reflect.TypeAssert[encoding.TextUnmarshaler](pv); ok { + return d.unmarshalTextInterface(textUnmarshaler) + } } } @@ -453,7 +464,7 @@ func (d *Decoder) unmarshal(val reflect.Value, start *StartElement, depth int) e return UnmarshalError(e) } fv := finfo.value(sv, initNilPointers) - if _, ok := fv.Interface().(Name); ok { + if _, ok := reflect.TypeAssert[Name](fv); ok { fv.Set(reflect.ValueOf(start.Name)) } } @@ -578,20 +589,24 @@ Loop: } } - if saveData.IsValid() && saveData.CanInterface() && saveData.Type().Implements(textUnmarshalerType) { - if err := saveData.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil { - return err + if saveData.IsValid() && saveData.CanInterface() { + if textUnmarshaler, ok := reflect.TypeAssert[encoding.TextUnmarshaler](saveData); ok { + if err := textUnmarshaler.UnmarshalText(data); err != nil { + return err + } + saveData = reflect.Value{} } - saveData = reflect.Value{} } if saveData.IsValid() && saveData.CanAddr() { pv := saveData.Addr() - if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { - if err := 
pv.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil { - return err + if pv.CanInterface() { + if textUnmarshaler, ok := reflect.TypeAssert[encoding.TextUnmarshaler](pv); ok { + if err := textUnmarshaler.UnmarshalText(data); err != nil { + return err + } + saveData = reflect.Value{} } - saveData = reflect.Value{} } } diff --git a/src/errors/errors.go b/src/errors/errors.go index 5059be12ed441f..8b926cfe148c44 100644 --- a/src/errors/errors.go +++ b/src/errors/errors.go @@ -41,12 +41,12 @@ // // because the former will succeed if err wraps [io/fs.ErrExist]. // -// [As] examines the tree of its first argument looking for an error that can be -// assigned to its second argument, which must be a pointer. If it succeeds, it -// performs the assignment and returns true. Otherwise, it returns false. The form +// [AsType] examines the tree of its argument looking for an error whose +// type matches its type argument. If it succeeds, it returns the +// corresponding value of that type and true. Otherwise, it returns the +// zero value of that type and false. The form // -// var perr *fs.PathError -// if errors.As(err, &perr) { +// if perr, ok := errors.AsType[*fs.PathError](err); ok { // fmt.Println(perr.Path) // } // diff --git a/src/errors/example_test.go b/src/errors/example_test.go index 278df8c7da6e5a..92ef36b1010edb 100644 --- a/src/errors/example_test.go +++ b/src/errors/example_test.go @@ -102,6 +102,18 @@ func ExampleAs() { // Failed at path: non-existing } +func ExampleAsType() { + if _, err := os.Open("non-existing"); err != nil { + if pathError, ok := errors.AsType[*fs.PathError](err); ok { + fmt.Println("Failed at path:", pathError.Path) + } else { + fmt.Println(err) + } + } + // Output: + // Failed at path: non-existing +} + func ExampleUnwrap() { err1 := errors.New("error1") err2 := fmt.Errorf("error2: [%w]", err1) diff --git a/src/errors/wrap.go b/src/errors/wrap.go index eec9591dae7b93..2ebb951f1de93d 100644 --- a/src/errors/wrap.go +++ b/src/errors/wrap.go @@ -80,6 +80,10 @@ func is(err, target error, targetComparable bool) bool { // As finds the first error in err's tree that matches target, and if one is found, sets // target to that error value and returns true. Otherwise, it returns false. // +// For most uses, prefer [AsType]. As is equivalent to [AsType] but sets its target +// argument rather than returning the matching error and doesn't require its target +// argument to implement error. +// // The tree consists of err itself, followed by the errors obtained by repeatedly // calling its Unwrap() error or Unwrap() []error method. When err wraps multiple // errors, As examines err followed by a depth-first traversal of its children. @@ -145,3 +149,60 @@ func as(err error, target any, targetVal reflectlite.Value, targetType reflectli } var errorType = reflectlite.TypeOf((*error)(nil)).Elem() + +// AsType finds the first error in err's tree that matches the type E, and +// if one is found, returns that error value and true. Otherwise, it +// returns the zero value of E and false. +// +// The tree consists of err itself, followed by the errors obtained by +// repeatedly calling its Unwrap() error or Unwrap() []error method. When +// err wraps multiple errors, AsType examines err followed by a +// depth-first traversal of its children. +// +// An error err matches the type E if the type assertion err.(E) holds, +// or if the error has a method As(any) bool such that err.As(target) +// returns true when target is a non-nil *E. 
In the latter case, the As +// method is responsible for setting target. +func AsType[E error](err error) (E, bool) { + if err == nil { + var zero E + return zero, false + } + var pe *E // lazily initialized + return asType(err, &pe) +} + +func asType[E error](err error, ppe **E) (_ E, _ bool) { + for { + if e, ok := err.(E); ok { + return e, true + } + if x, ok := err.(interface{ As(any) bool }); ok { + if *ppe == nil { + *ppe = new(E) + } + if x.As(*ppe) { + return **ppe, true + } + } + switch x := err.(type) { + case interface{ Unwrap() error }: + err = x.Unwrap() + if err == nil { + return + } + case interface{ Unwrap() []error }: + for _, err := range x.Unwrap() { + if err == nil { + continue + } + if x, ok := asType(err, ppe); ok { + return x, true + } + } + return + default: + return + } + } +} diff --git a/src/errors/wrap_test.go b/src/errors/wrap_test.go index 58ed95fd9a0fc7..81c795a6bb8b18 100644 --- a/src/errors/wrap_test.go +++ b/src/errors/wrap_test.go @@ -239,6 +239,123 @@ func TestAsValidation(t *testing.T) { } } +func TestAsType(t *testing.T) { + var errT errorT + var errP *fs.PathError + type timeout interface { + Timeout() bool + error + } + _, errF := os.Open("non-existing") + poserErr := &poser{"oh no", nil} + + testAsType(t, + nil, + errP, + false, + ) + testAsType(t, + wrapped{"pitied the fool", errorT{"T"}}, + errorT{"T"}, + true, + ) + testAsType(t, + errF, + errF, + true, + ) + testAsType(t, + errT, + errP, + false, + ) + testAsType(t, + wrapped{"wrapped", nil}, + errT, + false, + ) + testAsType(t, + &poser{"error", nil}, + errorT{"poser"}, + true, + ) + testAsType(t, + &poser{"path", nil}, + poserPathErr, + true, + ) + testAsType(t, + poserErr, + poserErr, + true, + ) + testAsType(t, + errors.New("err"), + timeout(nil), + false, + ) + testAsType(t, + errF, + errF.(timeout), + true) + testAsType(t, + wrapped{"path error", errF}, + errF.(timeout), + true, + ) + testAsType(t, + multiErr{}, + errT, + false, + ) + testAsType(t, + multiErr{errors.New("a"), errorT{"T"}}, + errorT{"T"}, + true, + ) + testAsType(t, + multiErr{errorT{"T"}, errors.New("a")}, + errorT{"T"}, + true, + ) + testAsType(t, + multiErr{errorT{"a"}, errorT{"b"}}, + errorT{"a"}, + true, + ) + testAsType(t, + multiErr{multiErr{errors.New("a"), errorT{"a"}}, errorT{"b"}}, + errorT{"a"}, + true, + ) + testAsType(t, + multiErr{wrapped{"path error", errF}}, + errF.(timeout), + true, + ) + testAsType(t, + multiErr{nil}, + errT, + false, + ) +} + +type compError interface { + comparable + error +} + +func testAsType[E compError](t *testing.T, err error, want E, wantOK bool) { + t.Helper() + name := fmt.Sprintf("AsType[%T](Errorf(..., %v))", want, err) + t.Run(name, func(t *testing.T) { + got, gotOK := errors.AsType[E](err) + if gotOK != wantOK || got != want { + t.Fatalf("got %v, %t; want %v, %t", got, gotOK, want, wantOK) + } + }) +} + func BenchmarkIs(b *testing.B) { err1 := errors.New("1") err2 := multiErr{multiErr{multiErr{err1, errorT{"a"}}, errorT{"b"}}} @@ -260,6 +377,15 @@ func BenchmarkAs(b *testing.B) { } } +func BenchmarkAsType(b *testing.B) { + err := multiErr{multiErr{multiErr{errors.New("a"), errorT{"a"}}, errorT{"b"}}} + for range b.N { + if _, ok := errors.AsType[errorT](err); !ok { + b.Fatal("AsType failed") + } + } +} + func TestUnwrap(t *testing.T) { err1 := errors.New("1") erra := wrapped{"wrap 2", err1} diff --git a/src/fmt/errors.go b/src/fmt/errors.go index 1ac83404bc7c55..a0ce7ada346dbc 100644 --- a/src/fmt/errors.go +++ b/src/fmt/errors.go @@ -6,6 +6,7 @@ package fmt import ( 
"errors" + "internal/stringslite" "slices" ) @@ -19,7 +20,22 @@ import ( // order they appear in the arguments. // It is invalid to supply the %w verb with an operand that does not implement // the error interface. The %w verb is otherwise a synonym for %v. -func Errorf(format string, a ...any) error { +func Errorf(format string, a ...any) (err error) { + // This function has been split in a somewhat unnatural way + // so that both it and the errors.New call can be inlined. + if err = errorf(format, a...); err != nil { + return err + } + // No formatting was needed. We can avoid some allocations and other work. + // See https://go.dev/cl/708836 for details. + return errors.New(format) +} + +// errorf formats and returns an error value, or nil if no formatting is required. +func errorf(format string, a ...any) error { + if len(a) == 0 && stringslite.IndexByte(format, '%') == -1 { + return nil + } p := newPrinter() p.wrapErrs = true p.doPrintf(format, a) diff --git a/src/fmt/errors_test.go b/src/fmt/errors_test.go index 4eb55faffe7a18..52bf42d0a623a6 100644 --- a/src/fmt/errors_test.go +++ b/src/fmt/errors_test.go @@ -54,6 +54,15 @@ func TestErrorf(t *testing.T) { }, { err: noVetErrorf("%w is not an error", "not-an-error"), wantText: "%!w(string=not-an-error) is not an error", + }, { + err: fmt.Errorf("no verbs"), + wantText: "no verbs", + }, { + err: noVetErrorf("no verbs with extra arg", "extra"), + wantText: "no verbs with extra arg%!(EXTRA string=extra)", + }, { + err: noVetErrorf("too many verbs: %w %v"), + wantText: "too many verbs: %!w(MISSING) %!v(MISSING)", }, { err: noVetErrorf("wrapped two errors: %w %w", errString("1"), errString("2")), wantText: "wrapped two errors: 1 2", diff --git a/src/fmt/fmt_test.go b/src/fmt/fmt_test.go index 86e458ae6481fb..c07da5683c2463 100644 --- a/src/fmt/fmt_test.go +++ b/src/fmt/fmt_test.go @@ -1480,6 +1480,7 @@ func BenchmarkFprintIntNoAlloc(b *testing.B) { var mallocBuf bytes.Buffer var mallocPointer *int // A pointer so we know the interface value won't allocate. 
+var sink any var mallocTest = []struct { count int @@ -1510,6 +1511,10 @@ var mallocTest = []struct { mallocBuf.Reset() Fprintf(&mallocBuf, "%x %x %x", mallocPointer, mallocPointer, mallocPointer) }}, + {0, `Errorf("hello")`, func() { _ = Errorf("hello") }}, + {2, `Errorf("hello: %x")`, func() { _ = Errorf("hello: %x", mallocPointer) }}, + {1, `sink = Errorf("hello")`, func() { sink = Errorf("hello") }}, + {2, `sink = Errorf("hello: %x")`, func() { sink = Errorf("hello: %x", mallocPointer) }}, } var _ bytes.Buffer diff --git a/src/go.mod b/src/go.mod index 4b400fe87189d1..c5e901b9ef22e8 100644 --- a/src/go.mod +++ b/src/go.mod @@ -3,11 +3,11 @@ module std go 1.26 require ( - golang.org/x/crypto v0.42.0 - golang.org/x/net v0.44.0 + golang.org/x/crypto v0.43.0 + golang.org/x/net v0.46.0 ) require ( - golang.org/x/sys v0.36.0 // indirect - golang.org/x/text v0.29.0 // indirect + golang.org/x/sys v0.37.0 // indirect + golang.org/x/text v0.30.0 // indirect ) diff --git a/src/go.sum b/src/go.sum index c90970a9cbaaad..4a52682161fc23 100644 --- a/src/go.sum +++ b/src/go.sum @@ -1,8 +1,8 @@ -golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI= -golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8= -golang.org/x/net v0.44.0 h1:evd8IRDyfNBMBTTY5XRF1vaZlD+EmWx6x8PkhR04H/I= -golang.org/x/net v0.44.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY= -golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= -golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= -golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk= -golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4= +golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04= +golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0= +golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4= +golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210= +golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ= +golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k= +golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM= diff --git a/src/go/build/deps_test.go b/src/go/build/deps_test.go index d50a98d34c9092..8966254d0d9110 100644 --- a/src/go/build/deps_test.go +++ b/src/go/build/deps_test.go @@ -237,7 +237,6 @@ var depsRules = ` internal/types/errors, mime/quotedprintable, net/internal/socktest, - net/url, runtime/trace, text/scanner, text/tabwriter; @@ -300,6 +299,12 @@ var depsRules = ` FMT < text/template/parse; + internal/bytealg, internal/itoa, math/bits, slices, strconv, unique + < net/netip; + + FMT, net/netip + < net/url; + net/url, text/template/parse < text/template < internal/lazytemplate; @@ -414,9 +419,6 @@ var depsRules = ` < golang.org/x/net/dns/dnsmessage, golang.org/x/net/lif; - internal/bytealg, internal/itoa, math/bits, slices, strconv, unique - < net/netip; - os, net/netip < internal/routebsd; @@ -485,11 +487,16 @@ var depsRules = ` internal/byteorder < crypto/internal/fips140deps/byteorder; internal/cpu, internal/goarch < crypto/internal/fips140deps/cpu; internal/godebug < crypto/internal/fips140deps/godebug; + time, internal/syscall/windows < crypto/internal/fips140deps/time; + + crypto/internal/fips140deps/time, errors, math/bits, sync/atomic, unsafe + < 
crypto/internal/fips140/entropy; STR, hash, crypto/internal/impl, crypto/internal/entropy, crypto/internal/randutil, + crypto/internal/fips140/entropy, crypto/internal/fips140deps/byteorder, crypto/internal/fips140deps/cpu, crypto/internal/fips140deps/godebug @@ -559,7 +566,7 @@ var depsRules = ` # CRYPTO-MATH is crypto that exposes math/big APIs - no cgo, net; fmt now ok. - CRYPTO, FMT, math/big + CRYPTO, FMT, math/big, internal/saferio < crypto/internal/boring/bbig < crypto/internal/fips140cache < crypto/rand diff --git a/src/go/types/alias.go b/src/go/types/alias.go index f15ff570303540..a2eeb3afac8e85 100644 --- a/src/go/types/alias.go +++ b/src/go/types/alias.go @@ -116,7 +116,6 @@ func unalias(a0 *Alias) Type { for a := a0; a != nil; a, _ = t.(*Alias) { t = a.fromRHS } - // It's fine to memoize nil types since it's the zero value for actual. // It accomplishes nothing. a0.actual = t diff --git a/src/go/types/api_test.go b/src/go/types/api_test.go index f7a98ae28061bb..2798b9e0c4eb47 100644 --- a/src/go/types/api_test.go +++ b/src/go/types/api_test.go @@ -2480,8 +2480,8 @@ func TestInstantiateErrors(t *testing.T) { t.Fatalf("Instantiate(%v, %v) returned nil error, want non-nil", T, test.targs) } - var argErr *ArgumentError - if !errors.As(err, &argErr) { + argErr, ok := errors.AsType[*ArgumentError](err) + if !ok { t.Fatalf("Instantiate(%v, %v): error is not an *ArgumentError", T, test.targs) } @@ -2496,8 +2496,8 @@ func TestArgumentErrorUnwrapping(t *testing.T) { Index: 1, Err: Error{Msg: "test"}, } - var e Error - if !errors.As(err, &e) { + e, ok := errors.AsType[Error](err) + if !ok { t.Fatalf("error %v does not wrap types.Error", err) } if e.Msg != "test" { diff --git a/src/go/types/assignments.go b/src/go/types/assignments.go index 3b40a9f84830e6..1524451af766ff 100644 --- a/src/go/types/assignments.go +++ b/src/go/types/assignments.go @@ -94,7 +94,7 @@ func (check *Checker) assignment(x *operand, T Type, context string) { // x.typ is typed // A generic (non-instantiated) function value cannot be assigned to a variable. 
- if sig, _ := under(x.typ).(*Signature); sig != nil && sig.TypeParams().Len() > 0 { + if sig, _ := x.typ.Underlying().(*Signature); sig != nil && sig.TypeParams().Len() > 0 { check.errorf(x, WrongTypeArgCount, "cannot use generic function %s without instantiation in %s", x, context) x.mode = invalid return @@ -264,7 +264,7 @@ func (check *Checker) assignVar(lhs, rhs ast.Expr, x *operand, context string) { var target *target // avoid calling ExprString if not needed if T != nil { - if _, ok := under(T).(*Signature); ok { + if _, ok := T.Underlying().(*Signature); ok { target = newTarget(T, ExprString(lhs)) } } diff --git a/src/go/types/builtins.go b/src/go/types/builtins.go index 9b03a40cbc3440..f59c9ddd120360 100644 --- a/src/go/types/builtins.go +++ b/src/go/types/builtins.go @@ -147,7 +147,7 @@ func (check *Checker) builtin(x *operand, call *ast.CallExpr, id builtinId) (_ b // len(x) mode := invalid var val constant.Value - switch t := arrayPtrDeref(under(x.typ)).(type) { + switch t := arrayPtrDeref(x.typ.Underlying()).(type) { case *Basic: if isString(t) && id == _Len { if x.mode == constant_ { @@ -206,7 +206,7 @@ func (check *Checker) builtin(x *operand, call *ast.CallExpr, id builtinId) (_ b if mode == invalid { // avoid error if underlying type is invalid - if isValid(under(x.typ)) { + if isValid(x.typ.Underlying()) { code := InvalidCap if id == _Len { code = InvalidLen @@ -325,7 +325,7 @@ func (check *Checker) builtin(x *operand, call *ast.CallExpr, id builtinId) (_ b // (applyTypeFunc never calls f with a type parameter) f := func(typ Type) Type { assert(!isTypeParam(typ)) - if t, _ := under(typ).(*Basic); t != nil { + if t, _ := typ.Underlying().(*Basic); t != nil { switch t.kind { case Float32: return Typ[Complex64] @@ -475,7 +475,7 @@ func (check *Checker) builtin(x *operand, call *ast.CallExpr, id builtinId) (_ b // (applyTypeFunc never calls f with a type parameter) f := func(typ Type) Type { assert(!isTypeParam(typ)) - if t, _ := under(typ).(*Basic); t != nil { + if t, _ := typ.Underlying().(*Basic); t != nil { switch t.kind { case Complex64: return Typ[Float32] @@ -1023,7 +1023,7 @@ func hasVarSize(t Type, seen map[*Named]bool) (varSized bool) { }() } - switch u := under(t).(type) { + switch u := t.Underlying().(type) { case *Array: return hasVarSize(u.elem, seen) case *Struct: @@ -1115,7 +1115,7 @@ func makeSig(res Type, args ...Type) *Signature { // otherwise it returns typ. func arrayPtrDeref(typ Type) Type { if p, ok := Unalias(typ).(*Pointer); ok { - if a, _ := under(p.base).(*Array); a != nil { + if a, _ := p.base.Underlying().(*Array); a != nil { return a } } diff --git a/src/go/types/call.go b/src/go/types/call.go index 33fe8bb9bfa84f..44549b8727cf0b 100644 --- a/src/go/types/call.go +++ b/src/go/types/call.go @@ -211,7 +211,7 @@ func (check *Checker) callExpr(x *operand, call *ast.CallExpr) exprKind { check.errorf(call.Args[0], BadDotDotDotSyntax, "invalid use of ... 
in conversion to %s", T) break } - if t, _ := under(T).(*Interface); t != nil && !isTypeParam(T) { + if t, _ := T.Underlying().(*Interface); t != nil && !isTypeParam(T) { if !t.IsMethodSet() { check.errorf(call, MisplacedConstraintIface, "cannot use interface %s in conversion (contains specific type constraints or is comparable)", T) break @@ -815,7 +815,7 @@ func (check *Checker) selector(x *operand, e *ast.SelectorExpr, def *TypeName, w obj, index, indirect = lookupFieldOrMethod(x.typ, x.mode == variable, check.pkg, sel, false) if obj == nil { // Don't report another error if the underlying type was invalid (go.dev/issue/49541). - if !isValid(under(x.typ)) { + if !isValid(x.typ.Underlying()) { goto Error } diff --git a/src/go/types/const.go b/src/go/types/const.go index c1ed14abe2c12d..dc9efa0130eae3 100644 --- a/src/go/types/const.go +++ b/src/go/types/const.go @@ -35,7 +35,7 @@ func (check *Checker) overflow(x *operand, opPos token.Pos) { // x.typ cannot be a type parameter (type // parameters cannot be constant types). if isTyped(x.typ) { - check.representable(x, under(x.typ).(*Basic)) + check.representable(x, x.typ.Underlying().(*Basic)) return } diff --git a/src/go/types/conversions.go b/src/go/types/conversions.go index 6a9f263c57c377..daef80adf8058b 100644 --- a/src/go/types/conversions.go +++ b/src/go/types/conversions.go @@ -21,7 +21,7 @@ func (check *Checker) conversion(x *operand, T Type) { constArg := x.mode == constant_ constConvertibleTo := func(T Type, val *constant.Value) bool { - switch t, _ := under(T).(*Basic); { + switch t, _ := T.Underlying().(*Basic); { case t == nil: // nothing to do case representableConst(x.val, check, t, val): @@ -145,8 +145,8 @@ func (x *operand) convertibleTo(check *Checker, T Type, cause *string) bool { origT := T V := Unalias(x.typ) T = Unalias(T) - Vu := under(V) - Tu := under(T) + Vu := V.Underlying() + Tu := T.Underlying() Vp, _ := V.(*TypeParam) Tp, _ := T.(*TypeParam) @@ -161,7 +161,7 @@ func (x *operand) convertibleTo(check *Checker, T Type, cause *string) bool { // and their pointer base types are not type parameters" if V, ok := V.(*Pointer); ok { if T, ok := T.(*Pointer); ok { - if IdenticalIgnoreTags(under(V.base), under(T.base)) && !isTypeParam(V.base) && !isTypeParam(T.base) { + if IdenticalIgnoreTags(V.base.Underlying(), T.base.Underlying()) && !isTypeParam(V.base) && !isTypeParam(T.base) { return true } } @@ -214,7 +214,7 @@ func (x *operand) convertibleTo(check *Checker, T Type, cause *string) bool { return false } case *Pointer: - if a, _ := under(a.Elem()).(*Array); a != nil { + if a, _ := a.Elem().Underlying().(*Array); a != nil { if Identical(s.Elem(), a.Elem()) { if check == nil || check.allowVersion(go1_17) { return true @@ -295,23 +295,23 @@ func (x *operand) convertibleTo(check *Checker, T Type, cause *string) bool { } func isUintptr(typ Type) bool { - t, _ := under(typ).(*Basic) + t, _ := typ.Underlying().(*Basic) return t != nil && t.kind == Uintptr } func isUnsafePointer(typ Type) bool { - t, _ := under(typ).(*Basic) + t, _ := typ.Underlying().(*Basic) return t != nil && t.kind == UnsafePointer } func isPointer(typ Type) bool { - _, ok := under(typ).(*Pointer) + _, ok := typ.Underlying().(*Pointer) return ok } func isBytesOrRunes(typ Type) bool { - if s, _ := under(typ).(*Slice); s != nil { - t, _ := under(s.elem).(*Basic) + if s, _ := typ.Underlying().(*Slice); s != nil { + t, _ := s.elem.Underlying().(*Basic) return t != nil && (t.kind == Byte || t.kind == Rune) } return false diff --git a/src/go/types/decl.go 
b/src/go/types/decl.go index 42423d291cee8d..15726dc4e2427d 100644 --- a/src/go/types/decl.go +++ b/src/go/types/decl.go @@ -226,8 +226,8 @@ func (check *Checker) validCycle(obj Object) (valid bool) { start := obj.color() - grey // index of obj in objPath cycle := check.objPath[start:] tparCycle := false // if set, the cycle is through a type parameter list - nval := 0 // number of (constant or variable) values in the cycle; valid if !generic - ndef := 0 // number of type definitions in the cycle; valid if !generic + nval := 0 // number of (constant or variable) values in the cycle + ndef := 0 // number of type definitions in the cycle loop: for _, obj := range cycle { switch obj := obj.(type) { @@ -236,7 +236,7 @@ loop: case *TypeName: // If we reach a generic type that is part of a cycle // and we are in a type parameter list, we have a cycle - // through a type parameter list, which is invalid. + // through a type parameter list. if check.inTParamList && isGeneric(obj.typ) { tparCycle = true break loop @@ -287,20 +287,23 @@ loop: }() } - if !tparCycle { - // A cycle involving only constants and variables is invalid but we - // ignore them here because they are reported via the initialization - // cycle check. - if nval == len(cycle) { - return true - } + // Cycles through type parameter lists are ok (go.dev/issue/68162). + if tparCycle { + return true + } - // A cycle involving only types (and possibly functions) must have at least - // one type definition to be permitted: If there is no type definition, we - // have a sequence of alias type names which will expand ad infinitum. - if nval == 0 && ndef > 0 { - return true - } + // A cycle involving only constants and variables is invalid but we + // ignore them here because they are reported via the initialization + // cycle check. + if nval == len(cycle) { + return true + } + + // A cycle involving only types (and possibly functions) must have at least + // one type definition to be permitted: If there is no type definition, we + // have a sequence of alias type names which will expand ad infinitum. + if nval == 0 && ndef > 0 { + return true } check.cycleError(cycle, firstInSrc(cycle)) @@ -463,7 +466,7 @@ func (check *Checker) constDecl(obj *Const, typ, init ast.Expr, inherited bool) if !isConstType(t) { // don't report an error if the type is an invalid C (defined) type // (go.dev/issue/22090) - if isValid(under(t)) { + if isValid(t.Underlying()) { check.errorf(typ, InvalidConstType, "invalid constant type %s", t) } obj.typ = Typ[Invalid] @@ -548,7 +551,7 @@ func (check *Checker) isImportedConstraint(typ Type) bool { if named == nil || named.obj.pkg == check.pkg || named.obj.pkg == nil { return false } - u, _ := named.under().(*Interface) + u, _ := named.Underlying().(*Interface) return u != nil && !u.IsMethodSet() } @@ -637,31 +640,33 @@ func (check *Checker) typeDecl(obj *TypeName, tdecl *ast.TypeSpec, def *TypeName named := check.newNamed(obj, nil, nil) setDefType(def, named) + // The RHS of a named N can be nil if, for example, N is defined as a cycle of aliases with + // gotypesalias=0. Consider: + // + // type D N // N.resolve() will panic + // type N A + // type A = N // N.fromRHS is not set before N.resolve(), since A does not call setDefType + // + // There is likely a better way to detect such cases, but it may not be worth the effort. + // Instead, we briefly permit a nil N.fromRHS while type-checking D. 
+ named.allowNilRHS = true + defer (func() { named.allowNilRHS = false })() + if tdecl.TypeParams != nil { check.openScope(tdecl, "type parameters") defer check.closeScope() check.collectTypeParams(&named.tparams, tdecl.TypeParams) } - // determine underlying type of named rhs = check.definedType(tdecl.Type, obj) assert(rhs != nil) named.fromRHS = rhs - // If the underlying type was not set while type-checking the right-hand - // side, it is invalid and an error should have been reported elsewhere. - if named.underlying == nil { - named.underlying = Typ[Invalid] - } - - // Disallow a lone type parameter as the RHS of a type declaration (go.dev/issue/45639). - // We don't need this restriction anymore if we make the underlying type of a type - // parameter its constraint interface: if the RHS is a lone type parameter, we will - // use its underlying type (like we do for any RHS in a type declaration), and its - // underlying type is an interface and the type declaration is well defined. + // spec: "In a type definition the given type cannot be a type parameter." + // (See also go.dev/issue/45639.) if isTypeParam(rhs) { check.error(tdecl.Type, MisplacedTypeParam, "cannot use a type parameter as RHS in type declaration") - named.underlying = Typ[Invalid] + named.fromRHS = Typ[Invalid] } } @@ -814,7 +819,7 @@ func (check *Checker) collectMethods(obj *TypeName) { } func (check *Checker) checkFieldUniqueness(base *Named) { - if t, _ := base.under().(*Struct); t != nil { + if t, _ := base.Underlying().(*Struct); t != nil { var mset objset for i := 0; i < base.NumMethods(); i++ { m := base.Method(i) diff --git a/src/go/types/expr.go b/src/go/types/expr.go index 97d8c42997863c..65995d9bffe690 100644 --- a/src/go/types/expr.go +++ b/src/go/types/expr.go @@ -336,7 +336,7 @@ func (check *Checker) updateExprType(x ast.Expr, typ Type, final bool) { // If the new type is not final and still untyped, just // update the recorded type. if !final && isUntyped(typ) { - old.typ = under(typ).(*Basic) + old.typ = typ.Underlying().(*Basic) check.untyped[x] = old return } @@ -398,7 +398,7 @@ func (check *Checker) implicitTypeAndValue(x *operand, target Type) (Type, const return nil, nil, InvalidUntypedConversion } - switch u := under(target).(type) { + switch u := target.Underlying().(type) { case *Basic: if x.mode == constant_ { v, code := check.representation(x, u) @@ -605,7 +605,7 @@ Error: // incomparableCause returns a more specific cause why typ is not comparable. // If there is no more specific cause, the result is "". func (check *Checker) incomparableCause(typ Type) string { - switch under(typ).(type) { + switch typ.Underlying().(type) { case *Slice, *Signature, *Map: return compositeKind(typ) + " can only be compared to nil" } @@ -955,7 +955,7 @@ type target struct { // The result is nil if typ is not a signature. 
func newTarget(typ Type, desc string) *target { if typ != nil { - if sig, _ := under(typ).(*Signature); sig != nil { + if sig, _ := typ.Underlying().(*Signature); sig != nil { return &target{sig, desc} } } @@ -1101,7 +1101,7 @@ func (check *Checker) exprInternal(T *target, x *operand, e ast.Expr, hint Type) check.errorf(x, InvalidAssert, invalidOp+"cannot use type assertion on type parameter value %s", x) goto Error } - if _, ok := under(x.typ).(*Interface); !ok { + if _, ok := x.typ.Underlying().(*Interface); !ok { check.errorf(x, InvalidAssert, invalidOp+"%s is not an interface", x) goto Error } diff --git a/src/go/types/gcsizes.go b/src/go/types/gcsizes.go index 227c53e1d280a7..adc350a6c2d001 100644 --- a/src/go/types/gcsizes.go +++ b/src/go/types/gcsizes.go @@ -19,7 +19,7 @@ func (s *gcSizes) Alignof(T Type) (result int64) { // For arrays and structs, alignment is defined in terms // of alignment of the elements and fields, respectively. - switch t := under(T).(type) { + switch t := T.Underlying().(type) { case *Array: // spec: "For a variable x of array type: unsafe.Alignof(x) // is the same as unsafe.Alignof(x[0]), but at least 1." @@ -99,7 +99,7 @@ func (s *gcSizes) Offsetsof(fields []*Var) []int64 { } func (s *gcSizes) Sizeof(T Type) int64 { - switch t := under(T).(type) { + switch t := T.Underlying().(type) { case *Basic: assert(isTyped(T)) k := t.kind diff --git a/src/go/types/index.go b/src/go/types/index.go index 1d4f36dcf3c577..42e472001318cb 100644 --- a/src/go/types/index.go +++ b/src/go/types/index.go @@ -36,7 +36,7 @@ func (check *Checker) indexExpr(x *operand, e *indexedExpr) (isFuncInst bool) { return false case value: - if sig, _ := under(x.typ).(*Signature); sig != nil && sig.TypeParams().Len() > 0 { + if sig, _ := x.typ.Underlying().(*Signature); sig != nil && sig.TypeParams().Len() > 0 { // function instantiation return true } @@ -51,7 +51,7 @@ func (check *Checker) indexExpr(x *operand, e *indexedExpr) (isFuncInst bool) { // ordinary index expression valid := false length := int64(-1) // valid if >= 0 - switch typ := under(x.typ).(type) { + switch typ := x.typ.Underlying().(type) { case *Basic: if isString(typ) { valid = true @@ -74,7 +74,7 @@ func (check *Checker) indexExpr(x *operand, e *indexedExpr) (isFuncInst bool) { x.typ = typ.elem case *Pointer: - if typ, _ := under(typ.base).(*Array); typ != nil { + if typ, _ := typ.base.Underlying().(*Array); typ != nil { valid = true length = typ.len x.mode = variable @@ -125,7 +125,7 @@ func (check *Checker) indexExpr(x *operand, e *indexedExpr) (isFuncInst bool) { mode = value } case *Pointer: - if t, _ := under(t.base).(*Array); t != nil { + if t, _ := t.base.Underlying().(*Array); t != nil { l = t.len e = t.elem } @@ -252,7 +252,7 @@ func (check *Checker) sliceExpr(x *operand, e *ast.SliceExpr) { // but don't go from untyped string to string. 
cu = Typ[String] if !isTypeParam(x.typ) { - cu = under(x.typ) // untyped string remains untyped + cu = x.typ.Underlying() // untyped string remains untyped } } @@ -297,7 +297,7 @@ func (check *Checker) sliceExpr(x *operand, e *ast.SliceExpr) { x.typ = &Slice{elem: u.elem} case *Pointer: - if u, _ := under(u.base).(*Array); u != nil { + if u, _ := u.base.Underlying().(*Array); u != nil { valid = true length = u.len x.typ = &Slice{elem: u.elem} diff --git a/src/go/types/infer.go b/src/go/types/infer.go index e955880674c136..a8da7ac674abf9 100644 --- a/src/go/types/infer.go +++ b/src/go/types/infer.go @@ -671,7 +671,7 @@ func coreTerm(tpar *TypeParam) (*term, bool) { if n == 1 { if debug { u, _ := commonUnder(tpar, nil) - assert(under(single.typ) == u) + assert(single.typ.Underlying() == u) } return single, true } diff --git a/src/go/types/instantiate.go b/src/go/types/instantiate.go index eef473447da414..6488494cd83227 100644 --- a/src/go/types/instantiate.go +++ b/src/go/types/instantiate.go @@ -86,7 +86,7 @@ func Instantiate(ctxt *Context, orig Type, targs []Type, validate bool) (Type, e // // For Named types the resulting instance may be unexpanded. // -// check may be nil (when not type-checking syntax); pos is used only only if check is non-nil. +// check may be nil (when not type-checking syntax); pos is used only if check is non-nil. func (check *Checker) instance(pos token.Pos, orig genericType, targs []Type, expanding *Named, ctxt *Context) (res Type) { // The order of the contexts below matters: we always prefer instances in the // expanding instance context in order to preserve reference cycles. @@ -229,12 +229,12 @@ func (check *Checker) verify(pos token.Pos, tparams []*TypeParam, targs []Type, // If the provided cause is non-nil, it may be set to an error string // explaining why V does not implement (or satisfy, for constraints) T. func (check *Checker) implements(V, T Type, constraint bool, cause *string) bool { - Vu := under(V) - Tu := under(T) + Vu := V.Underlying() + Tu := T.Underlying() if !isValid(Vu) || !isValid(Tu) { return true // avoid follow-on errors } - if p, _ := Vu.(*Pointer); p != nil && !isValid(under(p.base)) { + if p, _ := Vu.(*Pointer); p != nil && !isValid(p.base.Underlying()) { return true // avoid follow-on errors (see go.dev/issue/49541 for an example) } @@ -342,7 +342,7 @@ func (check *Checker) implements(V, T Type, constraint bool, cause *string) bool // If V ∉ t.typ but V ∈ ~t.typ then remember this type // so we can suggest it as an alternative in the error // message. - if alt == nil && !t.tilde && Identical(t.typ, under(t.typ)) { + if alt == nil && !t.tilde && Identical(t.typ, t.typ.Underlying()) { tt := *t tt.tilde = true if tt.includes(V) { diff --git a/src/go/types/lookup.go b/src/go/types/lookup.go index 16d63ae0f118ce..c479303a350b8c 100644 --- a/src/go/types/lookup.go +++ b/src/go/types/lookup.go @@ -155,7 +155,7 @@ func lookupFieldOrMethodImpl(T Type, addressable bool, pkg *Package, name string // *typ where typ is an interface (incl. a type parameter) has no methods. 
if isPtr { - if _, ok := under(typ).(*Interface); ok { + if _, ok := typ.Underlying().(*Interface); ok { return } } @@ -205,7 +205,7 @@ func lookupFieldOrMethodImpl(T Type, addressable bool, pkg *Package, name string } } - switch t := under(typ).(type) { + switch t := typ.Underlying().(type) { case *Struct: // look for a matching field and collect embedded types for i, f := range t.fields { @@ -376,7 +376,7 @@ func MissingMethod(V Type, T *Interface, static bool) (method *Func, wrongType b // The comparator is used to compare signatures. // If a method is missing and cause is not nil, *cause describes the error. func (check *Checker) missingMethod(V, T Type, static bool, equivalent func(x, y Type) bool, cause *string) (method *Func, wrongType bool) { - methods := under(T).(*Interface).typeSet().methods // T must be an interface + methods := T.Underlying().(*Interface).typeSet().methods // T must be an interface if len(methods) == 0 { return nil, false } @@ -396,7 +396,7 @@ func (check *Checker) missingMethod(V, T Type, static bool, equivalent func(x, y var m *Func // method on T we're trying to implement var f *Func // method on V, if found (state is one of ok, wrongName, wrongSig) - if u, _ := under(V).(*Interface); u != nil { + if u, _ := V.Underlying().(*Interface); u != nil { tset := u.typeSet() for _, m = range methods { _, f = tset.LookupMethod(m.pkg, m.name, false) @@ -537,7 +537,7 @@ func (check *Checker) hasAllMethods(V, T Type, static bool, equivalent func(x, y // hasInvalidEmbeddedFields reports whether T is a struct (or a pointer to a struct) that contains // (directly or indirectly) embedded fields with invalid types. func hasInvalidEmbeddedFields(T Type, seen map[*Struct]bool) bool { - if S, _ := under(derefStructPtr(T)).(*Struct); S != nil && !seen[S] { + if S, _ := derefStructPtr(T).Underlying().(*Struct); S != nil && !seen[S] { if seen == nil { seen = make(map[*Struct]bool) } @@ -552,14 +552,14 @@ func hasInvalidEmbeddedFields(T Type, seen map[*Struct]bool) bool { } func isInterfacePtr(T Type) bool { - p, _ := under(T).(*Pointer) + p, _ := T.Underlying().(*Pointer) return p != nil && IsInterface(p.base) } // check may be nil. func (check *Checker) interfacePtrError(T Type) string { assert(isInterfacePtr(T)) - if p, _ := under(T).(*Pointer); isTypeParam(p.base) { + if p, _ := T.Underlying().(*Pointer); isTypeParam(p.base) { return check.sprintf("type %s is pointer to type parameter, not type parameter", T) } return check.sprintf("type %s is pointer to interface, not interface", T) @@ -632,8 +632,8 @@ func deref(typ Type) (Type, bool) { // derefStructPtr dereferences typ if it is a (named or unnamed) pointer to a // (named or unnamed) struct and returns its base. Otherwise it returns typ. 
func derefStructPtr(typ Type) Type { - if p, _ := under(typ).(*Pointer); p != nil { - if _, ok := under(p.base).(*Struct); ok { + if p, _ := typ.Underlying().(*Pointer); p != nil { + if _, ok := p.base.Underlying().(*Struct); ok { return p.base } } diff --git a/src/go/types/methodset.go b/src/go/types/methodset.go index ac8f0bdd288e27..2eac62e6028e0f 100644 --- a/src/go/types/methodset.go +++ b/src/go/types/methodset.go @@ -133,7 +133,7 @@ func NewMethodSet(T Type) *MethodSet { } } - switch t := under(typ).(type) { + switch t := typ.Underlying().(type) { case *Struct: for i, f := range t.fields { if fset == nil { diff --git a/src/go/types/named.go b/src/go/types/named.go index b2f99ffccb9062..d7e4c2479634fc 100644 --- a/src/go/types/named.go +++ b/src/go/types/named.go @@ -65,14 +65,14 @@ import ( // - We say that a Named type is "resolved" if its RHS information has been // loaded or fully type-checked. For Named types constructed from export // data, this may involve invoking a loader function to extract information -// from export data. For instantiated named types this involves reading -// information from their origin. +// from export data. For instantiated Named types this involves reading +// information from their origin and substituting type arguments into a +// "synthetic" RHS; this process is called "expanding" the RHS (see below). // - We say that a Named type is "expanded" if it is an instantiated type and -// type parameters in its underlying type and methods have been substituted -// with the type arguments from the instantiation. A type may be partially -// expanded if some but not all of these details have been substituted. -// Similarly, we refer to these individual details (underlying type or -// method) as being "expanded". +// type parameters in its RHS and methods have been substituted with the type +// arguments from the instantiation. A type may be partially expanded if some +// but not all of these details have been substituted. Similarly, we refer to +// these individual details (RHS or method) as being "expanded". // - When all information is known for a named type, we say it is "complete". // // Some invariants to keep in mind: each declared Named type has a single @@ -110,18 +110,17 @@ type Named struct { check *Checker // non-nil during type-checking; nil otherwise obj *TypeName // corresponding declared object for declared types; see above for instantiated types - // fromRHS holds the type (on RHS of declaration) this *Named type is derived - // from (for cycle reporting). Only used by validType, and therefore does not - // require synchronization. 
- fromRHS Type + // flags indicating temporary violations of the invariants for fromRHS and underlying + allowNilRHS bool // same as below, as well as briefly during checking of a type declaration + allowNilUnderlying bool // may be true from creation via [NewNamed] until [Named.SetUnderlying] - // information for instantiated types; nil otherwise - inst *instance + underlying Type // underlying type, or nil + inst *instance // information for instantiated types; nil otherwise - mu sync.Mutex // guards all fields below - state_ uint32 // the current state of this type; must only be accessed atomically - underlying Type // possibly a *Named during setup; never a *Named once set up completely - tparams *TypeParamList // type parameters, or nil + mu sync.Mutex // guards all fields below + state_ uint32 // the current state of this type; must only be accessed atomically + fromRHS Type // the declaration RHS this type is derived from + tparams *TypeParamList // type parameters, or nil // methods declared for this type (not the method set of this type) // Signatures are type-checked lazily. @@ -148,7 +147,7 @@ type namedState uint32 // Note: the order of states is relevant const ( - unresolved namedState = iota // tparams, underlying type and methods might be unavailable + unresolved namedState = iota // type parameters, RHS, underlying, and methods might be unavailable resolved // resolve has run; methods might be unexpanded (for instances) loaded // loader has run; constraints might be unexpanded (for generic types) complete // all data is known @@ -161,10 +160,18 @@ func NewNamed(obj *TypeName, underlying Type, methods []*Func) *Named { if asNamed(underlying) != nil { panic("underlying type must not be *Named") } - return (*Checker)(nil).newNamed(obj, underlying, methods) + n := (*Checker)(nil).newNamed(obj, underlying, methods) + if underlying == nil { + n.allowNilRHS = true + n.allowNilUnderlying = true + } else { + n.SetUnderlying(underlying) + } + return n + } -// resolve resolves the type parameters, methods, and underlying type of n. +// resolve resolves the type parameters, methods, and RHS of n. // // For the purposes of resolution, there are three categories of named types: // 1. Instantiated Types @@ -174,18 +181,17 @@ func NewNamed(obj *TypeName, underlying Type, methods []*Func) *Named { // Note that the above form a partition. // // Instantiated types: -// Type parameters, methods, and underlying type of n become accessible, -// though methods are lazily populated as needed. +// Type parameters, methods, and RHS of n become accessible, though methods +// are lazily populated as needed. // // Lazy loaded types: -// Type parameters, methods, and underlying type of n become accessible -// and are fully expanded. +// Type parameters, methods, and RHS of n become accessible and are fully +// expanded. // // All others: -// Effectively, nothing happens. The underlying type of n may still be -// a named type. +// Effectively, nothing happens. 
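For reference, the construction path these nil-RHS/nil-underlying allowances exist to support can be sketched with the exported go/types API alone (the package path and type name below are made up for illustration): a Named type is first created without an underlying type, as importers do, and only later completed via SetUnderlying.

```go
package main

import (
	"fmt"
	"go/token"
	"go/types"
)

func main() {
	// Create a named type T whose underlying type is not yet known,
	// mirroring the two-step construction used by importers.
	pkg := types.NewPackage("example.com/p", "p") // illustrative path and name
	obj := types.NewTypeName(token.NoPos, pkg, "T", nil)
	named := types.NewNamed(obj, nil, nil) // underlying deliberately nil for now

	// Later, once the right-hand side is known, complete the type.
	named.SetUnderlying(types.NewStruct(nil, nil))

	fmt.Println(named)              // example.com/p.T
	fmt.Println(named.Underlying()) // struct{}
}
```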
func (n *Named) resolve() *Named { - if n.state() > unresolved { // avoid locking below + if n.state() >= resolved { // avoid locking below return n } @@ -194,21 +200,19 @@ func (n *Named) resolve() *Named { n.mu.Lock() defer n.mu.Unlock() - if n.state() > unresolved { + if n.state() >= resolved { return n } if n.inst != nil { - assert(n.underlying == nil) // n is an unresolved instance - assert(n.loader == nil) // instances are created by instantiation, in which case n.loader is nil + assert(n.fromRHS == nil) // instantiated types are not declared types + assert(n.loader == nil) // cannot import an instantiation orig := n.inst.orig orig.resolve() - underlying := n.expandUnderlying() + n.fromRHS = n.expandRHS() n.tparams = orig.tparams - n.underlying = underlying - n.fromRHS = orig.fromRHS // for cycle detection if len(orig.methods) == 0 { n.setState(complete) // nothing further to do @@ -227,25 +231,25 @@ func (n *Named) resolve() *Named { // methods would need to support reentrant calls though. It would // also make the API more future-proof towards further extensions. if n.loader != nil { - assert(n.underlying == nil) - assert(n.TypeArgs().Len() == 0) // instances are created by instantiation, in which case n.loader is nil + assert(n.fromRHS == nil) // not loaded yet + assert(n.inst == nil) // cannot import an instantiation tparams, underlying, methods, delayed := n.loader(n) n.loader = nil n.tparams = bindTParams(tparams) - n.underlying = underlying n.fromRHS = underlying // for cycle detection n.methods = methods - // advance state to avoid deadlock calling delayed functions - n.setState(loaded) + n.setState(loaded) // avoid deadlock calling delayed functions for _, f := range delayed { f() } } + assert(n.fromRHS != nil || n.allowNilRHS) + assert(n.underlying == nil) // underlying comes after resolving n.setState(complete) return n } @@ -262,8 +266,8 @@ func (n *Named) setState(state namedState) { } // newNamed is like NewNamed but with a *Checker receiver. -func (check *Checker) newNamed(obj *TypeName, underlying Type, methods []*Func) *Named { - typ := &Named{check: check, obj: obj, fromRHS: underlying, underlying: underlying, methods: methods} +func (check *Checker) newNamed(obj *TypeName, fromRHS Type, methods []*Func) *Named { + typ := &Named{check: check, obj: obj, fromRHS: fromRHS, methods: methods} if obj.typ == nil { obj.typ = typ } @@ -303,25 +307,13 @@ func (check *Checker) newNamedInstance(pos token.Pos, orig *Named, targs []Type, return typ } -func (t *Named) cleanup() { - assert(t.inst == nil || t.inst.orig.inst == nil) - // Ensure that every defined type created in the course of type-checking has - // either non-*Named underlying type, or is unexpanded. - // - // This guarantees that we don't leak any types whose underlying type is - // *Named, because any unexpanded instances will lazily compute their - // underlying type by substituting in the underlying type of their origin. - // The origin must have either been imported or type-checked and expanded - // here, and in either case its underlying type will be fully expanded. - switch t.underlying.(type) { - case nil: - if t.TypeArgs().Len() == 0 { - panic("nil underlying") - } - case *Named, *Alias: - t.under() // t.under may add entries to check.cleaners +func (n *Named) cleanup() { + // Instances can have a nil underlying at the end of type checking — they + // will lazily expand it as needed. All other types must have one. 
+ if n.inst == nil { + n.resolve().under() } - t.check = nil + n.check = nil } // Obj returns the type name for the declaration defining the named type t. For @@ -480,10 +472,12 @@ func (t *Named) SetUnderlying(underlying Type) { if asNamed(underlying) != nil { panic("underlying type must not be *Named") } - t.resolve().underlying = underlying - if t.fromRHS == nil { - t.fromRHS = underlying // for cycle detection - } + // Invariant: Presence of underlying type implies it was resolved. + t.fromRHS = underlying + t.allowNilRHS = false + t.resolve() + t.underlying = underlying + t.allowNilUnderlying = false } // AddMethod adds method m unless it is already in the method list. @@ -526,9 +520,20 @@ func (t *Named) methodIndex(name string, foldCase bool) int { // Alias types. // // [underlying type]: https://go.dev/ref/spec#Underlying_types. -func (t *Named) Underlying() Type { - // TODO(gri) Investigate if Unalias can be moved to where underlying is set. - return Unalias(t.resolve().underlying) +func (n *Named) Underlying() Type { + n.resolve() + + // The gccimporter depends on writing a nil underlying via NewNamed and + // immediately reading it back. Rather than putting that in under() and + // complicating things there, we just check for that special case here. + if n.fromRHS == nil { + assert(n.allowNilRHS) + if n.allowNilUnderlying { + return nil + } + } + + return n.under() } func (t *Named) String() string { return TypeString(t, nil) } @@ -539,89 +544,55 @@ func (t *Named) String() string { return TypeString(t, nil) } // TODO(rfindley): reorganize the loading and expansion methods under this // heading. -// under returns the expanded underlying type of n0; possibly by following -// forward chains of named types. If an underlying type is found, resolve -// the chain by setting the underlying type for each defined type in the -// chain before returning it. If no underlying type is found or a cycle -// is detected, the result is Typ[Invalid]. If a cycle is detected and -// n0.check != nil, the cycle is reported. -// -// This is necessary because the underlying type of named may be itself a -// named type that is incomplete: -// -// type ( -// A B -// B *C -// C A -// ) -// -// The type of C is the (named) type of A which is incomplete, -// and which has as its underlying type the named type B. -func (n0 *Named) under() Type { - u := n0.Underlying() - - // If the underlying type of a defined type is not a defined - // (incl. instance) type, then that is the desired underlying - // type. - var n1 *Named - switch u1 := u.(type) { - case nil: - // After expansion via Underlying(), we should never encounter a nil - // underlying. - panic("nil underlying") - default: - // common case - return u - case *Named: - // handled below - n1 = u1 - } - - if n0.check == nil { - panic("Named.check == nil but type is incomplete") - } - - // Invariant: after this point n0 as well as any named types in its - // underlying chain should be set up when this function exits. - check := n0.check - n := n0 - - seen := make(map[*Named]int) // types that need their underlying type resolved - var path []Object // objects encountered, for cycle reporting - -loop: - for { - seen[n] = len(seen) - path = append(path, n.obj) - n = n1 - if i, ok := seen[n]; ok { - // cycle - check.cycleError(path[i:], firstInSrc(path[i:])) - u = Typ[Invalid] - break - } - u = n.Underlying() - switch u1 := u.(type) { +// under returns the (possibly expanded) underlying type of n. +// +// It does so by following RHS type chains. 
If a type literal is found, each +// named type in the chain has its underlying set to that type. Aliases are +// skipped because their underlying type is not memoized. +// +// This function also checks for instantiated layout cycles, which are +// reachable only in the case where resolve() expanded an instantiated +// type which became self-referencing without indirection. If such a +// cycle is found, the result is Typ[Invalid]; if n.check != nil, the +// cycle is also reported. +func (n *Named) under() Type { + assert(n.state() >= resolved) + + if n.underlying != nil { + return n.underlying + } + + var rhs Type = n + var u Type + + seen := make(map[*Named]int) + var path []Object + + for u == nil { + switch t := rhs.(type) { case nil: u = Typ[Invalid] - break loop - default: - break loop + case *Alias: + rhs = unalias(t) case *Named: - // Continue collecting *Named types in the chain. - n1 = u1 + if i, ok := seen[t]; ok { + n.check.cycleError(path[i:], firstInSrc(path[i:])) + u = Typ[Invalid] + break + } + seen[t] = len(seen) + path = append(path, t.obj) + t.resolve() + assert(t.fromRHS != nil || t.allowNilRHS) + rhs = t.fromRHS + default: + u = rhs // any type literal works } } - for n := range seen { - // We should never have to update the underlying type of an imported type; - // those underlying types should have been resolved during the import. - // Also, doing so would lead to a race condition (was go.dev/issue/31749). - // Do this check always, not just in debug mode (it's cheap). - if n.obj.pkg != check.pkg { - panic("imported type with unresolved underlying type") - } - n.underlying = u + // go back up the chain + for t := range seen { + t.underlying = u } return u @@ -649,78 +620,108 @@ func (check *Checker) context() *Context { return check.ctxt } -// expandUnderlying substitutes type arguments in the underlying type n.orig, -// returning the result. Returns Typ[Invalid] if there was an error. -func (n *Named) expandUnderlying() Type { +// expandRHS crafts a synthetic RHS for an instantiated type using the RHS of +// its origin type (which must be a generic type). +// +// Suppose that we had: +// +// type T[P any] struct { +// f P +// } +// +// type U T[int] +// +// When we go to U, we observe T[int]. Since T[int] is an instantiation, it has no +// declaration. Here, we craft a synthetic RHS for T[int] as if it were declared, +// somewhat similar to: +// +// type T[int] struct { +// f int +// } +// +// And note that the synthetic RHS here is the same as the underlying for U. Now, +// consider: +// +// type T[_ any] U +// type U int +// type V T[U] +// +// The synthetic RHS for T[U] becomes: +// +// type T[U] U +// +// Whereas the underlying of V is int, not U. +func (n *Named) expandRHS() (rhs Type) { check := n.check if check != nil && check.conf._Trace { - check.trace(n.obj.pos, "-- Named.expandUnderlying %s", n) + check.trace(n.obj.pos, "-- Named.expandRHS %s", n) check.indent++ defer func() { check.indent-- - check.trace(n.obj.pos, "=> %s (tparams = %s, under = %s)", n, n.tparams.list(), n.underlying) + check.trace(n.obj.pos, "=> %s (rhs = %s)", n, rhs) }() } - assert(n.inst.orig.underlying != nil) + assert(n.state() == unresolved) + if n.inst.ctxt == nil { n.inst.ctxt = NewContext() } + ctxt := n.inst.ctxt orig := n.inst.orig - targs := n.inst.targs - if asNamed(orig.underlying) != nil { - // We should only get a Named underlying type here during type checking - // (for example, in recursive type declarations). 
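As a concrete, external illustration of the T/U expansion described in the comment above, the following sketch (using only the public go/types API; the file name and package path are arbitrary) type-checks the same declarations and observes that the underlying type of U is the substituted struct.

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
	"log"
)

const src = `package p

type T[P any] struct {
	f P
}

type U T[int]
`

func main() {
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		log.Fatal(err)
	}

	conf := types.Config{}
	pkg, err := conf.Check("p", fset, []*ast.File{file}, nil)
	if err != nil {
		log.Fatal(err)
	}

	u := pkg.Scope().Lookup("U").Type()
	// The RHS of U is the instantiation T[int]; its expanded underlying
	// (and therefore U's underlying) is the substituted struct type.
	fmt.Println(u.Underlying()) // struct{f int}
}
```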
- assert(check != nil) - } + assert(orig.state() >= resolved) + assert(orig.fromRHS != nil) + + targs := n.inst.targs + tpars := orig.tparams - if orig.tparams.Len() != targs.Len() { - // Mismatching arg and tparam length may be checked elsewhere. + if targs.Len() != tpars.Len() { return Typ[Invalid] } - // Ensure that an instance is recorded before substituting, so that we - // resolve n for any recursive references. - h := n.inst.ctxt.instanceHash(orig, targs.list()) - n2 := n.inst.ctxt.update(h, orig, n.TypeArgs().list(), n) - assert(n == n2) + h := ctxt.instanceHash(orig, targs.list()) + u := ctxt.update(h, orig, targs.list(), n) // block fixed point infinite instantiation + assert(n == u) - smap := makeSubstMap(orig.tparams.list(), targs.list()) - var ctxt *Context + m := makeSubstMap(tpars.list(), targs.list()) if check != nil { ctxt = check.context() } - underlying := n.check.subst(n.obj.pos, orig.underlying, smap, n, ctxt) - // If the underlying type of n is an interface, we need to set the receiver of - // its methods accurately -- we set the receiver of interface methods on - // the RHS of a type declaration to the defined type. - if iface, _ := underlying.(*Interface); iface != nil { + + rhs = check.subst(n.obj.pos, orig.fromRHS, m, n, ctxt) + + // TODO(markfreeman): Can we handle this in substitution? + // If the RHS is an interface, we must set the receiver of interface methods + // to the named type. + if iface, _ := rhs.(*Interface); iface != nil { if methods, copied := replaceRecvType(iface.methods, orig, n); copied { - // If the underlying type doesn't actually use type parameters, it's - // possible that it wasn't substituted. In this case we need to create - // a new *Interface before modifying receivers. - if iface == orig.underlying { - old := iface - iface = check.newInterface() - iface.embeddeds = old.embeddeds - assert(old.complete) // otherwise we are copying incomplete data - iface.complete = old.complete - iface.implicit = old.implicit // should be false but be conservative - underlying = iface + // If the RHS doesn't use type parameters, it may not have been + // substituted; we need to craft a new interface first. + if iface == orig.fromRHS { + assert(iface.complete) // otherwise we are copying incomplete data + + crafted := check.newInterface() + crafted.complete = true + crafted.implicit = false + crafted.embeddeds = iface.embeddeds + + iface = crafted } iface.methods = methods iface.tset = nil // recompute type set with new methods - // If check != nil, check.newInterface will have saved the interface for later completion. - if check == nil { // golang/go#61561: all newly created interfaces must be fully evaluated + // go.dev/issue/61561: We have to complete the interface even without a checker. + if check == nil { iface.typeSet() } + + return iface } } - return underlying + return rhs } // safeUnderlying returns the underlying type of typ without expanding diff --git a/src/go/types/object.go b/src/go/types/object.go index 823c03c7fd94ce..1bba5d52a8b2b6 100644 --- a/src/go/types/object.go +++ b/src/go/types/object.go @@ -298,7 +298,8 @@ func NewTypeName(pos token.Pos, pkg *Package, name string, typ Type) *TypeName { // lazily calls resolve to finish constructing the Named object. 
func _NewTypeNameLazy(pos token.Pos, pkg *Package, name string, load func(*Named) ([]*TypeParam, Type, []*Func, []func())) *TypeName { obj := NewTypeName(pos, pkg, name, nil) - NewNamed(obj, nil, nil).loader = load + n := (*Checker)(nil).newNamed(obj, nil, nil) + n.loader = load return obj } @@ -641,7 +642,7 @@ func writeObject(buf *bytes.Buffer, obj Object, qf Qualifier) { } else { // TODO(gri) should this be fromRHS for *Named? // (See discussion in #66559.) - typ = under(typ) + typ = typ.Underlying() } } diff --git a/src/go/types/operand.go b/src/go/types/operand.go index d933c173ff60ee..9abeaafae0c4d2 100644 --- a/src/go/types/operand.go +++ b/src/go/types/operand.go @@ -198,7 +198,7 @@ func operandString(x *operand, qf Qualifier) string { what := compositeKind(x.typ) if what == "" { // x.typ must be basic type - what = under(x.typ).(*Basic).name + what = x.typ.Underlying().(*Basic).name } desc += what + " " } @@ -233,7 +233,7 @@ func operandString(x *operand, qf Qualifier) string { // ("array", "slice", etc.) or the empty string if typ is not // composite but a basic type. func compositeKind(typ Type) string { - switch under(typ).(type) { + switch typ.Underlying().(type) { case *Basic: return "" case *Array: @@ -323,8 +323,8 @@ func (x *operand) assignableTo(check *Checker, T Type, cause *string) (bool, Cod return true, 0 } - Vu := under(V) - Tu := under(T) + Vu := V.Underlying() + Tu := T.Underlying() Vp, _ := V.(*TypeParam) Tp, _ := T.(*TypeParam) diff --git a/src/go/types/predicates.go b/src/go/types/predicates.go index 45f28726eecba6..ad929db2667eba 100644 --- a/src/go/types/predicates.go +++ b/src/go/types/predicates.go @@ -31,11 +31,11 @@ func isString(t Type) bool { return isBasic(t, IsString) } func isIntegerOrFloat(t Type) bool { return isBasic(t, IsInteger|IsFloat) } func isConstType(t Type) bool { return isBasic(t, IsConstType) } -// isBasic reports whether under(t) is a basic type with the specified info. +// isBasic reports whether t.Underlying() is a basic type with the specified info. // If t is a type parameter the result is false; i.e., // isBasic does not look inside a type parameter. func isBasic(t Type, info BasicInfo) bool { - u, _ := under(t).(*Basic) + u, _ := t.Underlying().(*Basic) return u != nil && u.info&info != 0 } @@ -51,7 +51,7 @@ func allString(t Type) bool { return allBasic(t, IsString) } func allOrdered(t Type) bool { return allBasic(t, IsOrdered) } func allNumericOrString(t Type) bool { return allBasic(t, IsNumeric|IsString) } -// allBasic reports whether under(t) is a basic type with the specified info. +// allBasic reports whether t.Underlying() is a basic type with the specified info. // If t is a type parameter, the result is true if isBasic(t, info) is true // for all specific types of the type parameter's type set. func allBasic(t Type, info BasicInfo) bool { @@ -110,7 +110,7 @@ func isUntypedNumeric(t Type) bool { // IsInterface reports whether t is an interface type. func IsInterface(t Type) bool { - _, ok := under(t).(*Interface) + _, ok := t.Underlying().(*Interface) return ok } @@ -166,7 +166,7 @@ func comparableType(T Type, dynamic bool, seen map[Type]bool) *typeError { } seen[T] = true - switch t := under(T).(type) { + switch t := T.Underlying().(type) { case *Basic: // assume invalid types to be comparable to avoid follow-up errors if t.kind == UntypedNil { @@ -209,7 +209,7 @@ func comparableType(T Type, dynamic bool, seen map[Type]bool) *typeError { // hasNil reports whether type t includes the nil value. 
func hasNil(t Type) bool { - switch u := under(t).(type) { + switch u := t.Underlying().(type) { case *Basic: return u.kind == UnsafePointer case *Slice, *Pointer, *Signature, *Map, *Chan: diff --git a/src/go/types/range.go b/src/go/types/range.go index 303d001c72d05b..d38150dea77480 100644 --- a/src/go/types/range.go +++ b/src/go/types/range.go @@ -38,7 +38,7 @@ func (check *Checker) rangeStmt(inner stmtContext, rangeStmt *ast.RangeStmt, noN check.expr(nil, &x, rangeVar) if isTypes2 && x.mode != invalid && sValue == nil && !check.hasCallOrRecv { - if t, ok := arrayPtrDeref(under(x.typ)).(*Array); ok { + if t, ok := arrayPtrDeref(x.typ.Underlying()).(*Array); ok { for { // Put constant info on the thing inside parentheses. // That's where (*../noder/writer).expr expects it. diff --git a/src/go/types/signature.go b/src/go/types/signature.go index fa41c797b29403..2f8be54e17177c 100644 --- a/src/go/types/signature.go +++ b/src/go/types/signature.go @@ -226,7 +226,7 @@ func (check *Checker) collectRecv(rparam *ast.Field, scopePos token.Pos) (*Var, case *Alias: // Methods on generic aliases are not permitted. // Only report an error if the alias type is valid. - if isValid(unalias(t)) { + if isValid(t) { check.errorf(rbase, InvalidRecv, "cannot define new methods on generic alias type %s", t) } // Ok to continue but do not set basetype in this case so that @@ -461,7 +461,7 @@ func (check *Checker) validRecv(pos positioner, recv *Var) { break } var cause string - switch u := T.under().(type) { + switch u := T.Underlying().(type) { case *Basic: // unsafe.Pointer is treated like a regular pointer if u.kind == UnsafePointer { diff --git a/src/go/types/sizeof_test.go b/src/go/types/sizeof_test.go index fa07eb10f1907c..4ff255ffa027eb 100644 --- a/src/go/types/sizeof_test.go +++ b/src/go/types/sizeof_test.go @@ -30,7 +30,7 @@ func TestSizeof(t *testing.T) { {Interface{}, 40, 80}, {Map{}, 16, 32}, {Chan{}, 12, 24}, - {Named{}, 60, 112}, + {Named{}, 64, 120}, {TypeParam{}, 28, 48}, {term{}, 12, 24}, diff --git a/src/go/types/sizes.go b/src/go/types/sizes.go index 51ea224f0b2997..e2ff047bc859c3 100644 --- a/src/go/types/sizes.go +++ b/src/go/types/sizes.go @@ -57,7 +57,7 @@ func (s *StdSizes) Alignof(T Type) (result int64) { // For arrays and structs, alignment is defined in terms // of alignment of the elements and fields, respectively. - switch t := under(T).(type) { + switch t := T.Underlying().(type) { case *Array: // spec: "For a variable x of array type: unsafe.Alignof(x) // is the same as unsafe.Alignof(x[0]), but at least 1." @@ -165,7 +165,7 @@ var basicSizes = [...]byte{ } func (s *StdSizes) Sizeof(T Type) int64 { - switch t := under(T).(type) { + switch t := T.Underlying().(type) { case *Basic: assert(isTyped(T)) k := t.kind @@ -310,7 +310,7 @@ func (conf *Config) offsetsof(T *Struct) []int64 { func (conf *Config) offsetof(T Type, index []int) int64 { var offs int64 for _, i := range index { - s := under(T).(*Struct) + s := T.Underlying().(*Struct) d := conf.offsetsof(s)[i] if d < 0 { return -1 diff --git a/src/go/types/struct.go b/src/go/types/struct.go index a6970832c7c0d4..0133eb450db6d7 100644 --- a/src/go/types/struct.go +++ b/src/go/types/struct.go @@ -137,7 +137,7 @@ func (check *Checker) structType(styp *Struct, e *ast.StructType) { // Because we have a name, typ must be of the form T or *T, where T is the name // of a (named or alias) type, and t (= deref(typ)) must be the type of T. 
// We must delay this check to the end because we don't want to instantiate - // (via under(t)) a possibly incomplete type. + // (via t.Underlying()) a possibly incomplete type. // for use in the closure below embeddedTyp := typ @@ -145,7 +145,7 @@ func (check *Checker) structType(styp *Struct, e *ast.StructType) { check.later(func() { t, isPtr := deref(embeddedTyp) - switch u := under(t).(type) { + switch u := t.Underlying().(type) { case *Basic: if !isValid(t) { // error was reported before diff --git a/src/go/types/typeparam.go b/src/go/types/typeparam.go index 2ffef8f613319a..eafcb1c145d1f8 100644 --- a/src/go/types/typeparam.go +++ b/src/go/types/typeparam.go @@ -116,7 +116,7 @@ func (t *TypeParam) iface() *Interface { // determine constraint interface var ityp *Interface - switch u := under(bound).(type) { + switch u := bound.Underlying().(type) { case *Basic: if !isValid(u) { // error is reported elsewhere diff --git a/src/go/types/typeset.go b/src/go/types/typeset.go index 46ed5ce1802e0b..3a61c307561963 100644 --- a/src/go/types/typeset.go +++ b/src/go/types/typeset.go @@ -117,13 +117,13 @@ func (s *_TypeSet) all(f func(t, u Type) bool) bool { for _, t := range s.terms { assert(t.typ != nil) - // Unalias(x) == under(x) for ~x terms + // Unalias(x) == x.Underlying() for ~x terms u := Unalias(t.typ) if !t.tilde { - u = under(u) + u = u.Underlying() } if debug { - assert(Identical(u, under(u))) + assert(Identical(u, u.Underlying())) } if !f(t.typ, u) { return false @@ -267,7 +267,7 @@ func computeInterfaceTypeSet(check *Checker, pos token.Pos, ityp *Interface) *_T } var comparable bool var terms termlist - switch u := under(typ).(type) { + switch u := typ.Underlying().(type) { case *Interface: // For now we don't permit type parameters as constraints. assert(!isTypeParam(typ)) @@ -383,7 +383,7 @@ func computeUnionTypeSet(check *Checker, unionSets map[*Union]*_TypeSet, pos tok var allTerms termlist for _, t := range utyp.terms { var terms termlist - u := under(t.typ) + u := t.typ.Underlying() if ui, _ := u.(*Interface); ui != nil { // For now we don't permit type parameters as constraints. assert(!isTypeParam(t.typ)) diff --git a/src/go/types/typeset_test.go b/src/go/types/typeset_test.go index 51560924839eae..a4feb7469b0637 100644 --- a/src/go/types/typeset_test.go +++ b/src/go/types/typeset_test.go @@ -65,7 +65,7 @@ func TestTypeSetString(t *testing.T) { if obj == nil { t.Fatalf("%s: T not found (invalid test case)", body) } - T, ok := under(obj.Type()).(*Interface) + T, ok := obj.Type().Underlying().(*Interface) if !ok { t.Fatalf("%s: %v is not an interface (invalid test case)", body, obj) } diff --git a/src/go/types/typestring.go b/src/go/types/typestring.go index 804e80407ef52a..bd13459832deca 100644 --- a/src/go/types/typestring.go +++ b/src/go/types/typestring.go @@ -458,7 +458,7 @@ func (w *typeWriter) tuple(tup *Tuple, variadic bool) { } else { // special case: // append(s, "foo"...) leads to signature func([]byte, string...) 
- if t, _ := under(typ).(*Basic); t == nil || t.kind != String { + if t, _ := typ.Underlying().(*Basic); t == nil || t.kind != String { w.error("expected string type") continue } diff --git a/src/go/types/typeterm.go b/src/go/types/typeterm.go index 1cd4e1651f30cd..b9ba8f38f91c83 100644 --- a/src/go/types/typeterm.go +++ b/src/go/types/typeterm.go @@ -118,7 +118,7 @@ func (x *term) includes(t Type) bool { u := t if x.tilde { - u = under(u) + u = u.Underlying() } return Identical(x.typ, u) } @@ -158,11 +158,11 @@ func (x *term) disjoint(y *term) bool { } ux := x.typ if y.tilde { - ux = under(ux) + ux = ux.Underlying() } uy := y.typ if x.tilde { - uy = under(uy) + uy = uy.Underlying() } return !Identical(ux, uy) } diff --git a/src/go/types/typexpr.go b/src/go/types/typexpr.go index c040ee2a29cf82..88ec4b77fca7e9 100644 --- a/src/go/types/typexpr.go +++ b/src/go/types/typexpr.go @@ -168,11 +168,11 @@ func (check *Checker) validVarType(e ast.Expr, typ Type) { return } - // We don't want to call under() or complete interfaces while we are in + // We don't want to call typ.Underlying() or complete interfaces while we are in // the middle of type-checking parameter declarations that might belong // to interface methods. Delay this check to the end of type-checking. check.later(func() { - if t, _ := under(typ).(*Interface); t != nil { + if t, _ := typ.Underlying().(*Interface); t != nil { tset := computeInterfaceTypeSet(check, e.Pos(), t) // TODO(gri) is this the correct position? if !tset.IsMethodSet() { if tset.comparable { @@ -237,7 +237,7 @@ func (check *Checker) typInternal(e0 ast.Expr, def *TypeName) (T Type) { check.indent-- var under Type if T != nil { - // Calling under() here may lead to endless instantiations. + // Calling T.Underlying() here may lead to endless instantiations. // Test case: type T[P any] *T[P] under = safeUnderlying(T) } @@ -421,7 +421,7 @@ func setDefType(def *TypeName, typ Type) { case *Basic: assert(t == Typ[Invalid]) case *Named: - t.underlying = typ + t.fromRHS = typ default: panic(fmt.Sprintf("unexpected type %T", t)) } diff --git a/src/go/types/under.go b/src/go/types/under.go index 43bf0ad07cd25f..6056b2e4829d3b 100644 --- a/src/go/types/under.go +++ b/src/go/types/under.go @@ -9,19 +9,8 @@ package types import "iter" -// under returns the true expanded underlying type. -// If it doesn't exist, the result is Typ[Invalid]. -// under must only be called when a type is known -// to be fully set up. -func under(t Type) Type { - if t := asNamed(t); t != nil { - return t.under() - } - return t.Underlying() -} - // If typ is a type parameter, underIs returns the result of typ.underIs(f). -// Otherwise, underIs returns the result of f(under(typ)). +// Otherwise, underIs returns the result of f(typ.Underlying()). func underIs(typ Type, f func(Type) bool) bool { return all(typ, func(_, u Type) bool { return f(u) @@ -34,7 +23,7 @@ func all(t Type, f func(t, u Type) bool) bool { if p, _ := Unalias(t).(*TypeParam); p != nil { return p.typeset(f) } - return f(t, under(t)) + return f(t, t.Underlying()) } // typeset is an iterator over the (type/underlying type) pairs of the diff --git a/src/go/types/unify.go b/src/go/types/unify.go index abcbab433a1bd1..ea96de82f0f297 100644 --- a/src/go/types/unify.go +++ b/src/go/types/unify.go @@ -273,7 +273,7 @@ func (u *unifier) inferred(tparams []*TypeParam) []Type { // it is a non-type parameter interface. Otherwise it returns nil. 
func asInterface(x Type) (i *Interface) { if _, ok := Unalias(x).(*TypeParam); !ok { - i, _ = under(x).(*Interface) + i, _ = x.Underlying().(*Interface) } return i } @@ -342,7 +342,7 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) { if traceInference { u.tracef("%s ≡ under %s", x, ny) } - y = ny.under() + y = ny.Underlying() // Per the spec, a defined type cannot have an underlying type // that is a type parameter. assert(!isTypeParam(y)) @@ -433,7 +433,7 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) { u.set(px, y) default: // Neither x nor y are defined types. - if yc, _ := under(y).(*Chan); yc != nil && yc.dir != SendRecv { + if yc, _ := y.Underlying().(*Chan); yc != nil && yc.dir != SendRecv { // y is a directed channel type: select y. u.set(px, y) } diff --git a/src/go/types/union.go b/src/go/types/union.go index 30365e30bc2a06..3f7efdf1f38a9d 100644 --- a/src/go/types/union.go +++ b/src/go/types/union.go @@ -94,7 +94,7 @@ func parseUnion(check *Checker, uexpr ast.Expr) Type { continue } - u := under(t.typ) + u := t.typ.Underlying() f, _ := u.(*Interface) if t.tilde { if f != nil { diff --git a/src/go/types/universe.go b/src/go/types/universe.go index 4753111c118108..70935dc35ff4be 100644 --- a/src/go/types/universe.go +++ b/src/go/types/universe.go @@ -119,7 +119,7 @@ func defPredeclaredTypes() { { obj := NewTypeName(nopos, nil, "error", nil) obj.setColor(black) - typ := NewNamed(obj, nil, nil) + typ := (*Checker)(nil).newNamed(obj, nil, nil) // error.Error() string recv := newVar(RecvVar, nopos, nil, "", typ) @@ -131,7 +131,8 @@ func defPredeclaredTypes() { ityp := &Interface{methods: []*Func{err}, complete: true} computeInterfaceTypeSet(nil, nopos, ityp) // prevent races due to lazy computation of tset - typ.SetUnderlying(ityp) + typ.fromRHS = ityp + typ.Underlying() def(obj) } @@ -139,12 +140,13 @@ func defPredeclaredTypes() { { obj := NewTypeName(nopos, nil, "comparable", nil) obj.setColor(black) - typ := NewNamed(obj, nil, nil) + typ := (*Checker)(nil).newNamed(obj, nil, nil) // interface{} // marked as comparable ityp := &Interface{complete: true, tset: &_TypeSet{nil, allTermlist, true}} - typ.SetUnderlying(ityp) + typ.fromRHS = ityp + typ.Underlying() def(obj) } } diff --git a/src/go/types/validtype.go b/src/go/types/validtype.go index f87b82c439ebc3..578dcba1f97f3b 100644 --- a/src/go/types/validtype.go +++ b/src/go/types/validtype.go @@ -94,13 +94,6 @@ func (check *Checker) validType0(pos token.Pos, typ Type, nest, path []*Named) b // break // } - // Don't report a 2nd error if we already know the type is invalid - // (e.g., if a cycle was detected earlier, via under). - // Note: ensure that t.orig is fully resolved by calling Underlying(). - if !isValid(t.Underlying()) { - return false - } - // If the current type t is also found in nest, (the memory of) t is // embedded in itself, indicating an invalid recursive type. for _, e := range nest { @@ -128,8 +121,9 @@ func (check *Checker) validType0(pos token.Pos, typ Type, nest, path []*Named) b // are not yet available to other goroutines). assert(t.obj.pkg == check.pkg) assert(t.Origin().obj.pkg == check.pkg) - t.underlying = Typ[Invalid] - t.Origin().underlying = Typ[Invalid] + + // let t become invalid when it resolves + t.Origin().fromRHS = Typ[Invalid] // Find the starting point of the cycle and report it. 
// Because each type in nest must also appear in path (see invariant below), diff --git a/src/internal/buildcfg/cfg.go b/src/internal/buildcfg/cfg.go index 9ab29568d22704..a75960b8e6c034 100644 --- a/src/internal/buildcfg/cfg.go +++ b/src/internal/buildcfg/cfg.go @@ -321,18 +321,13 @@ func goriscv64() int { } type gowasmFeatures struct { - SatConv bool - SignExt bool + // Legacy features, now always enabled + //SatConv bool + //SignExt bool } func (f gowasmFeatures) String() string { var flags []string - if f.SatConv { - flags = append(flags, "satconv") - } - if f.SignExt { - flags = append(flags, "signext") - } return strings.Join(flags, ",") } @@ -340,9 +335,9 @@ func gowasm() (f gowasmFeatures) { for opt := range strings.SplitSeq(envOr("GOWASM", ""), ",") { switch opt { case "satconv": - f.SatConv = true + // ignore, always enabled case "signext": - f.SignExt = true + // ignore, always enabled case "": // ignore default: @@ -452,12 +447,10 @@ func gogoarchTags() []string { return list case "wasm": var list []string - if GOWASM.SatConv { - list = append(list, GOARCH+".satconv") - } - if GOWASM.SignExt { - list = append(list, GOARCH+".signext") - } + // SatConv is always enabled + list = append(list, GOARCH+".satconv") + // SignExt is always enabled + list = append(list, GOARCH+".signext") return list } return nil diff --git a/src/internal/buildcfg/exp.go b/src/internal/buildcfg/exp.go index 707776b374b0d4..f1a1d8632ef507 100644 --- a/src/internal/buildcfg/exp.go +++ b/src/internal/buildcfg/exp.go @@ -79,10 +79,12 @@ func ParseGOEXPERIMENT(goos, goarch, goexp string) (*ExperimentFlags, error) { dwarf5Supported := (goos != "darwin" && goos != "ios" && goos != "aix") baseline := goexperiment.Flags{ - RegabiWrappers: regabiSupported, - RegabiArgs: regabiSupported, - Dwarf5: dwarf5Supported, - RandomizedHeapBase64: true, + RegabiWrappers: regabiSupported, + RegabiArgs: regabiSupported, + Dwarf5: dwarf5Supported, + RandomizedHeapBase64: true, + SizeSpecializedMalloc: true, + GreenTeaGC: true, } // Start with the statically enabled set of experiments. diff --git a/src/internal/godebugs/table.go b/src/internal/godebugs/table.go index 2d008825459bb2..852305e8553aab 100644 --- a/src/internal/godebugs/table.go +++ b/src/internal/godebugs/table.go @@ -42,6 +42,7 @@ var All = []Info{ {Name: "http2client", Package: "net/http"}, {Name: "http2debug", Package: "net/http", Opaque: true}, {Name: "http2server", Package: "net/http"}, + {Name: "httpcookiemaxnum", Package: "net/http", Changed: 24, Old: "0"}, {Name: "httplaxcontentlength", Package: "net/http", Changed: 22, Old: "1"}, {Name: "httpmuxgo121", Package: "net/http", Changed: 22, Old: "1"}, {Name: "httpservecontentkeepheaders", Package: "net/http", Changed: 23, Old: "1"}, diff --git a/src/internal/goexperiment/exp_goroutineleakprofile_off.go b/src/internal/goexperiment/exp_goroutineleakprofile_off.go new file mode 100644 index 00000000000000..63eafe9e6c74db --- /dev/null +++ b/src/internal/goexperiment/exp_goroutineleakprofile_off.go @@ -0,0 +1,8 @@ +// Code generated by mkconsts.go. DO NOT EDIT. + +//go:build !goexperiment.goroutineleakprofile + +package goexperiment + +const GoroutineLeakProfile = false +const GoroutineLeakProfileInt = 0 diff --git a/src/internal/goexperiment/exp_goroutineleakprofile_on.go b/src/internal/goexperiment/exp_goroutineleakprofile_on.go new file mode 100644 index 00000000000000..28a662eceb46f8 --- /dev/null +++ b/src/internal/goexperiment/exp_goroutineleakprofile_on.go @@ -0,0 +1,8 @@ +// Code generated by mkconsts.go. 
DO NOT EDIT. + +//go:build goexperiment.goroutineleakprofile + +package goexperiment + +const GoroutineLeakProfile = true +const GoroutineLeakProfileInt = 1 diff --git a/src/internal/goexperiment/exp_synctest_off.go b/src/internal/goexperiment/exp_synctest_off.go deleted file mode 100644 index fade13f89ca79c..00000000000000 --- a/src/internal/goexperiment/exp_synctest_off.go +++ /dev/null @@ -1,8 +0,0 @@ -// Code generated by mkconsts.go. DO NOT EDIT. - -//go:build !goexperiment.synctest - -package goexperiment - -const Synctest = false -const SynctestInt = 0 diff --git a/src/internal/goexperiment/exp_synctest_on.go b/src/internal/goexperiment/exp_synctest_on.go deleted file mode 100644 index 9c44be7276138b..00000000000000 --- a/src/internal/goexperiment/exp_synctest_on.go +++ /dev/null @@ -1,8 +0,0 @@ -// Code generated by mkconsts.go. DO NOT EDIT. - -//go:build goexperiment.synctest - -package goexperiment - -const Synctest = true -const SynctestInt = 1 diff --git a/src/internal/goexperiment/flags.go b/src/internal/goexperiment/flags.go index 84dbf594b8792a..07aa1d0aeed15d 100644 --- a/src/internal/goexperiment/flags.go +++ b/src/internal/goexperiment/flags.go @@ -100,9 +100,6 @@ type Flags struct { // inlining phase within the Go compiler. NewInliner bool - // Synctest enables the testing/synctest package. - Synctest bool - // Dwarf5 enables DWARF version 5 debug info generation. Dwarf5 bool @@ -118,4 +115,7 @@ type Flags struct { // SizeSpecializedMalloc enables malloc implementations that are specialized per size class. SizeSpecializedMalloc bool + + // GoroutineLeakProfile enables the collection of goroutine leak profiles. + GoroutineLeakProfile bool } diff --git a/src/internal/poll/fd_windows.go b/src/internal/poll/fd_windows.go index dd9845d1b223c3..6443f6eb30b8a5 100644 --- a/src/internal/poll/fd_windows.go +++ b/src/internal/poll/fd_windows.go @@ -9,6 +9,7 @@ import ( "internal/race" "internal/syscall/windows" "io" + "runtime" "sync" "sync/atomic" "syscall" @@ -75,25 +76,6 @@ type operation struct { // fields used by runtime.netpoll runtimeCtx uintptr mode int32 - - // fields used only by net package - buf syscall.WSABuf -} - -func (o *operation) setEvent() { - h, err := windows.CreateEvent(nil, 0, 0, nil) - if err != nil { - // This shouldn't happen when all CreateEvent arguments are zero. - panic(err) - } - // Set the low bit so that the external IOCP doesn't receive the completion packet. - o.o.HEvent = h | 1 -} - -func (o *operation) close() { - if o.o.HEvent != 0 { - syscall.CloseHandle(o.o.HEvent) - } } func (fd *FD) overlapped(o *operation) *syscall.Overlapped { @@ -107,9 +89,8 @@ func (fd *FD) overlapped(o *operation) *syscall.Overlapped { return &o.o } -func (o *operation) InitBuf(buf []byte) { - o.buf.Len = uint32(len(buf)) - o.buf.Buf = unsafe.SliceData(buf) +func newWsaBuf(b []byte) *syscall.WSABuf { + return &syscall.WSABuf{Buf: unsafe.SliceData(b), Len: uint32(len(b))} } var wsaBufsPool = sync.Pool{ @@ -211,6 +192,12 @@ var wsaRsaPool = sync.Pool{ }, } +var operationPool = sync.Pool{ + New: func() any { + return new(operation) + }, +} + // waitIO waits for the IO operation o to complete. func (fd *FD) waitIO(o *operation) error { if fd.isBlocking { @@ -249,23 +236,57 @@ func (fd *FD) cancelIO(o *operation) { fd.pd.waitCanceled(int(o.mode)) } +// pin pins ptr for the duration of the IO operation. +// If fd is in blocking mode, pin does nothing. 
+func (fd *FD) pin(mode int, ptr any) { + if fd.isBlocking { + return + } + if mode == 'r' { + fd.readPinner.Pin(ptr) + } else { + fd.writePinner.Pin(ptr) + } +} + // execIO executes a single IO operation o. // It supports both synchronous and asynchronous IO. -// o.qty and o.flags are set to zero before calling submit -// to avoid reusing the values from a previous call. -func (fd *FD) execIO(o *operation, submit func(o *operation) (uint32, error)) (int, error) { +func (fd *FD) execIO(mode int, submit func(o *operation) (uint32, error)) (int, error) { + if mode == 'r' { + defer fd.readPinner.Unpin() + } else { + defer fd.writePinner.Unpin() + } // Notify runtime netpoll about starting IO. - err := fd.pd.prepare(int(o.mode), fd.isFile) + err := fd.pd.prepare(mode, fd.isFile) if err != nil { return 0, err } + o := operationPool.Get().(*operation) + defer operationPool.Put(o) + *o = operation{ + o: syscall.Overlapped{ + OffsetHigh: uint32(fd.offset >> 32), + Offset: uint32(fd.offset), + }, + runtimeCtx: fd.pd.runtimeCtx, + mode: int32(mode), + } // Start IO. - if !fd.isBlocking && o.o.HEvent == 0 && !fd.pollable() { + if !fd.isBlocking && !fd.pollable() { // If the handle is opened for overlapped IO but we can't // use the runtime poller, then we need to use an // event to wait for the IO to complete. - o.setEvent() + h, err := windows.CreateEvent(nil, 0, 0, nil) + if err != nil { + // This shouldn't happen when all CreateEvent arguments are zero. + panic(err) + } + // Set the low bit so that the external IOCP doesn't receive the completion packet. + o.o.HEvent = h | 1 + defer syscall.CloseHandle(h) } + fd.pin(mode, o) qty, err := submit(o) var waitErr error // Blocking operations shouldn't return ERROR_IO_PENDING. @@ -320,11 +341,6 @@ type FD struct { // System file descriptor. Immutable until Close. Sysfd syscall.Handle - // Read operation. - rop operation - // Write operation. - wop operation - // I/O poller. pd pollDesc @@ -362,6 +378,11 @@ type FD struct { isBlocking bool disassociated atomic.Bool + + // readPinner and writePinner are automatically unpinned + // before execIO returns. + readPinner runtime.Pinner + writePinner runtime.Pinner } // setOffset sets the offset fields of the overlapped object @@ -379,8 +400,6 @@ type FD struct { // using an external mechanism. func (fd *FD) setOffset(off int64) { fd.offset = off - fd.rop.o.OffsetHigh, fd.rop.o.Offset = uint32(off>>32), uint32(off) - fd.wop.o.OffsetHigh, fd.wop.o.Offset = uint32(off>>32), uint32(off) } // addOffset adds the given offset to the current offset. @@ -431,8 +450,6 @@ func (fd *FD) Init(net string, pollable bool) error { } fd.isFile = fd.kind != kindNet fd.isBlocking = !pollable - fd.rop.mode = 'r' - fd.wop.mode = 'w' // It is safe to add overlapped handles that also perform I/O // outside of the runtime poller. The runtime poller will ignore @@ -441,8 +458,6 @@ func (fd *FD) Init(net string, pollable bool) error { if err != nil { return err } - fd.rop.runtimeCtx = fd.pd.runtimeCtx - fd.wop.runtimeCtx = fd.pd.runtimeCtx if fd.kind != kindNet || socketCanUseSetFileCompletionNotificationModes { // Non-socket handles can use SetFileCompletionNotificationModes without problems. err := syscall.SetFileCompletionNotificationModes(fd.Sysfd, @@ -481,8 +496,6 @@ func (fd *FD) destroy() error { if fd.Sysfd == syscall.InvalidHandle { return syscall.EINVAL } - fd.rop.close() - fd.wop.close() // Poller may want to unregister fd in readiness notification mechanism, // so this must be executed before fd.CloseFunc. 
fd.pd.close() @@ -537,6 +550,10 @@ func (fd *FD) Read(buf []byte) (int, error) { defer fd.readUnlock() } + if len(buf) > 0 { + fd.pin('r', &buf[0]) + } + if len(buf) > maxRW { buf = buf[:maxRW] } @@ -547,10 +564,8 @@ func (fd *FD) Read(buf []byte) (int, error) { case kindConsole: n, err = fd.readConsole(buf) case kindFile, kindPipe: - o := &fd.rop - o.InitBuf(buf) - n, err = fd.execIO(o, func(o *operation) (qty uint32, err error) { - err = syscall.ReadFile(fd.Sysfd, unsafe.Slice(o.buf.Buf, o.buf.Len), &qty, fd.overlapped(o)) + n, err = fd.execIO('r', func(o *operation) (qty uint32, err error) { + err = syscall.ReadFile(fd.Sysfd, buf, &qty, fd.overlapped(o)) return qty, err }) fd.addOffset(n) @@ -564,11 +579,9 @@ func (fd *FD) Read(buf []byte) (int, error) { } } case kindNet: - o := &fd.rop - o.InitBuf(buf) - n, err = fd.execIO(o, func(o *operation) (qty uint32, err error) { + n, err = fd.execIO('r', func(o *operation) (qty uint32, err error) { var flags uint32 - err = syscall.WSARecv(fd.Sysfd, &o.buf, 1, &qty, &flags, &o.o, nil) + err = syscall.WSARecv(fd.Sysfd, newWsaBuf(buf), 1, &qty, &flags, &o.o, nil) return qty, err }) if race.Enabled { @@ -656,7 +669,7 @@ func (fd *FD) readConsole(b []byte) (int, error) { } // Pread emulates the Unix pread system call. -func (fd *FD) Pread(b []byte, off int64) (int, error) { +func (fd *FD) Pread(buf []byte, off int64) (int, error) { if fd.kind == kindPipe { // Pread does not work with pipes return 0, syscall.ESPIPE @@ -667,8 +680,12 @@ func (fd *FD) Pread(b []byte, off int64) (int, error) { } defer fd.readWriteUnlock() - if len(b) > maxRW { - b = b[:maxRW] + if len(buf) > 0 { + fd.pin('r', &buf[0]) + } + + if len(buf) > maxRW { + buf = buf[:maxRW] } if fd.isBlocking { @@ -687,17 +704,15 @@ func (fd *FD) Pread(b []byte, off int64) (int, error) { curoffset := fd.offset defer fd.setOffset(curoffset) } - o := &fd.rop - o.InitBuf(b) fd.setOffset(off) - n, err := fd.execIO(o, func(o *operation) (qty uint32, err error) { - err = syscall.ReadFile(fd.Sysfd, unsafe.Slice(o.buf.Buf, o.buf.Len), &qty, &o.o) + n, err := fd.execIO('r', func(o *operation) (qty uint32, err error) { + err = syscall.ReadFile(fd.Sysfd, buf, &qty, &o.o) return qty, err }) if err == syscall.ERROR_HANDLE_EOF { err = io.EOF } - if len(b) != 0 { + if len(buf) != 0 { err = fd.eofError(n, err) } return n, err @@ -715,14 +730,15 @@ func (fd *FD) ReadFrom(buf []byte) (int, syscall.Sockaddr, error) { return 0, nil, err } defer fd.readUnlock() - o := &fd.rop - o.InitBuf(buf) + + fd.pin('r', &buf[0]) + rsa := wsaRsaPool.Get().(*syscall.RawSockaddrAny) defer wsaRsaPool.Put(rsa) - n, err := fd.execIO(o, func(o *operation) (qty uint32, err error) { + n, err := fd.execIO('r', func(o *operation) (qty uint32, err error) { rsan := int32(unsafe.Sizeof(*rsa)) var flags uint32 - err = syscall.WSARecvFrom(fd.Sysfd, &o.buf, 1, &qty, &flags, rsa, &rsan, &o.o, nil) + err = syscall.WSARecvFrom(fd.Sysfd, newWsaBuf(buf), 1, &qty, &flags, rsa, &rsan, &o.o, nil) return qty, err }) err = fd.eofError(n, err) @@ -745,14 +761,15 @@ func (fd *FD) ReadFromInet4(buf []byte, sa4 *syscall.SockaddrInet4) (int, error) return 0, err } defer fd.readUnlock() - o := &fd.rop - o.InitBuf(buf) + + fd.pin('r', &buf[0]) + rsa := wsaRsaPool.Get().(*syscall.RawSockaddrAny) defer wsaRsaPool.Put(rsa) - n, err := fd.execIO(o, func(o *operation) (qty uint32, err error) { + n, err := fd.execIO('r', func(o *operation) (qty uint32, err error) { rsan := int32(unsafe.Sizeof(*rsa)) var flags uint32 - err = syscall.WSARecvFrom(fd.Sysfd, &o.buf, 1, 
&qty, &flags, rsa, &rsan, &o.o, nil) + err = syscall.WSARecvFrom(fd.Sysfd, newWsaBuf(buf), 1, &qty, &flags, rsa, &rsan, &o.o, nil) return qty, err }) err = fd.eofError(n, err) @@ -775,14 +792,15 @@ func (fd *FD) ReadFromInet6(buf []byte, sa6 *syscall.SockaddrInet6) (int, error) return 0, err } defer fd.readUnlock() - o := &fd.rop - o.InitBuf(buf) + + fd.pin('r', &buf[0]) + rsa := wsaRsaPool.Get().(*syscall.RawSockaddrAny) defer wsaRsaPool.Put(rsa) - n, err := fd.execIO(o, func(o *operation) (qty uint32, err error) { + n, err := fd.execIO('r', func(o *operation) (qty uint32, err error) { rsan := int32(unsafe.Sizeof(*rsa)) var flags uint32 - err = syscall.WSARecvFrom(fd.Sysfd, &o.buf, 1, &qty, &flags, rsa, &rsan, &o.o, nil) + err = syscall.WSARecvFrom(fd.Sysfd, newWsaBuf(buf), 1, &qty, &flags, rsa, &rsan, &o.o, nil) return qty, err }) err = fd.eofError(n, err) @@ -807,6 +825,9 @@ func (fd *FD) Write(buf []byte) (int, error) { defer fd.writeUnlock() } + if len(buf) > 0 { + fd.pin('w', &buf[0]) + } var ntotal int for { max := len(buf) @@ -820,10 +841,8 @@ func (fd *FD) Write(buf []byte) (int, error) { case kindConsole: n, err = fd.writeConsole(b) case kindPipe, kindFile: - o := &fd.wop - o.InitBuf(b) - n, err = fd.execIO(o, func(o *operation) (qty uint32, err error) { - err = syscall.WriteFile(fd.Sysfd, unsafe.Slice(o.buf.Buf, o.buf.Len), &qty, fd.overlapped(o)) + n, err = fd.execIO('w', func(o *operation) (qty uint32, err error) { + err = syscall.WriteFile(fd.Sysfd, b, &qty, fd.overlapped(o)) return qty, err }) fd.addOffset(n) @@ -831,10 +850,8 @@ func (fd *FD) Write(buf []byte) (int, error) { if race.Enabled { race.ReleaseMerge(unsafe.Pointer(&ioSync)) } - o := &fd.wop - o.InitBuf(b) - n, err = fd.execIO(o, func(o *operation) (qty uint32, err error) { - err = syscall.WSASend(fd.Sysfd, &o.buf, 1, &qty, 0, &o.o, nil) + n, err = fd.execIO('w', func(o *operation) (qty uint32, err error) { + err = syscall.WSASend(fd.Sysfd, newWsaBuf(b), 1, &qty, 0, &o.o, nil) return qty, err }) } @@ -903,6 +920,10 @@ func (fd *FD) Pwrite(buf []byte, off int64) (int, error) { } defer fd.readWriteUnlock() + if len(buf) > 0 { + fd.pin('w', &buf[0]) + } + if fd.isBlocking { curoffset, err := syscall.Seek(fd.Sysfd, 0, io.SeekCurrent) if err != nil { @@ -926,12 +947,9 @@ func (fd *FD) Pwrite(buf []byte, off int64) (int, error) { if max-ntotal > maxRW { max = ntotal + maxRW } - b := buf[ntotal:max] - o := &fd.wop - o.InitBuf(b) fd.setOffset(off + int64(ntotal)) - n, err := fd.execIO(o, func(o *operation) (qty uint32, err error) { - err = syscall.WriteFile(fd.Sysfd, unsafe.Slice(o.buf.Buf, o.buf.Len), &qty, &o.o) + n, err := fd.execIO('w', func(o *operation) (qty uint32, err error) { + err = syscall.WriteFile(fd.Sysfd, buf[ntotal:max], &qty, &o.o) return qty, err }) if n > 0 { @@ -960,7 +978,7 @@ func (fd *FD) Writev(buf *[][]byte) (int64, error) { } bufs := newWSABufs(buf) defer freeWSABufs(bufs) - n, err := fd.execIO(&fd.wop, func(o *operation) (qty uint32, err error) { + n, err := fd.execIO('w', func(o *operation) (qty uint32, err error) { err = syscall.WSASend(fd.Sysfd, &(*bufs)[0], uint32(len(*bufs)), &qty, 0, &o.o, nil) return qty, err }) @@ -978,25 +996,23 @@ func (fd *FD) WriteTo(buf []byte, sa syscall.Sockaddr) (int, error) { if len(buf) == 0 { // handle zero-byte payload - o := &fd.wop - o.InitBuf(buf) - n, err := fd.execIO(o, func(o *operation) (qty uint32, err error) { - err = syscall.WSASendto(fd.Sysfd, &o.buf, 1, &qty, 0, sa, &o.o, nil) + n, err := fd.execIO('w', func(o *operation) (qty uint32, err error) { 
+ err = syscall.WSASendto(fd.Sysfd, &syscall.WSABuf{}, 1, &qty, 0, sa, &o.o, nil) return qty, err }) return n, err } + fd.pin('w', &buf[0]) + ntotal := 0 for len(buf) > 0 { b := buf if len(b) > maxRW { b = b[:maxRW] } - o := &fd.wop - o.InitBuf(b) - n, err := fd.execIO(o, func(o *operation) (qty uint32, err error) { - err = syscall.WSASendto(fd.Sysfd, &o.buf, 1, &qty, 0, sa, &o.o, nil) + n, err := fd.execIO('w', func(o *operation) (qty uint32, err error) { + err = syscall.WSASendto(fd.Sysfd, newWsaBuf(b), 1, &qty, 0, sa, &o.o, nil) return qty, err }) ntotal += int(n) @@ -1017,25 +1033,23 @@ func (fd *FD) WriteToInet4(buf []byte, sa4 *syscall.SockaddrInet4) (int, error) if len(buf) == 0 { // handle zero-byte payload - o := &fd.wop - o.InitBuf(buf) - n, err := fd.execIO(o, func(o *operation) (qty uint32, err error) { - err = windows.WSASendtoInet4(fd.Sysfd, &o.buf, 1, &qty, 0, sa4, &o.o, nil) + n, err := fd.execIO('w', func(o *operation) (qty uint32, err error) { + err = windows.WSASendtoInet4(fd.Sysfd, &syscall.WSABuf{}, 1, &qty, 0, sa4, &o.o, nil) return qty, err }) return n, err } + fd.pin('w', &buf[0]) + ntotal := 0 for len(buf) > 0 { b := buf if len(b) > maxRW { b = b[:maxRW] } - o := &fd.wop - o.InitBuf(b) - n, err := fd.execIO(o, func(o *operation) (qty uint32, err error) { - err = windows.WSASendtoInet4(fd.Sysfd, &o.buf, 1, &qty, 0, sa4, &o.o, nil) + n, err := fd.execIO('w', func(o *operation) (qty uint32, err error) { + err = windows.WSASendtoInet4(fd.Sysfd, newWsaBuf(b), 1, &qty, 0, sa4, &o.o, nil) return qty, err }) ntotal += int(n) @@ -1056,25 +1070,23 @@ func (fd *FD) WriteToInet6(buf []byte, sa6 *syscall.SockaddrInet6) (int, error) if len(buf) == 0 { // handle zero-byte payload - o := &fd.wop - o.InitBuf(buf) - n, err := fd.execIO(o, func(o *operation) (qty uint32, err error) { - err = windows.WSASendtoInet6(fd.Sysfd, &o.buf, 1, &qty, 0, sa6, &o.o, nil) + n, err := fd.execIO('w', func(o *operation) (qty uint32, err error) { + err = windows.WSASendtoInet6(fd.Sysfd, &syscall.WSABuf{}, 1, &qty, 0, sa6, &o.o, nil) return qty, err }) return n, err } + fd.pin('w', &buf[0]) + ntotal := 0 for len(buf) > 0 { b := buf if len(b) > maxRW { b = b[:maxRW] } - o := &fd.wop - o.InitBuf(b) - n, err := fd.execIO(o, func(o *operation) (qty uint32, err error) { - err = windows.WSASendtoInet6(fd.Sysfd, &o.buf, 1, &qty, 0, sa6, &o.o, nil) + n, err := fd.execIO('w', func(o *operation) (qty uint32, err error) { + err = windows.WSASendtoInet6(fd.Sysfd, newWsaBuf(b), 1, &qty, 0, sa6, &o.o, nil) return qty, err }) ntotal += int(n) @@ -1090,17 +1102,16 @@ func (fd *FD) WriteToInet6(buf []byte, sa6 *syscall.SockaddrInet6) (int, error) // called when the descriptor is first created. This is here rather // than in the net package so that it can use fd.wop. func (fd *FD) ConnectEx(ra syscall.Sockaddr) error { - o := &fd.wop - _, err := fd.execIO(o, func(o *operation) (uint32, error) { + _, err := fd.execIO('w', func(o *operation) (uint32, error) { return 0, ConnectExFunc(fd.Sysfd, ra, nil, 0, nil, &o.o) }) return err } -func (fd *FD) acceptOne(s syscall.Handle, rawsa []syscall.RawSockaddrAny, o *operation) (string, error) { +func (fd *FD) acceptOne(s syscall.Handle, rawsa []syscall.RawSockaddrAny) (string, error) { // Submit accept request. 
rsan := uint32(unsafe.Sizeof(rawsa[0])) - _, err := fd.execIO(o, func(o *operation) (qty uint32, err error) { + _, err := fd.execIO('r', func(o *operation) (qty uint32, err error) { err = AcceptFunc(fd.Sysfd, s, (*byte)(unsafe.Pointer(&rawsa[0])), 0, rsan, rsan, &qty, &o.o) return qty, err @@ -1128,7 +1139,6 @@ func (fd *FD) Accept(sysSocket func() (syscall.Handle, error)) (syscall.Handle, } defer fd.readUnlock() - o := &fd.rop var rawsa [2]syscall.RawSockaddrAny for { s, err := sysSocket() @@ -1136,7 +1146,7 @@ func (fd *FD) Accept(sysSocket func() (syscall.Handle, error)) (syscall.Handle, return syscall.InvalidHandle, nil, 0, "", err } - errcall, err := fd.acceptOne(s, rawsa[:], o) + errcall, err := fd.acceptOne(s, rawsa[:]) if err == nil { return s, rawsa[:], uint32(unsafe.Sizeof(rawsa[0])), "", nil } @@ -1264,14 +1274,12 @@ func (fd *FD) RawRead(f func(uintptr) bool) error { // Use a zero-byte read as a way to get notified when this // socket is readable. h/t https://stackoverflow.com/a/42019668/332798 - o := &fd.rop - o.InitBuf(nil) - _, err := fd.execIO(o, func(o *operation) (qty uint32, err error) { + _, err := fd.execIO('r', func(o *operation) (qty uint32, err error) { var flags uint32 if !fd.IsStream { flags |= windows.MSG_PEEK } - err = syscall.WSARecv(fd.Sysfd, &o.buf, 1, &qty, &flags, &o.o, nil) + err = syscall.WSARecv(fd.Sysfd, &syscall.WSABuf{}, 1, &qty, &flags, &o.o, nil) return qty, err }) if err == windows.WSAEMSGSIZE { @@ -1361,7 +1369,7 @@ func (fd *FD) ReadMsg(p []byte, oob []byte, flags int) (int, int, int, syscall.S msg := newWSAMsg(p, oob, flags, true) defer freeWSAMsg(msg) - n, err := fd.execIO(&fd.rop, func(o *operation) (qty uint32, err error) { + n, err := fd.execIO('r', func(o *operation) (qty uint32, err error) { err = windows.WSARecvMsg(fd.Sysfd, msg, &qty, &o.o, nil) return qty, err }) @@ -1386,7 +1394,7 @@ func (fd *FD) ReadMsgInet4(p []byte, oob []byte, flags int, sa4 *syscall.Sockadd msg := newWSAMsg(p, oob, flags, true) defer freeWSAMsg(msg) - n, err := fd.execIO(&fd.rop, func(o *operation) (qty uint32, err error) { + n, err := fd.execIO('r', func(o *operation) (qty uint32, err error) { err = windows.WSARecvMsg(fd.Sysfd, msg, &qty, &o.o, nil) return qty, err }) @@ -1410,7 +1418,7 @@ func (fd *FD) ReadMsgInet6(p []byte, oob []byte, flags int, sa6 *syscall.Sockadd msg := newWSAMsg(p, oob, flags, true) defer freeWSAMsg(msg) - n, err := fd.execIO(&fd.rop, func(o *operation) (qty uint32, err error) { + n, err := fd.execIO('r', func(o *operation) (qty uint32, err error) { err = windows.WSARecvMsg(fd.Sysfd, msg, &qty, &o.o, nil) return qty, err }) @@ -1441,7 +1449,7 @@ func (fd *FD) WriteMsg(p []byte, oob []byte, sa syscall.Sockaddr) (int, int, err return 0, 0, err } } - n, err := fd.execIO(&fd.wop, func(o *operation) (qty uint32, err error) { + n, err := fd.execIO('w', func(o *operation) (qty uint32, err error) { err = windows.WSASendMsg(fd.Sysfd, msg, 0, nil, &o.o, nil) return qty, err }) @@ -1464,7 +1472,7 @@ func (fd *FD) WriteMsgInet4(p []byte, oob []byte, sa *syscall.SockaddrInet4) (in if sa != nil { msg.Namelen = sockaddrInet4ToRaw(msg.Name, sa) } - n, err := fd.execIO(&fd.wop, func(o *operation) (qty uint32, err error) { + n, err := fd.execIO('w', func(o *operation) (qty uint32, err error) { err = windows.WSASendMsg(fd.Sysfd, msg, 0, nil, &o.o, nil) return qty, err }) @@ -1487,7 +1495,7 @@ func (fd *FD) WriteMsgInet6(p []byte, oob []byte, sa *syscall.SockaddrInet6) (in if sa != nil { msg.Namelen = sockaddrInet6ToRaw(msg.Name, sa) } - n, err := 
fd.execIO(&fd.wop, func(o *operation) (qty uint32, err error) { + n, err := fd.execIO('w', func(o *operation) (qty uint32, err error) { err = windows.WSASendMsg(fd.Sysfd, msg, 0, nil, &o.o, nil) return qty, err }) diff --git a/src/internal/poll/sendfile_windows.go b/src/internal/poll/sendfile_windows.go index a052f4a1f82400..2bdfecf0134b62 100644 --- a/src/internal/poll/sendfile_windows.go +++ b/src/internal/poll/sendfile_windows.go @@ -62,18 +62,14 @@ func SendFile(fd *FD, src uintptr, size int64) (written int64, err error, handle // See https://docs.microsoft.com/en-us/windows/win32/api/mswsock/nf-mswsock-transmitfile const maxChunkSizePerCall = int64(0x7fffffff - 1) - o := &fd.wop for size > 0 { chunkSize := maxChunkSizePerCall if chunkSize > size { chunkSize = size } - off := startpos + written - o.o.Offset = uint32(off) - o.o.OffsetHigh = uint32(off >> 32) - - n, err := fd.execIO(o, func(o *operation) (uint32, error) { + fd.setOffset(startpos + written) + n, err := fd.execIO('w', func(o *operation) (uint32, error) { err := syscall.TransmitFile(fd.Sysfd, hsrc, uint32(chunkSize), 0, &o.o, nil, syscall.TF_WRITE_BEHIND) if err != nil { return 0, err diff --git a/src/internal/runtime/wasitest/testdata/tcpecho.go b/src/internal/runtime/wasitest/testdata/tcpecho.go index 819e3526885642..6da56acba10a1c 100644 --- a/src/internal/runtime/wasitest/testdata/tcpecho.go +++ b/src/internal/runtime/wasitest/testdata/tcpecho.go @@ -62,8 +62,7 @@ func findListener() (net.Listener, error) { l, err := net.FileListener(f) f.Close() - var se syscall.Errno - switch errors.As(err, &se); se { + switch se, _ := errors.AsType[syscall.Errno](err); se { case syscall.ENOTSOCK: continue case syscall.EBADF: diff --git a/src/internal/syscall/windows/zsyscall_windows.go b/src/internal/syscall/windows/zsyscall_windows.go index fb8bc80629a626..70c4d76dff0d0f 100644 --- a/src/internal/syscall/windows/zsyscall_windows.go +++ b/src/internal/syscall/windows/zsyscall_windows.go @@ -328,7 +328,7 @@ func GetFileInformationByHandleEx(handle syscall.Handle, class uint32, info *byt } func GetFileSizeEx(handle syscall.Handle, size *int64) (err error) { - r1, _, e1 := syscall.Syscall(procGetFileSizeEx.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(size)), 0) + r1, _, e1 := syscall.SyscallN(procGetFileSizeEx.Addr(), uintptr(handle), uintptr(unsafe.Pointer(size))) if r1 == 0 { err = errnoErr(e1) } diff --git a/src/internal/testenv/testenv_unix.go b/src/internal/testenv/testenv_unix.go index a629078842eadc..22eeca220da017 100644 --- a/src/internal/testenv/testenv_unix.go +++ b/src/internal/testenv/testenv_unix.go @@ -21,8 +21,7 @@ func syscallIsNotSupported(err error) bool { return false } - var errno syscall.Errno - if errors.As(err, &errno) { + if errno, ok := errors.AsType[syscall.Errno](err); ok { switch errno { case syscall.EPERM, syscall.EROFS: // User lacks permission: either the call requires root permission and the diff --git a/src/internal/types/testdata/fixedbugs/issue45550.go b/src/internal/types/testdata/fixedbugs/issue45550.go index 2ea4ffe3079540..32fdde6740c7a5 100644 --- a/src/internal/types/testdata/fixedbugs/issue45550.go +++ b/src/internal/types/testdata/fixedbugs/issue45550.go @@ -4,7 +4,7 @@ package p -type Builder /* ERROR "invalid recursive type" */ [T interface{ struct{ Builder[T] } }] struct{} +type Builder[T ~struct{ Builder[T] }] struct{} type myBuilder struct { Builder[myBuilder] } diff --git a/src/internal/types/testdata/fixedbugs/issue46461.go b/src/internal/types/testdata/fixedbugs/issue46461.go 
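(Editorial aside, not part of the patch: the wasitest and testenv hunks above are instances of a mechanical rewrite from errors.As to the new generic errors.AsType, whose (value, ok) form the rest of this change uses as well. A hedged sketch of the two patterns side by side; it only builds with a toolchain that already ships errors.AsType.)

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func main() {
	_, err := os.Open("does-not-exist")

	// Old pattern: declare a target variable and pass its address to errors.As.
	var pathErr *fs.PathError
	if errors.As(err, &pathErr) {
		fmt.Println("errors.As:", pathErr.Path)
	}

	// New pattern: the target type is a type argument and the matched
	// value is returned directly together with an ok bool.
	if pe, ok := errors.AsType[*fs.PathError](err); ok {
		fmt.Println("errors.AsType:", pe.Path)
	}
}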
index e823013f995167..454f7e836537c6 100644 --- a/src/internal/types/testdata/fixedbugs/issue46461.go +++ b/src/internal/types/testdata/fixedbugs/issue46461.go @@ -7,16 +7,16 @@ package p // test case 1 -type T /* ERROR "invalid recursive type" */ [U interface{ M() T[U] }] int +type T[U interface{ M() T[U] }] int type X int func (X) M() T[X] { return 0 } // test case 2 -type A /* ERROR "invalid recursive type" */ [T interface{ A[T] }] interface{} +type A[T interface{ A[T] }] interface{} // test case 3 -type A2 /* ERROR "invalid recursive type" */ [U interface{ A2[U] }] interface{ M() A2[U] } +type A2[U interface{ A2[U] }] interface{ M() A2[U] } type I interface{ A2[I]; M() A2[I] } diff --git a/src/internal/types/testdata/fixedbugs/issue46461a.go b/src/internal/types/testdata/fixedbugs/issue46461a.go index e4b8e1a240a973..74ed6c4882719c 100644 --- a/src/internal/types/testdata/fixedbugs/issue46461a.go +++ b/src/internal/types/testdata/fixedbugs/issue46461a.go @@ -7,17 +7,16 @@ package p // test case 1 -type T /* ERROR "invalid recursive type" */ [U interface{ M() T[U] }] int +type T[U interface{ M() T[U] }] int type X int func (X) M() T[X] { return 0 } // test case 2 -type A /* ERROR "invalid recursive type" */ [T interface{ A[T] }] interface{} +type A[T interface{ A[T] }] interface{} // test case 3 -// TODO(gri) should report error only once -type A2 /* ERROR "invalid recursive type" */ /* ERROR "invalid recursive type" */ [U interface{ A2[U] }] interface{ M() A2[U] } +type A2[U interface{ A2[U] }] interface{ M() A2[U] } type I interface{ A2[I]; M() A2[I] } diff --git a/src/internal/types/testdata/fixedbugs/issue47796.go b/src/internal/types/testdata/fixedbugs/issue47796.go index 7f719ff6745eaa..b07cdddababf67 100644 --- a/src/internal/types/testdata/fixedbugs/issue47796.go +++ b/src/internal/types/testdata/fixedbugs/issue47796.go @@ -6,16 +6,16 @@ package p // parameterized types with self-recursive constraints type ( - T1 /* ERROR "invalid recursive type" */ [P T1[P]] interface{} - T2 /* ERROR "invalid recursive type" */ [P, Q T2[P, Q]] interface{} + T1[P T1[P]] interface{} + T2[P, Q T2[P, Q]] interface{} T3[P T2[P, Q], Q interface{ ~string }] interface{} - T4a /* ERROR "invalid recursive type" */ [P T4a[P]] interface{ ~int } - T4b /* ERROR "invalid recursive type" */ [P T4b[int]] interface{ ~int } - T4c /* ERROR "invalid recursive type" */ [P T4c[string]] interface{ ~int } + T4a[P T4a[P]] interface{ ~int } + T4b[P T4b[int]] interface{ ~int } + T4c[P T4c[string /* ERROR "string does not satisfy T4c[string]" */]] interface{ ~int } // mutually recursive constraints - T5 /* ERROR "invalid recursive type" */ [P T6[P]] interface{ int } + T5[P T6[P]] interface{ int } T6[P T5[P]] interface{ int } ) @@ -28,6 +28,6 @@ var ( // test case from issue -type Eq /* ERROR "invalid recursive type" */ [a Eq[a]] interface { +type Eq[a Eq[a]] interface { Equal(that a) bool } diff --git a/src/internal/types/testdata/fixedbugs/issue48529.go b/src/internal/types/testdata/fixedbugs/issue48529.go index bcc5e3536d3457..eca1da89232b08 100644 --- a/src/internal/types/testdata/fixedbugs/issue48529.go +++ b/src/internal/types/testdata/fixedbugs/issue48529.go @@ -4,7 +4,7 @@ package p -type T /* ERROR "invalid recursive type" */ [U interface{ M() T[U, int] }] int +type T[U interface{ M() T /* ERROR "too many type arguments for type T" */ [U, int] }] int type X int diff --git a/src/internal/types/testdata/fixedbugs/issue49439.go b/src/internal/types/testdata/fixedbugs/issue49439.go index 3852f160948913..63bedf61911914 
100644 --- a/src/internal/types/testdata/fixedbugs/issue49439.go +++ b/src/internal/types/testdata/fixedbugs/issue49439.go @@ -6,21 +6,21 @@ package p import "unsafe" -type T0 /* ERROR "invalid recursive type" */ [P T0[P]] struct{} +type T0[P T0[P]] struct{} -type T1 /* ERROR "invalid recursive type" */ [P T2[P]] struct{} -type T2[P T1[P]] struct{} +type T1[P T2[P /* ERROR "P does not satisfy T1[P]" */]] struct{} +type T2[P T1[P /* ERROR "P does not satisfy T2[P]" */]] struct{} -type T3 /* ERROR "invalid recursive type" */ [P interface{ ~struct{ f T3[int] } }] struct{} +type T3[P interface{ ~struct{ f T3[int /* ERROR "int does not satisfy" */ ] } }] struct{} // valid cycle in M type N[P M[P]] struct{} -type M[Q any] struct { F *M[Q] } +type M[Q any] struct{ F *M[Q] } // "crazy" case type TC[P [unsafe.Sizeof(func() { - type T [P [unsafe.Sizeof(func(){})]byte] struct{} + type T[P [unsafe.Sizeof(func() {})]byte] struct{} })]byte] struct{} // test case from issue -type X /* ERROR "invalid recursive type" */ [T any, PT X[T]] interface{} +type X[T any, PT X /* ERROR "not enough type arguments for type X" */ [T]] interface{} diff --git a/src/internal/types/testdata/fixedbugs/issue68162.go b/src/internal/types/testdata/fixedbugs/issue68162.go new file mode 100644 index 00000000000000..8efd8a66dff4be --- /dev/null +++ b/src/internal/types/testdata/fixedbugs/issue68162.go @@ -0,0 +1,24 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +type N[B N[B]] interface { + Add(B) B +} + +func Add[P N[P]](x, y P) P { + return x.Add(y) +} + +type MyInt int + +func (x MyInt) Add(y MyInt) MyInt { + return x + y +} + +func main() { + var x, y MyInt = 2, 3 + println(Add(x, y)) +} diff --git a/src/internal/types/testdata/fixedbugs/issue75194.go b/src/internal/types/testdata/fixedbugs/issue75194.go new file mode 100644 index 00000000000000..ec2f9249ec9247 --- /dev/null +++ b/src/internal/types/testdata/fixedbugs/issue75194.go @@ -0,0 +1,14 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +type A /* ERROR "invalid recursive type: A refers to itself" */ struct { + a A +} + +type B /* ERROR "invalid recursive type: B refers to itself" */ struct { + a A + b B +} diff --git a/src/io/fs/fs.go b/src/io/fs/fs.go index 8f693f2574a5b6..fca07b818c9a3b 100644 --- a/src/io/fs/fs.go +++ b/src/io/fs/fs.go @@ -6,6 +6,19 @@ // A file system can be provided by the host operating system // but also by other packages. // +// # Path Names +// +// The interfaces in this package all operate on the same +// path name syntax, regardless of the host operating system. +// +// Path names are UTF-8-encoded, +// unrooted, slash-separated sequences of path elements, like “x/y/z”. +// Path names must not contain an element that is “.” or “..” or the empty string, +// except for the special case that the name "." may be used for the root directory. +// Paths must not start or end with a slash: “/x” and “x/” are invalid. +// +// # Testing +// // See the [testing/fstest] package for support with testing // implementations of file systems. package fs @@ -41,16 +54,13 @@ type FS interface { // ValidPath reports whether the given path name // is valid for use in a call to Open. 
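(Editorial aside, not part of the patch: the io/fs hunk in this change moves the path-name rules into a package-level "Path Names" section and points ValidPath at it. A small sketch of how those rules play out for concrete names, using the long-standing fs.ValidPath API:)

package main

import (
	"fmt"
	"io/fs"
)

func main() {
	// "." is the root directory; names are unrooted and slash-separated,
	// with no ".", "..", or empty elements, and no leading or trailing slash.
	for _, name := range []string{".", "x/y/z", "/x", "x/", "x/../y", ""} {
		fmt.Printf("fs.ValidPath(%q) = %v\n", name, fs.ValidPath(name))
	}
}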
// -// Path names passed to open are UTF-8-encoded, -// unrooted, slash-separated sequences of path elements, like “x/y/z”. -// Path names must not contain an element that is “.” or “..” or the empty string, -// except for the special case that the name "." may be used for the root directory. -// Paths must not start or end with a slash: “/x” and “x/” are invalid. -// // Note that paths are slash-separated on all systems, even Windows. // Paths containing other characters such as backslash and colon // are accepted as valid, but those characters must never be // interpreted by an [FS] implementation as path element separators. +// See the [Path Names] section for more details. +// +// [Path Names]: https://pkg.go.dev/io/fs#hdr-Path_Names func ValidPath(name string) bool { if !utf8.ValidString(name) { return false diff --git a/src/io/fs/readdir_test.go b/src/io/fs/readdir_test.go index 4c409ae7a010e2..b729bf27ac40ac 100644 --- a/src/io/fs/readdir_test.go +++ b/src/io/fs/readdir_test.go @@ -72,7 +72,6 @@ func TestFileInfoToDirEntry(t *testing.T) { } for _, test := range tests { - test := test t.Run(test.path, func(t *testing.T) { fi, err := Stat(testFs, test.path) if err != nil { @@ -94,8 +93,8 @@ func TestFileInfoToDirEntry(t *testing.T) { } func errorPath(err error) string { - var perr *PathError - if !errors.As(err, &perr) { + perr, ok := errors.AsType[*PathError](err) + if !ok { return "" } return perr.Path diff --git a/src/log/log_test.go b/src/log/log_test.go index 8cc05c5e647f95..5d5d38cc10dbae 100644 --- a/src/log/log_test.go +++ b/src/log/log_test.go @@ -272,8 +272,7 @@ func TestCallDepth(t *testing.T) { cmd.Env = append(cmd.Environ(), envVar+"=1") out, err := cmd.CombinedOutput() - var exitErr *exec.ExitError - if !errors.As(err, &exitErr) { + if _, ok := errors.AsType[*exec.ExitError](err); !ok { t.Fatalf("expected exec.ExitError: %v", err) } diff --git a/src/log/slog/logger_test.go b/src/log/slog/logger_test.go index bf645d9c4c6310..edacef13a4d518 100644 --- a/src/log/slog/logger_test.go +++ b/src/log/slog/logger_test.go @@ -303,8 +303,7 @@ func TestCallDepthConnection(t *testing.T) { cmd.Env = append(cmd.Environ(), envVar+"=1") out, err := cmd.CombinedOutput() - var exitErr *exec.ExitError - if !errors.As(err, &exitErr) { + if _, ok := errors.AsType[*exec.ExitError](err); !ok { t.Fatalf("expected exec.ExitError: %v", err) } diff --git a/src/mime/mediatype.go b/src/mime/mediatype.go index 66684a68b23961..c6006b614f319e 100644 --- a/src/mime/mediatype.go +++ b/src/mime/mediatype.go @@ -98,24 +98,32 @@ func FormatMediaType(t string, param map[string]string) string { func checkMediaTypeDisposition(s string) error { typ, rest := consumeToken(s) if typ == "" { - return errors.New("mime: no media type") + return errNoMediaType } if rest == "" { return nil } - if !strings.HasPrefix(rest, "/") { - return errors.New("mime: expected slash after first token") + var ok bool + if rest, ok = strings.CutPrefix(rest, "/"); !ok { + return errNoSlashAfterFirstToken } - subtype, rest := consumeToken(rest[1:]) + subtype, rest := consumeToken(rest) if subtype == "" { - return errors.New("mime: expected token after slash") + return errNoTokenAfterSlash } if rest != "" { - return errors.New("mime: unexpected content after media subtype") + return errUnexpectedContentAfterMediaSubtype } return nil } +var ( + errNoMediaType = errors.New("mime: no media type") + errNoSlashAfterFirstToken = errors.New("mime: expected slash after first token") + errNoTokenAfterSlash = errors.New("mime: expected token after 
slash") + errUnexpectedContentAfterMediaSubtype = errors.New("mime: unexpected content after media subtype") +) + // ErrInvalidMediaParameter is returned by [ParseMediaType] if // the media type value was found but there was an error parsing // the optional parameters @@ -169,7 +177,6 @@ func ParseMediaType(v string) (mediatype string, params map[string]string, err e if continuation == nil { continuation = make(map[string]map[string]string) } - var ok bool if pmap, ok = continuation[baseName]; !ok { continuation[baseName] = make(map[string]string) pmap = continuation[baseName] @@ -177,7 +184,7 @@ func ParseMediaType(v string) (mediatype string, params map[string]string, err e } if v, exists := pmap[key]; exists && v != value { // Duplicate parameter names are incorrect, but we allow them if they are equal. - return "", nil, errors.New("mime: duplicate parameter name") + return "", nil, errDuplicateParamName } pmap[key] = value v = rest @@ -227,27 +234,28 @@ func ParseMediaType(v string) (mediatype string, params map[string]string, err e return } +var errDuplicateParamName = errors.New("mime: duplicate parameter name") + func decode2231Enc(v string) (string, bool) { - sv := strings.SplitN(v, "'", 3) - if len(sv) != 3 { + charset, v, ok := strings.Cut(v, "'") + if !ok { return "", false } - // TODO: ignoring lang in sv[1] for now. If anybody needs it we'll + // TODO: ignoring the language part for now. If anybody needs it, we'll // need to decide how to expose it in the API. But I'm not sure // anybody uses it in practice. - charset := strings.ToLower(sv[0]) - if len(charset) == 0 { + _, extOtherVals, ok := strings.Cut(v, "'") + if !ok { return "", false } - if charset != "us-ascii" && charset != "utf-8" { - // TODO: unsupported encoding + charset = strings.ToLower(charset) + switch charset { + case "us-ascii", "utf-8": + default: + // Empty or unsupported encoding. return "", false } - encv, err := percentHexUnescape(sv[2]) - if err != nil { - return "", false - } - return encv, true + return percentHexUnescape(extOtherVals) } // consumeToken consumes a token from the beginning of provided @@ -309,11 +317,11 @@ func consumeValue(v string) (value, rest string) { func consumeMediaParam(v string) (param, value, rest string) { rest = strings.TrimLeftFunc(v, unicode.IsSpace) - if !strings.HasPrefix(rest, ";") { + var ok bool + if rest, ok = strings.CutPrefix(rest, ";"); !ok { return "", "", v } - rest = rest[1:] // consume semicolon rest = strings.TrimLeftFunc(rest, unicode.IsSpace) param, rest = consumeToken(rest) param = strings.ToLower(param) @@ -322,10 +330,9 @@ func consumeMediaParam(v string) (param, value, rest string) { } rest = strings.TrimLeftFunc(rest, unicode.IsSpace) - if !strings.HasPrefix(rest, "=") { + if rest, ok = strings.CutPrefix(rest, "="); !ok { return "", "", v } - rest = rest[1:] // consume equals sign rest = strings.TrimLeftFunc(rest, unicode.IsSpace) value, rest2 := consumeValue(rest) if value == "" && rest2 == rest { @@ -335,7 +342,7 @@ func consumeMediaParam(v string) (param, value, rest string) { return param, value, rest } -func percentHexUnescape(s string) (string, error) { +func percentHexUnescape(s string) (string, bool) { // Count %, check that they're well-formed. 
percents := 0 for i := 0; i < len(s); { @@ -345,16 +352,12 @@ func percentHexUnescape(s string) (string, error) { } percents++ if i+2 >= len(s) || !ishex(s[i+1]) || !ishex(s[i+2]) { - s = s[i:] - if len(s) > 3 { - s = s[0:3] - } - return "", fmt.Errorf("mime: bogus characters after %%: %q", s) + return "", false } i += 3 } if percents == 0 { - return s, nil + return s, true } t := make([]byte, len(s)-2*percents) @@ -371,7 +374,7 @@ func percentHexUnescape(s string) (string, error) { i++ } } - return string(t), nil + return string(t), true } func ishex(c byte) bool { diff --git a/src/mime/type.go b/src/mime/type.go index c86ebd3442c1dc..ac7b0447da3cf9 100644 --- a/src/mime/type.go +++ b/src/mime/type.go @@ -17,7 +17,7 @@ var ( mimeTypesLower sync.Map // map[string]string; ".z" => "application/x-compress" // extensions maps from MIME type to list of lowercase file - // extensions: "image/jpeg" => [".jpg", ".jpeg"] + // extensions: "image/jpeg" => [".jfif", ".jpg", ".jpeg", ".pjp", ".pjpeg"] extensionsMu sync.Mutex // Guards stores (but not loads) on extensions. extensions sync.Map // map[string][]string; slice values are append-only. ) @@ -50,23 +50,82 @@ func setMimeTypes(lowerExt, mixExt map[string]string) { } } +// A type is listed here if both Firefox and Chrome included them in their own +// lists. In the case where they contradict they are deconflicted using IANA's +// listed media types https://www.iana.org/assignments/media-types/media-types.xhtml +// +// Chrome's MIME mappings to file extensions are defined at +// https://chromium.googlesource.com/chromium/src.git/+/refs/heads/main/net/base/mime_util.cc +// +// Firefox's MIME types can be found at +// https://github.com/mozilla-firefox/firefox/blob/main/netwerk/mime/nsMimeTypes.h +// and the mappings to file extensions at +// https://github.com/mozilla-firefox/firefox/blob/main/uriloader/exthandler/nsExternalHelperAppService.cpp var builtinTypesLower = map[string]string{ - ".avif": "image/avif", - ".css": "text/css; charset=utf-8", - ".gif": "image/gif", - ".htm": "text/html; charset=utf-8", - ".html": "text/html; charset=utf-8", - ".jpeg": "image/jpeg", - ".jpg": "image/jpeg", - ".js": "text/javascript; charset=utf-8", - ".json": "application/json", - ".mjs": "text/javascript; charset=utf-8", - ".pdf": "application/pdf", - ".png": "image/png", - ".svg": "image/svg+xml", - ".wasm": "application/wasm", - ".webp": "image/webp", - ".xml": "text/xml; charset=utf-8", + ".ai": "application/postscript", + ".apk": "application/vnd.android.package-archive", + ".apng": "image/apng", + ".avif": "image/avif", + ".bin": "application/octet-stream", + ".bmp": "image/bmp", + ".com": "application/octet-stream", + ".css": "text/css; charset=utf-8", + ".csv": "text/csv; charset=utf-8", + ".doc": "application/msword", + ".docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document", + ".ehtml": "text/html; charset=utf-8", + ".eml": "message/rfc822", + ".eps": "application/postscript", + ".exe": "application/octet-stream", + ".flac": "audio/flac", + ".gif": "image/gif", + ".gz": "application/gzip", + ".htm": "text/html; charset=utf-8", + ".html": "text/html; charset=utf-8", + ".ico": "image/vnd.microsoft.icon", + ".ics": "text/calendar; charset=utf-8", + ".jfif": "image/jpeg", + ".jpeg": "image/jpeg", + ".jpg": "image/jpeg", + ".js": "text/javascript; charset=utf-8", + ".json": "application/json", + ".m4a": "audio/mp4", + ".mjs": "text/javascript; charset=utf-8", + ".mp3": "audio/mpeg", + ".mp4": "video/mp4", + ".oga": "audio/ogg", + 
".ogg": "audio/ogg", + ".ogv": "video/ogg", + ".opus": "audio/ogg", + ".pdf": "application/pdf", + ".pjp": "image/jpeg", + ".pjpeg": "image/jpeg", + ".png": "image/png", + ".ppt": "application/vnd.ms-powerpoint", + ".pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation", + ".ps": "application/postscript", + ".rdf": "application/rdf+xml", + ".rtf": "application/rtf", + ".shtml": "text/html; charset=utf-8", + ".svg": "image/svg+xml", + ".text": "text/plain; charset=utf-8", + ".tif": "image/tiff", + ".tiff": "image/tiff", + ".txt": "text/plain; charset=utf-8", + ".vtt": "text/vtt; charset=utf-8", + ".wasm": "application/wasm", + ".wav": "audio/wav", + ".webm": "audio/webm", + ".webp": "image/webp", + ".xbl": "text/xml; charset=utf-8", + ".xbm": "image/x-xbitmap", + ".xht": "application/xhtml+xml", + ".xhtml": "application/xhtml+xml", + ".xls": "application/vnd.ms-excel", + ".xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", + ".xml": "text/xml; charset=utf-8", + ".xsl": "text/xml; charset=utf-8", + ".zip": "application/zip", } var once sync.Once // guards initMime diff --git a/src/mime/type_test.go b/src/mime/type_test.go index 6bdf37b6359d20..f4ec8c8754e69f 100644 --- a/src/mime/type_test.go +++ b/src/mime/type_test.go @@ -208,7 +208,51 @@ func TestExtensionsByType2(t *testing.T) { typ string want []string }{ - {typ: "image/jpeg", want: []string{".jpeg", ".jpg"}}, + {typ: "application/postscript", want: []string{".ai", ".eps", ".ps"}}, + {typ: "application/vnd.android.package-archive", want: []string{".apk"}}, + {typ: "image/apng", want: []string{".apng"}}, + {typ: "image/avif", want: []string{".avif"}}, + {typ: "application/octet-stream", want: []string{".bin", ".com", ".exe"}}, + {typ: "image/bmp", want: []string{".bmp"}}, + {typ: "text/css; charset=utf-8", want: []string{".css"}}, + {typ: "text/csv; charset=utf-8", want: []string{".csv"}}, + {typ: "application/msword", want: []string{".doc"}}, + {typ: "application/vnd.openxmlformats-officedocument.wordprocessingml.document", want: []string{".docx"}}, + {typ: "text/html; charset=utf-8", want: []string{".ehtml", ".htm", ".html", ".shtml"}}, + {typ: "message/rfc822", want: []string{".eml"}}, + {typ: "audio/flac", want: []string{".flac"}}, + {typ: "image/gif", want: []string{".gif"}}, + {typ: "application/gzip", want: []string{".gz"}}, + {typ: "image/vnd.microsoft.icon", want: []string{".ico"}}, + {typ: "text/calendar; charset=utf-8", want: []string{".ics"}}, + {typ: "image/jpeg", want: []string{".jfif", ".jpeg", ".jpg", ".pjp", ".pjpeg"}}, + {typ: "text/javascript; charset=utf-8", want: []string{".js", ".mjs"}}, + {typ: "application/json", want: []string{".json"}}, + {typ: "audio/mp4", want: []string{".m4a"}}, + {typ: "audio/mpeg", want: []string{".mp3"}}, + {typ: "video/mp4", want: []string{".mp4"}}, + {typ: "audio/ogg", want: []string{".oga", ".ogg", ".opus"}}, + {typ: "video/ogg", want: []string{".ogv"}}, + {typ: "application/pdf", want: []string{".pdf"}}, + {typ: "image/png", want: []string{".png"}}, + {typ: "application/vnd.ms-powerpoint", want: []string{".ppt"}}, + {typ: "application/vnd.openxmlformats-officedocument.presentationml.presentation", want: []string{".pptx"}}, + {typ: "application/rdf+xml", want: []string{".rdf"}}, + {typ: "application/rtf", want: []string{".rtf"}}, + {typ: "image/svg+xml", want: []string{".svg"}}, + {typ: "text/plain; charset=utf-8", want: []string{".text", ".txt"}}, + {typ: "image/tiff", want: []string{".tif", ".tiff"}}, + {typ: "text/vtt; 
charset=utf-8", want: []string{".vtt"}}, + {typ: "application/wasm", want: []string{".wasm"}}, + {typ: "audio/wav", want: []string{".wav"}}, + {typ: "audio/webm", want: []string{".webm"}}, + {typ: "image/webp", want: []string{".webp"}}, + {typ: "text/xml; charset=utf-8", want: []string{".xbl", ".xml", ".xsl"}}, + {typ: "image/x-xbitmap", want: []string{".xbm"}}, + {typ: "application/xhtml+xml", want: []string{".xht", ".xhtml"}}, + {typ: "application/vnd.ms-excel", want: []string{".xls"}}, + {typ: "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", want: []string{".xlsx"}}, + {typ: "application/zip", want: []string{".zip"}}, } for _, tt := range tests { diff --git a/src/net/dnsclient_unix.go b/src/net/dnsclient_unix.go index 5e060a6b489bf9..940fcccf7f2139 100644 --- a/src/net/dnsclient_unix.go +++ b/src/net/dnsclient_unix.go @@ -842,8 +842,7 @@ func (r *Resolver) goLookupPTR(ctx context.Context, addr string, order hostLooku } p, server, err := r.lookup(ctx, arpa, dnsmessage.TypePTR, conf) if err != nil { - var dnsErr *DNSError - if errors.As(err, &dnsErr) && dnsErr.IsNotFound { + if dnsErr, ok := errors.AsType[*DNSError](err); ok && dnsErr.IsNotFound { if order == hostLookupDNSFiles { names := lookupStaticAddr(addr) if len(names) > 0 { diff --git a/src/net/dnsclient_unix_test.go b/src/net/dnsclient_unix_test.go index 826b4daba1e7fd..fc1d40f18b6f9a 100644 --- a/src/net/dnsclient_unix_test.go +++ b/src/net/dnsclient_unix_test.go @@ -2627,8 +2627,7 @@ func TestLongDNSNames(t *testing.T) { } expectedErr := DNSError{Err: errNoSuchHost.Error(), Name: v.req, IsNotFound: true} - var dnsErr *DNSError - errors.As(err, &dnsErr) + dnsErr, _ := errors.AsType[*DNSError](err) if dnsErr == nil || *dnsErr != expectedErr { t.Errorf("%v: Lookup%v: unexpected error: %v", i, testName, err) } @@ -2820,8 +2819,7 @@ func TestLookupOrderFilesNoSuchHost(t *testing.T) { } expectedErr := DNSError{Err: errNoSuchHost.Error(), Name: testName, IsNotFound: true} - var dnsErr *DNSError - errors.As(err, &dnsErr) + dnsErr, _ := errors.AsType[*DNSError](err) if dnsErr == nil || *dnsErr != expectedErr { t.Errorf("Lookup%v: unexpected error: %v", v.name, err) } @@ -2853,8 +2851,7 @@ func TestExtendedRCode(t *testing.T) { r := &Resolver{PreferGo: true, Dial: fake.DialContext} _, _, err := r.tryOneName(context.Background(), getSystemDNSConfig(), "go.dev.", dnsmessage.TypeA) - var dnsErr *DNSError - if !(errors.As(err, &dnsErr) && dnsErr.Err == errServerMisbehaving.Error()) { + if dnsErr, ok := errors.AsType[*DNSError](err); !ok || dnsErr.Err != errServerMisbehaving.Error() { t.Fatalf("r.tryOneName(): unexpected error: %v", err) } } diff --git a/src/net/http/client.go b/src/net/http/client.go index ba095ea1e34e5c..d6a801073553f7 100644 --- a/src/net/http/client.go +++ b/src/net/http/client.go @@ -172,8 +172,13 @@ func refererForURL(lastReq, newReq *url.URL, explicitRef string) string { // didTimeout is non-nil only if err != nil. 
func (c *Client) send(req *Request, deadline time.Time) (resp *Response, didTimeout func() bool, err error) { + cookieURL := req.URL + if req.Host != "" { + cookieURL = cloneURL(cookieURL) + cookieURL.Host = req.Host + } if c.Jar != nil { - for _, cookie := range c.Jar.Cookies(req.URL) { + for _, cookie := range c.Jar.Cookies(cookieURL) { req.AddCookie(cookie) } } @@ -183,7 +188,7 @@ func (c *Client) send(req *Request, deadline time.Time) (resp *Response, didTime } if c.Jar != nil { if rc := resp.Cookies(); len(rc) > 0 { - c.Jar.SetCookies(req.URL, rc) + c.Jar.SetCookies(cookieURL, rc) } } return resp, nil, nil @@ -685,8 +690,7 @@ func (c *Client) do(req *Request) (retres *Response, reterr error) { stripSensitiveHeaders = true } } - copyHeaders(req, stripSensitiveHeaders) - + copyHeaders(req, stripSensitiveHeaders, !includeBody) // Add the Referer header from the most recent // request URL to the new one, if it's not https->http: if ref := refererForURL(reqs[len(reqs)-1].URL, req.URL, req.Header.Get("Referer")); ref != "" { @@ -753,7 +757,7 @@ func (c *Client) do(req *Request) (retres *Response, reterr error) { // makeHeadersCopier makes a function that copies headers from the // initial Request, ireq. For every redirect, this function must be called // so that it can copy headers into the upcoming Request. -func (c *Client) makeHeadersCopier(ireq *Request) func(req *Request, stripSensitiveHeaders bool) { +func (c *Client) makeHeadersCopier(ireq *Request) func(req *Request, stripSensitiveHeaders, stripBodyHeaders bool) { // The headers to copy are from the very initial request. // We use a closured callback to keep a reference to these original headers. var ( @@ -767,7 +771,7 @@ func (c *Client) makeHeadersCopier(ireq *Request) func(req *Request, stripSensit } } - return func(req *Request, stripSensitiveHeaders bool) { + return func(req *Request, stripSensitiveHeaders, stripBodyHeaders bool) { // If Jar is present and there was some initial cookies provided // via the request header, then we may need to alter the initial // cookies as we follow redirects since each redirect may end up @@ -805,12 +809,21 @@ func (c *Client) makeHeadersCopier(ireq *Request) func(req *Request, stripSensit // (at least the safe ones). 
for k, vv := range ireqhdr { sensitive := false + body := false switch CanonicalHeaderKey(k) { case "Authorization", "Www-Authenticate", "Cookie", "Cookie2", "Proxy-Authorization", "Proxy-Authenticate": sensitive = true + + case "Content-Encoding", "Content-Language", "Content-Location", + "Content-Type": + // Headers relating to the body which is removed for + // POST to GET redirects + // https://fetch.spec.whatwg.org/#http-redirect-fetch + body = true + } - if !(sensitive && stripSensitiveHeaders) { + if !(sensitive && stripSensitiveHeaders) && !(body && stripBodyHeaders) { req.Header[k] = vv } } diff --git a/src/net/http/client_test.go b/src/net/http/client_test.go index 94fddb508e0e38..d184f720319ce4 100644 --- a/src/net/http/client_test.go +++ b/src/net/http/client_test.go @@ -585,6 +585,36 @@ var echoCookiesRedirectHandler = HandlerFunc(func(w ResponseWriter, r *Request) } }) +func TestHostMismatchCookies(t *testing.T) { run(t, testHostMismatchCookies) } +func testHostMismatchCookies(t *testing.T, mode testMode) { + ts := newClientServerTest(t, mode, HandlerFunc(func(w ResponseWriter, r *Request) { + for _, c := range r.Cookies() { + c.Value = "SetOnServer" + SetCookie(w, c) + } + })).ts + + reqURL, _ := url.Parse(ts.URL) + hostURL := *reqURL + hostURL.Host = "cookies.example.com" + + c := ts.Client() + c.Jar = new(TestJar) + c.Jar.SetCookies(reqURL, []*Cookie{{Name: "First", Value: "SetOnClient"}}) + c.Jar.SetCookies(&hostURL, []*Cookie{{Name: "Second", Value: "SetOnClient"}}) + + req, _ := NewRequest("GET", ts.URL, NoBody) + req.Host = hostURL.Host + resp, err := c.Do(req) + if err != nil { + t.Fatalf("Get: %v", err) + } + resp.Body.Close() + + matchReturnedCookies(t, []*Cookie{{Name: "First", Value: "SetOnClient"}}, c.Jar.Cookies(reqURL)) + matchReturnedCookies(t, []*Cookie{{Name: "Second", Value: "SetOnServer"}}, c.Jar.Cookies(&hostURL)) +} + func TestClientSendsCookieFromJar(t *testing.T) { defer afterTest(t) tr := &recordingTransport{} @@ -1591,6 +1621,39 @@ func testClientStripHeadersOnRepeatedRedirect(t *testing.T, mode testMode) { } } +func TestClientStripHeadersOnPostToGetRedirect(t *testing.T) { + run(t, testClientStripHeadersOnPostToGetRedirect) +} +func testClientStripHeadersOnPostToGetRedirect(t *testing.T, mode testMode) { + ts := newClientServerTest(t, mode, HandlerFunc(func(w ResponseWriter, r *Request) { + if r.Method == "POST" { + Redirect(w, r, "/redirected", StatusFound) + return + } else if r.Method != "GET" { + t.Errorf("unexpected request method: %v", r.Method) + return + } + for key, val := range r.Header { + if strings.HasPrefix(key, "Content-") { + t.Errorf("unexpected request body header after redirect: %v: %v", key, val) + } + } + })).ts + + c := ts.Client() + + req, _ := NewRequest("POST", ts.URL, strings.NewReader("hello world")) + req.Header.Set("Content-Encoding", "a") + req.Header.Set("Content-Language", "b") + req.Header.Set("Content-Length", "c") + req.Header.Set("Content-Type", "d") + res, err := c.Do(req) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() +} + // Issue 22233: copy host when Client follows a relative redirect. 
func TestClientCopyHostOnRedirect(t *testing.T) { run(t, testClientCopyHostOnRedirect) } func testClientCopyHostOnRedirect(t *testing.T, mode testMode) { diff --git a/src/net/http/cookie.go b/src/net/http/cookie.go index efe6cc3e77e5b5..f74bc1043c509e 100644 --- a/src/net/http/cookie.go +++ b/src/net/http/cookie.go @@ -7,6 +7,7 @@ package http import ( "errors" "fmt" + "internal/godebug" "log" "net" "net/http/internal/ascii" @@ -16,6 +17,8 @@ import ( "time" ) +var httpcookiemaxnum = godebug.New("httpcookiemaxnum") + // A Cookie represents an HTTP cookie as sent in the Set-Cookie header of an // HTTP response or the Cookie header of an HTTP request. // @@ -58,16 +61,37 @@ const ( ) var ( - errBlankCookie = errors.New("http: blank cookie") - errEqualNotFoundInCookie = errors.New("http: '=' not found in cookie") - errInvalidCookieName = errors.New("http: invalid cookie name") - errInvalidCookieValue = errors.New("http: invalid cookie value") + errBlankCookie = errors.New("http: blank cookie") + errEqualNotFoundInCookie = errors.New("http: '=' not found in cookie") + errInvalidCookieName = errors.New("http: invalid cookie name") + errInvalidCookieValue = errors.New("http: invalid cookie value") + errCookieNumLimitExceeded = errors.New("http: number of cookies exceeded limit") ) +const defaultCookieMaxNum = 3000 + +func cookieNumWithinMax(cookieNum int) bool { + withinDefaultMax := cookieNum <= defaultCookieMaxNum + if httpcookiemaxnum.Value() == "" { + return withinDefaultMax + } + if customMax, err := strconv.Atoi(httpcookiemaxnum.Value()); err == nil { + withinCustomMax := customMax == 0 || cookieNum <= customMax + if withinDefaultMax != withinCustomMax { + httpcookiemaxnum.IncNonDefault() + } + return withinCustomMax + } + return withinDefaultMax +} + // ParseCookie parses a Cookie header value and returns all the cookies // which were set in it. Since the same cookie name can appear multiple times // the returned Values can contain more than one value for a given key. func ParseCookie(line string) ([]*Cookie, error) { + if !cookieNumWithinMax(strings.Count(line, ";") + 1) { + return nil, errCookieNumLimitExceeded + } parts := strings.Split(textproto.TrimString(line), ";") if len(parts) == 1 && parts[0] == "" { return nil, errBlankCookie @@ -197,11 +221,21 @@ func ParseSetCookie(line string) (*Cookie, error) { // readSetCookies parses all "Set-Cookie" values from // the header h and returns the successfully parsed Cookies. +// +// If the amount of cookies exceeds CookieNumLimit, and httpcookielimitnum +// GODEBUG option is not explicitly turned off, this function will silently +// fail and return an empty slice. func readSetCookies(h Header) []*Cookie { cookieCount := len(h["Set-Cookie"]) if cookieCount == 0 { return []*Cookie{} } + // Cookie limit was unfortunately introduced at a later point in time. + // As such, we can only fail by returning an empty slice rather than + // explicit error. + if !cookieNumWithinMax(cookieCount) { + return []*Cookie{} + } cookies := make([]*Cookie, 0, cookieCount) for _, line := range h["Set-Cookie"] { if cookie, err := ParseSetCookie(line); err == nil { @@ -329,13 +363,28 @@ func (c *Cookie) Valid() error { // readCookies parses all "Cookie" values from the header h and // returns the successfully parsed Cookies. // -// if filter isn't empty, only cookies of that name are returned. +// If filter isn't empty, only cookies of that name are returned. 
+// +// If the amount of cookies exceeds CookieNumLimit, and httpcookielimitnum +// GODEBUG option is not explicitly turned off, this function will silently +// fail and return an empty slice. func readCookies(h Header, filter string) []*Cookie { lines := h["Cookie"] if len(lines) == 0 { return []*Cookie{} } + // Cookie limit was unfortunately introduced at a later point in time. + // As such, we can only fail by returning an empty slice rather than + // explicit error. + cookieCount := 0 + for _, line := range lines { + cookieCount += strings.Count(line, ";") + 1 + } + if !cookieNumWithinMax(cookieCount) { + return []*Cookie{} + } + cookies := make([]*Cookie, 0, len(lines)+strings.Count(lines[0], ";")) for _, line := range lines { line = textproto.TrimString(line) diff --git a/src/net/http/cookie_test.go b/src/net/http/cookie_test.go index 8db4957b2cc37d..f452b4ec76830f 100644 --- a/src/net/http/cookie_test.go +++ b/src/net/http/cookie_test.go @@ -11,6 +11,7 @@ import ( "log" "os" "reflect" + "slices" "strings" "testing" "time" @@ -255,16 +256,17 @@ func TestAddCookie(t *testing.T) { } var readSetCookiesTests = []struct { - Header Header - Cookies []*Cookie + header Header + cookies []*Cookie + godebug string }{ { - Header{"Set-Cookie": {"Cookie-1=v$1"}}, - []*Cookie{{Name: "Cookie-1", Value: "v$1", Raw: "Cookie-1=v$1"}}, + header: Header{"Set-Cookie": {"Cookie-1=v$1"}}, + cookies: []*Cookie{{Name: "Cookie-1", Value: "v$1", Raw: "Cookie-1=v$1"}}, }, { - Header{"Set-Cookie": {"NID=99=YsDT5i3E-CXax-; expires=Wed, 23-Nov-2011 01:05:03 GMT; path=/; domain=.google.ch; HttpOnly"}}, - []*Cookie{{ + header: Header{"Set-Cookie": {"NID=99=YsDT5i3E-CXax-; expires=Wed, 23-Nov-2011 01:05:03 GMT; path=/; domain=.google.ch; HttpOnly"}}, + cookies: []*Cookie{{ Name: "NID", Value: "99=YsDT5i3E-CXax-", Path: "/", @@ -276,8 +278,8 @@ var readSetCookiesTests = []struct { }}, }, { - Header{"Set-Cookie": {".ASPXAUTH=7E3AA; expires=Wed, 07-Mar-2012 14:25:06 GMT; path=/; HttpOnly"}}, - []*Cookie{{ + header: Header{"Set-Cookie": {".ASPXAUTH=7E3AA; expires=Wed, 07-Mar-2012 14:25:06 GMT; path=/; HttpOnly"}}, + cookies: []*Cookie{{ Name: ".ASPXAUTH", Value: "7E3AA", Path: "/", @@ -288,8 +290,8 @@ var readSetCookiesTests = []struct { }}, }, { - Header{"Set-Cookie": {"ASP.NET_SessionId=foo; path=/; HttpOnly"}}, - []*Cookie{{ + header: Header{"Set-Cookie": {"ASP.NET_SessionId=foo; path=/; HttpOnly"}}, + cookies: []*Cookie{{ Name: "ASP.NET_SessionId", Value: "foo", Path: "/", @@ -298,8 +300,8 @@ var readSetCookiesTests = []struct { }}, }, { - Header{"Set-Cookie": {"samesitedefault=foo; SameSite"}}, - []*Cookie{{ + header: Header{"Set-Cookie": {"samesitedefault=foo; SameSite"}}, + cookies: []*Cookie{{ Name: "samesitedefault", Value: "foo", SameSite: SameSiteDefaultMode, @@ -307,8 +309,8 @@ var readSetCookiesTests = []struct { }}, }, { - Header{"Set-Cookie": {"samesiteinvalidisdefault=foo; SameSite=invalid"}}, - []*Cookie{{ + header: Header{"Set-Cookie": {"samesiteinvalidisdefault=foo; SameSite=invalid"}}, + cookies: []*Cookie{{ Name: "samesiteinvalidisdefault", Value: "foo", SameSite: SameSiteDefaultMode, @@ -316,8 +318,8 @@ var readSetCookiesTests = []struct { }}, }, { - Header{"Set-Cookie": {"samesitelax=foo; SameSite=Lax"}}, - []*Cookie{{ + header: Header{"Set-Cookie": {"samesitelax=foo; SameSite=Lax"}}, + cookies: []*Cookie{{ Name: "samesitelax", Value: "foo", SameSite: SameSiteLaxMode, @@ -325,8 +327,8 @@ var readSetCookiesTests = []struct { }}, }, { - Header{"Set-Cookie": {"samesitestrict=foo; SameSite=Strict"}}, - 
[]*Cookie{{ + header: Header{"Set-Cookie": {"samesitestrict=foo; SameSite=Strict"}}, + cookies: []*Cookie{{ Name: "samesitestrict", Value: "foo", SameSite: SameSiteStrictMode, @@ -334,8 +336,8 @@ var readSetCookiesTests = []struct { }}, }, { - Header{"Set-Cookie": {"samesitenone=foo; SameSite=None"}}, - []*Cookie{{ + header: Header{"Set-Cookie": {"samesitenone=foo; SameSite=None"}}, + cookies: []*Cookie{{ Name: "samesitenone", Value: "foo", SameSite: SameSiteNoneMode, @@ -345,47 +347,66 @@ var readSetCookiesTests = []struct { // Make sure we can properly read back the Set-Cookie headers we create // for values containing spaces or commas: { - Header{"Set-Cookie": {`special-1=a z`}}, - []*Cookie{{Name: "special-1", Value: "a z", Raw: `special-1=a z`}}, + header: Header{"Set-Cookie": {`special-1=a z`}}, + cookies: []*Cookie{{Name: "special-1", Value: "a z", Raw: `special-1=a z`}}, }, { - Header{"Set-Cookie": {`special-2=" z"`}}, - []*Cookie{{Name: "special-2", Value: " z", Quoted: true, Raw: `special-2=" z"`}}, + header: Header{"Set-Cookie": {`special-2=" z"`}}, + cookies: []*Cookie{{Name: "special-2", Value: " z", Quoted: true, Raw: `special-2=" z"`}}, }, { - Header{"Set-Cookie": {`special-3="a "`}}, - []*Cookie{{Name: "special-3", Value: "a ", Quoted: true, Raw: `special-3="a "`}}, + header: Header{"Set-Cookie": {`special-3="a "`}}, + cookies: []*Cookie{{Name: "special-3", Value: "a ", Quoted: true, Raw: `special-3="a "`}}, }, { - Header{"Set-Cookie": {`special-4=" "`}}, - []*Cookie{{Name: "special-4", Value: " ", Quoted: true, Raw: `special-4=" "`}}, + header: Header{"Set-Cookie": {`special-4=" "`}}, + cookies: []*Cookie{{Name: "special-4", Value: " ", Quoted: true, Raw: `special-4=" "`}}, }, { - Header{"Set-Cookie": {`special-5=a,z`}}, - []*Cookie{{Name: "special-5", Value: "a,z", Raw: `special-5=a,z`}}, + header: Header{"Set-Cookie": {`special-5=a,z`}}, + cookies: []*Cookie{{Name: "special-5", Value: "a,z", Raw: `special-5=a,z`}}, }, { - Header{"Set-Cookie": {`special-6=",z"`}}, - []*Cookie{{Name: "special-6", Value: ",z", Quoted: true, Raw: `special-6=",z"`}}, + header: Header{"Set-Cookie": {`special-6=",z"`}}, + cookies: []*Cookie{{Name: "special-6", Value: ",z", Quoted: true, Raw: `special-6=",z"`}}, }, { - Header{"Set-Cookie": {`special-7=a,`}}, - []*Cookie{{Name: "special-7", Value: "a,", Raw: `special-7=a,`}}, + header: Header{"Set-Cookie": {`special-7=a,`}}, + cookies: []*Cookie{{Name: "special-7", Value: "a,", Raw: `special-7=a,`}}, }, { - Header{"Set-Cookie": {`special-8=","`}}, - []*Cookie{{Name: "special-8", Value: ",", Quoted: true, Raw: `special-8=","`}}, + header: Header{"Set-Cookie": {`special-8=","`}}, + cookies: []*Cookie{{Name: "special-8", Value: ",", Quoted: true, Raw: `special-8=","`}}, }, // Make sure we can properly read back the Set-Cookie headers // for names containing spaces: { - Header{"Set-Cookie": {`special-9 =","`}}, - []*Cookie{{Name: "special-9", Value: ",", Quoted: true, Raw: `special-9 =","`}}, + header: Header{"Set-Cookie": {`special-9 =","`}}, + cookies: []*Cookie{{Name: "special-9", Value: ",", Quoted: true, Raw: `special-9 =","`}}, }, // Quoted values (issue #46443) { - Header{"Set-Cookie": {`cookie="quoted"`}}, - []*Cookie{{Name: "cookie", Value: "quoted", Quoted: true, Raw: `cookie="quoted"`}}, + header: Header{"Set-Cookie": {`cookie="quoted"`}}, + cookies: []*Cookie{{Name: "cookie", Value: "quoted", Quoted: true, Raw: `cookie="quoted"`}}, + }, + { + header: Header{"Set-Cookie": slices.Repeat([]string{"a="}, defaultCookieMaxNum+1)}, + cookies: 
[]*Cookie{}, + }, + { + header: Header{"Set-Cookie": slices.Repeat([]string{"a="}, 10)}, + cookies: []*Cookie{}, + godebug: "httpcookiemaxnum=5", + }, + { + header: Header{"Set-Cookie": strings.Split(strings.Repeat(";a=", defaultCookieMaxNum+1)[1:], ";")}, + cookies: slices.Repeat([]*Cookie{{Name: "a", Value: "", Quoted: false, Raw: "a="}}, defaultCookieMaxNum+1), + godebug: "httpcookiemaxnum=0", + }, + { + header: Header{"Set-Cookie": strings.Split(strings.Repeat(";a=", defaultCookieMaxNum+1)[1:], ";")}, + cookies: slices.Repeat([]*Cookie{{Name: "a", Value: "", Quoted: false, Raw: "a="}}, defaultCookieMaxNum+1), + godebug: fmt.Sprintf("httpcookiemaxnum=%v", defaultCookieMaxNum+1), }, // TODO(bradfitz): users have reported seeing this in the @@ -405,79 +426,103 @@ func toJSON(v any) string { func TestReadSetCookies(t *testing.T) { for i, tt := range readSetCookiesTests { + t.Setenv("GODEBUG", tt.godebug) for n := 0; n < 2; n++ { // to verify readSetCookies doesn't mutate its input - c := readSetCookies(tt.Header) - if !reflect.DeepEqual(c, tt.Cookies) { - t.Errorf("#%d readSetCookies: have\n%s\nwant\n%s\n", i, toJSON(c), toJSON(tt.Cookies)) + c := readSetCookies(tt.header) + if !reflect.DeepEqual(c, tt.cookies) { + t.Errorf("#%d readSetCookies: have\n%s\nwant\n%s\n", i, toJSON(c), toJSON(tt.cookies)) } } } } var readCookiesTests = []struct { - Header Header - Filter string - Cookies []*Cookie + header Header + filter string + cookies []*Cookie + godebug string }{ { - Header{"Cookie": {"Cookie-1=v$1", "c2=v2"}}, - "", - []*Cookie{ + header: Header{"Cookie": {"Cookie-1=v$1", "c2=v2"}}, + filter: "", + cookies: []*Cookie{ {Name: "Cookie-1", Value: "v$1"}, {Name: "c2", Value: "v2"}, }, }, { - Header{"Cookie": {"Cookie-1=v$1", "c2=v2"}}, - "c2", - []*Cookie{ + header: Header{"Cookie": {"Cookie-1=v$1", "c2=v2"}}, + filter: "c2", + cookies: []*Cookie{ {Name: "c2", Value: "v2"}, }, }, { - Header{"Cookie": {"Cookie-1=v$1; c2=v2"}}, - "", - []*Cookie{ + header: Header{"Cookie": {"Cookie-1=v$1; c2=v2"}}, + filter: "", + cookies: []*Cookie{ {Name: "Cookie-1", Value: "v$1"}, {Name: "c2", Value: "v2"}, }, }, { - Header{"Cookie": {"Cookie-1=v$1; c2=v2"}}, - "c2", - []*Cookie{ + header: Header{"Cookie": {"Cookie-1=v$1; c2=v2"}}, + filter: "c2", + cookies: []*Cookie{ {Name: "c2", Value: "v2"}, }, }, { - Header{"Cookie": {`Cookie-1="v$1"; c2="v2"`}}, - "", - []*Cookie{ + header: Header{"Cookie": {`Cookie-1="v$1"; c2="v2"`}}, + filter: "", + cookies: []*Cookie{ {Name: "Cookie-1", Value: "v$1", Quoted: true}, {Name: "c2", Value: "v2", Quoted: true}, }, }, { - Header{"Cookie": {`Cookie-1="v$1"; c2=v2;`}}, - "", - []*Cookie{ + header: Header{"Cookie": {`Cookie-1="v$1"; c2=v2;`}}, + filter: "", + cookies: []*Cookie{ {Name: "Cookie-1", Value: "v$1", Quoted: true}, {Name: "c2", Value: "v2"}, }, }, { - Header{"Cookie": {``}}, - "", - []*Cookie{}, + header: Header{"Cookie": {``}}, + filter: "", + cookies: []*Cookie{}, + }, + // GODEBUG=httpcookiemaxnum should work regardless if all cookies are sent + // via one "Cookie" field, or multiple fields. 
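The surrounding test entries exercise the new cap through the httpcookiemaxnum GODEBUG. As a rough sketch of how the limit surfaces through the public API, assuming the documented default of 3000 and no httpcookiemaxnum override in the environment (exact error text is not asserted):

	package main

	import (
		"fmt"
		"net/http"
		"strings"
	)

	func main() {
		// 3001 cookies in a single header line, one over the assumed default cap.
		line := strings.TrimPrefix(strings.Repeat(";a=b", 3001), ";")

		// ParseCookie reports the limit as an error.
		if _, err := http.ParseCookie(line); err != nil {
			fmt.Println("ParseCookie:", err)
		}

		// Request.Cookies goes through readCookies, which, as the comment in the
		// change above explains, cannot return an error and instead yields an
		// empty slice.
		req, _ := http.NewRequest("GET", "https://example.com/", nil)
		req.Header.Set("Cookie", line)
		fmt.Println("cookies parsed from the request:", len(req.Cookies()))
	}
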
+ { + header: Header{"Cookie": {strings.Repeat(";a=", defaultCookieMaxNum+1)[1:]}}, + cookies: []*Cookie{}, + }, + { + header: Header{"Cookie": slices.Repeat([]string{"a="}, 10)}, + cookies: []*Cookie{}, + godebug: "httpcookiemaxnum=5", + }, + { + header: Header{"Cookie": {strings.Repeat(";a=", defaultCookieMaxNum+1)[1:]}}, + cookies: slices.Repeat([]*Cookie{{Name: "a", Value: "", Quoted: false}}, defaultCookieMaxNum+1), + godebug: "httpcookiemaxnum=0", + }, + { + header: Header{"Cookie": slices.Repeat([]string{"a="}, defaultCookieMaxNum+1)}, + cookies: slices.Repeat([]*Cookie{{Name: "a", Value: "", Quoted: false}}, defaultCookieMaxNum+1), + godebug: fmt.Sprintf("httpcookiemaxnum=%v", defaultCookieMaxNum+1), }, } func TestReadCookies(t *testing.T) { for i, tt := range readCookiesTests { + t.Setenv("GODEBUG", tt.godebug) for n := 0; n < 2; n++ { // to verify readCookies doesn't mutate its input - c := readCookies(tt.Header, tt.Filter) - if !reflect.DeepEqual(c, tt.Cookies) { - t.Errorf("#%d readCookies:\nhave: %s\nwant: %s\n", i, toJSON(c), toJSON(tt.Cookies)) + c := readCookies(tt.header, tt.filter) + if !reflect.DeepEqual(c, tt.cookies) { + t.Errorf("#%d readCookies:\nhave: %s\nwant: %s\n", i, toJSON(c), toJSON(tt.cookies)) } } } @@ -690,6 +735,7 @@ func TestParseCookie(t *testing.T) { line string cookies []*Cookie err error + godebug string }{ { line: "Cookie-1=v$1", @@ -723,8 +769,28 @@ func TestParseCookie(t *testing.T) { line: "k1=\\", err: errInvalidCookieValue, }, + { + line: strings.Repeat(";a=", defaultCookieMaxNum+1)[1:], + err: errCookieNumLimitExceeded, + }, + { + line: strings.Repeat(";a=", 10)[1:], + err: errCookieNumLimitExceeded, + godebug: "httpcookiemaxnum=5", + }, + { + line: strings.Repeat(";a=", defaultCookieMaxNum+1)[1:], + cookies: slices.Repeat([]*Cookie{{Name: "a", Value: "", Quoted: false}}, defaultCookieMaxNum+1), + godebug: "httpcookiemaxnum=0", + }, + { + line: strings.Repeat(";a=", defaultCookieMaxNum+1)[1:], + cookies: slices.Repeat([]*Cookie{{Name: "a", Value: "", Quoted: false}}, defaultCookieMaxNum+1), + godebug: fmt.Sprintf("httpcookiemaxnum=%v", defaultCookieMaxNum+1), + }, } for i, tt := range tests { + t.Setenv("GODEBUG", tt.godebug) gotCookies, gotErr := ParseCookie(tt.line) if !errors.Is(gotErr, tt.err) { t.Errorf("#%d ParseCookie got error %v, want error %v", i, gotErr, tt.err) diff --git a/src/net/http/doc.go b/src/net/http/doc.go index f7ad3ae762fb6c..24e07352ca726d 100644 --- a/src/net/http/doc.go +++ b/src/net/http/doc.go @@ -84,27 +84,26 @@ custom Server: # HTTP/2 -Starting with Go 1.6, the http package has transparent support for the -HTTP/2 protocol when using HTTPS. Programs that must disable HTTP/2 -can do so by setting [Transport.TLSNextProto] (for clients) or -[Server.TLSNextProto] (for servers) to a non-nil, empty -map. Alternatively, the following GODEBUG settings are -currently supported: +The http package has transparent support for the HTTP/2 protocol. + +[Server] and [DefaultTransport] automatically enable HTTP/2 support +when using HTTPS. [Transport] does not enable HTTP/2 by default. + +To enable or disable support for HTTP/1, HTTP/2, and/or unencrypted HTTP/2, +see the [Server.Protocols] and [Transport.Protocols] configuration fields. + +To configure advanced HTTP/2 features, see the [Server.HTTP2] and +[Transport.HTTP2] configuration fields. 
+ +Alternatively, the following GODEBUG settings are currently supported: GODEBUG=http2client=0 # disable HTTP/2 client support GODEBUG=http2server=0 # disable HTTP/2 server support GODEBUG=http2debug=1 # enable verbose HTTP/2 debug logs GODEBUG=http2debug=2 # ... even more verbose, with frame dumps -Please report any issues before disabling HTTP/2 support: https://golang.org/s/http2bug - -The http package's [Transport] and [Server] both automatically enable -HTTP/2 support for simple configurations. To enable HTTP/2 for more -complex configurations, to use lower-level HTTP/2 features, or to use -a newer version of Go's http2 package, import "golang.org/x/net/http2" -directly and use its ConfigureTransport and/or ConfigureServer -functions. Manually configuring HTTP/2 via the golang.org/x/net/http2 -package takes precedence over the net/http package's built-in HTTP/2 -support. +The "omithttp2" build tag may be used to disable the HTTP/2 implementation +contained in the http package. */ + package http diff --git a/src/net/http/h2_bundle.go b/src/net/http/h2_bundle.go index 3abd7476f04e73..0df276321cf09e 100644 --- a/src/net/http/h2_bundle.go +++ b/src/net/http/h2_bundle.go @@ -1047,6 +1047,7 @@ func http2shouldRetryDial(call *http2dialCall, req *Request) bool { // - If the resulting value is zero or out of range, use a default. type http2http2Config struct { MaxConcurrentStreams uint32 + StrictMaxConcurrentRequests bool MaxDecoderHeaderTableSize uint32 MaxEncoderHeaderTableSize uint32 MaxReadFrameSize uint32 @@ -1084,12 +1085,13 @@ func http2configFromServer(h1 *Server, h2 *http2Server) http2http2Config { // (the net/http Transport). func http2configFromTransport(h2 *http2Transport) http2http2Config { conf := http2http2Config{ - MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize, - MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize, - MaxReadFrameSize: h2.MaxReadFrameSize, - SendPingTimeout: h2.ReadIdleTimeout, - PingTimeout: h2.PingTimeout, - WriteByteTimeout: h2.WriteByteTimeout, + StrictMaxConcurrentRequests: h2.StrictMaxConcurrentStreams, + MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize, + MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize, + MaxReadFrameSize: h2.MaxReadFrameSize, + SendPingTimeout: h2.ReadIdleTimeout, + PingTimeout: h2.PingTimeout, + WriteByteTimeout: h2.WriteByteTimeout, } // Unlike most config fields, where out-of-range values revert to the default, @@ -1148,6 +1150,9 @@ func http2fillNetHTTPConfig(conf *http2http2Config, h2 *HTTP2Config) { if h2.MaxConcurrentStreams != 0 { conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) } + if http2http2ConfigStrictMaxConcurrentRequests(h2) { + conf.StrictMaxConcurrentRequests = true + } if h2.MaxEncoderHeaderTableSize != 0 { conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize) } @@ -1183,6 +1188,10 @@ func http2fillNetHTTPConfig(conf *http2http2Config, h2 *HTTP2Config) { } } +func http2http2ConfigStrictMaxConcurrentRequests(h2 *HTTP2Config) bool { + return h2.StrictMaxConcurrentRequests +} + // Buffer chunks are allocated from a pool to reduce pressure on GC. // The maximum wasted space per dataBuffer is 2x the largest size class, // which happens when the dataBuffer has multiple chunks and there is @@ -1900,7 +1909,7 @@ func (fr *http2Framer) maxHeaderListSize() uint32 { func (f *http2Framer) startWrite(ftype http2FrameType, flags http2Flags, streamID uint32) { // Write the FrameHeader. 
f.wbuf = append(f.wbuf[:0], - 0, // 3 bytes of length, filled in in endWrite + 0, // 3 bytes of length, filled in endWrite 0, 0, byte(ftype), @@ -2708,6 +2717,15 @@ type http2PriorityFrame struct { http2PriorityParam } +var http2defaultRFC9218Priority = http2PriorityParam{ + incremental: 0, + urgency: 3, +} + +// Note that HTTP/2 has had two different prioritization schemes, and +// PriorityParam struct below is a superset of both schemes. The exported +// symbols are from RFC 7540 and the non-exported ones are from RFC 9218. + // PriorityParam are the stream prioritzation parameters. type http2PriorityParam struct { // StreamDep is a 31-bit stream identifier for the @@ -2723,6 +2741,20 @@ type http2PriorityParam struct { // the spec, "Add one to the value to obtain a weight between // 1 and 256." Weight uint8 + + // "The urgency (u) parameter value is Integer (see Section 3.3.1 of + // [STRUCTURED-FIELDS]), between 0 and 7 inclusive, in descending order of + // priority. The default is 3." + urgency uint8 + + // "The incremental (i) parameter value is Boolean (see Section 3.3.6 of + // [STRUCTURED-FIELDS]). It indicates if an HTTP response can be processed + // incrementally, i.e., provide some meaningful output as chunks of the + // response arrive." + // + // We use uint8 (i.e. 0 is false, 1 is true) instead of bool so we can + // avoid unnecessary type conversions and because either type takes 1 byte. + incremental uint8 } func (p http2PriorityParam) IsZero() bool { @@ -3423,7 +3455,6 @@ var ( http2VerboseLogs bool http2logFrameWrites bool http2logFrameReads bool - http2inTests bool // Enabling extended CONNECT by causes browsers to attempt to use // WebSockets-over-HTTP/2. This results in problems when the server's websocket @@ -4103,6 +4134,10 @@ type http2Server struct { type http2serverInternalState struct { mu sync.Mutex activeConns map[*http2serverConn]struct{} + + // Pool of error channels. This is per-Server rather than global + // because channels can't be reused across synctest bubbles. + errChanPool sync.Pool } func (s *http2serverInternalState) registerConn(sc *http2serverConn) { @@ -4134,6 +4169,27 @@ func (s *http2serverInternalState) startGracefulShutdown() { s.mu.Unlock() } +// Global error channel pool used for uninitialized Servers. +// We use a per-Server pool when possible to avoid using channels across synctest bubbles. +var http2errChanPool = sync.Pool{ + New: func() any { return make(chan error, 1) }, +} + +func (s *http2serverInternalState) getErrChan() chan error { + if s == nil { + return http2errChanPool.Get().(chan error) // Server used without calling ConfigureServer + } + return s.errChanPool.Get().(chan error) +} + +func (s *http2serverInternalState) putErrChan(ch chan error) { + if s == nil { + http2errChanPool.Put(ch) // Server used without calling ConfigureServer + return + } + s.errChanPool.Put(ch) +} + // ConfigureServer adds HTTP/2 support to a net/http Server. // // The configuration conf may be nil. 
@@ -4146,7 +4202,10 @@ func http2ConfigureServer(s *Server, conf *http2Server) error { if conf == nil { conf = new(http2Server) } - conf.state = &http2serverInternalState{activeConns: make(map[*http2serverConn]struct{})} + conf.state = &http2serverInternalState{ + activeConns: make(map[*http2serverConn]struct{}), + errChanPool: sync.Pool{New: func() any { return make(chan error, 1) }}, + } if h1, h2 := s, conf; h2.IdleTimeout == 0 { if h1.IdleTimeout != 0 { h2.IdleTimeout = h1.IdleTimeout @@ -5052,25 +5111,6 @@ func (sc *http2serverConn) readPreface() error { } } -var http2errChanPool = sync.Pool{ - New: func() interface{} { return make(chan error, 1) }, -} - -func http2getErrChan() chan error { - if http2inTests { - // Channels cannot be reused across synctest tests. - return make(chan error, 1) - } else { - return http2errChanPool.Get().(chan error) - } -} - -func http2putErrChan(ch chan error) { - if !http2inTests { - http2errChanPool.Put(ch) - } -} - var http2writeDataPool = sync.Pool{ New: func() interface{} { return new(http2writeData) }, } @@ -5078,7 +5118,7 @@ var http2writeDataPool = sync.Pool{ // writeDataFromHandler writes DATA response frames from a handler on // the given stream. func (sc *http2serverConn) writeDataFromHandler(stream *http2stream, data []byte, endStream bool) error { - ch := http2getErrChan() + ch := sc.srv.state.getErrChan() writeArg := http2writeDataPool.Get().(*http2writeData) *writeArg = http2writeData{stream.id, data, endStream} err := sc.writeFrameFromHandler(http2FrameWriteRequest{ @@ -5110,7 +5150,7 @@ func (sc *http2serverConn) writeDataFromHandler(stream *http2stream, data []byte return http2errStreamClosed } } - http2putErrChan(ch) + sc.srv.state.putErrChan(ch) if frameWriteDone { http2writeDataPool.Put(writeArg) } @@ -6364,7 +6404,7 @@ func (sc *http2serverConn) writeHeaders(st *http2stream, headerData *http2writeR // waiting for this frame to be written, so an http.Flush mid-handler // writes out the correct value of keys, before a handler later potentially // mutates it. - errc = http2getErrChan() + errc = sc.srv.state.getErrChan() } if err := sc.writeFrameFromHandler(http2FrameWriteRequest{ write: headerData, @@ -6376,7 +6416,7 @@ func (sc *http2serverConn) writeHeaders(st *http2stream, headerData *http2writeR if errc != nil { select { case err := <-errc: - http2putErrChan(errc) + sc.srv.state.putErrChan(errc) return err case <-sc.doneServing: return http2errClientDisconnected @@ -7057,7 +7097,7 @@ func (w *http2responseWriter) Push(target string, opts *PushOptions) error { method: opts.Method, url: u, header: http2cloneHeader(opts.Header), - done: http2getErrChan(), + done: sc.srv.state.getErrChan(), } select { @@ -7074,7 +7114,7 @@ func (w *http2responseWriter) Push(target string, opts *PushOptions) error { case <-st.cw: return http2errStreamClosed case err := <-msg.done: - http2putErrChan(msg.done) + sc.srv.state.putErrChan(msg.done) return err } } @@ -7577,6 +7617,7 @@ type http2ClientConn struct { readIdleTimeout time.Duration pingTimeout time.Duration extendedConnectAllowed bool + strictMaxConcurrentStreams bool // rstStreamPingsBlocked works around an unfortunate gRPC behavior. // gRPC strictly limits the number of PING frames that it will receive. @@ -8007,7 +8048,8 @@ func (t *http2Transport) newClientConn(c net.Conn, singleUse bool) (*http2Client initialWindowSize: 65535, // spec default initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream, maxConcurrentStreams: http2initialMaxConcurrentStreams, // "infinite", per spec. 
Use a smaller value until we have received server settings. - peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. + strictMaxConcurrentStreams: conf.StrictMaxConcurrentRequests, + peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. streams: make(map[uint32]*http2clientStream), singleUse: singleUse, seenSettingsChan: make(chan struct{}), @@ -8241,7 +8283,7 @@ func (cc *http2ClientConn) idleStateLocked() (st http2clientConnIdleState) { return } var maxConcurrentOkay bool - if cc.t.StrictMaxConcurrentStreams { + if cc.strictMaxConcurrentStreams { // We'll tell the caller we can take a new request to // prevent the caller from dialing a new TCP // connection, but then we'll block later before @@ -10884,6 +10926,8 @@ type http2OpenStreamOptions struct { // PusherID is zero if the stream was initiated by the client. Otherwise, // PusherID names the stream that pushed the newly opened stream. PusherID uint32 + // priority is used to set the priority of the newly opened stream. + priority http2PriorityParam } // FrameWriteRequest is a request to write a frame. @@ -11095,7 +11139,7 @@ func (p *http2writeQueuePool) get() *http2writeQueue { } // RFC 7540, Section 5.3.5: the default weight is 16. -const http2priorityDefaultWeight = 15 // 16 = 15 + 1 +const http2priorityDefaultWeightRFC7540 = 15 // 16 = 15 + 1 // PriorityWriteSchedulerConfig configures a priorityWriteScheduler. type http2PriorityWriteSchedulerConfig struct { @@ -11150,8 +11194,8 @@ func http2NewPriorityWriteScheduler(cfg *http2PriorityWriteSchedulerConfig) http } } - ws := &http2priorityWriteScheduler{ - nodes: make(map[uint32]*http2priorityNode), + ws := &http2priorityWriteSchedulerRFC7540{ + nodes: make(map[uint32]*http2priorityNodeRFC7540), maxClosedNodesInTree: cfg.MaxClosedNodesInTree, maxIdleNodesInTree: cfg.MaxIdleNodesInTree, enableWriteThrottle: cfg.ThrottleOutOfOrderWrites, @@ -11165,32 +11209,32 @@ func http2NewPriorityWriteScheduler(cfg *http2PriorityWriteSchedulerConfig) http return ws } -type http2priorityNodeState int +type http2priorityNodeStateRFC7540 int const ( - http2priorityNodeOpen http2priorityNodeState = iota - http2priorityNodeClosed - http2priorityNodeIdle + http2priorityNodeOpenRFC7540 http2priorityNodeStateRFC7540 = iota + http2priorityNodeClosedRFC7540 + http2priorityNodeIdleRFC7540 ) -// priorityNode is a node in an HTTP/2 priority tree. +// priorityNodeRFC7540 is a node in an HTTP/2 priority tree. // Each node is associated with a single stream ID. // See RFC 7540, Section 5.3. -type http2priorityNode struct { - q http2writeQueue // queue of pending frames to write - id uint32 // id of the stream, or 0 for the root of the tree - weight uint8 // the actual weight is weight+1, so the value is in [1,256] - state http2priorityNodeState // open | closed | idle - bytes int64 // number of bytes written by this node, or 0 if closed - subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree +type http2priorityNodeRFC7540 struct { + q http2writeQueue // queue of pending frames to write + id uint32 // id of the stream, or 0 for the root of the tree + weight uint8 // the actual weight is weight+1, so the value is in [1,256] + state http2priorityNodeStateRFC7540 // open | closed | idle + bytes int64 // number of bytes written by this node, or 0 if closed + subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree // These links form the priority tree. 
- parent *http2priorityNode - kids *http2priorityNode // start of the kids list - prev, next *http2priorityNode // doubly-linked list of siblings + parent *http2priorityNodeRFC7540 + kids *http2priorityNodeRFC7540 // start of the kids list + prev, next *http2priorityNodeRFC7540 // doubly-linked list of siblings } -func (n *http2priorityNode) setParent(parent *http2priorityNode) { +func (n *http2priorityNodeRFC7540) setParent(parent *http2priorityNodeRFC7540) { if n == parent { panic("setParent to self") } @@ -11225,7 +11269,7 @@ func (n *http2priorityNode) setParent(parent *http2priorityNode) { } } -func (n *http2priorityNode) addBytes(b int64) { +func (n *http2priorityNodeRFC7540) addBytes(b int64) { n.bytes += b for ; n != nil; n = n.parent { n.subtreeBytes += b @@ -11238,7 +11282,7 @@ func (n *http2priorityNode) addBytes(b int64) { // // f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true // if any ancestor p of n is still open (ignoring the root node). -func (n *http2priorityNode) walkReadyInOrder(openParent bool, tmp *[]*http2priorityNode, f func(*http2priorityNode, bool) bool) bool { +func (n *http2priorityNodeRFC7540) walkReadyInOrder(openParent bool, tmp *[]*http2priorityNodeRFC7540, f func(*http2priorityNodeRFC7540, bool) bool) bool { if !n.q.empty() && f(n, openParent) { return true } @@ -11249,7 +11293,7 @@ func (n *http2priorityNode) walkReadyInOrder(openParent bool, tmp *[]*http2prior // Don't consider the root "open" when updating openParent since // we can't send data frames on the root stream (only control frames). if n.id != 0 { - openParent = openParent || (n.state == http2priorityNodeOpen) + openParent = openParent || (n.state == http2priorityNodeOpenRFC7540) } // Common case: only one kid or all kids have the same weight. @@ -11279,7 +11323,7 @@ func (n *http2priorityNode) walkReadyInOrder(openParent bool, tmp *[]*http2prior *tmp = append(*tmp, n.kids) n.kids.setParent(nil) } - sort.Sort(http2sortPriorityNodeSiblings(*tmp)) + sort.Sort(http2sortPriorityNodeSiblingsRFC7540(*tmp)) for i := len(*tmp) - 1; i >= 0; i-- { (*tmp)[i].setParent(n) // setParent inserts at the head of n.kids } @@ -11291,13 +11335,13 @@ func (n *http2priorityNode) walkReadyInOrder(openParent bool, tmp *[]*http2prior return false } -type http2sortPriorityNodeSiblings []*http2priorityNode +type http2sortPriorityNodeSiblingsRFC7540 []*http2priorityNodeRFC7540 -func (z http2sortPriorityNodeSiblings) Len() int { return len(z) } +func (z http2sortPriorityNodeSiblingsRFC7540) Len() int { return len(z) } -func (z http2sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] } +func (z http2sortPriorityNodeSiblingsRFC7540) Swap(i, k int) { z[i], z[k] = z[k], z[i] } -func (z http2sortPriorityNodeSiblings) Less(i, k int) bool { +func (z http2sortPriorityNodeSiblingsRFC7540) Less(i, k int) bool { // Prefer the subtree that has sent fewer bytes relative to its weight. // See sections 5.3.2 and 5.3.4. wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes) @@ -11311,13 +11355,13 @@ func (z http2sortPriorityNodeSiblings) Less(i, k int) bool { return bi/bk <= wi/wk } -type http2priorityWriteScheduler struct { +type http2priorityWriteSchedulerRFC7540 struct { // root is the root of the priority tree, where root.id = 0. // The root queues control frames that are not associated with any stream. - root http2priorityNode + root http2priorityNodeRFC7540 // nodes maps stream ids to priority tree nodes. 
- nodes map[uint32]*http2priorityNode + nodes map[uint32]*http2priorityNodeRFC7540 // maxID is the maximum stream id in nodes. maxID uint32 @@ -11325,7 +11369,7 @@ type http2priorityWriteScheduler struct { // lists of nodes that have been closed or are idle, but are kept in // the tree for improved prioritization. When the lengths exceed either // maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded. - closedNodes, idleNodes []*http2priorityNode + closedNodes, idleNodes []*http2priorityNodeRFC7540 // From the config. maxClosedNodesInTree int @@ -11334,19 +11378,19 @@ type http2priorityWriteScheduler struct { enableWriteThrottle bool // tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations. - tmp []*http2priorityNode + tmp []*http2priorityNodeRFC7540 // pool of empty queues for reuse. queuePool http2writeQueuePool } -func (ws *http2priorityWriteScheduler) OpenStream(streamID uint32, options http2OpenStreamOptions) { +func (ws *http2priorityWriteSchedulerRFC7540) OpenStream(streamID uint32, options http2OpenStreamOptions) { // The stream may be currently idle but cannot be opened or closed. if curr := ws.nodes[streamID]; curr != nil { - if curr.state != http2priorityNodeIdle { + if curr.state != http2priorityNodeIdleRFC7540 { panic(fmt.Sprintf("stream %d already opened", streamID)) } - curr.state = http2priorityNodeOpen + curr.state = http2priorityNodeOpenRFC7540 return } @@ -11358,11 +11402,11 @@ func (ws *http2priorityWriteScheduler) OpenStream(streamID uint32, options http2 if parent == nil { parent = &ws.root } - n := &http2priorityNode{ + n := &http2priorityNodeRFC7540{ q: *ws.queuePool.get(), id: streamID, - weight: http2priorityDefaultWeight, - state: http2priorityNodeOpen, + weight: http2priorityDefaultWeightRFC7540, + state: http2priorityNodeOpenRFC7540, } n.setParent(parent) ws.nodes[streamID] = n @@ -11371,19 +11415,19 @@ func (ws *http2priorityWriteScheduler) OpenStream(streamID uint32, options http2 } } -func (ws *http2priorityWriteScheduler) CloseStream(streamID uint32) { +func (ws *http2priorityWriteSchedulerRFC7540) CloseStream(streamID uint32) { if streamID == 0 { panic("violation of WriteScheduler interface: cannot close stream 0") } if ws.nodes[streamID] == nil { panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID)) } - if ws.nodes[streamID].state != http2priorityNodeOpen { + if ws.nodes[streamID].state != http2priorityNodeOpenRFC7540 { panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID)) } n := ws.nodes[streamID] - n.state = http2priorityNodeClosed + n.state = http2priorityNodeClosedRFC7540 n.addBytes(-n.bytes) q := n.q @@ -11396,7 +11440,7 @@ func (ws *http2priorityWriteScheduler) CloseStream(streamID uint32) { } } -func (ws *http2priorityWriteScheduler) AdjustStream(streamID uint32, priority http2PriorityParam) { +func (ws *http2priorityWriteSchedulerRFC7540) AdjustStream(streamID uint32, priority http2PriorityParam) { if streamID == 0 { panic("adjustPriority on root") } @@ -11410,11 +11454,11 @@ func (ws *http2priorityWriteScheduler) AdjustStream(streamID uint32, priority ht return } ws.maxID = streamID - n = &http2priorityNode{ + n = &http2priorityNodeRFC7540{ q: *ws.queuePool.get(), id: streamID, - weight: http2priorityDefaultWeight, - state: http2priorityNodeIdle, + weight: http2priorityDefaultWeightRFC7540, + state: http2priorityNodeIdleRFC7540, } n.setParent(&ws.root) ws.nodes[streamID] = n @@ -11426,7 +11470,7 @@ func (ws 
*http2priorityWriteScheduler) AdjustStream(streamID uint32, priority ht parent := ws.nodes[priority.StreamDep] if parent == nil { n.setParent(&ws.root) - n.weight = http2priorityDefaultWeight + n.weight = http2priorityDefaultWeightRFC7540 return } @@ -11467,8 +11511,8 @@ func (ws *http2priorityWriteScheduler) AdjustStream(streamID uint32, priority ht n.weight = priority.Weight } -func (ws *http2priorityWriteScheduler) Push(wr http2FrameWriteRequest) { - var n *http2priorityNode +func (ws *http2priorityWriteSchedulerRFC7540) Push(wr http2FrameWriteRequest) { + var n *http2priorityNodeRFC7540 if wr.isControl() { n = &ws.root } else { @@ -11487,8 +11531,8 @@ func (ws *http2priorityWriteScheduler) Push(wr http2FrameWriteRequest) { n.q.push(wr) } -func (ws *http2priorityWriteScheduler) Pop() (wr http2FrameWriteRequest, ok bool) { - ws.root.walkReadyInOrder(false, &ws.tmp, func(n *http2priorityNode, openParent bool) bool { +func (ws *http2priorityWriteSchedulerRFC7540) Pop() (wr http2FrameWriteRequest, ok bool) { + ws.root.walkReadyInOrder(false, &ws.tmp, func(n *http2priorityNodeRFC7540, openParent bool) bool { limit := int32(math.MaxInt32) if openParent { limit = ws.writeThrottleLimit @@ -11514,7 +11558,7 @@ func (ws *http2priorityWriteScheduler) Pop() (wr http2FrameWriteRequest, ok bool return wr, ok } -func (ws *http2priorityWriteScheduler) addClosedOrIdleNode(list *[]*http2priorityNode, maxSize int, n *http2priorityNode) { +func (ws *http2priorityWriteSchedulerRFC7540) addClosedOrIdleNode(list *[]*http2priorityNodeRFC7540, maxSize int, n *http2priorityNodeRFC7540) { if maxSize == 0 { return } @@ -11528,7 +11572,7 @@ func (ws *http2priorityWriteScheduler) addClosedOrIdleNode(list *[]*http2priorit *list = append(*list, n) } -func (ws *http2priorityWriteScheduler) removeNode(n *http2priorityNode) { +func (ws *http2priorityWriteSchedulerRFC7540) removeNode(n *http2priorityNodeRFC7540) { for n.kids != nil { n.kids.setParent(n.parent) } @@ -11536,6 +11580,205 @@ func (ws *http2priorityWriteScheduler) removeNode(n *http2priorityNode) { delete(ws.nodes, n.id) } +type http2streamMetadata struct { + location *http2writeQueue + priority http2PriorityParam +} + +type http2priorityWriteSchedulerRFC9218 struct { + // control contains control frames (SETTINGS, PING, etc.). + control http2writeQueue + + // heads contain the head of a circular list of streams. + // We put these heads within a nested array that represents urgency and + // incremental, as defined in + // https://www.rfc-editor.org/rfc/rfc9218.html#name-priority-parameters. + // 8 represents u=0 up to u=7, and 2 represents i=false and i=true. + heads [8][2]*http2writeQueue + + // streams contains a mapping between each stream ID and their metadata, so + // we can quickly locate them when needing to, for example, adjust their + // priority. + streams map[uint32]http2streamMetadata + + // queuePool are empty queues for reuse. + queuePool http2writeQueuePool + + // prioritizeIncremental is used to determine whether we should prioritize + // incremental streams or not, when urgency is the same in a given Pop() + // call. 
+ prioritizeIncremental bool +} + +func http2newPriorityWriteSchedulerRFC9128() http2WriteScheduler { + ws := &http2priorityWriteSchedulerRFC9218{ + streams: make(map[uint32]http2streamMetadata), + } + return ws +} + +func (ws *http2priorityWriteSchedulerRFC9218) OpenStream(streamID uint32, opt http2OpenStreamOptions) { + if ws.streams[streamID].location != nil { + panic(fmt.Errorf("stream %d already opened", streamID)) + } + q := ws.queuePool.get() + ws.streams[streamID] = http2streamMetadata{ + location: q, + priority: opt.priority, + } + + u, i := opt.priority.urgency, opt.priority.incremental + if ws.heads[u][i] == nil { + ws.heads[u][i] = q + q.next = q + q.prev = q + } else { + // Queues are stored in a ring. + // Insert the new stream before ws.head, putting it at the end of the list. + q.prev = ws.heads[u][i].prev + q.next = ws.heads[u][i] + q.prev.next = q + q.next.prev = q + } +} + +func (ws *http2priorityWriteSchedulerRFC9218) CloseStream(streamID uint32) { + metadata := ws.streams[streamID] + q, u, i := metadata.location, metadata.priority.urgency, metadata.priority.incremental + if q == nil { + return + } + if q.next == q { + // This was the only open stream. + ws.heads[u][i] = nil + } else { + q.prev.next = q.next + q.next.prev = q.prev + if ws.heads[u][i] == q { + ws.heads[u][i] = q.next + } + } + delete(ws.streams, streamID) + ws.queuePool.put(q) +} + +func (ws *http2priorityWriteSchedulerRFC9218) AdjustStream(streamID uint32, priority http2PriorityParam) { + metadata := ws.streams[streamID] + q, u, i := metadata.location, metadata.priority.urgency, metadata.priority.incremental + if q == nil { + return + } + + // Remove stream from current location. + if q.next == q { + // This was the only open stream. + ws.heads[u][i] = nil + } else { + q.prev.next = q.next + q.next.prev = q.prev + if ws.heads[u][i] == q { + ws.heads[u][i] = q.next + } + } + + // Insert stream to the new queue. + u, i = priority.urgency, priority.incremental + if ws.heads[u][i] == nil { + ws.heads[u][i] = q + q.next = q + q.prev = q + } else { + // Queues are stored in a ring. + // Insert the new stream before ws.head, putting it at the end of the list. + q.prev = ws.heads[u][i].prev + q.next = ws.heads[u][i] + q.prev.next = q + q.next.prev = q + } + + // Update the metadata. + ws.streams[streamID] = http2streamMetadata{ + location: q, + priority: priority, + } +} + +func (ws *http2priorityWriteSchedulerRFC9218) Push(wr http2FrameWriteRequest) { + if wr.isControl() { + ws.control.push(wr) + return + } + q := ws.streams[wr.StreamID()].location + if q == nil { + // This is a closed stream. + // wr should not be a HEADERS or DATA frame. + // We push the request onto the control queue. + if wr.DataSize() > 0 { + panic("add DATA on non-open stream") + } + ws.control.push(wr) + return + } + q.push(wr) +} + +func (ws *http2priorityWriteSchedulerRFC9218) Pop() (http2FrameWriteRequest, bool) { + // Control and RST_STREAM frames first. + if !ws.control.empty() { + return ws.control.shift(), true + } + + // On the next Pop(), we want to prioritize incremental if we prioritized + // non-incremental request of the same urgency this time. Vice-versa. + // i.e. when there are incremental and non-incremental requests at the same + // priority, we give 50% of our bandwidth to the incremental ones in + // aggregate and 50% to the first non-incremental one (since + // non-incremental streams do not use round-robin writes). + ws.prioritizeIncremental = !ws.prioritizeIncremental + + // Always prioritize lowest u (i.e. 
highest urgency level). + for u := range ws.heads { + for i := range ws.heads[u] { + // When we want to prioritize incremental, we try to pop i=true + // first before i=false when u is the same. + if ws.prioritizeIncremental { + i = (i + 1) % 2 + } + q := ws.heads[u][i] + if q == nil { + continue + } + for { + if wr, ok := q.consume(math.MaxInt32); ok { + if i == 1 { + // For incremental streams, we update head to q.next so + // we can round-robin between multiple streams that can + // immediately benefit from partial writes. + ws.heads[u][i] = q.next + } else { + // For non-incremental streams, we try to finish one to + // completion rather than doing round-robin. However, + // we update head here so that if q.consume() is !ok + // (e.g. the stream has no more frame to consume), head + // is updated to the next q that has frames to consume + // on future iterations. This way, we do not prioritize + // writing to unavailable stream on next Pop() calls, + // preventing head-of-line blocking. + ws.heads[u][i] = q + } + return wr, true + } + q = q.next + if q == ws.heads[u][i] { + break + } + } + + } + } + return http2FrameWriteRequest{}, false +} + // NewRandomWriteScheduler constructs a WriteScheduler that ignores HTTP/2 // priorities. Control frames like SETTINGS and PING are written before DATA // frames, but if no control frames are queued and multiple streams have queued @@ -11622,7 +11865,7 @@ type http2roundRobinWriteScheduler struct { } // newRoundRobinWriteScheduler constructs a new write scheduler. -// The round robin scheduler priorizes control frames +// The round robin scheduler prioritizes control frames // like SETTINGS and PING over DATA frames. // When there are no control frames to send, it performs a round-robin // selection from the ready streams. diff --git a/src/net/http/h2_error_test.go b/src/net/http/h2_error_test.go index 5e400683b415e7..e71825451a8e32 100644 --- a/src/net/http/h2_error_test.go +++ b/src/net/http/h2_error_test.go @@ -25,19 +25,18 @@ func (e externalStreamError) Error() string { } func TestStreamError(t *testing.T) { - var target externalStreamError streamErr := http2streamError(42, http2ErrCodeProtocol) - ok := errors.As(streamErr, &target) + extStreamErr, ok := errors.AsType[externalStreamError](streamErr) if !ok { - t.Fatalf("errors.As failed") + t.Fatalf("errors.AsType failed") } - if target.StreamID != streamErr.StreamID { - t.Errorf("got StreamID %v, expected %v", target.StreamID, streamErr.StreamID) + if extStreamErr.StreamID != streamErr.StreamID { + t.Errorf("got StreamID %v, expected %v", extStreamErr.StreamID, streamErr.StreamID) } - if target.Cause != streamErr.Cause { - t.Errorf("got Cause %v, expected %v", target.Cause, streamErr.Cause) + if extStreamErr.Cause != streamErr.Cause { + t.Errorf("got Cause %v, expected %v", extStreamErr.Cause, streamErr.Cause) } - if uint32(target.Code) != uint32(streamErr.Code) { - t.Errorf("got Code %v, expected %v", target.Code, streamErr.Code) + if uint32(extStreamErr.Code) != uint32(streamErr.Code) { + t.Errorf("got Code %v, expected %v", extStreamErr.Code, streamErr.Code) } } diff --git a/src/net/http/http2_test.go b/src/net/http/http2_test.go deleted file mode 100644 index 7841b05b593400..00000000000000 --- a/src/net/http/http2_test.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2025 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
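The h2_error_test.go change above, like the net test changes later in this diff, replaces errors.As with the new generic errors.AsType. A minimal sketch of the pattern outside a test; the DNS lookup is only an illustrative way to obtain a *net.DNSError:

	package main

	import (
		"errors"
		"fmt"
		"net"
	)

	func main() {
		_, err := net.LookupHost("name.invalid")
		// AsType returns the matched error value and a bool, so no pre-declared
		// target variable is needed as with errors.As.
		if dnsErr, ok := errors.AsType[*net.DNSError](err); ok {
			fmt.Println("DNS lookup failed, IsNotFound:", dnsErr.IsNotFound)
		}
	}
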
- -//go:build !nethttpomithttp2 - -package http - -func init() { - // Disable HTTP/2 internal channel pooling which interferes with synctest. - http2inTests = true -} diff --git a/src/net/http/httptest/recorder.go b/src/net/http/httptest/recorder.go index 17aa70f06760a2..4006f4406d27a5 100644 --- a/src/net/http/httptest/recorder.go +++ b/src/net/http/httptest/recorder.go @@ -105,23 +105,45 @@ func (rw *ResponseRecorder) writeHeader(b []byte, str string) { // Write implements http.ResponseWriter. The data in buf is written to // rw.Body, if not nil. func (rw *ResponseRecorder) Write(buf []byte) (int, error) { + // Record the write, even if we're going to return an error. rw.writeHeader(buf, "") if rw.Body != nil { rw.Body.Write(buf) } + if !bodyAllowedForStatus(rw.Code) { + return 0, http.ErrBodyNotAllowed + } return len(buf), nil } // WriteString implements [io.StringWriter]. The data in str is written // to rw.Body, if not nil. func (rw *ResponseRecorder) WriteString(str string) (int, error) { + // Record the write, even if we're going to return an error. rw.writeHeader(nil, str) if rw.Body != nil { rw.Body.WriteString(str) } + if !bodyAllowedForStatus(rw.Code) { + return 0, http.ErrBodyNotAllowed + } return len(str), nil } +// bodyAllowedForStatus reports whether a given response status code +// permits a body. See RFC 7230, section 3.3. +func bodyAllowedForStatus(status int) bool { + switch { + case status >= 100 && status <= 199: + return false + case status == 204: + return false + case status == 304: + return false + } + return true +} + func checkWriteHeaderCode(code int) { // Issue 22880: require valid WriteHeader status codes. // For now we only enforce that it's three digits. diff --git a/src/net/http/httptest/recorder_test.go b/src/net/http/httptest/recorder_test.go index 4782eced43e6ce..099697651465ad 100644 --- a/src/net/http/httptest/recorder_test.go +++ b/src/net/http/httptest/recorder_test.go @@ -5,6 +5,8 @@ package httptest import ( + "bytes" + "errors" "fmt" "io" "net/http" @@ -309,6 +311,26 @@ func TestRecorder(t *testing.T) { } } +func TestBodyNotAllowed(t *testing.T) { + rw := NewRecorder() + rw.Body = new(bytes.Buffer) + rw.WriteHeader(204) + + _, err := rw.Write([]byte("hello ")) + if !errors.Is(err, http.ErrBodyNotAllowed) { + t.Errorf("expected BodyNotAllowed for Write after 204, got: %v", err) + } + + _, err = rw.WriteString("world") + if !errors.Is(err, http.ErrBodyNotAllowed) { + t.Errorf("expected BodyNotAllowed for WriteString after 204, got: %v", err) + } + + if got, want := rw.Body.String(), "hello world"; got != want { + t.Errorf("got Body=%q, want %q", got, want) + } +} + // issue 39017 - disallow Content-Length values such as "+3" func TestParseContentLength(t *testing.T) { tests := []struct { diff --git a/src/net/http/httputil/reverseproxy.go b/src/net/http/httputil/reverseproxy.go index 6ed4930727404b..9d8784cd2bc7a9 100644 --- a/src/net/http/httputil/reverseproxy.go +++ b/src/net/http/httputil/reverseproxy.go @@ -133,36 +133,6 @@ type ReverseProxy struct { // At most one of Rewrite or Director may be set. Rewrite func(*ProxyRequest) - // Director is a function which modifies - // the request into a new request to be sent - // using Transport. Its response is then copied - // back to the original client unmodified. - // Director must not access the provided Request - // after returning. - // - // By default, the X-Forwarded-For header is set to the - // value of the client IP address. 
If an X-Forwarded-For - // header already exists, the client IP is appended to the - // existing values. As a special case, if the header - // exists in the Request.Header map but has a nil value - // (such as when set by the Director func), the X-Forwarded-For - // header is not modified. - // - // To prevent IP spoofing, be sure to delete any pre-existing - // X-Forwarded-For header coming from the client or - // an untrusted proxy. - // - // Hop-by-hop headers are removed from the request after - // Director returns, which can remove headers added by - // Director. Use a Rewrite function instead to ensure - // modifications to the request are preserved. - // - // Unparsable query parameters are removed from the outbound - // request if Request.Form is set after Director returns. - // - // At most one of Rewrite or Director may be set. - Director func(*http.Request) - // The transport used to perform proxy requests. // If nil, http.DefaultTransport is used. Transport http.RoundTripper @@ -210,6 +180,88 @@ type ReverseProxy struct { // If nil, the default is to log the provided error and return // a 502 Status Bad Gateway response. ErrorHandler func(http.ResponseWriter, *http.Request, error) + + // Director is deprecated. Use Rewrite instead. + // + // This function is insecure: + // + // - Hop-by-hop headers are removed from the request after Director + // returns, which can remove headers added by Director. + // A client can designate headers as hop-by-hop by listing them + // in the Connection header, so this permits a malicious client + // to remove any headers that may be added by Director. + // + // - X-Forwarded-For, X-Forwarded-Host, and X-Forwarded-Proto + // headers in inbound requests are preserved by default, + // which can permit IP spoofing if the Director function is + // not careful to remove these headers. + // + // Rewrite addresses these issues. + // + // As an example of converting a Director function to Rewrite: + // + // // ReverseProxy with a Director function. + // proxy := &httputil.ReverseProxy{ + // Director: func(req *http.Request) { + // req.URL.Scheme = "https" + // req.URL.Host = proxyHost + // + // // A malicious client can remove this header. + // req.Header.Set("Some-Header", "some-header-value") + // + // // X-Forwarded-* headers sent by the client are preserved, + // // since Director did not remove them. + // }, + // } + // + // // ReverseProxy with a Rewrite function. + // proxy := &httputil.ReverseProxy{ + // Rewrite: func(preq *httputil.ProxyRequest) { + // // See also ProxyRequest.SetURL. + // preq.Out.URL.Scheme = "https" + // preq.Out.URL.Host = proxyHost + // + // // This header cannot be affected by a malicious client. + // preq.Out.Header.Set("Some-Header", "some-header-value") + // + // // X-Forwarded- headers sent by the client have been + // // removed from preq.Out. + // // ProxyRequest.SetXForwarded optionally adds new ones. + // preq.SetXForwarded() + // }, + // } + // + // Director is a function which modifies + // the request into a new request to be sent + // using Transport. Its response is then copied + // back to the original client unmodified. + // Director must not access the provided Request + // after returning. + // + // By default, the X-Forwarded-For header is set to the + // value of the client IP address. If an X-Forwarded-For + // header already exists, the client IP is appended to the + // existing values. 
As a special case, if the header + // exists in the Request.Header map but has a nil value + // (such as when set by the Director func), the X-Forwarded-For + // header is not modified. + // + // To prevent IP spoofing, be sure to delete any pre-existing + // X-Forwarded-For header coming from the client or + // an untrusted proxy. + // + // Hop-by-hop headers are removed from the request after + // Director returns, which can remove headers added by + // Director. Use a Rewrite function instead to ensure + // modifications to the request are preserved. + // + // Unparsable query parameters are removed from the outbound + // request if Request.Form is set after Director returns. + // + // At most one of Rewrite or Director may be set. + // + // Deprecated: Use Rewrite instead. + Director func(*http.Request) } // A BufferPool is an interface for getting and returning temporary @@ -259,6 +311,10 @@ func joinURLPath(a, b *url.URL) (path, rawpath string) { // // NewSingleHostReverseProxy does not rewrite the Host header. // +// For backwards compatibility reasons, NewSingleHostReverseProxy +// returns a ReverseProxy using the deprecated Director function. +// This proxy preserves X-Forwarded-* headers sent by the client. +// // To customize the ReverseProxy behavior beyond what // NewSingleHostReverseProxy provides, use ReverseProxy directly // with a Rewrite function. The ProxyRequest SetURL method diff --git a/src/net/http/internal/httpcommon/httpcommon.go b/src/net/http/internal/httpcommon/httpcommon.go index 5e0c24035aba20..e3f8ec79e094d3 100644 --- a/src/net/http/internal/httpcommon/httpcommon.go +++ b/src/net/http/internal/httpcommon/httpcommon.go @@ -202,7 +202,7 @@ type EncodeHeadersParam struct { DefaultUserAgent string } -// EncodeHeadersParam is the result of EncodeHeaders. +// EncodeHeadersResult is the result of EncodeHeaders. type EncodeHeadersResult struct { HasBody bool HasTrailers bool @@ -550,7 +550,7 @@ type ServerRequestResult struct { // If the request should be rejected, this is a short string suitable for passing // to the http2 package's CountError function. - // It might be a bit odd to return errors this way rather than returing an error, + // It might be a bit odd to return errors this way rather than returning an error, // but this ensures we don't forget to include a CountError reason. InvalidReason string } diff --git a/src/net/http/pprof/pprof.go b/src/net/http/pprof/pprof.go index 635d3ad9d9f132..71aade67d32046 100644 --- a/src/net/http/pprof/pprof.go +++ b/src/net/http/pprof/pprof.go @@ -77,6 +77,7 @@ import ( "fmt" "html" "internal/godebug" + "internal/goexperiment" "internal/profile" "io" "log" @@ -351,12 +352,13 @@ func collectProfile(p *pprof.Profile) (*profile.Profile, error) { } var profileSupportsDelta = map[handler]bool{ - "allocs": true, - "block": true, - "goroutine": true, - "heap": true, - "mutex": true, - "threadcreate": true, + "allocs": true, + "block": true, + "goroutineleak": true, + "goroutine": true, + "heap": true, + "mutex": true, + "threadcreate": true, } var profileDescriptions = map[string]string{ @@ -372,6 +374,12 @@ var profileDescriptions = map[string]string{ "trace": "A trace of execution of the current program. You can specify the duration in the seconds GET parameter. After you get the trace file, use the go tool trace command to investigate the trace.", } +func init() { + if goexperiment.GoroutineLeakProfile { + profileDescriptions["goroutineleak"] = "Stack traces of all leaked goroutines. 
Use debug=2 as a query parameter to export in the same format as an unrecovered panic." + } +} + type profileEntry struct { Name string Href string diff --git a/src/net/http/requestwrite_test.go b/src/net/http/requestwrite_test.go index 380ae9dec3244d..8b097cd5e15d1f 100644 --- a/src/net/http/requestwrite_test.go +++ b/src/net/http/requestwrite_test.go @@ -15,6 +15,7 @@ import ( "strings" "testing" "testing/iotest" + "testing/synctest" "time" ) @@ -667,6 +668,13 @@ func TestRequestWrite(t *testing.T) { func TestRequestWriteTransport(t *testing.T) { t.Parallel() + // Run this test in a synctest bubble, since it relies on the transport + // successfully probing the request body within 200ms + // (see transferWriter.probeRequestBody). + // This occasionally flakes on slow builders (#52575) if we don't use a fake clock. + synctest.Test(t, testRequestWriteTransport) +} +func testRequestWriteTransport(t *testing.T) { matchSubstr := func(substr string) func(string) error { return func(written string) error { if !strings.Contains(written, substr) { diff --git a/src/net/http/server.go b/src/net/http/server.go index 4078c899061409..02554d1a201afd 100644 --- a/src/net/http/server.go +++ b/src/net/http/server.go @@ -3066,6 +3066,9 @@ type Server struct { // automatically closed when the function returns. // If TLSNextProto is not nil, HTTP/2 support is not enabled // automatically. + // + // Historically, TLSNextProto was used to disable HTTP/2 support. + // The Server.Protocols field now provides a simpler way to do this. TLSNextProto map[string]func(*Server, *tls.Conn, Handler) // ConnState specifies an optional callback function that is @@ -3094,9 +3097,6 @@ type Server struct { ConnContext func(ctx context.Context, c net.Conn) context.Context // HTTP2 configures HTTP/2 connections. - // - // This field does not yet have any effect. - // See https://go.dev/issue/67813. HTTP2 *HTTP2Config // Protocols is the set of protocols accepted by the server. diff --git a/src/net/http/socks_bundle.go b/src/net/http/socks_bundle.go index 776b03d941a405..862e5fa2a5165d 100644 --- a/src/net/http/socks_bundle.go +++ b/src/net/http/socks_bundle.go @@ -453,7 +453,7 @@ func (up *socksUsernamePassword) Authenticate(ctx context.Context, rw io.ReadWri b = append(b, up.Username...) b = append(b, byte(len(up.Password))) b = append(b, up.Password...) - // TODO(mikio): handle IO deadlines and cancelation if + // TODO(mikio): handle IO deadlines and cancellation if // necessary if _, err := rw.Write(b); err != nil { return err diff --git a/src/net/http/transport.go b/src/net/http/transport.go index 5cef9be487a4e2..a560765d331d65 100644 --- a/src/net/http/transport.go +++ b/src/net/http/transport.go @@ -249,6 +249,9 @@ type Transport struct { // must return a RoundTripper that then handles the request. // If TLSNextProto is not nil, HTTP/2 support is not enabled // automatically. + // + // Historically, TLSNextProto was used to disable HTTP/2 support. + // The Transport.Protocols field now provides a simpler way to do this. TLSNextProto map[string]func(authority string, c *tls.Conn) RoundTripper // ProxyConnectHeader optionally specifies headers to send to @@ -296,9 +299,6 @@ type Transport struct { ForceAttemptHTTP2 bool // HTTP2 configures HTTP/2 connections. - // - // This field does not yet have any effect. - // See https://go.dev/issue/67813. HTTP2 *HTTP2Config // Protocols is the set of protocols supported by the transport. 
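Both TLSNextProto comment updates in this hunk point to the Protocols field as the simpler way to disable HTTP/2. A hedged sketch of the client-side equivalent, with the setter method again assumed from the released API:

	package main

	import "net/http"

	func main() {
		// Restrict a client to HTTP/1.1 without installing an empty
		// TLSNextProto map.
		var protocols http.Protocols
		protocols.SetHTTP1(true) // HTTP/2 is simply never enabled

		client := &http.Client{
			Transport: &http.Transport{Protocols: &protocols},
		}
		_ = client
	}
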
diff --git a/src/net/lookup_test.go b/src/net/lookup_test.go index 514cbd098ae772..2a774100a8ec67 100644 --- a/src/net/lookup_test.go +++ b/src/net/lookup_test.go @@ -1420,8 +1420,8 @@ func testLookupNoData(t *testing.T, prefix string) { return } - var dnsErr *DNSError - if errors.As(err, &dnsErr) { + dnsErr, ok := errors.AsType[*DNSError](err) + if ok { succeeded := true if !dnsErr.IsNotFound { succeeded = false @@ -1455,8 +1455,7 @@ func testLookupNoData(t *testing.T, prefix string) { func TestLookupPortNotFound(t *testing.T) { allResolvers(t, func(t *testing.T) { _, err := LookupPort("udp", "_-unknown-service-") - var dnsErr *DNSError - if !errors.As(err, &dnsErr) || !dnsErr.IsNotFound { + if dnsErr, ok := errors.AsType[*DNSError](err); !ok || !dnsErr.IsNotFound { t.Fatalf("unexpected error: %v", err) } }) @@ -1475,8 +1474,7 @@ var tcpOnlyService = func() string { func TestLookupPortDifferentNetwork(t *testing.T) { allResolvers(t, func(t *testing.T) { _, err := LookupPort("udp", tcpOnlyService) - var dnsErr *DNSError - if !errors.As(err, &dnsErr) || !dnsErr.IsNotFound { + if dnsErr, ok := errors.AsType[*DNSError](err); !ok || !dnsErr.IsNotFound { t.Fatalf("unexpected error: %v", err) } }) diff --git a/src/net/mail/message.go b/src/net/mail/message.go index 14f839a03077c1..1502b3596252ba 100644 --- a/src/net/mail/message.go +++ b/src/net/mail/message.go @@ -724,7 +724,8 @@ func (p *addrParser) consumeDomainLiteral() (string, error) { } // Parse the dtext - var dtext string + dtext := p.s + dtextLen := 0 for { if p.empty() { return "", errors.New("mail: unclosed domain-literal") @@ -741,9 +742,10 @@ func (p *addrParser) consumeDomainLiteral() (string, error) { return "", fmt.Errorf("mail: bad character in domain-literal: %q", r) } - dtext += p.s[:size] + dtextLen += size p.s = p.s[size:] } + dtext = dtext[:dtextLen] // Skip the trailing ] if !p.consume(']') { diff --git a/src/net/tcpsock_test.go b/src/net/tcpsock_test.go index 9ed49a925b4b39..085989c7499b60 100644 --- a/src/net/tcpsock_test.go +++ b/src/net/tcpsock_test.go @@ -475,6 +475,9 @@ func TestTCPReadWriteAllocs(t *testing.T) { t.Skipf("not supported on %s", runtime.GOOS) } + // Optimizations are required to remove the allocs. + testenv.SkipIfOptimizationOff(t) + ln, err := Listen("tcp", "127.0.0.1:0") if err != nil { t.Fatal(err) @@ -509,7 +512,7 @@ func TestTCPReadWriteAllocs(t *testing.T) { } }) if allocs > 0 { - t.Fatalf("got %v; want 0", allocs) + t.Errorf("got %v; want 0", allocs) } var bufwrt [128]byte @@ -531,7 +534,7 @@ func TestTCPReadWriteAllocs(t *testing.T) { } }) if allocs > 0 { - t.Fatalf("got %v; want 0", allocs) + t.Errorf("got %v; want 0", allocs) } } diff --git a/src/net/textproto/reader.go b/src/net/textproto/reader.go index 668c06c24c1372..6df3a630917d78 100644 --- a/src/net/textproto/reader.go +++ b/src/net/textproto/reader.go @@ -285,8 +285,10 @@ func (r *Reader) ReadCodeLine(expectCode int) (code int, message string, err err // // An expectCode <= 0 disables the check of the status code. 
func (r *Reader) ReadResponse(expectCode int) (code int, message string, err error) { - code, continued, message, err := r.readCodeLine(expectCode) + code, continued, first, err := r.readCodeLine(expectCode) multi := continued + var messageBuilder strings.Builder + messageBuilder.WriteString(first) for continued { line, err := r.ReadLine() if err != nil { @@ -297,12 +299,15 @@ func (r *Reader) ReadResponse(expectCode int) (code int, message string, err err var moreMessage string code2, continued, moreMessage, err = parseCodeLine(line, 0) if err != nil || code2 != code { - message += "\n" + strings.TrimRight(line, "\r\n") + messageBuilder.WriteByte('\n') + messageBuilder.WriteString(strings.TrimRight(line, "\r\n")) continued = true continue } - message += "\n" + moreMessage + messageBuilder.WriteByte('\n') + messageBuilder.WriteString(moreMessage) } + message = messageBuilder.String() if err != nil && multi && message != "" { // replace one line error message with all lines (full message) err = &Error{code, message} diff --git a/src/net/url/url.go b/src/net/url/url.go index 2a57659460373d..4508f266084036 100644 --- a/src/net/url/url.go +++ b/src/net/url/url.go @@ -15,7 +15,7 @@ package url import ( "errors" "fmt" - "maps" + "net/netip" "path" "slices" "strconv" @@ -364,25 +364,41 @@ func escape(s string, mode encoding) string { // A consequence is that it is impossible to tell which slashes in the Path were // slashes in the raw URL and which were %2f. This distinction is rarely important, // but when it is, the code should use the [URL.EscapedPath] method, which preserves -// the original encoding of Path. +// the original encoding of Path. The Fragment field is also stored in decoded form, +// use [URL.EscapedFragment] to retrieve the original encoding. // -// The RawPath field is an optional field which is only set when the default -// encoding of Path is different from the escaped path. See the EscapedPath method -// for more details. -// -// URL's String method uses the EscapedPath method to obtain the path. +// The [URL.String] method uses the [URL.EscapedPath] method to obtain the path. type URL struct { - Scheme string - Opaque string // encoded opaque data - User *Userinfo // username and password information - Host string // host or host:port (see Hostname and Port methods) - Path string // path (relative paths may omit leading slash) - RawPath string // encoded path hint (see EscapedPath method) - OmitHost bool // do not emit empty host (authority) - ForceQuery bool // append a query ('?') even if RawQuery is empty - RawQuery string // encoded query values, without '?' - Fragment string // fragment for references, without '#' - RawFragment string // encoded fragment hint (see EscapedFragment method) + Scheme string + Opaque string // encoded opaque data + User *Userinfo // username and password information + Host string // "host" or "host:port" (see Hostname and Port methods) + Path string // path (relative paths may omit leading slash) + Fragment string // fragment for references (without '#') + + // RawQuery contains the encoded query values, without the initial '?'. + // Use URL.Query to decode the query. + RawQuery string + + // RawPath is an optional field containing an encoded path hint. + // See the EscapedPath method for more details. + // + // In general, code should call EscapedPath instead of reading RawPath. + RawPath string + + // RawFragment is an optional field containing an encoded fragment hint. + // See the EscapedFragment method for more details. 
+ // + // In general, code should call EscapedFragment instead of reading RawFragment. + RawFragment string + + // ForceQuery indicates whether the original URL contained a query ('?') character. + // When set, the String method will include a trailing '?', even when RawQuery is empty. + ForceQuery bool + + // OmitHost indicates the URL has an empty host (authority). + // When set, the String method will not include the host when it is empty. + OmitHost bool } // User returns a [Userinfo] containing the provided username @@ -626,40 +642,61 @@ func parseAuthority(authority string) (user *Userinfo, host string, err error) { // parseHost parses host as an authority without user // information. That is, as host[:port]. func parseHost(host string) (string, error) { - if strings.HasPrefix(host, "[") { + if openBracketIdx := strings.LastIndex(host, "["); openBracketIdx != -1 { // Parse an IP-Literal in RFC 3986 and RFC 6874. // E.g., "[fe80::1]", "[fe80::1%25en0]", "[fe80::1]:80". - i := strings.LastIndex(host, "]") - if i < 0 { + closeBracketIdx := strings.LastIndex(host, "]") + if closeBracketIdx < 0 { return "", errors.New("missing ']' in host") } - colonPort := host[i+1:] + + colonPort := host[closeBracketIdx+1:] if !validOptionalPort(colonPort) { return "", fmt.Errorf("invalid port %q after host", colonPort) } + unescapedColonPort, err := unescape(colonPort, encodeHost) + if err != nil { + return "", err + } + hostname := host[openBracketIdx+1 : closeBracketIdx] + var unescapedHostname string // RFC 6874 defines that %25 (%-encoded percent) introduces // the zone identifier, and the zone identifier can use basically // any %-encoding it likes. That's different from the host, which // can only %-encode non-ASCII bytes. // We do impose some restrictions on the zone, to avoid stupidity // like newlines. - zone := strings.Index(host[:i], "%25") - if zone >= 0 { - host1, err := unescape(host[:zone], encodeHost) + zoneIdx := strings.Index(hostname, "%25") + if zoneIdx >= 0 { + hostPart, err := unescape(hostname[:zoneIdx], encodeHost) if err != nil { return "", err } - host2, err := unescape(host[zone:i], encodeZone) + zonePart, err := unescape(hostname[zoneIdx:], encodeZone) if err != nil { return "", err } - host3, err := unescape(host[i:], encodeHost) + unescapedHostname = hostPart + zonePart + } else { + var err error + unescapedHostname, err = unescape(hostname, encodeHost) if err != nil { return "", err } - return host1 + host2 + host3, nil } + + // Per RFC 3986, only a host identified by a valid + // IPv6 address can be enclosed by square brackets. + // This excludes any IPv4, but notably not IPv4-mapped addresses. + addr, err := netip.ParseAddr(unescapedHostname) + if err != nil { + return "", fmt.Errorf("invalid host: %w", err) + } + if addr.Is4() { + return "", errors.New("invalid IP-literal") + } + return "[" + unescapedHostname + "]" + unescapedColonPort, nil } else if i := strings.LastIndex(host, ":"); i != -1 { colonPort := host[i:] if !validOptionalPort(colonPort) { @@ -1008,7 +1045,16 @@ func (v Values) Encode() string { return "" } var buf strings.Builder - for _, k := range slices.Sorted(maps.Keys(v)) { + // To minimize allocations, we eschew iterators and pre-size the slice in + // which we collect v's keys. 
+ keys := make([]string, len(v)) + var i int + for k := range v { + keys[i] = k + i++ + } + slices.Sort(keys) + for _, k := range keys { vs := v[k] keyEscaped := QueryEscape(k) for _, v := range vs { diff --git a/src/net/url/url_test.go b/src/net/url/url_test.go index 16e08b63c6d098..501558403ac771 100644 --- a/src/net/url/url_test.go +++ b/src/net/url/url_test.go @@ -383,6 +383,16 @@ var urltests = []URLTest{ }, "", }, + // valid IPv6 host with port and path + { + "https://[2001:db8::1]:8443/test/path", + &URL{ + Scheme: "https", + Host: "[2001:db8::1]:8443", + Path: "/test/path", + }, + "", + }, // host subcomponent; IPv6 address with zone identifier in RFC 6874 { "http://[fe80::1%25en0]/", // alphanum zone identifier @@ -707,6 +717,24 @@ var parseRequestURLTests = []struct { // RFC 6874. {"http://[fe80::1%en0]/", false}, {"http://[fe80::1%en0]:8080/", false}, + + // Tests exercising RFC 3986 compliance + {"https://[1:2:3:4:5:6:7:8]", true}, // full IPv6 address + {"https://[2001:db8::a:b:c:d]", true}, // compressed IPv6 address + {"https://[fe80::1%25eth0]", true}, // link-local address with zone ID (interface name) + {"https://[fe80::abc:def%254]", true}, // link-local address with zone ID (interface index) + {"https://[2001:db8::1]/path", true}, // compressed IPv6 address with path + {"https://[fe80::1%25eth0]/path?query=1", true}, // link-local with zone, path, and query + + {"https://[::ffff:192.0.2.1]", true}, + {"https://[:1] ", false}, + {"https://[1:2:3:4:5:6:7:8:9]", false}, + {"https://[1::1::1]", false}, + {"https://[1:2:3:]", false}, + {"https://[ffff::127.0.0.4000]", false}, + {"https://[0:0::test.com]:80", false}, + {"https://[2001:db8::test.com]", false}, + {"https://[test.com]", false}, } func TestParseRequestURI(t *testing.T) { @@ -1080,6 +1108,17 @@ var encodeQueryTests = []EncodeQueryTest{ "b": {"b1", "b2", "b3"}, "c": {"c1", "c2", "c3"}, }, "a=a1&a=a2&a=a3&b=b1&b=b2&b=b3&c=c1&c=c2&c=c3"}, + {Values{ + "a": {"a"}, + "b": {"b"}, + "c": {"c"}, + "d": {"d"}, + "e": {"e"}, + "f": {"f"}, + "g": {"g"}, + "h": {"h"}, + "i": {"i"}, + }, "a=a&b=b&c=c&d=d&e=e&f=f&g=g&h=h&i=i"}, } func TestEncodeQuery(t *testing.T) { @@ -1090,6 +1129,17 @@ func TestEncodeQuery(t *testing.T) { } } +func BenchmarkEncodeQuery(b *testing.B) { + for _, tt := range encodeQueryTests { + b.Run(tt.expected, func(b *testing.B) { + b.ReportAllocs() + for b.Loop() { + tt.m.Encode() + } + }) + } +} + var resolvePathTests = []struct { base, ref, expected string }{ @@ -1643,6 +1693,18 @@ func TestParseErrors(t *testing.T) { {"cache_object:foo", true}, {"cache_object:foo/bar", true}, {"cache_object/:foo/bar", false}, + + {"http://[192.168.0.1]/", true}, // IPv4 in brackets + {"http://[192.168.0.1]:8080/", true}, // IPv4 in brackets with port + {"http://[::ffff:192.168.0.1]/", false}, // IPv4-mapped IPv6 in brackets + {"http://[::ffff:192.168.0.1000]/", true}, // Out of range IPv4-mapped IPv6 in brackets + {"http://[::ffff:192.168.0.1]:8080/", false}, // IPv4-mapped IPv6 in brackets with port + {"http://[::ffff:c0a8:1]/", false}, // IPv4-mapped IPv6 in brackets (hex) + {"http://[not-an-ip]/", true}, // invalid IP string in brackets + {"http://[fe80::1%foo]/", true}, // invalid zone format in brackets + {"http://[fe80::1", true}, // missing closing bracket + {"http://fe80::1]/", true}, // missing opening bracket + {"http://[test.com]/", true}, // domain name in brackets } for _, tt := range tests { u, err := Parse(tt.in) diff --git a/src/os/exec/exec_test.go b/src/os/exec/exec_test.go index 
3bded3dea604fb..1decebdc222d23 100644 --- a/src/os/exec/exec_test.go +++ b/src/os/exec/exec_test.go @@ -1378,8 +1378,8 @@ func TestWaitInterrupt(t *testing.T) { // The child process should be reported as failed, // and the grandchild will exit (or die by SIGPIPE) once the // stderr pipe is closed. - if ee := new(*exec.ExitError); !errors.As(err, ee) { - t.Errorf("Wait error = %v; want %T", err, *ee) + if ee, ok := errors.AsType[*exec.ExitError](err); !ok { + t.Errorf("Wait error = %v; want %T", err, ee) } }) @@ -1423,8 +1423,8 @@ func TestWaitInterrupt(t *testing.T) { // This command ignores SIGINT, sleeping until it is killed. // Wait should return the usual error for a killed process. - if ee := new(*exec.ExitError); !errors.As(err, ee) { - t.Errorf("Wait error = %v; want %T", err, *ee) + if ee, ok := errors.AsType[*exec.ExitError](err); !ok { + t.Errorf("Wait error = %v; want %T", err, ee) } }) @@ -1471,7 +1471,7 @@ func TestWaitInterrupt(t *testing.T) { t.Logf("stderr:\n%s", cmd.Stderr) t.Logf("[%d] %v", cmd.Process.Pid, err) - if ee := new(*exec.ExitError); !errors.As(err, ee) { + if _, ok := errors.AsType[*exec.ExitError](err); !ok { t.Errorf("Wait error = %v; want %v", err, ctx.Err()) } diff --git a/src/os/os_test.go b/src/os/os_test.go index 9f6eb13e1f96a9..536734901baff6 100644 --- a/src/os/os_test.go +++ b/src/os/os_test.go @@ -840,8 +840,7 @@ func TestReaddirOfFile(t *testing.T) { if err == nil { t.Error("Readdirnames succeeded; want non-nil error") } - var pe *PathError - if !errors.As(err, &pe) || pe.Path != f.Name() { + if pe, ok := errors.AsType[*PathError](err); !ok || pe.Path != f.Name() { t.Errorf("Readdirnames returned %q; want a PathError with path %q", err, f.Name()) } if len(names) > 0 { diff --git a/src/path/filepath/path.go b/src/path/filepath/path.go index 5ffd9f0b6c3fba..dc6975242a928c 100644 --- a/src/path/filepath/path.go +++ b/src/path/filepath/path.go @@ -248,7 +248,7 @@ func Rel(basepath, targpath string) (string, error) { buf[n] = Separator copy(buf[n+1:], targ[t0:]) } - return string(buf), nil + return Clean(string(buf)), nil } return targ[t0:], nil } diff --git a/src/path/filepath/path_test.go b/src/path/filepath/path_test.go index 7ea02a7c282378..efdb3c603592ee 100644 --- a/src/path/filepath/path_test.go +++ b/src/path/filepath/path_test.go @@ -1506,6 +1506,7 @@ var reltests = []RelTests{ {"/../../a/b", "/../../a/b/c/d", "c/d"}, {".", "a/b", "a/b"}, {".", "..", ".."}, + {"", "../../.", "../.."}, // can't do purely lexically {"..", ".", "err"}, diff --git a/src/reflect/all_test.go b/src/reflect/all_test.go index 2a8c5206624146..c6610b1968a834 100644 --- a/src/reflect/all_test.go +++ b/src/reflect/all_test.go @@ -6198,19 +6198,6 @@ func TestChanOfDir(t *testing.T) { } func TestChanOfGC(t *testing.T) { - done := make(chan bool, 1) - go func() { - select { - case <-done: - case <-time.After(5 * time.Second): - panic("deadlock in TestChanOfGC") - } - }() - - defer func() { - done <- true - }() - type T *uintptr tt := TypeOf(T(nil)) ct := ChanOf(BothDir, tt) diff --git a/src/runtime/asm_loong64.s b/src/runtime/asm_loong64.s index ee7f825e1f6681..586bf89a5d3729 100644 --- a/src/runtime/asm_loong64.s +++ b/src/runtime/asm_loong64.s @@ -6,6 +6,57 @@ #include "go_tls.h" #include "funcdata.h" #include "textflag.h" +#include "cgo/abi_loong64.h" + +// When building with -buildmode=c-shared, this symbol is called when the shared +// library is loaded. +TEXT _rt0_loong64_lib(SB),NOSPLIT,$168 + // Preserve callee-save registers. 
+ SAVE_R22_TO_R31(3*8) + SAVE_F24_TO_F31(13*8) + + // Initialize g as nil in case of using g later e.g. sigaction in cgo_sigaction.go + MOVV R0, g + + MOVV R4, _rt0_loong64_lib_argc<>(SB) + MOVV R5, _rt0_loong64_lib_argv<>(SB) + + // Synchronous initialization. + MOVV $runtime·libpreinit(SB), R19 + JAL (R19) + + // Create a new thread to do the runtime initialization and return. + MOVV _cgo_sys_thread_create(SB), R19 + BEQ R19, nocgo + MOVV $_rt0_loong64_lib_go(SB), R4 + MOVV $0, R5 + JAL (R19) + JMP restore + +nocgo: + MOVV $0x800000, R4 // stacksize = 8192KB + MOVV $_rt0_loong64_lib_go(SB), R5 + MOVV R4, 8(R3) + MOVV R5, 16(R3) + MOVV $runtime·newosproc0(SB), R19 + JAL (R19) + +restore: + // Restore callee-save registers. + RESTORE_R22_TO_R31(3*8) + RESTORE_F24_TO_F31(13*8) + RET + +TEXT _rt0_loong64_lib_go(SB),NOSPLIT,$0 + MOVV _rt0_loong64_lib_argc<>(SB), R4 + MOVV _rt0_loong64_lib_argv<>(SB), R5 + MOVV $runtime·rt0_go(SB),R19 + JMP (R19) + +DATA _rt0_loong64_lib_argc<>(SB)/8, $0 +GLOBL _rt0_loong64_lib_argc<>(SB),NOPTR, $8 +DATA _rt0_loong64_lib_argv<>(SB)/8, $0 +GLOBL _rt0_loong64_lib_argv<>(SB),NOPTR, $8 #define REGCTXT R29 diff --git a/src/runtime/asm_ppc64x.s b/src/runtime/asm_ppc64x.s index fc70fa82046056..88d03dcc72c1e6 100644 --- a/src/runtime/asm_ppc64x.s +++ b/src/runtime/asm_ppc64x.s @@ -9,6 +9,65 @@ #include "funcdata.h" #include "textflag.h" #include "asm_ppc64x.h" +#include "cgo/abi_ppc64x.h" + +// This is called using the host ABI. argc and argv arguments +// should be in R3 and R4 respectively. +TEXT _rt0_ppc64x_lib(SB),NOSPLIT|NOFRAME,$0 + // Convert to Go ABI, and Allocate argument storage for call to newosproc0. + STACK_AND_SAVE_HOST_TO_GO_ABI(16) + + MOVD R3, _rt0_ppc64x_lib_argc<>(SB) + MOVD R4, _rt0_ppc64x_lib_argv<>(SB) + + // Synchronous initialization. + MOVD $runtime·reginit(SB), R12 + MOVD R12, CTR + BL (CTR) + MOVD $runtime·libpreinit(SB), R12 + MOVD R12, CTR + BL (CTR) + + // Create a new thread to do the runtime initialization and return. + MOVD _cgo_sys_thread_create(SB), R12 + CMP $0, R12 + BEQ nocgo + MOVD $_rt0_ppc64x_lib_go(SB), R3 + MOVD $0, R4 + MOVD R12, CTR + BL (CTR) + BR done + +nocgo: + MOVD $0x800000, R12 // stacksize = 8192KB + MOVD R12, 8+FIXED_FRAME(R1) + MOVD $_rt0_ppc64x_lib_go(SB), R12 + MOVD R12, 16+FIXED_FRAME(R1) + MOVD $runtime·newosproc0(SB),R12 + MOVD R12, CTR + BL (CTR) + +done: + UNSTACK_AND_RESTORE_GO_TO_HOST_ABI(16) + RET + +#ifdef GO_PPC64X_HAS_FUNCDESC +DEFINE_PPC64X_FUNCDESC(_rt0_ppc64x_lib_go, __rt0_ppc64x_lib_go) +TEXT __rt0_ppc64x_lib_go(SB),NOSPLIT,$0 +#else +TEXT _rt0_ppc64x_lib_go(SB),NOSPLIT,$0 +#endif + MOVD _rt0_ppc64x_lib_argc<>(SB), R3 + MOVD _rt0_ppc64x_lib_argv<>(SB), R4 + MOVD $runtime·rt0_go(SB), R12 + MOVD R12, CTR + BR (CTR) + +DATA _rt0_ppc64x_lib_argc<>(SB)/8, $0 +GLOBL _rt0_ppc64x_lib_argc<>(SB),NOPTR, $8 +DATA _rt0_ppc64x_lib_argv<>(SB)/8, $0 +GLOBL _rt0_ppc64x_lib_argv<>(SB),NOPTR, $8 + #ifdef GOOS_aix #define cgoCalleeStackSize 48 diff --git a/src/runtime/asm_riscv64.s b/src/runtime/asm_riscv64.s index 6b16d03c9a8070..efe6047419e8b3 100644 --- a/src/runtime/asm_riscv64.s +++ b/src/runtime/asm_riscv64.s @@ -6,6 +6,104 @@ #include "funcdata.h" #include "textflag.h" + +// When building with -buildmode=c-shared, this symbol is called when the shared +// library is loaded. +TEXT _rt0_riscv64_lib(SB),NOSPLIT,$224 + // Preserve callee-save registers, along with X1 (LR). 
+ MOV X1, (8*3)(X2) + MOV X8, (8*4)(X2) + MOV X9, (8*5)(X2) + MOV X18, (8*6)(X2) + MOV X19, (8*7)(X2) + MOV X20, (8*8)(X2) + MOV X21, (8*9)(X2) + MOV X22, (8*10)(X2) + MOV X23, (8*11)(X2) + MOV X24, (8*12)(X2) + MOV X25, (8*13)(X2) + MOV X26, (8*14)(X2) + MOV g, (8*15)(X2) + MOVD F8, (8*16)(X2) + MOVD F9, (8*17)(X2) + MOVD F18, (8*18)(X2) + MOVD F19, (8*19)(X2) + MOVD F20, (8*20)(X2) + MOVD F21, (8*21)(X2) + MOVD F22, (8*22)(X2) + MOVD F23, (8*23)(X2) + MOVD F24, (8*24)(X2) + MOVD F25, (8*25)(X2) + MOVD F26, (8*26)(X2) + MOVD F27, (8*27)(X2) + + // Initialize g as nil in case of using g later e.g. sigaction in cgo_sigaction.go + MOV X0, g + + MOV A0, _rt0_riscv64_lib_argc<>(SB) + MOV A1, _rt0_riscv64_lib_argv<>(SB) + + // Synchronous initialization. + MOV $runtime·libpreinit(SB), T0 + JALR RA, T0 + + // Create a new thread to do the runtime initialization and return. + MOV _cgo_sys_thread_create(SB), T0 + BEQZ T0, nocgo + MOV $_rt0_riscv64_lib_go(SB), A0 + MOV $0, A1 + JALR RA, T0 + JMP restore + +nocgo: + MOV $0x800000, A0 // stacksize = 8192KB + MOV $_rt0_riscv64_lib_go(SB), A1 + MOV A0, 8(X2) + MOV A1, 16(X2) + MOV $runtime·newosproc0(SB), T0 + JALR RA, T0 + +restore: + // Restore callee-save registers, along with X1 (LR). + MOV (8*3)(X2), X1 + MOV (8*4)(X2), X8 + MOV (8*5)(X2), X9 + MOV (8*6)(X2), X18 + MOV (8*7)(X2), X19 + MOV (8*8)(X2), X20 + MOV (8*9)(X2), X21 + MOV (8*10)(X2), X22 + MOV (8*11)(X2), X23 + MOV (8*12)(X2), X24 + MOV (8*13)(X2), X25 + MOV (8*14)(X2), X26 + MOV (8*15)(X2), g + MOVD (8*16)(X2), F8 + MOVD (8*17)(X2), F9 + MOVD (8*18)(X2), F18 + MOVD (8*19)(X2), F19 + MOVD (8*20)(X2), F20 + MOVD (8*21)(X2), F21 + MOVD (8*22)(X2), F22 + MOVD (8*23)(X2), F23 + MOVD (8*24)(X2), F24 + MOVD (8*25)(X2), F25 + MOVD (8*26)(X2), F26 + MOVD (8*27)(X2), F27 + + RET + +TEXT _rt0_riscv64_lib_go(SB),NOSPLIT,$0 + MOV _rt0_riscv64_lib_argc<>(SB), A0 + MOV _rt0_riscv64_lib_argv<>(SB), A1 + MOV $runtime·rt0_go(SB), T0 + JALR ZERO, T0 + +DATA _rt0_riscv64_lib_argc<>(SB)/8, $0 +GLOBL _rt0_riscv64_lib_argc<>(SB),NOPTR, $8 +DATA _rt0_riscv64_lib_argv<>(SB)/8, $0 +GLOBL _rt0_riscv64_lib_argv<>(SB),NOPTR, $8 + // func rt0_go() TEXT runtime·rt0_go(SB),NOSPLIT|TOPFRAME,$0 // X2 = stack; A0 = argc; A1 = argv diff --git a/src/runtime/cgo/gcc_libinit_windows.c b/src/runtime/cgo/gcc_libinit_windows.c index 926f9168434638..7e7ff3e667266f 100644 --- a/src/runtime/cgo/gcc_libinit_windows.c +++ b/src/runtime/cgo/gcc_libinit_windows.c @@ -15,15 +15,6 @@ #include "libcgo.h" #include "libcgo_windows.h" -// Ensure there's one symbol marked __declspec(dllexport). -// If there are no exported symbols, the unfortunate behavior of -// the binutils linker is to also strip the relocations table, -// resulting in non-PIE binary. The other option is the -// --export-all-symbols flag, but we don't need to export all symbols -// and this may overflow the export table (#40795). -// See https://sourceware.org/bugzilla/show_bug.cgi?id=19011 -__declspec(dllexport) int _cgo_dummy_export; - static volatile LONG runtime_init_once_gate = 0; static volatile LONG runtime_init_once_done = 0; diff --git a/src/runtime/cgo/windows.go b/src/runtime/cgo/windows.go new file mode 100644 index 00000000000000..7ba61753dffda2 --- /dev/null +++ b/src/runtime/cgo/windows.go @@ -0,0 +1,22 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build windows + +package cgo + +import _ "unsafe" // for go:linkname + +// _cgo_stub_export is only used to ensure there's at least one symbol +// in the .def file passed to the external linker. +// If there are no exported symbols, the unfortunate behavior of +// the binutils linker is to also strip the relocations table, +// resulting in non-PIE binary. The other option is the +// --export-all-symbols flag, but we don't need to export all symbols +// and this may overflow the export table (#40795). +// See https://sourceware.org/bugzilla/show_bug.cgi?id=19011 +// +//go:cgo_export_static _cgo_stub_export +//go:linkname _cgo_stub_export _cgo_stub_export +var _cgo_stub_export uintptr diff --git a/src/runtime/chan.go b/src/runtime/chan.go index 639d29dc8337f0..3320c248b458b6 100644 --- a/src/runtime/chan.go +++ b/src/runtime/chan.go @@ -263,11 +263,11 @@ func chansend(c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool { } // No stack splits between assigning elem and enqueuing mysg // on gp.waiting where copystack can find it. - mysg.elem = ep + mysg.elem.set(ep) mysg.waitlink = nil mysg.g = gp mysg.isSelect = false - mysg.c = c + mysg.c.set(c) gp.waiting = mysg gp.param = nil c.sendq.enqueue(mysg) @@ -298,7 +298,7 @@ func chansend(c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool { if mysg.releasetime > 0 { blockevent(mysg.releasetime-t0, 2) } - mysg.c = nil + mysg.c.set(nil) releaseSudog(mysg) if closed { if c.closed == 0 { @@ -336,9 +336,9 @@ func send(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int) { c.sendx = c.recvx // c.sendx = (c.sendx+1) % c.dataqsiz } } - if sg.elem != nil { + if sg.elem.get() != nil { sendDirect(c.elemtype, sg, ep) - sg.elem = nil + sg.elem.set(nil) } gp := sg.g unlockf() @@ -395,7 +395,7 @@ func sendDirect(t *_type, sg *sudog, src unsafe.Pointer) { // Once we read sg.elem out of sg, it will no longer // be updated if the destination's stack gets copied (shrunk). // So make sure that no preemption points can happen between read & use. - dst := sg.elem + dst := sg.elem.get() typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.Size_) // No need for cgo write barrier checks because dst is always // Go memory. @@ -406,7 +406,7 @@ func recvDirect(t *_type, sg *sudog, dst unsafe.Pointer) { // dst is on our stack or the heap, src is on another stack. // The channel is locked, so src will not move during this // operation. - src := sg.elem + src := sg.elem.get() typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.Size_) memmove(dst, src, t.Size_) } @@ -441,9 +441,9 @@ func closechan(c *hchan) { if sg == nil { break } - if sg.elem != nil { - typedmemclr(c.elemtype, sg.elem) - sg.elem = nil + if sg.elem.get() != nil { + typedmemclr(c.elemtype, sg.elem.get()) + sg.elem.set(nil) } if sg.releasetime != 0 { sg.releasetime = cputicks() @@ -463,7 +463,7 @@ func closechan(c *hchan) { if sg == nil { break } - sg.elem = nil + sg.elem.set(nil) if sg.releasetime != 0 { sg.releasetime = cputicks() } @@ -642,13 +642,13 @@ func chanrecv(c *hchan, ep unsafe.Pointer, block bool) (selected, received bool) } // No stack splits between assigning elem and enqueuing mysg // on gp.waiting where copystack can find it. 
- mysg.elem = ep + mysg.elem.set(ep) mysg.waitlink = nil gp.waiting = mysg mysg.g = gp mysg.isSelect = false - mysg.c = c + mysg.c.set(c) gp.param = nil c.recvq.enqueue(mysg) if c.timer != nil { @@ -680,7 +680,7 @@ func chanrecv(c *hchan, ep unsafe.Pointer, block bool) (selected, received bool) } success := mysg.success gp.param = nil - mysg.c = nil + mysg.c.set(nil) releaseSudog(mysg) return true, success } @@ -727,14 +727,14 @@ func recv(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int) { typedmemmove(c.elemtype, ep, qp) } // copy data from sender to queue - typedmemmove(c.elemtype, qp, sg.elem) + typedmemmove(c.elemtype, qp, sg.elem.get()) c.recvx++ if c.recvx == c.dataqsiz { c.recvx = 0 } c.sendx = c.recvx // c.sendx = (c.sendx+1) % c.dataqsiz } - sg.elem = nil + sg.elem.set(nil) gp := sg.g unlockf() gp.param = unsafe.Pointer(sg) diff --git a/src/runtime/conv_wasm_test.go b/src/runtime/conv_wasm_test.go index 5054fca04dc40a..3979a7b618028b 100644 --- a/src/runtime/conv_wasm_test.go +++ b/src/runtime/conv_wasm_test.go @@ -11,6 +11,8 @@ import ( var res int64 var ures uint64 +// TODO: This test probably should be in a different place. + func TestFloatTruncation(t *testing.T) { testdata := []struct { input float64 @@ -21,36 +23,37 @@ func TestFloatTruncation(t *testing.T) { // max +- 1 { input: 0x7fffffffffffffff, - convInt64: -0x8000000000000000, + convInt64: 0x7fffffffffffffff, convUInt64: 0x8000000000000000, }, // For out-of-bounds conversion, the result is implementation-dependent. - // This test verifies the implementation of wasm architecture. + // This test verifies the implementation of wasm architecture, which is, + // saturating to the min/max value. { input: 0x8000000000000000, - convInt64: -0x8000000000000000, + convInt64: 0x7fffffffffffffff, convUInt64: 0x8000000000000000, }, { input: 0x7ffffffffffffffe, - convInt64: -0x8000000000000000, + convInt64: 0x7fffffffffffffff, convUInt64: 0x8000000000000000, }, // neg max +- 1 { input: -0x8000000000000000, convInt64: -0x8000000000000000, - convUInt64: 0x8000000000000000, + convUInt64: 0, }, { input: -0x8000000000000001, convInt64: -0x8000000000000000, - convUInt64: 0x8000000000000000, + convUInt64: 0, }, { input: -0x7fffffffffffffff, convInt64: -0x8000000000000000, - convUInt64: 0x8000000000000000, + convUInt64: 0, }, // trunc point +- 1 { @@ -60,7 +63,7 @@ func TestFloatTruncation(t *testing.T) { }, { input: 0x7ffffffffffffe00, - convInt64: -0x8000000000000000, + convInt64: 0x7fffffffffffffff, convUInt64: 0x8000000000000000, }, { @@ -72,48 +75,48 @@ func TestFloatTruncation(t *testing.T) { { input: -0x7ffffffffffffdff, convInt64: -0x7ffffffffffffc00, - convUInt64: 0x8000000000000000, + convUInt64: 0, }, { input: -0x7ffffffffffffe00, convInt64: -0x8000000000000000, - convUInt64: 0x8000000000000000, + convUInt64: 0, }, { input: -0x7ffffffffffffdfe, convInt64: -0x7ffffffffffffc00, - convUInt64: 0x8000000000000000, + convUInt64: 0, }, // umax +- 1 { input: 0xffffffffffffffff, - convInt64: -0x8000000000000000, - convUInt64: 0x8000000000000000, + convInt64: 0x7fffffffffffffff, + convUInt64: 0xffffffffffffffff, }, { input: 0x10000000000000000, - convInt64: -0x8000000000000000, - convUInt64: 0x8000000000000000, + convInt64: 0x7fffffffffffffff, + convUInt64: 0xffffffffffffffff, }, { input: 0xfffffffffffffffe, - convInt64: -0x8000000000000000, - convUInt64: 0x8000000000000000, + convInt64: 0x7fffffffffffffff, + convUInt64: 0xffffffffffffffff, }, // umax trunc +- 1 { input: 0xfffffffffffffbff, - convInt64: -0x8000000000000000, 
+ convInt64: 0x7fffffffffffffff, convUInt64: 0xfffffffffffff800, }, { input: 0xfffffffffffffc00, - convInt64: -0x8000000000000000, - convUInt64: 0x8000000000000000, + convInt64: 0x7fffffffffffffff, + convUInt64: 0xffffffffffffffff, }, { input: 0xfffffffffffffbfe, - convInt64: -0x8000000000000000, + convInt64: 0x7fffffffffffffff, convUInt64: 0xfffffffffffff800, }, } } diff --git a/src/runtime/crash_test.go b/src/runtime/crash_test.go index 2db86e0562d6ae..2b8ca549ad84f2 100644 --- a/src/runtime/crash_test.go +++ b/src/runtime/crash_test.go @@ -186,6 +186,22 @@ func buildTestProg(t *testing.T, binary string, flags ...string) (string, error) t.Logf("running %v", cmd) cmd.Dir = "testdata/" + binary cmd = testenv.CleanCmdEnv(cmd) + + // If tests need any experimental flags, add them here. + // + // TODO(vsaioc): Remove `goroutineleakprofile` once the feature is no longer experimental. + edited := false + for i := range cmd.Env { + e := cmd.Env[i] + if _, vars, ok := strings.Cut(e, "GOEXPERIMENT="); ok { + cmd.Env[i] = "GOEXPERIMENT=" + vars + ",goroutineleakprofile" + edited = true + } + } + if !edited { + cmd.Env = append(cmd.Env, "GOEXPERIMENT=goroutineleakprofile") + } + out, err := cmd.CombinedOutput() if err != nil { target.err = fmt.Errorf("building %s %v: %v\n%s", binary, flags, err, out) diff --git a/src/runtime/decoratemappings_test.go b/src/runtime/decoratemappings_test.go new file mode 100644 index 00000000000000..7d1121c125df08 --- /dev/null +++ b/src/runtime/decoratemappings_test.go @@ -0,0 +1,72 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime_test + +import ( + "os" + "regexp" + "runtime" + "testing" +) + +func validateMapLabels(t *testing.T, labels []string) { + // These are the specific region labels that get added during the + // runtime phase. Hence they are the ones that need to be confirmed as + // present at the time the test reads its own region labels, which + // is sufficient to validate that the default `decoratemappings` value + // (enabled) was set early enough in the init process. 
+ regions := map[string]bool{ + "allspans array": false, + "gc bits": false, + "heap": false, + "heap index": false, + "heap reservation": false, + "immortal metadata": false, + "page alloc": false, + "page alloc index": false, + "page summary": false, + "scavenge index": false, + } + for _, label := range labels { + if _, ok := regions[label]; !ok { + t.Logf("unexpected region label found: \"%s\"", label) + } + regions[label] = true + } + for label, found := range regions { + if !found { + t.Logf("region label missing: \"%s\"", label) + } + } +} + +func TestDecorateMappings(t *testing.T) { + if runtime.GOOS != "linux" { + t.Skip("decoratemappings is only supported on Linux") + // /proc/self/maps is also Linux-specific + } + + var labels []string + if rawMaps, err := os.ReadFile("/proc/self/maps"); err != nil { + t.Fatalf("failed to read /proc/self/maps: %v", err) + } else { + t.Logf("maps:%s\n", string(rawMaps)) + matches := regexp.MustCompile("[^[]+ \\[anon: Go: (.+)\\]\n").FindAllSubmatch(rawMaps, -1) + for _, match_pair := range matches { + // match_pair consists of the matching substring and the parenthesized group + labels = append(labels, string(match_pair[1])) + } + } + t.Logf("DebugDecorateMappings: %v", *runtime.DebugDecorateMappings) + if *runtime.DebugDecorateMappings != 0 && runtime.SetVMANameSupported() { + validateMapLabels(t, labels) + } else { + if len(labels) > 0 { + t.Errorf("unexpected mapping labels present: %v", labels) + } else { + t.Skip("mapping labels absent as expected") + } + } +} diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go index 2a701115685411..9f2fcacc30ee5c 100644 --- a/src/runtime/export_test.go +++ b/src/runtime/export_test.go @@ -1936,3 +1936,7 @@ func (t *TraceStackTable) Reset() { func TraceStack(gp *G, tab *TraceStackTable) { traceStack(0, gp, (*traceStackTable)(tab)) } + +var DebugDecorateMappings = &debug.decoratemappings + +func SetVMANameSupported() bool { return setVMANameSupported() } diff --git a/src/runtime/goroutineleakprofile_test.go b/src/runtime/goroutineleakprofile_test.go new file mode 100644 index 00000000000000..f5d2dd6372e54e --- /dev/null +++ b/src/runtime/goroutineleakprofile_test.go @@ -0,0 +1,603 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime_test + +import ( + "fmt" + "internal/testenv" + "os" + "regexp" + "strings" + "testing" +) + +func TestGoroutineLeakProfile(t *testing.T) { + if strings.Contains(os.Getenv("GOFLAGS"), "mayMoreStackPreempt") { + // Some tests have false negatives under mayMoreStackPreempt. This may be a test-only issue, + // but needs more investigation. + testenv.SkipFlaky(t, 75729) + } + + // Goroutine leak test case. + // + // Test cases can be configured with a test name, the name of the entry point function, + // a set of expected leaks identified by regular expressions, and the number of times + // the test should be repeated. + // + // Repeated runs reduce flakiness in some tests. + type testCase struct { + name string + simple bool + repetitions int + expectedLeaks map[*regexp.Regexp]bool + + // flakyLeaks are goroutine leaks that are too flaky to be reliably detected. + // Still, they might pop up every once in a while. The test will pass regardless + // of whether they occur or not, as they are not unexpected. + // + // Note that all flaky leaks are true positives, i.e. 
real goroutine leaks, + // and it is only their detection that is unreliable due to scheduling + // non-determinism. + flakyLeaks map[*regexp.Regexp]struct{} + } + + // makeAnyTest is a short-hand for creating test cases. + // Each of the leaks in the list is identified by a regular expression. + // If a leak is flaky, it is added to the flakyLeaks map. + makeAnyTest := func(name string, flaky bool, repetitions int, leaks ...string) testCase { + tc := testCase{ + name: name, + expectedLeaks: make(map[*regexp.Regexp]bool, len(leaks)), + flakyLeaks: make(map[*regexp.Regexp]struct{}, len(leaks)), + // Make sure the test is repeated at least once. + repetitions: repetitions | 1, + } + + for _, leak := range leaks { + if !flaky { + tc.expectedLeaks[regexp.MustCompile(leak)] = false + } else { + tc.flakyLeaks[regexp.MustCompile(leak)] = struct{}{} + } + } + + return tc + } + + // makeTest is a short-hand for creating non-flaky test cases. + makeTest := func(name string, leaks ...string) testCase { + tcase := makeAnyTest(name, false, 2, leaks...) + tcase.simple = true + return tcase + } + + // makeFlakyTest is a short-hand for creating flaky test cases. + makeFlakyTest := func(name string, leaks ...string) testCase { + if testing.Short() { + return makeAnyTest(name, true, 2, leaks...) + } + return makeAnyTest(name, true, 10, leaks...) + } + + goroutineHeader := regexp.MustCompile(`goroutine \d+ \[`) + + // extractLeaks takes the output of a test and splits it into a + // list of strings denoting goroutine leaks. + // + // If the input is: + // + // goroutine 1 [wait reason (leaked)]: + // main.leaked() + // ./testdata/testgoroutineleakprofile/foo.go:37 +0x100 + // created by main.main() + // ./testdata/testgoroutineleakprofile/main.go:10 +0x20 + // + // goroutine 2 [wait reason (leaked)]: + // main.leaked2() + // ./testdata/testgoroutineleakprofile/foo.go:37 +0x100 + // created by main.main() + // ./testdata/testgoroutineleakprofile/main.go:10 +0x20 + // + // The output is (as a list of strings): + // + // leaked() [wait reason] + // leaked2() [wait reason] + extractLeaks := func(output string) []string { + stacks := strings.Split(output, "\n\ngoroutine") + var leaks []string + for _, stack := range stacks { + lines := strings.Split(stack, "\n") + if len(lines) < 5 { + // Expecting at least the following lines (where n=len(lines)-1): + // + // [0] goroutine n [wait reason (leaked)] + // ... + // [n-3] bottom.leak.frame(...) + // [n-2] ./bottom/leak/frame/source.go:line + // [n-1] created by go.instruction() + // [n] ./go/instruction/source.go:line + continue + } + + if !strings.Contains(lines[0], "(leaked)") { + // Ignore non-leaked goroutines. + continue + } + + // Get the wait reason from the goroutine header. + header := lines[0] + waitReason := goroutineHeader.ReplaceAllString(header, "[") + waitReason = strings.ReplaceAll(waitReason, " (leaked)", "") + + // Get the function name from the stack trace (should be two lines above `created by`). + var funcName string + for i := len(lines) - 1; i >= 0; i-- { + if strings.Contains(lines[i], "created by") { + funcName = strings.TrimPrefix(lines[i-2], "main.") + break + } + } + if funcName == "" { + t.Fatalf("failed to extract function name from stack trace: %s", lines) + } + + leaks = append(leaks, funcName+" "+waitReason) + } + return leaks + } + + // Micro tests involve very simple leaks for each type of concurrency primitive operation. 
+ microTests := []testCase{ + makeTest("NilRecv", + `NilRecv\.func1\(.* \[chan receive \(nil chan\)\]`, + ), + makeTest("NilSend", + `NilSend\.func1\(.* \[chan send \(nil chan\)\]`, + ), + makeTest("SelectNoCases", + `SelectNoCases\.func1\(.* \[select \(no cases\)\]`, + ), + makeTest("ChanRecv", + `ChanRecv\.func1\(.* \[chan receive\]`, + ), + makeTest("ChanSend", + `ChanSend\.func1\(.* \[chan send\]`, + ), + makeTest("Select", + `Select\.func1\(.* \[select\]`, + ), + makeTest("WaitGroup", + `WaitGroup\.func1\(.* \[sync\.WaitGroup\.Wait\]`, + ), + makeTest("MutexStack", + `MutexStack\.func1\(.* \[sync\.Mutex\.Lock\]`, + ), + makeTest("MutexHeap", + `MutexHeap\.func1.1\(.* \[sync\.Mutex\.Lock\]`, + ), + makeTest("Cond", + `Cond\.func1\(.* \[sync\.Cond\.Wait\]`, + ), + makeTest("RWMutexRLock", + `RWMutexRLock\.func1\(.* \[sync\.RWMutex\.RLock\]`, + ), + makeTest("RWMutexLock", + `RWMutexLock\.func1\(.* \[sync\.(RW)?Mutex\.Lock\]`, + ), + makeTest("Mixed", + `Mixed\.func1\(.* \[sync\.WaitGroup\.Wait\]`, + `Mixed\.func1.1\(.* \[chan send\]`, + ), + makeTest("NoLeakGlobal"), + } + + // Stress tests are flaky and we do not strictly care about their output. + // They are only intended to stress the goroutine leak detector and profiling + // infrastructure in interesting ways. + stressTestCases := []testCase{ + makeFlakyTest("SpawnGC", + `spawnGC.func1\(.* \[chan receive\]`, + ), + makeTest("DaisyChain"), + } + + // Common goroutine leak patterns. + // Extracted from "Unveiling and Vanquishing Goroutine Leaks in Enterprise Microservices: A Dynamic Analysis Approach" + // doi:10.1109/CGO57630.2024.10444835 + patternTestCases := []testCase{ + makeTest("NoCloseRange", + `noCloseRange\(.* \[chan send\]`, + `noCloseRange\.func1\(.* \[chan receive\]`, + ), + makeTest("MethodContractViolation", + `worker\.Start\.func1\(.* \[select\]`, + ), + makeTest("DoubleSend", + `DoubleSend\.func3\(.* \[chan send\]`, + ), + makeTest("EarlyReturn", + `earlyReturn\.func1\(.* \[chan send\]`, + ), + makeTest("NCastLeak", + `nCastLeak\.func1\(.* \[chan send\]`, + `NCastLeak\.func2\(.* \[chan receive\]`, + ), + makeTest("Timeout", + // (vsaioc): Timeout is *theoretically* flaky, but the + // pseudo-random choice for select case branches makes it + // practically impossible for it to fail. + `timeout\.func1\(.* \[chan send\]`, + ), + } + + // GoKer tests from "GoBench: A Benchmark Suite of Real-World Go Concurrency Bugs". + // Refer to testdata/testgoroutineleakprofile/goker/README.md. + // + // This list is curated for tests that are not excessively flaky. + // Some tests are also excluded because they are redundant. + // + // TODO(vsaioc): Some of these might be removable (their patterns may overlap). 
+ gokerTestCases := []testCase{ + makeFlakyTest("Cockroach584", + `Cockroach584\.func2\(.* \[sync\.Mutex\.Lock\]`, + ), + makeFlakyTest("Cockroach1055", + `Cockroach1055\.func2\(.* \[chan receive\]`, + `Cockroach1055\.func2\.2\(.* \[sync\.WaitGroup\.Wait\]`, + `Cockroach1055\.func2\.1\(.* \[chan receive\]`, + `Cockroach1055\.func2\.1\(.* \[sync\.Mutex\.Lock\]`, + ), + makeFlakyTest("Cockroach1462", + `\(\*Stopper_cockroach1462\)\.RunWorker\.func1\(.* \[chan send\]`, + `Cockroach1462\.func2\(.* \[sync\.WaitGroup\.Wait\]`, + ), + makeFlakyTest("Cockroach2448", + `\(\*Store_cockroach2448\)\.processRaft\(.* \[select\]`, + `\(\*state_cockroach2448\)\.start\(.* \[select\]`, + ), + makeFlakyTest("Cockroach3710", + `\(\*Store_cockroach3710\)\.ForceRaftLogScanAndProcess\(.* \[sync\.RWMutex\.RLock\]`, + `\(\*Store_cockroach3710\)\.processRaft\.func1\(.* \[sync\.RWMutex\.Lock\]`, + ), + makeFlakyTest("Cockroach6181", + `testRangeCacheCoalescedRequests_cockroach6181\(.* \[sync\.WaitGroup\.Wait\]`, + `testRangeCacheCoalescedRequests_cockroach6181\.func1\.1\(.* \[sync\.(RW)?Mutex\.Lock\]`, + `testRangeCacheCoalescedRequests_cockroach6181\.func1\.1\(.* \[sync\.RWMutex\.RLock\]`, + ), + makeTest("Cockroach7504", + `Cockroach7504\.func2\.1.* \[sync\.Mutex\.Lock\]`, + `Cockroach7504\.func2\.2.* \[sync\.Mutex\.Lock\]`, + ), + makeFlakyTest("Cockroach9935", + `\(\*loggingT_cockroach9935\)\.outputLogEntry\(.* \[sync\.Mutex\.Lock\]`, + ), + makeFlakyTest("Cockroach10214", + `\(*Store_cockroach10214\)\.sendQueuedHeartbeats\(.* \[sync\.Mutex\.Lock\]`, + `\(*Replica_cockroach10214\)\.tick\(.* \[sync\.Mutex\.Lock\]`, + ), + makeFlakyTest("Cockroach10790", + `\(\*Replica_cockroach10790\)\.beginCmds\.func1\(.* \[chan receive\]`, + ), + makeTest("Cockroach13197", + `\(\*Tx_cockroach13197\)\.awaitDone\(.* \[chan receive\]`, + ), + makeTest("Cockroach13755", + `\(\*Rows_cockroach13755\)\.awaitDone\(.* \[chan receive\]`, + ), + makeFlakyTest("Cockroach16167", + `Cockroach16167\.func2\(.* \[sync\.RWMutex\.RLock\]`, + `\(\*Executor_cockroach16167\)\.Start\(.* \[sync\.RWMutex\.Lock\]`, + ), + makeFlakyTest("Cockroach18101", + `restore_cockroach18101\.func1\(.* \[chan send\]`, + ), + makeTest("Cockroach24808", + `Cockroach24808\.func2\(.* \[chan send\]`, + ), + makeTest("Cockroach25456", + `Cockroach25456\.func2\(.* \[chan receive\]`, + ), + makeTest("Cockroach35073", + `Cockroach35073\.func2.1\(.* \[chan send\]`, + `Cockroach35073\.func2\(.* \[chan send\]`, + ), + makeTest("Cockroach35931", + `Cockroach35931\.func2\(.* \[chan send\]`, + ), + makeTest("Etcd5509", + `Etcd5509\.func2\(.* \[sync\.RWMutex\.Lock\]`, + ), + makeTest("Etcd6708", + `Etcd6708\.func2\(.* \[sync\.RWMutex\.RLock\]`, + ), + makeFlakyTest("Etcd6857", + `\(\*node_etcd6857\)\.Status\(.* \[chan send\]`, + ), + makeFlakyTest("Etcd6873", + `\(\*watchBroadcasts_etcd6873\)\.stop\(.* \[chan receive\]`, + `newWatchBroadcasts_etcd6873\.func1\(.* \[sync\.Mutex\.Lock\]`, + ), + makeFlakyTest("Etcd7492", + `Etcd7492\.func2\(.* \[sync\.WaitGroup\.Wait\]`, + `Etcd7492\.func2\.1\(.* \[chan send\]`, + `\(\*simpleTokenTTLKeeper_etcd7492\)\.run\(.* \[sync\.Mutex\.Lock\]`, + ), + makeFlakyTest("Etcd7902", + `doRounds_etcd7902\.func1\(.* \[chan receive\]`, + `doRounds_etcd7902\.func1\(.* \[sync\.Mutex\.Lock\]`, + `runElectionFunc_etcd7902\(.* \[sync\.WaitGroup\.Wait\]`, + ), + makeTest("Etcd10492", + `Etcd10492\.func2\(.* \[sync\.Mutex\.Lock\]`, + ), + makeTest("Grpc660", + `\(\*benchmarkClient_grpc660\)\.doCloseLoopUnary\.func1\(.* \[chan send\]`, + ), + 
makeFlakyTest("Grpc795", + `\(\*Server_grpc795\)\.Serve\(.* \[sync\.Mutex\.Lock\]`, + `testServerGracefulStopIdempotent_grpc795\(.* \[sync\.Mutex\.Lock\]`, + ), + makeTest("Grpc862", + `DialContext_grpc862\.func2\(.* \[chan receive\]`), + makeTest("Grpc1275", + `testInflightStreamClosing_grpc1275\.func1\(.* \[chan receive\]`), + makeTest("Grpc1424", + `DialContext_grpc1424\.func1\(.* \[chan receive\]`), + makeFlakyTest("Grpc1460", + `\(\*http2Client_grpc1460\)\.keepalive\(.* \[chan receive\]`, + `\(\*http2Client_grpc1460\)\.NewStream\(.* \[sync\.Mutex\.Lock\]`, + ), + makeFlakyTest("Grpc3017", + // grpc/3017 involves a goroutine leak that also simultaneously engages many GC assists. + `Grpc3017\.func2\(.* \[chan receive\]`, + `Grpc3017\.func2\.1\(.* \[sync\.Mutex\.Lock\]`, + `\(\*lbCacheClientConn_grpc3017\)\.RemoveSubConn\.func1\(.* \[sync\.Mutex\.Lock\]`, + ), + makeFlakyTest("Hugo3251", + `Hugo3251\.func2\(.* \[sync\.WaitGroup\.Wait\]`, + `Hugo3251\.func2\.1\(.* \[sync\.Mutex\.Lock\]`, + `Hugo3251\.func2\.1\(.* \[sync\.RWMutex\.RLock\]`, + ), + makeFlakyTest("Hugo5379", + `\(\*Page_hugo5379\)\.initContent\.func1\.1\(.* \[sync\.Mutex\.Lock\]`, + `pageRenderer_hugo5379\(.* \[sync\.Mutex\.Lock\]`, + `Hugo5379\.func2\(.* \[sync\.WaitGroup\.Wait\]`, + ), + makeFlakyTest("Istio16224", + `Istio16224\.func2\(.* \[sync\.Mutex\.Lock\]`, + `\(\*controller_istio16224\)\.Run\(.* \[chan send\]`, + `\(\*controller_istio16224\)\.Run\(.* \[chan receive\]`, + ), + makeFlakyTest("Istio17860", + `\(\*agent_istio17860\)\.runWait\(.* \[chan send\]`, + ), + makeFlakyTest("Istio18454", + `\(\*Worker_istio18454\)\.Start\.func1\(.* \[chan receive\]`, + `\(\*Worker_istio18454\)\.Start\.func1\(.* \[chan send\]`, + ), + // NOTE(vsaioc): + // Kubernetes/1321 is excluded due to a race condition in the original program + // that may, in extremely rare cases, lead to nil pointer dereference crashes. + // (Reproducible even with regular GC). Only kept here for posterity. 
+ // + // makeTest(testCase{name: "Kubernetes1321"}, + // `NewMux_kubernetes1321\.gowrap1\(.* \[chan send\]`, + // `testMuxWatcherClose_kubernetes1321\(.* \[sync\.Mutex\.Lock\]`), + makeTest("Kubernetes5316", + `finishRequest_kubernetes5316\.func1\(.* \[chan send\]`, + ), + makeFlakyTest("Kubernetes6632", + `\(\*idleAwareFramer_kubernetes6632\)\.monitor\(.* \[sync\.Mutex\.Lock\]`, + `\(\*idleAwareFramer_kubernetes6632\)\.WriteFrame\(.* \[chan send\]`, + ), + makeFlakyTest("Kubernetes10182", + `\(\*statusManager_kubernetes10182\)\.Start\.func1\(.* \[sync\.Mutex\.Lock\]`, + `\(\*statusManager_kubernetes10182\)\.SetPodStatus\(.* \[chan send\]`, + ), + makeFlakyTest("Kubernetes11298", + `After_kubernetes11298\.func1\(.* \[chan receive\]`, + `After_kubernetes11298\.func1\(.* \[sync\.Cond\.Wait\]`, + `Kubernetes11298\.func2\(.* \[chan receive\]`, + ), + makeFlakyTest("Kubernetes13135", + `Util_kubernetes13135\(.* \[sync\.Mutex\.Lock\]`, + `\(\*WatchCache_kubernetes13135\)\.Add\(.* \[sync\.Mutex\.Lock\]`, + ), + makeTest("Kubernetes25331", + `\(\*watchChan_kubernetes25331\)\.run\(.* \[chan send\]`, + ), + makeFlakyTest("Kubernetes26980", + `Kubernetes26980\.func2\(.* \[chan receive\]`, + `Kubernetes26980\.func2\.1\(.* \[sync\.Mutex\.Lock\]`, + `\(\*processorListener_kubernetes26980\)\.pop\(.* \[chan receive\]`, + ), + makeFlakyTest("Kubernetes30872", + `\(\*DelayingDeliverer_kubernetes30872\)\.StartWithHandler\.func1\(.* \[sync\.Mutex\.Lock\]`, + `\(\*Controller_kubernetes30872\)\.Run\(.* \[sync\.Mutex\.Lock\]`, + `\(\*NamespaceController_kubernetes30872\)\.Run\.func1\(.* \[sync\.Mutex\.Lock\]`, + ), + makeTest("Kubernetes38669", + `\(\*cacheWatcher_kubernetes38669\)\.process\(.* \[chan send\]`, + ), + makeFlakyTest("Kubernetes58107", + `\(\*ResourceQuotaController_kubernetes58107\)\.worker\(.* \[sync\.Cond\.Wait\]`, + `\(\*ResourceQuotaController_kubernetes58107\)\.worker\(.* \[sync\.RWMutex\.RLock\]`, + `\(\*ResourceQuotaController_kubernetes58107\)\.Sync\(.* \[sync\.RWMutex\.Lock\]`, + ), + makeFlakyTest("Kubernetes62464", + `\(\*manager_kubernetes62464\)\.reconcileState\(.* \[sync\.RWMutex\.RLock\]`, + `\(\*staticPolicy_kubernetes62464\)\.RemoveContainer\(.* \[sync\.(RW)?Mutex\.Lock\]`, + ), + makeFlakyTest("Kubernetes70277", + `Kubernetes70277\.func2\(.* \[chan receive\]`, + ), + makeFlakyTest("Moby4951", + `\(\*DeviceSet_moby4951\)\.DeleteDevice\(.* \[sync\.Mutex\.Lock\]`, + ), + makeTest("Moby7559", + `\(\*UDPProxy_moby7559\)\.Run\(.* \[sync\.Mutex\.Lock\]`, + ), + makeTest("Moby17176", + `testDevmapperLockReleasedDeviceDeletion_moby17176\.func1\(.* \[sync\.Mutex\.Lock\]`, + ), + makeFlakyTest("Moby21233", + `\(\*Transfer_moby21233\)\.Watch\.func1\(.* \[chan send\]`, + `\(\*Transfer_moby21233\)\.Watch\.func1\(.* \[select\]`, + `testTransfer_moby21233\(.* \[chan receive\]`, + ), + makeTest("Moby25348", + `\(\*Manager_moby25348\)\.init\(.* \[sync\.WaitGroup\.Wait\]`, + ), + makeFlakyTest("Moby27782", + `\(\*JSONFileLogger_moby27782\)\.readLogs\(.* \[sync\.Cond\.Wait\]`, + `\(\*Watcher_moby27782\)\.readEvents\(.* \[select\]`, + ), + makeFlakyTest("Moby28462", + `monitor_moby28462\(.* \[sync\.Mutex\.Lock\]`, + `\(\*Daemon_moby28462\)\.StateChanged\(.* \[chan send\]`, + ), + makeTest("Moby30408", + `Moby30408\.func2\(.* \[chan receive\]`, + `testActive_moby30408\.func1\(.* \[sync\.Cond\.Wait\]`, + ), + makeFlakyTest("Moby33781", + `monitor_moby33781\.func1\(.* \[chan send\]`, + ), + makeFlakyTest("Moby36114", + `\(\*serviceVM_moby36114\)\.hotAddVHDsAtStart\(.* \[sync\.Mutex\.Lock\]`, + ), + 
makeFlakyTest("Serving2137", + `\(\*Breaker_serving2137\)\.concurrentRequest\.func1\(.* \[chan send\]`, + `\(\*Breaker_serving2137\)\.concurrentRequest\.func1\(.* \[sync\.Mutex\.Lock\]`, + `Serving2137\.func2\(.* \[chan receive\]`, + ), + makeTest("Syncthing4829", + `Syncthing4829\.func2\(.* \[sync\.RWMutex\.RLock\]`, + ), + makeTest("Syncthing5795", + `\(\*rawConnection_syncthing5795\)\.dispatcherLoop\(.* \[chan receive\]`, + `Syncthing5795\.func2.* \[chan receive\]`, + ), + } + + // Combine all test cases into a single list. + testCases := append(microTests, stressTestCases...) + testCases = append(testCases, patternTestCases...) + + // Test cases must not panic or cause fatal exceptions. + failStates := regexp.MustCompile(`fatal|panic|DATA RACE`) + + testApp := func(exepath string, testCases []testCase) { + + // Build the test program once. + exe, err := buildTestProg(t, exepath) + if err != nil { + t.Fatal(fmt.Sprintf("building testgoroutineleakprofile failed: %v", err)) + } + + for _, tcase := range testCases { + t.Run(tcase.name, func(t *testing.T) { + t.Parallel() + + cmdEnv := []string{ + "GODEBUG=asyncpreemptoff=1", + "GOEXPERIMENT=greenteagc,goroutineleakprofile", + } + + if tcase.simple { + // If the test is simple, set GOMAXPROCS=1 in order to better + // control the behavior of the scheduler. + cmdEnv = append(cmdEnv, "GOMAXPROCS=1") + } + + var output string + for i := 0; i < tcase.repetitions; i++ { + // Run program for one repetition and get runOutput trace. + runOutput := runBuiltTestProg(t, exe, tcase.name, cmdEnv...) + if len(runOutput) == 0 { + t.Errorf("Test %s produced no output. Is the goroutine leak profile collected?", tcase.name) + } + + // Zero tolerance policy for fatal exceptions, panics, or data races. + if failStates.MatchString(runOutput) { + t.Errorf("unexpected fatal exception or panic\noutput:\n%s\n\n", runOutput) + } + + output += runOutput + "\n\n" + } + + // Extract all the goroutine leaks + foundLeaks := extractLeaks(output) + + // If the test case was not expected to produce leaks, but some were reported, + // stop the test immediately. Zero tolerance policy for false positives. + if len(tcase.expectedLeaks)+len(tcase.flakyLeaks) == 0 && len(foundLeaks) > 0 { + t.Errorf("output:\n%s\n\ngoroutines leaks detected in case with no leaks", output) + } + + unexpectedLeaks := make([]string, 0, len(foundLeaks)) + + // Parse every leak and check if it is expected (maybe as a flaky leak). + leaks: + for _, leak := range foundLeaks { + // Check if the leak is expected. + // If it is, check whether it has been encountered before. + var foundNew bool + var leakPattern *regexp.Regexp + + for expectedLeak, ok := range tcase.expectedLeaks { + if expectedLeak.MatchString(leak) { + if !ok { + foundNew = true + } + + leakPattern = expectedLeak + break + } + } + + if foundNew { + // Only bother writing if we found a new leak. + tcase.expectedLeaks[leakPattern] = true + } + + if leakPattern == nil { + // We are dealing with a leak not marked as expected. + // Check if it is a flaky leak. + for flakyLeak := range tcase.flakyLeaks { + if flakyLeak.MatchString(leak) { + // The leak is flaky. Carry on to the next line. 
+ continue leaks + } + } + + unexpectedLeaks = append(unexpectedLeaks, leak) + } + } + + missingLeakStrs := make([]string, 0, len(tcase.expectedLeaks)) + for expectedLeak, found := range tcase.expectedLeaks { + if !found { + missingLeakStrs = append(missingLeakStrs, expectedLeak.String()) + } + } + + var errors []error + if len(unexpectedLeaks) > 0 { + errors = append(errors, fmt.Errorf("unexpected goroutine leaks:\n%s\n", strings.Join(unexpectedLeaks, "\n"))) + } + if len(missingLeakStrs) > 0 { + errors = append(errors, fmt.Errorf("missing expected leaks:\n%s\n", strings.Join(missingLeakStrs, ", "))) + } + if len(errors) > 0 { + t.Fatalf("Failed with the following errors:\n%s\n\noutput:\n%s", errors, output) + } + }) + } + } + + testApp("testgoroutineleakprofile", testCases) + testApp("testgoroutineleakprofile/goker", gokerTestCases) +} diff --git a/src/runtime/iface_test.go b/src/runtime/iface_test.go index 06f6eeb9524637..5bc209cfcf0dc7 100644 --- a/src/runtime/iface_test.go +++ b/src/runtime/iface_test.go @@ -60,7 +60,7 @@ func TestCmpIfaceConcreteAlloc(t *testing.T) { t.Skip("skipping on non-gc compiler") } - n := testing.AllocsPerRun(1, func() { + n := testing.AllocsPerRun(100, func() { _ = e == ts _ = i1 == ts _ = e == 1 diff --git a/src/runtime/malloc_generated.go b/src/runtime/malloc_generated.go index 600048c67557b1..2215dbaddb2e1c 100644 --- a/src/runtime/malloc_generated.go +++ b/src/runtime/malloc_generated.go @@ -150,6 +150,10 @@ func mallocgcSmallScanNoHeaderSC1(size uintptr, typ *_type, needzero bool) unsaf gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -304,6 +308,10 @@ func mallocgcSmallScanNoHeaderSC2(size uintptr, typ *_type, needzero bool) unsaf gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -458,6 +466,10 @@ func mallocgcSmallScanNoHeaderSC3(size uintptr, typ *_type, needzero bool) unsaf gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -612,6 +624,10 @@ func mallocgcSmallScanNoHeaderSC4(size uintptr, typ *_type, needzero bool) unsaf gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -766,6 +782,10 @@ func mallocgcSmallScanNoHeaderSC5(size uintptr, typ *_type, needzero bool) unsaf gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -920,6 +940,10 @@ func mallocgcSmallScanNoHeaderSC6(size uintptr, typ *_type, needzero bool) unsaf gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -1074,6 +1098,10 @@ func mallocgcSmallScanNoHeaderSC7(size uintptr, typ *_type, needzero bool) unsaf gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; 
assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -1228,6 +1256,10 @@ func mallocgcSmallScanNoHeaderSC8(size uintptr, typ *_type, needzero bool) unsaf gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -1382,6 +1414,10 @@ func mallocgcSmallScanNoHeaderSC9(size uintptr, typ *_type, needzero bool) unsaf gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -1536,6 +1572,10 @@ func mallocgcSmallScanNoHeaderSC10(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -1690,6 +1730,10 @@ func mallocgcSmallScanNoHeaderSC11(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -1844,6 +1888,10 @@ func mallocgcSmallScanNoHeaderSC12(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -1998,6 +2046,10 @@ func mallocgcSmallScanNoHeaderSC13(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -2152,6 +2204,10 @@ func mallocgcSmallScanNoHeaderSC14(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -2306,6 +2362,10 @@ func mallocgcSmallScanNoHeaderSC15(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -2460,6 +2520,10 @@ func mallocgcSmallScanNoHeaderSC16(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -2614,6 +2678,10 @@ func mallocgcSmallScanNoHeaderSC17(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -2768,6 +2836,10 @@ func mallocgcSmallScanNoHeaderSC18(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -2922,6 +2994,10 @@ func 
mallocgcSmallScanNoHeaderSC19(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -3076,6 +3152,10 @@ func mallocgcSmallScanNoHeaderSC20(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -3230,6 +3310,10 @@ func mallocgcSmallScanNoHeaderSC21(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -3384,6 +3468,10 @@ func mallocgcSmallScanNoHeaderSC22(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -3538,6 +3626,10 @@ func mallocgcSmallScanNoHeaderSC23(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -3692,6 +3784,10 @@ func mallocgcSmallScanNoHeaderSC24(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -3846,6 +3942,10 @@ func mallocgcSmallScanNoHeaderSC25(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -4000,6 +4100,10 @@ func mallocgcSmallScanNoHeaderSC26(size uintptr, typ *_type, needzero bool) unsa gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -4064,6 +4168,10 @@ func mallocTiny1(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -4142,6 +4250,10 @@ func mallocTiny1(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -4206,6 +4318,10 @@ func mallocTiny2(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -4284,6 +4400,10 @@ func mallocTiny2(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if 
valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -4348,6 +4468,10 @@ func mallocTiny3(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -4426,6 +4550,10 @@ func mallocTiny3(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -4490,6 +4618,10 @@ func mallocTiny4(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -4568,6 +4700,10 @@ func mallocTiny4(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -4632,6 +4768,10 @@ func mallocTiny5(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -4710,6 +4850,10 @@ func mallocTiny5(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -4774,6 +4918,10 @@ func mallocTiny6(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -4852,6 +5000,10 @@ func mallocTiny6(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -4916,6 +5068,10 @@ func mallocTiny7(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -4994,6 +5150,10 @@ func mallocTiny7(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -5058,6 +5218,10 @@ func mallocTiny8(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if valgrindenabled { + valgrindMalloc(x, size) + } + if 
gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -5136,6 +5300,10 @@ func mallocTiny8(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -5200,6 +5368,10 @@ func mallocTiny9(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -5278,6 +5450,10 @@ func mallocTiny9(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -5342,6 +5518,10 @@ func mallocTiny10(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -5420,6 +5600,10 @@ func mallocTiny10(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -5484,6 +5668,10 @@ func mallocTiny11(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -5562,6 +5750,10 @@ func mallocTiny11(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -5626,6 +5818,10 @@ func mallocTiny12(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -5704,6 +5900,10 @@ func mallocTiny12(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -5768,6 +5968,10 @@ func mallocTiny13(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -5846,6 +6050,10 @@ func mallocTiny13(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { 
if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -5910,6 +6118,10 @@ func mallocTiny14(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -5988,6 +6200,10 @@ func mallocTiny14(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -6052,6 +6268,10 @@ func mallocTiny15(size uintptr, typ *_type, needzero bool) unsafe.Pointer { const elemsize = 0 { + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -6130,6 +6350,10 @@ func mallocTiny15(size uintptr, typ *_type, needzero bool) unsafe.Pointer { x = add(x, elemsize-constsize) } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -6223,6 +6447,10 @@ func mallocgcSmallNoScanSC2(size uintptr, typ *_type, needzero bool) unsafe.Poin gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -6316,6 +6544,10 @@ func mallocgcSmallNoScanSC3(size uintptr, typ *_type, needzero bool) unsafe.Poin gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -6409,6 +6641,10 @@ func mallocgcSmallNoScanSC4(size uintptr, typ *_type, needzero bool) unsafe.Poin gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -6502,6 +6738,10 @@ func mallocgcSmallNoScanSC5(size uintptr, typ *_type, needzero bool) unsafe.Poin gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -6595,6 +6835,10 @@ func mallocgcSmallNoScanSC6(size uintptr, typ *_type, needzero bool) unsafe.Poin gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -6688,6 +6932,10 @@ func mallocgcSmallNoScanSC7(size uintptr, typ *_type, needzero bool) unsafe.Poin gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -6781,6 +7029,10 @@ func mallocgcSmallNoScanSC8(size uintptr, typ *_type, needzero bool) unsafe.Poin gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) 
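Each of the generated small-allocation fast paths above (and mallocStub further below) gains the same guarded hook: immediately after the object is allocated, and before the GC-assist accounting, the block is reported to Valgrind when Valgrind support is compiled in. The following is a minimal standalone sketch of that pattern; valgrindEnabled, valgrindMalloc, and allocate are illustrative stand-ins, not the runtime's internal identifiers.

package main

import "unsafe"

// valgrindEnabled stands in for the runtime's build-dependent flag; the real
// hook issues a Valgrind client request, while this placeholder is a no-op.
var valgrindEnabled bool

func valgrindMalloc(p unsafe.Pointer, size uintptr) {}

// allocate mirrors the shape of the generated fast paths: allocate the block,
// then notify Valgrind (if enabled) before any further bookkeeping runs.
func allocate(size uintptr) unsafe.Pointer {
	buf := make([]byte, size) // stand-in for the real size-class allocator
	p := unsafe.Pointer(&buf[0])
	if valgrindEnabled {
		valgrindMalloc(p, size)
	}
	return p
}

func main() {
	_ = allocate(64)
}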
@@ -6874,6 +7126,10 @@ func mallocgcSmallNoScanSC9(size uintptr, typ *_type, needzero bool) unsafe.Poin gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -6967,6 +7223,10 @@ func mallocgcSmallNoScanSC10(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -7060,6 +7320,10 @@ func mallocgcSmallNoScanSC11(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -7153,6 +7417,10 @@ func mallocgcSmallNoScanSC12(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -7246,6 +7514,10 @@ func mallocgcSmallNoScanSC13(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -7339,6 +7611,10 @@ func mallocgcSmallNoScanSC14(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -7432,6 +7708,10 @@ func mallocgcSmallNoScanSC15(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -7525,6 +7805,10 @@ func mallocgcSmallNoScanSC16(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -7618,6 +7902,10 @@ func mallocgcSmallNoScanSC17(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -7711,6 +7999,10 @@ func mallocgcSmallNoScanSC18(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -7804,6 +8096,10 @@ func mallocgcSmallNoScanSC19(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -7897,6 +8193,10 @@ func mallocgcSmallNoScanSC20(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + 
if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -7990,6 +8290,10 @@ func mallocgcSmallNoScanSC21(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -8083,6 +8387,10 @@ func mallocgcSmallNoScanSC22(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -8176,6 +8484,10 @@ func mallocgcSmallNoScanSC23(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -8269,6 +8581,10 @@ func mallocgcSmallNoScanSC24(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -8362,6 +8678,10 @@ func mallocgcSmallNoScanSC25(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) @@ -8455,6 +8775,10 @@ func mallocgcSmallNoScanSC26(size uintptr, typ *_type, needzero bool) unsafe.Poi gcStart(t) } } + if valgrindenabled { + valgrindMalloc(x, size) + } + if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { assistG.gcAssistBytes -= int64(elemsize - size) diff --git a/src/runtime/malloc_stubs.go b/src/runtime/malloc_stubs.go index 7fd144418938fb..224746f3d41124 100644 --- a/src/runtime/malloc_stubs.go +++ b/src/runtime/malloc_stubs.go @@ -50,6 +50,8 @@ func mallocPanic(size uintptr, typ *_type, needzero bool) unsafe.Pointer { panic("not defined for sizeclass") } +// WARNING: mallocStub does not do any work for sanitizers so callers need +// to steer out of this codepath early if sanitizers are enabled. func mallocStub(size uintptr, typ *_type, needzero bool) unsafe.Pointer { if doubleCheckMalloc { if gcphase == _GCmarktermination { @@ -77,6 +79,13 @@ func mallocStub(size uintptr, typ *_type, needzero bool) unsafe.Pointer { // Actually do the allocation. x, elemsize := inlinedMalloc(size, typ, needzero) + // Notify valgrind, if enabled. + // To allow the compiler to not know about valgrind, we do valgrind instrumentation + // unlike the other sanitizers. + if valgrindenabled { + valgrindMalloc(x, size) + } + // Adjust our GC assist debt to account for internal fragmentation. 
if gcBlackenEnabled != 0 && elemsize != 0 { if assistG := getg().m.curg; assistG != nil { diff --git a/src/runtime/mbitmap.go b/src/runtime/mbitmap.go index 508de9a115723c..37a92c64bc6ecc 100644 --- a/src/runtime/mbitmap.go +++ b/src/runtime/mbitmap.go @@ -1267,6 +1267,28 @@ func markBitsForSpan(base uintptr) (mbits markBits) { return mbits } +// isMarkedOrNotInHeap returns true if a pointer is in the heap and marked, +// or if the pointer is not in the heap. Used by goroutine leak detection +// to determine if concurrency resources are reachable in memory. +func isMarkedOrNotInHeap(p unsafe.Pointer) bool { + obj, span, objIndex := findObject(uintptr(p), 0, 0) + if obj != 0 { + mbits := span.markBitsForIndex(objIndex) + return mbits.isMarked() + } + + // If we fall through to get here, the object is not in the heap. + // In this case, it is either a pointer to a stack object or a global resource. + // Treat it as reachable in memory by default, to be safe. + // + // TODO(vsaioc): we could be more precise by checking against the stacks + // of runnable goroutines. I don't think this is necessary, based on what we've seen, but + // let's keep the option open in case the runtime evolves. + // This will (naively) lead to quadratic blow-up for goroutine leak detection, + // but if it is only run on demand, maybe the extra cost is not a show-stopper. + return true +} + // advance advances the markBits to the next object in the span. func (m *markBits) advance() { if m.mask == 1<<7 { diff --git a/src/runtime/mem_sbrk.go b/src/runtime/mem_sbrk.go index 05f0fdb5d74ed6..5284bbd0009865 100644 --- a/src/runtime/mem_sbrk.go +++ b/src/runtime/mem_sbrk.go @@ -48,6 +48,16 @@ type memHdrPtr uintptr func (p memHdrPtr) ptr() *memHdr { return (*memHdr)(unsafe.Pointer(p)) } func (p *memHdrPtr) set(x *memHdr) { *p = memHdrPtr(unsafe.Pointer(x)) } +// memAlloc allocates n bytes from the brk reservation, or if it's full, +// the system. +// +// memlock must be held. +// +// memAlloc must be called on the system stack, otherwise a stack growth +// could cause us to call back into it. Since memlock is held, that could +// lead to a self-deadlock. +// +//go:systemstack func memAlloc(n uintptr) unsafe.Pointer { if p := memAllocNoGrow(n); p != nil { return p @@ -55,6 +65,15 @@ func memAlloc(n uintptr) unsafe.Pointer { return sbrk(n) } +// memAllocNoGrow attempts to allocate n bytes from the existing brk. +// +// memlock must be held. +// +// memAllocNoGrow must be called on the system stack, otherwise a stack growth +// could cause us to call back into it. Since memlock is held, that could +// lead to a self-deadlock. +// +//go:systemstack func memAllocNoGrow(n uintptr) unsafe.Pointer { n = memRound(n) var prevp *memHdr @@ -78,6 +97,15 @@ func memAllocNoGrow(n uintptr) unsafe.Pointer { return nil } +// memFree makes [ap, ap+n) available for reallocation by memAlloc. +// +// memlock must be held. +// +// memFree must be called on the system stack, otherwise a stack growth +// could cause us to call back into it. Since memlock is held, that could +// lead to a self-deadlock. +// +//go:systemstack func memFree(ap unsafe.Pointer, n uintptr) { n = memRound(n) memclrNoHeapPointers(ap, n) @@ -122,6 +150,15 @@ func memFree(ap unsafe.Pointer, n uintptr) { } } +// memCheck checks invariants around free list management. +// +// memlock must be held. +// +// memCheck must be called on the system stack, otherwise a stack growth +// could cause us to call back into it. Since memlock is held, that could +// lead to a self-deadlock.
+// +//go:systemstack func memCheck() { if !memDebug { return @@ -158,26 +195,31 @@ func initBloc() { } func sysAllocOS(n uintptr, _ string) unsafe.Pointer { - lock(&memlock) - p := memAlloc(n) - memCheck() - unlock(&memlock) - return p + var p uintptr + systemstack(func() { + lock(&memlock) + p = uintptr(memAlloc(n)) + memCheck() + unlock(&memlock) + }) + return unsafe.Pointer(p) } func sysFreeOS(v unsafe.Pointer, n uintptr) { - lock(&memlock) - if uintptr(v)+n == bloc { - // Address range being freed is at the end of memory, - // so record a new lower value for end of memory. - // Can't actually shrink address space because segment is shared. - memclrNoHeapPointers(v, n) - bloc -= n - } else { - memFree(v, n) - memCheck() - } - unlock(&memlock) + systemstack(func() { + lock(&memlock) + if uintptr(v)+n == bloc { + // Address range being freed is at the end of memory, + // so record a new lower value for end of memory. + // Can't actually shrink address space because segment is shared. + memclrNoHeapPointers(v, n) + bloc -= n + } else { + memFree(v, n) + memCheck() + } + unlock(&memlock) + }) } func sysUnusedOS(v unsafe.Pointer, n uintptr) { @@ -202,49 +244,55 @@ func sysFaultOS(v unsafe.Pointer, n uintptr) { } func sysReserveOS(v unsafe.Pointer, n uintptr, _ string) unsafe.Pointer { - lock(&memlock) - var p unsafe.Pointer - if uintptr(v) == bloc { - // Address hint is the current end of memory, - // so try to extend the address space. - p = sbrk(n) - } - if p == nil && v == nil { - p = memAlloc(n) - memCheck() - } - unlock(&memlock) - return p + var p uintptr + systemstack(func() { + lock(&memlock) + if uintptr(v) == bloc { + // Address hint is the current end of memory, + // so try to extend the address space. + p = uintptr(sbrk(n)) + } + if p == 0 && v == nil { + p = uintptr(memAlloc(n)) + memCheck() + } + unlock(&memlock) + }) + return unsafe.Pointer(p) } func sysReserveAlignedSbrk(size, align uintptr) (unsafe.Pointer, uintptr) { - lock(&memlock) - if p := memAllocNoGrow(size + align); p != nil { - // We can satisfy the reservation from the free list. - // Trim off the unaligned parts. - pAligned := alignUp(uintptr(p), align) - if startLen := pAligned - uintptr(p); startLen > 0 { - memFree(p, startLen) + var p uintptr + systemstack(func() { + lock(&memlock) + if base := memAllocNoGrow(size + align); base != nil { + // We can satisfy the reservation from the free list. + // Trim off the unaligned parts. + start := alignUp(uintptr(base), align) + if startLen := start - uintptr(base); startLen > 0 { + memFree(base, startLen) + } + end := start + size + if endLen := (uintptr(base) + size + align) - end; endLen > 0 { + memFree(unsafe.Pointer(end), endLen) + } + memCheck() + unlock(&memlock) + p = start + return } - end := pAligned + size - if endLen := (uintptr(p) + size + align) - end; endLen > 0 { - memFree(unsafe.Pointer(end), endLen) + + // Round up bloc to align, then allocate size. + p = alignUp(bloc, align) + r := sbrk(p + size - bloc) + if r == nil { + p, size = 0, 0 + } else if l := p - uintptr(r); l > 0 { + // Free the area we skipped over for alignment. + memFree(r, l) + memCheck() } - memCheck() unlock(&memlock) - return unsafe.Pointer(pAligned), size - } - - // Round up bloc to align, then allocate size. - p := alignUp(bloc, align) - r := sbrk(p + size - bloc) - if r == nil { - p, size = 0, 0 - } else if l := p - uintptr(r); l > 0 { - // Free the area we skipped over for alignment. 
- memFree(r, l) - memCheck() - } - unlock(&memlock) + }) return unsafe.Pointer(p), size } diff --git a/src/runtime/memclr_arm64.s b/src/runtime/memclr_arm64.s index 3e49f7fcf6a26f..3f8acdaeff670f 100644 --- a/src/runtime/memclr_arm64.s +++ b/src/runtime/memclr_arm64.s @@ -176,7 +176,7 @@ aligned: PCALIGN $16 loop_zva: - WORD $0xd50b7420 // DC ZVA, R0 + DC ZVA, R0 ADD R5, R0, R0 SUBS R5, R1, R1 BHS loop_zva diff --git a/src/runtime/metrics/doc.go b/src/runtime/metrics/doc.go index e40ce25ff9d1bc..05646132ce4e69 100644 --- a/src/runtime/metrics/doc.go +++ b/src/runtime/metrics/doc.go @@ -309,6 +309,11 @@ Below is the full list of supported metrics, ordered lexicographically. The number of non-default behaviors executed by the net/http package due to a non-default GODEBUG=http2server=... setting. + /godebug/non-default-behavior/httpcookiemaxnum:events + The number of non-default behaviors executed by the net/http + package due to a non-default GODEBUG=httpcookiemaxnum=... + setting. + /godebug/non-default-behavior/httplaxcontentlength:events The number of non-default behaviors executed by the net/http package due to a non-default GODEBUG=httplaxcontentlength=... diff --git a/src/runtime/mgc.go b/src/runtime/mgc.go index 68cbfda5000573..b4b4485629b59d 100644 --- a/src/runtime/mgc.go +++ b/src/runtime/mgc.go @@ -376,8 +376,8 @@ type workType struct { // (and thus 8-byte alignment even on 32-bit architectures). bytesMarked uint64 - markrootNext uint32 // next markroot job - markrootJobs uint32 // number of markroot jobs + markrootNext atomic.Uint32 // next markroot job + markrootJobs atomic.Uint32 // number of markroot jobs nproc uint32 tstart int64 @@ -385,17 +385,44 @@ type workType struct { // Number of roots of various root types. Set by gcPrepareMarkRoots. // - // nStackRoots == len(stackRoots), but we have nStackRoots for - // consistency. - nDataRoots, nBSSRoots, nSpanRoots, nStackRoots int + // During normal GC cycle, nStackRoots == nMaybeRunnableStackRoots == len(stackRoots); + // during goroutine leak detection, nMaybeRunnableStackRoots is the number of stackRoots + // scheduled for marking. + // In both variants, nStackRoots == len(stackRoots). + nDataRoots, nBSSRoots, nSpanRoots, nStackRoots, nMaybeRunnableStackRoots int + + // The following fields monitor the GC phase of the current cycle during + // goroutine leak detection. + goroutineLeak struct { + // Once set, it indicates that the GC will perform goroutine leak detection during + // the next GC cycle; it is set by goroutineLeakGC and unset during gcStart. + pending atomic.Bool + // Once set, it indicates that the GC has started a goroutine leak detection run; + // it is set during gcStart and unset during gcMarkTermination; + // + // Protected by STW. + enabled bool + // Once set, it indicates that the GC has performed goroutine leak detection during + // the current GC cycle; it is set during gcMarkDone, right after goroutine leak detection, + // and unset during gcMarkTermination; + // + // Protected by STW. + done bool + // The number of leaked goroutines during the last leak detection GC cycle. + // + // Write-protected by STW in findGoroutineLeaks. + count int + } // Base indexes of each root type. Set by gcPrepareMarkRoots. baseData, baseBSS, baseSpans, baseStacks, baseEnd uint32 - // stackRoots is a snapshot of all of the Gs that existed - // before the beginning of concurrent marking. The backing - // store of this must not be modified because it might be - // shared with allgs. 
+ // stackRoots is a snapshot of all of the Gs that existed before the + // beginning of concurrent marking. During goroutine leak detection, stackRoots + // is partitioned into two sets; to the left of nMaybeRunnableStackRoots are stackRoots + // of running / runnable goroutines and to the right of nMaybeRunnableStackRoots are + // stackRoots of unmarked / not runnable goroutines + // The stackRoots array is re-partitioned after each marking phase iteration. stackRoots []*g // Each type of GC state transition is protected by a lock. @@ -562,6 +589,55 @@ func GC() { releasem(mp) } +// goroutineLeakGC runs a GC cycle that performs goroutine leak detection. +// +//go:linkname goroutineLeakGC runtime/pprof.runtime_goroutineLeakGC +func goroutineLeakGC() { + // Set the pending flag to true, instructing the next GC cycle to + // perform goroutine leak detection. + work.goroutineLeak.pending.Store(true) + + // Spin GC cycles until the pending flag is unset. + // This ensures that goroutineLeakGC waits for a GC cycle that + // actually performs goroutine leak detection. + // + // This is needed in case multiple concurrent calls to GC + // are simultaneously fired by the system, wherein some + // of them are dropped. + // + // In the vast majority of cases, only one loop iteration is needed; + // however, multiple concurrent calls to goroutineLeakGC could lead to + // the execution of additional GC cycles. + // + // Examples: + // + // pending? | G1 | G2 + // ---------|-------------------------|----------------------- + // - | goroutineLeakGC() | goroutineLeakGC() + // - | pending.Store(true) | . + // X | for pending.Load() | . + // X | GC() | . + // X | > gcStart() | . + // X | pending.Store(false) | . + // ... + // - | > gcMarkDone() | . + // - | . | pending.Store(true) + // ... + // X | > gcMarkTermination() | . + // X | ... + // X | < GC returns | . + // X | for pending.Load | . + // X | GC() | . + // X | . | for pending.Load() + // X | . | GC() + // ... + // The first to pick up the pending flag will start a + // leak detection cycle. + for work.goroutineLeak.pending.Load() { + GC() + } +} + // gcWaitOnMark blocks until GC finishes the Nth mark phase. If GC has // already completed this mark phase, it returns immediately. func gcWaitOnMark(n uint32) { @@ -785,6 +861,15 @@ func gcStart(trigger gcTrigger) { schedEnableUser(false) } + // If goroutine leak detection is pending, enable it for this GC cycle. + if work.goroutineLeak.pending.Load() { + work.goroutineLeak.enabled = true + work.goroutineLeak.pending.Store(false) + // Set all sync objects of blocked goroutines as untraceable + // by the GC. Only set as traceable at the end of the GC cycle. + setSyncObjectsUntraceable() + } + // Enter concurrent mark phase and enable // write barriers. // @@ -995,8 +1080,20 @@ top: } } }) - if restart { - gcDebugMarkDone.restartedDueTo27993 = true + + // Check whether we need to resume the marking phase because of issue #27993 + // or because of goroutine leak detection. + if restart || (work.goroutineLeak.enabled && !work.goroutineLeak.done) { + if restart { + // Restart because of issue #27993. + gcDebugMarkDone.restartedDueTo27993 = true + } else { + // Marking has reached a fixed-point. Attempt to detect goroutine leaks. + // + // If the returned value is true, then detection already concluded for this cycle. + // Otherwise, more runnable goroutines were discovered, requiring additional mark work. 
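// The line below hands control to the goroutine leak detector at the point
// where ordinary marking has reached a fixed point. A compact sketch of that
// fixed point is given here (simplified, hypothetical types and helpers; not
// the runtime's implementation): alternate between marking and promoting
// blocked goroutines whose blocking primitive became reachable, and report
// whatever remains blocked once no further promotion is possible.
func leakFixedPointSketch(primMarked func(prim uintptr) bool, blockedPrims []uintptr) (leaked []uintptr) {
	for {
		promoted := false
		remaining := blockedPrims[:0]
		for _, prim := range blockedPrims {
			if primMarked(prim) {
				// The primitive this goroutine blocks on is reachable, so the
				// goroutine may still run; in the runtime its stack becomes a
				// new markroot job, which can mark yet more primitives.
				promoted = true
			} else {
				remaining = append(remaining, prim)
			}
		}
		blockedPrims = remaining
		if !promoted {
			// Fixed point: nothing can unblock the remaining goroutines,
			// so they are reported as leaked.
			return blockedPrims
		}
	}
}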
+ work.goroutineLeak.done = findGoroutineLeaks() + } getg().m.preemptoff = "" systemstack(func() { @@ -1047,6 +1144,172 @@ top: gcMarkTermination(stw) } +// isMaybeRunnable checks whether a goroutine may still be semantically runnable. +// For goroutines which are semantically runnable, this will eventually return true +// as the GC marking phase progresses. It returns false for leaked goroutines, or for +// goroutines which are not yet computed as possibly runnable by the GC. +func (gp *g) isMaybeRunnable() bool { + // Check whether the goroutine is actually in a waiting state first. + if readgstatus(gp) != _Gwaiting { + // If the goroutine is not waiting, then clearly it is maybe runnable. + return true + } + + switch gp.waitreason { + case waitReasonSelectNoCases, + waitReasonChanSendNilChan, + waitReasonChanReceiveNilChan: + // Select with no cases or communicating on nil channels + // make goroutines unrunnable by definition. + return false + case waitReasonChanReceive, + waitReasonSelect, + waitReasonChanSend: + // Cycle through all *sudog to check whether + // the goroutine is waiting on a marked channel. + for sg := gp.waiting; sg != nil; sg = sg.waitlink { + if isMarkedOrNotInHeap(unsafe.Pointer(sg.c.get())) { + return true + } + } + return false + case waitReasonSyncCondWait, + waitReasonSyncWaitGroupWait, + waitReasonSyncMutexLock, + waitReasonSyncRWMutexLock, + waitReasonSyncRWMutexRLock: + // If waiting on mutexes, wait groups, or condition variables, + // check if the synchronization primitive attached to the sudog is marked. + if gp.waiting != nil { + return isMarkedOrNotInHeap(gp.waiting.elem.get()) + } + } + return true +} + +// findMaybeRunnableGoroutines checks to see if more blocked but maybe-runnable goroutines exist. +// If so, it adds them to the root set and increments work.markrootJobs accordingly. +// Returns true if we need to run another phase of markroots; returns false otherwise. +func findMaybeRunnableGoroutines() (moreWork bool) { + oldRootJobs := work.markrootJobs.Load() + + // To begin with, we have a set of unchecked stackRoots between + // vIndex and ivIndex. During the loop, anything < vIndex should be + // valid stackRoots and anything >= ivIndex should be invalid stackRoots. + // The loop terminates when the two indices meet. + var vIndex, ivIndex int = work.nMaybeRunnableStackRoots, work.nStackRoots + // Reorder goroutine list + for vIndex < ivIndex { + if work.stackRoots[vIndex].isMaybeRunnable() { + vIndex = vIndex + 1 + continue + } + for ivIndex = ivIndex - 1; ivIndex != vIndex; ivIndex = ivIndex - 1 { + if gp := work.stackRoots[ivIndex]; gp.isMaybeRunnable() { + work.stackRoots[ivIndex] = work.stackRoots[vIndex] + work.stackRoots[vIndex] = gp + vIndex = vIndex + 1 + break + } + } + } + + newRootJobs := work.baseStacks + uint32(vIndex) + if newRootJobs > oldRootJobs { + work.nMaybeRunnableStackRoots = vIndex + work.markrootJobs.Store(newRootJobs) + } + return newRootJobs > oldRootJobs +} + +// setSyncObjectsUntraceable scans allgs and sets the elem and c fields of all sudogs to +// an untraceable pointer. This prevents the GC from marking these objects as live in memory +// by following these pointers when running deadlock detection. +func setSyncObjectsUntraceable() { + assertWorldStopped() + + forEachGRace(func(gp *g) { + // Set as untraceable all synchronization objects of goroutines + // blocked at concurrency operations that could leak.
+ switch { + case gp.waitreason.isSyncWait(): + // Synchronization primitives are reachable from the *sudog via + // the elem field. + for sg := gp.waiting; sg != nil; sg = sg.waitlink { + sg.elem.setUntraceable() + } + case gp.waitreason.isChanWait(): + // Channels and select statements are reachable from the *sudog via the c field. + for sg := gp.waiting; sg != nil; sg = sg.waitlink { + sg.c.setUntraceable() + } + } + }) +} + +// gcRestoreSyncObjects restores the elem and c fields of all sudogs to their original values. +// Should be invoked after the goroutine leak detection phase. +func gcRestoreSyncObjects() { + assertWorldStopped() + + forEachGRace(func(gp *g) { + for sg := gp.waiting; sg != nil; sg = sg.waitlink { + sg.elem.setTraceable() + sg.c.setTraceable() + } + }) +} + +// findGoroutineLeaks scans the remaining stackRoots and marks any which are +// blocked exclusively on unreachable concurrency primitives as leaked (deadlocked). +// Returns true if the goroutine leak check was performed (or unnecessary). +// Returns false if the GC cycle has not yet computed all maybe-runnable goroutines. +func findGoroutineLeaks() bool { + assertWorldStopped() + + // Report goroutine leaks and mark them unreachable, and resume marking. + // We still need to mark these unreachable *g structs as they + // get reused, but their stacks won't get scanned. + if work.nMaybeRunnableStackRoots == work.nStackRoots { + // nMaybeRunnableStackRoots == nStackRoots means that all goroutines are marked. + return true + } + + // Check whether any more maybe-runnable goroutines can be found by the GC. + if findMaybeRunnableGoroutines() { + // We found more work, so we need to resume the marking phase. + return false + } + + // For the remaining goroutines, mark them as unreachable and leaked. + work.goroutineLeak.count = work.nStackRoots - work.nMaybeRunnableStackRoots + + for i := work.nMaybeRunnableStackRoots; i < work.nStackRoots; i++ { + gp := work.stackRoots[i] + casgstatus(gp, _Gwaiting, _Gleaked) + + // Add the primitives causing the goroutine leaks + // to the GC work queue, to ensure they are marked. + // + // NOTE(vsaioc): these primitives should also be reachable + // from the goroutine's stack, but let's play it safe. + switch { + case gp.waitreason.isChanWait(): + for sg := gp.waiting; sg != nil; sg = sg.waitlink { + shade(sg.c.uintptr()) + } + case gp.waitreason.isSyncWait(): + for sg := gp.waiting; sg != nil; sg = sg.waitlink { + shade(sg.elem.uintptr()) + } + } + } + // Put the remaining roots as ready for marking and drain them. + work.markrootJobs.Add(int32(work.nStackRoots - work.nMaybeRunnableStackRoots)) + work.nMaybeRunnableStackRoots = work.nStackRoots + return true +} + // World must be stopped and mark assists and background workers must be // disabled. func gcMarkTermination(stw worldStop) { @@ -1199,7 +1462,18 @@ func gcMarkTermination(stw worldStop) { throw("non-concurrent sweep failed to drain all sweep queues") } + if work.goroutineLeak.enabled { + // Restore the elem and c fields of all sudogs to their original values. + gcRestoreSyncObjects() + } + + var goroutineLeakDone bool systemstack(func() { + // Pull the GC out of goroutine leak detection mode. + work.goroutineLeak.enabled = false + goroutineLeakDone = work.goroutineLeak.done + work.goroutineLeak.done = false + + // The memstats updated above must be updated with the world // stopped to ensure consistency of some values, such as // sched.idleTime and sched.totaltime.
memstats also include @@ -1273,7 +1547,11 @@ func gcMarkTermination(stw worldStop) { printlock() print("gc ", memstats.numgc, " @", string(itoaDiv(sbuf[:], uint64(work.tSweepTerm-runtimeInitTime)/1e6, 3)), "s ", - util, "%: ") + util, "%") + if goroutineLeakDone { + print(" (checking for goroutine leaks)") + } + print(": ") prev := work.tSweepTerm for i, ns := range []int64{work.tMark, work.tMarkTerm, work.tEnd} { if i != 0 { @@ -1647,8 +1925,8 @@ func gcMark(startTime int64) { work.tstart = startTime // Check that there's no marking work remaining. - if work.full != 0 || work.markrootNext < work.markrootJobs { - print("runtime: full=", hex(work.full), " next=", work.markrootNext, " jobs=", work.markrootJobs, " nDataRoots=", work.nDataRoots, " nBSSRoots=", work.nBSSRoots, " nSpanRoots=", work.nSpanRoots, " nStackRoots=", work.nStackRoots, "\n") + if next, jobs := work.markrootNext.Load(), work.markrootJobs.Load(); work.full != 0 || next < jobs { + print("runtime: full=", hex(work.full), " next=", next, " jobs=", jobs, " nDataRoots=", work.nDataRoots, " nBSSRoots=", work.nBSSRoots, " nSpanRoots=", work.nSpanRoots, " nStackRoots=", work.nStackRoots, "\n") panic("non-empty mark queue after concurrent mark") } @@ -1768,8 +2046,7 @@ func gcSweep(mode gcMode) bool { prepareFreeWorkbufs() for freeSomeWbufs(false) { } - for freeSomeSpanSPMCs(false) { - } + freeDeadSpanSPMCs() // All "free" events for this mark/sweep cycle have // now happened, so we can make this profile cycle // available immediately. diff --git a/src/runtime/mgcmark.go b/src/runtime/mgcmark.go index bb5fcabab37db8..ba3824f00dc9cb 100644 --- a/src/runtime/mgcmark.go +++ b/src/runtime/mgcmark.go @@ -53,6 +53,55 @@ const ( pagesPerSpanRoot = min(512, pagesPerArena) ) +// internalBlocked returns true if the goroutine is blocked due to an +// internal (non-leaking) waitReason, e.g. waiting for the netpoller or garbage collector. +// Such goroutines are never leak detection candidates according to the GC. +// +//go:nosplit +func (gp *g) internalBlocked() bool { + reason := gp.waitreason + return reason < waitReasonChanReceiveNilChan || waitReasonSyncWaitGroupWait < reason +} + +// allGsSnapshotSortedForGC takes a snapshot of allgs and returns a sorted +// array of Gs. The array is sorted by the G's status, with running Gs +// first, followed by blocked Gs. The returned index indicates the cutoff +// between runnable and blocked Gs. +// +// The world must be stopped or allglock must be held. +func allGsSnapshotSortedForGC() ([]*g, int) { + assertWorldStoppedOrLockHeld(&allglock) + + // Reset the status of leaked goroutines in order to improve + // the precision of goroutine leak detection. + for _, gp := range allgs { + gp.atomicstatus.CompareAndSwap(_Gleaked, _Gwaiting) + } + + allgsSorted := make([]*g, len(allgs)) + + // Indices cutting off runnable and blocked Gs. + var currIndex, blockedIndex = 0, len(allgsSorted) - 1 + for _, gp := range allgs { + // not sure if we need atomic load because we are stopping the world, + // but do it just to be safe for now + if status := readgstatus(gp); status != _Gwaiting || gp.internalBlocked() { + allgsSorted[currIndex] = gp + currIndex++ + } else { + allgsSorted[blockedIndex] = gp + blockedIndex-- + } + } + + // Because the world is stopped or allglock is held, allgadd + // cannot happen concurrently with this. allgs grows + // monotonically and existing entries never change, so we can + // simply return a copy of the slice header. 
For added safety, + // we trim everything past len because that can still change. + return allgsSorted, blockedIndex + 1 +} + // gcPrepareMarkRoots queues root scanning jobs (stacks, globals, and // some miscellany) and initializes scanning-related state. // @@ -102,11 +151,20 @@ func gcPrepareMarkRoots() { // ignore them because they begin life without any roots, so // there's nothing to scan, and any roots they create during // the concurrent phase will be caught by the write barrier. - work.stackRoots = allGsSnapshot() + if work.goroutineLeak.enabled { + // goroutine leak finder GC --- only prepare runnable + // goroutines for marking. + work.stackRoots, work.nMaybeRunnableStackRoots = allGsSnapshotSortedForGC() + } else { + // regular GC --- scan every goroutine + work.stackRoots = allGsSnapshot() + work.nMaybeRunnableStackRoots = len(work.stackRoots) + } + work.nStackRoots = len(work.stackRoots) - work.markrootNext = 0 - work.markrootJobs = uint32(fixedRootCount + work.nDataRoots + work.nBSSRoots + work.nSpanRoots + work.nStackRoots) + work.markrootNext.Store(0) + work.markrootJobs.Store(uint32(fixedRootCount + work.nDataRoots + work.nBSSRoots + work.nSpanRoots + work.nMaybeRunnableStackRoots)) // Calculate base indexes of each root type work.baseData = uint32(fixedRootCount) @@ -119,8 +177,8 @@ func gcPrepareMarkRoots() { // gcMarkRootCheck checks that all roots have been scanned. It is // purely for debugging. func gcMarkRootCheck() { - if work.markrootNext < work.markrootJobs { - print(work.markrootNext, " of ", work.markrootJobs, " markroot jobs done\n") + if next, jobs := work.markrootNext.Load(), work.markrootJobs.Load(); next < jobs { + print(next, " of ", jobs, " markroot jobs done\n") throw("left over markroot jobs") } @@ -858,7 +916,7 @@ func scanstack(gp *g, gcw *gcWork) int64 { case _Grunning: print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n") throw("scanstack: goroutine not stopped") - case _Grunnable, _Gsyscall, _Gwaiting: + case _Grunnable, _Gsyscall, _Gwaiting, _Gleaked: // ok } @@ -1126,6 +1184,28 @@ func gcDrainMarkWorkerFractional(gcw *gcWork) { gcDrain(gcw, gcDrainFractional|gcDrainUntilPreempt|gcDrainFlushBgCredit) } +// gcNextMarkRoot safely increments work.markrootNext and returns the +// index of the next root job. The returned boolean is true if the root job +// is valid, and false if there are no more root jobs to be claimed, +// i.e. work.markrootNext >= work.markrootJobs. +func gcNextMarkRoot() (uint32, bool) { + if !work.goroutineLeak.enabled { + // If not running goroutine leak detection, assume regular GC behavior. + job := work.markrootNext.Add(1) - 1 + return job, job < work.markrootJobs.Load() + } + + // Otherwise, use a CAS loop to increment markrootNext. + for next, jobs := work.markrootNext.Load(), work.markrootJobs.Load(); next < jobs; next = work.markrootNext.Load() { + // There is still work available at the moment. + if work.markrootNext.CompareAndSwap(next, next+1) { + // We manage to snatch a root job. Return the root index. + return next, true + } + } + return 0, false +} + // gcDrain scans roots and objects in work buffers, blackening grey // objects until it is unable to get more work. It may return before // GC is done; it's the caller's responsibility to balance work from @@ -1184,13 +1264,12 @@ func gcDrain(gcw *gcWork, flags gcDrainFlags) { } } - // Drain root marking jobs. 
- if work.markrootNext < work.markrootJobs { + if work.markrootNext.Load() < work.markrootJobs.Load() { // Stop if we're preemptible, if someone wants to STW, or if // someone is calling forEachP. for !(gp.preempt && (preemptible || sched.gcwaiting.Load() || pp.runSafePointFn != 0)) { - job := atomic.Xadd(&work.markrootNext, +1) - 1 - if job >= work.markrootJobs { + job, ok := gcNextMarkRoot() + if !ok { break } markroot(gcw, job, flushBgCredit) @@ -1342,9 +1421,9 @@ func gcDrainN(gcw *gcWork, scanWork int64) int64 { if b = gcw.tryGetObj(); b == 0 { if s = gcw.tryGetSpan(); s == 0 { // Try to do a root job. - if work.markrootNext < work.markrootJobs { - job := atomic.Xadd(&work.markrootNext, +1) - 1 - if job < work.markrootJobs { + if work.markrootNext.Load() < work.markrootJobs.Load() { + job, ok := gcNextMarkRoot() + if ok { workFlushed += markroot(gcw, job, false) continue } diff --git a/src/runtime/mgcmark_greenteagc.go b/src/runtime/mgcmark_greenteagc.go index 53fcd3d966ec3b..7b78611cf7b6fe 100644 --- a/src/runtime/mgcmark_greenteagc.go +++ b/src/runtime/mgcmark_greenteagc.go @@ -618,6 +618,48 @@ func (q *spanQueue) refill(r *spanSPMC) objptr { return q.tryGetFast() } +// destroy frees all chains in an empty spanQueue. +// +// Preconditions: +// - World is stopped. +// - GC is outside of the mark phase. +// - (Therefore) the queue is empty. +func (q *spanQueue) destroy() { + assertWorldStopped() + if gcphase != _GCoff { + throw("spanQueue.destroy during the mark phase") + } + if !q.empty() { + throw("spanQueue.destroy on non-empty queue") + } + + lock(&work.spanSPMCs.lock) + + // Remove and free each ring. + for r := (*spanSPMC)(q.chain.tail.Load()); r != nil; r = (*spanSPMC)(r.prev.Load()) { + prev := r.allprev + next := r.allnext + if prev != nil { + prev.allnext = next + } + if next != nil { + next.allprev = prev + } + if work.spanSPMCs.all == r { + work.spanSPMCs.all = next + } + + r.deinit() + mheap_.spanSPMCAlloc.free(unsafe.Pointer(r)) + } + + q.chain.head = nil + q.chain.tail.Store(nil) + q.putsSinceDrain = 0 + + unlock(&work.spanSPMCs.lock) +} + // spanSPMC is a ring buffer of objptrs that represent spans. // Accessed without a lock. // @@ -651,6 +693,11 @@ type spanSPMC struct { // work.spanSPMCs.lock. allnext *spanSPMC + // allprev is the link to the previous spanSPMC on the work.spanSPMCs + // list. This is used to find and free dead spanSPMCs. Protected by + // work.spanSPMCs.lock. + allprev *spanSPMC + // dead indicates whether the spanSPMC is no longer in use. // Protected by the CAS to the prev field of the spanSPMC pointing // to this spanSPMC. That is, whoever wins that CAS takes ownership @@ -677,7 +724,11 @@ type spanSPMC struct { func newSpanSPMC(cap uint32) *spanSPMC { lock(&work.spanSPMCs.lock) r := (*spanSPMC)(mheap_.spanSPMCAlloc.alloc()) - r.allnext = work.spanSPMCs.all + next := work.spanSPMCs.all + r.allnext = next + if next != nil { + next.allprev = r + } work.spanSPMCs.all = r unlock(&work.spanSPMCs.lock) @@ -714,6 +765,8 @@ func (r *spanSPMC) deinit() { r.head.Store(0) r.tail.Store(0) r.cap = 0 + r.allnext = nil + r.allprev = nil } // slot returns a pointer to slot i%r.cap. @@ -722,13 +775,8 @@ func (r *spanSPMC) slot(i uint32) *objptr { return (*objptr)(unsafe.Add(unsafe.Pointer(r.ring), idx*unsafe.Sizeof(objptr(0)))) } -// freeSomeSpanSPMCs frees some spanSPMCs back to the OS and returns -// true if it should be called again to free more. 
-func freeSomeSpanSPMCs(preemptible bool) bool { - // TODO(mknyszek): This is arbitrary, but some kind of limit is necessary - // to help bound delays to cooperatively preempt ourselves. - const batchSize = 64 - +// freeDeadSpanSPMCs frees dead spanSPMCs back to the OS. +func freeDeadSpanSPMCs() { // According to the SPMC memory management invariants, we can only free // spanSPMCs outside of the mark phase. We ensure we do this in two ways. // @@ -740,33 +788,39 @@ func freeSomeSpanSPMCs(preemptible bool) bool { // // This way, we ensure that we don't start freeing if we're in the wrong // phase, and the phase can't change on us while we're freeing. + // + // TODO(go.dev/issue/75771): Due to the grow semantics in + // spanQueue.drain, we expect a steady-state of around one spanSPMC per + // P, with some spikes higher when Ps have more than one. For high + // GOMAXPROCS, or if this list otherwise gets long, it would be nice to + // have a way to batch work that allows preemption during processing. lock(&work.spanSPMCs.lock) if gcphase != _GCoff || work.spanSPMCs.all == nil { unlock(&work.spanSPMCs.lock) - return false + return } - rp := &work.spanSPMCs.all - gp := getg() - more := true - for i := 0; i < batchSize && !(preemptible && gp.preempt); i++ { - r := *rp - if r == nil { - more = false - break - } + r := work.spanSPMCs.all + for r != nil { + next := r.allnext if r.dead.Load() { // It's dead. Deinitialize and free it. - *rp = r.allnext + prev := r.allprev + if prev != nil { + prev.allnext = next + } + if next != nil { + next.allprev = prev + } + if work.spanSPMCs.all == r { + work.spanSPMCs.all = next + } + r.deinit() mheap_.spanSPMCAlloc.free(unsafe.Pointer(r)) - } else { - // Still alive, likely in some P's chain. - // Skip it. - rp = &r.allnext } + r = next } unlock(&work.spanSPMCs.lock) - return more } // tryStealSpan attempts to steal a span from another P's local queue. @@ -1143,7 +1197,7 @@ func gcMarkWorkAvailable() bool { if !work.full.empty() { return true // global work available } - if work.markrootNext < work.markrootJobs { + if work.markrootNext.Load() < work.markrootJobs.Load() { return true // root scan work available } if work.spanqMask.any() { diff --git a/src/runtime/mgcmark_nogreenteagc.go b/src/runtime/mgcmark_nogreenteagc.go index e4505032917210..024565ef3e250d 100644 --- a/src/runtime/mgcmark_nogreenteagc.go +++ b/src/runtime/mgcmark_nogreenteagc.go @@ -63,12 +63,15 @@ func (q *spanQueue) empty() bool { return true } +func (q *spanQueue) destroy() { +} + type spanSPMC struct { _ sys.NotInHeap } -func freeSomeSpanSPMCs(preemptible bool) bool { - return false +func freeDeadSpanSPMCs() { + return } type objptr uintptr @@ -124,7 +127,7 @@ func gcMarkWorkAvailable() bool { if !work.full.empty() { return true // global work available } - if work.markrootNext < work.markrootJobs { + if work.markrootNext.Load() < work.markrootJobs.Load() { return true // root scan work available } return false diff --git a/src/runtime/mgcsweep.go b/src/runtime/mgcsweep.go index 364cdb58ccb0bc..c3d6afb90a54fe 100644 --- a/src/runtime/mgcsweep.go +++ b/src/runtime/mgcsweep.go @@ -307,10 +307,7 @@ func bgsweep(c chan int) { // N.B. freeSomeWbufs is already batched internally. goschedIfBusy() } - for freeSomeSpanSPMCs(true) { - // N.B. freeSomeSpanSPMCs is already batched internally. 
- goschedIfBusy() - } + freeDeadSpanSPMCs() lock(&sweep.lock) if !isSweepDone() { // This can happen if a GC runs between diff --git a/src/runtime/mkpreempt.go b/src/runtime/mkpreempt.go index 769c4ffc5c9eeb..7b84ba0a6fbaaf 100644 --- a/src/runtime/mkpreempt.go +++ b/src/runtime/mkpreempt.go @@ -163,19 +163,21 @@ package runtime type xRegs struct { `) pos := 0 - for _, reg := range l.regs { - if reg.pos != pos { - log.Fatalf("padding not implemented") - } - typ := fmt.Sprintf("[%d]byte", reg.size) - switch { - case reg.size == 4 && reg.pos%4 == 0: - typ = "uint32" - case reg.size == 8 && reg.pos%8 == 0: - typ = "uint64" + for _, seq := range l.regs { + for _, r := range seq.regs { + if r.pos != pos && !seq.fixedOffset { + log.Fatalf("padding not implemented") + } + typ := fmt.Sprintf("[%d]byte", r.size) + switch { + case r.size == 4 && r.pos%4 == 0: + typ = "uint32" + case r.size == 8 && r.pos%8 == 0: + typ = "uint64" + } + fmt.Fprintf(g.w, "\t%s %s\n", r.name, typ) + pos += r.size } - fmt.Fprintf(g.w, "\t%s %s\n", reg.reg, typ) - pos += reg.size } fmt.Fprintf(g.w, "}\n") @@ -191,16 +193,61 @@ type xRegs struct { type layout struct { stack int - regs []regPos + regs []regSeq sp string // stack pointer register } -type regPos struct { - pos, size int +type regInfo struct { + size int // register size in bytes + name string // register name + + // Some register names may require a specific suffix. + // In ARM64, a suffix called an "arrangement specifier" can be added to + // a register name. For example: + // + // V0.B16 + // + // In this case, "V0" is the register name, and ".B16" is the suffix. + suffix string + pos int // position on stack +} + +// Some save/restore operations can involve multiple registers in a single +// instruction. For example, the LDP/STP instructions in ARM64: +// +// LDP 8(RSP), (R0, R1) +// STP (R0, R1), 8(RSP) +// +// In these cases, a pair of registers (R0, R1) is used as a single argument. +type regSeq struct { saveOp string restoreOp string - reg string + regs []regInfo + + // By default, all registers are saved on the stack, and the stack pointer offset + // is calculated based on the size of each register. For example (ARM64): + // + // STP (R0, R1), 8(RSP) + // STP (R2, R3), 24(RSP) + // + // However, automatic offset calculation may not always be desirable. + // In some cases, the offset must remain fixed: + // + // VST1.P [V0.B16, V1.B16, V2.B16, V3.B16], 64(R0) + // VST1.P [V4.B16, V5.B16, V6.B16, V7.B16], 64(R0) + // + // In this example, R0 is post-incremented after each instruction, + // so the offset should not be recalculated. For such cases, + // `fixedOffset` is set to true. + fixedOffset bool + + // After conversion to a string, register names are separated by commas + // and may be wrapped in a custom pair of brackets. 
For example (ARM64): + // + // (R0, R1) // wrapped in parentheses + // [V0.B16, V1.B16, V2.B16, V3.B16] // wrapped in square brackets + brackets [2]string // If this register requires special save and restore, these // give those operations with a %d placeholder for the stack @@ -208,42 +255,97 @@ type regPos struct { save, restore string } -func (l *layout) add(op, reg string, size int) { - l.regs = append(l.regs, regPos{saveOp: op, restoreOp: op, reg: reg, pos: l.stack, size: size}) +func (l *layout) add(op, regname string, size int) { + l.regs = append(l.regs, regSeq{saveOp: op, restoreOp: op, regs: []regInfo{{size, regname, "", l.stack}}}) l.stack += size } -func (l *layout) add2(sop, rop, reg string, size int) { - l.regs = append(l.regs, regPos{saveOp: sop, restoreOp: rop, reg: reg, pos: l.stack, size: size}) - l.stack += size +func (l *layout) add2(sop, rop string, regs []regInfo, brackets [2]string, fixedOffset bool) { + l.regs = append(l.regs, regSeq{saveOp: sop, restoreOp: rop, regs: regs, brackets: brackets, fixedOffset: fixedOffset}) + if !fixedOffset { + for i := range regs { + regs[i].pos = l.stack + l.stack += regs[i].size + } + } } func (l *layout) addSpecial(save, restore string, size int) { - l.regs = append(l.regs, regPos{save: save, restore: restore, pos: l.stack, size: size}) + l.regs = append(l.regs, regSeq{save: save, restore: restore, regs: []regInfo{{size, "", "", l.stack}}}) l.stack += size } +func (rs *regSeq) String() string { + switch len(rs.regs) { + case 0: + log.Fatal("Register sequence must not be empty!") + case 1: + return rs.regs[0].name + default: + names := make([]string, 0) + for _, r := range rs.regs { + name := r.name + r.suffix + names = append(names, name) + } + return rs.brackets[0] + strings.Join(names, ", ") + rs.brackets[1] + } + return "" +} + func (l *layout) save(g *gen) { - for _, reg := range l.regs { - if reg.save != "" { - g.p(reg.save, reg.pos) + for _, seq := range l.regs { + if len(seq.regs) < 1 { + log.Fatal("Register sequence must not be empty!") + } + // When dealing with a sequence of registers, we assume that only the position + // of the first register is relevant. For example: + // + // STP (R0, R1), 8(RSP) + // STP (R2, R3), 24(RSP) + // + // Here, R0.pos is 8. While we can infer that R1.pos is 16, it doesn't need to + // be explicitly specified, as the STP instruction calculates it automatically. 
+ pos := seq.regs[0].pos + if seq.save != "" { + g.p(seq.save, pos) } else { - g.p("%s %s, %d(%s)", reg.saveOp, reg.reg, reg.pos, l.sp) + name := seq.String() + g.p("%s %s, %d(%s)", seq.saveOp, name, pos, l.sp) } } } -func (l *layout) restore(g *gen) { - for i := len(l.regs) - 1; i >= 0; i-- { - reg := l.regs[i] +func (l *layout) restoreInOrder(g *gen, reverse bool) { + var seq []regSeq + if reverse { + seq = make([]regSeq, 0) + for i := len(l.regs) - 1; i >= 0; i-- { + seq = append(seq, l.regs[i]) + } + } else { + seq = l.regs + } + for _, reg := range seq { + if len(reg.regs) < 1 { + log.Fatal("Register sequence must not be empty!") + } + pos := reg.regs[0].pos if reg.restore != "" { - g.p(reg.restore, reg.pos) + g.p(reg.restore, pos) } else { - g.p("%s %d(%s), %s", reg.restoreOp, reg.pos, l.sp, reg.reg) + g.p("%s %d(%s), %s", reg.restoreOp, pos, l.sp, reg.String()) } } } +func (l *layout) restore(g *gen) { + l.restoreInOrder(g, true) +} + +func (l *layout) restoreDirect(g *gen) { + l.restoreInOrder(g, false) +} + func gen386(g *gen) { p := g.p @@ -320,8 +422,11 @@ func genAMD64(g *gen) { // We don't have to do this, but it results in a nice Go type. If we split // this into multiple types, we probably should stop doing this. for i := range lXRegs.regs { - lXRegs.regs[i].pos = lZRegs.regs[i].pos - lYRegs.regs[i].pos = lZRegs.regs[i].pos + for j := range lXRegs.regs[i].regs { + lXRegs.regs[i].regs[j].pos = lZRegs.regs[i].regs[j].pos + lYRegs.regs[i].regs[j].pos = lZRegs.regs[i].regs[j].pos + } + } writeXRegs(g.goarch, &lZRegs) @@ -456,6 +561,7 @@ func genARM(g *gen) { } func genARM64(g *gen) { + const vReg = "R0" // *xRegState p := g.p // Add integer registers R0-R26 // R27 (REGTMP), R28 (g), R29 (FP), R30 (LR), R31 (SP) are special @@ -466,8 +572,11 @@ func genARM64(g *gen) { i-- continue // R18 is not used, skip } - reg := fmt.Sprintf("(R%d, R%d)", i, i+1) - l.add2("STP", "LDP", reg, 16) + regs := []regInfo{ + {name: fmt.Sprintf("R%d", i), size: 8}, + {name: fmt.Sprintf("R%d", i+1), size: 8}, + } + l.add2("STP", "LDP", regs, [2]string{"(", ")"}, false) } // Add flag registers. l.addSpecial( @@ -480,10 +589,17 @@ func genARM64(g *gen) { 8) // TODO: FPCR? I don't think we'll change it, so no need to save. // Add floating point registers F0-F31. 
- for i := 0; i < 31; i += 2 { - reg := fmt.Sprintf("(F%d, F%d)", i, i+1) - l.add2("FSTPD", "FLDPD", reg, 16) + lVRegs := layout{sp: vReg} // Non-GP registers + for i := 0; i < 31; i += 4 { + regs := []regInfo{ + {name: fmt.Sprintf("V%d", i), suffix: ".B16", size: 16, pos: 64}, + {name: fmt.Sprintf("V%d", i+1), suffix: ".B16", size: 16, pos: 64}, + {name: fmt.Sprintf("V%d", i+2), suffix: ".B16", size: 16, pos: 64}, + {name: fmt.Sprintf("V%d", i+3), suffix: ".B16", size: 16, pos: 64}, + } + lVRegs.add2("VST1.P", "VLD1.P", regs, [2]string{"[", "]"}, true) } + writeXRegs(g.goarch, &lVRegs) if l.stack%16 != 0 { l.stack += 8 // SP needs 16-byte alignment } @@ -500,8 +616,20 @@ func genARM64(g *gen) { p("MOVD R30, (RSP)") p("#endif") + p("// Save GPs") l.save(g) + p("// Save extended register state to p.xRegs.scratch") + p("MOVD g_m(g), %s", vReg) + p("MOVD m_p(%s), %s", vReg, vReg) + p("ADD $(p_xRegs+xRegPerP_scratch), %s, %s", vReg, vReg) + lVRegs.save(g) p("CALL ·asyncPreempt2(SB)") + p("// Restore non-GPs from *p.xRegs.cache") + p("MOVD g_m(g), %s", vReg) + p("MOVD m_p(%s), %s", vReg, vReg) + p("MOVD (p_xRegs+xRegPerP_cache)(%s), %s", vReg, vReg) + lVRegs.restoreDirect(g) + p("// Restore GPs") l.restore(g) p("MOVD %d(RSP), R30", l.stack) // sigctxt.pushCall has pushed LR (at interrupt) on stack, restore it diff --git a/src/runtime/mpagealloc_64bit.go b/src/runtime/mpagealloc_64bit.go index eb425f070445dd..2e3643004bc858 100644 --- a/src/runtime/mpagealloc_64bit.go +++ b/src/runtime/mpagealloc_64bit.go @@ -180,9 +180,6 @@ func (p *pageAlloc) sysGrow(base, limit uintptr) { sysUsed(unsafe.Pointer(need.base.addr()), need.size(), need.size()) p.summaryMappedReady += need.size() } - - // Update the scavenge index. - p.summaryMappedReady += p.scav.index.sysGrow(base, limit, p.sysStat) } // sysGrow increases the index's backing store in response to a heap growth. diff --git a/src/runtime/mprof.go b/src/runtime/mprof.go index 97b29076523d5d..794f57cee24a7c 100644 --- a/src/runtime/mprof.go +++ b/src/runtime/mprof.go @@ -444,7 +444,7 @@ func mProf_Malloc(mp *m, p unsafe.Pointer, size uintptr) { } // Only use the part of mp.profStack we need and ignore the extra space // reserved for delayed inline expansion with frame pointer unwinding. - nstk := callers(5, mp.profStack[:debug.profstackdepth]) + nstk := callers(3, mp.profStack[:debug.profstackdepth+2]) index := (mProfCycle.read() + 2) % uint32(len(memRecord{}.future)) b := stkbucket(memProfile, size, mp.profStack[:nstk], true) @@ -1259,6 +1259,20 @@ func goroutineProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.P return goroutineProfileWithLabelsConcurrent(p, labels) } +//go:linkname pprof_goroutineLeakProfileWithLabels +func pprof_goroutineLeakProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) { + return goroutineLeakProfileWithLabelsConcurrent(p, labels) +} + +// labels may be nil. If labels is non-nil, it must have the same length as p. 
+func goroutineLeakProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) { + if labels != nil && len(labels) != len(p) { + labels = nil + } + + return goroutineLeakProfileWithLabelsConcurrent(p, labels) +} + var goroutineProfile = struct { sema uint32 active bool @@ -1302,6 +1316,48 @@ func (p *goroutineProfileStateHolder) CompareAndSwap(old, new goroutineProfileSt return (*atomic.Uint32)(p).CompareAndSwap(uint32(old), uint32(new)) } +func goroutineLeakProfileWithLabelsConcurrent(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) { + if len(p) == 0 { + // An empty slice is obviously too small. Return a rough + // allocation estimate. + return work.goroutineLeak.count, false + } + + // Use the same semaphore as goroutineProfileWithLabelsConcurrent, + // because ultimately we still use goroutine profiles. + semacquire(&goroutineProfile.sema) + + // Unlike in goroutineProfileWithLabelsConcurrent, we don't need to + // save the current goroutine stack, because it is obviously not leaked. + + pcbuf := makeProfStack() // see saveg() for explanation + + // Prepare a profile large enough to store all leaked goroutines. + n = work.goroutineLeak.count + + if n > len(p) { + // There's not enough space in p to store the whole profile, so (per the + // contract of runtime.GoroutineProfile) we're not allowed to write to p + // at all and must return n, false. + semrelease(&goroutineProfile.sema) + return n, false + } + + // Visit each leaked goroutine and try to record its stack. + forEachGRace(func(gp1 *g) { + if readgstatus(gp1) == _Gleaked { + doRecordGoroutineProfile(gp1, pcbuf) + } + }) + + if raceenabled { + raceacquire(unsafe.Pointer(&labelSync)) + } + + semrelease(&goroutineProfile.sema) + return n, true +} + func goroutineProfileWithLabelsConcurrent(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) { if len(p) == 0 { // An empty slice is obviously too small. Return a rough diff --git a/src/runtime/pprof/mprof_test.go b/src/runtime/pprof/mprof_test.go index 7c4a37e3c996fb..456ba9044498af 100644 --- a/src/runtime/pprof/mprof_test.go +++ b/src/runtime/pprof/mprof_test.go @@ -97,25 +97,25 @@ func TestMemoryProfiler(t *testing.T) { legacy string }{{ stk: []string{"runtime/pprof.allocatePersistent1K", "runtime/pprof.TestMemoryProfiler"}, - legacy: fmt.Sprintf(`%v: %v \[%v: %v\] @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ + legacy: fmt.Sprintf(`%v: %v \[%v: %v\] @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+( 0x[0-9,a-f]+ 0x[0-9,a-f]+)? 
# 0x[0-9,a-f]+ runtime/pprof\.allocatePersistent1K\+0x[0-9,a-f]+ .*runtime/pprof/mprof_test\.go:48 # 0x[0-9,a-f]+ runtime/pprof\.TestMemoryProfiler\+0x[0-9,a-f]+ .*runtime/pprof/mprof_test\.go:87 `, 32*memoryProfilerRun, 1024*memoryProfilerRun, 32*memoryProfilerRun, 1024*memoryProfilerRun), }, { stk: []string{"runtime/pprof.allocateTransient1M", "runtime/pprof.TestMemoryProfiler"}, - legacy: fmt.Sprintf(`0: 0 \[%v: %v\] @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ + legacy: fmt.Sprintf(`0: 0 \[%v: %v\] @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ # 0x[0-9,a-f]+ runtime/pprof\.allocateTransient1M\+0x[0-9,a-f]+ .*runtime/pprof/mprof_test.go:25 # 0x[0-9,a-f]+ runtime/pprof\.TestMemoryProfiler\+0x[0-9,a-f]+ .*runtime/pprof/mprof_test.go:84 `, (1<<10)*memoryProfilerRun, (1<<20)*memoryProfilerRun), }, { stk: []string{"runtime/pprof.allocateTransient2M", "runtime/pprof.TestMemoryProfiler"}, - legacy: fmt.Sprintf(`0: 0 \[%v: %v\] @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ + legacy: fmt.Sprintf(`0: 0 \[%v: %v\] @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ # 0x[0-9,a-f]+ runtime/pprof\.allocateTransient2M\+0x[0-9,a-f]+ .*runtime/pprof/mprof_test.go:31 # 0x[0-9,a-f]+ runtime/pprof\.TestMemoryProfiler\+0x[0-9,a-f]+ .*runtime/pprof/mprof_test.go:85 `, memoryProfilerRun, (2<<20)*memoryProfilerRun), }, { stk: []string{"runtime/pprof.allocateTransient2MInline", "runtime/pprof.TestMemoryProfiler"}, - legacy: fmt.Sprintf(`0: 0 \[%v: %v\] @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ + legacy: fmt.Sprintf(`0: 0 \[%v: %v\] @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ # 0x[0-9,a-f]+ runtime/pprof\.allocateTransient2MInline\+0x[0-9,a-f]+ .*runtime/pprof/mprof_test.go:35 # 0x[0-9,a-f]+ runtime/pprof\.TestMemoryProfiler\+0x[0-9,a-f]+ .*runtime/pprof/mprof_test.go:86 `, memoryProfilerRun, (2<<20)*memoryProfilerRun), diff --git a/src/runtime/pprof/pprof.go b/src/runtime/pprof/pprof.go index 55563009b3a2c1..b524e992b8b209 100644 --- a/src/runtime/pprof/pprof.go +++ b/src/runtime/pprof/pprof.go @@ -80,6 +80,7 @@ import ( "cmp" "fmt" "internal/abi" + "internal/goexperiment" "internal/profilerecord" "io" "runtime" @@ -105,12 +106,13 @@ import ( // // Each Profile has a unique name. A few profiles are predefined: // -// goroutine - stack traces of all current goroutines -// heap - a sampling of memory allocations of live objects -// allocs - a sampling of all past memory allocations -// threadcreate - stack traces that led to the creation of new OS threads -// block - stack traces that led to blocking on synchronization primitives -// mutex - stack traces of holders of contended mutexes +// goroutine - stack traces of all current goroutines +// goroutineleak - stack traces of all leaked goroutines +// allocs - a sampling of all past memory allocations +// heap - a sampling of memory allocations of live objects +// threadcreate - stack traces that led to the creation of new OS threads +// block - stack traces that led to blocking on synchronization primitives +// mutex - stack traces of holders of contended mutexes // // These predefined profiles maintain themselves and panic on an explicit // [Profile.Add] or [Profile.Remove] method call. @@ -169,6 +171,7 @@ import ( // holds a lock for 1s while 5 other goroutines are waiting for the entire // second to acquire the lock, its unlock call stack will report 5s of // contention. 
+ type Profile struct { name string mu sync.Mutex @@ -189,6 +192,12 @@ var goroutineProfile = &Profile{ write: writeGoroutine, } +var goroutineLeakProfile = &Profile{ + name: "goroutineleak", + count: runtime_goroutineleakcount, + write: writeGoroutineLeak, +} + var threadcreateProfile = &Profile{ name: "threadcreate", count: countThreadCreate, @@ -231,6 +240,9 @@ func lockProfiles() { "block": blockProfile, "mutex": mutexProfile, } + if goexperiment.GoroutineLeakProfile { + profiles.m["goroutineleak"] = goroutineLeakProfile + } } } @@ -275,6 +287,7 @@ func Profiles() []*Profile { all := make([]*Profile, 0, len(profiles.m)) for _, p := range profiles.m { + all = append(all, p) } @@ -747,6 +760,23 @@ func writeGoroutine(w io.Writer, debug int) error { return writeRuntimeProfile(w, debug, "goroutine", pprof_goroutineProfileWithLabels) } +// writeGoroutineLeak first invokes a GC cycle that performs goroutine leak detection. +// It then writes the goroutine profile, filtering for leaked goroutines. +func writeGoroutineLeak(w io.Writer, debug int) error { + // Run the GC with leak detection first so that leaked goroutines + // may transition to the leaked state. + runtime_goroutineLeakGC() + + // If the debug flag is set sufficiently high, just defer to writing goroutine stacks + // like in a regular goroutine profile. Include non-leaked goroutines, too. + if debug >= 2 { + return writeGoroutineStacks(w) + } + + // Otherwise, write the goroutine leak profile. + return writeRuntimeProfile(w, debug, "goroutineleak", pprof_goroutineLeakProfileWithLabels) +} + func writeGoroutineStacks(w io.Writer) error { // We don't know how big the buffer needs to be to collect // all the goroutines. Start with 1 MB and try a few times, doubling each time. @@ -969,6 +999,9 @@ func writeProfileInternal(w io.Writer, debug int, name string, runtimeProfile fu //go:linkname pprof_goroutineProfileWithLabels runtime.pprof_goroutineProfileWithLabels func pprof_goroutineProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) +//go:linkname pprof_goroutineLeakProfileWithLabels runtime.pprof_goroutineLeakProfileWithLabels +func pprof_goroutineLeakProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) + //go:linkname pprof_cyclesPerSecond runtime/pprof.runtime_cyclesPerSecond func pprof_cyclesPerSecond() int64 diff --git a/src/runtime/pprof/pprof_test.go b/src/runtime/pprof/pprof_test.go index 25a2f3b32415d7..b816833e52285f 100644 --- a/src/runtime/pprof/pprof_test.go +++ b/src/runtime/pprof/pprof_test.go @@ -344,6 +344,11 @@ func (h inlineWrapper) dump(pcs []uintptr) { func inlinedWrapperCallerDump(pcs []uintptr) { var h inlineWrapperInterface + + // Take the address of h, such that h.dump() call (below) + // does not get devirtualized by the compiler. 
+ _ = &h + h = &inlineWrapper{} h.dump(pcs) } @@ -2578,9 +2583,10 @@ func TestProfilerStackDepth(t *testing.T) { t.Logf("matched stack=%s", stk) if len(stk) != depth { t.Errorf("want stack depth = %d, got %d", depth, len(stk)) + continue } - if rootFn, wantFn := stk[depth-1], "runtime/pprof.produceProfileEvents"; rootFn != wantFn { + if rootFn, wantFn := stk[depth-1], "runtime/pprof.allocDeep"; rootFn != wantFn { t.Errorf("want stack stack root %s, got %v", wantFn, rootFn) } } diff --git a/src/runtime/pprof/proto_windows.go b/src/runtime/pprof/proto_windows.go index f4dc44bd078eac..3118e8911e28ec 100644 --- a/src/runtime/pprof/proto_windows.go +++ b/src/runtime/pprof/proto_windows.go @@ -67,8 +67,7 @@ func readMainModuleMapping() (start, end uint64, exe, buildID string, err error) func createModuleSnapshot() (syscall.Handle, error) { for { snap, err := syscall.CreateToolhelp32Snapshot(windows.TH32CS_SNAPMODULE|windows.TH32CS_SNAPMODULE32, uint32(syscall.Getpid())) - var errno syscall.Errno - if err != nil && errors.As(err, &errno) && errno == windows.ERROR_BAD_LENGTH { + if errno, ok := errors.AsType[syscall.Errno](err); ok && errno == windows.ERROR_BAD_LENGTH { // When CreateToolhelp32Snapshot(SNAPMODULE|SNAPMODULE32, ...) fails // with ERROR_BAD_LENGTH then it should be retried until it succeeds. continue diff --git a/src/runtime/pprof/runtime.go b/src/runtime/pprof/runtime.go index 8d37c7d3add146..690cab81ab5a5a 100644 --- a/src/runtime/pprof/runtime.go +++ b/src/runtime/pprof/runtime.go @@ -29,6 +29,12 @@ func runtime_setProfLabel(labels unsafe.Pointer) // runtime_getProfLabel is defined in runtime/proflabel.go. func runtime_getProfLabel() unsafe.Pointer +// runtime_goroutineleakcount is defined in runtime/proc.go. +func runtime_goroutineleakcount() int + +// runtime_goroutineLeakGC is defined in runtime/mgc.go. +func runtime_goroutineLeakGC() + // SetGoroutineLabels sets the current goroutine's labels to match ctx. // A new goroutine inherits the labels of the goroutine that created it. // This is a lower-level API than [Do], which should be used instead when possible. diff --git a/src/runtime/preempt.go b/src/runtime/preempt.go index 22727df74eead2..5367f66213804b 100644 --- a/src/runtime/preempt.go +++ b/src/runtime/preempt.go @@ -160,7 +160,7 @@ func suspendG(gp *g) suspendGState { s = _Gwaiting fallthrough - case _Grunnable, _Gsyscall, _Gwaiting: + case _Grunnable, _Gsyscall, _Gwaiting, _Gleaked: // Claim goroutine by setting scan bit. // This may race with execution or readying of gp. // The scan bit keeps it from transition state. @@ -269,6 +269,7 @@ func resumeG(state suspendGState) { case _Grunnable | _Gscan, _Gwaiting | _Gscan, + _Gleaked | _Gscan, _Gsyscall | _Gscan: casfrom_Gscanstatus(gp, s, s&^_Gscan) } diff --git a/src/runtime/preempt_arm64.go b/src/runtime/preempt_arm64.go new file mode 100644 index 00000000000000..1b71d2713ea5d0 --- /dev/null +++ b/src/runtime/preempt_arm64.go @@ -0,0 +1,38 @@ +// Code generated by mkpreempt.go; DO NOT EDIT. 
+ +package runtime + +type xRegs struct { + V0 [16]byte + V1 [16]byte + V2 [16]byte + V3 [16]byte + V4 [16]byte + V5 [16]byte + V6 [16]byte + V7 [16]byte + V8 [16]byte + V9 [16]byte + V10 [16]byte + V11 [16]byte + V12 [16]byte + V13 [16]byte + V14 [16]byte + V15 [16]byte + V16 [16]byte + V17 [16]byte + V18 [16]byte + V19 [16]byte + V20 [16]byte + V21 [16]byte + V22 [16]byte + V23 [16]byte + V24 [16]byte + V25 [16]byte + V26 [16]byte + V27 [16]byte + V28 [16]byte + V29 [16]byte + V30 [16]byte + V31 [16]byte +} diff --git a/src/runtime/preempt_arm64.s b/src/runtime/preempt_arm64.s index 31ec9d940f76d4..9017d8815970b2 100644 --- a/src/runtime/preempt_arm64.s +++ b/src/runtime/preempt_arm64.s @@ -4,13 +4,14 @@ #include "textflag.h" TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0 - MOVD R30, -496(RSP) - SUB $496, RSP + MOVD R30, -240(RSP) + SUB $240, RSP MOVD R29, -8(RSP) SUB $8, RSP, R29 #ifdef GOOS_ios MOVD R30, (RSP) #endif + // Save GPs STP (R0, R1), 8(RSP) STP (R2, R3), 24(RSP) STP (R4, R5), 40(RSP) @@ -28,39 +29,32 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0 MOVD R0, 216(RSP) MOVD FPSR, R0 MOVD R0, 224(RSP) - FSTPD (F0, F1), 232(RSP) - FSTPD (F2, F3), 248(RSP) - FSTPD (F4, F5), 264(RSP) - FSTPD (F6, F7), 280(RSP) - FSTPD (F8, F9), 296(RSP) - FSTPD (F10, F11), 312(RSP) - FSTPD (F12, F13), 328(RSP) - FSTPD (F14, F15), 344(RSP) - FSTPD (F16, F17), 360(RSP) - FSTPD (F18, F19), 376(RSP) - FSTPD (F20, F21), 392(RSP) - FSTPD (F22, F23), 408(RSP) - FSTPD (F24, F25), 424(RSP) - FSTPD (F26, F27), 440(RSP) - FSTPD (F28, F29), 456(RSP) - FSTPD (F30, F31), 472(RSP) + // Save extended register state to p.xRegs.scratch + MOVD g_m(g), R0 + MOVD m_p(R0), R0 + ADD $(p_xRegs+xRegPerP_scratch), R0, R0 + VST1.P [V0.B16, V1.B16, V2.B16, V3.B16], 64(R0) + VST1.P [V4.B16, V5.B16, V6.B16, V7.B16], 64(R0) + VST1.P [V8.B16, V9.B16, V10.B16, V11.B16], 64(R0) + VST1.P [V12.B16, V13.B16, V14.B16, V15.B16], 64(R0) + VST1.P [V16.B16, V17.B16, V18.B16, V19.B16], 64(R0) + VST1.P [V20.B16, V21.B16, V22.B16, V23.B16], 64(R0) + VST1.P [V24.B16, V25.B16, V26.B16, V27.B16], 64(R0) + VST1.P [V28.B16, V29.B16, V30.B16, V31.B16], 64(R0) CALL ·asyncPreempt2(SB) - FLDPD 472(RSP), (F30, F31) - FLDPD 456(RSP), (F28, F29) - FLDPD 440(RSP), (F26, F27) - FLDPD 424(RSP), (F24, F25) - FLDPD 408(RSP), (F22, F23) - FLDPD 392(RSP), (F20, F21) - FLDPD 376(RSP), (F18, F19) - FLDPD 360(RSP), (F16, F17) - FLDPD 344(RSP), (F14, F15) - FLDPD 328(RSP), (F12, F13) - FLDPD 312(RSP), (F10, F11) - FLDPD 296(RSP), (F8, F9) - FLDPD 280(RSP), (F6, F7) - FLDPD 264(RSP), (F4, F5) - FLDPD 248(RSP), (F2, F3) - FLDPD 232(RSP), (F0, F1) + // Restore non-GPs from *p.xRegs.cache + MOVD g_m(g), R0 + MOVD m_p(R0), R0 + MOVD (p_xRegs+xRegPerP_cache)(R0), R0 + VLD1.P 64(R0), [V0.B16, V1.B16, V2.B16, V3.B16] + VLD1.P 64(R0), [V4.B16, V5.B16, V6.B16, V7.B16] + VLD1.P 64(R0), [V8.B16, V9.B16, V10.B16, V11.B16] + VLD1.P 64(R0), [V12.B16, V13.B16, V14.B16, V15.B16] + VLD1.P 64(R0), [V16.B16, V17.B16, V18.B16, V19.B16] + VLD1.P 64(R0), [V20.B16, V21.B16, V22.B16, V23.B16] + VLD1.P 64(R0), [V24.B16, V25.B16, V26.B16, V27.B16] + VLD1.P 64(R0), [V28.B16, V29.B16, V30.B16, V31.B16] + // Restore GPs MOVD 224(RSP), R0 MOVD R0, FPSR MOVD 216(RSP), R0 @@ -78,8 +72,8 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0 LDP 40(RSP), (R4, R5) LDP 24(RSP), (R2, R3) LDP 8(RSP), (R0, R1) - MOVD 496(RSP), R30 + MOVD 240(RSP), R30 MOVD -8(RSP), R29 MOVD (RSP), R27 - ADD $512, RSP + ADD $256, RSP RET (R27) diff --git a/src/runtime/preempt_noxreg.go b/src/runtime/preempt_noxreg.go index 
dfe46559b5b723..9f03b2b333420e 100644
--- a/src/runtime/preempt_noxreg.go
+++ b/src/runtime/preempt_noxreg.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !amd64
+//go:build !amd64 && !arm64
 
 // This provides common support for architectures that DO NOT use extended
 // register state in asynchronous preemption.
diff --git a/src/runtime/preempt_xreg.go b/src/runtime/preempt_xreg.go
index 9e05455ddbb747..f4578a4d76d889 100644
--- a/src/runtime/preempt_xreg.go
+++ b/src/runtime/preempt_xreg.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build amd64
+//go:build amd64 || arm64
 
 // This provides common support for architectures that use extended register
 // state in asynchronous preemption.
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index 2c42cad6c10e8a..fadd9a59639f87 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -517,7 +517,7 @@ func acquireSudog() *sudog {
 		s := pp.sudogcache[n-1]
 		pp.sudogcache[n-1] = nil
 		pp.sudogcache = pp.sudogcache[:n-1]
-		if s.elem != nil {
+		if s.elem.get() != nil {
 			throw("acquireSudog: found s.elem != nil in cache")
 		}
 		releasem(mp)
@@ -526,7 +526,7 @@ func acquireSudog() *sudog {
 
 //go:nosplit
 func releaseSudog(s *sudog) {
-	if s.elem != nil {
+	if s.elem.get() != nil {
 		throw("runtime: sudog with non-nil elem")
 	}
 	if s.isSelect {
@@ -541,7 +541,7 @@ func releaseSudog(s *sudog) {
 	if s.waitlink != nil {
 		throw("runtime: sudog with non-nil waitlink")
 	}
-	if s.c != nil {
+	if s.c.get() != nil {
 		throw("runtime: sudog with non-nil c")
 	}
 	gp := getg()
@@ -789,7 +789,9 @@ func cpuinit(env string) {
 // getGodebugEarly extracts the environment variable GODEBUG from the environment on
 // Unix-like operating systems and returns it. This function exists to extract GODEBUG
 // early before much of the runtime is initialized.
-func getGodebugEarly() string {
+//
+// Returns "", false if the OS doesn't provide env vars early in the init sequence.
+func getGodebugEarly() (string, bool) {
 	const prefix = "GODEBUG="
 	var env string
 	switch GOOS {
@@ -807,12 +809,16 @@ func getGodebugEarly() string {
 			s := unsafe.String(p, findnull(p))
 
 			if stringslite.HasPrefix(s, prefix) {
-				env = gostring(p)[len(prefix):]
+				env = gostringnocopy(p)[len(prefix):]
 				break
 			}
 		}
+		break
+
+	default:
+		return "", false
 	}
-	return env
+	return env, true
 }
 
 // The bootstrap sequence is:
@@ -859,12 +865,15 @@ func schedinit() {
 	// The world starts stopped.
 	worldStopped()
 
+	godebug, parsedGodebug := getGodebugEarly()
+	if parsedGodebug {
+		parseRuntimeDebugVars(godebug)
+	}
 	ticks.init() // run as early as possible
 	moduledataverify()
 	stackinit()
 	randinit() // must run before mallocinit, alginit, mcommoninit
 	mallocinit()
-	godebug := getGodebugEarly()
 	cpuinit(godebug) // must run before alginit
 	alginit()        // maps, hash, rand must not be used before this call
 	mcommoninit(gp.m, -1)
@@ -880,7 +889,12 @@ func schedinit() {
 	goenvs()
 	secure()
 	checkfds()
-	parsedebugvars()
+	if !parsedGodebug {
+		// Some platforms, e.g., Windows, didn't make env vars available "early",
+		// so try again now.
+ parseRuntimeDebugVars(gogetenv("GODEBUG")) + } + finishDebugVarsSetup() gcinit() // Allocate stack space that can be used when crashing due to bad stack @@ -1208,6 +1222,7 @@ func casfrom_Gscanstatus(gp *g, oldval, newval uint32) { _Gscanwaiting, _Gscanrunning, _Gscansyscall, + _Gscanleaked, _Gscanpreempted: if newval == oldval&^_Gscan { success = gp.atomicstatus.CompareAndSwap(oldval, newval) @@ -1228,6 +1243,7 @@ func castogscanstatus(gp *g, oldval, newval uint32) bool { case _Grunnable, _Grunning, _Gwaiting, + _Gleaked, _Gsyscall: if newval == oldval|_Gscan { r := gp.atomicstatus.CompareAndSwap(oldval, newval) @@ -5551,6 +5567,14 @@ func gcount(includeSys bool) int32 { return n } +// goroutineleakcount returns the number of leaked goroutines last reported by +// the runtime. +// +//go:linkname goroutineleakcount runtime/pprof.runtime_goroutineleakcount +func goroutineleakcount() int { + return work.goroutineLeak.count +} + func mcount() int32 { return int32(sched.mnext - sched.nmfreed) } @@ -5794,11 +5818,15 @@ func (pp *p) destroy() { // Move all timers to the local P. getg().m.p.ptr().timers.take(&pp.timers) - // Flush p's write barrier buffer. - if gcphase != _GCoff { - wbBufFlush1(pp) - pp.gcw.dispose() + // No need to flush p's write barrier buffer or span queue, as Ps + // cannot be destroyed during the mark phase. + if phase := gcphase; phase != _GCoff { + println("runtime: p id", pp.id, "destroyed during GC phase", phase) + throw("P destroyed while GC is running") } + // We should free the queues though. + pp.gcw.spanq.destroy() + clear(pp.sudogbuf[:]) pp.sudogcache = pp.sudogbuf[:0] pp.pinnerCache = nil diff --git a/src/runtime/rt0_aix_ppc64.s b/src/runtime/rt0_aix_ppc64.s index 74c57bb1dc9136..518fcb3b88d670 100644 --- a/src/runtime/rt0_aix_ppc64.s +++ b/src/runtime/rt0_aix_ppc64.s @@ -41,152 +41,7 @@ TEXT _main(SB),NOSPLIT,$-8 MOVD R12, CTR BR (CTR) -// Paramater save space required to cross-call into _cgo_sys_thread_create -#define PARAM_SPACE 16 - -TEXT _rt0_ppc64_aix_lib(SB),NOSPLIT,$-8 - // Start with standard C stack frame layout and linkage. - MOVD LR, R0 - MOVD R0, 16(R1) // Save LR in caller's frame. - MOVW CR, R0 // Save CR in caller's frame - MOVD R0, 8(R1) - - MOVDU R1, -344-PARAM_SPACE(R1) // Allocate frame. - - // Preserve callee-save registers. - MOVD R14, 48+PARAM_SPACE(R1) - MOVD R15, 56+PARAM_SPACE(R1) - MOVD R16, 64+PARAM_SPACE(R1) - MOVD R17, 72+PARAM_SPACE(R1) - MOVD R18, 80+PARAM_SPACE(R1) - MOVD R19, 88+PARAM_SPACE(R1) - MOVD R20, 96+PARAM_SPACE(R1) - MOVD R21,104+PARAM_SPACE(R1) - MOVD R22, 112+PARAM_SPACE(R1) - MOVD R23, 120+PARAM_SPACE(R1) - MOVD R24, 128+PARAM_SPACE(R1) - MOVD R25, 136+PARAM_SPACE(R1) - MOVD R26, 144+PARAM_SPACE(R1) - MOVD R27, 152+PARAM_SPACE(R1) - MOVD R28, 160+PARAM_SPACE(R1) - MOVD R29, 168+PARAM_SPACE(R1) - MOVD g, 176+PARAM_SPACE(R1) // R30 - MOVD R31, 184+PARAM_SPACE(R1) - FMOVD F14, 192+PARAM_SPACE(R1) - FMOVD F15, 200+PARAM_SPACE(R1) - FMOVD F16, 208+PARAM_SPACE(R1) - FMOVD F17, 216+PARAM_SPACE(R1) - FMOVD F18, 224+PARAM_SPACE(R1) - FMOVD F19, 232+PARAM_SPACE(R1) - FMOVD F20, 240+PARAM_SPACE(R1) - FMOVD F21, 248+PARAM_SPACE(R1) - FMOVD F22, 256+PARAM_SPACE(R1) - FMOVD F23, 264+PARAM_SPACE(R1) - FMOVD F24, 272+PARAM_SPACE(R1) - FMOVD F25, 280+PARAM_SPACE(R1) - FMOVD F26, 288+PARAM_SPACE(R1) - FMOVD F27, 296+PARAM_SPACE(R1) - FMOVD F28, 304+PARAM_SPACE(R1) - FMOVD F29, 312+PARAM_SPACE(R1) - FMOVD F30, 320+PARAM_SPACE(R1) - FMOVD F31, 328+PARAM_SPACE(R1) - - // Synchronous initialization. 
- MOVD $runtime·reginit(SB), R12 - MOVD R12, CTR - BL (CTR) - - MOVBZ runtime·isarchive(SB), R3 // Check buildmode = c-archive - CMP $0, R3 - BEQ done - - MOVD R14, _rt0_ppc64_aix_lib_argc<>(SB) - MOVD R15, _rt0_ppc64_aix_lib_argv<>(SB) - - MOVD $runtime·libpreinit(SB), R12 - MOVD R12, CTR - BL (CTR) - - // Create a new thread to do the runtime initialization and return. - MOVD _cgo_sys_thread_create(SB), R12 - CMP $0, R12 - BEQ nocgo - MOVD $_rt0_ppc64_aix_lib_go(SB), R3 - MOVD $0, R4 - MOVD R2, 40(R1) - MOVD 8(R12), R2 - MOVD (R12), R12 - MOVD R12, CTR - BL (CTR) - MOVD 40(R1), R2 - BR done - -nocgo: - MOVD $0x800000, R12 // stacksize = 8192KB - MOVD R12, 8(R1) - MOVD $_rt0_ppc64_aix_lib_go(SB), R12 - MOVD R12, 16(R1) - MOVD $runtime·newosproc0(SB),R12 - MOVD R12, CTR - BL (CTR) - -done: - // Restore saved registers. - MOVD 48+PARAM_SPACE(R1), R14 - MOVD 56+PARAM_SPACE(R1), R15 - MOVD 64+PARAM_SPACE(R1), R16 - MOVD 72+PARAM_SPACE(R1), R17 - MOVD 80+PARAM_SPACE(R1), R18 - MOVD 88+PARAM_SPACE(R1), R19 - MOVD 96+PARAM_SPACE(R1), R20 - MOVD 104+PARAM_SPACE(R1), R21 - MOVD 112+PARAM_SPACE(R1), R22 - MOVD 120+PARAM_SPACE(R1), R23 - MOVD 128+PARAM_SPACE(R1), R24 - MOVD 136+PARAM_SPACE(R1), R25 - MOVD 144+PARAM_SPACE(R1), R26 - MOVD 152+PARAM_SPACE(R1), R27 - MOVD 160+PARAM_SPACE(R1), R28 - MOVD 168+PARAM_SPACE(R1), R29 - MOVD 176+PARAM_SPACE(R1), g // R30 - MOVD 184+PARAM_SPACE(R1), R31 - FMOVD 196+PARAM_SPACE(R1), F14 - FMOVD 200+PARAM_SPACE(R1), F15 - FMOVD 208+PARAM_SPACE(R1), F16 - FMOVD 216+PARAM_SPACE(R1), F17 - FMOVD 224+PARAM_SPACE(R1), F18 - FMOVD 232+PARAM_SPACE(R1), F19 - FMOVD 240+PARAM_SPACE(R1), F20 - FMOVD 248+PARAM_SPACE(R1), F21 - FMOVD 256+PARAM_SPACE(R1), F22 - FMOVD 264+PARAM_SPACE(R1), F23 - FMOVD 272+PARAM_SPACE(R1), F24 - FMOVD 280+PARAM_SPACE(R1), F25 - FMOVD 288+PARAM_SPACE(R1), F26 - FMOVD 296+PARAM_SPACE(R1), F27 - FMOVD 304+PARAM_SPACE(R1), F28 - FMOVD 312+PARAM_SPACE(R1), F29 - FMOVD 320+PARAM_SPACE(R1), F30 - FMOVD 328+PARAM_SPACE(R1), F31 - - ADD $344+PARAM_SPACE, R1 - - MOVD 8(R1), R0 - MOVFL R0, $0xff - MOVD 16(R1), R0 - MOVD R0, LR - RET - -DEFINE_PPC64X_FUNCDESC(_rt0_ppc64_aix_lib_go, __rt0_ppc64_aix_lib_go) - -TEXT __rt0_ppc64_aix_lib_go(SB),NOSPLIT,$0 - MOVD _rt0_ppc64_aix_lib_argc<>(SB), R3 - MOVD _rt0_ppc64_aix_lib_argv<>(SB), R4 - MOVD $runtime·rt0_go(SB), R12 - MOVD R12, CTR - BR (CTR) - -DATA _rt0_ppc64_aix_lib_argc<>(SB)/8, $0 -GLOBL _rt0_ppc64_aix_lib_argc<>(SB),NOPTR, $8 -DATA _rt0_ppc64_aix_lib_argv<>(SB)/8, $0 -GLOBL _rt0_ppc64_aix_lib_argv<>(SB),NOPTR, $8 +TEXT _rt0_ppc64_aix_lib(SB),NOSPLIT,$0 + MOVD R14, R3 // argc + MOVD R15, R4 // argv + JMP _rt0_ppc64x_lib(SB) diff --git a/src/runtime/rt0_freebsd_riscv64.s b/src/runtime/rt0_freebsd_riscv64.s index dc46b7047669ad..f3fb4ffbe75ffd 100644 --- a/src/runtime/rt0_freebsd_riscv64.s +++ b/src/runtime/rt0_freebsd_riscv64.s @@ -12,100 +12,8 @@ TEXT _rt0_riscv64_freebsd(SB),NOSPLIT|NOFRAME,$0 // When building with -buildmode=c-shared, this symbol is called when the shared // library is loaded. -TEXT _rt0_riscv64_freebsd_lib(SB),NOSPLIT,$224 - // Preserve callee-save registers, along with X1 (LR). 
- MOV X1, (8*3)(X2) - MOV X8, (8*4)(X2) - MOV X9, (8*5)(X2) - MOV X18, (8*6)(X2) - MOV X19, (8*7)(X2) - MOV X20, (8*8)(X2) - MOV X21, (8*9)(X2) - MOV X22, (8*10)(X2) - MOV X23, (8*11)(X2) - MOV X24, (8*12)(X2) - MOV X25, (8*13)(X2) - MOV X26, (8*14)(X2) - MOV g, (8*15)(X2) - MOVD F8, (8*16)(X2) - MOVD F9, (8*17)(X2) - MOVD F18, (8*18)(X2) - MOVD F19, (8*19)(X2) - MOVD F20, (8*20)(X2) - MOVD F21, (8*21)(X2) - MOVD F22, (8*22)(X2) - MOVD F23, (8*23)(X2) - MOVD F24, (8*24)(X2) - MOVD F25, (8*25)(X2) - MOVD F26, (8*26)(X2) - MOVD F27, (8*27)(X2) - - // Initialize g as nil in case of using g later e.g. sigaction in cgo_sigaction.go - MOV X0, g - - MOV A0, _rt0_riscv64_freebsd_lib_argc<>(SB) - MOV A1, _rt0_riscv64_freebsd_lib_argv<>(SB) - - // Synchronous initialization. - MOV $runtime·libpreinit(SB), T0 - JALR RA, T0 - - // Create a new thread to do the runtime initialization and return. - MOV _cgo_sys_thread_create(SB), T0 - BEQZ T0, nocgo - MOV $_rt0_riscv64_freebsd_lib_go(SB), A0 - MOV $0, A1 - JALR RA, T0 - JMP restore - -nocgo: - MOV $0x800000, A0 // stacksize = 8192KB - MOV $_rt0_riscv64_freebsd_lib_go(SB), A1 - MOV A0, 8(X2) - MOV A1, 16(X2) - MOV $runtime·newosproc0(SB), T0 - JALR RA, T0 - -restore: - // Restore callee-save registers, along with X1 (LR). - MOV (8*3)(X2), X1 - MOV (8*4)(X2), X8 - MOV (8*5)(X2), X9 - MOV (8*6)(X2), X18 - MOV (8*7)(X2), X19 - MOV (8*8)(X2), X20 - MOV (8*9)(X2), X21 - MOV (8*10)(X2), X22 - MOV (8*11)(X2), X23 - MOV (8*12)(X2), X24 - MOV (8*13)(X2), X25 - MOV (8*14)(X2), X26 - MOV (8*15)(X2), g - MOVD (8*16)(X2), F8 - MOVD (8*17)(X2), F9 - MOVD (8*18)(X2), F18 - MOVD (8*19)(X2), F19 - MOVD (8*20)(X2), F20 - MOVD (8*21)(X2), F21 - MOVD (8*22)(X2), F22 - MOVD (8*23)(X2), F23 - MOVD (8*24)(X2), F24 - MOVD (8*25)(X2), F25 - MOVD (8*26)(X2), F26 - MOVD (8*27)(X2), F27 - - RET - -TEXT _rt0_riscv64_freebsd_lib_go(SB),NOSPLIT,$0 - MOV _rt0_riscv64_freebsd_lib_argc<>(SB), A0 - MOV _rt0_riscv64_freebsd_lib_argv<>(SB), A1 - MOV $runtime·rt0_go(SB), T0 - JALR ZERO, T0 - -DATA _rt0_riscv64_freebsd_lib_argc<>(SB)/8, $0 -GLOBL _rt0_riscv64_freebsd_lib_argc<>(SB),NOPTR, $8 -DATA _rt0_riscv64_freebsd_lib_argv<>(SB)/8, $0 -GLOBL _rt0_riscv64_freebsd_lib_argv<>(SB),NOPTR, $8 +TEXT _rt0_riscv64_freebsd_lib(SB),NOSPLIT,$0 + JMP _rt0_riscv64_lib(SB) TEXT main(SB),NOSPLIT|NOFRAME,$0 MOV $runtime·rt0_go(SB), T0 diff --git a/src/runtime/rt0_linux_loong64.s b/src/runtime/rt0_linux_loong64.s index b52f7d530a6a98..d8da4461a2f56d 100644 --- a/src/runtime/rt0_linux_loong64.s +++ b/src/runtime/rt0_linux_loong64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. #include "textflag.h" -#include "cgo/abi_loong64.h" TEXT _rt0_loong64_linux(SB),NOSPLIT|NOFRAME,$0 // In a statically linked binary, the stack contains argc, @@ -16,53 +15,8 @@ TEXT _rt0_loong64_linux(SB),NOSPLIT|NOFRAME,$0 // When building with -buildmode=c-shared, this symbol is called when the shared // library is loaded. -TEXT _rt0_loong64_linux_lib(SB),NOSPLIT,$168 - // Preserve callee-save registers. - SAVE_R22_TO_R31(3*8) - SAVE_F24_TO_F31(13*8) - - // Initialize g as nil in case of using g later e.g. sigaction in cgo_sigaction.go - MOVV R0, g - - MOVV R4, _rt0_loong64_linux_lib_argc<>(SB) - MOVV R5, _rt0_loong64_linux_lib_argv<>(SB) - - // Synchronous initialization. - MOVV $runtime·libpreinit(SB), R19 - JAL (R19) - - // Create a new thread to do the runtime initialization and return. 
- MOVV _cgo_sys_thread_create(SB), R19 - BEQ R19, nocgo - MOVV $_rt0_loong64_linux_lib_go(SB), R4 - MOVV $0, R5 - JAL (R19) - JMP restore - -nocgo: - MOVV $0x800000, R4 // stacksize = 8192KB - MOVV $_rt0_loong64_linux_lib_go(SB), R5 - MOVV R4, 8(R3) - MOVV R5, 16(R3) - MOVV $runtime·newosproc0(SB), R19 - JAL (R19) - -restore: - // Restore callee-save registers. - RESTORE_R22_TO_R31(3*8) - RESTORE_F24_TO_F31(13*8) - RET - -TEXT _rt0_loong64_linux_lib_go(SB),NOSPLIT,$0 - MOVV _rt0_loong64_linux_lib_argc<>(SB), R4 - MOVV _rt0_loong64_linux_lib_argv<>(SB), R5 - MOVV $runtime·rt0_go(SB),R19 - JMP (R19) - -DATA _rt0_loong64_linux_lib_argc<>(SB)/8, $0 -GLOBL _rt0_loong64_linux_lib_argc<>(SB),NOPTR, $8 -DATA _rt0_loong64_linux_lib_argv<>(SB)/8, $0 -GLOBL _rt0_loong64_linux_lib_argv<>(SB),NOPTR, $8 +TEXT _rt0_loong64_linux_lib(SB),NOSPLIT,$0 + JMP _rt0_loong64_lib(SB) TEXT main(SB),NOSPLIT|NOFRAME,$0 // in external linking, glibc jumps to main with argc in R4 diff --git a/src/runtime/rt0_linux_ppc64le.s b/src/runtime/rt0_linux_ppc64le.s index 4b7d8e1b940a1e..3a6e8863b2da1d 100644 --- a/src/runtime/rt0_linux_ppc64le.s +++ b/src/runtime/rt0_linux_ppc64le.s @@ -5,60 +5,13 @@ #include "go_asm.h" #include "textflag.h" #include "asm_ppc64x.h" -#include "cgo/abi_ppc64x.h" TEXT _rt0_ppc64le_linux(SB),NOSPLIT,$0 XOR R0, R0 // Make sure R0 is zero before _main BR _main<>(SB) TEXT _rt0_ppc64le_linux_lib(SB),NOSPLIT|NOFRAME,$0 - // This is called with ELFv2 calling conventions. Convert to Go. - // Allocate argument storage for call to newosproc0. - STACK_AND_SAVE_HOST_TO_GO_ABI(16) - - MOVD R3, _rt0_ppc64le_linux_lib_argc<>(SB) - MOVD R4, _rt0_ppc64le_linux_lib_argv<>(SB) - - // Synchronous initialization. - MOVD $runtime·libpreinit(SB), R12 - MOVD R12, CTR - BL (CTR) - - // Create a new thread to do the runtime initialization and return. - MOVD _cgo_sys_thread_create(SB), R12 - CMP $0, R12 - BEQ nocgo - MOVD $_rt0_ppc64le_linux_lib_go(SB), R3 - MOVD $0, R4 - MOVD R12, CTR - BL (CTR) - BR done - -nocgo: - MOVD $0x800000, R12 // stacksize = 8192KB - MOVD R12, 8+FIXED_FRAME(R1) - MOVD $_rt0_ppc64le_linux_lib_go(SB), R12 - MOVD R12, 16+FIXED_FRAME(R1) - MOVD $runtime·newosproc0(SB),R12 - MOVD R12, CTR - BL (CTR) - -done: - // Restore and return to ELFv2 caller. - UNSTACK_AND_RESTORE_GO_TO_HOST_ABI(16) - RET - -TEXT _rt0_ppc64le_linux_lib_go(SB),NOSPLIT,$0 - MOVD _rt0_ppc64le_linux_lib_argc<>(SB), R3 - MOVD _rt0_ppc64le_linux_lib_argv<>(SB), R4 - MOVD $runtime·rt0_go(SB), R12 - MOVD R12, CTR - BR (CTR) - -DATA _rt0_ppc64le_linux_lib_argc<>(SB)/8, $0 -GLOBL _rt0_ppc64le_linux_lib_argc<>(SB),NOPTR, $8 -DATA _rt0_ppc64le_linux_lib_argv<>(SB)/8, $0 -GLOBL _rt0_ppc64le_linux_lib_argv<>(SB),NOPTR, $8 + JMP _rt0_ppc64x_lib(SB) TEXT _main<>(SB),NOSPLIT,$-8 // In a statically linked binary, the stack contains argc, diff --git a/src/runtime/rt0_linux_riscv64.s b/src/runtime/rt0_linux_riscv64.s index d6b8ac85dca646..4139c9bcecdc62 100644 --- a/src/runtime/rt0_linux_riscv64.s +++ b/src/runtime/rt0_linux_riscv64.s @@ -11,100 +11,8 @@ TEXT _rt0_riscv64_linux(SB),NOSPLIT|NOFRAME,$0 // When building with -buildmode=c-shared, this symbol is called when the shared // library is loaded. -TEXT _rt0_riscv64_linux_lib(SB),NOSPLIT,$224 - // Preserve callee-save registers, along with X1 (LR). 
- MOV X1, (8*3)(X2) - MOV X8, (8*4)(X2) - MOV X9, (8*5)(X2) - MOV X18, (8*6)(X2) - MOV X19, (8*7)(X2) - MOV X20, (8*8)(X2) - MOV X21, (8*9)(X2) - MOV X22, (8*10)(X2) - MOV X23, (8*11)(X2) - MOV X24, (8*12)(X2) - MOV X25, (8*13)(X2) - MOV X26, (8*14)(X2) - MOV g, (8*15)(X2) - MOVD F8, (8*16)(X2) - MOVD F9, (8*17)(X2) - MOVD F18, (8*18)(X2) - MOVD F19, (8*19)(X2) - MOVD F20, (8*20)(X2) - MOVD F21, (8*21)(X2) - MOVD F22, (8*22)(X2) - MOVD F23, (8*23)(X2) - MOVD F24, (8*24)(X2) - MOVD F25, (8*25)(X2) - MOVD F26, (8*26)(X2) - MOVD F27, (8*27)(X2) - - // Initialize g as nil in case of using g later e.g. sigaction in cgo_sigaction.go - MOV X0, g - - MOV A0, _rt0_riscv64_linux_lib_argc<>(SB) - MOV A1, _rt0_riscv64_linux_lib_argv<>(SB) - - // Synchronous initialization. - MOV $runtime·libpreinit(SB), T0 - JALR RA, T0 - - // Create a new thread to do the runtime initialization and return. - MOV _cgo_sys_thread_create(SB), T0 - BEQZ T0, nocgo - MOV $_rt0_riscv64_linux_lib_go(SB), A0 - MOV $0, A1 - JALR RA, T0 - JMP restore - -nocgo: - MOV $0x800000, A0 // stacksize = 8192KB - MOV $_rt0_riscv64_linux_lib_go(SB), A1 - MOV A0, 8(X2) - MOV A1, 16(X2) - MOV $runtime·newosproc0(SB), T0 - JALR RA, T0 - -restore: - // Restore callee-save registers, along with X1 (LR). - MOV (8*3)(X2), X1 - MOV (8*4)(X2), X8 - MOV (8*5)(X2), X9 - MOV (8*6)(X2), X18 - MOV (8*7)(X2), X19 - MOV (8*8)(X2), X20 - MOV (8*9)(X2), X21 - MOV (8*10)(X2), X22 - MOV (8*11)(X2), X23 - MOV (8*12)(X2), X24 - MOV (8*13)(X2), X25 - MOV (8*14)(X2), X26 - MOV (8*15)(X2), g - MOVD (8*16)(X2), F8 - MOVD (8*17)(X2), F9 - MOVD (8*18)(X2), F18 - MOVD (8*19)(X2), F19 - MOVD (8*20)(X2), F20 - MOVD (8*21)(X2), F21 - MOVD (8*22)(X2), F22 - MOVD (8*23)(X2), F23 - MOVD (8*24)(X2), F24 - MOVD (8*25)(X2), F25 - MOVD (8*26)(X2), F26 - MOVD (8*27)(X2), F27 - - RET - -TEXT _rt0_riscv64_linux_lib_go(SB),NOSPLIT,$0 - MOV _rt0_riscv64_linux_lib_argc<>(SB), A0 - MOV _rt0_riscv64_linux_lib_argv<>(SB), A1 - MOV $runtime·rt0_go(SB), T0 - JALR ZERO, T0 - -DATA _rt0_riscv64_linux_lib_argc<>(SB)/8, $0 -GLOBL _rt0_riscv64_linux_lib_argc<>(SB),NOPTR, $8 -DATA _rt0_riscv64_linux_lib_argv<>(SB)/8, $0 -GLOBL _rt0_riscv64_linux_lib_argv<>(SB),NOPTR, $8 +TEXT _rt0_riscv64_linux_lib(SB),NOSPLIT,$0 + JMP _rt0_riscv64_lib(SB) TEXT main(SB),NOSPLIT|NOFRAME,$0 diff --git a/src/runtime/rt0_netbsd_arm64.s b/src/runtime/rt0_netbsd_arm64.s index 07fb0a1240aec5..80802209b78f12 100644 --- a/src/runtime/rt0_netbsd_arm64.s +++ b/src/runtime/rt0_netbsd_arm64.s @@ -7,7 +7,7 @@ TEXT _rt0_arm64_netbsd(SB),NOSPLIT,$0 MOVD 0(RSP), R0 // argc ADD $8, RSP, R1 // argv - BL main(SB) + JMP main(SB) // When building with -buildmode=c-shared, this symbol is called when the shared // library is loaded. 
diff --git a/src/runtime/runtime1.go b/src/runtime/runtime1.go index 424745d2357dc9..15b546783b5e53 100644 --- a/src/runtime/runtime1.go +++ b/src/runtime/runtime1.go @@ -402,7 +402,7 @@ var dbgvars = []*dbgVar{ {name: "updatemaxprocs", value: &debug.updatemaxprocs, def: 1}, } -func parsedebugvars() { +func parseRuntimeDebugVars(godebug string) { // defaults debug.cgocheck = 1 debug.invalidptr = 1 @@ -420,12 +420,6 @@ func parsedebugvars() { } debug.traceadvanceperiod = defaultTraceAdvancePeriod - godebug := gogetenv("GODEBUG") - - p := new(string) - *p = godebug - godebugEnv.Store(p) - // apply runtime defaults, if any for _, v := range dbgvars { if v.def != 0 { @@ -437,7 +431,6 @@ func parsedebugvars() { } } } - // apply compile-time GODEBUG settings parsegodebug(godebugDefault, nil) @@ -463,6 +456,12 @@ func parsedebugvars() { if debug.gccheckmark > 0 { debug.asyncpreemptoff = 1 } +} + +func finishDebugVarsSetup() { + p := new(string) + *p = gogetenv("GODEBUG") + godebugEnv.Store(p) setTraceback(gogetenv("GOTRACEBACK")) traceback_env = traceback_cache diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go index 042c3137cd03df..6016c6fde054fb 100644 --- a/src/runtime/runtime2.go +++ b/src/runtime/runtime2.go @@ -87,6 +87,9 @@ const ( // ready()ing this G. _Gpreempted // 9 + // _Gleaked represents a leaked goroutine caught by the GC. + _Gleaked // 10 + // _Gscan combined with one of the above states other than // _Grunning indicates that GC is scanning the stack. The // goroutine is not executing user code and the stack is owned @@ -104,6 +107,7 @@ const ( _Gscansyscall = _Gscan + _Gsyscall // 0x1003 _Gscanwaiting = _Gscan + _Gwaiting // 0x1004 _Gscanpreempted = _Gscan + _Gpreempted // 0x1009 + _Gscanleaked = _Gscan + _Gleaked // 0x100a ) const ( @@ -315,6 +319,78 @@ type gobuf struct { bp uintptr // for framepointer-enabled architectures } +// maybeTraceablePtr is a special pointer that is conditionally trackable +// by the GC. It consists of an address as a uintptr (vu) and a pointer +// to a data element (vp). +// +// maybeTraceablePtr values can be in one of three states: +// 1. Unset: vu == 0 && vp == nil +// 2. Untracked: vu != 0 && vp == nil +// 3. Tracked: vu != 0 && vp != nil +// +// Do not set fields manually. Use methods instead. +// Extend this type with additional methods if needed. +type maybeTraceablePtr struct { + vp unsafe.Pointer // For liveness only. + vu uintptr // Source of truth. +} + +// untrack unsets the pointer but preserves the address. +// This is used to hide the pointer from the GC. +// +//go:nosplit +func (p *maybeTraceablePtr) setUntraceable() { + p.vp = nil +} + +// setTraceable resets the pointer to the stored address. +// This is used to make the pointer visible to the GC. +// +//go:nosplit +func (p *maybeTraceablePtr) setTraceable() { + p.vp = unsafe.Pointer(p.vu) +} + +// set sets the pointer to the data element and updates the address. +// +//go:nosplit +func (p *maybeTraceablePtr) set(v unsafe.Pointer) { + p.vp = v + p.vu = uintptr(v) +} + +// get retrieves the pointer to the data element. +// +//go:nosplit +func (p *maybeTraceablePtr) get() unsafe.Pointer { + return unsafe.Pointer(p.vu) +} + +// uintptr returns the uintptr address of the pointer. +// +//go:nosplit +func (p *maybeTraceablePtr) uintptr() uintptr { + return p.vu +} + +// maybeTraceableChan extends conditionally trackable pointers (maybeTraceablePtr) +// to track hchan pointers. +// +// Do not set fields manually. Use methods instead. 
+type maybeTraceableChan struct { + maybeTraceablePtr +} + +//go:nosplit +func (p *maybeTraceableChan) set(c *hchan) { + p.maybeTraceablePtr.set(unsafe.Pointer(c)) +} + +//go:nosplit +func (p *maybeTraceableChan) get() *hchan { + return (*hchan)(p.maybeTraceablePtr.get()) +} + // sudog (pseudo-g) represents a g in a wait list, such as for sending/receiving // on a channel. // @@ -334,7 +410,8 @@ type sudog struct { next *sudog prev *sudog - elem unsafe.Pointer // data element (may point to stack) + + elem maybeTraceablePtr // data element (may point to stack) // The following fields are never accessed concurrently. // For channels, waitlink is only accessed by g. @@ -362,10 +439,10 @@ type sudog struct { // in the second entry in the list.) waiters uint16 - parent *sudog // semaRoot binary tree - waitlink *sudog // g.waiting list or semaRoot - waittail *sudog // semaRoot - c *hchan // channel + parent *sudog // semaRoot binary tree + waitlink *sudog // g.waiting list or semaRoot + waittail *sudog // semaRoot + c maybeTraceableChan // channel } type libcall struct { @@ -1072,24 +1149,24 @@ const ( waitReasonZero waitReason = iota // "" waitReasonGCAssistMarking // "GC assist marking" waitReasonIOWait // "IO wait" - waitReasonChanReceiveNilChan // "chan receive (nil chan)" - waitReasonChanSendNilChan // "chan send (nil chan)" waitReasonDumpingHeap // "dumping heap" waitReasonGarbageCollection // "garbage collection" waitReasonGarbageCollectionScan // "garbage collection scan" waitReasonPanicWait // "panicwait" - waitReasonSelect // "select" - waitReasonSelectNoCases // "select (no cases)" waitReasonGCAssistWait // "GC assist wait" waitReasonGCSweepWait // "GC sweep wait" waitReasonGCScavengeWait // "GC scavenge wait" - waitReasonChanReceive // "chan receive" - waitReasonChanSend // "chan send" waitReasonFinalizerWait // "finalizer wait" waitReasonForceGCIdle // "force gc (idle)" waitReasonUpdateGOMAXPROCSIdle // "GOMAXPROCS updater (idle)" waitReasonSemacquire // "semacquire" waitReasonSleep // "sleep" + waitReasonChanReceiveNilChan // "chan receive (nil chan)" + waitReasonChanSendNilChan // "chan send (nil chan)" + waitReasonSelectNoCases // "select (no cases)" + waitReasonSelect // "select" + waitReasonChanReceive // "chan receive" + waitReasonChanSend // "chan send" waitReasonSyncCondWait // "sync.Cond.Wait" waitReasonSyncMutexLock // "sync.Mutex.Lock" waitReasonSyncRWMutexRLock // "sync.RWMutex.RLock" @@ -1175,12 +1252,34 @@ func (w waitReason) String() string { return waitReasonStrings[w] } +// isMutexWait returns true if the goroutine is blocked because of +// sync.Mutex.Lock or sync.RWMutex.[R]Lock. +// +//go:nosplit func (w waitReason) isMutexWait() bool { return w == waitReasonSyncMutexLock || w == waitReasonSyncRWMutexRLock || w == waitReasonSyncRWMutexLock } +// isSyncWait returns true if the goroutine is blocked because of +// sync library primitive operations. +// +//go:nosplit +func (w waitReason) isSyncWait() bool { + return waitReasonSyncCondWait <= w && w <= waitReasonSyncWaitGroupWait +} + +// isChanWait is true if the goroutine is blocked because of non-nil +// channel operations or a select statement with at least one case. 
+// +//go:nosplit +func (w waitReason) isChanWait() bool { + return w == waitReasonSelect || + w == waitReasonChanReceive || + w == waitReasonChanSend +} + func (w waitReason) isWaitingForSuspendG() bool { return isWaitingForSuspendG[w] } diff --git a/src/runtime/select.go b/src/runtime/select.go index 113dc8ad19e984..553f6960eb1286 100644 --- a/src/runtime/select.go +++ b/src/runtime/select.go @@ -83,7 +83,7 @@ func selparkcommit(gp *g, _ unsafe.Pointer) bool { // channels in lock order. var lastc *hchan for sg := gp.waiting; sg != nil; sg = sg.waitlink { - if sg.c != lastc && lastc != nil { + if sg.c.get() != lastc && lastc != nil { // As soon as we unlock the channel, fields in // any sudog with that channel may change, // including c and waitlink. Since multiple @@ -92,7 +92,7 @@ func selparkcommit(gp *g, _ unsafe.Pointer) bool { // of a channel. unlock(&lastc.lock) } - lastc = sg.c + lastc = sg.c.get() } if lastc != nil { unlock(&lastc.lock) @@ -320,12 +320,12 @@ func selectgo(cas0 *scase, order0 *uint16, pc0 *uintptr, nsends, nrecvs int, blo sg.isSelect = true // No stack splits between assigning elem and enqueuing // sg on gp.waiting where copystack can find it. - sg.elem = cas.elem + sg.elem.set(cas.elem) sg.releasetime = 0 if t0 != 0 { sg.releasetime = -1 } - sg.c = c + sg.c.set(c) // Construct waiting list in lock order. *nextp = sg nextp = &sg.waitlink @@ -368,8 +368,8 @@ func selectgo(cas0 *scase, order0 *uint16, pc0 *uintptr, nsends, nrecvs int, blo // Clear all elem before unlinking from gp.waiting. for sg1 := gp.waiting; sg1 != nil; sg1 = sg1.waitlink { sg1.isSelect = false - sg1.elem = nil - sg1.c = nil + sg1.elem.set(nil) + sg1.c.set(nil) } gp.waiting = nil diff --git a/src/runtime/sema.go b/src/runtime/sema.go index 6af49b1b0c42d9..0fe0f2a2c20739 100644 --- a/src/runtime/sema.go +++ b/src/runtime/sema.go @@ -303,7 +303,10 @@ func cansemacquire(addr *uint32) bool { // queue adds s to the blocked goroutines in semaRoot. func (root *semaRoot) queue(addr *uint32, s *sudog, lifo bool) { s.g = getg() - s.elem = unsafe.Pointer(addr) + s.elem.set(unsafe.Pointer(addr)) + // Storing this pointer so that we can trace the semaphore address + // from the blocked goroutine when checking for goroutine leaks. + s.g.waiting = s s.next = nil s.prev = nil s.waiters = 0 @@ -311,7 +314,7 @@ func (root *semaRoot) queue(addr *uint32, s *sudog, lifo bool) { var last *sudog pt := &root.treap for t := *pt; t != nil; t = *pt { - if t.elem == unsafe.Pointer(addr) { + if uintptr(unsafe.Pointer(addr)) == t.elem.uintptr() { // Already have addr in list. if lifo { // Substitute s in t's place in treap. @@ -357,7 +360,7 @@ func (root *semaRoot) queue(addr *uint32, s *sudog, lifo bool) { return } last = t - if uintptr(unsafe.Pointer(addr)) < uintptr(t.elem) { + if uintptr(unsafe.Pointer(addr)) < t.elem.uintptr() { pt = &t.prev } else { pt = &t.next @@ -402,11 +405,13 @@ func (root *semaRoot) queue(addr *uint32, s *sudog, lifo bool) { func (root *semaRoot) dequeue(addr *uint32) (found *sudog, now, tailtime int64) { ps := &root.treap s := *ps + for ; s != nil; s = *ps { - if s.elem == unsafe.Pointer(addr) { + if uintptr(unsafe.Pointer(addr)) == s.elem.uintptr() { goto Found } - if uintptr(unsafe.Pointer(addr)) < uintptr(s.elem) { + + if uintptr(unsafe.Pointer(addr)) < s.elem.uintptr() { ps = &s.prev } else { ps = &s.next @@ -470,8 +475,10 @@ Found: } tailtime = s.acquiretime } + // Goroutine is no longer blocked. Clear the waiting pointer. 
+ s.g.waiting = nil s.parent = nil - s.elem = nil + s.elem.set(nil) s.next = nil s.prev = nil s.ticket = 0 @@ -590,6 +597,10 @@ func notifyListWait(l *notifyList, t uint32) { // Enqueue itself. s := acquireSudog() s.g = getg() + // Storing this pointer so that we can trace the condvar address + // from the blocked goroutine when checking for goroutine leaks. + s.elem.set(unsafe.Pointer(l)) + s.g.waiting = s s.ticket = t s.releasetime = 0 t0 := int64(0) @@ -607,6 +618,10 @@ func notifyListWait(l *notifyList, t uint32) { if t0 != 0 { blockevent(s.releasetime-t0, 2) } + // Goroutine is no longer blocked. Clear up its waiting pointer, + // and clean up the sudog before releasing it. + s.g.waiting = nil + s.elem.set(nil) releaseSudog(s) } diff --git a/src/runtime/set_vma_name_linux.go b/src/runtime/set_vma_name_linux.go index 9b6654f33299a0..03c7739c3465c9 100644 --- a/src/runtime/set_vma_name_linux.go +++ b/src/runtime/set_vma_name_linux.go @@ -14,9 +14,13 @@ import ( var prSetVMAUnsupported atomic.Bool +func setVMANameSupported() bool { + return !prSetVMAUnsupported.Load() +} + // setVMAName calls prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, start, len, name) func setVMAName(start unsafe.Pointer, length uintptr, name string) { - if debug.decoratemappings == 0 || prSetVMAUnsupported.Load() { + if debug.decoratemappings == 0 || !setVMANameSupported() { return } diff --git a/src/runtime/set_vma_name_stub.go b/src/runtime/set_vma_name_stub.go index 38f65fd592ba34..6cb01ebf50dcef 100644 --- a/src/runtime/set_vma_name_stub.go +++ b/src/runtime/set_vma_name_stub.go @@ -10,3 +10,5 @@ import "unsafe" // setVMAName isn’t implemented func setVMAName(start unsafe.Pointer, len uintptr, name string) {} + +func setVMANameSupported() bool { return false } diff --git a/src/runtime/sizeof_test.go b/src/runtime/sizeof_test.go index de859866a5adb2..5888177f0ea7a1 100644 --- a/src/runtime/sizeof_test.go +++ b/src/runtime/sizeof_test.go @@ -22,7 +22,7 @@ func TestSizeof(t *testing.T) { _64bit uintptr // size on 64bit platforms }{ {runtime.G{}, 280 + xreg, 440 + xreg}, // g, but exported for testing - {runtime.Sudog{}, 56, 88}, // sudog, but exported for testing + {runtime.Sudog{}, 64, 104}, // sudog, but exported for testing } if xreg > runtime.PtrSize { diff --git a/src/runtime/stack.go b/src/runtime/stack.go index d809820b076dfd..55e97e77afa957 100644 --- a/src/runtime/stack.go +++ b/src/runtime/stack.go @@ -821,7 +821,8 @@ func adjustsudogs(gp *g, adjinfo *adjustinfo) { // the data elements pointed to by a SudoG structure // might be in the stack. for s := gp.waiting; s != nil; s = s.waitlink { - adjustpointer(adjinfo, unsafe.Pointer(&s.elem)) + adjustpointer(adjinfo, unsafe.Pointer(&s.elem.vu)) + adjustpointer(adjinfo, unsafe.Pointer(&s.elem.vp)) } } @@ -834,7 +835,7 @@ func fillstack(stk stack, b byte) { func findsghi(gp *g, stk stack) uintptr { var sghi uintptr for sg := gp.waiting; sg != nil; sg = sg.waitlink { - p := uintptr(sg.elem) + uintptr(sg.c.elemsize) + p := sg.elem.uintptr() + uintptr(sg.c.get().elemsize) if stk.lo <= p && p < stk.hi && p > sghi { sghi = p } @@ -853,7 +854,7 @@ func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr { // Lock channels to prevent concurrent send/receive. var lastc *hchan for sg := gp.waiting; sg != nil; sg = sg.waitlink { - if sg.c != lastc { + if sg.c.get() != lastc { // There is a ranking cycle here between gscan bit and // hchan locks. Normally, we only allow acquiring hchan // locks and then getting a gscan bit. 
In this case, we @@ -863,9 +864,9 @@ func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr { // suspended. So, we get a special hchan lock rank here // that is lower than gscan, but doesn't allow acquiring // any other locks other than hchan. - lockWithRank(&sg.c.lock, lockRankHchanLeaf) + lockWithRank(&sg.c.get().lock, lockRankHchanLeaf) } - lastc = sg.c + lastc = sg.c.get() } // Adjust sudogs. @@ -885,10 +886,10 @@ func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr { // Unlock channels. lastc = nil for sg := gp.waiting; sg != nil; sg = sg.waitlink { - if sg.c != lastc { - unlock(&sg.c.lock) + if sg.c.get() != lastc { + unlock(&sg.c.get().lock) } - lastc = sg.c + lastc = sg.c.get() } return sgsize diff --git a/src/runtime/sys_wasm.s b/src/runtime/sys_wasm.s index b7965ec3fa4138..95c162eb857ac4 100644 --- a/src/runtime/sys_wasm.s +++ b/src/runtime/sys_wasm.s @@ -22,64 +22,6 @@ TEXT runtime·wasmDiv(SB), NOSPLIT, $0-0 I64DivS Return -TEXT runtime·wasmTruncS(SB), NOSPLIT, $0-0 - Get R0 - Get R0 - F64Ne // NaN - If - I64Const $0x8000000000000000 - Return - End - - Get R0 - F64Const $0x7ffffffffffffc00p0 // Maximum truncated representation of 0x7fffffffffffffff - F64Gt - If - I64Const $0x8000000000000000 - Return - End - - Get R0 - F64Const $-0x7ffffffffffffc00p0 // Minimum truncated representation of -0x8000000000000000 - F64Lt - If - I64Const $0x8000000000000000 - Return - End - - Get R0 - I64TruncF64S - Return - -TEXT runtime·wasmTruncU(SB), NOSPLIT, $0-0 - Get R0 - Get R0 - F64Ne // NaN - If - I64Const $0x8000000000000000 - Return - End - - Get R0 - F64Const $0xfffffffffffff800p0 // Maximum truncated representation of 0xffffffffffffffff - F64Gt - If - I64Const $0x8000000000000000 - Return - End - - Get R0 - F64Const $0. - F64Lt - If - I64Const $0x8000000000000000 - Return - End - - Get R0 - I64TruncF64U - Return - TEXT runtime·exitThread(SB), NOSPLIT, $0-0 UNDEF diff --git a/src/runtime/testdata/testgoroutineleakprofile/commonpatterns.go b/src/runtime/testdata/testgoroutineleakprofile/commonpatterns.go new file mode 100644 index 00000000000000..353e48ee7034f7 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/commonpatterns.go @@ -0,0 +1,277 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "context" + "fmt" + "os" + "runtime" + "runtime/pprof" + "time" +) + +// Common goroutine leak patterns. Extracted from: +// "Unveiling and Vanquishing Goroutine Leaks in Enterprise Microservices: A Dynamic Analysis Approach" +// doi:10.1109/CGO57630.2024.10444835 +// +// Tests in this file are not flaky iff. the test is run with GOMAXPROCS=1. +// The main goroutine forcefully yields via `runtime.Gosched()` before +// running the profiler. This moves them to the back of the run queue, +// allowing the leaky goroutines to be scheduled beforehand and get stuck. + +func init() { + register("NoCloseRange", NoCloseRange) + register("MethodContractViolation", MethodContractViolation) + register("DoubleSend", DoubleSend) + register("EarlyReturn", EarlyReturn) + register("NCastLeak", NCastLeak) + register("Timeout", Timeout) +} + +// Incoming list of items and the number of workers. +func noCloseRange(list []any, workers int) { + ch := make(chan any) + + // Create each worker + for i := 0; i < workers; i++ { + go func() { + + // Each worker waits for an item and processes it. 
+ for item := range ch { + // Process each item + _ = item + } + }() + } + + // Send each item to one of the workers. + for _, item := range list { + // Sending can leak if workers == 0 or if one of the workers panics + ch <- item + } + // The channel is never closed, so workers leak once there are no more + // items left to process. +} + +func NoCloseRange() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + go noCloseRange([]any{1, 2, 3}, 0) + go noCloseRange([]any{1, 2, 3}, 3) +} + +// A worker processes items pushed to `ch` one by one in the background. +// When the worker is no longer needed, it must be closed with `Stop`. +// +// Specifications: +// +// A worker may be started any number of times, but must be stopped only once. +// Stopping a worker multiple times will lead to a close panic. +// Any worker that is started must eventually be stopped. +// Failing to stop a worker results in a goroutine leak +type worker struct { + ch chan any + done chan any +} + +// Start spawns a background goroutine that extracts items pushed to the queue. +func (w worker) Start() { + go func() { + + for { + select { + case <-w.ch: // Normal workflow + case <-w.done: + return // Shut down + } + } + }() +} + +func (w worker) Stop() { + // Allows goroutine created by Start to terminate + close(w.done) +} + +func (w worker) AddToQueue(item any) { + w.ch <- item +} + +// worker limited in scope by workerLifecycle +func workerLifecycle(items []any) { + // Create a new worker + w := worker{ + ch: make(chan any), + done: make(chan any), + } + // Start worker + w.Start() + + // Operate on worker + for _, item := range items { + w.AddToQueue(item) + } + + runtime.Gosched() + // Exits without calling ’Stop’. Goroutine created by `Start` eventually leaks. +} + +func MethodContractViolation() { + prof := pprof.Lookup("goroutineleak") + defer func() { + runtime.Gosched() + prof.WriteTo(os.Stdout, 2) + }() + + workerLifecycle(make([]any, 10)) + runtime.Gosched() +} + +// doubleSend incoming channel must send a message (incoming error simulates an error generated internally). +func doubleSend(ch chan any, err error) { + if err != nil { + // In case of an error, send nil. + ch <- nil + // Return is missing here. + } + // Otherwise, continue with normal behaviour + // This send is still executed in the error case, which may lead to a goroutine leak. + ch <- struct{}{} +} + +func DoubleSend() { + prof := pprof.Lookup("goroutineleak") + ch := make(chan any) + defer func() { + runtime.Gosched() + prof.WriteTo(os.Stdout, 2) + }() + + go func() { + doubleSend(ch, nil) + }() + <-ch + + go func() { + doubleSend(ch, fmt.Errorf("error")) + }() + <-ch + + ch1 := make(chan any, 1) + go func() { + doubleSend(ch1, fmt.Errorf("error")) + }() + <-ch1 +} + +// earlyReturn demonstrates a common pattern of goroutine leaks. +// A return statement interrupts the evaluation of the parent goroutine before it can consume a message. +// Incoming error simulates an error produced internally. +func earlyReturn(err error) { + // Create a synchronous channel + ch := make(chan any) + + go func() { + + // Send something to the channel. + // Leaks if the parent goroutine terminates early. + ch <- struct{}{} + }() + + if err != nil { + // Interrupt evaluation of parent early in case of error. + // Sender leaks. + return + } + + // Only receive if there is no error. 
+ <-ch +} + +func EarlyReturn() { + prof := pprof.Lookup("goroutineleak") + defer func() { + runtime.Gosched() + prof.WriteTo(os.Stdout, 2) + }() + + go earlyReturn(fmt.Errorf("error")) +} + +// nCastLeak processes a number of items. First result to pass the post is retrieved from the channel queue. +func nCastLeak(items []any) { + // Channel is synchronous. + ch := make(chan any) + + // Iterate over every item + for range items { + go func() { + + // Process item and send result to channel + ch <- struct{}{} + // Channel is synchronous: only one sender will synchronise + }() + } + // Retrieve first result. All other senders block. + // Receiver blocks if there are no senders. + <-ch +} + +func NCastLeak() { + prof := pprof.Lookup("goroutineleak") + defer func() { + for i := 0; i < yieldCount; i++ { + // Yield enough times to allow all the leaky goroutines to + // reach the execution point. + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) + }() + + go func() { + nCastLeak(nil) + }() + + go func() { + nCastLeak(make([]any, 5)) + }() +} + +// A context is provided to short-circuit evaluation, leading +// the sender goroutine to leak. +func timeout(ctx context.Context) { + ch := make(chan any) + + go func() { + ch <- struct{}{} + }() + + select { + case <-ch: // Receive message + // Sender is released + case <-ctx.Done(): // Context was cancelled or timed out + // Sender is leaked + } +} + +func Timeout() { + prof := pprof.Lookup("goroutineleak") + defer func() { + runtime.Gosched() + prof.WriteTo(os.Stdout, 2) + }() + + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + for i := 0; i < 100; i++ { + go timeout(ctx) + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/LICENSE b/src/runtime/testdata/testgoroutineleakprofile/goker/LICENSE new file mode 100644 index 00000000000000..f4b4b8abc4b372 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) +Copyright © 2021 Institute of Computing Technology, University of New South Wales + + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the “Software”), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS +OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
\ No newline at end of file diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/README.md b/src/runtime/testdata/testgoroutineleakprofile/goker/README.md new file mode 100644 index 00000000000000..e6f8fe23f26c02 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/README.md @@ -0,0 +1,1851 @@ +# GoKer + +The following examples are obtained from the publication +"GoBench: A Benchmark Suite of Real-World Go Concurrency Bugs" +(doi:10.1109/CGO51591.2021.9370317). + +**Authors** +Ting Yuan (yuanting@ict.ac.cn): + State Key Laboratory of Computer Architecture, Institute of Computing Technology, Chinese Academy of Sciences, + University of Chinese Academy of Sciences, Beijing, China; +Guangwei Li (liguangwei@ict.ac.cn): + State Key Laboratory of Computer Architecture, Institute of Computing Technology, Chinese Academy of Sciences, + University of Chinese Academy of Sciences, Beijing, China; +Jie Lu† (lujie@ict.ac.an): + State Key Laboratory of Computer Architecture, Institute of Computing Technology, Chinese Academy of Sciences; +Chen Liu (liuchen17z@ict.ac.cn): + State Key Laboratory of Computer Architecture, Institute of Computing Technology, Chinese Academy of Sciences, + University of Chinese Academy of Sciences, Beijing, China +Lian Li (lianli@ict.ac.cn): + State Key Laboratory of Computer Architecture, Institute of Computing Technology, Chinese Academy of Sciences, + University of Chinese Academy of Sciences, Beijing, China; +Jingling Xue (jingling@cse.unsw.edu.au): + University of New South Wales, School of Computer Science and Engineering, Sydney, Australia + +White paper: https://lujie.ac.cn/files/papers/GoBench.pdf + +The examples have been modified in order to run the goroutine leak profiler. +Buggy snippets are moved from within a unit test to separate applications. +Each is then independently executed, possibly as multiple copies within the +same application in order to exercise more interleavings. Concurrently, the +main program sets up a waiting period (typically 1ms), followed by a goroutine +leak profile request. Other modifications may involve injecting calls to +`runtime.Gosched()`, to more reliably exercise buggy interleavings, or reductions +in waiting periods when calling `time.Sleep`, in order to reduce overall testing +time. + +The resulting goroutine leak profile is analyzed to ensure that no unexpecte +leaks occurred, and that the expected leaks did occur. If the leak is flaky, the +only purpose of the expected leak list is to protect against unexpected leaks. + +The examples have also been modified to remove data races, since those create flaky +test failures, when really all we care about are leaked goroutines. + +The entries below document each of the corresponding leaks. + +## Cockroach/10214 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[cockroach#10214]|[pull request]|[patch]| Resource | AB-BA leak | + +[cockroach#10214]:(cockroach10214_test.go) +[patch]:https://github.com/cockroachdb/cockroach/pull/10214/files +[pull request]:https://github.com/cockroachdb/cockroach/pull/10214 + +### Description + +This goroutine leak is caused by different order when acquiring +coalescedMu.Lock() and raftMu.Lock(). The fix is to refactor sendQueuedHeartbeats() +so that cockroachdb can unlock coalescedMu before locking raftMu. + +### Example execution + +```go +G1 G2 +------------------------------------------------------------------------------------ +s.sendQueuedHeartbeats() . +s.coalescedMu.Lock() [L1] . 
+s.sendQueuedHeartbeatsToNode() . +s.mu.replicas[0].reportUnreachable() . +s.mu.replicas[0].raftMu.Lock() [L2] . +. s.mu.replicas[0].tick() +. s.mu.replicas[0].raftMu.Lock() [L2] +. s.mu.replicas[0].tickRaftMuLocked() +. s.mu.replicas[0].mu.Lock() [L3] +. s.mu.replicas[0].maybeQuiesceLocked() +. s.mu.replicas[0].maybeCoalesceHeartbeat() +. s.coalescedMu.Lock() [L1] +--------------------------------G1,G2 leak------------------------------------------ +``` + +## Cockroach/1055 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[cockroach#1055]|[pull request]|[patch]| Mixed | Channel & WaitGroup | + +[cockroach#1055]:(cockroach1055_test.go) +[patch]:https://github.com/cockroachdb/cockroach/pull/1055/files +[pull request]:https://github.com/cockroachdb/cockroach/pull/1055 + +### Description + +1. `Stop()` is called and blocked at `s.stop.Wait()` after acquiring the lock. +2. `StartTask()` is called and attempts to acquire the lock. It is then blocked. +3. `Stop()` never finishes since the task doesn't call SetStopped. + +### Example execution + +```go +G1 G2.0 G2.1 G2.2 G3 +------------------------------------------------------------------------------------------------------------------------------- +s[0].stop.Add(1) [1] +go func() [G2.0] +s[1].stop.Add(1) [1] . +go func() [G2.1] . +s[2].stop.Add(1) [1] . . +go func() [G2.2] . . +go func() [G3] . . . +<-done . . . . +. s[0].StartTask() . . . +. s[0].draining == 0 . . . +. . s[1].StartTask() . . +. . s[1].draining == 0 . . +. . . s[2].StartTask() . +. . . s[2].draining == 0 . +. . . . s[0].Quiesce() +. . . . s[0].mu.Lock() [L1[0]] +. s[0].mu.Lock() [L1[0]] . . . +. s[0].drain.Add(1) [1] . . . +. s[0].mu.Unlock() [L1[0]] . . . +. <-s[0].ShouldStop() . . . +. . . . s[0].draining = 1 +. . . . s[0].drain.Wait() +. . s[0].mu.Lock() [L1[1]] . . +. . s[1].drain.Add(1) [1] . . +. . s[1].mu.Unlock() [L1[1]] . . +. . <-s[1].ShouldStop() . . +. . . s[2].mu.Lock() [L1[2]] . +. . . s[2].drain.Add() [1] . +. . . s[2].mu.Unlock() [L1[2]] . +. . . <-s[2].ShouldStop() . +----------------------------------------------------G1, G2.[0..2], G3 leak----------------------------------------------------- +``` + +## Cockroach/10790 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[cockroach#10790]|[pull request]|[patch]| Communication | Channel & Context | + +[cockroach#10790]:(cockroach10790_test.go) +[patch]:https://github.com/cockroachdb/cockroach/pull/10790/files +[pull request]:https://github.com/cockroachdb/cockroach/pull/10790 + +### Description + +It is possible that a message from `ctxDone` will make `beginCmds` +return without draining the channel `ch`, so that anonymous function +goroutines will leak. + +### Example execution + +```go +G1 G2 helper goroutine +----------------------------------------------------- +. . r.sendChans() +r.beginCmds() . . +. . ch1 <- true +<- ch1 . . +. . ch2 <- true +... +. cancel() +<- ch1 +------------------G1 leak---------------------------- +``` + +## Cockroach/13197 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[cockroach#13197]|[pull request]|[patch]| Communication | Channel & Context | + +[cockroach#13197]:(cockroach13197_test.go) +[patch]:https://github.com/cockroachdb/cockroach/pull/13197/files +[pull request]:https://github.com/cockroachdb/cockroach/pull/13197 + +### Description + +One goroutine executing `(*Tx).awaitDone()` blocks and +waiting for a signal `context.Done()`. 
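+
+The general shape of this leak, sketched below with hypothetical names (not the
+actual cockroach code), is a background watcher goroutine that blocks on a
+context that is never cancelled; the trace under "Example execution" shows the
+same shape in the original code.
+
+```go
+package main
+
+import "context"
+
+// begin starts a background watcher for ctx but never arranges for the
+// context to be cancelled, mirroring the (*Tx).awaitDone pattern above.
+func begin(ctx context.Context) {
+	go awaitDone(ctx)
+}
+
+// awaitDone blocks until ctx is cancelled; if nothing ever cancels ctx,
+// this goroutine leaks.
+func awaitDone(ctx context.Context) {
+	<-ctx.Done()
+}
+
+func main() {
+	begin(context.Background())
+	// begin has returned; the awaitDone goroutine is now leaked.
+}
+```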
+ +### Example execution + +```go +G1 G2 +------------------------------- +begin() +. awaitDone() +return . +. <-tx.ctx.Done() +-----------G2 leaks------------ +``` + +## Cockroach/13755 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[cockroach#13755]|[pull request]|[patch]| Communication | Channel & Context | + +[cockroach#13755]:(cockroach13755_test.go) +[patch]:https://github.com/cockroachdb/cockroach/pull/13755/files +[pull request]:https://github.com/cockroachdb/cockroach/pull/13755 + +### Description + +The buggy code does not close the db query result (rows), +so that one goroutine running `(*Rows).awaitDone` is blocked forever. +The blocking goroutine is waiting for cancel signal from context. + +### Example execution + +```go +G1 G2 +--------------------------------------- +initContextClose() +. awaitDone() +return . +. <-tx.ctx.Done() +---------------G2 leaks---------------- +``` + +## Cockroach/1462 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[cockroach#1462]|[pull request]|[patch]| Mixed | Channel & WaitGroup | + +[cockroach#1462]:(cockroach1462_test.go) +[patch]:https://github.com/cockroachdb/cockroach/pull/1462/files +[pull request]:https://github.com/cockroachdb/cockroach/pull/1462 + +### Description + +Executing `<-stopper.ShouldStop()` in `processEventsUntil` may cause +goroutines created by `lt.RunWorker` in `lt.start` to be stuck sending +a message over `lt.Events`. The main thread is then stuck at `s.stop.Wait()`, +since the sender goroutines cannot call `s.stop.Done()`. + +### Example execution + +```go +G1 G2 G3 +------------------------------------------------------------------------------------------------------- +NewLocalInterceptableTransport() +lt.start() +lt.stopper.RunWorker() +s.AddWorker() +s.stop.Add(1) [1] +go func() [G2] +stopper.RunWorker() . +s.AddWorker() . +s.stop.Add(1) [2] . +go func() [G3] . +s.Stop() . . +s.Quiesce() . . +. select [default] . +. lt.Events <- interceptMessage(0) . +close(s.stopper) . . +. . select [<-stopper.ShouldStop()] +. . <<>> +s.stop.Wait() . +----------------------------------------------G1,G2 leak----------------------------------------------- +``` + +## Cockroach/16167 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[cockroach#16167]|[pull request]|[patch]| Resource | Double Locking | + +[cockroach#16167]:(cockroach16167_test.go) +[patch]:https://github.com/cockroachdb/cockroach/pull/16167/files +[pull request]:https://github.com/cockroachdb/cockroach/pull/16167 + +### Description + +This is another example of goroutine leaks caused by recursively +acquiring `RWLock`. +There are two lock variables (`systemConfigCond` and `systemConfigMu`) +which refer to the same underlying lock. The leak invovlves two goroutines. +The first acquires `systemConfigMu.Lock()`, then tries to acquire `systemConfigMu.RLock()`. +The second acquires `systemConfigMu.Lock()`. +If the second goroutine interleaves in between the two lock operations of the +first goroutine, both goroutines will leak. + +### Example execution + +```go +G1 G2 +--------------------------------------------------------------- +. e.Start() +. e.updateSystemConfig() +e.execParsed() . +e.systemConfigCond.L.Lock() [L1] . +. e.systemConfigMu.Lock() [L1] +e.systemConfigMu.RLock() [L1] . 
+------------------------G1,G2 leak----------------------------- +``` + +## Cockroach/18101 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[cockroach#18101]|[pull request]|[patch]| Resource | Double Locking | + +[cockroach#18101]:(cockroach18101_test.go) +[patch]:https://github.com/cockroachdb/cockroach/pull/18101/files +[pull request]:https://github.com/cockroachdb/cockroach/pull/18101 + +### Description + +The `context.Done()` signal short-circuits the reader goroutine, but not +the senders, leading them to leak. + +### Example execution + +```go +G1 G2 helper goroutine +-------------------------------------------------------------- +restore() +. splitAndScatter() +<-readyForImportCh . +<-readyForImportCh <==> readyForImportCh<- +... +. . cancel() +<> . <> + readyForImportCh<- +-----------------------G2 leaks-------------------------------- +``` + +## Cockroach/2448 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[cockroach#2448]|[pull request]|[patch]| Communication | Channel | + +[cockroach#2448]:(cockroach2448_test.go) +[patch]:https://github.com/cockroachdb/cockroach/pull/2448/files +[pull request]:https://github.com/cockroachdb/cockroach/pull/2448 + +### Description + +This bug is caused by two goroutines waiting for each other +to unblock their channels: + +1) `MultiRaft` sends the commit event for the Membership change +2) `store.processRaft` takes it and begins processing +3) another command commits and triggers another `sendEvent`, but + this blocks since `store.processRaft` isn't ready for another + `select`. Consequently the main `MultiRaft` loop is waiting for + that as well. +4) the `Membership` change was applied to the range, and the store + now tries to execute the callback +5) the callback tries to write to `callbackChan`, but that is + consumed by the `MultiRaft` loop, which is currently waiting + for `store.processRaft` to consume from the events channel, + which it will only do after the callback has completed. + +### Example execution + +```go +G1 G2 +-------------------------------------------------------------------------- +s.processRaft() st.start() +select . +. select [default] +. s.handleWriteResponse() +. s.sendEvent() +. select +<-s.multiraft.Events <----> m.Events <- event +. select [default] +. s.handleWriteResponse() +. s.sendEvent() +. select [m.Events<-, <-s.stopper.ShouldStop()] +callback() . +select [ + m.callbackChan<-, + <-s.stopper.ShouldStop() +] . +------------------------------G1,G2 leak---------------------------------- +``` + +## Cockroach/24808 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[cockroach#24808]|[pull request]|[patch]| Communication | Channel | + +[cockroach#24808]:(cockroach24808_test.go) +[patch]:https://github.com/cockroachdb/cockroach/pull/24808/files +[pull request]:https://github.com/cockroachdb/cockroach/pull/24808 + +### Description + +When we `Start` the `Compactor`, it may already have received +`Suggestions`, leaking the previously blocking write to a full channel. + +### Example execution + +```go +G1 +------------------------------------------------ +... 
+compactor.ch <- +compactor.Start() +compactor.ch <- +--------------------G1 leaks-------------------- +``` + +## Cockroach/25456 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[cockroach#25456]|[pull request]|[patch]| Communication | Channel | + +[cockroach#25456]:(cockroach25456_test.go) +[patch]:https://github.com/cockroachdb/cockroach/pull/25456/files +[pull request]:https://github.com/cockroachdb/cockroach/pull/25456 + +### Description + +When `CheckConsistency` (in the complete code) returns an error, the queue +checks whether the store is draining to decide whether the error is worth +logging. This check was incorrect and would block until the store actually +started draining. + +### Example execution + +```go +G1 +--------------------------------------- +... +<-repl.store.Stopper().ShouldQuiesce() +---------------G1 leaks---------------- +``` + +## Cockroach/35073 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[cockroach#35073]|[pull request]|[patch]| Communication | Channel | + +[cockroach#35073]:(cockroach35073_test.go) +[patch]:https://github.com/cockroachdb/cockroach/pull/35073/files +[pull request]:https://github.com/cockroachdb/cockroach/pull/35073 + +### Description + +Previously, the outbox could fail during startup without closing its +`RowChannel`. This could lead to goroutine leaks in rare cases due +to channel communication mismatch. + +### Example execution + +## Cockroach/35931 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[cockroach#35931]|[pull request]|[patch]| Communication | Channel | + +[cockroach#35931]:(cockroach35931_test.go) +[patch]:https://github.com/cockroachdb/cockroach/pull/35931/files +[pull request]:https://github.com/cockroachdb/cockroach/pull/35931 + +### Description + +Previously, if a processor that reads from multiple inputs was waiting +on one input to provide more data, and the other input was full, and +both inputs were connected to inbound streams, it was possible to +cause goroutine leaks during flow cancellation when trying to propagate +the cancellation metadata messages into the flow. The cancellation method +wrote metadata messages to each inbound stream one at a time, so if the +first one was full, the canceller would block and never send a cancellation +message to the second stream, which was the one actually being read from. + +### Example execution + +## Cockroach/3710 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[cockroach#3710]|[pull request]|[patch]| Resource | RWR Deadlock | + +[cockroach#3710]:(cockroach3710_test.go) +[patch]:https://github.com/cockroachdb/cockroach/pull/3710/files +[pull request]:https://github.com/cockroachdb/cockroach/pull/3710 + +### Description + +The goroutine leak is caused by acquiring a RLock twice in a call chain. +`ForceRaftLogScanAndProcess(acquire s.mu.RLock())` +`-> MaybeAdd()` +`-> shouldQueue()` +`-> getTruncatableIndexes()` +`->RaftStatus(acquire s.mu.Rlock())` + +### Example execution + +```go +G1 G2 +------------------------------------------------------------ +store.ForceRaftLogScanAndProcess() +s.mu.RLock() +s.raftLogQueue.MaybeAdd() +bq.impl.shouldQueue() +getTruncatableIndexes() +r.store.RaftStatus() +. store.processRaft() +. 
s.mu.Lock() +s.mu.RLock() +----------------------G1,G2 leak----------------------------- +``` + +## Cockroach/584 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[cockroach#584]|[pull request]|[patch]| Resource | Double Locking | + +[cockroach#584]:(cockroach584_test.go) +[patch]:https://github.com/cockroachdb/cockroach/pull/584/files +[pull request]:https://github.com/cockroachdb/cockroach/pull/584 + +### Description + +Missing call to `mu.Unlock()` before the `break` in the loop. + +### Example execution + +```go +G1 +--------------------------- +g.bootstrap() +g.mu.Lock() [L1] +if g.closed { ==> break +g.manage() +g.mu.Lock() [L1] +----------G1 leaks--------- +``` + +## Cockroach/6181 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[cockroach#6181]|[pull request]|[patch]| Resource | RWR Deadlock | + +[cockroach#6181]:(cockroach6181_test.go) +[patch]:https://github.com/cockroachdb/cockroach/pull/6181/files +[pull request]:https://github.com/cockroachdb/cockroach/pull/6181 + +### Description + +The same `RWMutex` may be recursively acquired for both reading and writing. + +### Example execution + +```go +G1 G2 G3 ... +----------------------------------------------------------------------------------------------- +testRangeCacheCoalescedRquests() +initTestDescriptorDB() +pauseLookupResumeAndAssert() +return +. doLookupWithToken() +. . doLookupWithToken() +. rc.LookupRangeDescriptor() . +. . rc.LookupRangeDescriptor() +. rdc.rangeCacheMu.RLock() . +. rdc.String() . +. . rdc.rangeCacheMu.RLock() +. . fmt.Printf() +. . rdc.rangeCacheMu.RUnlock() +. . rdc.rangeCacheMu.Lock() +. rdc.rangeCacheMu.RLock() . +-----------------------------------G2,G3,... leak---------------------------------------------- +``` + +## Cockroach/7504 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[cockroach#7504]|[pull request]|[patch]| Resource | AB-BA Deadlock | + +[cockroach#7504]:(cockroach7504_test.go) +[patch]:https://github.com/cockroachdb/cockroach/pull/7504/files +[pull request]:https://github.com/cockroachdb/cockroach/pull/7504 + +### Description + +The locks are acquired as `leaseState` and `tableNameCache` in `Release()`, but +as `tableNameCache` and `leaseState` in `AcquireByName`, leading to an AB-BA deadlock. + +### Example execution + +```go +G1 G2 +----------------------------------------------------- +mgr.AcquireByName() mgr.Release() +m.tableNames.get(id) . +c.mu.Lock() [L2] . +. t.release(lease) +. t.mu.Lock() [L3] +. s.mu.Lock() [L1] +lease.mu.Lock() [L1] . +. t.removeLease(s) +. t.tableNameCache.remove() +. c.mu.Lock() [L2] +---------------------G1, G2 leak--------------------- +``` + +## Cockroach/9935 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[cockroach#9935]|[pull request]|[patch]| Resource | Double Locking | + +[cockroach#9935]:(cockroach9935_test.go) +[patch]:https://github.com/cockroachdb/cockroach/pull/9935/files +[pull request]:https://github.com/cockroachdb/cockroach/pull/9935 + +### Description + +This bug is caused by acquiring `l.mu.Lock()` twice. 
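+
+As a minimal sketch (with hypothetical names, not the actual cockroach code),
+the double-lock pattern is a function that holds a mutex and then calls a
+helper that acquires the same mutex again; since `sync.Mutex` is not
+reentrant, the goroutine blocks on itself and leaks:
+
+```go
+package main
+
+import "sync"
+
+type leaseMgr struct {
+	mu sync.Mutex
+}
+
+// acquire takes l.mu and, on its error path, calls a helper that takes
+// l.mu again: a self-deadlock.
+func (l *leaseMgr) acquire(fail bool) {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	if fail {
+		l.release() // second Lock on the same mutex
+	}
+}
+
+func (l *leaseMgr) release() {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+}
+
+func main() {
+	l := &leaseMgr{}
+	go l.acquire(true) // this goroutine leaks
+}
+```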
+ +### Example execution + +## Etcd/10492 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[etcd#10492]|[pull request]|[patch]| Resource | Double locking | + +[etcd#10492]:(etcd10492_test.go) +[patch]:https://github.com/etcd-io/etcd/pull/10492/files +[pull request]:https://github.com/etcd-io/etcd/pull/10492 + +### Description + +A simple double locking case for lines 19, 31. + +## Etcd/5509 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[etcd#5509]|[pull request]|[patch]| Resource | Double locking | + +[etcd#5509]:(etcd5509_test.go) +[patch]:https://github.com/etcd-io/etcd/pull/5509/files +[pull request]:https://github.com/etcd-io/etcd/pull/5509 + +### Description + +`r.acquire()` returns holding `r.client.mu.RLock()` on a failure path (line 42). +This causes any call to `client.Close()` to leak goroutines. + +## Etcd/6708 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[etcd#6708]|[pull request]|[patch]| Resource | Double locking | + +[etcd#6708]:(etcd6708_test.go) +[patch]:https://github.com/etcd-io/etcd/pull/6708/files +[pull request]:https://github.com/etcd-io/etcd/pull/6708 + +### Description + +Line 54, 49 double locking + +## Etcd/6857 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[etcd#6857]|[pull request]|[patch]| Communication | Channel | + +[etcd#6857]:(etcd6857_test.go) +[patch]:https://github.com/etcd-io/etcd/pull/6857/files +[pull request]:https://github.com/etcd-io/etcd/pull/6857 + +### Description + +Choosing a different case in a `select` statement (`n.stop`) will +lead to goroutine leaks when sending over `n.status`. + +### Example execution + +```go +G1 G2 G3 +------------------------------------------- +n.run() . . +. . n.Stop() +. . n.stop<- +<-n.stop . . +. . <-n.done +close(n.done) . . +return . . +. . return +. n.Status() +. n.status<- +----------------G2 leaks------------------- +``` + +## Etcd/6873 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[etcd#6873]|[pull request]|[patch]| Mixed | Channel & Lock | + +[etcd#6873]:(etcd6873_test.go) +[patch]:https://github.com/etcd-io/etcd/pull/6873/files +[pull request]:https://github.com/etcd-io/etcd/pull/6873 + +### Description + +This goroutine leak involves a goroutine acquiring a lock and being +blocked over a channel operation with no partner, while another tries +to acquire the same lock. + +### Example execution + +```go +G1 G2 G3 +-------------------------------------------------------------- +newWatchBroadcasts() +wbs.update() +wbs.updatec <- +return +. <-wbs.updatec . +. wbs.coalesce() . +. . wbs.stop() +. . wbs.mu.Lock() +. . close(wbs.updatec) +. . <-wbs.donec +. wbs.mu.Lock() . +---------------------G2,G3 leak-------------------------------- +``` + +## Etcd/7492 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[etcd#7492]|[pull request]|[patch]| Mixed | Channel & Lock | + +[etcd#7492]:(etcd7492_test.go) +[patch]:https://github.com/etcd-io/etcd/pull/7492/files +[pull request]:https://github.com/etcd-io/etcd/pull/7492 + +### Description + +This goroutine leak involves a goroutine acquiring a lock and being +blocked over a channel operation with no partner, while another tries +to acquire the same lock. + +### Example execution + +```go +G2 G1 +--------------------------------------------------------------- +. stk.run() +ts.assignSimpleTokenToUser() . +t.simpleTokensMu.Lock() . 
+t.simpleTokenKeeper.addSimpleToken() . +tm.addSimpleTokenCh <- true . +. <-tm.addSimpleTokenCh +t.simpleTokensMu.Unlock() . +ts.assignSimpleTokenToUser() . +... +t.simpleTokensMu.Lock() +. <-tokenTicker.C +tm.addSimpleTokenCh <- true . +. tm.deleteTokenFunc() +. t.simpleTokensMu.Lock() +---------------------------G1,G2 leak-------------------------- +``` + +## Etcd/7902 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[etcd#7902]|[pull request]|[patch]| Mixed | Channel & Lock | + +[etcd#7902]:(etcd7902_test.go) +[patch]:https://github.com/etcd-io/etcd/pull/7902/files +[pull request]:https://github.com/etcd-io/etcd/pull/7902 + +### Description + +If the follower gooroutine acquires `mu.Lock()` first and calls +`rc.release()`, it will be blocked sending over `rcNextc`. +Only the leader can `close(nextc)` to unblock the follower. +However, in order to invoke `rc.release()`, the leader needs +to acquires `mu.Lock()`. +The fix is to remove the lock and unlock around `rc.release()`. + +### Example execution + +```go +G1 G2 (leader) G3 (follower) +--------------------------------------------------------------------- +runElectionFunc() +doRounds() +wg.Wait() +. ... +. mu.Lock() +. rc.validate() +. rcNextc = nextc +. mu.Unlock() ... +. . mu.Lock() +. . rc.validate() +. . mu.Unlock() +. . mu.Lock() +. . rc.release() +. . <-rcNextc +. mu.Lock() +-------------------------G1,G2,G3 leak-------------------------- +``` + +## Grpc/1275 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[grpc#1275]|[pull request]|[patch]| Communication | Channel | + +[grpc#1275]:(grpc1275_test.go) +[patch]:https://github.com/grpc/grpc-go/pull/1275/files +[pull request]:https://github.com/grpc/grpc-go/pull/1275 + +### Description + +Two goroutines are involved in this leak. The main goroutine +is blocked at `case <- donec`, and is waiting for the second goroutine +to close the channel. +The second goroutine is created by the main goroutine. It is blocked +when calling `stream.Read()`, which invokes `recvBufferRead.Read()`. +The second goroutine is blocked at case `i := r.recv.get()`, and it is +waiting for someone to send a message to this channel. +It is the `client.CloseSream()` method called by the main goroutine that +should send the message, but it is not. The patch is to send out this message. + +### Example execution + +```go +G1 G2 +----------------------------------------------------- +testInflightStreamClosing() +. stream.Read() +. io.ReadFull() +. <-r.recv.get() +CloseStream() +<-donec +---------------------G1, G2 leak--------------------- +``` + +## Grpc/1424 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[grpc#1424]|[pull request]|[patch]| Communication | Channel | + +[grpc#1424]:(grpc1424_test.go) +[patch]:https://github.com/grpc/grpc-go/pull/1424/files +[pull request]:https://github.com/grpc/grpc-go/pull/1424 + +### Description + +The goroutine running `cc.lbWatcher` returns without +draining the `done` channel. + +### Example execution + +```go +G1 G2 G3 +----------------------------------------------------------------- +DialContext() . . +. cc.dopts.balancer.Notify() . +. . cc.lbWatcher() +. 
<-doneChan +close() +---------------------------G2 leaks------------------------------- +``` + +## Grpc/1460 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[grpc#1460]|[pull request]|[patch]| Mixed | Channel & Lock | + +[grpc#1460]:(grpc1460_test.go) +[patch]:https://github.com/grpc/grpc-go/pull/1460/files +[pull request]:https://github.com/grpc/grpc-go/pull/1460 + +### Description + +When gRPC keepalives are enabled (which isn't the case +by default at this time) and PermitWithoutStream is false +(the default), the client can leak goroutines when transitioning +between having no active stream and having one active +stream.The keepalive() goroutine is stuck at “<-t.awakenKeepalive”, +while the main goroutine is stuck in NewStream() on t.mu.Lock(). + +### Example execution + +```go +G1 G2 +-------------------------------------------- +client.keepalive() +. client.NewStream() +t.mu.Lock() +<-t.awakenKeepalive +. t.mu.Lock() +---------------G1,G2 leak------------------- +``` + +## Grpc/3017 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[grpc#3017]|[pull request]|[patch]| Resource | Missing unlock | + +[grpc#3017]:(grpc3017_test.go) +[patch]:https://github.com/grpc/grpc-go/pull/3017/files +[pull request]:https://github.com/grpc/grpc-go/pull/3017 + +### Description + +Line 65 is an execution path with a missing unlock. + +### Example execution + +```go +G1 G2 G3 +------------------------------------------------------------------------------------------------ +NewSubConn([1]) +ccc.mu.Lock() [L1] +sc = 1 +ccc.subConnToAddr[1] = 1 +go func() [G2] +<-done . +. ccc.RemoveSubConn(1) +. ccc.mu.Lock() +. addr = 1 +. entry = &subConnCacheEntry_grpc3017{} +. cc.subConnCache[1] = entry +. timer = time.AfterFunc() [G3] +. entry.cancel = func() +. sc = ccc.NewSubConn([1]) +. ccc.mu.Lock() [L1] +. entry.cancel() +. !timer.Stop() [true] +. entry.abortDeleting = true +. . ccc.mu.Lock() +. . <<>> +. ccc.RemoveSubConn(1) +. ccc.mu.Lock() [L1] +-------------------------------------------G1, G2 leak----------------------------------------- +``` + +## Grpc/660 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[grpc#660]|[pull request]|[patch]| Communication | Channel | + +[grpc#660]:(grpc660_test.go) +[patch]:https://github.com/grpc/grpc-go/pull/660/files +[pull request]:https://github.com/grpc/grpc-go/pull/660 + +### Description + +The parent function could return without draining the done channel. + +### Example execution + +```go +G1 G2 helper goroutine +------------------------------------------------------------- +doCloseLoopUnary() +. bc.stop <- true +<-bc.stop +return +. done <- +----------------------G2 leak-------------------------------- +``` + +## Grpc/795 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[grpc#795]|[pull request]|[patch]| Resource | Double locking | + +[grpc#795]:(grpc795_test.go) +[patch]:https://github.com/grpc/grpc-go/pull/795/files +[pull request]:https://github.com/grpc/grpc-go/pull/795 + +### Description + +Line 20 is an execution path with a missing unlock. 
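+
+A minimal sketch of the missing-unlock pattern (hypothetical names, not the
+grpc-go code): an early return skips the `Unlock`, so the next goroutine that
+tries to take the lock blocks forever:
+
+```go
+package main
+
+import "sync"
+
+type picker struct {
+	mu    sync.Mutex
+	ready bool
+}
+
+// pick returns early without unlocking when the picker is not ready,
+// leaving p.mu held forever.
+func (p *picker) pick() {
+	p.mu.Lock()
+	if !p.ready {
+		return // missing p.mu.Unlock() on this path
+	}
+	p.mu.Unlock()
+}
+
+func main() {
+	p := &picker{}
+	p.pick()    // returns while still holding p.mu
+	go p.pick() // blocks on p.mu.Lock() and leaks
+}
+```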
+ +## Grpc/862 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[grpc#862]|[pull request]|[patch]| Communication | Channel & Context | + +[grpc#862]:(grpc862_test.go) +[patch]:https://github.com/grpc/grpc-go/pull/862/files +[pull request]:https://github.com/grpc/grpc-go/pull/862 + +### Description + +When return value `conn` is `nil`, `cc(ClientConn)` is not closed. +The goroutine executing resetAddrConn is leaked. The patch is to +close `ClientConn` in `defer func()`. + +### Example execution + +```go +G1 G2 +--------------------------------------- +DialContext() +. cc.resetAddrConn() +. resetTransport() +. <-ac.ctx.Done() +--------------G2 leak------------------ +``` + +## Hugo/3251 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[hugo#3251]|[pull request]|[patch]| Resource | RWR deadlock | + +[hugo#3251]:(hugo3251_test.go) +[patch]:https://github.com/gohugoio/hugo/pull/3251/files +[pull request]:https://github.com/gohugoio/hugo/pull/3251 + +### Description + +A goroutine can hold `Lock()` at line 20 then acquire `RLock()` at +line 29. `RLock()` at line 29 will never be acquired because `Lock()` +at line 20 will never be released. + +### Example execution + +```go +G1 G2 G3 +------------------------------------------------------------------------------------------ +wg.Add(1) [W1: 1] +go func() [G2] +go func() [G3] +. resGetRemote() +. remoteURLLock.URLLock(url) +. l.Lock() [L1] +. l.m[url] = &sync.Mutex{} [L2] +. l.m[url].Lock() [L2] +. l.Unlock() [L1] +. . resGetRemote() +. . remoteURLLock.URLLock(url) +. . l.Lock() [L1] +. . l.m[url].Lock() [L2] +. remoteURLLock.URLUnlock(url) +. l.RLock() [L1] +... +wg.Wait() [W1] +----------------------------------------G1,G2,G3 leak-------------------------------------- +``` + +## Hugo/5379 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[hugo#5379]|[pull request]|[patch]| Resource | Double locking | + +[hugo#5379]:(hugo5379_test.go) +[patch]:https://github.com/gohugoio/hugo/pull/5379/files +[pull request]:https://github.com/gohugoio/hugo/pull/5379 + +### Description + +A goroutine first acquire `contentInitMu` at line 99 then +acquire the same `Mutex` at line 66 + +## Istio/16224 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[istio#16224]|[pull request]|[patch]| Mixed | Channel & Lock | + +[istio#16224]:(istio16224_test.go) +[patch]:https://github.com/istio/istio/pull/16224/files +[pull request]:https://github.com/istio/istio/pull/16224 + +### Description + +A goroutine holds a `Mutex` at line 91 and is then blocked at line 93. +Another goroutine attempts to acquire the same `Mutex` at line 101 to +further drains the same channel at 103. + +## Istio/17860 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[istio#17860]|[pull request]|[patch]| Communication | Channel | + +[istio#17860]:(istio17860_test.go) +[patch]:https://github.com/istio/istio/pull/17860/files +[pull request]:https://github.com/istio/istio/pull/17860 + +### Description + +`a.statusCh` can't be drained at line 70. 
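+
+The shape of this leak, in a minimal sketch with hypothetical names (not the
+istio code), is a producer that sends a status update on an unbuffered channel
+after the consumer has stopped draining it:
+
+```go
+package main
+
+// run drains statusCh until stop is closed, then exits and closes done.
+func run(statusCh chan string, stop, done chan struct{}) {
+	defer close(done)
+	for {
+		select {
+		case <-stop:
+			return // after this point nothing drains statusCh
+		case <-statusCh:
+		}
+	}
+}
+
+func main() {
+	statusCh := make(chan string)
+	stop := make(chan struct{})
+	done := make(chan struct{})
+	go run(statusCh, stop, done)
+
+	close(stop)
+	<-done // the consumer has exited
+
+	go func() {
+		statusCh <- "healthy" // no receiver left: this sender leaks
+	}()
+}
+```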
+ +## Istio/18454 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[istio#18454]|[pull request]|[patch]| Communication | Channel & Context | + +[istio#18454]:(istio18454_test.go) +[patch]:https://github.com/istio/istio/pull/18454/files +[pull request]:https://github.com/istio/istio/pull/18454 + +### Description + +`s.timer.Stop()` at line 56 and 61 can be called concurrency +(i.e. from their entry point at line 104 and line 66). +See [Timer](https://golang.org/pkg/time/#Timer). + +## Kubernetes/10182 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[kubernetes#10182]|[pull request]|[patch]| Mixed | Channel & Lock | + +[kubernetes#10182]:(kubernetes10182_test.go) +[patch]:https://github.com/kubernetes/kubernetes/pull/10182/files +[pull request]:https://github.com/kubernetes/kubernetes/pull/10182 + +### Description + +Goroutine 1 is blocked on a lock held by goroutine 3, +while goroutine 3 is blocked on sending message to `ch`, +which is read by goroutine 1. + +### Example execution + +```go +G1 G2 G3 +------------------------------------------------------------------------------- +s.Start() +s.syncBatch() +. s.SetPodStatus() +. s.podStatusesLock.Lock() +<-s.podStatusChannel <===> s.podStatusChannel <- true +. s.podStatusesLock.Unlock() +. return +s.DeletePodStatus() . +. . s.podStatusesLock.Lock() +. . s.podStatusChannel <- true +s.podStatusesLock.Lock() +-----------------------------G1,G3 leak----------------------------------------- +``` + +## Kubernetes/11298 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[kubernetes#11298]|[pull request]|[patch]| Communication | Channel & Condition Variable | + +[kubernetes#11298]:(kubernetes11298_test.go) +[patch]:https://github.com/kubernetes/kubernetes/pull/11298/files +[pull request]:https://github.com/kubernetes/kubernetes/pull/11298 + +### Description + +`n.node` used the `n.lock` as underlaying locker. The service loop initially +locked it, the `Notify` function tried to lock it before calling `n.node.Signal()`, +leading to a goroutine leak. `n.cond.Signal()` at line 59 and line 81 are not +guaranteed to unblock the `n.cond.Wait` at line 56. + +## Kubernetes/13135 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[kubernetes#13135]|[pull request]|[patch]| Resource | AB-BA deadlock | + +[kubernetes#13135]:(kubernetes13135_test.go) +[patch]:https://github.com/kubernetes/kubernetes/pull/13135/files +[pull request]:https://github.com/kubernetes/kubernetes/pull/13135 + +### Description + +```go +G1 G2 G3 +---------------------------------------------------------------------------------- +NewCacher() +watchCache.SetOnReplace() +watchCache.SetOnEvent() +. cacher.startCaching() +. c.Lock() +. c.reflector.ListAndWatch() +. r.syncWith() +. r.store.Replace() +. w.Lock() +. w.onReplace() +. cacher.initOnce.Do() +. cacher.Unlock() +return cacher . +. . c.watchCache.Add() +. . w.processEvent() +. . w.Lock() +. cacher.startCaching() . +. c.Lock() . +... +. c.Lock() +. 
w.Lock() +--------------------------------G2,G3 leak----------------------------------------- +``` + +## Kubernetes/1321 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[kubernetes#1321]|[pull request]|[patch]| Mixed | Channel & Lock | + +[kubernetes#1321]:(kubernetes1321_test.go) +[patch]:https://github.com/kubernetes/kubernetes/pull/1321/files +[pull request]:https://github.com/kubernetes/kubernetes/pull/1321 + +### Description + +This is a lock-channel bug. The first goroutine invokes +`distribute()`, which holds `m.lock.Lock()`, while blocking +at sending message to `w.result`. The second goroutine +invokes `stopWatching()` function, which can unblock the first +goroutine by closing `w.result`. However, in order to close `w.result`, +`stopWatching()` function needs to acquire `m.lock.Lock()`. + +The fix is to introduce another channel and put receive message +from the second channel in the same `select` statement as the +`w.result`. Close the second channel can unblock the first +goroutine, while no need to hold `m.lock.Lock()`. + +### Example execution + +```go +G1 G2 +---------------------------------------------- +testMuxWatcherClose() +NewMux() +. m.loop() +. m.distribute() +. m.lock.Lock() +. w.result <- true +w := m.Watch() +w.Stop() +mw.m.stopWatching() +m.lock.Lock() +---------------G1,G2 leak--------------------- +``` + +## Kubernetes/25331 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[kubernetes#25331]|[pull request]|[patch]| Communication | Channel & Context | + +[kubernetes#25331]:(kubernetes25331_test.go) +[patch]:https://github.com/kubernetes/kubernetes/pull/25331/files +[pull request]:https://github.com/kubernetes/kubernetes/pull/25331 + +### Description + +A potential goroutine leak occurs when an error has happened, +blocking `resultChan`, while cancelling context in `Stop()`. + +### Example execution + +```go +G1 G2 +------------------------------------ +wc.run() +. wc.Stop() +. wc.errChan <- +. wc.cancel() +<-wc.errChan +wc.cancel() +wc.resultChan <- +-------------G1 leak---------------- +``` + +## Kubernetes/26980 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[kubernetes#26980]|[pull request]|[patch]| Mixed | Channel & Lock | + +[kubernetes#26980]:(kubernetes26980_test.go) +[patch]:https://github.com/kubernetes/kubernetes/pull/26980/files +[pull request]:https://github.com/kubernetes/kubernetes/pull/26980 + +### Description + +A goroutine holds a `Mutex` at line 24 and blocked at line 35. +Another goroutine blocked at line 58 by acquiring the same `Mutex`. + +## Kubernetes/30872 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[kubernetes#30872]|[pull request]|[patch]| Resource | AB-BA deadlock | + +[kubernetes#30872]:(kubernetes30872_test.go) +[patch]:https://github.com/kubernetes/kubernetes/pull/30872/files +[pull request]:https://github.com/kubernetes/kubernetes/pull/30872 + +### Description + +The lock is acquired both at lines 92 and 157. + +## Kubernetes/38669 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[kubernetes#38669]|[pull request]|[patch]| Communication | Channel | + +[kubernetes#38669]:(kubernetes38669_test.go) +[patch]:https://github.com/kubernetes/kubernetes/pull/38669/files +[pull request]:https://github.com/kubernetes/kubernetes/pull/38669 + +### Description + +No sender for line 33. 
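+
+In a minimal sketch (hypothetical names, not the kubernetes code), the pattern
+is a goroutine blocked receiving from a channel that no other goroutine ever
+sends on or closes:
+
+```go
+package main
+
+// watch blocks receiving from a channel that nothing ever sends on
+// or closes, so the goroutine leaks.
+func watch() {
+	resultCh := make(chan struct{})
+	<-resultCh
+}
+
+func main() {
+	go watch()
+}
+```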
+ +## Kubernetes/5316 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[kubernetes#5316]|[pull request]|[patch]| Communication | Channel | + +[kubernetes#5316]:(kubernetes5316_test.go) +[patch]:https://github.com/kubernetes/kubernetes/pull/5316/files +[pull request]:https://github.com/kubernetes/kubernetes/pull/5316 + +### Description + +If the main goroutine selects a case that doesn’t consumes +the channels, the anonymous goroutine will be blocked on sending +to channel. + +### Example execution + +```go +G1 G2 +-------------------------------------- +finishRequest() +. fn() +time.After() +. errCh<-/ch<- +--------------G2 leaks---------------- +``` + +## Kubernetes/58107 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[kubernetes#58107]|[pull request]|[patch]| Resource | RWR deadlock | + +[kubernetes#58107]:(kubernetes58107_test.go) +[patch]:https://github.com/kubernetes/kubernetes/pull/58107/files +[pull request]:https://github.com/kubernetes/kubernetes/pull/58107 + +### Description + +The rules for read and write lock: allows concurrent read lock; +write lock has higher priority than read lock. + +There are two queues (queue 1 and queue 2) involved in this bug, +and the two queues are protected by the same read-write lock +(`rq.workerLock.RLock()`). Before getting an element from queue 1 or +queue 2, `rq.workerLock.RLock()` is acquired. If the queue is empty, +`cond.Wait()` will be invoked. There is another goroutine (goroutine D), +which will periodically invoke `rq.workerLock.Lock()`. Under the following +situation, deadlock will happen. Queue 1 is empty, so that some goroutines +hold `rq.workerLock.RLock()`, and block at `cond.Wait()`. Goroutine D is +blocked when acquiring `rq.workerLock.Lock()`. Some goroutines try to process +jobs in queue 2, but they are blocked when acquiring `rq.workerLock.RLock()`, +since write lock has a higher priority. + +The fix is to not acquire `rq.workerLock.RLock()`, while pulling data +from any queue. Therefore, when a goroutine is blocked at `cond.Wait()`, +`rq.workLock.RLock()` is not held. + +### Example execution + +```go +G3 G4 G5 +-------------------------------------------------------------------- +. . Sync() +rq.workerLock.RLock() . . +q.cond.Wait() . . +. . rq.workerLock.Lock() +. rq.workerLock.RLock() +. q.cond.L.Lock() +-----------------------------G3,G4,G5 leak----------------------------- +``` + +## Kubernetes/62464 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[kubernetes#62464]|[pull request]|[patch]| Resource | RWR deadlock | + +[kubernetes#62464]:(kubernetes62464_test.go) +[patch]:https://github.com/kubernetes/kubernetes/pull/62464/files +[pull request]:https://github.com/kubernetes/kubernetes/pull/62464 + +### Description + +This is another example for recursive read lock bug. It has +been noticed by the go developers that RLock should not be +recursively used in the same thread. + +### Example execution + +```go +G1 G2 +-------------------------------------------------------- +m.reconcileState() +m.state.GetCPUSetOrDefault() +s.RLock() +s.GetCPUSet() +. p.RemoveContainer() +. s.GetDefaultCPUSet() +. s.SetDefaultCPUSet() +. 
s.Lock() +s.RLock() +---------------------G1,G2 leak-------------------------- +``` + +## Kubernetes/6632 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[kubernetes#6632]|[pull request]|[patch]| Mixed | Channel & Lock | + +[kubernetes#6632]:(kubernetes6632_test.go) +[patch]:https://github.com/kubernetes/kubernetes/pull/6632/files +[pull request]:https://github.com/kubernetes/kubernetes/pull/6632 + +### Description + +When `resetChan` is full, `WriteFrame` holds the lock and blocks +on the channel. Then `monitor()` fails to close the `resetChan` +because the lock is already held by `WriteFrame`. + + +### Example execution + +```go +G1 G2 helper goroutine +---------------------------------------------------------------- +i.monitor() +<-i.conn.closeChan +. i.WriteFrame() +. i.writeLock.Lock() +. i.resetChan <- +. . i.conn.closeChan<- +i.writeLock.Lock() +----------------------G1,G2 leak-------------------------------- +``` + +## Kubernetes/70277 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[kubernetes#70277]|[pull request]|[patch]| Communication | Channel | + +[kubernetes#70277]:kubernetes70277_test.go +[patch]:https://github.com/kubernetes/kubernetes/pull/70277/files +[pull request]:https://github.com/kubernetes/kubernetes/pull/70277 + +### Description + +`wait.poller()` returns a function with type `WaitFunc`. +the function creates a goroutine and the goroutine only +quits when after or done closed. + +The `doneCh` defined at line 70 is never closed. + +## Moby/17176 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[moby#17176]|[pull request]|[patch]| Resource | Double locking | + +[moby#17176]:(moby17176_test.go) +[patch]:https://github.com/moby/moby/pull/17176/files +[pull request]:https://github.com/moby/moby/pull/17176 + +### Description + +`devices.nrDeletedDevices` takes `devices.Lock()` but does +not release it (line 36) if there are no deleted devices. This will block +other goroutines trying to acquire `devices.Lock()`. + +## Moby/21233 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[moby#21233]|[pull request]|[patch]| Communication | Channel | + +[moby#21233]:(moby21233_test.go) +[patch]:https://github.com/moby/moby/pull/21233/files +[pull request]:https://github.com/moby/moby/pull/21233 + +### Description + +This test was checking that it received every progress update that was +produced. But delivery of these intermediate progress updates is not +guaranteed. A new update can overwrite the previous one if the previous +one hasn't been sent to the channel yet. + +The call to `t.Fatalf` terminated the current goroutine which was consuming +the channel, which caused a deadlock and eventual test timeout rather +than a proper failure message. + +### Example execution + +```go +G1 G2 G3 +---------------------------------------------------------- +testTransfer() . . +tm.Transfer() . . +t.Watch() . . +. WriteProgress() . +. ProgressChan<- . +. . <-progressChan +. ... ... +. return . +. 
<-progressChan +<-watcher.running +----------------------G1,G3 leak-------------------------- +``` + +## Moby/25384 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[moby#25384]|[pull request]|[patch]| Mixed | Misuse WaitGroup | + +[moby#25384]:(moby25384_test.go) +[patch]:https://github.com/moby/moby/pull/25384/files +[pull request]:https://github.com/moby/moby/pull/25384 + +### Description + +When `n=1` (where `n` is `len(pm.plugins)`), the location of `group.Wait()` doesn’t matter. +When `n > 1`, `group.Wait()` is invoked in each iteration. Whenever +`group.Wait()` is invoked, it waits for `group.Done()` to be executed `n` times. +However, `group.Done()` is only executed once in one iteration. + +Misuse of sync.WaitGroup + +## Moby/27782 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[moby#27782]|[pull request]|[patch]| Communication | Channel & Condition Variable | + +[moby#27782]:(moby27782_test.go) +[patch]:https://github.com/moby/moby/pull/27782/files +[pull request]:https://github.com/moby/moby/pull/27782 + +### Description + +### Example execution + +```go +G1 G2 G3 +----------------------------------------------------------------------- +InitializeStdio() +startLogging() +l.ReadLogs() +NewLogWatcher() +. l.readLogs() +container.Reset() . +LogDriver.Close() . +r.Close() . +close(w.closeNotifier) . +. followLogs(logWatcher) +. watchFile() +. New() +. NewEventWatcher() +. NewWatcher() +. . w.readEvents() +. . event.ignoreLinux() +. . return false +. <-logWatcher.WatchClose() . +. fileWatcher.Remove() . +. w.cv.Wait() . +. . w.Events <- event +------------------------------G2,G3 leak------------------------------- +``` + +## Moby/28462 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[moby#28462]|[pull request]|[patch]| Mixed | Channel & Lock | + +[moby#28462]:(moby28462_test.go) +[patch]:https://github.com/moby/moby/pull/28462/files +[pull request]:https://github.com/moby/moby/pull/28462 + +### Description + +One goroutine may acquire a lock and try to send a message over channel `stop`, +while the other will try to acquire the same lock. With the wrong ordering, +both goroutines will leak. + +### Example execution + +```go +G1 G2 +-------------------------------------------------------------- +monitor() +handleProbeResult() +. d.StateChanged() +. c.Lock() +. d.updateHealthMonitorElseBranch() +. h.CloseMonitorChannel() +. s.stop <- struct{}{} +c.Lock() +----------------------G1,G2 leak------------------------------ +``` + +## Moby/30408 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[moby#30408]|[pull request]|[patch]| Communication | Condition Variable | + +[moby#30408]:(moby30408_test.go) +[patch]:https://github.com/moby/moby/pull/30408/files +[pull request]:https://github.com/moby/moby/pull/30408 + +### Description + +`Wait()` at line 22 has no corresponding `Signal()` or `Broadcast()`. + +### Example execution + +```go +G1 G2 +------------------------------------------ +testActive() +. p.waitActive() +. p.activateWait.L.Lock() +. 
p.activateWait.Wait() +<-done +-----------------G1,G2 leak--------------- +``` + +## Moby/33781 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[moby#33781]|[pull request]|[patch]| Communication | Channel & Context | + +[moby#33781]:(moby33781_test.go) +[patch]:https://github.com/moby/moby/pull/33781/files +[pull request]:https://github.com/moby/moby/pull/33781 + +### Description + +The goroutine created using an anonymous function is blocked +sending a message over an unbuffered channel. However there +exists a path in the parent goroutine where the parent function +will return without draining the channel. + +### Example execution + +```go +G1 G2 G3 +---------------------------------------- +monitor() . +<-time.After() . +. . +<-stop stop<- +. +cancelProbe() +return +. result<- +----------------G3 leak------------------ +``` + +## Moby/36114 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[moby#36114]|[pull request]|[patch]| Resource | Double locking | + +[moby#36114]:(moby36114_test.go) +[patch]:https://github.com/moby/moby/pull/36114/files +[pull request]:https://github.com/moby/moby/pull/36114 + +### Description + +The the lock for the struct svm has already been locked when calling +`svm.hotRemoveVHDsAtStart()`. + +## Moby/4951 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[moby#4951]|[pull request]|[patch]| Resource | AB-BA deadlock | + +[moby#4951]:(moby4951_test.go) +[patch]:https://github.com/moby/moby/pull/4951/files +[pull request]:https://github.com/moby/moby/pull/4951 + +### Description + +The root cause and patch is clearly explained in the commit +description. The global lock is `devices.Lock()`, and the device +lock is `baseInfo.lock.Lock()`. It is very likely that this bug +can be reproduced. + +## Moby/7559 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[moby#7559]|[pull request]|[patch]| Resource | Double locking | + +[moby#7559]:(moby7559_test.go) +[patch]:https://github.com/moby/moby/pull/7559/files +[pull request]:https://github.com/moby/moby/pull/7559 + +### Description + +Line 25 is missing a call to `.Unlock`. + +### Example execution + +```go +G1 +--------------------------- +proxy.connTrackLock.Lock() +if err != nil { continue } +proxy.connTrackLock.Lock() +-----------G1 leaks-------- +``` + +## Serving/2137 + +| Bug ID | Ref | Patch | Type | Sub-type | +| ---- | ---- | ---- | ---- | ---- | +|[serving#2137]|[pull request]|[patch]| Mixed | Channel & Lock | + +[serving#2137]:(serving2137_test.go) +[patch]:https://github.com/ knative/serving/pull/2137/files +[pull request]:https://github.com/ knative/serving/pull/2137 + +### Description + +### Example execution + +```go +G1 G2 G3 +---------------------------------------------------------------------------------- +b.concurrentRequests(2) . . +b.concurrentRequest() . . +r.lock.Lock() . . +. start.Done() . +start.Wait() . . +b.concurrentRequest() . . +r.lock.Lock() . . +. . start.Done() +start.Wait() . . +unlockAll(locks) . . +unlock(lc) . . +req.lock.Unlock() . . +ok := <-req.accepted . . +. b.Maybe() . +. b.activeRequests <- t . +. thunk() . +. r.lock.Lock() . +. . b.Maybe() +. . 
+
+## Serving/2137
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[serving#2137]|[pull request]|[patch]| Mixed | Channel & Lock |
+
+[serving#2137]:(serving2137_test.go)
+[patch]:https://github.com/knative/serving/pull/2137/files
+[pull request]:https://github.com/knative/serving/pull/2137
+
+### Description
+
+### Example execution
+
+```go
+G1                         G2                        G3
+----------------------------------------------------------------------------------
+b.concurrentRequests(2)    .                         .
+b.concurrentRequest()      .                         .
+r.lock.Lock()              .                         .
+.                          start.Done()              .
+start.Wait()               .                         .
+b.concurrentRequest()      .                         .
+r.lock.Lock()              .                         .
+.                          .                         start.Done()
+start.Wait()               .                         .
+unlockAll(locks)           .                         .
+unlock(lc)                 .                         .
+req.lock.Unlock()          .                         .
+ok := <-req.accepted       .                         .
+.                          b.Maybe()                 .
+.                          b.activeRequests <- t     .
+.                          thunk()                   .
+.                          r.lock.Lock()             .
+.                          .                         b.Maybe()
+.                          .                         b.activeRequests <- t
+----------------------------G1,G2,G3 leak-----------------------------------------
+```
+
+## Syncthing/4829
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[syncthing#4829]|[pull request]|[patch]| Resource | Double locking |
+
+[syncthing#4829]:(syncthing4829_test.go)
+[patch]:https://github.com/syncthing/syncthing/pull/4829/files
+[pull request]:https://github.com/syncthing/syncthing/pull/4829
+
+### Description
+
+Double locking of `m.mut`: the write lock acquired at line 17 is still held
+when `notify` attempts the read lock at line 30.
+
+### Example execution
+
+```go
+G1
+---------------------------
+mapping.clearAddresses()
+m.mut.Lock() [L2]
+m.notify(...)
+m.mut.RLock() [L2]
+----------G1 leaks---------
+```
+
+## Syncthing/5795
+
+| Bug ID | Ref | Patch | Type | Sub-type |
+| ---- | ---- | ---- | ---- | ---- |
+|[syncthing#5795]|[pull request]|[patch]| Communication | Channel |
+
+[syncthing#5795]:(syncthing5795_test.go)
+[patch]:https://github.com/syncthing/syncthing/pull/5795/files
+[pull request]:https://github.com/syncthing/syncthing/pull/5795
+
+### Description
+
+The receive from `c.dispatcherLoopStopped` at line 82 blocks forever because
+`dispatcherLoop()` is itself blocked at line 72.
+
+### Example execution
+
+```go
+G1                              G2
+--------------------------------------------------------------
+c.Start()
+go c.dispatcherLoop() [G3]
+.                               select [<-c.inbox, <-c.closed]
+c.inbox <-     <================>     [<-c.inbox]
+<-c.dispatcherLoopStopped       .
+.                               default
+.                               c.ccFn()/c.Close()
+.                               close(c.closed)
+.                               <-c.dispatcherLoopStopped
+---------------------G1,G2 leak-------------------------------
+```
diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach10214.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach10214.go
new file mode 100644
index 00000000000000..4f5ef3b0fc32fc
--- /dev/null
+++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach10214.go
@@ -0,0 +1,145 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+/*
+ * Project: cockroach
+ * Issue or PR : https://github.com/cockroachdb/cockroach/pull/10214
+ * Buggy version: 7207111aa3a43df0552509365fdec741a53f873f
+ * fix commit-id: 27e863d90ab0660494778f1c35966cc5ddc38e32
+ * Flaky: 3/100
+ * Description: This goroutine leak is caused by acquiring coalescedMu.Lock() and
+ * raftMu.Lock() in different orders. The fix is to refactor sendQueuedHeartbeats()
+ * so that cockroachdb can unlock coalescedMu before locking raftMu.
+ */ +package main + +import ( + "os" + "runtime/pprof" + "sync" + "time" + "unsafe" +) + +func init() { + register("Cockroach10214", Cockroach10214) +} + +type Store_cockroach10214 struct { + coalescedMu struct { + sync.Mutex // L1 + heartbeatResponses []int + } + mu struct { + replicas map[int]*Replica_cockroach10214 + } +} + +func (s *Store_cockroach10214) sendQueuedHeartbeats() { + s.coalescedMu.Lock() // L1 acquire + defer s.coalescedMu.Unlock() // L2 release + for i := 0; i < len(s.coalescedMu.heartbeatResponses); i++ { + s.sendQueuedHeartbeatsToNode() // L2 + } +} + +func (s *Store_cockroach10214) sendQueuedHeartbeatsToNode() { + for i := 0; i < len(s.mu.replicas); i++ { + r := s.mu.replicas[i] + r.reportUnreachable() // L2 + } +} + +type Replica_cockroach10214 struct { + raftMu sync.Mutex // L2 + mu sync.Mutex // L3 + store *Store_cockroach10214 +} + +func (r *Replica_cockroach10214) reportUnreachable() { + r.raftMu.Lock() // L2 acquire + time.Sleep(time.Millisecond) + defer r.raftMu.Unlock() // L2 release +} + +func (r *Replica_cockroach10214) tick() { + r.raftMu.Lock() // L2 acquire + defer r.raftMu.Unlock() // L2 release + r.tickRaftMuLocked() +} + +func (r *Replica_cockroach10214) tickRaftMuLocked() { + r.mu.Lock() // L3 acquire + defer r.mu.Unlock() // L3 release + if r.maybeQuiesceLocked() { + return + } +} + +func (r *Replica_cockroach10214) maybeQuiesceLocked() bool { + for i := 0; i < 2; i++ { + if !r.maybeCoalesceHeartbeat() { + return true + } + } + return false +} + +func (r *Replica_cockroach10214) maybeCoalesceHeartbeat() bool { + msgtype := uintptr(unsafe.Pointer(r)) % 3 + switch msgtype { + case 0, 1, 2: + r.store.coalescedMu.Lock() // L1 acquire + default: + return false + } + r.store.coalescedMu.Unlock() // L1 release + return true +} + +func Cockroach10214() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + for i := 0; i < 1000; i++ { + go func() { + store := &Store_cockroach10214{} + responses := &store.coalescedMu.heartbeatResponses + *responses = append(*responses, 1, 2) + store.mu.replicas = make(map[int]*Replica_cockroach10214) + + rp1 := &Replica_cockroach10214{ // L2,3[0] + store: store, + } + rp2 := &Replica_cockroach10214{ // L2,3[1] + store: store, + } + store.mu.replicas[0] = rp1 + store.mu.replicas[1] = rp2 + + go store.sendQueuedHeartbeats() // G1 + go rp1.tick() // G2 + }() + } +} + +// Example of goroutine leak trace: +// +// G1 G2 +//------------------------------------------------------------------------------------ +// s.sendQueuedHeartbeats() . +// s.coalescedMu.Lock() [L1] . +// s.sendQueuedHeartbeatsToNode() . +// s.mu.replicas[0].reportUnreachable() . +// s.mu.replicas[0].raftMu.Lock() [L2] . +// . s.mu.replicas[0].tick() +// . s.mu.replicas[0].raftMu.Lock() [L2] +// . s.mu.replicas[0].tickRaftMuLocked() +// . s.mu.replicas[0].mu.Lock() [L3] +// . s.mu.replicas[0].maybeQuiesceLocked() +// . s.mu.replicas[0].maybeCoalesceHeartbeat() +// . s.coalescedMu.Lock() [L1] +//--------------------------------G1,G2 leak------------------------------------------ \ No newline at end of file diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach1055.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach1055.go new file mode 100644 index 00000000000000..87cf157996800e --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach1055.go @@ -0,0 +1,115 @@ +// Copyright 2025 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +package main + +import ( + "os" + "runtime/pprof" + "sync" + "sync/atomic" + "time" +) + +func init() { + register("Cockroach1055", Cockroach1055) +} + +type Stopper_cockroach1055 struct { + stopper chan struct{} + stop sync.WaitGroup + mu sync.Mutex + draining int32 + drain sync.WaitGroup +} + +func (s *Stopper_cockroach1055) AddWorker() { + s.stop.Add(1) +} + +func (s *Stopper_cockroach1055) ShouldStop() <-chan struct{} { + if s == nil { + return nil + } + return s.stopper +} + +func (s *Stopper_cockroach1055) SetStopped() { + if s != nil { + s.stop.Done() + } +} + +func (s *Stopper_cockroach1055) Quiesce() { + s.mu.Lock() + defer s.mu.Unlock() + atomic.StoreInt32(&s.draining, 1) + s.drain.Wait() + atomic.StoreInt32(&s.draining, 0) +} + +func (s *Stopper_cockroach1055) Stop() { + s.mu.Lock() // L1 + defer s.mu.Unlock() + atomic.StoreInt32(&s.draining, 1) + s.drain.Wait() + close(s.stopper) + s.stop.Wait() +} + +func (s *Stopper_cockroach1055) StartTask() bool { + if atomic.LoadInt32(&s.draining) == 0 { + s.mu.Lock() + defer s.mu.Unlock() + s.drain.Add(1) + return true + } + return false +} + +func NewStopper_cockroach1055() *Stopper_cockroach1055 { + return &Stopper_cockroach1055{ + stopper: make(chan struct{}), + } +} + +func Cockroach1055() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + for i := 0; i <= 1000; i++ { + go func() { // G1 + var stoppers []*Stopper_cockroach1055 + for i := 0; i < 2; i++ { + stoppers = append(stoppers, NewStopper_cockroach1055()) + } + + for i := range stoppers { + s := stoppers[i] + s.AddWorker() + go func() { // G2 + s.StartTask() + <-s.ShouldStop() + s.SetStopped() + }() + } + + done := make(chan struct{}) + go func() { // G3 + for _, s := range stoppers { + s.Quiesce() + } + for _, s := range stoppers { + s.Stop() + } + close(done) + }() + + <-done + }() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach10790.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach10790.go new file mode 100644 index 00000000000000..636f45b3e0e765 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach10790.go @@ -0,0 +1,98 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +/* + * Project: cockroach + * Issue or PR : https://github.com/cockroachdb/cockroach/pull/10790 + * Buggy version: 96b5452557ebe26bd9d85fe7905155009204d893 + * fix commit-id: f1a5c19125c65129b966fbdc0e6408e8df214aba + * Flaky: 28/100 + * Description: + * It is possible that a message from ctxDone will make the function beginCmds + * returns without draining the channel ch, so that goroutines created by anonymous + * function will leak. 
+ */ + +package main + +import ( + "context" + "os" + "runtime/pprof" + "time" +) + +func init() { + register("Cockroach10790", Cockroach10790) +} + +type Replica_cockroach10790 struct { + chans []chan bool +} + +func (r *Replica_cockroach10790) beginCmds(ctx context.Context) { + ctxDone := ctx.Done() + for _, ch := range r.chans { + select { + case <-ch: + case <-ctxDone: + go func() { // G3 + for _, ch := range r.chans { + <-ch + } + }() + } + } +} + +func (r *Replica_cockroach10790) sendChans(ctx context.Context) { + for _, ch := range r.chans { + select { + case ch <- true: + case <-ctx.Done(): + return + } + } +} + +func NewReplica_cockroach10790() *Replica_cockroach10790 { + r := &Replica_cockroach10790{} + r.chans = append(r.chans, make(chan bool), make(chan bool)) + return r +} + +// Example of goroutine leak trace: +// +// G1 G2 G3 helper goroutine +//-------------------------------------------------------------------------------------- +// . . r.sendChans() +// r.beginCmds() . . +// . . ch1 <- +// <-ch1 <================================================> ch1 <- +// . . select [ch2<-, <-ctx.Done()] +// . cancel() . +// . <> [<-ctx.Done()] ==> return +// . <> +// go func() [G3] . +// . <-ch1 +// ------------------------------G3 leaks---------------------------------------------- +// + +func Cockroach10790() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + for i := 0; i < 100; i++ { + go func() { + r := NewReplica_cockroach10790() + ctx, cancel := context.WithCancel(context.Background()) + go r.sendChans(ctx) // helper goroutine + go r.beginCmds(ctx) // G1 + go cancel() // G2 + }() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach13197.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach13197.go new file mode 100644 index 00000000000000..a0a9a792676692 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach13197.go @@ -0,0 +1,82 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +/* + * Project: cockroach + * Issue or PR : https://github.com/cockroachdb/cockroach/pull/13197 + * Buggy version: fff27aedabafe20cef57f75905fe340cab48c2a4 + * fix commit-id: 9bf770cd8f6eaff5441b80d3aec1a5614e8747e1 + * Flaky: 100/100 + * Description: One goroutine executing (*Tx).awaitDone() blocks + * waiting for a signal over context.Done() that never comes. + */ +package main + +import ( + "context" + "os" + "runtime" + "runtime/pprof" +) + +func init() { + register("Cockroach13197", Cockroach13197) +} + +type DB_cockroach13197 struct{} + +func (db *DB_cockroach13197) begin(ctx context.Context) *Tx_cockroach13197 { + ctx, cancel := context.WithCancel(ctx) + tx := &Tx_cockroach13197{ + cancel: cancel, + ctx: ctx, + } + go tx.awaitDone() // G2 + return tx +} + +type Tx_cockroach13197 struct { + cancel context.CancelFunc + ctx context.Context +} + +func (tx *Tx_cockroach13197) awaitDone() { + <-tx.ctx.Done() +} + +func (tx *Tx_cockroach13197) Rollback() { + tx.rollback() +} + +func (tx *Tx_cockroach13197) rollback() { + tx.close() +} + +func (tx *Tx_cockroach13197) close() { + tx.cancel() +} + +// Example of goroutine leak trace: +// +// G1 G2 +//-------------------------------- +// begin() +// . awaitDone() +// <> . 
+// <-tx.ctx.Done() +//------------G2 leak------------- + +func Cockroach13197() { + prof := pprof.Lookup("goroutineleak") + defer func() { + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) + }() + + db := &DB_cockroach13197{} + db.begin(context.Background()) // G1 +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach13755.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach13755.go new file mode 100644 index 00000000000000..5ef6fa1e28a06d --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach13755.go @@ -0,0 +1,66 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +/* + * Project: cockroach + * Issue or PR : https://github.com/cockroachdb/cockroach/pull/13755 + * Buggy version: 7acb881bbb8f23e87b69fce9568d9a3316b5259c + * fix commit-id: ef906076adc1d0e3721944829cfedfed51810088 + * Flaky: 100/100 + */ + +package main + +import ( + "context" + "os" + "runtime" + "runtime/pprof" +) + +func init() { + register("Cockroach13755", Cockroach13755) +} + +type Rows_cockroach13755 struct { + cancel context.CancelFunc +} + +func (rs *Rows_cockroach13755) initContextClose(ctx context.Context) { + ctx, rs.cancel = context.WithCancel(ctx) + go rs.awaitDone(ctx) +} + +func (rs *Rows_cockroach13755) awaitDone(ctx context.Context) { + <-ctx.Done() + rs.close(ctx.Err()) +} + +func (rs *Rows_cockroach13755) close(err error) { + rs.cancel() +} + +// Example of goroutine leak trace: +// +// G1 G2 +//---------------------------------------- +// initContextClose() +// . awaitDone() +// <> . +// <-tx.ctx.Done() +//----------------G2 leak----------------- + +func Cockroach13755() { + prof := pprof.Lookup("goroutineleak") + defer func() { + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) + }() + + rs := &Rows_cockroach13755{} + rs.initContextClose(context.Background()) +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach1462.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach1462.go new file mode 100644 index 00000000000000..108d7884a3d82b --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach1462.go @@ -0,0 +1,167 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
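+
+// Description: The transport worker, once committed to the blocking send on
+// lt.Events in its select's default branch, cannot be interrupted: after the
+// event-processing worker returns via ShouldStop, nothing receives from
+// lt.Events anymore, the sender blocks forever, and Stop()'s stop.Wait()
+// never returns.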
+ +package main + +import ( + "os" + "runtime" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Cockroach1462", Cockroach1462) +} + +type Stopper_cockroach1462 struct { + stopper chan struct{} + stopped chan struct{} + stop sync.WaitGroup + mu sync.Mutex + drain *sync.Cond + draining bool + numTasks int +} + +func NewStopper_cockroach1462() *Stopper_cockroach1462 { + s := &Stopper_cockroach1462{ + stopper: make(chan struct{}), + stopped: make(chan struct{}), + } + s.drain = sync.NewCond(&s.mu) + return s +} + +func (s *Stopper_cockroach1462) RunWorker(f func()) { + s.AddWorker() + go func() { // G2, G3 + defer s.SetStopped() + f() + }() +} + +func (s *Stopper_cockroach1462) AddWorker() { + s.stop.Add(1) +} +func (s *Stopper_cockroach1462) StartTask() bool { + s.mu.Lock() + runtime.Gosched() + defer s.mu.Unlock() + if s.draining { + return false + } + s.numTasks++ + return true +} + +func (s *Stopper_cockroach1462) FinishTask() { + s.mu.Lock() + runtime.Gosched() + defer s.mu.Unlock() + s.numTasks-- + s.drain.Broadcast() +} +func (s *Stopper_cockroach1462) SetStopped() { + if s != nil { + s.stop.Done() + } +} +func (s *Stopper_cockroach1462) ShouldStop() <-chan struct{} { + if s == nil { + return nil + } + return s.stopper +} + +func (s *Stopper_cockroach1462) Quiesce() { + s.mu.Lock() + runtime.Gosched() + defer s.mu.Unlock() + s.draining = true + for s.numTasks > 0 { + // Unlock s.mu, wait for the signal, and lock s.mu. + s.drain.Wait() + } +} + +func (s *Stopper_cockroach1462) Stop() { + s.Quiesce() + close(s.stopper) + s.stop.Wait() + s.mu.Lock() + runtime.Gosched() + defer s.mu.Unlock() + close(s.stopped) +} + +type interceptMessage_cockroach1462 int + +type localInterceptableTransport_cockroach1462 struct { + mu sync.Mutex + Events chan interceptMessage_cockroach1462 + stopper *Stopper_cockroach1462 +} + +func (lt *localInterceptableTransport_cockroach1462) Close() {} + +type Transport_cockroach1462 interface { + Close() +} + +func NewLocalInterceptableTransport_cockroach1462(stopper *Stopper_cockroach1462) Transport_cockroach1462 { + lt := &localInterceptableTransport_cockroach1462{ + Events: make(chan interceptMessage_cockroach1462), + stopper: stopper, + } + lt.start() + return lt +} + +func (lt *localInterceptableTransport_cockroach1462) start() { + lt.stopper.RunWorker(func() { + for { + select { + case <-lt.stopper.ShouldStop(): + return + default: + lt.Events <- interceptMessage_cockroach1462(0) + } + } + }) +} + +func processEventsUntil_cockroach1462(ch <-chan interceptMessage_cockroach1462, stopper *Stopper_cockroach1462) { + for { + select { + case _, ok := <-ch: + runtime.Gosched() + if !ok { + return + } + case <-stopper.ShouldStop(): + return + } + } +} + +func Cockroach1462() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(2000 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + for i := 0; i <= 1000; i++ { + go func() { // G1 + stopper := NewStopper_cockroach1462() + transport := NewLocalInterceptableTransport_cockroach1462(stopper).(*localInterceptableTransport_cockroach1462) + stopper.RunWorker(func() { + processEventsUntil_cockroach1462(transport.Events, stopper) + }) + stopper.Stop() + }() + } +} + diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach16167.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach16167.go new file mode 100644 index 00000000000000..4cd14c7a5b3ac5 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach16167.go @@ -0,0 +1,108 @@ +// 
Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +/* + * Project: cockroach + * Issue or PR : https://github.com/cockroachdb/cockroach/pull/16167 + * Buggy version: 36fa784aa846b46c29e077634c4e362635f6e74a + * fix commit-id: d064942b067ab84628f79cbfda001fa3138d8d6e + * Flaky: 1/100 + */ + +package main + +import ( + "os" + "runtime" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Cockroach16167", Cockroach16167) +} + +type PreparedStatements_cockroach16167 struct { + session *Session_cockroach16167 +} + +func (ps PreparedStatements_cockroach16167) New(e *Executor_cockroach16167) { + e.Prepare(ps.session) +} + +type Session_cockroach16167 struct { + PreparedStatements PreparedStatements_cockroach16167 +} + +func (s *Session_cockroach16167) resetForBatch(e *Executor_cockroach16167) { + e.getDatabaseCache() +} + +type Executor_cockroach16167 struct { + systemConfigCond *sync.Cond + systemConfigMu sync.RWMutex // L1 +} + +func (e *Executor_cockroach16167) Start() { + e.updateSystemConfig() +} + +func (e *Executor_cockroach16167) execParsed(session *Session_cockroach16167) { + e.systemConfigCond.L.Lock() // Same as e.systemConfigMu.RLock() + runtime.Gosched() + defer e.systemConfigCond.L.Unlock() + runTxnAttempt_cockroach16167(e, session) +} + +func (e *Executor_cockroach16167) execStmtsInCurrentTxn(session *Session_cockroach16167) { + e.execStmtInOpenTxn(session) +} + +func (e *Executor_cockroach16167) execStmtInOpenTxn(session *Session_cockroach16167) { + session.PreparedStatements.New(e) +} + +func (e *Executor_cockroach16167) Prepare(session *Session_cockroach16167) { + session.resetForBatch(e) +} + +func (e *Executor_cockroach16167) getDatabaseCache() { + e.systemConfigMu.RLock() + defer e.systemConfigMu.RUnlock() +} + +func (e *Executor_cockroach16167) updateSystemConfig() { + e.systemConfigMu.Lock() + runtime.Gosched() + defer e.systemConfigMu.Unlock() +} + +func runTxnAttempt_cockroach16167(e *Executor_cockroach16167, session *Session_cockroach16167) { + e.execStmtsInCurrentTxn(session) +} + +func NewExectorAndSession_cockroach16167() (*Executor_cockroach16167, *Session_cockroach16167) { + session := &Session_cockroach16167{} + session.PreparedStatements = PreparedStatements_cockroach16167{session} + e := &Executor_cockroach16167{} + return e, session +} + +func Cockroach16167() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + for i := 0; i < 100; i++ { + go func() { // G1 + e, s := NewExectorAndSession_cockroach16167() + e.systemConfigCond = sync.NewCond(e.systemConfigMu.RLocker()) + go e.Start() // G2 + e.execParsed(s) + }() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach18101.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach18101.go new file mode 100644 index 00000000000000..17b03203304ac7 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach18101.go @@ -0,0 +1,60 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
+ +/* + * Project: cockroach + * Issue or PR : https://github.com/cockroachdb/cockroach/pull/18101 + * Buggy version: f7a8e2f57b6bcf00b9abaf3da00598e4acd3a57f + * fix commit-id: 822bd176cc725c6b50905ea615023200b395e14f + * Flaky: 100/100 + */ + +package main + +import ( + "context" + "os" + "runtime/pprof" + "time" +) + +func init() { + register("Cockroach18101", Cockroach18101) +} + +const chanSize_cockroach18101 = 6 + +func restore_cockroach18101(ctx context.Context) bool { + readyForImportCh := make(chan bool, chanSize_cockroach18101) + go func() { // G2 + defer close(readyForImportCh) + splitAndScatter_cockroach18101(ctx, readyForImportCh) + }() + for readyForImportSpan := range readyForImportCh { + select { + case <-ctx.Done(): + return readyForImportSpan + } + } + return true +} + +func splitAndScatter_cockroach18101(ctx context.Context, readyForImportCh chan bool) { + for i := 0; i < chanSize_cockroach18101+2; i++ { + readyForImportCh <- (false || i != 0) + } +} + +func Cockroach18101() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + for i := 0; i < 100; i++ { + ctx, cancel := context.WithCancel(context.Background()) + go restore_cockroach18101(ctx) // G1 + go cancel() // helper goroutine + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach2448.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach2448.go new file mode 100644 index 00000000000000..a7544bc8a46a18 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach2448.go @@ -0,0 +1,125 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
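+
+// Description: processRaft (G1) blocks sending the committed event's callback
+// on the unbuffered callbackChan while state.start (G2) blocks sending the
+// next event on the unbuffered Events channel. Each goroutine waits for the
+// other to receive, so both leak.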
+ +package main + +import ( + "os" + "runtime" + "runtime/pprof" + "time" +) + +func init() { + register("Cockroach2448", Cockroach2448) +} + +type Stopper_cockroach2448 struct { + Done chan bool +} + +func (s *Stopper_cockroach2448) ShouldStop() <-chan bool { + return s.Done +} + +type EventMembershipChangeCommitted_cockroach2448 struct { + Callback func() +} + +type MultiRaft_cockroach2448 struct { + stopper *Stopper_cockroach2448 + Events chan interface{} + callbackChan chan func() +} + +// sendEvent can be invoked many times +func (m *MultiRaft_cockroach2448) sendEvent(event interface{}) { + select { + case m.Events <- event: // Waiting for events consumption + case <-m.stopper.ShouldStop(): + } +} + +type state_cockroach2448 struct { + *MultiRaft_cockroach2448 +} + +func (s *state_cockroach2448) start() { + for { + select { + case <-s.stopper.ShouldStop(): + return + case cb := <-s.callbackChan: + cb() + default: + s.handleWriteResponse() + time.Sleep(100 * time.Microsecond) + } + } +} + +func (s *state_cockroach2448) handleWriteResponse() { + s.sendEvent(&EventMembershipChangeCommitted_cockroach2448{ + Callback: func() { + select { + case s.callbackChan <- func() { // Waiting for callbackChan consumption + time.Sleep(time.Nanosecond) + }: + case <-s.stopper.ShouldStop(): + } + }, + }) +} + +type Store_cockroach2448 struct { + multiraft *MultiRaft_cockroach2448 +} + +func (s *Store_cockroach2448) processRaft() { + for { + select { + case e := <-s.multiraft.Events: + switch e := e.(type) { + case *EventMembershipChangeCommitted_cockroach2448: + callback := e.Callback + runtime.Gosched() + if callback != nil { + callback() // Waiting for callbackChan consumption + } + } + case <-s.multiraft.stopper.ShouldStop(): + return + } + } +} + +func NewStoreAndState_cockroach2448() (*Store_cockroach2448, *state_cockroach2448) { + stopper := &Stopper_cockroach2448{ + Done: make(chan bool), + } + mltrft := &MultiRaft_cockroach2448{ + stopper: stopper, + Events: make(chan interface{}), + callbackChan: make(chan func()), + } + st := &state_cockroach2448{mltrft} + s := &Store_cockroach2448{mltrft} + return s, st +} + +func Cockroach2448() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + for i := 0; i < 1000; i++ { + go func() { + s, st := NewStoreAndState_cockroach2448() + go s.processRaft() // G1 + go st.start() // G2 + }() + } +} + diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach24808.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach24808.go new file mode 100644 index 00000000000000..a916d3c928ee2e --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach24808.go @@ -0,0 +1,78 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
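+
+// Description: Compactor.Start sends on c.ch before launching the worker that
+// drains it. The channel (capacity 1) is already full when Start is called,
+// so the send blocks forever and the goroutine leaks.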
+ +package main + +import ( + "context" + "os" + "runtime" + "runtime/pprof" + "sync" +) + +func init() { + register("Cockroach24808", Cockroach24808) +} + +type Compactor_cockroach24808 struct { + ch chan struct{} +} + +type Stopper_cockroach24808 struct { + stop sync.WaitGroup + stopper chan struct{} +} + +func (s *Stopper_cockroach24808) RunWorker(ctx context.Context, f func(context.Context)) { + s.stop.Add(1) + go func() { + defer s.stop.Done() + f(ctx) + }() +} + +func (s *Stopper_cockroach24808) ShouldStop() <-chan struct{} { + if s == nil { + return nil + } + return s.stopper +} + +func (s *Stopper_cockroach24808) Stop() { + close(s.stopper) +} + +func (c *Compactor_cockroach24808) Start(ctx context.Context, stopper *Stopper_cockroach24808) { + c.ch <- struct{}{} + stopper.RunWorker(ctx, func(ctx context.Context) { + for { + select { + case <-stopper.ShouldStop(): + return + case <-c.ch: + } + } + }) +} + +func Cockroach24808() { + prof := pprof.Lookup("goroutineleak") + defer func() { + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) + }() + go func() { // G1 + stopper := &Stopper_cockroach24808{stopper: make(chan struct{})} + defer stopper.Stop() + + compactor := &Compactor_cockroach24808{ch: make(chan struct{}, 1)} + compactor.ch <- struct{}{} + + compactor.Start(context.Background(), stopper) + }() +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach25456.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach25456.go new file mode 100644 index 00000000000000..b9259c9f918293 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach25456.go @@ -0,0 +1,92 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
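+
+// Description: consistencyQueue.process blocks receiving from the stopper's
+// ShouldQuiesce channel, which is never closed, so the goroutine leaks on the
+// first call.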
+ +package main + +import ( + "os" + "runtime" + "runtime/pprof" +) + +func init() { + register("Cockroach25456", Cockroach25456) +} + +type Stopper_cockroach25456 struct { + quiescer chan struct{} +} + +func (s *Stopper_cockroach25456) ShouldQuiesce() <-chan struct{} { + if s == nil { + return nil + } + return s.quiescer +} + +func NewStopper_cockroach25456() *Stopper_cockroach25456 { + return &Stopper_cockroach25456{quiescer: make(chan struct{})} +} + +type Store_cockroach25456 struct { + stopper *Stopper_cockroach25456 + consistencyQueue *consistencyQueue_cockroach25456 +} + +func (s *Store_cockroach25456) Stopper() *Stopper_cockroach25456 { + return s.stopper +} + +type Replica_cockroach25456 struct { + store *Store_cockroach25456 +} + +func NewReplica_cockroach25456(store *Store_cockroach25456) *Replica_cockroach25456 { + return &Replica_cockroach25456{store: store} +} + +type consistencyQueue_cockroach25456 struct{} + +func (q *consistencyQueue_cockroach25456) process(repl *Replica_cockroach25456) { + <-repl.store.Stopper().ShouldQuiesce() +} + +func newConsistencyQueue_cockroach25456() *consistencyQueue_cockroach25456 { + return &consistencyQueue_cockroach25456{} +} + +type testContext_cockroach25456 struct { + store *Store_cockroach25456 + repl *Replica_cockroach25456 +} + +func (tc *testContext_cockroach25456) StartWithStoreConfig(stopper *Stopper_cockroach25456) { + if tc.store == nil { + tc.store = &Store_cockroach25456{ + consistencyQueue: newConsistencyQueue_cockroach25456(), + } + } + tc.store.stopper = stopper + tc.repl = NewReplica_cockroach25456(tc.store) +} + +func Cockroach25456() { + prof := pprof.Lookup("goroutineleak") + defer func() { + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) + }() + go func() { // G1 + stopper := NewStopper_cockroach25456() + tc := testContext_cockroach25456{} + tc.StartWithStoreConfig(stopper) + + for i := 0; i < 2; i++ { + tc.store.consistencyQueue.process(tc.repl) + } + }() +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach35073.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach35073.go new file mode 100644 index 00000000000000..f00a7bd46259eb --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach35073.go @@ -0,0 +1,124 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
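+
+// Description: The outbox's data channel is filled to capacity and never
+// drained, so both the helper goroutine pushing the extra row and the parent
+// goroutine's final Push block forever on the full channel.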
+ +package main + +import ( + "os" + "runtime" + "runtime/pprof" + "sync" + "sync/atomic" +) + +func init() { + register("Cockroach35073", Cockroach35073) +} + +type ConsumerStatus_cockroach35073 uint32 + +const ( + NeedMoreRows_cockroach35073 ConsumerStatus_cockroach35073 = iota + DrainRequested_cockroach35073 + ConsumerClosed_cockroach35073 +) + +const rowChannelBufSize_cockroach35073 = 16 +const outboxBufRows_cockroach35073 = 16 + +type rowSourceBase_cockroach35073 struct { + consumerStatus ConsumerStatus_cockroach35073 +} + +func (rb *rowSourceBase_cockroach35073) consumerClosed() { + atomic.StoreUint32((*uint32)(&rb.consumerStatus), uint32(ConsumerClosed_cockroach35073)) +} + +type RowChannelMsg_cockroach35073 int + +type RowChannel_cockroach35073 struct { + rowSourceBase_cockroach35073 + dataChan chan RowChannelMsg_cockroach35073 +} + +func (rc *RowChannel_cockroach35073) ConsumerClosed() { + rc.consumerClosed() + select { + case <-rc.dataChan: + default: + } +} + +func (rc *RowChannel_cockroach35073) Push() ConsumerStatus_cockroach35073 { + consumerStatus := ConsumerStatus_cockroach35073( + atomic.LoadUint32((*uint32)(&rc.consumerStatus))) + switch consumerStatus { + case NeedMoreRows_cockroach35073: + rc.dataChan <- RowChannelMsg_cockroach35073(0) + case DrainRequested_cockroach35073: + case ConsumerClosed_cockroach35073: + } + return consumerStatus +} + +func (rc *RowChannel_cockroach35073) InitWithNumSenders() { + rc.initWithBufSizeAndNumSenders(rowChannelBufSize_cockroach35073) +} + +func (rc *RowChannel_cockroach35073) initWithBufSizeAndNumSenders(chanBufSize int) { + rc.dataChan = make(chan RowChannelMsg_cockroach35073, chanBufSize) +} + +type outbox_cockroach35073 struct { + RowChannel_cockroach35073 +} + +func (m *outbox_cockroach35073) init() { + m.RowChannel_cockroach35073.InitWithNumSenders() +} + +func (m *outbox_cockroach35073) start(wg *sync.WaitGroup) { + if wg != nil { + wg.Add(1) + } + go m.run(wg) +} + +func (m *outbox_cockroach35073) run(wg *sync.WaitGroup) { + if wg != nil { + wg.Done() + } +} + +func Cockroach35073() { + prof := pprof.Lookup("goroutineleak") + defer func() { + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) + }() + go func() { + outbox := &outbox_cockroach35073{} + outbox.init() + + var wg sync.WaitGroup + for i := 0; i < outboxBufRows_cockroach35073; i++ { + outbox.Push() + } + + var blockedPusherWg sync.WaitGroup + blockedPusherWg.Add(1) + go func() { + outbox.Push() + blockedPusherWg.Done() + }() + + outbox.start(&wg) + + wg.Wait() + outbox.RowChannel_cockroach35073.Push() + }() +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach35931.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach35931.go new file mode 100644 index 00000000000000..9ddcda1b6242bf --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach35931.go @@ -0,0 +1,135 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
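+
+// Description: Flow.cancel pushes to every registered receiver, but the left
+// RowChannel's single buffer slot is already occupied and nothing drains it,
+// so the push blocks forever and the goroutine leaks.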
+ +package main + +import ( + "os" + "runtime" + "runtime/pprof" + "sync" +) + +func init() { + register("Cockroach35931", Cockroach35931) +} + +type RowReceiver_cockroach35931 interface { + Push() +} + +type inboundStreamInfo_cockroach35931 struct { + receiver RowReceiver_cockroach35931 +} + +type RowChannel_cockroach35931 struct { + dataChan chan struct{} +} + +func (rc *RowChannel_cockroach35931) Push() { + // The buffer size can be either 0 or 1 when this function is entered. + // We need context sensitivity or a path-condition on the buffer size + // to find this bug. + rc.dataChan <- struct{}{} +} + +func (rc *RowChannel_cockroach35931) initWithBufSizeAndNumSenders(chanBufSize int) { + rc.dataChan = make(chan struct{}, chanBufSize) +} + +type flowEntry_cockroach35931 struct { + flow *Flow_cockroach35931 + inboundStreams map[int]*inboundStreamInfo_cockroach35931 +} + +type flowRegistry_cockroach35931 struct { + sync.Mutex + flows map[int]*flowEntry_cockroach35931 +} + +func (fr *flowRegistry_cockroach35931) getEntryLocked(id int) *flowEntry_cockroach35931 { + entry, ok := fr.flows[id] + if !ok { + entry = &flowEntry_cockroach35931{} + fr.flows[id] = entry + } + return entry +} + +func (fr *flowRegistry_cockroach35931) cancelPendingStreamsLocked(id int) []RowReceiver_cockroach35931 { + entry := fr.flows[id] + pendingReceivers := make([]RowReceiver_cockroach35931, 0) + for _, is := range entry.inboundStreams { + pendingReceivers = append(pendingReceivers, is.receiver) + } + return pendingReceivers +} + +type Flow_cockroach35931 struct { + id int + flowRegistry *flowRegistry_cockroach35931 + inboundStreams map[int]*inboundStreamInfo_cockroach35931 +} + +func (f *Flow_cockroach35931) cancel() { + f.flowRegistry.Lock() + timedOutReceivers := f.flowRegistry.cancelPendingStreamsLocked(f.id) + f.flowRegistry.Unlock() + + for _, receiver := range timedOutReceivers { + receiver.Push() + } +} + +func (fr *flowRegistry_cockroach35931) RegisterFlow(f *Flow_cockroach35931, inboundStreams map[int]*inboundStreamInfo_cockroach35931) { + entry := fr.getEntryLocked(f.id) + entry.flow = f + entry.inboundStreams = inboundStreams +} + +func makeFlowRegistry_cockroach35931() *flowRegistry_cockroach35931 { + return &flowRegistry_cockroach35931{ + flows: make(map[int]*flowEntry_cockroach35931), + } +} + +func Cockroach35931() { + prof := pprof.Lookup("goroutineleak") + defer func() { + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) + }() + go func() { + fr := makeFlowRegistry_cockroach35931() + + left := &RowChannel_cockroach35931{} + left.initWithBufSizeAndNumSenders(1) + right := &RowChannel_cockroach35931{} + right.initWithBufSizeAndNumSenders(1) + + inboundStreams := map[int]*inboundStreamInfo_cockroach35931{ + 0: { + receiver: left, + }, + 1: { + receiver: right, + }, + } + + left.Push() + + flow := &Flow_cockroach35931{ + id: 0, + flowRegistry: fr, + inboundStreams: inboundStreams, + } + + fr.RegisterFlow(flow, inboundStreams) + + flow.cancel() + }() +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach3710.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach3710.go new file mode 100644 index 00000000000000..e419cd2fc32019 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach3710.go @@ -0,0 +1,122 @@ +// Copyright 2025 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +/* + * Project: cockroach + * Issue or PR : https://github.com/cockroachdb/cockroach/pull/3710 + * Buggy version: 4afdd4860fd7c3bd9e92489f84a95e5cc7d11a0d + * fix commit-id: cb65190f9caaf464723e7d072b1f1b69a044ef7b + * Flaky: 2/100 + */ + +package main + +import ( + "os" + "runtime" + "runtime/pprof" + "sync" + "time" + "unsafe" +) + +func init() { + register("Cockroach3710", Cockroach3710) +} + +type Store_cockroach3710 struct { + raftLogQueue *baseQueue + replicas map[int]*Replica_cockroach3710 + + mu struct { + sync.RWMutex + } +} + +func (s *Store_cockroach3710) ForceRaftLogScanAndProcess() { + s.mu.RLock() + runtime.Gosched() + for _, r := range s.replicas { + s.raftLogQueue.MaybeAdd(r) + } + s.mu.RUnlock() +} + +func (s *Store_cockroach3710) RaftStatus() { + s.mu.RLock() + defer s.mu.RUnlock() +} + +func (s *Store_cockroach3710) processRaft() { + go func() { + for { + var replicas []*Replica_cockroach3710 + s.mu.Lock() + for _, r := range s.replicas { + replicas = append(replicas, r) + } + s.mu.Unlock() + break + } + }() +} + +type Replica_cockroach3710 struct { + store *Store_cockroach3710 +} + +type baseQueue struct { + sync.Mutex + impl *raftLogQueue +} + +func (bq *baseQueue) MaybeAdd(repl *Replica_cockroach3710) { + bq.Lock() + defer bq.Unlock() + bq.impl.shouldQueue(repl) +} + +type raftLogQueue struct{} + +func (*raftLogQueue) shouldQueue(r *Replica_cockroach3710) { + getTruncatableIndexes(r) +} + +func getTruncatableIndexes(r *Replica_cockroach3710) { + r.store.RaftStatus() +} + +func NewStore_cockroach3710() *Store_cockroach3710 { + rlq := &raftLogQueue{} + bq := &baseQueue{impl: rlq} + store := &Store_cockroach3710{ + raftLogQueue: bq, + replicas: make(map[int]*Replica_cockroach3710), + } + r1 := &Replica_cockroach3710{store} + r2 := &Replica_cockroach3710{store} + + makeKey := func(r *Replica_cockroach3710) int { + return int((uintptr(unsafe.Pointer(r)) >> 1) % 7) + } + store.replicas[makeKey(r1)] = r1 + store.replicas[makeKey(r2)] = r2 + + return store +} + +func Cockroach3710() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + for i := 0; i < 10000; i++ { + go func() { + store := NewStore_cockroach3710() + go store.ForceRaftLogScanAndProcess() // G1 + go store.processRaft() // G2 + }() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach584.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach584.go new file mode 100644 index 00000000000000..33f7ba7a45ec9a --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach584.go @@ -0,0 +1,62 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
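+
+// Description: bootstrap breaks out of its loop while still holding g.mu (the
+// Unlock is missing on that path), so the subsequent manage call blocks
+// forever trying to acquire the same mutex.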
+ +package main + +import ( + "os" + "runtime" + "runtime/pprof" + "sync" +) + +func init() { + register("Cockroach584", Cockroach584) +} + +type gossip_cockroach584 struct { + mu sync.Mutex // L1 + closed bool +} + +func (g *gossip_cockroach584) bootstrap() { + for { + g.mu.Lock() + if g.closed { + // Missing g.mu.Unlock + break + } + g.mu.Unlock() + } +} + +func (g *gossip_cockroach584) manage() { + for { + g.mu.Lock() + if g.closed { + // Missing g.mu.Unlock + break + } + g.mu.Unlock() + } +} + +func Cockroach584() { + prof := pprof.Lookup("goroutineleak") + defer func() { + for i := 0; i < yieldCount; i++ { + // Yield several times to allow the child goroutine to run. + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) + }() + + g := &gossip_cockroach584{ + closed: true, + } + go func() { // G1 + g.bootstrap() + g.manage() + }() +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach6181.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach6181.go new file mode 100644 index 00000000000000..80f1dd504daae5 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach6181.go @@ -0,0 +1,87 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +/* + * Project: cockroach + * Issue or PR : https://github.com/cockroachdb/cockroach/pull/6181 + * Buggy version: c0a232b5521565904b851699853bdbd0c670cf1e + * fix commit-id: d5814e4886a776bf7789b3c51b31f5206480d184 + * Flaky: 57/100 + */ +package main + +import ( + "io" + "os" + "runtime" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Cockroach6181", Cockroach6181) +} + +type testDescriptorDB_cockroach6181 struct { + cache *rangeDescriptorCache_cockroach6181 +} + +func initTestDescriptorDB_cockroach6181() *testDescriptorDB_cockroach6181 { + return &testDescriptorDB_cockroach6181{&rangeDescriptorCache_cockroach6181{}} +} + +type rangeDescriptorCache_cockroach6181 struct { + rangeCacheMu sync.RWMutex +} + +func (rdc *rangeDescriptorCache_cockroach6181) LookupRangeDescriptor() { + rdc.rangeCacheMu.RLock() + runtime.Gosched() + io.Discard.Write([]byte(rdc.String())) + rdc.rangeCacheMu.RUnlock() + rdc.rangeCacheMu.Lock() + rdc.rangeCacheMu.Unlock() +} + +func (rdc *rangeDescriptorCache_cockroach6181) String() string { + rdc.rangeCacheMu.RLock() + defer rdc.rangeCacheMu.RUnlock() + return rdc.stringLocked() +} + +func (rdc *rangeDescriptorCache_cockroach6181) stringLocked() string { + return "something here" +} + +func doLookupWithToken_cockroach6181(rc *rangeDescriptorCache_cockroach6181) { + rc.LookupRangeDescriptor() +} + +func testRangeCacheCoalescedRequests_cockroach6181() { + db := initTestDescriptorDB_cockroach6181() + pauseLookupResumeAndAssert := func() { + var wg sync.WaitGroup + for i := 0; i < 3; i++ { + wg.Add(1) + go func() { // G2,G3,... 
+ doLookupWithToken_cockroach6181(db.cache) + wg.Done() + }() + } + wg.Wait() + } + pauseLookupResumeAndAssert() +} + +func Cockroach6181() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + for i := 0; i < 100; i++ { + go testRangeCacheCoalescedRequests_cockroach6181() // G1 + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach7504.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach7504.go new file mode 100644 index 00000000000000..945308a76f92b1 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach7504.go @@ -0,0 +1,183 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +/* + * Project: cockroach + * Issue or PR : https://github.com/cockroachdb/cockroach/pull/7504 + * Buggy version: bc963b438cdc3e0ad058a5282358e5aee0595e17 + * fix commit-id: cab761b9f5ee5dee1448bc5d6b1d9f5a0ff0bad5 + * Flaky: 1/100 + */ +package main + +import ( + "os" + "runtime" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Cockroach7504", Cockroach7504) +} + +func MakeCacheKey_cockroach7504(lease *LeaseState_cockroach7504) int { + return lease.id +} + +type LeaseState_cockroach7504 struct { + mu sync.Mutex // L1 + id int +} +type LeaseSet_cockroach7504 struct { + data []*LeaseState_cockroach7504 +} + +func (l *LeaseSet_cockroach7504) find(id int) *LeaseState_cockroach7504 { + return l.data[id] +} + +func (l *LeaseSet_cockroach7504) remove(s *LeaseState_cockroach7504) { + for i := 0; i < len(l.data); i++ { + if s == l.data[i] { + l.data = append(l.data[:i], l.data[i+1:]...) + break + } + } +} + +type tableState_cockroach7504 struct { + tableNameCache *tableNameCache_cockroach7504 + mu sync.Mutex // L3 + active *LeaseSet_cockroach7504 +} + +func (t *tableState_cockroach7504) release(lease *LeaseState_cockroach7504) { + t.mu.Lock() // L3 + defer t.mu.Unlock() // L3 + + s := t.active.find(MakeCacheKey_cockroach7504(lease)) + s.mu.Lock() // L1 + runtime.Gosched() + defer s.mu.Unlock() // L1 + + t.removeLease(s) +} +func (t *tableState_cockroach7504) removeLease(lease *LeaseState_cockroach7504) { + t.active.remove(lease) + t.tableNameCache.remove(lease) // L1 acquire/release +} + +type tableNameCache_cockroach7504 struct { + mu sync.Mutex // L2 + tables map[int]*LeaseState_cockroach7504 +} + +func (c *tableNameCache_cockroach7504) get(id int) { + c.mu.Lock() // L2 + defer c.mu.Unlock() // L2 + lease, ok := c.tables[id] + if !ok { + return + } + if lease == nil { + panic("nil lease in name cache") + } + lease.mu.Lock() // L1 + defer lease.mu.Unlock() // L1 +} + +func (c *tableNameCache_cockroach7504) remove(lease *LeaseState_cockroach7504) { + c.mu.Lock() // L2 + runtime.Gosched() + defer c.mu.Unlock() // L2 + key := MakeCacheKey_cockroach7504(lease) + existing, ok := c.tables[key] + if !ok { + return + } + if existing == lease { + delete(c.tables, key) + } +} + +type LeaseManager_cockroach7504 struct { + _ [64]byte + tableNames *tableNameCache_cockroach7504 + tables map[int]*tableState_cockroach7504 +} + +func (m *LeaseManager_cockroach7504) AcquireByName(id int) { + m.tableNames.get(id) +} + +func (m *LeaseManager_cockroach7504) findTableState(lease *LeaseState_cockroach7504) *tableState_cockroach7504 { + existing, ok := m.tables[lease.id] + if !ok { + return nil + } + return existing +} + +func (m *LeaseManager_cockroach7504) Release(lease 
*LeaseState_cockroach7504) { + t := m.findTableState(lease) + t.release(lease) +} +func NewLeaseManager_cockroach7504(tname *tableNameCache_cockroach7504, ts *tableState_cockroach7504) *LeaseManager_cockroach7504 { + mgr := &LeaseManager_cockroach7504{ + tableNames: tname, + tables: make(map[int]*tableState_cockroach7504), + } + mgr.tables[0] = ts + return mgr +} +func NewLeaseSet_cockroach7504(n int) *LeaseSet_cockroach7504 { + lset := &LeaseSet_cockroach7504{} + for i := 0; i < n; i++ { + lease := new(LeaseState_cockroach7504) + lset.data = append(lset.data, lease) + } + return lset +} + +func Cockroach7504() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + for i := 0; i < 100; i++ { + go func() { + leaseNum := 2 + lset := NewLeaseSet_cockroach7504(leaseNum) + + nc := &tableNameCache_cockroach7504{ + tables: make(map[int]*LeaseState_cockroach7504), + } + for i := 0; i < leaseNum; i++ { + nc.tables[i] = lset.find(i) + } + + ts := &tableState_cockroach7504{ + tableNameCache: nc, + active: lset, + } + + mgr := NewLeaseManager_cockroach7504(nc, ts) + + // G1 + go func() { + // lock L2-L1 + mgr.AcquireByName(0) + }() + + // G2 + go func() { + // lock L1-L2 + mgr.Release(lset.find(0)) + }() + }() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach9935.go b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach9935.go new file mode 100644 index 00000000000000..e143a6670d8ff2 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/cockroach9935.go @@ -0,0 +1,77 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +/* + * Project: cockroach + * Issue or PR : https://github.com/cockroachdb/cockroach/pull/9935 + * Buggy version: 4df302cc3f03328395dc3fefbfba58b7718e4f2f + * fix commit-id: ed6a100ba38dd51b0888b9a3d3ac6bdbb26c528c + * Flaky: 100/100 + * Description: This leak is caused by acquiring l.mu.Lock() twice. The fix is + * to release l.mu.Lock() before acquiring l.mu.Lock for the second time. 
+ */ +package main + +import ( + "errors" + "math/rand" + "os" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Cockroach9935", Cockroach9935) +} + +type loggingT_cockroach9935 struct { + mu sync.Mutex +} + +func (l *loggingT_cockroach9935) outputLogEntry() { + l.mu.Lock() + if err := l.createFile(); err != nil { + l.exit(err) + } + l.mu.Unlock() +} + +func (l *loggingT_cockroach9935) createFile() error { + if rand.Intn(8)%4 > 0 { + return errors.New("") + } + return nil +} + +func (l *loggingT_cockroach9935) exit(err error) { + l.mu.Lock() // Blocked forever + defer l.mu.Unlock() +} + +// Example of goroutine leak trace: +// +// G1 +//---------------------------- +// l.outputLogEntry() +// l.mu.Lock() +// l.createFile() +// l.exit() +// l.mu.Lock() +//-----------G1 leaks--------- + +func Cockroach9935() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + for i := 0; i < 100; i++ { + go func() { + l := &loggingT_cockroach9935{} + go l.outputLogEntry() // G1 + }() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/etcd10492.go b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd10492.go new file mode 100644 index 00000000000000..7d56642d5e9ed9 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd10492.go @@ -0,0 +1,72 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +package main + +import ( + "context" + "os" + "runtime" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Etcd10492", Etcd10492) +} + +type Checkpointer_etcd10492 func(ctx context.Context) + +type lessor_etcd10492 struct { + mu sync.RWMutex + cp Checkpointer_etcd10492 + checkpointInterval time.Duration +} + +func (le *lessor_etcd10492) Checkpoint() { + le.mu.Lock() // Lock acquired twice here + defer le.mu.Unlock() +} + +func (le *lessor_etcd10492) SetCheckpointer(cp Checkpointer_etcd10492) { + le.mu.Lock() + defer le.mu.Unlock() + + le.cp = cp +} + +func (le *lessor_etcd10492) Renew() { + le.mu.Lock() + unlock := func() { le.mu.Unlock() } + defer func() { unlock() }() + + if le.cp != nil { + le.cp(context.Background()) + } +} + +func Etcd10492() { + prof := pprof.Lookup("goroutineleak") + defer func() { + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) + }() + + go func() { // G1 + le := &lessor_etcd10492{ + checkpointInterval: 0, + } + fakerCheckerpointer_etcd10492 := func(ctx context.Context) { + le.Checkpoint() + } + le.SetCheckpointer(fakerCheckerpointer_etcd10492) + le.mu.Lock() + le.mu.Unlock() + le.Renew() + }() +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/etcd5509.go b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd5509.go new file mode 100644 index 00000000000000..868e926e66949a --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd5509.go @@ -0,0 +1,126 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
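+
+// Description: remoteClient.acquire returns ErrConnClosed while still holding
+// the client's read lock (the RUnlock is missing on that path), so the later
+// Client.Close blocks forever on c.mu.Lock().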
+ +package main + +import ( + "context" + "io" + "os" + "runtime" + "runtime/pprof" + "sync" +) + +func init() { + register("Etcd5509", Etcd5509) +} + +var ErrConnClosed_etcd5509 error + +type Client_etcd5509 struct { + mu sync.RWMutex + ctx context.Context + cancel context.CancelFunc +} + +func (c *Client_etcd5509) Close() { + c.mu.Lock() + defer c.mu.Unlock() + if c.cancel == nil { + return + } + c.cancel() + c.cancel = nil + c.mu.Unlock() + c.mu.Lock() +} + +type remoteClient_etcd5509 struct { + client *Client_etcd5509 + mu sync.Mutex +} + +func (r *remoteClient_etcd5509) acquire(ctx context.Context) error { + for { + r.client.mu.RLock() + closed := r.client.cancel == nil + r.mu.Lock() + r.mu.Unlock() + if closed { + return ErrConnClosed_etcd5509 // Missing RUnlock before return + } + r.client.mu.RUnlock() + } +} + +type kv_etcd5509 struct { + rc *remoteClient_etcd5509 +} + +func (kv *kv_etcd5509) Get(ctx context.Context) error { + return kv.Do(ctx) +} + +func (kv *kv_etcd5509) Do(ctx context.Context) error { + for { + err := kv.do(ctx) + if err == nil { + return nil + } + return err + } +} + +func (kv *kv_etcd5509) do(ctx context.Context) error { + err := kv.getRemote(ctx) + return err +} + +func (kv *kv_etcd5509) getRemote(ctx context.Context) error { + return kv.rc.acquire(ctx) +} + +type KV interface { + Get(ctx context.Context) error + Do(ctx context.Context) error +} + +func NewKV_etcd5509(c *Client_etcd5509) KV { + return &kv_etcd5509{rc: &remoteClient_etcd5509{ + client: c, + }} +} + +func Etcd5509() { + prof := pprof.Lookup("goroutineleak") + defer func() { + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) + }() + + go func() { + ctx, _ := context.WithCancel(context.TODO()) + cli := &Client_etcd5509{ + ctx: ctx, + } + kv := NewKV_etcd5509(cli) + donec := make(chan struct{}) + go func() { + defer close(donec) + err := kv.Get(context.TODO()) + if err != nil && err != ErrConnClosed_etcd5509 { + io.Discard.Write([]byte("Expect ErrConnClosed")) + } + }() + + runtime.Gosched() + cli.Close() + + <-donec + }() +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/etcd6708.go b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd6708.go new file mode 100644 index 00000000000000..afbbe35104bbb8 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd6708.go @@ -0,0 +1,100 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
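+
+// Description: Sync takes the write lock and, via SetEndpoints and the
+// members API, calls Do on the same client, which tries to take the read
+// lock on the RWMutex already held for writing; the goroutine deadlocks on
+// itself.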
+ +package main + +import ( + "context" + "os" + "runtime" + "runtime/pprof" + "sync" +) + +func init() { + register("Etcd6708", Etcd6708) +} + +type EndpointSelectionMode_etcd6708 int + +const ( + EndpointSelectionRandom_etcd6708 EndpointSelectionMode_etcd6708 = iota + EndpointSelectionPrioritizeLeader_etcd6708 +) + +type MembersAPI_etcd6708 interface { + Leader(ctx context.Context) +} + +type Client_etcd6708 interface { + Sync(ctx context.Context) + SetEndpoints() + httpClient_etcd6708 +} + +type httpClient_etcd6708 interface { + Do(context.Context) +} + +type httpClusterClient_etcd6708 struct { + sync.RWMutex + selectionMode EndpointSelectionMode_etcd6708 +} + +func (c *httpClusterClient_etcd6708) getLeaderEndpoint() { + mAPI := NewMembersAPI_etcd6708(c) + mAPI.Leader(context.Background()) +} + +func (c *httpClusterClient_etcd6708) SetEndpoints() { + switch c.selectionMode { + case EndpointSelectionRandom_etcd6708: + case EndpointSelectionPrioritizeLeader_etcd6708: + c.getLeaderEndpoint() + } +} + +func (c *httpClusterClient_etcd6708) Do(ctx context.Context) { + c.RLock() + c.RUnlock() +} + +func (c *httpClusterClient_etcd6708) Sync(ctx context.Context) { + c.Lock() + defer c.Unlock() + + c.SetEndpoints() +} + +type httpMembersAPI_etcd6708 struct { + client httpClient_etcd6708 +} + +func (m *httpMembersAPI_etcd6708) Leader(ctx context.Context) { + m.client.Do(ctx) +} + +func NewMembersAPI_etcd6708(c Client_etcd6708) MembersAPI_etcd6708 { + return &httpMembersAPI_etcd6708{ + client: c, + } +} + +func Etcd6708() { + prof := pprof.Lookup("goroutineleak") + defer func() { + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) + }() + + go func() { + hc := &httpClusterClient_etcd6708{ + selectionMode: EndpointSelectionPrioritizeLeader_etcd6708, + } + hc.Sync(context.Background()) + }() +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/etcd6857.go b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd6857.go new file mode 100644 index 00000000000000..0798ab23d3b588 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd6857.go @@ -0,0 +1,81 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
+ +/* + * Project: etcd + * Issue or PR : https://github.com/etcd-io/etcd/pull/6857 + * Buggy version: 7c8f13aed7fe251e7066ed6fc1a090699c2cae0e + * fix commit-id: 7afc490c95789c408fbc256d8e790273d331c984 + * Flaky: 19/100 + */ +package main + +import ( + "os" + "runtime/pprof" + "time" +) + +func init() { + register("Etcd6857", Etcd6857) +} + +type Status_etcd6857 struct{} + +type node_etcd6857 struct { + status chan chan Status_etcd6857 + stop chan struct{} + done chan struct{} +} + +func (n *node_etcd6857) Status() Status_etcd6857 { + c := make(chan Status_etcd6857) + n.status <- c + return <-c +} + +func (n *node_etcd6857) run() { + for { + select { + case c := <-n.status: + c <- Status_etcd6857{} + case <-n.stop: + close(n.done) + return + } + } +} + +func (n *node_etcd6857) Stop() { + select { + case n.stop <- struct{}{}: + case <-n.done: + return + } + <-n.done +} + +func NewNode_etcd6857() *node_etcd6857 { + return &node_etcd6857{ + status: make(chan chan Status_etcd6857), + stop: make(chan struct{}), + done: make(chan struct{}), + } +} + +func Etcd6857() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + for i := 0; i <= 100; i++ { + go func() { + n := NewNode_etcd6857() + go n.run() // G1 + go n.Status() // G2 + go n.Stop() // G3 + }() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/etcd6873.go b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd6873.go new file mode 100644 index 00000000000000..1846d0f260c2f6 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd6873.go @@ -0,0 +1,98 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
+ +/* + * Project: etcd + * Issue or PR : https://github.com/etcd-io/etcd/commit/7618fdd1d642e47cac70c03f637b0fd798a53a6e + * Buggy version: 377f19b0031f9c0aafe2aec28b6f9019311f52f9 + * fix commit-id: 7618fdd1d642e47cac70c03f637b0fd798a53a6e + * Flaky: 9/100 + */ +package main + +import ( + "os" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Etcd6873", Etcd6873) +} + +type watchBroadcast_etcd6873 struct{} + +type watchBroadcasts_etcd6873 struct { + mu sync.Mutex + updatec chan *watchBroadcast_etcd6873 + donec chan struct{} +} + +func newWatchBroadcasts_etcd6873() *watchBroadcasts_etcd6873 { + wbs := &watchBroadcasts_etcd6873{ + updatec: make(chan *watchBroadcast_etcd6873, 1), + donec: make(chan struct{}), + } + go func() { // G2 + defer close(wbs.donec) + for wb := range wbs.updatec { + wbs.coalesce(wb) + } + }() + return wbs +} + +func (wbs *watchBroadcasts_etcd6873) coalesce(wb *watchBroadcast_etcd6873) { + wbs.mu.Lock() + wbs.mu.Unlock() +} + +func (wbs *watchBroadcasts_etcd6873) stop() { + wbs.mu.Lock() + defer wbs.mu.Unlock() + close(wbs.updatec) + <-wbs.donec +} + +func (wbs *watchBroadcasts_etcd6873) update(wb *watchBroadcast_etcd6873) { + select { + case wbs.updatec <- wb: + default: + } +} + +// Example of goroutine leak trace: +// +// G1 G2 G3 +//--------------------------------------------------------- +// newWatchBroadcasts() +// wbs.update() +// wbs.updatec <- +// return +// <-wbs.updatec +// wbs.coalesce() +// wbs.stop() +// wbs.mu.Lock() +// close(wbs.updatec) +// <-wbs.donec +// wbs.mu.Lock() +//---------------------G2,G3 leak------------------------- +// + +func Etcd6873() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + for i := 0; i < 100; i++ { + go func() { + wbs := newWatchBroadcasts_etcd6873() // G1 + wbs.update(&watchBroadcast_etcd6873{}) + go wbs.stop() // G3 + }() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/etcd7492.go b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd7492.go new file mode 100644 index 00000000000000..3c8d58a221cac5 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd7492.go @@ -0,0 +1,163 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
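+
+// A plausible leak scenario, inferred from this reproduction: an Authenticate
+// goroutine holds simpleTokensMu and blocks sending on the already-full
+// addSimpleTokenCh buffer, while the TTL keeper goroutine is stuck in
+// deleteTokenFunc waiting for simpleTokensMu. Neither side can proceed, and
+// tearDown's stop() then blocks on stopCh as well.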
+ +/* + * Project: etcd + * Issue or PR : https://github.com/etcd-io/etcd/pull/7492 + * Buggy version: 51939650057d602bb5ab090633138fffe36854dc + * fix commit-id: 1b1fabef8ffec606909f01c3983300fff539f214 + * Flaky: 40/100 + */ +package main + +import ( + "os" + "runtime" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Etcd7492", Etcd7492) +} + +type TokenProvider_etcd7492 interface { + assign() + enable() + disable() +} + +type simpleTokenTTLKeeper_etcd7492 struct { + tokens map[string]time.Time + addSimpleTokenCh chan struct{} + stopCh chan chan struct{} + deleteTokenFunc func(string) +} + +type authStore_etcd7492 struct { + tokenProvider TokenProvider_etcd7492 +} + +func (as *authStore_etcd7492) Authenticate() { + as.tokenProvider.assign() +} + +func NewSimpleTokenTTLKeeper_etcd7492(deletefunc func(string)) *simpleTokenTTLKeeper_etcd7492 { + stk := &simpleTokenTTLKeeper_etcd7492{ + tokens: make(map[string]time.Time), + addSimpleTokenCh: make(chan struct{}, 1), + stopCh: make(chan chan struct{}), + deleteTokenFunc: deletefunc, + } + go stk.run() // G1 + return stk +} + +func (tm *simpleTokenTTLKeeper_etcd7492) run() { + tokenTicker := time.NewTicker(time.Nanosecond) + defer tokenTicker.Stop() + for { + select { + case <-tm.addSimpleTokenCh: + runtime.Gosched() + /// Make tm.tokens not empty is enough + tm.tokens["1"] = time.Now() + case <-tokenTicker.C: + runtime.Gosched() + for t, _ := range tm.tokens { + tm.deleteTokenFunc(t) + delete(tm.tokens, t) + } + case waitCh := <-tm.stopCh: + waitCh <- struct{}{} + return + } + } +} + +func (tm *simpleTokenTTLKeeper_etcd7492) addSimpleToken() { + tm.addSimpleTokenCh <- struct{}{} + runtime.Gosched() +} + +func (tm *simpleTokenTTLKeeper_etcd7492) stop() { + waitCh := make(chan struct{}) + tm.stopCh <- waitCh + <-waitCh + close(tm.stopCh) +} + +type tokenSimple_etcd7492 struct { + simpleTokenKeeper *simpleTokenTTLKeeper_etcd7492 + simpleTokensMu sync.RWMutex +} + +func (t *tokenSimple_etcd7492) assign() { + t.assignSimpleTokenToUser() +} + +func (t *tokenSimple_etcd7492) assignSimpleTokenToUser() { + t.simpleTokensMu.Lock() + runtime.Gosched() + t.simpleTokenKeeper.addSimpleToken() + t.simpleTokensMu.Unlock() +} +func newDeleterFunc(t *tokenSimple_etcd7492) func(string) { + return func(tk string) { + t.simpleTokensMu.Lock() + defer t.simpleTokensMu.Unlock() + } +} + +func (t *tokenSimple_etcd7492) enable() { + t.simpleTokenKeeper = NewSimpleTokenTTLKeeper_etcd7492(newDeleterFunc(t)) +} + +func (t *tokenSimple_etcd7492) disable() { + if t.simpleTokenKeeper != nil { + t.simpleTokenKeeper.stop() + t.simpleTokenKeeper = nil + } + t.simpleTokensMu.Lock() + t.simpleTokensMu.Unlock() +} + +func newTokenProviderSimple_etcd7492() *tokenSimple_etcd7492 { + return &tokenSimple_etcd7492{} +} + +func setupAuthStore_etcd7492() (store *authStore_etcd7492, teardownfunc func()) { + as := &authStore_etcd7492{ + tokenProvider: newTokenProviderSimple_etcd7492(), + } + as.tokenProvider.enable() + tearDown := func() { + as.tokenProvider.disable() + } + return as, tearDown +} + +func Etcd7492() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + for i := 0; i < 100; i++ { + go func() { + as, tearDown := setupAuthStore_etcd7492() + defer tearDown() + var wg sync.WaitGroup + wg.Add(3) + for i := 0; i < 3; i++ { + go func() { // G2 + as.Authenticate() + defer wg.Done() + }() + } + wg.Wait() + }() + } +} diff --git 
a/src/runtime/testdata/testgoroutineleakprofile/goker/etcd7902.go b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd7902.go new file mode 100644 index 00000000000000..0a96d7f0472d9e --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/etcd7902.go @@ -0,0 +1,87 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +/* + * Project: etcd + * Issue or PR : https://github.com/coreos/etcd/pull/7902 + * Buggy version: dfdaf082c51ba14861267f632f6af795a27eb4ef + * fix commit-id: 87d99fe0387ee1df1cf1811d88d37331939ef4ae + * Flaky: 100/100 + */ +package main + +import ( + "os" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Etcd7902", Etcd7902) +} + +type roundClient_etcd7902 struct { + progress int + acquire func() + validate func() + release func() +} + +func runElectionFunc_etcd7902() { + rcs := make([]roundClient_etcd7902, 3) + nextc := make(chan bool) + for i := range rcs { + var rcNextc chan bool + setRcNextc := func() { + rcNextc = nextc + } + rcs[i].acquire = func() {} + rcs[i].validate = func() { + setRcNextc() + } + rcs[i].release = func() { + if i == 0 { // Assume the first roundClient is the leader + close(nextc) + nextc = make(chan bool) + } + <-rcNextc // Follower is blocking here + } + } + doRounds_etcd7902(rcs, 100) +} + +func doRounds_etcd7902(rcs []roundClient_etcd7902, rounds int) { + var mu sync.Mutex + var wg sync.WaitGroup + wg.Add(len(rcs)) + for i := range rcs { + go func(rc *roundClient_etcd7902) { // G2,G3 + defer wg.Done() + for rc.progress < rounds || rounds <= 0 { + rc.acquire() + mu.Lock() + rc.validate() + mu.Unlock() + time.Sleep(10 * time.Millisecond) + rc.progress++ + mu.Lock() + rc.release() + mu.Unlock() + } + }(&rcs[i]) + } + wg.Wait() +} + +func Etcd7902() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + for i := 0; i < 100; i++ { + go runElectionFunc_etcd7902() // G1 + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/grpc1275.go b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc1275.go new file mode 100644 index 00000000000000..ec5491e438f74a --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc1275.go @@ -0,0 +1,111 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
+ +/* + * Project: grpc-go + * Issue or PR : https://github.com/grpc/grpc-go/pull/1275 + * Buggy version: (missing) + * fix commit-id: 0669f3f89e0330e94bb13fa1ce8cc704aab50c9c + * Flaky: 100/100 + */ +package main + +import ( + "io" + "os" + "runtime/pprof" + "time" +) + +func init() { + register("Grpc1275", Grpc1275) +} + +type recvBuffer_grpc1275 struct { + c chan bool +} + +func (b *recvBuffer_grpc1275) get() <-chan bool { + return b.c +} + +type recvBufferReader_grpc1275 struct { + recv *recvBuffer_grpc1275 +} + +func (r *recvBufferReader_grpc1275) Read(p []byte) (int, error) { + select { + case <-r.recv.get(): + } + return 0, nil +} + +type Stream_grpc1275 struct { + trReader io.Reader +} + +func (s *Stream_grpc1275) Read(p []byte) (int, error) { + return io.ReadFull(s.trReader, p) +} + +type http2Client_grpc1275 struct{} + +func (t *http2Client_grpc1275) CloseStream(s *Stream_grpc1275) { + // It is the client.CloseSream() method called by the + // main goroutine that should send the message, but it + // is not. The patch is to send out this message. +} + +func (t *http2Client_grpc1275) NewStream() *Stream_grpc1275 { + return &Stream_grpc1275{ + trReader: &recvBufferReader_grpc1275{ + recv: &recvBuffer_grpc1275{ + c: make(chan bool), + }, + }, + } +} + +func testInflightStreamClosing_grpc1275() { + client := &http2Client_grpc1275{} + stream := client.NewStream() + donec := make(chan bool) + go func() { // G2 + defer close(donec) + stream.Read([]byte{1}) + }() + + client.CloseStream(stream) + + timeout := time.NewTimer(300 * time.Nanosecond) + select { + case <-donec: + if !timeout.Stop() { + <-timeout.C + } + case <-timeout.C: + } +} + +/// +/// G1 G2 +/// testInflightStreamClosing() +/// stream.Read() +/// io.ReadFull() +/// <- r.recv.get() +/// CloseStream() +/// <- donec +/// ------------G1 timeout, G2 leak--------------------- +/// + +func Grpc1275() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + go func() { + testInflightStreamClosing_grpc1275() // G1 + }() +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/grpc1424.go b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc1424.go new file mode 100644 index 00000000000000..777534a788163c --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc1424.go @@ -0,0 +1,105 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
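+
+// A plausible leak scenario, inferred from this reproduction: lbWatcher is
+// handed doneChan but never closes it; once addrCh is closed, its range loop
+// simply returns, so the goroutine blocked on <-doneChan leaks.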
+ +/* + * Project: grpc-go + * Issue or PR : https://github.com/grpc/grpc-go/pull/1424 + * Buggy version: 39c8c3866d926d95e11c03508bf83d00f2963f91 + * fix commit-id: 64bd0b04a7bb1982078bae6a2ab34c226125fbc1 + * Flaky: 100/100 + */ +package main + +import ( + "os" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Grpc1424", Grpc1424) +} + +type Balancer_grpc1424 interface { + Notify() <-chan bool +} + +type roundRobin_grpc1424 struct { + mu sync.Mutex + addrCh chan bool +} + +func (rr *roundRobin_grpc1424) Notify() <-chan bool { + return rr.addrCh +} + +type addrConn_grpc1424 struct { + mu sync.Mutex +} + +func (ac *addrConn_grpc1424) tearDown() { + ac.mu.Lock() + defer ac.mu.Unlock() +} + +type dialOption_grpc1424 struct { + balancer Balancer_grpc1424 +} + +type ClientConn_grpc1424 struct { + dopts dialOption_grpc1424 + conns []*addrConn_grpc1424 +} + +func (cc *ClientConn_grpc1424) lbWatcher(doneChan chan bool) { + for addr := range cc.dopts.balancer.Notify() { + if addr { + // nop, make compiler happy + } + var ( + del []*addrConn_grpc1424 + ) + for _, a := range cc.conns { + del = append(del, a) + } + for _, c := range del { + c.tearDown() + } + } +} + +func NewClientConn_grpc1424() *ClientConn_grpc1424 { + cc := &ClientConn_grpc1424{ + dopts: dialOption_grpc1424{ + &roundRobin_grpc1424{addrCh: make(chan bool)}, + }, + } + return cc +} + +func DialContext_grpc1424() { + cc := NewClientConn_grpc1424() + waitC := make(chan error, 1) + go func() { // G2 + defer close(waitC) + ch := cc.dopts.balancer.Notify() + if ch != nil { + doneChan := make(chan bool) + go cc.lbWatcher(doneChan) // G3 + <-doneChan + } + }() + /// close addrCh + close(cc.dopts.balancer.(*roundRobin_grpc1424).addrCh) +} + +func Grpc1424() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + go DialContext_grpc1424() // G1 +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/grpc1460.go b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc1460.go new file mode 100644 index 00000000000000..bc658b408d8c6b --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc1460.go @@ -0,0 +1,84 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
+ +/* + * Project: grpc + * Issue or PR : https://github.com/grpc/grpc-go/pull/1460 + * Buggy version: 7db1564ba1229bc42919bb1f6d9c4186f3aa8678 + * fix commit-id: e605a1ecf24b634f94f4eefdab10a9ada98b70dd + * Flaky: 100/100 + */ +package main + +import ( + "os" + "runtime" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Grpc1460", Grpc1460) +} + +type Stream_grpc1460 struct{} + +type http2Client_grpc1460 struct { + mu sync.Mutex + awakenKeepalive chan struct{} + activeStream []*Stream_grpc1460 +} + +func (t *http2Client_grpc1460) keepalive() { + t.mu.Lock() + if len(t.activeStream) < 1 { + <-t.awakenKeepalive + runtime.Gosched() + t.mu.Unlock() + } else { + t.mu.Unlock() + } +} + +func (t *http2Client_grpc1460) NewStream() { + t.mu.Lock() + runtime.Gosched() + t.activeStream = append(t.activeStream, &Stream_grpc1460{}) + if len(t.activeStream) == 1 { + select { + case t.awakenKeepalive <- struct{}{}: + default: + } + } + t.mu.Unlock() +} + +/// +/// G1 G2 +/// client.keepalive() +/// client.NewStream() +/// t.mu.Lock() +/// <-t.awakenKeepalive +/// t.mu.Lock() +/// ---------------G1, G2 deadlock-------------- +/// + +func Grpc1460() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + for i := 0; i < 1000; i++ { + go func() { + client := &http2Client_grpc1460{ + awakenKeepalive: make(chan struct{}), + } + go client.keepalive() //G1 + go client.NewStream() //G2 + }() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/grpc3017.go b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc3017.go new file mode 100644 index 00000000000000..0523b9509fdcbe --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc3017.go @@ -0,0 +1,123 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +package main + +import ( + "os" + "runtime" + "runtime/pprof" + "sync" + "time" +) + +// This test case is a reproduction of grpc/3017. +// +// It is a goroutine leak that also simultaneously engages many GC assists. +// Testing runtime behaviour when pivoting between regular and goroutine leak detection modes. 
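+//
+// The leak itself: the time.AfterFunc callback below locks ccc.mu and, on the
+// entry.abortDeleting path, returns without unlocking (see the "Missing unlock"
+// comment). Every later NewSubConn/RemoveSubConn call then blocks on L1
+// forever, leaking the worker goroutine and the parent waiting on done.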
+ +func init() { + register("Grpc3017", Grpc3017) +} + +type Address_grpc3017 int +type SubConn_grpc3017 int + +type subConnCacheEntry_grpc3017 struct { + sc SubConn_grpc3017 + cancel func() + abortDeleting bool +} + +type lbCacheClientConn_grpc3017 struct { + mu sync.Mutex // L1 + timeout time.Duration + subConnCache map[Address_grpc3017]*subConnCacheEntry_grpc3017 + subConnToAddr map[SubConn_grpc3017]Address_grpc3017 +} + +func (ccc *lbCacheClientConn_grpc3017) NewSubConn(addrs []Address_grpc3017) SubConn_grpc3017 { + if len(addrs) != 1 { + return SubConn_grpc3017(1) + } + addrWithoutMD := addrs[0] + ccc.mu.Lock() // L1 + defer ccc.mu.Unlock() + if entry, ok := ccc.subConnCache[addrWithoutMD]; ok { + entry.cancel() + delete(ccc.subConnCache, addrWithoutMD) + return entry.sc + } + scNew := SubConn_grpc3017(1) + ccc.subConnToAddr[scNew] = addrWithoutMD + return scNew +} + +func (ccc *lbCacheClientConn_grpc3017) RemoveSubConn(sc SubConn_grpc3017) { + ccc.mu.Lock() // L1 + defer ccc.mu.Unlock() + addr, ok := ccc.subConnToAddr[sc] + if !ok { + return + } + + if entry, ok := ccc.subConnCache[addr]; ok { + if entry.sc != sc { + delete(ccc.subConnToAddr, sc) + } + return + } + + entry := &subConnCacheEntry_grpc3017{ + sc: sc, + } + ccc.subConnCache[addr] = entry + + timer := time.AfterFunc(ccc.timeout, func() { // G3 + runtime.Gosched() + ccc.mu.Lock() // L1 + if entry.abortDeleting { + return // Missing unlock + } + delete(ccc.subConnToAddr, sc) + delete(ccc.subConnCache, addr) + ccc.mu.Unlock() + }) + + entry.cancel = func() { + if !timer.Stop() { + entry.abortDeleting = true + } + } +} + +func Grpc3017() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + for i := 0; i < 100; i++ { + go func() { //G1 + done := make(chan struct{}) + + ccc := &lbCacheClientConn_grpc3017{ + timeout: time.Nanosecond, + subConnCache: make(map[Address_grpc3017]*subConnCacheEntry_grpc3017), + subConnToAddr: make(map[SubConn_grpc3017]Address_grpc3017), + } + + sc := ccc.NewSubConn([]Address_grpc3017{Address_grpc3017(1)}) + go func() { // G2 + for i := 0; i < 10000; i++ { + ccc.RemoveSubConn(sc) + sc = ccc.NewSubConn([]Address_grpc3017{Address_grpc3017(1)}) + } + close(done) + }() + <-done + }() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/grpc660.go b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc660.go new file mode 100644 index 00000000000000..5f6201ec8062d9 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc660.go @@ -0,0 +1,65 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
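+
+// A plausible leak scenario, inferred from this reproduction: when the select
+// in doCloseLoopUnary takes the <-bc.stop case and returns, the worker
+// goroutine spawned in that same iteration is left blocked forever on its
+// unbuffered `done <- ...` send.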
+ +/* + * Project: grpc-go + * Issue or PR : https://github.com/grpc/grpc-go/pull/660 + * Buggy version: db85417dd0de6cc6f583672c6175a7237e5b5dd2 + * fix commit-id: ceacfbcbc1514e4e677932fd55938ac455d182fb + * Flaky: 100/100 + */ +package main + +import ( + "math/rand" + "os" + "runtime" + "runtime/pprof" +) + +func init() { + register("Grpc660", Grpc660) +} + +type benchmarkClient_grpc660 struct { + stop chan bool +} + +func (bc *benchmarkClient_grpc660) doCloseLoopUnary() { + for { + done := make(chan bool) + go func() { // G2 + if rand.Intn(10) > 7 { + done <- false + return + } + done <- true + }() + select { + case <-bc.stop: + return + case <-done: + } + } +} + +func Grpc660() { + prof := pprof.Lookup("goroutineleak") + defer func() { + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) + }() + go func() { + bc := &benchmarkClient_grpc660{ + stop: make(chan bool), + } + go bc.doCloseLoopUnary() // G1 + go func() { // helper goroutine + bc.stop <- true + }() + }() +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/grpc795.go b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc795.go new file mode 100644 index 00000000000000..72005cc844e225 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc795.go @@ -0,0 +1,74 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +package main + +import ( + "os" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Grpc795", Grpc795) +} + +type Server_grpc795 struct { + mu sync.Mutex + drain bool +} + +func (s *Server_grpc795) GracefulStop() { + s.mu.Lock() + if s.drain { + s.mu.Lock() + return + } + s.drain = true + s.mu.Unlock() +} +func (s *Server_grpc795) Serve() { + s.mu.Lock() + s.mu.Unlock() +} + +func NewServer_grpc795() *Server_grpc795 { + return &Server_grpc795{} +} + +type test_grpc795 struct { + srv *Server_grpc795 +} + +func (te *test_grpc795) startServer() { + s := NewServer_grpc795() + te.srv = s + go s.Serve() +} + +func newTest_grpc795() *test_grpc795 { + return &test_grpc795{} +} + +func testServerGracefulStopIdempotent_grpc795() { + te := newTest_grpc795() + + te.startServer() + + for i := 0; i < 3; i++ { + te.srv.GracefulStop() + } +} + +func Grpc795() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + for i := 0; i < 100; i++ { + go testServerGracefulStopIdempotent_grpc795() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/grpc862.go b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc862.go new file mode 100644 index 00000000000000..188b3b88ba5f8a --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/grpc862.go @@ -0,0 +1,105 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
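+
+// A plausible leak scenario, inferred from this reproduction: DialContext never
+// assigns cc to its named return value, so the deferred cleanup sees a nil conn
+// and never calls Close (and thus never cancels cc.ctx); the resetAddrConn
+// goroutine is then stuck forever on <-ac.ctx.Done() in resetTransport.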
+ +/* + * Project: grpc-go + * Issue or PR : https://github.com/grpc/grpc-go/pull/862 + * Buggy version: d8f4ebe77f6b7b6403d7f98626de8a534f9b93a7 + * fix commit-id: dd5645bebff44f6b88780bb949022a09eadd7dae + * Flaky: 100/100 + */ +package main + +import ( + "context" + "os" + "runtime" + "runtime/pprof" + "time" +) + +func init() { + register("Grpc862", Grpc862) +} + +type ClientConn_grpc862 struct { + ctx context.Context + cancel context.CancelFunc + conns []*addrConn_grpc862 +} + +func (cc *ClientConn_grpc862) Close() { + cc.cancel() + conns := cc.conns + cc.conns = nil + for _, ac := range conns { + ac.tearDown() + } +} + +func (cc *ClientConn_grpc862) resetAddrConn() { + ac := &addrConn_grpc862{ + cc: cc, + } + cc.conns = append(cc.conns, ac) + ac.ctx, ac.cancel = context.WithCancel(cc.ctx) + ac.resetTransport() +} + +type addrConn_grpc862 struct { + cc *ClientConn_grpc862 + ctx context.Context + cancel context.CancelFunc +} + +func (ac *addrConn_grpc862) resetTransport() { + for retries := 1; ; retries++ { + _ = 2 * time.Nanosecond * time.Duration(retries) + timeout := 10 * time.Nanosecond + _, cancel := context.WithTimeout(ac.ctx, timeout) + _ = time.Now() + cancel() + <-ac.ctx.Done() + return + } +} + +func (ac *addrConn_grpc862) tearDown() { + ac.cancel() +} + +func DialContext_grpc862(ctx context.Context) (conn *ClientConn_grpc862) { + cc := &ClientConn_grpc862{} + cc.ctx, cc.cancel = context.WithCancel(context.Background()) + defer func() { + select { + case <-ctx.Done(): + if conn != nil { + conn.Close() + } + conn = nil + default: + } + }() + go func() { // G2 + cc.resetAddrConn() + }() + return conn +} + +func Grpc862() { + prof := pprof.Lookup("goroutineleak") + defer func() { + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) + }() + go func() { + ctx, cancel := context.WithCancel(context.Background()) + go DialContext_grpc862(ctx) // G1 + go cancel() // helper goroutine + }() +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/hugo3251.go b/src/runtime/testdata/testgoroutineleakprofile/goker/hugo3251.go new file mode 100644 index 00000000000000..3804692a8bf6bd --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/hugo3251.go @@ -0,0 +1,81 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
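+
+// A plausible leak scenario, inferred from this reproduction: one goroutine
+// still holds a per-URL mutex (L2) and needs the read lock on L1 to release it
+// in URLUnlock, while another goroutine inside URLLock holds the write lock on
+// L1 and is blocked acquiring that same per-URL mutex. Neither lock is ever
+// released, and every later caller piles up behind L1.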
+ +package main + +import ( + "fmt" + "os" + "runtime" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Hugo3251", Hugo3251) +} + +type remoteLock_hugo3251 struct { + sync.RWMutex // L1 + m map[string]*sync.Mutex // L2 +} + +func (l *remoteLock_hugo3251) URLLock(url string) { + l.Lock() // L1 + if _, ok := l.m[url]; !ok { + l.m[url] = &sync.Mutex{} + } + l.m[url].Lock() // L2 + runtime.Gosched() + l.Unlock() // L1 + // runtime.Gosched() +} + +func (l *remoteLock_hugo3251) URLUnlock(url string) { + l.RLock() // L1 + defer l.RUnlock() // L1 + if um, ok := l.m[url]; ok { + um.Unlock() // L2 + } +} + +func resGetRemote_hugo3251(remoteURLLock *remoteLock_hugo3251, url string) error { + remoteURLLock.URLLock(url) + defer func() { remoteURLLock.URLUnlock(url) }() + + return nil +} + +func Hugo3251() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(time.Second) + prof.WriteTo(os.Stdout, 2) + }() + + for i := 0; i < 11; i++ { + go func() { // G1 + url := "http://Foo.Bar/foo_Bar-Foo" + remoteURLLock := &remoteLock_hugo3251{m: make(map[string]*sync.Mutex)} + for range []bool{false, true} { + var wg sync.WaitGroup + for i := 0; i < 100; i++ { + wg.Add(1) + go func(gor int) { // G2 + defer wg.Done() + for j := 0; j < 200; j++ { + err := resGetRemote_hugo3251(remoteURLLock, url) + if err != nil { + fmt.Errorf("Error getting resource content: %s", err) + } + time.Sleep(300 * time.Nanosecond) + } + }(i) + } + wg.Wait() + } + }() + } +} \ No newline at end of file diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/hugo5379.go b/src/runtime/testdata/testgoroutineleakprofile/goker/hugo5379.go new file mode 100644 index 00000000000000..6a1bbe9a3f7767 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/hugo5379.go @@ -0,0 +1,317 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +package main + +import ( + "context" + "log" + "os" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Hugo5379", Hugo5379) +} + +type shortcodeHandler_hugo5379 struct { + p *PageWithoutContent_hugo5379 + contentShortcodes map[int]func() error + contentShortcodesDelta map[int]func() error + init sync.Once // O1 +} + +func (s *shortcodeHandler_hugo5379) executeShortcodesForDelta(p *PageWithoutContent_hugo5379) error { + for k, _ := range s.contentShortcodesDelta { + render := s.contentShortcodesDelta[k] + if err := render(); err != nil { + continue + } + } + return nil +} + +func (s *shortcodeHandler_hugo5379) updateDelta() { + s.init.Do(func() { + s.contentShortcodes = createShortcodeRenderers_hugo5379(s.p.withoutContent()) + }) + + delta := make(map[int]func() error) + + for k, v := range s.contentShortcodes { + if _, ok := delta[k]; !ok { + delta[k] = v + } + } + + s.contentShortcodesDelta = delta +} + +type Page_hugo5379 struct { + *pageInit_hugo5379 + *pageContentInit_hugo5379 + pageWithoutContent *PageWithoutContent_hugo5379 + contentInit sync.Once // O2 + contentInitMu sync.Mutex // L1 + shortcodeState *shortcodeHandler_hugo5379 +} + +func (p *Page_hugo5379) WordCount() { + p.initContentPlainAndMeta() +} + +func (p *Page_hugo5379) initContentPlainAndMeta() { + p.initContent() + p.initPlain(true) +} + +func (p *Page_hugo5379) initPlain(lock bool) { + p.plainInit.Do(func() { + if lock { + /// Double locking here. 
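+			/// On the shortcode render path the goroutine spawned by initContent
+			/// still holds contentInitMu when it re-enters WordCount -> initPlain,
+			/// so the Lock below blocks it on a mutex it already owns and it leaks.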
+ p.contentInitMu.Lock() + defer p.contentInitMu.Unlock() + } + }) +} + +func (p *Page_hugo5379) withoutContent() *PageWithoutContent_hugo5379 { + p.pageInit_hugo5379.withoutContentInit.Do(func() { + p.pageWithoutContent = &PageWithoutContent_hugo5379{Page_hugo5379: p} + }) + return p.pageWithoutContent +} + +func (p *Page_hugo5379) prepareForRender() error { + var err error + if err = handleShortcodes_hugo5379(p.withoutContent()); err != nil { + return err + } + return nil +} + +func (p *Page_hugo5379) setContentInit() { + p.shortcodeState.updateDelta() +} + +func (p *Page_hugo5379) initContent() { + p.contentInit.Do(func() { + ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) + defer cancel() + c := make(chan error, 1) + + go func() { // G2 + var err error + p.contentInitMu.Lock() // first lock here + defer p.contentInitMu.Unlock() + + err = p.prepareForRender() + if err != nil { + c <- err + return + } + c <- err + }() + + select { + case <-ctx.Done(): + case <-c: + } + }) +} + +type PageWithoutContent_hugo5379 struct { + *Page_hugo5379 +} + +type pageInit_hugo5379 struct { + withoutContentInit sync.Once +} + +type pageContentInit_hugo5379 struct { + contentInit sync.Once // O3 + plainInit sync.Once // O4 +} + +type HugoSites_hugo5379 struct { + Sites []*Site_hugo5379 +} + +func (h *HugoSites_hugo5379) render() { + for _, s := range h.Sites { + for _, s2 := range h.Sites { + s2.preparePagesForRender() + } + s.renderPages() + } +} + +func (h *HugoSites_hugo5379) Build() { + h.render() +} + +type Pages_hugo5379 []*Page_hugo5379 + +type PageCollections_hugo5379 struct { + Pages Pages_hugo5379 +} + +type Site_hugo5379 struct { + *PageCollections_hugo5379 +} + +func (s *Site_hugo5379) preparePagesForRender() { + for _, p := range s.Pages { + p.setContentInit() + } +} + +func (s *Site_hugo5379) renderForLayouts() { + /// Omit reflections + for _, p := range s.Pages { + p.WordCount() + } +} + +func (s *Site_hugo5379) renderAndWritePage() { + s.renderForLayouts() +} + +func (s *Site_hugo5379) renderPages() { + numWorkers := 2 + wg := &sync.WaitGroup{} + + for i := 0; i < numWorkers; i++ { + wg.Add(1) + go pageRenderer_hugo5379(s, wg) // G3 + } + + wg.Wait() +} + +type sitesBuilder_hugo5379 struct { + H *HugoSites_hugo5379 +} + +func (s *sitesBuilder_hugo5379) Build() *sitesBuilder_hugo5379 { + return s.build() +} + +func (s *sitesBuilder_hugo5379) build() *sitesBuilder_hugo5379 { + s.H.Build() + return s +} + +func (s *sitesBuilder_hugo5379) CreateSitesE() error { + sites, err := NewHugoSites_hugo5379() + if err != nil { + return err + } + s.H = sites + return nil +} + +func (s *sitesBuilder_hugo5379) CreateSites() *sitesBuilder_hugo5379 { + if err := s.CreateSitesE(); err != nil { + log.Fatalf("Failed to create sites: %s", err) + } + return s +} + +func newHugoSites_hugo5379(sites ...*Site_hugo5379) (*HugoSites_hugo5379, error) { + h := &HugoSites_hugo5379{Sites: sites} + return h, nil +} + +func newSite_hugo5379() *Site_hugo5379 { + c := &PageCollections_hugo5379{} + s := &Site_hugo5379{ + PageCollections_hugo5379: c, + } + return s +} + +func createSitesFromConfig_hugo5379() []*Site_hugo5379 { + var ( + sites []*Site_hugo5379 + ) + + var s *Site_hugo5379 = newSite_hugo5379() + sites = append(sites, s) + return sites +} + +func NewHugoSites_hugo5379() (*HugoSites_hugo5379, error) { + sites := createSitesFromConfig_hugo5379() + return newHugoSites_hugo5379(sites...) 
+} + +func prepareShortcodeForPage_hugo5379(p *PageWithoutContent_hugo5379) map[int]func() error { + m := make(map[int]func() error) + m[0] = func() error { + return renderShortcode_hugo5379(p) + } + return m +} + +func renderShortcode_hugo5379(p *PageWithoutContent_hugo5379) error { + return renderShortcodeWithPage_hugo5379(p) +} + +func renderShortcodeWithPage_hugo5379(p *PageWithoutContent_hugo5379) error { + /// Omit reflections + p.WordCount() + return nil +} + +func createShortcodeRenderers_hugo5379(p *PageWithoutContent_hugo5379) map[int]func() error { + return prepareShortcodeForPage_hugo5379(p) +} + +func newShortcodeHandler_hugo5379(p *Page_hugo5379) *shortcodeHandler_hugo5379 { + return &shortcodeHandler_hugo5379{ + p: p.withoutContent(), + contentShortcodes: make(map[int]func() error), + contentShortcodesDelta: make(map[int]func() error), + } +} + +func handleShortcodes_hugo5379(p *PageWithoutContent_hugo5379) error { + return p.shortcodeState.executeShortcodesForDelta(p) +} + +func pageRenderer_hugo5379(s *Site_hugo5379, wg *sync.WaitGroup) { + defer wg.Done() + s.renderAndWritePage() +} + +func Hugo5379() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + for i := 0; i < 100; i++ { + go func() { // G1 + b := &sitesBuilder_hugo5379{} + s := b.CreateSites() + for _, site := range s.H.Sites { + p := &Page_hugo5379{ + pageInit_hugo5379: &pageInit_hugo5379{}, + pageContentInit_hugo5379: &pageContentInit_hugo5379{}, + pageWithoutContent: &PageWithoutContent_hugo5379{}, + contentInit: sync.Once{}, + contentInitMu: sync.Mutex{}, + shortcodeState: nil, + } + p.shortcodeState = newShortcodeHandler_hugo5379(p) + site.Pages = append(site.Pages, p) + } + s.Build() + }() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/istio16224.go b/src/runtime/testdata/testgoroutineleakprofile/goker/istio16224.go new file mode 100644 index 00000000000000..839051cc64a18d --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/istio16224.go @@ -0,0 +1,129 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +package main + +import ( + "os" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Istio16224", Istio16224) +} + +type ConfigStoreCache_istio16224 interface { + RegisterEventHandler(handler func()) + Run() +} + +type Event_istio16224 int + +type Handler_istio16224 func(Event_istio16224) + +type configstoreMonitor_istio16224 struct { + handlers []Handler_istio16224 + eventCh chan Event_istio16224 +} + +func (m *configstoreMonitor_istio16224) Run(stop <-chan struct{}) { + for { + select { + case <-stop: + // This bug is not descibed, but is a true positive (in our eyes) + // In a real run main exits when the goro is blocked here. 
+ if _, ok := <-m.eventCh; ok { + close(m.eventCh) + } + return + case ce, ok := <-m.eventCh: + if ok { + m.processConfigEvent(ce) + } + } + } +} + +func (m *configstoreMonitor_istio16224) processConfigEvent(ce Event_istio16224) { + m.applyHandlers(ce) +} + +func (m *configstoreMonitor_istio16224) AppendEventHandler(h Handler_istio16224) { + m.handlers = append(m.handlers, h) +} + +func (m *configstoreMonitor_istio16224) applyHandlers(e Event_istio16224) { + for _, f := range m.handlers { + f(e) + } +} +func (m *configstoreMonitor_istio16224) ScheduleProcessEvent(configEvent Event_istio16224) { + m.eventCh <- configEvent +} + +type Monitor_istio16224 interface { + Run(<-chan struct{}) + AppendEventHandler(Handler_istio16224) + ScheduleProcessEvent(Event_istio16224) +} + +type controller_istio16224 struct { + monitor Monitor_istio16224 +} + +func (c *controller_istio16224) RegisterEventHandler(f func(Event_istio16224)) { + c.monitor.AppendEventHandler(f) +} + +func (c *controller_istio16224) Run(stop <-chan struct{}) { + c.monitor.Run(stop) +} + +func (c *controller_istio16224) Create() { + c.monitor.ScheduleProcessEvent(Event_istio16224(0)) +} + +func NewMonitor_istio16224() Monitor_istio16224 { + return NewBufferedMonitor_istio16224() +} + +func NewBufferedMonitor_istio16224() Monitor_istio16224 { + return &configstoreMonitor_istio16224{ + eventCh: make(chan Event_istio16224), + } +} + +func Istio16224() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + for i := 0; i < 100; i++ { + go func() { + controller := &controller_istio16224{monitor: NewMonitor_istio16224()} + done := make(chan bool) + lock := sync.Mutex{} + controller.RegisterEventHandler(func(event Event_istio16224) { + lock.Lock() + defer lock.Unlock() + done <- true + }) + + stop := make(chan struct{}) + go controller.Run(stop) + + controller.Create() + + lock.Lock() // blocks + lock.Unlock() + <-done + + close(stop) + }() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/istio17860.go b/src/runtime/testdata/testgoroutineleakprofile/goker/istio17860.go new file mode 100644 index 00000000000000..aa8317c6d5f41b --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/istio17860.go @@ -0,0 +1,144 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
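+
+// A plausible leak scenario, inferred from this reproduction: Run returns as
+// soon as activeEpochs becomes empty after the first epoch's runWait delivers
+// its status. A later Restart then registers a new epoch whose runWait
+// goroutine blocks forever sending on the unbuffered statusCh, because nothing
+// receives from it anymore.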
+ +package main + +import ( + "context" + "os" + "runtime/pprof" + + "sync" + "time" +) + +func init() { + register("Istio17860", Istio17860) +} + +type Proxy_istio17860 interface { + IsLive() bool +} + +type TestProxy_istio17860 struct { + live func() bool +} + +func (tp TestProxy_istio17860) IsLive() bool { + if tp.live == nil { + return true + } + return tp.live() +} + +type Agent_istio17860 interface { + Run(ctx context.Context) + Restart() +} + +type exitStatus_istio17860 int + +type agent_istio17860 struct { + proxy Proxy_istio17860 + mu *sync.Mutex + statusCh chan exitStatus_istio17860 + currentEpoch int + activeEpochs map[int]struct{} +} + +func (a *agent_istio17860) Run(ctx context.Context) { + for { + select { + case status := <-a.statusCh: + a.mu.Lock() + delete(a.activeEpochs, int(status)) + active := len(a.activeEpochs) + a.mu.Unlock() + if active == 0 { + return + } + case <-ctx.Done(): + return + } + } +} + +func (a *agent_istio17860) Restart() { + a.mu.Lock() + defer a.mu.Unlock() + + a.waitUntilLive() + a.currentEpoch++ + a.activeEpochs[a.currentEpoch] = struct{}{} + + go a.runWait(a.currentEpoch) +} + +func (a *agent_istio17860) runWait(epoch int) { + a.statusCh <- exitStatus_istio17860(epoch) +} + +func (a *agent_istio17860) waitUntilLive() { + if len(a.activeEpochs) == 0 { + return + } + + interval := time.NewTicker(30 * time.Nanosecond) + timer := time.NewTimer(100 * time.Nanosecond) + defer func() { + interval.Stop() + timer.Stop() + }() + + if a.proxy.IsLive() { + return + } + + for { + select { + case <-timer.C: + return + case <-interval.C: + if a.proxy.IsLive() { + return + } + } + } +} + +func NewAgent_istio17860(proxy Proxy_istio17860) Agent_istio17860 { + return &agent_istio17860{ + proxy: proxy, + mu: &sync.Mutex{}, + statusCh: make(chan exitStatus_istio17860), + activeEpochs: make(map[int]struct{}), + } +} + +func Istio17860() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + for i := 0; i < 100; i++ { + go func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + neverLive := func() bool { + return false + } + + a := NewAgent_istio17860(TestProxy_istio17860{live: neverLive}) + go func() { a.Run(ctx) }() + + a.Restart() + go a.Restart() + + time.Sleep(200 * time.Nanosecond) + }() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/istio18454.go b/src/runtime/testdata/testgoroutineleakprofile/goker/istio18454.go new file mode 100644 index 00000000000000..b410c490322440 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/istio18454.go @@ -0,0 +1,154 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
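+
+// A plausible leak scenario, inferred from this reproduction: once the event
+// loop has consumed a tick via <-s.timer.C, a later reset can see timer.Stop()
+// return false and then block forever on a second <-s.timer.C, because the
+// fired value was already drained. With the event loop stuck, resetChan's
+// one-slot buffer fills, OnChange blocks, and the processor's run goroutine
+// never returns, so its deferred Close is never reached.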
+ +package main + +import ( + "context" + "os" + "runtime/pprof" + + "sync" + "time" +) + +func init() { + register("Istio18454", Istio18454) +} + +const eventChCap_istio18454 = 1024 + +type Worker_istio18454 struct { + ctx context.Context + ctxCancel context.CancelFunc +} + +func (w *Worker_istio18454) Start(setupFn func(), runFn func(c context.Context)) { + if setupFn != nil { + setupFn() + } + go func() { + runFn(w.ctx) + }() +} + +func (w *Worker_istio18454) Stop() { + w.ctxCancel() +} + +type Strategy_istio18454 struct { + timer *time.Timer + timerFrequency time.Duration + stateLock sync.Mutex + resetChan chan struct{} + worker *Worker_istio18454 + startTimerFn func() +} + +func (s *Strategy_istio18454) OnChange() { + s.stateLock.Lock() + if s.timer != nil { + s.stateLock.Unlock() + s.resetChan <- struct{}{} + return + } + s.startTimerFn() + s.stateLock.Unlock() +} + +func (s *Strategy_istio18454) startTimer() { + s.timer = time.NewTimer(s.timerFrequency) + eventLoop := func(ctx context.Context) { + for { + select { + case <-s.timer.C: + case <-s.resetChan: + if !s.timer.Stop() { + <-s.timer.C + } + s.timer.Reset(s.timerFrequency) + case <-ctx.Done(): + s.timer.Stop() + return + } + } + } + s.worker.Start(nil, eventLoop) +} + +func (s *Strategy_istio18454) Close() { + s.worker.Stop() +} + +type Event_istio18454 int + +type Processor_istio18454 struct { + stateStrategy *Strategy_istio18454 + worker *Worker_istio18454 + eventCh chan Event_istio18454 +} + +func (p *Processor_istio18454) processEvent() { + p.stateStrategy.OnChange() +} + +func (p *Processor_istio18454) Start() { + setupFn := func() { + for i := 0; i < eventChCap_istio18454; i++ { + p.eventCh <- Event_istio18454(0) + } + } + runFn := func(ctx context.Context) { + defer func() { + p.stateStrategy.Close() + }() + for { + select { + case <-ctx.Done(): + return + case <-p.eventCh: + p.processEvent() + } + } + } + p.worker.Start(setupFn, runFn) +} + +func (p *Processor_istio18454) Stop() { + p.worker.Stop() +} + +func NewWorker_istio18454() *Worker_istio18454 { + worker := &Worker_istio18454{} + worker.ctx, worker.ctxCancel = context.WithCancel(context.Background()) + return worker +} + +func Istio18454() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + for i := 0; i < 100; i++ { + go func() { + stateStrategy := &Strategy_istio18454{ + timerFrequency: time.Nanosecond, + resetChan: make(chan struct{}, 1), + worker: NewWorker_istio18454(), + } + stateStrategy.startTimerFn = stateStrategy.startTimer + + p := &Processor_istio18454{ + stateStrategy: stateStrategy, + worker: NewWorker_istio18454(), + eventCh: make(chan Event_istio18454, eventChCap_istio18454), + } + + p.Start() + defer p.Stop() + }() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes10182.go b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes10182.go new file mode 100644 index 00000000000000..0eca5f41fbfab4 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes10182.go @@ -0,0 +1,95 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
+ +/* + * Project: kubernetes + * Issue or PR : https://github.com/kubernetes/kubernetes/pull/10182 + * Buggy version: 4b990d128a17eea9058d28a3b3688ab8abafbd94 + * fix commit-id: 64ad3e17ad15cd0f9a4fd86706eec1c572033254 + * Flaky: 15/100 + */ +package main + +import ( + "os" + "runtime" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Kubernetes10182", Kubernetes10182) +} + +type statusManager_kubernetes10182 struct { + podStatusesLock sync.RWMutex + podStatusChannel chan bool +} + +func (s *statusManager_kubernetes10182) Start() { + go func() { + for i := 0; i < 2; i++ { + s.syncBatch() + } + }() +} + +func (s *statusManager_kubernetes10182) syncBatch() { + runtime.Gosched() + <-s.podStatusChannel + s.DeletePodStatus() +} + +func (s *statusManager_kubernetes10182) DeletePodStatus() { + s.podStatusesLock.Lock() + defer s.podStatusesLock.Unlock() +} + +func (s *statusManager_kubernetes10182) SetPodStatus() { + s.podStatusesLock.Lock() + defer s.podStatusesLock.Unlock() + s.podStatusChannel <- true +} + +func NewStatusManager_kubernetes10182() *statusManager_kubernetes10182 { + return &statusManager_kubernetes10182{ + podStatusChannel: make(chan bool), + } +} + +// Example of deadlock trace: +// +// G1 G2 G3 +// -------------------------------------------------------------------------------- +// s.Start() +// s.syncBatch() +// s.SetPodStatus() +// <-s.podStatusChannel +// s.podStatusesLock.Lock() +// s.podStatusChannel <- true +// s.podStatusesLock.Unlock() +// return +// s.DeletePodStatus() +// s.podStatusesLock.Lock() +// s.podStatusChannel <- true +// s.podStatusesLock.Lock() +// -----------------------------------G1,G3 leak------------------------------------- + +func Kubernetes10182() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + for i := 0; i < 1000; i++ { + go func() { + s := NewStatusManager_kubernetes10182() + go s.Start() + go s.SetPodStatus() + go s.SetPodStatus() + }() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes11298.go b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes11298.go new file mode 100644 index 00000000000000..36405f240a6612 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes11298.go @@ -0,0 +1,118 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
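+
+// A plausible leak scenario, inferred from this reproduction: on abort,
+// serviceLoop calls cond.Signal and then waits on <-ch, but the goroutine
+// spawned by After may not have reached cond.Wait yet. That signal is lost, the
+// waiter parks forever, ch is never closed, and serviceLoop, Notify and the
+// caller blocked on <-notifyDone all leak.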
+ +package main + +import ( + "os" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Kubernetes11298", Kubernetes11298) +} + +type Signal_kubernetes11298 <-chan struct{} + +func After_kubernetes11298(f func()) Signal_kubernetes11298 { + ch := make(chan struct{}) + go func() { + defer close(ch) + if f != nil { + f() + } + }() + return Signal_kubernetes11298(ch) +} + +func Until_kubernetes11298(f func(), period time.Duration, stopCh <-chan struct{}) { + if f == nil { + return + } + for { + select { + case <-stopCh: + return + default: + } + f() + select { + case <-stopCh: + case <-time.After(period): + } + } + +} + +type notifier_kubernetes11298 struct { + lock sync.Mutex + cond *sync.Cond +} + +// abort will be closed no matter what +func (n *notifier_kubernetes11298) serviceLoop(abort <-chan struct{}) { + n.lock.Lock() + defer n.lock.Unlock() + for { + select { + case <-abort: + return + default: + ch := After_kubernetes11298(func() { + n.cond.Wait() + }) + select { + case <-abort: + n.cond.Signal() + <-ch + return + case <-ch: + } + } + } +} + +// abort will be closed no matter what +func Notify_kubernetes11298(abort <-chan struct{}) { + n := ¬ifier_kubernetes11298{} + n.cond = sync.NewCond(&n.lock) + finished := After_kubernetes11298(func() { + Until_kubernetes11298(func() { + for { + select { + case <-abort: + return + default: + func() { + n.lock.Lock() + defer n.lock.Unlock() + n.cond.Signal() + }() + } + } + }, 0, abort) + }) + Until_kubernetes11298(func() { n.serviceLoop(finished) }, 0, abort) +} +func Kubernetes11298() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + for i := 0; i < 1000; i++ { + go func() { + done := make(chan struct{}) + notifyDone := After_kubernetes11298(func() { Notify_kubernetes11298(done) }) + go func() { + defer close(done) + time.Sleep(300 * time.Nanosecond) + }() + <-notifyDone + }() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes13135.go b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes13135.go new file mode 100644 index 00000000000000..f6aa8b9ddfe178 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes13135.go @@ -0,0 +1,166 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
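+
+// A plausible leak scenario, inferred from this reproduction: the caching
+// goroutine takes the Cacher lock in startCaching and then needs the WatchCache
+// lock inside Replace (relying on the onReplace callback to release the Cacher
+// lock), while a concurrent Add holds the WatchCache lock and its onEvent
+// callback blocks on the Cacher lock. Each goroutine holds the lock the other
+// needs, so both leak; closing the stop channel does not help, since it is only
+// checked between iterations.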
+ +/* + * Project: kubernetes + * Issue or PR : https://github.com/kubernetes/kubernetes/pull/13135 + * Buggy version: 6ced66249d4fd2a81e86b4a71d8df0139fe5ceae + * fix commit-id: a12b7edc42c5c06a2e7d9f381975658692951d5a + * Flaky: 93/100 + */ +package main + +import ( + "os" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Kubernetes13135", Kubernetes13135) +} + +var ( + StopChannel_kubernetes13135 chan struct{} +) + +func Util_kubernetes13135(f func(), period time.Duration, stopCh <-chan struct{}) { + for { + select { + case <-stopCh: + return + default: + } + func() { + f() + }() + time.Sleep(period) + } +} + +type Store_kubernetes13135 interface { + Add(obj interface{}) + Replace(obj interface{}) +} + +type Reflector_kubernetes13135 struct { + store Store_kubernetes13135 +} + +func (r *Reflector_kubernetes13135) ListAndWatch(stopCh <-chan struct{}) error { + r.syncWith() + return nil +} + +func NewReflector_kubernetes13135(store Store_kubernetes13135) *Reflector_kubernetes13135 { + return &Reflector_kubernetes13135{ + store: store, + } +} + +func (r *Reflector_kubernetes13135) syncWith() { + r.store.Replace(nil) +} + +type Cacher_kubernetes13135 struct { + sync.Mutex + initialized sync.WaitGroup + initOnce sync.Once + watchCache *WatchCache_kubernetes13135 + reflector *Reflector_kubernetes13135 +} + +func (c *Cacher_kubernetes13135) processEvent() { + c.Lock() + defer c.Unlock() +} + +func (c *Cacher_kubernetes13135) startCaching(stopChannel <-chan struct{}) { + c.Lock() + for { + err := c.reflector.ListAndWatch(stopChannel) + if err == nil { + break + } + } +} + +type WatchCache_kubernetes13135 struct { + sync.RWMutex + onReplace func() + onEvent func() +} + +func (w *WatchCache_kubernetes13135) SetOnEvent(onEvent func()) { + w.Lock() + defer w.Unlock() + w.onEvent = onEvent +} + +func (w *WatchCache_kubernetes13135) SetOnReplace(onReplace func()) { + w.Lock() + defer w.Unlock() + w.onReplace = onReplace +} + +func (w *WatchCache_kubernetes13135) processEvent() { + w.Lock() + defer w.Unlock() + if w.onEvent != nil { + w.onEvent() + } +} + +func (w *WatchCache_kubernetes13135) Add(obj interface{}) { + w.processEvent() +} + +func (w *WatchCache_kubernetes13135) Replace(obj interface{}) { + w.Lock() + defer w.Unlock() + if w.onReplace != nil { + w.onReplace() + } +} + +func NewCacher_kubernetes13135(stopCh <-chan struct{}) *Cacher_kubernetes13135 { + watchCache := &WatchCache_kubernetes13135{} + cacher := &Cacher_kubernetes13135{ + initialized: sync.WaitGroup{}, + watchCache: watchCache, + reflector: NewReflector_kubernetes13135(watchCache), + } + cacher.initialized.Add(1) + watchCache.SetOnReplace(func() { + cacher.initOnce.Do(func() { cacher.initialized.Done() }) + cacher.Unlock() + }) + watchCache.SetOnEvent(cacher.processEvent) + go Util_kubernetes13135(func() { cacher.startCaching(stopCh) }, 0, stopCh) // G2 + cacher.initialized.Wait() + return cacher +} + +func Kubernetes13135() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + StopChannel_kubernetes13135 = make(chan struct{}) + for i := 0; i < 50; i++ { + go func() { + // Should create a local channel. Using a single global channel + // concurrently will cause a deadlock which does not actually exist + // in the original microbenchmark. 
+ StopChannel_kubernetes13135 := make(chan struct{}) + + c := NewCacher_kubernetes13135(StopChannel_kubernetes13135) // G1 + go c.watchCache.Add(nil) // G3 + go close(StopChannel_kubernetes13135) + }() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes1321.go b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes1321.go new file mode 100644 index 00000000000000..6c0139a9d24fcb --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes1321.go @@ -0,0 +1,100 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +/* + * Project: kubernetes + * Issue or PR : https://github.com/kubernetes/kubernetes/pull/1321 + * Buggy version: 9cd0fc70f1ca852c903b18b0933991036b3b2fa1 + * fix commit-id: 435e0b73bb99862f9dedf56a50260ff3dfef14ff + * Flaky: 1/100 + */ +package main + +import ( + "os" + "runtime" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Kubernetes1321", Kubernetes1321) +} + +type muxWatcher_kubernetes1321 struct { + result chan struct{} + m *Mux_kubernetes1321 + id int64 +} + +func (mw *muxWatcher_kubernetes1321) Stop() { + mw.m.stopWatching(mw.id) +} + +type Mux_kubernetes1321 struct { + lock sync.Mutex + watchers map[int64]*muxWatcher_kubernetes1321 +} + +func NewMux_kubernetes1321() *Mux_kubernetes1321 { + m := &Mux_kubernetes1321{ + watchers: map[int64]*muxWatcher_kubernetes1321{}, + } + go m.loop() // G2 + return m +} + +func (m *Mux_kubernetes1321) Watch() *muxWatcher_kubernetes1321 { + mw := &muxWatcher_kubernetes1321{ + result: make(chan struct{}), + m: m, + id: int64(len(m.watchers)), + } + m.watchers[mw.id] = mw + runtime.Gosched() + return mw +} + +func (m *Mux_kubernetes1321) loop() { + for i := 0; i < 100; i++ { + m.distribute() + } +} + +func (m *Mux_kubernetes1321) distribute() { + m.lock.Lock() + defer m.lock.Unlock() + for _, w := range m.watchers { + w.result <- struct{}{} + runtime.Gosched() + } +} + +func (m *Mux_kubernetes1321) stopWatching(id int64) { + m.lock.Lock() + defer m.lock.Unlock() + w, ok := m.watchers[id] + if !ok { + return + } + delete(m.watchers, id) + close(w.result) +} + +func testMuxWatcherClose_kubernetes1321() { + m := NewMux_kubernetes1321() + m.watchers[m.Watch().id].Stop() +} + +func Kubernetes1321() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + for i := 0; i < 1000; i++ { + go testMuxWatcherClose_kubernetes1321() // G1 + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes25331.go b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes25331.go new file mode 100644 index 00000000000000..323cb236c0687a --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes25331.go @@ -0,0 +1,72 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
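+
+// A plausible leak scenario, inferred from this reproduction: after Stop
+// delivers the error, run blocks forever on its `wc.resultChan <- errResult`
+// send, because nothing ever receives from resultChan.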
+ +/* + * Project: kubernetes + * Issue or PR : https://github.com/kubernetes/kubernetes/pull/25331 + * Buggy version: 5dd087040bb13434f1ddf2f0693d0203c30f28cb + * fix commit-id: 97f4647dc3d8cf46c2b66b89a31c758a6edfb57c + * Flaky: 100/100 + */ +package main + +import ( + "context" + "errors" + "os" + "runtime" + "runtime/pprof" +) + +func init() { + register("Kubernetes25331", Kubernetes25331) +} + +type watchChan_kubernetes25331 struct { + ctx context.Context + cancel context.CancelFunc + resultChan chan bool + errChan chan error +} + +func (wc *watchChan_kubernetes25331) Stop() { + wc.errChan <- errors.New("Error") + wc.cancel() +} + +func (wc *watchChan_kubernetes25331) run() { + select { + case err := <-wc.errChan: + errResult := len(err.Error()) != 0 + wc.cancel() // Removed in fix + wc.resultChan <- errResult + case <-wc.ctx.Done(): + } +} + +func NewWatchChan_kubernetes25331() *watchChan_kubernetes25331 { + ctx, cancel := context.WithCancel(context.Background()) + return &watchChan_kubernetes25331{ + ctx: ctx, + cancel: cancel, + resultChan: make(chan bool), + errChan: make(chan error), + } +} + +func Kubernetes25331() { + prof := pprof.Lookup("goroutineleak") + defer func() { + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) + }() + go func() { + wc := NewWatchChan_kubernetes25331() + go wc.run() // G1 + go wc.Stop() // G2 + }() +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes26980.go b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes26980.go new file mode 100644 index 00000000000000..38e53cf4ad7218 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes26980.go @@ -0,0 +1,87 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
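+
+// A plausible leaking interleaving, inferred from this reproduction: pop grabs
+// pl.lock and then blocks in its select waiting for stopCh; stopCh is closed
+// only when the parent goroutine returns, but the parent is waiting on
+// resultCh, which is closed only once the helper goroutine acquires pl.lock.
+// The three goroutines wait on each other in a cycle and all of them leak.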
+ +package main + +import ( + "os" + "runtime" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Kubernetes26980", Kubernetes26980) +} + +type processorListener_kubernetes26980 struct { + lock sync.RWMutex + cond sync.Cond + + pendingNotifications []interface{} +} + +func (p *processorListener_kubernetes26980) add(notification interface{}) { + p.lock.Lock() + defer p.lock.Unlock() + + p.pendingNotifications = append(p.pendingNotifications, notification) + p.cond.Broadcast() +} + +func (p *processorListener_kubernetes26980) pop(stopCh <-chan struct{}) { + p.lock.Lock() + runtime.Gosched() + defer p.lock.Unlock() + for { + for len(p.pendingNotifications) == 0 { + select { + case <-stopCh: + return + default: + } + p.cond.Wait() + } + select { + case <-stopCh: + return + } + } +} + +func newProcessListener_kubernetes26980() *processorListener_kubernetes26980 { + ret := &processorListener_kubernetes26980{ + pendingNotifications: []interface{}{}, + } + ret.cond.L = &ret.lock + return ret +} +func Kubernetes26980() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + for i := 0; i < 3000; i++ { + go func() { + pl := newProcessListener_kubernetes26980() + stopCh := make(chan struct{}) + defer close(stopCh) + pl.add(1) + runtime.Gosched() + go pl.pop(stopCh) + + resultCh := make(chan struct{}) + go func() { + pl.lock.Lock() + close(resultCh) + }() + runtime.Gosched() + <-resultCh + pl.lock.Unlock() + }() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes30872.go b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes30872.go new file mode 100644 index 00000000000000..00cdcf2b678692 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes30872.go @@ -0,0 +1,223 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
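+
+// Leak sketch (inferred from the code below): an AB-BA inversion between the
+// federatedInformerImpl mutex (L1) and the DeltaFIFO lock (L2): Pop() holds
+// L2 across the Process callback, which re-enters the informer and takes L1,
+// while ClustersSynced() holds L1 and calls HasSynced(), which takes L2.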
+ +package main + +import ( + "os" + "runtime" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Kubernetes30872", Kubernetes30872) +} + +type PopProcessFunc_kubernetes30872 func() + +type ProcessFunc_kubernetes30872 func() + +func Util_kubernetes30872(f func(), stopCh <-chan struct{}) { + JitterUntil_kubernetes30872(f, stopCh) +} + +func JitterUntil_kubernetes30872(f func(), stopCh <-chan struct{}) { + for { + select { + case <-stopCh: + return + default: + } + func() { + f() + }() + } +} + +type Queue_kubernetes30872 interface { + HasSynced() + Pop(PopProcessFunc_kubernetes30872) +} + +type Config_kubernetes30872 struct { + Queue Queue_kubernetes30872 + Process ProcessFunc_kubernetes30872 +} + +type Controller_kubernetes30872 struct { + config Config_kubernetes30872 +} + +func (c *Controller_kubernetes30872) Run(stopCh <-chan struct{}) { + Util_kubernetes30872(c.processLoop, stopCh) +} + +func (c *Controller_kubernetes30872) HasSynced() { + c.config.Queue.HasSynced() +} + +func (c *Controller_kubernetes30872) processLoop() { + c.config.Queue.Pop(PopProcessFunc_kubernetes30872(c.config.Process)) +} + +type ControllerInterface_kubernetes30872 interface { + Run(<-chan struct{}) + HasSynced() +} + +type ResourceEventHandler_kubernetes30872 interface { + OnAdd() +} + +type ResourceEventHandlerFuncs_kubernetes30872 struct { + AddFunc func() +} + +func (r ResourceEventHandlerFuncs_kubernetes30872) OnAdd() { + if r.AddFunc != nil { + r.AddFunc() + } +} + +type informer_kubernetes30872 struct { + controller ControllerInterface_kubernetes30872 + + stopChan chan struct{} +} + +type federatedInformerImpl_kubernetes30872 struct { + sync.Mutex + clusterInformer informer_kubernetes30872 +} + +func (f *federatedInformerImpl_kubernetes30872) ClustersSynced() { + f.Lock() // L1 + defer f.Unlock() + f.clusterInformer.controller.HasSynced() +} + +func (f *federatedInformerImpl_kubernetes30872) addCluster() { + f.Lock() // L1 + defer f.Unlock() +} + +func (f *federatedInformerImpl_kubernetes30872) Start() { + f.Lock() // L1 + defer f.Unlock() + + f.clusterInformer.stopChan = make(chan struct{}) + go f.clusterInformer.controller.Run(f.clusterInformer.stopChan) // G2 + runtime.Gosched() +} + +func (f *federatedInformerImpl_kubernetes30872) Stop() { + f.Lock() // L1 + defer f.Unlock() + close(f.clusterInformer.stopChan) +} + +type DelayingDeliverer_kubernetes30872 struct{} + +func (d *DelayingDeliverer_kubernetes30872) StartWithHandler(handler func()) { + go func() { // G4 + handler() + }() +} + +type FederationView_kubernetes30872 interface { + ClustersSynced() +} + +type FederatedInformer_kubernetes30872 interface { + FederationView_kubernetes30872 + Start() + Stop() +} + +type NamespaceController_kubernetes30872 struct { + namespaceDeliverer *DelayingDeliverer_kubernetes30872 + namespaceFederatedInformer FederatedInformer_kubernetes30872 +} + +func (nc *NamespaceController_kubernetes30872) isSynced() { + nc.namespaceFederatedInformer.ClustersSynced() +} + +func (nc *NamespaceController_kubernetes30872) reconcileNamespace() { + nc.isSynced() +} + +func (nc *NamespaceController_kubernetes30872) Run(stopChan <-chan struct{}) { + nc.namespaceFederatedInformer.Start() + go func() { // G3 + <-stopChan + nc.namespaceFederatedInformer.Stop() + }() + nc.namespaceDeliverer.StartWithHandler(func() { + nc.reconcileNamespace() + }) +} + +type DeltaFIFO_kubernetes30872 struct { + lock sync.RWMutex +} + +func (f *DeltaFIFO_kubernetes30872) HasSynced() { + f.lock.Lock() // L2 + defer f.lock.Unlock() +} + 
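+// Note (added for clarity): Pop holds the FIFO lock (L2) for the duration of
+// the process callback; in this reproduction the callback re-enters the
+// informer and acquires L1, which is what makes the inversion possible.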
+func (f *DeltaFIFO_kubernetes30872) Pop(process PopProcessFunc_kubernetes30872) { + f.lock.Lock() // L2 + defer f.lock.Unlock() + process() +} + +func NewFederatedInformer_kubernetes30872() FederatedInformer_kubernetes30872 { + federatedInformer := &federatedInformerImpl_kubernetes30872{} + federatedInformer.clusterInformer.controller = NewInformer_kubernetes30872( + ResourceEventHandlerFuncs_kubernetes30872{ + AddFunc: func() { + federatedInformer.addCluster() + }, + }) + return federatedInformer +} + +func NewInformer_kubernetes30872(h ResourceEventHandler_kubernetes30872) *Controller_kubernetes30872 { + fifo := &DeltaFIFO_kubernetes30872{} + cfg := &Config_kubernetes30872{ + Queue: fifo, + Process: func() { + h.OnAdd() + }, + } + return &Controller_kubernetes30872{config: *cfg} +} + +func NewNamespaceController_kubernetes30872() *NamespaceController_kubernetes30872 { + nc := &NamespaceController_kubernetes30872{} + nc.namespaceDeliverer = &DelayingDeliverer_kubernetes30872{} + nc.namespaceFederatedInformer = NewFederatedInformer_kubernetes30872() + return nc +} + +func Kubernetes30872() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + for i := 0; i < 100; i++ { + go func() { // G1 + namespaceController := NewNamespaceController_kubernetes30872() + stop := make(chan struct{}) + namespaceController.Run(stop) + close(stop) + }() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes38669.go b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes38669.go new file mode 100644 index 00000000000000..27020d580493bf --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes38669.go @@ -0,0 +1,83 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +package main + +import ( + "os" + "runtime" + "runtime/pprof" + "sync" +) + +func init() { + register("Kubernetes38669", Kubernetes38669) +} + +type Event_kubernetes38669 int +type watchCacheEvent_kubernetes38669 int + +type cacheWatcher_kubernetes38669 struct { + sync.Mutex + input chan watchCacheEvent_kubernetes38669 + result chan Event_kubernetes38669 + stopped bool +} + +func (c *cacheWatcher_kubernetes38669) process(initEvents []watchCacheEvent_kubernetes38669) { + for _, event := range initEvents { + c.sendWatchCacheEvent(&event) + } + defer close(c.result) + defer c.Stop() + for { + _, ok := <-c.input + if !ok { + return + } + } +} + +func (c *cacheWatcher_kubernetes38669) sendWatchCacheEvent(event *watchCacheEvent_kubernetes38669) { + c.result <- Event_kubernetes38669(*event) +} + +func (c *cacheWatcher_kubernetes38669) Stop() { + c.stop() +} + +func (c *cacheWatcher_kubernetes38669) stop() { + c.Lock() + defer c.Unlock() + if !c.stopped { + c.stopped = true + close(c.input) + } +} + +func newCacheWatcher_kubernetes38669(chanSize int, initEvents []watchCacheEvent_kubernetes38669) *cacheWatcher_kubernetes38669 { + watcher := &cacheWatcher_kubernetes38669{ + input: make(chan watchCacheEvent_kubernetes38669, chanSize), + result: make(chan Event_kubernetes38669, chanSize), + stopped: false, + } + go watcher.process(initEvents) + return watcher +} + +func Kubernetes38669() { + prof := pprof.Lookup("goroutineleak") + defer func() { + // Yield several times to allow the child goroutine to run. 
+ for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) + }() + go func() { + initEvents := []watchCacheEvent_kubernetes38669{1, 2} + w := newCacheWatcher_kubernetes38669(0, initEvents) + w.Stop() + }() +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes5316.go b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes5316.go new file mode 100644 index 00000000000000..fd51484a0f773b --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes5316.go @@ -0,0 +1,68 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +/* + * Project: kubernetes + * Issue or PR : https://github.com/kubernetes/kubernetes/pull/5316 + * Buggy version: c868b0bbf09128960bc7c4ada1a77347a464d876 + * fix commit-id: cc3a433a7abc89d2f766d4c87eaae9448e3dc091 + * Flaky: 100/100 + */ + +package main + +import ( + "errors" + "math/rand" + "os" + "runtime" + "runtime/pprof" + "time" +) + +func init() { + register("Kubernetes5316", Kubernetes5316) +} + +func finishRequest_kubernetes5316(timeout time.Duration, fn func() error) { + ch := make(chan bool) + errCh := make(chan error) + go func() { // G2 + if err := fn(); err != nil { + errCh <- err + } else { + ch <- true + } + }() + + select { + case <-ch: + case <-errCh: + case <-time.After(timeout): + } +} + +func Kubernetes5316() { + prof := pprof.Lookup("goroutineleak") + defer func() { + // Wait a bit because the child goroutine relies on timed operations. + time.Sleep(100 * time.Millisecond) + + // Yield several times to allow the child goroutine to run + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) + }() + go func() { + fn := func() error { + time.Sleep(2 * time.Millisecond) + if rand.Intn(10) > 5 { + return errors.New("Error") + } + return nil + } + go finishRequest_kubernetes5316(time.Microsecond, fn) // G1 + }() +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes58107.go b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes58107.go new file mode 100644 index 00000000000000..0ca707e981693a --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes58107.go @@ -0,0 +1,108 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
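+
+// Leak sketch (inferred from the code below): a worker that reaches
+// cond.Wait() after the last Put() signal waits forever while still holding
+// the controller's read lock, so Sync() then leaks blocked on the write lock.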
+ +/* + * Project: kubernetes + * Tag: Reproduce misbehavior + * Issue or PR : https://github.com/kubernetes/kubernetes/pull/58107 + * Buggy version: 2f17d782eb2772d6401da7ddced9ac90656a7a79 + * fix commit-id: 010a127314a935d8d038f8dd4559fc5b249813e4 + * Flaky: 53/100 + */ + +package main + +import ( + "os" + "runtime" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Kubernetes58107", Kubernetes58107) +} + +type RateLimitingInterface_kubernetes58107 interface { + Get() + Put() +} + +type Type_kubernetes58107 struct { + cond *sync.Cond +} + +func (q *Type_kubernetes58107) Get() { + q.cond.L.Lock() + defer q.cond.L.Unlock() + q.cond.Wait() +} + +func (q *Type_kubernetes58107) Put() { + q.cond.Signal() +} + +type ResourceQuotaController_kubernetes58107 struct { + workerLock sync.RWMutex + queue RateLimitingInterface_kubernetes58107 + missingUsageQueue RateLimitingInterface_kubernetes58107 +} + +func (rq *ResourceQuotaController_kubernetes58107) worker(queue RateLimitingInterface_kubernetes58107, _ string) { + workFunc := func() bool { + rq.workerLock.RLock() + defer rq.workerLock.RUnlock() + queue.Get() + return true + } + for { + if quit := workFunc(); quit { + return + } + } +} + +func (rq *ResourceQuotaController_kubernetes58107) Run() { + go rq.worker(rq.queue, "G1") // G3 + go rq.worker(rq.missingUsageQueue, "G2") // G4 +} + +func (rq *ResourceQuotaController_kubernetes58107) Sync() { + for i := 0; i < 100000; i++ { + rq.workerLock.Lock() + runtime.Gosched() + rq.workerLock.Unlock() + } +} + +func (rq *ResourceQuotaController_kubernetes58107) HelperSignals() { + for i := 0; i < 100000; i++ { + rq.queue.Put() + rq.missingUsageQueue.Put() + } +} + +func startResourceQuotaController_kubernetes58107() { + resourceQuotaController := &ResourceQuotaController_kubernetes58107{ + queue: &Type_kubernetes58107{sync.NewCond(&sync.Mutex{})}, + missingUsageQueue: &Type_kubernetes58107{sync.NewCond(&sync.Mutex{})}, + } + + go resourceQuotaController.Run() // G2 + go resourceQuotaController.Sync() // G5 + resourceQuotaController.HelperSignals() +} + +func Kubernetes58107() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(1000 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + for i := 0; i < 1000; i++ { + go startResourceQuotaController_kubernetes58107() // G1 + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes62464.go b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes62464.go new file mode 100644 index 00000000000000..0d07ebc4a9c451 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes62464.go @@ -0,0 +1,120 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
+ +/* + * Project: kubernetes + * Issue or PR : https://github.com/kubernetes/kubernetes/pull/62464 + * Buggy version: a048ca888ad27367b1a7b7377c67658920adbf5d + * fix commit-id: c1b19fce903675b82e9fdd1befcc5f5d658bfe78 + * Flaky: 8/100 + */ + +package main + +import ( + "math/rand" + "os" + "runtime" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Kubernetes62464", Kubernetes62464) +} + +type State_kubernetes62464 interface { + GetCPUSetOrDefault() + GetCPUSet() bool + GetDefaultCPUSet() + SetDefaultCPUSet() +} + +type stateMemory_kubernetes62464 struct { + sync.RWMutex +} + +func (s *stateMemory_kubernetes62464) GetCPUSetOrDefault() { + s.RLock() + defer s.RUnlock() + if ok := s.GetCPUSet(); ok { + return + } + s.GetDefaultCPUSet() +} + +func (s *stateMemory_kubernetes62464) GetCPUSet() bool { + runtime.Gosched() + s.RLock() + defer s.RUnlock() + + if rand.Intn(10) > 5 { + return true + } + return false +} + +func (s *stateMemory_kubernetes62464) GetDefaultCPUSet() { + s.RLock() + defer s.RUnlock() +} + +func (s *stateMemory_kubernetes62464) SetDefaultCPUSet() { + s.Lock() + runtime.Gosched() + defer s.Unlock() +} + +type staticPolicy_kubernetes62464 struct{} + +func (p *staticPolicy_kubernetes62464) RemoveContainer(s State_kubernetes62464) { + s.GetDefaultCPUSet() + s.SetDefaultCPUSet() +} + +type manager_kubernetes62464 struct { + state *stateMemory_kubernetes62464 +} + +func (m *manager_kubernetes62464) reconcileState() { + m.state.GetCPUSetOrDefault() +} + +func NewPolicyAndManager_kubernetes62464() (*staticPolicy_kubernetes62464, *manager_kubernetes62464) { + s := &stateMemory_kubernetes62464{} + m := &manager_kubernetes62464{s} + p := &staticPolicy_kubernetes62464{} + return p, m +} + +/// +/// G1 G2 +/// m.reconcileState() +/// m.state.GetCPUSetOrDefault() +/// s.RLock() +/// s.GetCPUSet() +/// p.RemoveContainer() +/// s.GetDefaultCPUSet() +/// s.SetDefaultCPUSet() +/// s.Lock() +/// s.RLock() +/// ---------------------G1,G2 deadlock--------------------- +/// + +func Kubernetes62464() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + for i := 0; i < 1000; i++ { + go func() { + p, m := NewPolicyAndManager_kubernetes62464() + go m.reconcileState() + go p.RemoveContainer(m.state) + }() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes6632.go b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes6632.go new file mode 100644 index 00000000000000..a3af3b24ae81ac --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes6632.go @@ -0,0 +1,86 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
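+
+// Leak sketch (inferred from the code below): monitor() only ever receives
+// from closeChan, never from resetChan. If WriteFrame() wins the race for
+// writeLock, it blocks forever sending on resetChan while holding the lock,
+// and monitor() then leaks trying to acquire writeLock.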
+ +/* + * Project: kubernetes + * Issue or PR : https://github.com/kubernetes/kubernetes/pull/6632 + * Buggy version: e597b41d939573502c8dda1dde7bf3439325fb5d + * fix commit-id: 82afb7ab1fe12cf2efceede2322d082eaf5d5adc + * Flaky: 4/100 + */ +package main + +import ( + "os" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Kubernetes6632", Kubernetes6632) +} + +type Connection_kubernetes6632 struct { + closeChan chan bool +} + +type idleAwareFramer_kubernetes6632 struct { + resetChan chan bool + writeLock sync.Mutex + conn *Connection_kubernetes6632 +} + +func (i *idleAwareFramer_kubernetes6632) monitor() { + var resetChan = i.resetChan +Loop: + for { + select { + case <-i.conn.closeChan: + i.writeLock.Lock() + close(resetChan) + i.resetChan = nil + i.writeLock.Unlock() + break Loop + } + } +} + +func (i *idleAwareFramer_kubernetes6632) WriteFrame() { + i.writeLock.Lock() + defer i.writeLock.Unlock() + if i.resetChan == nil { + return + } + i.resetChan <- true +} + +func NewIdleAwareFramer_kubernetes6632() *idleAwareFramer_kubernetes6632 { + return &idleAwareFramer_kubernetes6632{ + resetChan: make(chan bool), + conn: &Connection_kubernetes6632{ + closeChan: make(chan bool), + }, + } +} + +func Kubernetes6632() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + for i := 0; i < 100; i++ { + go func() { + i := NewIdleAwareFramer_kubernetes6632() + + go func() { // helper goroutine + i.conn.closeChan <- true + }() + go i.monitor() // G1 + go i.WriteFrame() // G2 + }() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes70277.go b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes70277.go new file mode 100644 index 00000000000000..ae02ec83040b99 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/kubernetes70277.go @@ -0,0 +1,97 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
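+
+// Leak sketch (inferred from the code below): the test goroutine captures the
+// done channel passed to WaitFor as doneCh and then receives from it, but
+// that channel is its own stopCh, which is only closed by a defer that cannot
+// run while the goroutine is still blocked on the receive.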
+ +package main + +import ( + "os" + "runtime/pprof" + "time" +) + +func init() { + register("Kubernetes70277", Kubernetes70277) +} + +type WaitFunc_kubernetes70277 func(done <-chan struct{}) <-chan struct{} + +type ConditionFunc_kubernetes70277 func() (done bool, err error) + +func WaitFor_kubernetes70277(wait WaitFunc_kubernetes70277, fn ConditionFunc_kubernetes70277, done <-chan struct{}) error { + c := wait(done) + for { + _, open := <-c + ok, err := fn() + if err != nil { + return err + } + if ok { + return nil + } + if !open { + break + } + } + return nil +} + +func poller_kubernetes70277(interval, timeout time.Duration) WaitFunc_kubernetes70277 { + return WaitFunc_kubernetes70277(func(done <-chan struct{}) <-chan struct{} { + ch := make(chan struct{}) + go func() { + defer close(ch) + + tick := time.NewTicker(interval) + defer tick.Stop() + + var after <-chan time.Time + if timeout != 0 { + timer := time.NewTimer(timeout) + after = timer.C + defer timer.Stop() + } + for { + select { + case <-tick.C: + select { + case ch <- struct{}{}: + default: + } + case <-after: + return + case <-done: + return + } + } + }() + + return ch + }) +} + +func Kubernetes70277() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + for i := 0; i < 1000; i++ { + go func() { + stopCh := make(chan struct{}) + defer close(stopCh) + waitFunc := poller_kubernetes70277(time.Millisecond, 80*time.Millisecond) + var doneCh <-chan struct{} + + WaitFor_kubernetes70277(func(done <-chan struct{}) <-chan struct{} { + doneCh = done + return waitFunc(done) + }, func() (bool, error) { + time.Sleep(10 * time.Millisecond) + return true, nil + }, stopCh) + + <-doneCh // block here + }() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/main.go b/src/runtime/testdata/testgoroutineleakprofile/goker/main.go new file mode 100644 index 00000000000000..5787c1e2b2bb90 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/main.go @@ -0,0 +1,39 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "os" + +// The number of times the main (profiling) goroutine should yield +// in order to allow the leaking goroutines to get stuck. +const yieldCount = 10 + +var cmds = map[string]func(){} + +func register(name string, f func()) { + if cmds[name] != nil { + panic("duplicate registration: " + name) + } + cmds[name] = f +} + +func registerInit(name string, f func()) { + if len(os.Args) >= 2 && os.Args[1] == name { + f() + } +} + +func main() { + if len(os.Args) < 2 { + println("usage: " + os.Args[0] + " name-of-test") + return + } + f := cmds[os.Args[1]] + if f == nil { + println("unknown function: " + os.Args[1]) + return + } + f() +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/moby17176.go b/src/runtime/testdata/testgoroutineleakprofile/goker/moby17176.go new file mode 100644 index 00000000000000..884d077550f6b8 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/moby17176.go @@ -0,0 +1,68 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
+ +/* + * Project: moby + * Issue or PR : https://github.com/moby/moby/pull/17176 + * Buggy version: d295dc66521e2734390473ec1f1da8a73ad3288a + * fix commit-id: 2f16895ee94848e2d8ad72bc01968b4c88d84cb8 + * Flaky: 100/100 + */ +package main + +import ( + "errors" + "os" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Moby17176", Moby17176) +} + +type DeviceSet_moby17176 struct { + sync.Mutex + nrDeletedDevices int +} + +func (devices *DeviceSet_moby17176) cleanupDeletedDevices() error { + devices.Lock() + if devices.nrDeletedDevices == 0 { + /// Missing devices.Unlock() + return nil + } + devices.Unlock() + return errors.New("Error") +} + +func testDevmapperLockReleasedDeviceDeletion_moby17176() { + ds := &DeviceSet_moby17176{ + nrDeletedDevices: 0, + } + ds.cleanupDeletedDevices() + doneChan := make(chan bool) + go func() { + ds.Lock() + defer ds.Unlock() + doneChan <- true + }() + + select { + case <-time.After(time.Millisecond): + case <-doneChan: + } +} +func Moby17176() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + for i := 0; i < 100; i++ { + go testDevmapperLockReleasedDeviceDeletion_moby17176() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/moby21233.go b/src/runtime/testdata/testgoroutineleakprofile/goker/moby21233.go new file mode 100644 index 00000000000000..36017a9488cc06 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/moby21233.go @@ -0,0 +1,146 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +/* + * Project: moby + * Issue or PR : https://github.com/moby/moby/pull/21233 + * Buggy version: cc12d2bfaae135e63b1f962ad80e6943dd995337 + * fix commit-id: 2f4aa9658408ac72a598363c6e22eadf93dbb8a7 + * Flaky:100/100 + */ +package main + +import ( + "math/rand" + "os" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Moby21233", Moby21233) +} + +type Progress_moby21233 struct{} + +type Output_moby21233 interface { + WriteProgress(Progress_moby21233) error +} + +type chanOutput_moby21233 chan<- Progress_moby21233 + +type TransferManager_moby21233 struct { + mu sync.Mutex +} + +type Transfer_moby21233 struct { + mu sync.Mutex +} + +type Watcher_moby21233 struct { + signalChan chan struct{} + releaseChan chan struct{} + running chan struct{} +} + +func ChanOutput_moby21233(progressChan chan<- Progress_moby21233) Output_moby21233 { + return chanOutput_moby21233(progressChan) +} +func (out chanOutput_moby21233) WriteProgress(p Progress_moby21233) error { + out <- p + return nil +} +func NewTransferManager_moby21233() *TransferManager_moby21233 { + return &TransferManager_moby21233{} +} +func NewTransfer_moby21233() *Transfer_moby21233 { + return &Transfer_moby21233{} +} +func (t *Transfer_moby21233) Release(watcher *Watcher_moby21233) { + t.mu.Lock() + t.mu.Unlock() + close(watcher.releaseChan) + <-watcher.running +} +func (t *Transfer_moby21233) Watch(progressOutput Output_moby21233) *Watcher_moby21233 { + t.mu.Lock() + defer t.mu.Unlock() + lastProgress := Progress_moby21233{} + w := &Watcher_moby21233{ + releaseChan: make(chan struct{}), + signalChan: make(chan struct{}), + running: make(chan struct{}), + } + go func() { // G2 + defer func() { + close(w.running) + }() + done := false + for { + t.mu.Lock() + t.mu.Unlock() + if rand.Int31n(2) >= 1 { + progressOutput.WriteProgress(lastProgress) + } + if done { 
+ return + } + select { + case <-w.signalChan: + case <-w.releaseChan: + done = true + } + } + }() + return w +} +func (tm *TransferManager_moby21233) Transfer(progressOutput Output_moby21233) (*Transfer_moby21233, *Watcher_moby21233) { + tm.mu.Lock() + defer tm.mu.Unlock() + t := NewTransfer_moby21233() + return t, t.Watch(progressOutput) +} + +func testTransfer_moby21233() { // G1 + tm := NewTransferManager_moby21233() + progressChan := make(chan Progress_moby21233) + progressDone := make(chan struct{}) + go func() { // G3 + time.Sleep(1 * time.Millisecond) + for p := range progressChan { /// Chan consumer + if rand.Int31n(2) >= 1 { + return + } + _ = p + } + close(progressDone) + }() + time.Sleep(1 * time.Millisecond) + ids := []string{"id1", "id2", "id3"} + xrefs := make([]*Transfer_moby21233, len(ids)) + watchers := make([]*Watcher_moby21233, len(ids)) + for i := range ids { + xrefs[i], watchers[i] = tm.Transfer(ChanOutput_moby21233(progressChan)) /// Chan producer + time.Sleep(2 * time.Millisecond) + } + + for i := range xrefs { + xrefs[i].Release(watchers[i]) + } + + close(progressChan) + <-progressDone +} + +func Moby21233() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + for i := 0; i < 100; i++ { + go testTransfer_moby21233() // G1 + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/moby25384.go b/src/runtime/testdata/testgoroutineleakprofile/goker/moby25384.go new file mode 100644 index 00000000000000..d653731f6caea8 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/moby25384.go @@ -0,0 +1,59 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +/* + * Project: moby + * Issue or PR : https://github.com/moby/moby/pull/25384 + * Buggy version: 58befe3081726ef74ea09198cd9488fb42c51f51 + * fix commit-id: 42360d164b9f25fb4b150ef066fcf57fa39559a7 + * Flaky: 100/100 + */ +package main + +import ( + "os" + "runtime" + "runtime/pprof" + "sync" +) + +func init() { + register("Moby25348", Moby25348) +} + +type plugin_moby25348 struct{} + +type Manager_moby25348 struct { + plugins []*plugin_moby25348 +} + +func (pm *Manager_moby25348) init() { + var group sync.WaitGroup + group.Add(len(pm.plugins)) + for _, p := range pm.plugins { + go func(p *plugin_moby25348) { + defer group.Done() + }(p) + group.Wait() // Block here + } +} + +func Moby25348() { + prof := pprof.Lookup("goroutineleak") + defer func() { + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) + }() + go func() { + p1 := &plugin_moby25348{} + p2 := &plugin_moby25348{} + pm := &Manager_moby25348{ + plugins: []*plugin_moby25348{p1, p2}, + } + go pm.init() + }() +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/moby27782.go b/src/runtime/testdata/testgoroutineleakprofile/goker/moby27782.go new file mode 100644 index 00000000000000..9b53d9035ca1e1 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/moby27782.go @@ -0,0 +1,249 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
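+
+// Leak sketch (inferred from the code below): readEvents() only produces
+// Write events, and only non-Write events Broadcast the condition variable,
+// so Remove() blocks in cv.Wait() forever once the log watcher is closed;
+// the deferred fileWatcher.Close() then never runs, leaking the reader
+// goroutines and anything waiting on l.mu.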
+ +/* + * Project: moby + * Issue or PR : https://github.com/moby/moby/pull/27782 + * Buggy version: 18768fdc2e76ec6c600c8ab57d2d487ee7877794 + * fix commit-id: a69a59ffc7e3d028a72d1195c2c1535f447eaa84 + * Flaky: 2/100 + */ +package main + +import ( + "errors" + "os" + "runtime" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Moby27782", Moby27782) +} + +type Event_moby27782 struct { + Op Op_moby27782 +} + +type Op_moby27782 uint32 + +const ( + Create_moby27782 Op_moby27782 = 1 << iota + Write_moby27782 + Remove_moby27782 + Rename_moby27782 + Chmod_moby27782 +) + +func newEvent(op Op_moby27782) Event_moby27782 { + return Event_moby27782{op} +} + +func (e *Event_moby27782) ignoreLinux(w *Watcher_moby27782) bool { + if e.Op != Write_moby27782 { + w.mu.Lock() + defer w.mu.Unlock() + w.cv.Broadcast() + return true + } + runtime.Gosched() + return false +} + +type Watcher_moby27782 struct { + Events chan Event_moby27782 + mu sync.Mutex // L1 + cv *sync.Cond // C1 + done chan struct{} +} + +func NewWatcher_moby27782() *Watcher_moby27782 { + w := &Watcher_moby27782{ + Events: make(chan Event_moby27782), + done: make(chan struct{}), + } + w.cv = sync.NewCond(&w.mu) + go w.readEvents() // G3 + return w +} + +func (w *Watcher_moby27782) readEvents() { + defer close(w.Events) + for { + if w.isClosed() { + return + } + event := newEvent(Write_moby27782) // MODIFY event + if !event.ignoreLinux(w) { + runtime.Gosched() + select { + case w.Events <- event: + case <-w.done: + return + } + } + } +} + +func (w *Watcher_moby27782) isClosed() bool { + select { + case <-w.done: + return true + default: + return false + } +} + +func (w *Watcher_moby27782) Close() { + if w.isClosed() { + return + } + close(w.done) +} + +func (w *Watcher_moby27782) Remove() { + w.mu.Lock() + defer w.mu.Unlock() + exists := true + for exists { + w.cv.Wait() + runtime.Gosched() + } +} + +type FileWatcher_moby27782 interface { + Events() <-chan Event_moby27782 + Remove() + Close() +} + +func New_moby27782() FileWatcher_moby27782 { + return NewEventWatcher_moby27782() +} + +func NewEventWatcher_moby27782() FileWatcher_moby27782 { + return &fsNotifyWatcher_moby27782{NewWatcher_moby27782()} +} + +type fsNotifyWatcher_moby27782 struct { + *Watcher_moby27782 +} + +func (w *fsNotifyWatcher_moby27782) Events() <-chan Event_moby27782 { + return w.Watcher_moby27782.Events +} + +func watchFile_moby27782() FileWatcher_moby27782 { + fileWatcher := New_moby27782() + return fileWatcher +} + +type LogWatcher_moby27782 struct { + closeOnce sync.Once + closeNotifier chan struct{} +} + +func (w *LogWatcher_moby27782) Close() { + w.closeOnce.Do(func() { + close(w.closeNotifier) + }) +} + +func (w *LogWatcher_moby27782) WatchClose() <-chan struct{} { + return w.closeNotifier +} + +func NewLogWatcher_moby27782() *LogWatcher_moby27782 { + return &LogWatcher_moby27782{ + closeNotifier: make(chan struct{}), + } +} + +func followLogs_moby27782(logWatcher *LogWatcher_moby27782) { + fileWatcher := watchFile_moby27782() + defer func() { + fileWatcher.Close() + }() + waitRead := func() { + runtime.Gosched() + select { + case <-fileWatcher.Events(): + case <-logWatcher.WatchClose(): + fileWatcher.Remove() + return + } + } + handleDecodeErr := func() { + waitRead() + } + handleDecodeErr() +} + +type Container_moby27782 struct { + LogDriver *JSONFileLogger_moby27782 +} + +func (container *Container_moby27782) InitializeStdio() { + if err := container.startLogging(); err != nil { + container.Reset() + } +} + +func (container 
*Container_moby27782) startLogging() error { + l := &JSONFileLogger_moby27782{ + readers: make(map[*LogWatcher_moby27782]struct{}), + } + container.LogDriver = l + l.ReadLogs() + return errors.New("Some error") +} + +func (container *Container_moby27782) Reset() { + if container.LogDriver != nil { + container.LogDriver.Close() + } +} + +type JSONFileLogger_moby27782 struct { + mu sync.Mutex + readers map[*LogWatcher_moby27782]struct{} +} + +func (l *JSONFileLogger_moby27782) ReadLogs() *LogWatcher_moby27782 { + logWatcher := NewLogWatcher_moby27782() + go l.readLogs(logWatcher) // G2 + return logWatcher +} + +func (l *JSONFileLogger_moby27782) readLogs(logWatcher *LogWatcher_moby27782) { + l.mu.Lock() + defer l.mu.Unlock() + + l.readers[logWatcher] = struct{}{} + followLogs_moby27782(logWatcher) +} + +func (l *JSONFileLogger_moby27782) Close() { + l.mu.Lock() + defer l.mu.Unlock() + + for r := range l.readers { + r.Close() + delete(l.readers, r) + } +} + +func Moby27782() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + for i := 0; i < 10000; i++ { + go (&Container_moby27782{}).InitializeStdio() // G1 + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/moby28462.go b/src/runtime/testdata/testgoroutineleakprofile/goker/moby28462.go new file mode 100644 index 00000000000000..56467e0b56f7cc --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/moby28462.go @@ -0,0 +1,125 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +/* + * Project: moby + * Issue or PR : https://github.com/moby/moby/pull/28462 + * Buggy version: b184bdabf7a01c4b802304ac64ac133743c484be + * fix commit-id: 89b123473774248fc3a0356dd3ce5b116cc69b29 + * Flaky: 69/100 + */ +package main + +import ( + "os" + "runtime" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Moby28462", Moby28462) +} + +type State_moby28462 struct { + Health *Health_moby28462 +} + +type Container_moby28462 struct { + sync.Mutex + State *State_moby28462 +} + +func (ctr *Container_moby28462) start() { + go ctr.waitExit() +} +func (ctr *Container_moby28462) waitExit() { + +} + +type Store_moby28462 struct { + ctr *Container_moby28462 +} + +func (s *Store_moby28462) Get() *Container_moby28462 { + return s.ctr +} + +type Daemon_moby28462 struct { + containers Store_moby28462 +} + +func (d *Daemon_moby28462) StateChanged() { + c := d.containers.Get() + c.Lock() + d.updateHealthMonitorElseBranch(c) + defer c.Unlock() +} + +func (d *Daemon_moby28462) updateHealthMonitorIfBranch(c *Container_moby28462) { + h := c.State.Health + if stop := h.OpenMonitorChannel(); stop != nil { + go monitor_moby28462(c, stop) + } +} +func (d *Daemon_moby28462) updateHealthMonitorElseBranch(c *Container_moby28462) { + h := c.State.Health + h.CloseMonitorChannel() +} + +type Health_moby28462 struct { + stop chan struct{} +} + +func (s *Health_moby28462) OpenMonitorChannel() chan struct{} { + return s.stop +} + +func (s *Health_moby28462) CloseMonitorChannel() { + if s.stop != nil { + s.stop <- struct{}{} + } +} + +func monitor_moby28462(c *Container_moby28462, stop chan struct{}) { + for { + select { + case <-stop: + return + default: + handleProbeResult_moby28462(c) + } + } +} + +func handleProbeResult_moby28462(c *Container_moby28462) { + runtime.Gosched() + c.Lock() + defer c.Unlock() +} + +func NewDaemonAndContainer_moby28462() 
(*Daemon_moby28462, *Container_moby28462) { + c := &Container_moby28462{ + State: &State_moby28462{&Health_moby28462{make(chan struct{})}}, + } + d := &Daemon_moby28462{Store_moby28462{c}} + return d, c +} + +func Moby28462() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + for i := 0; i < 100; i++ { + go func() { + d, c := NewDaemonAndContainer_moby28462() + go monitor_moby28462(c, c.State.Health.OpenMonitorChannel()) // G1 + go d.StateChanged() // G2 + }() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/moby30408.go b/src/runtime/testdata/testgoroutineleakprofile/goker/moby30408.go new file mode 100644 index 00000000000000..561e2faf576ab0 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/moby30408.go @@ -0,0 +1,67 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +package main + +import ( + "errors" + "os" + "runtime" + "runtime/pprof" + "sync" +) + +func init() { + register("Moby30408", Moby30408) +} + +type Manifest_moby30408 struct { + Implements []string +} + +type Plugin_moby30408 struct { + activateWait *sync.Cond + activateErr error + Manifest *Manifest_moby30408 +} + +func (p *Plugin_moby30408) waitActive() error { + p.activateWait.L.Lock() + for !p.activated() { + p.activateWait.Wait() + } + p.activateWait.L.Unlock() + return p.activateErr +} + +func (p *Plugin_moby30408) activated() bool { + return p.Manifest != nil +} + +func testActive_moby30408(p *Plugin_moby30408) { + done := make(chan struct{}) + go func() { // G2 + p.waitActive() + close(done) + }() + <-done +} + +func Moby30408() { + prof := pprof.Lookup("goroutineleak") + defer func() { + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) + }() + + go func() { // G1 + p := &Plugin_moby30408{activateWait: sync.NewCond(&sync.Mutex{})} + p.activateErr = errors.New("some junk happened") + + testActive_moby30408(p) + }() +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/moby33781.go b/src/runtime/testdata/testgoroutineleakprofile/goker/moby33781.go new file mode 100644 index 00000000000000..4be50546e9be2c --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/moby33781.go @@ -0,0 +1,71 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
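+
+// Leak sketch (inferred from the code below): when monitor exits through the
+// stop case without draining results, the probe goroutine blocks forever
+// sending on the unbuffered results channel.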
+ +/* + * Project: moby + * Issue or PR : https://github.com/moby/moby/pull/33781 + * Buggy version: 33fd3817b0f5ca4b87f0a75c2bd583b4425d392b + * fix commit-id: 67297ba0051d39be544009ba76abea14bc0be8a4 + * Flaky: 25/100 + */ + +package main + +import ( + "context" + "os" + "runtime/pprof" + "time" +) + +func init() { + register("Moby33781", Moby33781) +} + +func monitor_moby33781(stop chan bool) { + probeInterval := time.Millisecond + probeTimeout := time.Millisecond + for { + select { + case <-stop: + return + case <-time.After(probeInterval): + results := make(chan bool) + ctx, cancelProbe := context.WithTimeout(context.Background(), probeTimeout) + go func() { // G3 + results <- true + close(results) + }() + select { + case <-stop: + // results should be drained here + cancelProbe() + return + case <-results: + cancelProbe() + case <-ctx.Done(): + cancelProbe() + <-results + } + } + } +} + +func Moby33781() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + for i := 0; i < 100; i++ { + go func(i int) { + stop := make(chan bool) + go monitor_moby33781(stop) // G1 + go func() { // G2 + time.Sleep(time.Duration(i) * time.Millisecond) + stop <- true + }() + }(i) + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/moby36114.go b/src/runtime/testdata/testgoroutineleakprofile/goker/moby36114.go new file mode 100644 index 00000000000000..577c81651a0668 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/moby36114.go @@ -0,0 +1,53 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +/* + * Project: moby + * Issue or PR : https://github.com/moby/moby/pull/36114 + * Buggy version: 6d4d3c52ae7c3f910bfc7552a2a673a8338e5b9f + * fix commit-id: a44fcd3d27c06aaa60d8d1cbce169f0d982e74b1 + * Flaky: 100/100 + */ +package main + +import ( + "os" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Moby36114", Moby36114) +} + +type serviceVM_moby36114 struct { + sync.Mutex +} + +func (svm *serviceVM_moby36114) hotAddVHDsAtStart() { + svm.Lock() + defer svm.Unlock() + svm.hotRemoveVHDsAtStart() +} + +func (svm *serviceVM_moby36114) hotRemoveVHDsAtStart() { + svm.Lock() + defer svm.Unlock() +} + +func Moby36114() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + for i := 0; i < 100; i++ { + go func() { + s := &serviceVM_moby36114{} + go s.hotAddVHDsAtStart() + }() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/moby4951.go b/src/runtime/testdata/testgoroutineleakprofile/goker/moby4951.go new file mode 100644 index 00000000000000..7f0497648cf606 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/moby4951.go @@ -0,0 +1,101 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
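+
+// Leak sketch (inferred from the code below): removeDeviceAndWait temporarily
+// drops the DeviceSet lock while the caller still holds the DevInfo lock, so
+// a concurrent DeleteDevice on the same device can take the DeviceSet lock
+// and then block on the DevInfo lock, an AB-BA deadlock.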
+ +/* + * Project: moby + * Issue or PR : https://github.com/moby/moby/pull/4951 + * Buggy version: 81f148be566ab2b17810ad4be61a5d8beac8330f + * fix commit-id: 2ffef1b7eb618162673c6ffabccb9ca57c7dfce3 + * Flaky: 100/100 + */ +package main + +import ( + "os" + "runtime" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Moby4951", Moby4951) +} + +type DeviceSet_moby4951 struct { + sync.Mutex + infos map[string]*DevInfo_moby4951 + nrDeletedDevices int +} + +func (devices *DeviceSet_moby4951) DeleteDevice(hash string) { + devices.Lock() + defer devices.Unlock() + + info := devices.lookupDevice(hash) + + info.lock.Lock() + runtime.Gosched() + defer info.lock.Unlock() + + devices.deleteDevice(info) +} + +func (devices *DeviceSet_moby4951) lookupDevice(hash string) *DevInfo_moby4951 { + existing, ok := devices.infos[hash] + if !ok { + return nil + } + return existing +} + +func (devices *DeviceSet_moby4951) deleteDevice(info *DevInfo_moby4951) { + devices.removeDeviceAndWait(info.Name()) +} + +func (devices *DeviceSet_moby4951) removeDeviceAndWait(devname string) { + /// remove devices by devname + devices.Unlock() + time.Sleep(300 * time.Nanosecond) + devices.Lock() +} + +type DevInfo_moby4951 struct { + lock sync.Mutex + name string +} + +func (info *DevInfo_moby4951) Name() string { + return info.name +} + +func NewDeviceSet_moby4951() *DeviceSet_moby4951 { + devices := &DeviceSet_moby4951{ + infos: make(map[string]*DevInfo_moby4951), + } + info1 := &DevInfo_moby4951{ + name: "info1", + } + info2 := &DevInfo_moby4951{ + name: "info2", + } + devices.infos[info1.name] = info1 + devices.infos[info2.name] = info2 + return devices +} + +func Moby4951() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + go func() { + ds := NewDeviceSet_moby4951() + /// Delete devices by the same info + go ds.DeleteDevice("info1") + go ds.DeleteDevice("info1") + }() +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/moby7559.go b/src/runtime/testdata/testgoroutineleakprofile/goker/moby7559.go new file mode 100644 index 00000000000000..212f65b1f3f2c6 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/moby7559.go @@ -0,0 +1,55 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. 
+ +/* + * Project: moby + * Issue or PR : https://github.com/moby/moby/pull/7559 + * Buggy version: 64579f51fcb439c36377c0068ccc9a007b368b5a + * fix commit-id: 6cbb8e070d6c3a66bf48fbe5cbf689557eee23db + * Flaky: 100/100 + */ +package main + +import ( + "net" + "os" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Moby7559", Moby7559) +} + +type UDPProxy_moby7559 struct { + connTrackLock sync.Mutex +} + +func (proxy *UDPProxy_moby7559) Run() { + for i := 0; i < 2; i++ { + proxy.connTrackLock.Lock() + _, err := net.DialUDP("udp", nil, nil) + if err != nil { + continue + /// Missing unlock here + } + if i == 0 { + break + } + } + proxy.connTrackLock.Unlock() +} + +func Moby7559() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + for i := 0; i < 20; i++ { + go (&UDPProxy_moby7559{}).Run() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/serving2137.go b/src/runtime/testdata/testgoroutineleakprofile/goker/serving2137.go new file mode 100644 index 00000000000000..49905315a01b4c --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/serving2137.go @@ -0,0 +1,122 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +package main + +import ( + "os" + "runtime" + "runtime/pprof" + "sync" + "time" +) + +func init() { + register("Serving2137", Serving2137) +} + +type token_serving2137 struct{} + +type request_serving2137 struct { + lock *sync.Mutex + accepted chan bool +} + +type Breaker_serving2137 struct { + pendingRequests chan token_serving2137 + activeRequests chan token_serving2137 +} + +func (b *Breaker_serving2137) Maybe(thunk func()) bool { + var t token_serving2137 + select { + default: + // Pending request queue is full. Report failure. + return false + case b.pendingRequests <- t: + // Pending request has capacity. + // Wait for capacity in the active queue. + b.activeRequests <- t + // Defer releasing capacity in the active and pending request queue. + defer func() { + <-b.activeRequests + runtime.Gosched() + <-b.pendingRequests + }() + // Do the thing. + thunk() + // Report success + return true + } +} + +func (b *Breaker_serving2137) concurrentRequest() request_serving2137 { + r := request_serving2137{lock: &sync.Mutex{}, accepted: make(chan bool, 1)} + r.lock.Lock() + var start sync.WaitGroup + start.Add(1) + go func() { // G2, G3 + start.Done() + runtime.Gosched() + ok := b.Maybe(func() { + // Will block on locked mutex. + r.lock.Lock() + runtime.Gosched() + r.lock.Unlock() + }) + r.accepted <- ok + }() + start.Wait() // Ensure that the go func has had a chance to execute. + return r +} + +// Perform n requests against the breaker, returning mutexes for each +// request which succeeded, and a slice of bools for all requests. 
+func (b *Breaker_serving2137) concurrentRequests(n int) []request_serving2137 { + requests := make([]request_serving2137, n) + for i := range requests { + requests[i] = b.concurrentRequest() + } + return requests +} + +func NewBreaker_serving2137(queueDepth, maxConcurrency int32) *Breaker_serving2137 { + return &Breaker_serving2137{ + pendingRequests: make(chan token_serving2137, queueDepth+maxConcurrency), + activeRequests: make(chan token_serving2137, maxConcurrency), + } +} + +func unlock_serving2137(req request_serving2137) { + req.lock.Unlock() + runtime.Gosched() + // Verify that function has completed + ok := <-req.accepted + runtime.Gosched() + // Requeue for next usage + req.accepted <- ok +} + +func unlockAll_serving2137(requests []request_serving2137) { + for _, lc := range requests { + unlock_serving2137(lc) + } +} + +func Serving2137() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(100 * time.Millisecond) + prof.WriteTo(os.Stdout, 2) + }() + + for i := 0; i < 1000; i++ { + go func() { + b := NewBreaker_serving2137(1, 1) + + locks := b.concurrentRequests(2) // G1 + unlockAll_serving2137(locks) + }() + } +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/syncthing4829.go b/src/runtime/testdata/testgoroutineleakprofile/goker/syncthing4829.go new file mode 100644 index 00000000000000..7967db7cfe083f --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/syncthing4829.go @@ -0,0 +1,87 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +package main + +import ( + "os" + "runtime" + "runtime/pprof" + "sync" +) + +func init() { + register("Syncthing4829", Syncthing4829) +} + +type Address_syncthing4829 int + +type Mapping_syncthing4829 struct { + mut sync.RWMutex // L2 + + extAddresses map[string]Address_syncthing4829 +} + +func (m *Mapping_syncthing4829) clearAddresses() { + m.mut.Lock() // L2 + var removed []Address_syncthing4829 + for id, addr := range m.extAddresses { + removed = append(removed, addr) + delete(m.extAddresses, id) + } + if len(removed) > 0 { + m.notify(nil, removed) + } + m.mut.Unlock() // L2 +} + +func (m *Mapping_syncthing4829) notify(added, remove []Address_syncthing4829) { + m.mut.RLock() // L2 + m.mut.RUnlock() // L2 +} + +type Service_syncthing4829 struct { + mut sync.RWMutex // L1 + + mappings []*Mapping_syncthing4829 +} + +func (s *Service_syncthing4829) NewMapping() *Mapping_syncthing4829 { + mapping := &Mapping_syncthing4829{ + extAddresses: make(map[string]Address_syncthing4829), + } + s.mut.Lock() // L1 + s.mappings = append(s.mappings, mapping) + s.mut.Unlock() // L1 + return mapping +} + +func (s *Service_syncthing4829) RemoveMapping(mapping *Mapping_syncthing4829) { + s.mut.Lock() // L1 + defer s.mut.Unlock() // L1 + for _, existing := range s.mappings { + if existing == mapping { + mapping.clearAddresses() + } + } +} + +func Syncthing4829() { + prof := pprof.Lookup("goroutineleak") + defer func() { + // Yield several times to allow the child goroutine to run. 
+ for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) + }() + + go func() { // G1 + natSvc := &Service_syncthing4829{} + m := natSvc.NewMapping() + m.extAddresses["test"] = 0 + + natSvc.RemoveMapping(m) + }() +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/goker/syncthing5795.go b/src/runtime/testdata/testgoroutineleakprofile/goker/syncthing5795.go new file mode 100644 index 00000000000000..e25494a688dd5c --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/goker/syncthing5795.go @@ -0,0 +1,103 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +package main + +import ( + "os" + "runtime" + "runtime/pprof" + "sync" +) + +func init() { + register("Syncthing5795", Syncthing5795) +} + +type message_syncthing5795 interface{} + +type ClusterConfig_syncthing5795 struct{} + +type Model_syncthing5795 interface { + ClusterConfig(message_syncthing5795) +} + +type TestModel_syncthing5795 struct { + ccFn func() +} + +func (t *TestModel_syncthing5795) ClusterConfig(msg message_syncthing5795) { + if t.ccFn != nil { + t.ccFn() + } +} + +type Connection_syncthing5795 interface { + Start() + Close() +} + +type rawConnection_syncthing5795 struct { + receiver Model_syncthing5795 + + inbox chan message_syncthing5795 + dispatcherLoopStopped chan struct{} + closed chan struct{} + closeOnce sync.Once +} + +func (c *rawConnection_syncthing5795) Start() { + go c.dispatcherLoop() // G2 +} + +func (c *rawConnection_syncthing5795) dispatcherLoop() { + defer close(c.dispatcherLoopStopped) + var msg message_syncthing5795 + for { + select { + case msg = <-c.inbox: + case <-c.closed: + return + } + switch msg := msg.(type) { + case *ClusterConfig_syncthing5795: + c.receiver.ClusterConfig(msg) + default: + return + } + } +} + +func (c *rawConnection_syncthing5795) Close() { + c.closeOnce.Do(func() { + close(c.closed) + <-c.dispatcherLoopStopped + }) +} + +func Syncthing5795() { + prof := pprof.Lookup("goroutineleak") + defer func() { + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) + }() + go func() { // G1 + m := &TestModel_syncthing5795{} + c := &rawConnection_syncthing5795{ + dispatcherLoopStopped: make(chan struct{}), + closed: make(chan struct{}), + inbox: make(chan message_syncthing5795), + receiver: m, + } + m.ccFn = c.Close + + c.Start() + c.inbox <- &ClusterConfig_syncthing5795{} + + <-c.dispatcherLoopStopped + }() +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/main.go b/src/runtime/testdata/testgoroutineleakprofile/main.go new file mode 100644 index 00000000000000..5787c1e2b2bb90 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/main.go @@ -0,0 +1,39 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "os" + +// The number of times the main (profiling) goroutine should yield +// in order to allow the leaking goroutines to get stuck. 
+const yieldCount = 10 + +var cmds = map[string]func(){} + +func register(name string, f func()) { + if cmds[name] != nil { + panic("duplicate registration: " + name) + } + cmds[name] = f +} + +func registerInit(name string, f func()) { + if len(os.Args) >= 2 && os.Args[1] == name { + f() + } +} + +func main() { + if len(os.Args) < 2 { + println("usage: " + os.Args[0] + " name-of-test") + return + } + f := cmds[os.Args[1]] + if f == nil { + println("unknown function: " + os.Args[1]) + return + } + f() +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/simple.go b/src/runtime/testdata/testgoroutineleakprofile/simple.go new file mode 100644 index 00000000000000..b8172cd6df8aa7 --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/simple.go @@ -0,0 +1,253 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "os" + "runtime" + "runtime/pprof" + "sync" +) + +// This is a set of micro-tests with obvious goroutine leaks that +// ensures goroutine leak detection works. +// +// Tests in this file are not flaky iff. run with GOMAXPROCS=1. +// The main goroutine forcefully yields via `runtime.Gosched()` before +// running the profiler. This moves them to the back of the run queue, +// allowing the leaky goroutines to be scheduled beforehand and get stuck. + +func init() { + register("NilRecv", NilRecv) + register("NilSend", NilSend) + register("SelectNoCases", SelectNoCases) + register("ChanRecv", ChanRecv) + register("ChanSend", ChanSend) + register("Select", Select) + register("WaitGroup", WaitGroup) + register("MutexStack", MutexStack) + register("MutexHeap", MutexHeap) + register("RWMutexRLock", RWMutexRLock) + register("RWMutexLock", RWMutexLock) + register("Cond", Cond) + register("Mixed", Mixed) + register("NoLeakGlobal", NoLeakGlobal) +} + +func NilRecv() { + prof := pprof.Lookup("goroutineleak") + go func() { + var c chan int + <-c + panic("should not be reached") + }() + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) +} + +func NilSend() { + prof := pprof.Lookup("goroutineleak") + go func() { + var c chan int + c <- 0 + panic("should not be reached") + }() + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) +} + +func ChanRecv() { + prof := pprof.Lookup("goroutineleak") + go func() { + <-make(chan int) + panic("should not be reached") + }() + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) +} + +func SelectNoCases() { + prof := pprof.Lookup("goroutineleak") + go func() { + select {} + panic("should not be reached") + }() + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) +} + +func ChanSend() { + prof := pprof.Lookup("goroutineleak") + go func() { + make(chan int) <- 0 + panic("should not be reached") + }() + // Yield several times to allow the child goroutine to run. 
+ for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) +} + +func Select() { + prof := pprof.Lookup("goroutineleak") + go func() { + select { + case make(chan int) <- 0: + case <-make(chan int): + } + panic("should not be reached") + }() + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) +} + +func WaitGroup() { + prof := pprof.Lookup("goroutineleak") + go func() { + var wg sync.WaitGroup + wg.Add(1) + wg.Wait() + panic("should not be reached") + }() + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) +} + +func MutexStack() { + prof := pprof.Lookup("goroutineleak") + for i := 0; i < 1000; i++ { + go func() { + var mu sync.Mutex + mu.Lock() + mu.Lock() + panic("should not be reached") + }() + } + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) +} + +func MutexHeap() { + prof := pprof.Lookup("goroutineleak") + for i := 0; i < 1000; i++ { + go func() { + mu := &sync.Mutex{} + go func() { + mu.Lock() + mu.Lock() + panic("should not be reached") + }() + }() + } + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) +} + +func RWMutexRLock() { + prof := pprof.Lookup("goroutineleak") + go func() { + mu := &sync.RWMutex{} + mu.Lock() + mu.RLock() + panic("should not be reached") + }() + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) +} + +func RWMutexLock() { + prof := pprof.Lookup("goroutineleak") + go func() { + mu := &sync.RWMutex{} + mu.Lock() + mu.Lock() + panic("should not be reached") + }() + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) +} + +func Cond() { + prof := pprof.Lookup("goroutineleak") + go func() { + cond := sync.NewCond(&sync.Mutex{}) + cond.L.Lock() + cond.Wait() + panic("should not be reached") + }() + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) +} + +func Mixed() { + prof := pprof.Lookup("goroutineleak") + go func() { + ch := make(chan int) + wg := sync.WaitGroup{} + wg.Add(1) + go func() { + ch <- 0 + wg.Done() + panic("should not be reached") + }() + wg.Wait() + <-ch + panic("should not be reached") + }() + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) +} + +var ch = make(chan int) + +// No leak should be reported by this test +func NoLeakGlobal() { + prof := pprof.Lookup("goroutineleak") + go func() { + <-ch + }() + // Yield several times to allow the child goroutine to run. + for i := 0; i < yieldCount; i++ { + runtime.Gosched() + } + prof.WriteTo(os.Stdout, 2) +} diff --git a/src/runtime/testdata/testgoroutineleakprofile/stresstests.go b/src/runtime/testdata/testgoroutineleakprofile/stresstests.go new file mode 100644 index 00000000000000..64b535f51c87ef --- /dev/null +++ b/src/runtime/testdata/testgoroutineleakprofile/stresstests.go @@ -0,0 +1,89 @@ +// Copyright 2025 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "io" + "os" + "runtime" + "runtime/pprof" + "sync" + "time" +) + +const spawnGCMaxDepth = 5 + +func init() { + register("SpawnGC", SpawnGC) + register("DaisyChain", DaisyChain) +} + +func spawnGC(i int) { + prof := pprof.Lookup("goroutineleak") + if i == 0 { + return + } + wg := &sync.WaitGroup{} + wg.Add(i + 1) + go func() { + wg.Done() + <-make(chan int) + }() + for j := 0; j < i; j++ { + go func() { + wg.Done() + spawnGC(i - 1) + }() + } + wg.Wait() + runtime.Gosched() + if i == spawnGCMaxDepth { + prof.WriteTo(os.Stdout, 2) + } else { + // We want to concurrently trigger the profile in order to concurrently run + // the GC, but we don't want to stream all the profiles to standard output. + // + // Only output the profile for the root call to spawnGC, and otherwise stream + // the profile outputs to /dev/null to avoid jumbling. + prof.WriteTo(io.Discard, 2) + } +} + +// SpawnGC spawns a tree of goroutine leaks and calls the goroutine leak profiler +// for each node in the tree. It is supposed to stress the goroutine leak profiler +// under a heavily concurrent workload. +func SpawnGC() { + spawnGC(spawnGCMaxDepth) +} + +// DaisyChain spawns a daisy-chain of runnable goroutines. +// +// Each goroutine in the chain creates a new channel and goroutine. +// +// This illustrates a pathological worstcase for the goroutine leak GC complexity, +// as opposed to the regular GC, which is not negatively affected by this pattern. +func DaisyChain() { + prof := pprof.Lookup("goroutineleak") + defer func() { + time.Sleep(time.Second) + prof.WriteTo(os.Stdout, 2) + }() + var chain func(i int, ch chan struct{}) + chain = func(i int, ch chan struct{}) { + if i <= 0 { + go func() { + time.Sleep(time.Hour) + ch <- struct{}{} + }() + return + } + ch2 := make(chan struct{}) + go chain(i-1, ch2) + <-ch2 + ch <- struct{}{} + } + // The channel buffer avoids goroutine leaks. + go chain(1000, make(chan struct{}, 1)) +} diff --git a/src/runtime/testdata/testprog/badtraceback.go b/src/runtime/testdata/testprog/badtraceback.go index 09aa2b877ecf5a..455118a54371d7 100644 --- a/src/runtime/testdata/testprog/badtraceback.go +++ b/src/runtime/testdata/testprog/badtraceback.go @@ -44,6 +44,8 @@ func badLR2(arg int) { lrPtr := (*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&arg)) - lrOff)) *lrPtr = 0xbad + runtime.KeepAlive(lrPtr) // prevent dead store elimination + // Print a backtrace. This should include diagnostics for the // bad return PC and a hex dump. panic("backtrace") diff --git a/src/runtime/testdata/testprog/schedmetrics.go b/src/runtime/testdata/testprog/schedmetrics.go index 6d3f68a848e00a..bc0906330f1a4f 100644 --- a/src/runtime/testdata/testprog/schedmetrics.go +++ b/src/runtime/testdata/testprog/schedmetrics.go @@ -84,7 +84,12 @@ func SchedMetrics() { // threadsSlack is the maximum number of threads left over // from the runtime (sysmon, the template thread, etc.) - const threadsSlack = 4 + // Certain build modes may also cause the creation of additional + // threads through frequent scheduling, like mayMoreStackPreempt. + // A slack of 5 is arbitrary but appears to be enough to cover + // the leftovers plus any inflation from scheduling-heavy build + // modes. + const threadsSlack = 5 // Make sure GC isn't running, since GC workers interfere with // expected counts. 
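The new testdata programs above all follow one pattern when exercising the goroutine leak profile: leak one or more goroutines, yield so they can block, then dump the profile added by this change. Below is a minimal, self-contained sketch of that pattern; the profile name "goroutineleak", the debug=2 dump, and the yield loop are taken from the tests above (the count of 10 mirrors yieldCount in main.go), and the sketch assumes a toolchain that includes this change.

package main

import (
	"os"
	"runtime"
	"runtime/pprof"
)

func main() {
	prof := pprof.Lookup("goroutineleak")
	if prof == nil {
		// The profile only exists on toolchains that include this change.
		return
	}
	go func() {
		// No sender exists for this channel, so the receive blocks forever
		// and the goroutine leaks.
		<-make(chan int)
	}()
	// Yield so the child goroutine runs and blocks before profiling,
	// matching the yieldCount loop used by the testdata programs.
	for i := 0; i < 10; i++ {
		runtime.Gosched()
	}
	// debug=2 emits the human-readable stack dump the tests match against;
	// leaked goroutines are labeled via the new _Gleaked status added in
	// the traceback.go hunk below.
	prof.WriteTo(os.Stdout, 2)
}
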
diff --git a/src/runtime/traceback.go b/src/runtime/traceback.go index 949d48c79a6df5..8882c306edb736 100644 --- a/src/runtime/traceback.go +++ b/src/runtime/traceback.go @@ -429,7 +429,7 @@ func (u *unwinder) resolveInternal(innermost, isSyscall bool) { // gp._defer for a defer corresponding to this function, but that // is hard to do with defer records on the stack during a stack copy.) // Note: the +1 is to offset the -1 that - // stack.go:getStackMap does to back up a return + // (*stkframe).getStackMap does to back up a return // address make sure the pc is in the CALL instruction. } else { frame.continpc = 0 @@ -1206,6 +1206,7 @@ var gStatusStrings = [...]string{ _Gwaiting: "waiting", _Gdead: "dead", _Gcopystack: "copystack", + _Gleaked: "leaked", _Gpreempted: "preempted", } @@ -1226,7 +1227,7 @@ func goroutineheader(gp *g) { } // Override. - if gpstatus == _Gwaiting && gp.waitreason != waitReasonZero { + if (gpstatus == _Gwaiting || gpstatus == _Gleaked) && gp.waitreason != waitReasonZero { status = gp.waitreason.String() } @@ -1245,6 +1246,9 @@ func goroutineheader(gp *g) { } } print(" [", status) + if gpstatus == _Gleaked { + print(" (leaked)") + } if isScan { print(" (scan)") } diff --git a/src/runtime/tracestatus.go b/src/runtime/tracestatus.go index 03ec81fc0262a1..8b5eafd170f488 100644 --- a/src/runtime/tracestatus.go +++ b/src/runtime/tracestatus.go @@ -122,7 +122,7 @@ func goStatusToTraceGoStatus(status uint32, wr waitReason) tracev2.GoStatus { tgs = tracev2.GoRunning case _Gsyscall: tgs = tracev2.GoSyscall - case _Gwaiting, _Gpreempted: + case _Gwaiting, _Gpreempted, _Gleaked: // There are a number of cases where a G might end up in // _Gwaiting but it's actually running in a non-preemptive // state but needs to present itself as preempted to the diff --git a/src/strconv/ftoa_test.go b/src/strconv/ftoa_test.go index 3512ccf58070ec..40faa433a64cf8 100644 --- a/src/strconv/ftoa_test.go +++ b/src/strconv/ftoa_test.go @@ -172,6 +172,11 @@ var ftoatests = []ftoaTest{ {3.999969482421875, 'x', 2, "0x1.00p+02"}, {3.999969482421875, 'x', 1, "0x1.0p+02"}, {3.999969482421875, 'x', 0, "0x1p+02"}, + + // Cases that Java once mishandled, from David Chase. + {1.801439850948199e+16, 'g', -1, "1.801439850948199e+16"}, + {5.960464477539063e-08, 'g', -1, "5.960464477539063e-08"}, + {1.012e-320, 'g', -1, "1.012e-320"}, } func TestFtoa(t *testing.T) { @@ -186,13 +191,20 @@ func TestFtoa(t *testing.T) { t.Error("AppendFloat testN=64", test.f, string(test.fmt), test.prec, "want", "abc"+test.s, "got", string(x)) } if float64(float32(test.f)) == test.f && test.fmt != 'b' { + test_s := test.s + if test.f == 5.960464477539063e-08 { + // This test is an exact float32 but asking for float64 precision in the string. + // (All our other float64-only tests fail to exactness check above.) 
+ test_s = "5.9604645e-08" + continue + } s := FormatFloat(test.f, test.fmt, test.prec, 32) if s != test.s { - t.Error("testN=32", test.f, string(test.fmt), test.prec, "want", test.s, "got", s) + t.Error("testN=32", test.f, string(test.fmt), test.prec, "want", test_s, "got", s) } x := AppendFloat([]byte("abc"), test.f, test.fmt, test.prec, 32) - if string(x) != "abc"+test.s { - t.Error("AppendFloat testN=32", test.f, string(test.fmt), test.prec, "want", "abc"+test.s, "got", string(x)) + if string(x) != "abc"+test_s { + t.Error("AppendFloat testN=32", test.f, string(test.fmt), test.prec, "want", "abc"+test_s, "got", string(x)) } } } diff --git a/src/testing/fstest/testfs.go b/src/testing/fstest/testfs.go index 1fb84b892842cb..72830a09a727bd 100644 --- a/src/testing/fstest/testfs.go +++ b/src/testing/fstest/testfs.go @@ -29,7 +29,7 @@ import ( // The contents of fsys must not change concurrently with TestFS. // // If TestFS finds any misbehaviors, it returns either the first error or a -// list of errors. Use [errors.Is] or [errors.As] to inspect. +// list of errors. Use [errors.Is] or [errors.AsType] to inspect. // // Typical usage inside a test is: // diff --git a/src/testing/fstest/testfs_test.go b/src/testing/fstest/testfs_test.go index d6d6d89b89fdaa..e3d7f1ab44d7b7 100644 --- a/src/testing/fstest/testfs_test.go +++ b/src/testing/fstest/testfs_test.go @@ -105,8 +105,12 @@ func TestTestFSWrappedErrors(t *testing.T) { // TestFS is expected to return a list of errors. // Enforce that the list can be extracted for browsing. - var errs interface{ Unwrap() []error } - if !errors.As(err, &errs) { + type wrapper interface{ + error + Unwrap() []error + } + errs, ok := errors.AsType[wrapper](err) + if !ok { t.Errorf("caller should be able to extract the errors as a list: %#v", err) } else { for _, err := range errs.Unwrap() { diff --git a/src/testing/internal/testdeps/deps.go b/src/testing/internal/testdeps/deps.go index 6f42d4722ca00c..5ab377daeb61ed 100644 --- a/src/testing/internal/testdeps/deps.go +++ b/src/testing/internal/testdeps/deps.go @@ -66,6 +66,12 @@ func (TestDeps) ImportPath() string { return ImportPath } +var ModulePath string + +func (TestDeps) ModulePath() string { + return ModulePath +} + // testLog implements testlog.Interface, logging actions by package os. type testLog struct { mu sync.Mutex diff --git a/src/testing/iotest/reader_test.go b/src/testing/iotest/reader_test.go index 1d222372caf384..cecfbfce4920c4 100644 --- a/src/testing/iotest/reader_test.go +++ b/src/testing/iotest/reader_test.go @@ -238,7 +238,6 @@ func TestErrReader(t *testing.T) { } for _, tt := range cases { - tt := tt t.Run(tt.name, func(t *testing.T) { n, err := ErrReader(tt.err).Read(nil) if err != tt.err { diff --git a/src/testing/synctest/example_test.go b/src/testing/synctest/example_test.go index 843377ea88ef71..a86d87fcecdca1 100644 --- a/src/testing/synctest/example_test.go +++ b/src/testing/synctest/example_test.go @@ -66,7 +66,7 @@ func TestContextAfterFunc(t *testing.T) { cancel() synctest.Wait() if !afterFuncCalled { - t.Fatalf("before context is canceled: AfterFunc not called") + t.Fatalf("after context is canceled: AfterFunc not called") } }) } diff --git a/src/testing/synctest/run.go b/src/testing/synctest/run.go deleted file mode 100644 index 2e668ab8634f58..00000000000000 --- a/src/testing/synctest/run.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2025 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build goexperiment.synctest - -package synctest - -import "internal/synctest" - -// Run is deprecated. -// -// Deprecated: Use Test instead. Run will be removed in Go 1.26. -func Run(f func()) { - synctest.Run(f) -} diff --git a/src/testing/synctest/synctest.go b/src/testing/synctest/synctest.go index 707383f9c75499..9f499515b8881a 100644 --- a/src/testing/synctest/synctest.go +++ b/src/testing/synctest/synctest.go @@ -147,7 +147,7 @@ // cancel() // synctest.Wait() // if !afterFuncCalled { -// t.Fatalf("before context is canceled: AfterFunc not called") +// t.Fatalf("after context is canceled: AfterFunc not called") // } // }) // } diff --git a/src/testing/testing.go b/src/testing/testing.go index 3f76446549364a..0d1d08ca89a5e6 100644 --- a/src/testing/testing.go +++ b/src/testing/testing.go @@ -420,7 +420,6 @@ import ( "sync/atomic" "time" "unicode" - "unicode/utf8" _ "unsafe" // for linkname ) @@ -456,6 +455,7 @@ func Init() { // this flag lets "go test" tell the binary to write the files in the directory where // the "go test" command is run. outputDir = flag.String("test.outputdir", "", "write profiles to `dir`") + artifacts = flag.Bool("test.artifacts", false, "store test artifacts in test.,outputdir") // Report as tests are run; default is silent for success. flag.Var(&chatty, "test.v", "verbose: print additional output") count = flag.Uint("test.count", 1, "run tests and benchmarks `n` times") @@ -489,6 +489,7 @@ var ( short *bool failFast *bool outputDir *string + artifacts *bool chatty chattyFlag count *uint coverProfile *string @@ -516,6 +517,7 @@ var ( cpuList []int testlogFile *os.File + artifactDir string numFailed atomic.Uint32 // number of test failures @@ -653,15 +655,17 @@ type common struct { runner string // Function name of tRunner running the test. isParallel bool // Whether the test is parallel. - parent *common - level int // Nesting depth of test or benchmark. - creator []uintptr // If level > 0, the stack trace at the point where the parent called t.Run. - name string // Name of test or benchmark. - start highPrecisionTime // Time test or benchmark started - duration time.Duration - barrier chan bool // To signal parallel subtests they may start. Nil when T.Parallel is not present (B) or not usable (when fuzzing). - signal chan bool // To signal a test is done. - sub []*T // Queue of subtests to be run in parallel. + parent *common + level int // Nesting depth of test or benchmark. + creator []uintptr // If level > 0, the stack trace at the point where the parent called t.Run. + modulePath string + importPath string + name string // Name of test or benchmark. + start highPrecisionTime // Time test or benchmark started + duration time.Duration + barrier chan bool // To signal parallel subtests they may start. Nil when T.Parallel is not present (B) or not usable (when fuzzing). + signal chan bool // To signal a test is done. + sub []*T // Queue of subtests to be run in parallel. lastRaceErrors atomic.Int64 // Max value of race.Errors seen during the test or its subtests. raceErrorLogged atomic.Bool @@ -671,6 +675,10 @@ type common struct { tempDirErr error tempDirSeq int32 + artifactDirOnce sync.Once + artifactDir string + artifactDirErr error + ctx context.Context cancelCtx context.CancelFunc } @@ -879,6 +887,7 @@ func fmtDuration(d time.Duration) string { // TB is the interface common to [T], [B], and [F]. 
type TB interface { + ArtifactDir() string Attr(key, value string) Cleanup(func()) Error(args ...any) @@ -1313,6 +1322,96 @@ func (c *common) Cleanup(f func()) { c.cleanups = append(c.cleanups, fn) } +// ArtifactDir returns a directory in which the test should store output files. +// When the -artifacts flag is provided, this directory is located +// under the output directory. Otherwise, ArtifactDir returns a temporary directory +// that is removed after the test completes. +// +// Each test or subtest within each test package has a unique artifact directory. +// Repeated calls to ArtifactDir in the same test or subtest return the same directory. +// Subtest outputs are not located under the parent test's output directory. +func (c *common) ArtifactDir() string { + c.checkFuzzFn("ArtifactDir") + c.artifactDirOnce.Do(func() { + c.artifactDir, c.artifactDirErr = c.makeArtifactDir() + }) + if c.artifactDirErr != nil { + c.Fatalf("ArtifactDir: %v", c.artifactDirErr) + } + return c.artifactDir +} + +func hashString(s string) (h uint64) { + // FNV, used here to avoid a dependency on maphash. + for i := 0; i < len(s); i++ { + h ^= uint64(s[i]) + h *= 1099511628211 + } + return +} + +// makeArtifactDir creates the artifact directory for a test. +// The artifact directory is: +// +// /_artifacts/// +// +// The test package is the package import path with the module name prefix removed. +// The test name is truncated if too long. +// Special characters are removed from the path. +func (c *common) makeArtifactDir() (string, error) { + if !*artifacts { + return c.makeTempDir() + } + + // If the test name is longer than maxNameSize, truncate it and replace the last + // hashSize bytes with a hash of the full name. + const maxNameSize = 64 + name := strings.ReplaceAll(c.name, "/", "__") + if len(name) > maxNameSize { + h := fmt.Sprintf("%0x", hashString(name)) + name = name[:maxNameSize-len(h)] + h + } + + // Remove the module path prefix from the import path. + pkg := strings.TrimPrefix(c.importPath, c.modulePath+"/") + + // Join with /, not filepath.Join: the import path is /-separated, + // and we don't want removeSymbolsExcept to strip \ separators on Windows. + base := "/" + pkg + "/" + name + base = removeSymbolsExcept(base, "!#$%&()+,-.=@^_{}~ /") + base, err := filepath.Localize(base) + if err != nil { + // This name can't be safely converted into a local filepath. + // Drop it and just use _artifacts/. + base = "" + } + + artifactBase := filepath.Join(artifactDir, base) + if err := os.MkdirAll(artifactBase, 0o777); err != nil { + return "", err + } + dir, err := os.MkdirTemp(artifactBase, "") + if err != nil { + return "", err + } + if c.chatty != nil { + c.chatty.Updatef(c.name, "=== ARTIFACTS %s %v\n", c.name, dir) + } + return dir, nil +} + +func removeSymbolsExcept(s, allowed string) string { + mapper := func(r rune) rune { + if unicode.IsLetter(r) || + unicode.IsNumber(r) || + strings.ContainsRune(allowed, r) { + return r + } + return -1 // disallowed symbol + } + return strings.Map(mapper, s) +} + // TempDir returns a temporary directory for the test to use. // The directory is automatically removed when the test and // all its subtests complete. @@ -1322,6 +1421,14 @@ func (c *common) Cleanup(f func()) { // be created somewhere beneath it. 
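For reference, a minimal sketch of how a test might use the ArtifactDir method defined above; the test name and file name are illustrative only, not part of the change. With `go test -artifacts`, the returned directory sits under the output directory's _artifacts tree and survives the run; without the flag, ArtifactDir falls back to a per-test temporary directory that is removed when the test completes.

package example_test

import (
	"os"
	"path/filepath"
	"testing"
)

func TestWritesArtifact(t *testing.T) {
	// Repeated calls within the same test return the same directory.
	dir := t.ArtifactDir()

	// Anything written here is kept under <outputdir>/_artifacts/...
	// when the test binary runs with -test.artifacts; otherwise it lives
	// in a temporary directory cleaned up after the test.
	path := filepath.Join(dir, "debug.log")
	if err := os.WriteFile(path, []byte("diagnostic output\n"), 0o666); err != nil {
		t.Fatal(err)
	}
	t.Logf("artifact written to %s", path)
}
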
func (c *common) TempDir() string { c.checkFuzzFn("TempDir") + dir, err := c.makeTempDir() + if err != nil { + c.Fatalf("TempDir: %v", err) + } + return dir +} + +func (c *common) makeTempDir() (string, error) { // Use a single parent directory for all the temporary directories // created by a test, each numbered sequentially. c.tempDirMu.Lock() @@ -1332,7 +1439,7 @@ func (c *common) TempDir() string { _, err := os.Stat(c.tempDir) nonExistent = os.IsNotExist(err) if err != nil && !nonExistent { - c.Fatalf("TempDir: %v", err) + return "", err } } @@ -1347,23 +1454,9 @@ func (c *common) TempDir() string { // Drop unusual characters (such as path separators or // characters interacting with globs) from the directory name to // avoid surprising os.MkdirTemp behavior. - mapper := func(r rune) rune { - if r < utf8.RuneSelf { - const allowed = "!#$%&()+,-.=@^_{}~ " - if '0' <= r && r <= '9' || - 'a' <= r && r <= 'z' || - 'A' <= r && r <= 'Z' { - return r - } - if strings.ContainsRune(allowed, r) { - return r - } - } else if unicode.IsLetter(r) || unicode.IsNumber(r) { - return r - } - return -1 - } - pattern = strings.Map(mapper, pattern) + const allowed = "!#$%&()+,-.=@^_{}~ " + pattern = removeSymbolsExcept(pattern, allowed) + c.tempDir, c.tempDirErr = os.MkdirTemp(os.Getenv("GOTMPDIR"), pattern) if c.tempDirErr == nil { c.Cleanup(func() { @@ -1381,14 +1474,14 @@ func (c *common) TempDir() string { c.tempDirMu.Unlock() if c.tempDirErr != nil { - c.Fatalf("TempDir: %v", c.tempDirErr) + return "", c.tempDirErr } dir := fmt.Sprintf("%s%c%03d", c.tempDir, os.PathSeparator, seq) if err := os.Mkdir(dir, 0o777); err != nil { - c.Fatalf("TempDir: %v", err) + return "", err } - return dir + return dir, nil } // removeAll is like os.RemoveAll, but retries Windows "Access is denied." @@ -1971,15 +2064,17 @@ func (t *T) Run(name string, f func(t *T)) bool { ctx, cancelCtx := context.WithCancel(context.Background()) t = &T{ common: common{ - barrier: make(chan bool), - signal: make(chan bool, 1), - name: testName, - parent: &t.common, - level: t.level + 1, - creator: pc[:n], - chatty: t.chatty, - ctx: ctx, - cancelCtx: cancelCtx, + barrier: make(chan bool), + signal: make(chan bool, 1), + name: testName, + modulePath: t.modulePath, + importPath: t.importPath, + parent: &t.common, + level: t.level + 1, + creator: pc[:n], + chatty: t.chatty, + ctx: ctx, + cancelCtx: cancelCtx, }, tstate: t.tstate, } @@ -2140,6 +2235,7 @@ func (f matchStringOnly) MatchString(pat, str string) (bool, error) { return f func (f matchStringOnly) StartCPUProfile(w io.Writer) error { return errMain } func (f matchStringOnly) StopCPUProfile() {} func (f matchStringOnly) WriteProfileTo(string, io.Writer, int) error { return errMain } +func (f matchStringOnly) ModulePath() string { return "" } func (f matchStringOnly) ImportPath() string { return "" } func (f matchStringOnly) StartTestLog(io.Writer) {} func (f matchStringOnly) StopTestLog() error { return errMain } @@ -2193,6 +2289,7 @@ type M struct { // testing/internal/testdeps's TestDeps. 
type testDeps interface { ImportPath() string + ModulePath() string MatchString(pat, str string) (bool, error) SetPanicOnExit0(bool) StartCPUProfile(io.Writer) error @@ -2336,7 +2433,7 @@ func (m *M) Run() (code int) { if !*isFuzzWorker { deadline := m.startAlarm() haveExamples = len(m.examples) > 0 - testRan, testOk := runTests(m.deps.MatchString, m.tests, deadline) + testRan, testOk := runTests(m.deps.ModulePath(), m.deps.ImportPath(), m.deps.MatchString, m.tests, deadline) fuzzTargetsRan, fuzzTargetsOk := runFuzzTests(m.deps, m.fuzzTargets, deadline) exampleRan, exampleOk := runExamples(m.deps.MatchString, m.examples) m.stopAlarm() @@ -2437,14 +2534,14 @@ func RunTests(matchString func(pat, str string) (bool, error), tests []InternalT if *timeout > 0 { deadline = time.Now().Add(*timeout) } - ran, ok := runTests(matchString, tests, deadline) + ran, ok := runTests("", "", matchString, tests, deadline) if !ran && !haveExamples { fmt.Fprintln(os.Stderr, "testing: warning: no tests to run") } return ok } -func runTests(matchString func(pat, str string) (bool, error), tests []InternalTest, deadline time.Time) (ran, ok bool) { +func runTests(modulePath, importPath string, matchString func(pat, str string) (bool, error), tests []InternalTest, deadline time.Time) (ran, ok bool) { ok = true for _, procs := range cpuList { runtime.GOMAXPROCS(procs) @@ -2463,11 +2560,13 @@ func runTests(matchString func(pat, str string) (bool, error), tests []InternalT tstate.deadline = deadline t := &T{ common: common{ - signal: make(chan bool, 1), - barrier: make(chan bool), - w: os.Stdout, - ctx: ctx, - cancelCtx: cancelCtx, + signal: make(chan bool, 1), + barrier: make(chan bool), + w: os.Stdout, + ctx: ctx, + cancelCtx: cancelCtx, + modulePath: modulePath, + importPath: importPath, }, tstate: tstate, } @@ -2536,6 +2635,18 @@ func (m *M) before() { fmt.Fprintf(os.Stderr, "testing: cannot use -test.gocoverdir because test binary was not built with coverage enabled\n") os.Exit(2) } + if *artifacts { + var err error + artifactDir, err = filepath.Abs(toOutputDir("_artifacts")) + if err != nil { + fmt.Fprintf(os.Stderr, "testing: cannot make -test.outputdir absolute: %v\n", err) + os.Exit(2) + } + if err := os.Mkdir(artifactDir, 0o777); err != nil && !errors.Is(err, os.ErrExist) { + fmt.Fprintf(os.Stderr, "testing: %v\n", err) + os.Exit(2) + } + } if *testlog != "" { // Note: Not using toOutputDir. // This file is for use by cmd/go, not users. diff --git a/src/testing/testing_test.go b/src/testing/testing_test.go index cc89e4144e6a78..167f4a0b457635 100644 --- a/src/testing/testing_test.go +++ b/src/testing/testing_test.go @@ -469,7 +469,7 @@ func TestTesting(t *testing.T) { // runTest runs a helper test with -test.v, ignoring its exit status. // runTest both logs and returns the test output. -func runTest(t *testing.T, test string) []byte { +func runTest(t *testing.T, test string, args ...string) []byte { t.Helper() testenv.MustHaveExec(t) @@ -477,6 +477,7 @@ func runTest(t *testing.T, test string) []byte { cmd := testenv.Command(t, testenv.Executable(t), "-test.run=^"+test+"$", "-test.bench="+test, "-test.v", "-test.parallel=2", "-test.benchtime=2x") cmd = testenv.CleanCmdEnv(cmd) cmd.Env = append(cmd.Env, "GO_WANT_HELPER_PROCESS=1") + cmd.Args = append(cmd.Args, args...) 
out, err := cmd.CombinedOutput() t.Logf("%v: %v\n%s", cmd, err, out) @@ -1055,6 +1056,105 @@ func TestAttrInvalid(t *testing.T) { } } +const artifactContent = "It belongs in a museum.\n" + +func TestArtifactDirExample(t *testing.T) { + os.WriteFile(filepath.Join(t.ArtifactDir(), "artifact"), []byte(artifactContent), 0o666) +} + +func TestArtifactDirDefault(t *testing.T) { + tempDir := t.TempDir() + t.Chdir(tempDir) + out := runTest(t, "TestArtifactDirExample", "-test.artifacts") + checkArtifactDir(t, out, "TestArtifactDirExample", tempDir) +} + +func TestArtifactDirSpecified(t *testing.T) { + tempDir := t.TempDir() + out := runTest(t, "TestArtifactDirExample", "-test.artifacts", "-test.outputdir="+tempDir) + checkArtifactDir(t, out, "TestArtifactDirExample", tempDir) +} + +func TestArtifactDirNoArtifacts(t *testing.T) { + t.Chdir(t.TempDir()) + out := string(runTest(t, "TestArtifactDirExample")) + if strings.Contains(out, "=== ARTIFACTS") { + t.Errorf("expected output with no === ARTIFACTS, got\n%q", out) + } + ents, err := os.ReadDir(".") + if err != nil { + t.Fatal(err) + } + for _, e := range ents { + t.Errorf("unexpected file in current directory after test: %v", e.Name()) + } +} + +func TestArtifactDirSubtestExample(t *testing.T) { + t.Run("Subtest", func(t *testing.T) { + os.WriteFile(filepath.Join(t.ArtifactDir(), "artifact"), []byte(artifactContent), 0o666) + }) +} + +func TestArtifactDirInSubtest(t *testing.T) { + tempDir := t.TempDir() + out := runTest(t, "TestArtifactDirSubtestExample/Subtest", "-test.artifacts", "-test.outputdir="+tempDir) + checkArtifactDir(t, out, "TestArtifactDirSubtestExample/Subtest", tempDir) +} + +func TestArtifactDirLongTestNameExample(t *testing.T) { + name := strings.Repeat("x", 256) + t.Run(name, func(t *testing.T) { + os.WriteFile(filepath.Join(t.ArtifactDir(), "artifact"), []byte(artifactContent), 0o666) + }) +} + +func TestArtifactDirWithLongTestName(t *testing.T) { + tempDir := t.TempDir() + out := runTest(t, "TestArtifactDirLongTestNameExample", "-test.artifacts", "-test.outputdir="+tempDir) + checkArtifactDir(t, out, `TestArtifactDirLongTestNameExample/\w+`, tempDir) +} + +func TestArtifactDirConsistent(t *testing.T) { + a := t.ArtifactDir() + b := t.ArtifactDir() + if a != b { + t.Errorf("t.ArtifactDir is not consistent between calls: %q, %q", a, b) + } +} + +func checkArtifactDir(t *testing.T, out []byte, testName, outputDir string) { + t.Helper() + + re := regexp.MustCompile(`=== ARTIFACTS ` + testName + ` ([^\n]+)`) + match := re.FindSubmatch(out) + if match == nil { + t.Fatalf("expected output matching %q, got\n%q", re, out) + } + artifactDir := string(match[1]) + + // Verify that the artifact directory is contained in the expected output directory. 
+ relDir, err := filepath.Rel(outputDir, artifactDir) + if err != nil { + t.Fatal(err) + } + if !filepath.IsLocal(relDir) { + t.Fatalf("want artifact directory contained in %q, got %q", outputDir, artifactDir) + } + + for _, part := range strings.Split(relDir, string(os.PathSeparator)) { + const maxSize = 64 + if len(part) > maxSize { + t.Errorf("artifact directory %q contains component >%v characters long: %q", relDir, maxSize, part) + } + } + + got, err := os.ReadFile(filepath.Join(artifactDir, "artifact")) + if err != nil || string(got) != artifactContent { + t.Errorf("reading artifact in %q: got %q, %v; want %q", artifactDir, got, err, artifactContent) + } +} + func TestBenchmarkBLoopIterationCorrect(t *testing.T) { out := runTest(t, "BenchmarkBLoopPrint") c := bytes.Count(out, []byte("Printing from BenchmarkBLoopPrint")) @@ -1110,3 +1210,7 @@ func BenchmarkBNPrint(b *testing.B) { b.Logf("Printing from BenchmarkBNPrint") } } + +func TestArtifactDir(t *testing.T) { + t.Log(t.ArtifactDir()) +} diff --git a/src/text/template/exec_test.go b/src/text/template/exec_test.go index 65440901a0b4ec..8665f3ad4987c2 100644 --- a/src/text/template/exec_test.go +++ b/src/text/template/exec_test.go @@ -1015,8 +1015,7 @@ func TestExecError_CustomError(t *testing.T) { var b bytes.Buffer err := tmpl.Execute(&b, nil) - var e *CustomError - if !errors.As(err, &e) { + if _, ok := errors.AsType[*CustomError](err); !ok { t.Fatalf("expected custom error; got %s", err) } } diff --git a/src/text/template/funcs.go b/src/text/template/funcs.go index c28c3ea2002d21..30b3243a5a8416 100644 --- a/src/text/template/funcs.go +++ b/src/text/template/funcs.go @@ -22,7 +22,7 @@ import ( // return value evaluates to non-nil during execution, execution terminates and // Execute returns that error. // -// Errors returned by Execute wrap the underlying error; call [errors.As] to +// Errors returned by Execute wrap the underlying error; call [errors.AsType] to // unwrap them. // // When template execution invokes a function with an argument list, that list diff --git a/src/vendor/golang.org/x/net/nettest/conntest.go b/src/vendor/golang.org/x/net/nettest/conntest.go index 4297d408c0477c..8b98dfe21c5d30 100644 --- a/src/vendor/golang.org/x/net/nettest/conntest.go +++ b/src/vendor/golang.org/x/net/nettest/conntest.go @@ -142,7 +142,7 @@ func testPingPong(t *testing.T, c1, c2 net.Conn) { } // testRacyRead tests that it is safe to mutate the input Read buffer -// immediately after cancelation has occurred. +// immediately after cancellation has occurred. func testRacyRead(t *testing.T, c1, c2 net.Conn) { go chunkedCopy(c2, rand.New(rand.NewSource(0))) @@ -170,7 +170,7 @@ func testRacyRead(t *testing.T, c1, c2 net.Conn) { } // testRacyWrite tests that it is safe to mutate the input Write buffer -// immediately after cancelation has occurred. +// immediately after cancellation has occurred. func testRacyWrite(t *testing.T, c1, c2 net.Conn) { go chunkedCopy(io.Discard, c2) @@ -318,7 +318,7 @@ func testCloseTimeout(t *testing.T, c1, c2 net.Conn) { defer wg.Wait() wg.Add(3) - // Test for cancelation upon connection closure. + // Test for cancellation upon connection closure. 
c1.SetDeadline(neverTimeout) go func() { defer wg.Done() diff --git a/src/vendor/golang.org/x/text/unicode/bidi/core.go b/src/vendor/golang.org/x/text/unicode/bidi/core.go index 9d2ae547b5ed4d..fb8273236dde6e 100644 --- a/src/vendor/golang.org/x/text/unicode/bidi/core.go +++ b/src/vendor/golang.org/x/text/unicode/bidi/core.go @@ -427,13 +427,6 @@ type isolatingRunSequence struct { func (i *isolatingRunSequence) Len() int { return len(i.indexes) } -func maxLevel(a, b level) level { - if a > b { - return a - } - return b -} - // Rule X10, second bullet: Determine the start-of-sequence (sos) and end-of-sequence (eos) types, // either L or R, for each isolating run sequence. func (p *paragraph) isolatingRunSequence(indexes []int) *isolatingRunSequence { @@ -474,8 +467,8 @@ func (p *paragraph) isolatingRunSequence(indexes []int) *isolatingRunSequence { indexes: indexes, types: types, level: level, - sos: typeForLevel(maxLevel(prevLevel, level)), - eos: typeForLevel(maxLevel(succLevel, level)), + sos: typeForLevel(max(prevLevel, level)), + eos: typeForLevel(max(succLevel, level)), } } diff --git a/src/vendor/modules.txt b/src/vendor/modules.txt index cf5c27c74579d0..f1e33686ed0626 100644 --- a/src/vendor/modules.txt +++ b/src/vendor/modules.txt @@ -1,4 +1,4 @@ -# golang.org/x/crypto v0.42.0 +# golang.org/x/crypto v0.43.0 ## explicit; go 1.24.0 golang.org/x/crypto/chacha20 golang.org/x/crypto/chacha20poly1305 @@ -6,7 +6,7 @@ golang.org/x/crypto/cryptobyte golang.org/x/crypto/cryptobyte/asn1 golang.org/x/crypto/internal/alias golang.org/x/crypto/internal/poly1305 -# golang.org/x/net v0.44.0 +# golang.org/x/net v0.46.0 ## explicit; go 1.24.0 golang.org/x/net/dns/dnsmessage golang.org/x/net/http/httpguts @@ -15,10 +15,10 @@ golang.org/x/net/http2/hpack golang.org/x/net/idna golang.org/x/net/lif golang.org/x/net/nettest -# golang.org/x/sys v0.36.0 +# golang.org/x/sys v0.37.0 ## explicit; go 1.24.0 golang.org/x/sys/cpu -# golang.org/x/text v0.29.0 +# golang.org/x/text v0.30.0 ## explicit; go 1.24.0 golang.org/x/text/secure/bidirule golang.org/x/text/transform diff --git a/test/codegen/constants.go b/test/codegen/constants.go index 3ce17d0ad3a65b..0935a9e53a9d87 100644 --- a/test/codegen/constants.go +++ b/test/codegen/constants.go @@ -7,7 +7,7 @@ package codegen // A uint16 or sint16 constant shifted left. -func shifted16BitConstants(out [64]uint64) { +func shifted16BitConstants() (out [64]uint64) { // ppc64x: "MOVD\t[$]8193,", "SLD\t[$]27," out[0] = 0x0000010008000000 // ppc64x: "MOVD\t[$]-32767", "SLD\t[$]26," @@ -16,10 +16,11 @@ func shifted16BitConstants(out [64]uint64) { out[2] = 0xFFFF000000000000 // ppc64x: "MOVD\t[$]65535", "SLD\t[$]44," out[3] = 0x0FFFF00000000000 + return } // A contiguous set of 1 bits, potentially wrapping. 
-func contiguousMaskConstants(out [64]uint64) { +func contiguousMaskConstants() (out [64]uint64) { // ppc64x: "MOVD\t[$]-1", "RLDC\tR[0-9]+, [$]44, [$]63," out[0] = 0xFFFFF00000000001 // ppc64x: "MOVD\t[$]-1", "RLDC\tR[0-9]+, [$]43, [$]63," @@ -30,4 +31,5 @@ func contiguousMaskConstants(out [64]uint64) { // ppc64x/power9: "MOVD\t[$]-1", "RLDC\tR[0-9]+, [$]33, [$]63," // ppc64x/power10: "MOVD\t[$]-8589934591," out[3] = 0xFFFFFFFE00000001 + return } diff --git a/test/codegen/floats.go b/test/codegen/floats.go index 29969c8dc08eaa..666c983b56ac04 100644 --- a/test/codegen/floats.go +++ b/test/codegen/floats.go @@ -217,14 +217,36 @@ func Float32Max(a, b float32) float32 { // Constant Optimizations // // ------------------------ // +func Float32ConstantZero() float32 { + // arm64:"FMOVS\tZR," + return 0.0 +} + +func Float32ConstantChipFloat() float32 { + // arm64:"FMOVS\t[$]\\(2\\.25\\)," + return 2.25 +} + func Float32Constant() float32 { + // arm64:"FMOVS\t[$]f32\\.42440000\\(SB\\)" // ppc64x/power8:"FMOVS\t[$]f32\\.42440000\\(SB\\)" // ppc64x/power9:"FMOVS\t[$]f32\\.42440000\\(SB\\)" // ppc64x/power10:"XXSPLTIDP\t[$]1111752704," return 49.0 } +func Float64ConstantZero() float64 { + // arm64:"FMOVD\tZR," + return 0.0 +} + +func Float64ConstantChipFloat() float64 { + // arm64:"FMOVD\t[$]\\(2\\.25\\)," + return 2.25 +} + func Float64Constant() float64 { + // arm64:"FMOVD\t[$]f64\\.4048800000000000\\(SB\\)" // ppc64x/power8:"FMOVD\t[$]f64\\.4048800000000000\\(SB\\)" // ppc64x/power9:"FMOVD\t[$]f64\\.4048800000000000\\(SB\\)" // ppc64x/power10:"XXSPLTIDP\t[$]1111752704," @@ -244,11 +266,12 @@ func Float64DenormalFloat32Constant() float64 { return 0x1p-127 } -func Float64ConstantStore(p *float64) { - // amd64: "MOVQ\t[$]4617801906721357038" +func Float32ConstantStore(p *float32) { + // amd64:"MOVL\t[$]1085133554" *p = 5.432 } -func Float32ConstantStore(p *float32) { - // amd64: "MOVL\t[$]1085133554" + +func Float64ConstantStore(p *float64) { + // amd64:"MOVQ\t[$]4617801906721357038" *p = 5.432 } diff --git a/test/codegen/fuse.go b/test/codegen/fuse.go index 8d6ea3c5c74664..561bac7224728a 100644 --- a/test/codegen/fuse.go +++ b/test/codegen/fuse.go @@ -6,6 +6,8 @@ package codegen +import "math" + // Notes: // - these examples use channels to provide a source of // unknown values that cannot be optimized away @@ -196,6 +198,84 @@ func ui4d(c <-chan uint8) { } } +// -------------------------------------// +// merge NaN checks // +// ------------------------------------ // + +func f64NaNOrPosInf(c <-chan float64) { + // This test assumes IsInf(x, 1) is implemented as x > MaxFloat rather than x == Inf(1). + + // amd64:"JCS",-"JNE",-"JPS",-"JPC" + // riscv64:"FCLASSD",-"FLED",-"FLTD",-"FNED",-"FEQD" + for x := <-c; math.IsNaN(x) || math.IsInf(x, 1); x = <-c { + } +} + +func f64NaNOrNegInf(c <-chan float64) { + // This test assumes IsInf(x, -1) is implemented as x < -MaxFloat rather than x == Inf(-1). 
+ + // amd64:"JCS",-"JNE",-"JPS",-"JPC" + // riscv64:"FCLASSD",-"FLED",-"FLTD",-"FNED",-"FEQD" + for x := <-c; math.IsNaN(x) || math.IsInf(x, -1); x = <-c { + } +} + +func f64NaNOrLtOne(c <-chan float64) { + // amd64:"JCS",-"JNE",-"JPS",-"JPC" + // riscv64:"FLED",-"FLTD",-"FNED",-"FEQD" + for x := <-c; math.IsNaN(x) || x < 1; x = <-c { + } +} + +func f64NaNOrLteOne(c <-chan float64) { + // amd64:"JLS",-"JNE",-"JPS",-"JPC" + // riscv64:"FLTD",-"FLED",-"FNED",-"FEQD" + for x := <-c; x <= 1 || math.IsNaN(x); x = <-c { + } +} + +func f64NaNOrGtOne(c <-chan float64) { + // amd64:"JCS",-"JNE",-"JPS",-"JPC" + // riscv64:"FLED",-"FLTD",-"FNED",-"FEQD" + for x := <-c; math.IsNaN(x) || x > 1; x = <-c { + } +} + +func f64NaNOrGteOne(c <-chan float64) { + // amd64:"JLS",-"JNE",-"JPS",-"JPC" + // riscv64:"FLTD",-"FLED",-"FNED",-"FEQD" + for x := <-c; x >= 1 || math.IsNaN(x); x = <-c { + } +} + +func f32NaNOrLtOne(c <-chan float32) { + // amd64:"JCS",-"JNE",-"JPS",-"JPC" + // riscv64:"FLES",-"FLTS",-"FNES",-"FEQS" + for x := <-c; x < 1 || x != x; x = <-c { + } +} + +func f32NaNOrLteOne(c <-chan float32) { + // amd64:"JLS",-"JNE",-"JPS",-"JPC" + // riscv64:"FLTS",-"FLES",-"FNES",-"FEQS" + for x := <-c; x != x || x <= 1; x = <-c { + } +} + +func f32NaNOrGtOne(c <-chan float32) { + // amd64:"JCS",-"JNE",-"JPS",-"JPC" + // riscv64:"FLES",-"FLTS",-"FNES",-"FEQS" + for x := <-c; x > 1 || x != x; x = <-c { + } +} + +func f32NaNOrGteOne(c <-chan float32) { + // amd64:"JLS",-"JNE",-"JPS",-"JPC" + // riscv64:"FLTS",-"FLES",-"FNES",-"FEQS" + for x := <-c; x != x || x >= 1; x = <-c { + } +} + // ------------------------------------ // // regressions // // ------------------------------------ // diff --git a/test/codegen/generics.go b/test/codegen/generics.go new file mode 100644 index 00000000000000..45c4ca8d6aee61 --- /dev/null +++ b/test/codegen/generics.go @@ -0,0 +1,40 @@ +// asmcheck + +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package codegen + +import "cmp" + +func isNaN[T cmp.Ordered](x T) bool { + return x != x +} + +func compare[T cmp.Ordered](x, y T) int { + // amd64:-"TESTB" + // arm64:-"MOVB" + xNaN := isNaN(x) + yNaN := isNaN(y) + if xNaN { + if yNaN { + return 0 + } + return -1 + } + if yNaN { + return +1 + } + if x < y { + return -1 + } + if x > y { + return +1 + } + return 0 +} + +func usesCompare(a, b int) int { + return compare(a, b) +} diff --git a/test/codegen/math.go b/test/codegen/math.go index eadf9d7d0554db..9ef881a9afc2f5 100644 --- a/test/codegen/math.go +++ b/test/codegen/math.go @@ -1,4 +1,4 @@ -// asmcheck +// asmcheck -gcflags=-d=converthash=qy // Copyright 2018 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style @@ -330,3 +330,48 @@ func nanGenerate32() float32 { // amd64/v3:"VFMADD231SS" return z0 + z1 } + +func outOfBoundsConv(i32 *[2]int32, u32 *[2]uint32, i64 *[2]int64, u64 *[2]uint64) { + // arm64: "FCVTZSDW" + // amd64: "CVTTSD2SL", "CVTSD2SS" + i32[0] = int32(two40()) + // arm64: "FCVTZSDW" + // amd64: "CVTTSD2SL", "CVTSD2SS" + i32[1] = int32(-two40()) + // arm64: "FCVTZSDW" + // amd64: "CVTTSD2SL", "CVTSD2SS" + u32[0] = uint32(two41()) + // on arm64, this uses an explicit <0 comparison, so it constant folds. + // on amd64, this uses an explicit <0 comparison, so it constant folds. 
+ // amd64: "MOVL\t[$]0," + u32[1] = uint32(minus1()) + // arm64: "FCVTZSD" + // amd64: "CVTTSD2SQ" + i64[0] = int64(two80()) + // arm64: "FCVTZSD" + // amd64: "CVTTSD2SQ" + i64[1] = int64(-two80()) + // arm64: "FCVTZUD" + // amd64: "CVTTSD2SQ" + u64[0] = uint64(two81()) + // arm64: "FCVTZUD" + // on amd64, this uses an explicit <0 comparison, so it constant folds. + // amd64: "MOVQ\t[$]0," + u64[1] = uint64(minus1()) +} + +func two40() float64 { + return 1 << 40 +} +func two41() float64 { + return 1 << 41 +} +func two80() float64 { + return 1 << 80 +} +func two81() float64 { + return 1 << 81 +} +func minus1() float64 { + return -1 +} diff --git a/test/codegen/mathbits.go b/test/codegen/mathbits.go index ba5387d2c32121..f8fa374c0af483 100644 --- a/test/codegen/mathbits.go +++ b/test/codegen/mathbits.go @@ -731,7 +731,7 @@ func Add64MPanicOnOverflowGT(a, b [2]uint64) [2]uint64 { // // This is what happened on PPC64 when compiling // crypto/internal/edwards25519/field.feMulGeneric. -func Add64MultipleChains(a, b, c, d [2]uint64) { +func Add64MultipleChains(a, b, c, d [2]uint64) [2]uint64 { var cx, d1, d2 uint64 a1, a2 := a[0], a[1] b1, b2 := b[0], b[1] @@ -748,6 +748,7 @@ func Add64MultipleChains(a, b, c, d [2]uint64) { d2, _ = bits.Add64(c2, d2, cx) d[0] = d1 d[1] = d2 + return d } // --------------- // diff --git a/test/codegen/stack.go b/test/codegen/stack.go index 4e45d68f3816bd..59284ae88862a3 100644 --- a/test/codegen/stack.go +++ b/test/codegen/stack.go @@ -168,3 +168,9 @@ func getp1() *[4]int { func getp2() *[4]int { return nil } + +// Store to an argument without read can be removed. +func storeArg(a [2]int) { + // amd64:-`MOVQ\t\$123,.*\.a\+\d+\(SP\)` + a[1] = 123 +} diff --git a/test/codegen/strings.go b/test/codegen/strings.go index 498c3d398f8bc6..0b3ca7016ffc58 100644 --- a/test/codegen/strings.go +++ b/test/codegen/strings.go @@ -23,7 +23,7 @@ func CountBytes(s []byte) int { func ToByteSlice() []byte { // Issue #24698 // amd64:`LEAQ\ttype:\[3\]uint8` - // amd64:`CALL\truntime\.newobject` + // amd64:`CALL\truntime\.mallocTiny3` // amd64:-`.*runtime.stringtoslicebyte` return []byte("foo") } diff --git a/test/convert5.go b/test/convert5.go new file mode 100644 index 00000000000000..df247ca0b9b238 --- /dev/null +++ b/test/convert5.go @@ -0,0 +1,268 @@ +// run -gcflags=-d=converthash=qy + +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !wasm && !386 && !arm && !mips + +// TODO fix this to work for wasm and 32-bit architectures. +// Doing more than this, however, expands the change. + +package main + +import ( + "fmt" + "runtime" +) + +// This test checks that conversion from floats to (unsigned) 32 and 64-bit +// integers has the same sensible behavior for corner cases, and that the +// conversions to smaller integers agree. Because outliers are platform- +// independent, the "golden test" for smaller integers is more like of +// a "gold-ish test" and subject to change. 
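The tables below spell these corner cases out one value at a time; read together, they assert the saturating behavior restated in the sketch that follows. The sketch only mirrors the expected values in the tables and, like the test itself, assumes the conversion behavior selected under this change with the -d=converthash=qy flags above; the tables remain the authoritative source.

package main

import (
	"fmt"
	"math"
)

//go:noinline
func id(x float64) float64 { return x } // defeat constant folding, as in the test

func main() {
	inf := id(math.Inf(1))
	ninf := id(math.Inf(-1))
	minusOne := id(-1)

	// Expected results encoded by the tables below: out-of-range values
	// saturate at the target type's limits, and negative values convert
	// to 0 for unsigned targets.
	fmt.Println(int32(inf) == math.MaxInt32)   // true
	fmt.Println(int32(ninf) == math.MinInt32)  // true
	fmt.Println(uint32(minusOne) == 0)         // true
	fmt.Println(uint64(inf) == math.MaxUint64) // true
}
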
+ +//go:noinline +func id[T any](x T) T { + return x +} + +//go:noinline +func want[T comparable](name string, x, y T) { + if x != y { + _, _, line, _ := runtime.Caller(1) + fmt.Println("FAIL at line", line, "var =", name, "got =", x, "want =", y) + } +} + +//go:noinline +func log[T comparable](name string, x T) { + fmt.Println(name, x) +} + +const ( + // pX = max positive signed X bit + // nX = min negative signed X bit + // uX = max unsigned X bit + // tX = two to the X + p32 = 2147483647 + n32 = -2147483648 + u32 = 4294967295 + p64 = 9223372036854775807 + n64 = -9223372036854775808 + u64 = 18446744073709551615 + t44 = 1 << 44 +) + +func main() { + one := 1.0 + minus1_32 := id(float32(-1.0)) + minus1_64 := id(float64(-1.0)) + p32_plus4k_plus1 := id(float32(p32 + 4096 + 1)) // want this to be precise and fit in 24 bits mantissa + p64_plus4k_plus1 := id(float64(p64 + 4096 + 1)) // want this to be precise and fit in 53 bits mantissa + n32_minus4k := id(float32(n32 - 4096)) + n64_minus4k := id(float64(n64 - 4096)) + inf_32 := id(float32(one / 0)) + inf_64 := id(float64(one / 0)) + ninf_32 := id(float32(-one / 0)) + ninf_64 := id(float64(-one / 0)) + + // int32 conversions + int32Tests := []struct { + name string + input any // Use any to handle both float32 and float64 + expected int32 + }{ + {"minus1_32", minus1_32, -1}, + {"minus1_64", minus1_64, -1}, + {"p32_plus4k_plus1", p32_plus4k_plus1, p32}, + {"p64_plus4k_plus1", p64_plus4k_plus1, p32}, + {"n32_minus4k", n32_minus4k, n32}, + {"n64_minus4k", n64_minus4k, n32}, + {"inf_32", inf_32, p32}, + {"inf_64", inf_64, p32}, + {"ninf_32", ninf_32, n32}, + {"ninf_64", ninf_64, n32}, + } + + for _, test := range int32Tests { + var converted int32 + switch v := test.input.(type) { + case float32: + converted = int32(v) + case float64: + converted = int32(v) + } + want(test.name, converted, test.expected) + } + + // int64 conversions + int64Tests := []struct { + name string + input any + expected int64 + }{ + {"minus1_32", minus1_32, -1}, + {"minus1_64", minus1_64, -1}, + {"p32_plus4k_plus1", p32_plus4k_plus1, p32 + 4096 + 1}, + {"p64_plus4k_plus1", p64_plus4k_plus1, p64}, + {"n32_minus4k", n32_minus4k, n32 - 4096}, + {"n64_minus4k", n64_minus4k, n64}, + {"inf_32", inf_32, p64}, + {"inf_64", inf_64, p64}, + {"ninf_32", ninf_32, n64}, + {"ninf_64", ninf_64, n64}, + } + + for _, test := range int64Tests { + var converted int64 + switch v := test.input.(type) { + case float32: + converted = int64(v) + case float64: + converted = int64(v) + } + want(test.name, converted, test.expected) + } + + // uint32 conversions + uint32Tests := []struct { + name string + input any + expected uint32 + }{ + {"minus1_32", minus1_32, 0}, + {"minus1_64", minus1_64, 0}, + {"p32_plus4k_plus1", p32_plus4k_plus1, p32 + 4096 + 1}, + {"p64_plus4k_plus1", p64_plus4k_plus1, u32}, + {"n32_minus4k", n32_minus4k, 0}, + {"n64_minus4k", n64_minus4k, 0}, + {"inf_32", inf_32, u32}, + {"inf_64", inf_64, u32}, + {"ninf_32", ninf_32, 0}, + {"ninf_64", ninf_64, 0}, + } + + for _, test := range uint32Tests { + var converted uint32 + switch v := test.input.(type) { + case float32: + converted = uint32(v) + case float64: + converted = uint32(v) + } + want(test.name, converted, test.expected) + } + + u64_plus4k_plus1_64 := id(float64(u64 + 4096 + 1)) + u64_plust44_plus1_32 := id(float32(u64 + t44 + 1)) + + // uint64 conversions + uint64Tests := []struct { + name string + input any + expected uint64 + }{ + {"minus1_32", minus1_32, 0}, + {"minus1_64", minus1_64, 0}, + {"p32_plus4k_plus1", 
p32_plus4k_plus1, p32 + 4096 + 1}, + {"p64_plus4k_plus1", p64_plus4k_plus1, p64 + 4096 + 1}, + {"n32_minus4k", n32_minus4k, 0}, + {"n64_minus4k", n64_minus4k, 0}, + {"inf_32", inf_32, u64}, + {"inf_64", inf_64, u64}, + {"ninf_32", ninf_32, 0}, + {"ninf_64", ninf_64, 0}, + {"u64_plus4k_plus1_64", u64_plus4k_plus1_64, u64}, + {"u64_plust44_plus1_32", u64_plust44_plus1_32, u64}, + } + + for _, test := range uint64Tests { + var converted uint64 + switch v := test.input.(type) { + case float32: + converted = uint64(v) + case float64: + converted = uint64(v) + } + want(test.name, converted, test.expected) + } + + // for smaller integer types + // TODO the overflow behavior is dubious, maybe we should fix it to be more sensible, e.g. saturating. + fmt.Println("Below this are 'golden' results to check for consistency across platforms. Overflow behavior is not necessarily what we want") + + u8plus2 := id(float64(257)) + p8minus1 := id(float32(126)) + n8plus2 := id(float64(-126)) + n8minusone := id(float32(-129)) + + fmt.Println("\nuint8 conversions") + uint8Tests := []struct { + name string + input any + }{ + {"minus1_32", minus1_32}, + {"minus1_64", minus1_64}, + {"p32_plus4k_plus1", p32_plus4k_plus1}, + {"p64_plus4k_plus1", p64_plus4k_plus1}, + {"n32_minus4k", n32_minus4k}, + {"n64_minus4k", n64_minus4k}, + {"inf_32", inf_32}, + {"inf_64", inf_64}, + {"ninf_32", ninf_32}, + {"ninf_64", ninf_64}, + {"u64_plus4k_plus1_64", u64_plus4k_plus1_64}, + {"u64_plust44_plus1_32", u64_plust44_plus1_32}, + {"u8plus2", u8plus2}, + {"p8minus1", p8minus1}, + {"n8plus2", n8plus2}, + {"n8minusone", n8minusone}, + } + + for _, test := range uint8Tests { + var converted uint8 + switch v := test.input.(type) { + case float32: + converted = uint8(v) + case float64: + converted = uint8(v) + } + log(test.name, converted) + } + + fmt.Println("\nint8 conversions") + int8Tests := []struct { + name string + input any + }{ + {"minus1_32", minus1_32}, + {"minus1_64", minus1_64}, + {"p32_plus4k_plus1", p32_plus4k_plus1}, + {"p64_plus4k_plus1", p64_plus4k_plus1}, + {"n32_minus4k", n32_minus4k}, + {"n64_minus4k", n64_minus4k}, + {"inf_32", inf_32}, + {"inf_64", inf_64}, + {"ninf_32", ninf_32}, + {"ninf_64", ninf_64}, + {"u64_plus4k_plus1_64", u64_plus4k_plus1_64}, + {"u64_plust44_plus1_32", u64_plust44_plus1_32}, + {"u8plus2", u8plus2}, + {"p8minus1", p8minus1}, + {"n8plus2", n8plus2}, + {"n8minusone", n8minusone}, + } + + for _, test := range int8Tests { + var converted int8 + switch v := test.input.(type) { + case float32: + converted = int8(v) + case float64: + converted = int8(v) + } + log(test.name, converted) + } + +} diff --git a/test/convert5.out b/test/convert5.out new file mode 100644 index 00000000000000..47a8af67f961f2 --- /dev/null +++ b/test/convert5.out @@ -0,0 +1,37 @@ +Below this are 'golden' results to check for consistency across platforms. 
Overflow behavior is not necessarily what we want + +uint8 conversions +minus1_32 255 +minus1_64 255 +p32_plus4k_plus1 255 +p64_plus4k_plus1 255 +n32_minus4k 0 +n64_minus4k 0 +inf_32 255 +inf_64 255 +ninf_32 0 +ninf_64 0 +u64_plus4k_plus1_64 255 +u64_plust44_plus1_32 255 +u8plus2 1 +p8minus1 126 +n8plus2 130 +n8minusone 127 + +int8 conversions +minus1_32 -1 +minus1_64 -1 +p32_plus4k_plus1 -1 +p64_plus4k_plus1 -1 +n32_minus4k 0 +n64_minus4k 0 +inf_32 -1 +inf_64 -1 +ninf_32 0 +ninf_64 0 +u64_plus4k_plus1_64 -1 +u64_plust44_plus1_32 -1 +u8plus2 1 +p8minus1 126 +n8plus2 -126 +n8minusone 127 diff --git a/test/devirtualization.go b/test/devirtualization.go new file mode 100644 index 00000000000000..edabb94108d02d --- /dev/null +++ b/test/devirtualization.go @@ -0,0 +1,1283 @@ +// errorcheck -0 -m + +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package escape + +type M interface{ M() } + +type A interface{ A() } + +type C interface{ C() } + +type Impl struct{} + +func (*Impl) M() {} // ERROR "can inline \(\*Impl\).M$" + +func (*Impl) A() {} // ERROR "can inline \(\*Impl\).A$" + +type Impl2 struct{} + +func (*Impl2) M() {} // ERROR "can inline \(\*Impl2\).M$" + +func (*Impl2) A() {} // ERROR "can inline \(\*Impl2\).A$" + +type CImpl struct{} + +func (CImpl) C() {} // ERROR "can inline CImpl.C$" + +func typeAsserts() { + var a M = &Impl{} // ERROR "&Impl{} does not escape$" + + a.(M).M() // ERROR "devirtualizing a.\(M\).M to \*Impl$" "inlining call to \(\*Impl\).M" + a.(A).A() // ERROR "devirtualizing a.\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A" + a.(*Impl).M() // ERROR "inlining call to \(\*Impl\).M" + a.(*Impl).A() // ERROR "inlining call to \(\*Impl\).A" + + v := a.(M) + v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M" + v.(A).A() // ERROR "devirtualizing v.\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A" + v.(*Impl).A() // ERROR "inlining call to \(\*Impl\).A" + v.(*Impl).M() // ERROR "inlining call to \(\*Impl\).M" + + v2 := a.(A) + v2.A() // ERROR "devirtualizing v2.A to \*Impl$" "inlining call to \(\*Impl\).A" + v2.(M).M() // ERROR "devirtualizing v2.\(M\).M to \*Impl$" "inlining call to \(\*Impl\).M" + v2.(*Impl).A() // ERROR "inlining call to \(\*Impl\).A" + v2.(*Impl).M() // ERROR "inlining call to \(\*Impl\).M" + + a.(M).(A).A() // ERROR "devirtualizing a.\(M\).\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A" + a.(A).(M).M() // ERROR "devirtualizing a.\(A\).\(M\).M to \*Impl$" "inlining call to \(\*Impl\).M" + + a.(M).(A).(*Impl).A() // ERROR "inlining call to \(\*Impl\).A" + a.(A).(M).(*Impl).M() // ERROR "inlining call to \(\*Impl\).M" + + any(a).(M).M() // ERROR "devirtualizing any\(a\).\(M\).M to \*Impl$" "inlining call to \(\*Impl\).M" + any(a).(A).A() // ERROR "devirtualizing any\(a\).\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A" + any(a).(M).(any).(A).A() // ERROR "devirtualizing any\(a\).\(M\).\(any\).\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A" + + c := any(a) + c.(A).A() // ERROR "devirtualizing c.\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A" + c.(M).M() // ERROR "devirtualizing c.\(M\).M to \*Impl$" "inlining call to \(\*Impl\).M" + + M(a).M() // ERROR "devirtualizing M\(a\).M to \*Impl$" "inlining call to \(\*Impl\).M" + M(M(a)).M() // ERROR "devirtualizing M\(M\(a\)\).M to \*Impl$" "inlining call to \(\*Impl\).M" + + a2 := a.(A) + A(a2).A() // ERROR "devirtualizing A\(a2\).A to \*Impl$" 
"inlining call to \(\*Impl\).A" + A(A(a2)).A() // ERROR "devirtualizing A\(A\(a2\)\).A to \*Impl$" "inlining call to \(\*Impl\).A" + + { + var a C = &CImpl{} // ERROR "&CImpl{} does not escape$" + a.(any).(C).C() // ERROR "devirtualizing a.\(any\).\(C\).C to \*CImpl$" "inlining call to CImpl.C" + a.(any).(*CImpl).C() // ERROR "inlining call to CImpl.C" + } +} + +func typeAssertsWithOkReturn() { + { + var a M = &Impl{} // ERROR "&Impl{} does not escape$" + if v, ok := a.(M); ok { + v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M" + } + } + { + var a M = &Impl{} // ERROR "&Impl{} does not escape$" + if v, ok := a.(A); ok { + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + } + { + var a M = &Impl{} // ERROR "&Impl{} does not escape$" + v, ok := a.(M) + if ok { + v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M" + } + } + { + var a M = &Impl{} // ERROR "&Impl{} does not escape$" + v, ok := a.(A) + if ok { + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + } + { + var a M = &Impl{} // ERROR "&Impl{} does not escape$" + v, ok := a.(*Impl) + if ok { + v.A() // ERROR "inlining call to \(\*Impl\).A" + v.M() // ERROR "inlining call to \(\*Impl\).M" + } + } + { + var a M = &Impl{} // ERROR "&Impl{} does not escape$" + v, _ := a.(M) + v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M" + } + { + var a M = &Impl{} // ERROR "&Impl{} does not escape$" + v, _ := a.(A) + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + var a M = &Impl{} // ERROR "&Impl{} does not escape$" + v, _ := a.(*Impl) + v.A() // ERROR "inlining call to \(\*Impl\).A" + v.M() // ERROR "inlining call to \(\*Impl\).M" + } + { + a := newM() // ERROR "&Impl{} does not escape$" "inlining call to newM" + callA(a) // ERROR "devirtualizing m.\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A" "inlining call to callA" + callIfA(a) // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" "inlining call to callIfA" + } + { + _, a := newM2ret() // ERROR "&Impl{} does not escape$" "inlining call to newM2ret" + callA(a) // ERROR "devirtualizing m.\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A" "inlining call to callA" + callIfA(a) // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" "inlining call to callIfA" + } + { + var a M = &Impl{} // ERROR "&Impl{} does not escape$" + // Note the !ok condition, devirtualizing here is fine. 
+ if v, ok := a.(M); !ok { + v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M" + } + } + { + var a A = newImplNoInline() + if v, ok := a.(M); ok { + v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M" + } + } + { + var impl2InA A = &Impl2{} // ERROR "&Impl2{} does not escape$" + var a A + a, _ = impl2InA.(*Impl) + // a now contains the zero value of *Impl + a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + a := newANoInline() + a.A() + } + { + _, a := newANoInlineRet2() + a.A() + } +} + +func newM() M { // ERROR "can inline newM$" + return &Impl{} // ERROR "&Impl{} escapes to heap$" +} + +func newM2ret() (int, M) { // ERROR "can inline newM2ret$" + return -1, &Impl{} // ERROR "&Impl{} escapes to heap$" +} + +func callA(m M) { // ERROR "can inline callA$" "leaking param: m$" + m.(A).A() +} + +func callIfA(m M) { // ERROR "can inline callIfA$" "leaking param: m$" + if v, ok := m.(A); ok { + v.A() + } +} + +//go:noinline +func newImplNoInline() *Impl { + return &Impl{} // ERROR "&Impl{} escapes to heap$" +} + +//go:noinline +func newImpl2ret2() (string, *Impl2) { + return "str", &Impl2{} // ERROR "&Impl2{} escapes to heap$" +} + +//go:noinline +func newImpl2() *Impl2 { + return &Impl2{} // ERROR "&Impl2{} escapes to heap$" +} + +//go:noinline +func newANoInline() A { + return &Impl{} // ERROR "&Impl{} escapes to heap$" +} + +//go:noinline +func newANoInlineRet2() (string, A) { + return "", &Impl{} // ERROR "&Impl{} escapes to heap$" +} + +func testTypeSwitch() { + { + var v A = &Impl{} // ERROR "&Impl{} does not escape$" + switch v := v.(type) { + case A: + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + case M: + v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M" + } + } + { + var v A = &Impl{} // ERROR "&Impl{} does not escape$" + switch v := v.(type) { + case A: + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + case M: + v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M" + v = &Impl{} // ERROR "&Impl{} does not escape$" + v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M" + } + v.(M).M() // ERROR "devirtualizing v.\(M\).M to \*Impl$" "inlining call to \(\*Impl\).M" + } + { + var v A = &Impl{} // ERROR "&Impl{} escapes to heap$" + switch v1 := v.(type) { + case A: + v1.A() + case M: + v1.M() + v = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + } + } + { + var v A = &Impl{} // ERROR "&Impl{} escapes to heap$" + switch v := v.(type) { + case A: + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + case M: + v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M" + case C: + v.C() + } + } + { + var v A = &Impl{} // ERROR "&Impl{} does not escape$" + switch v := v.(type) { + case M: + v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M" + default: + panic("does not implement M") // ERROR ".does not implement M. 
escapes to heap$" + } + } +} + +func differentTypeAssign() { + { + var a A + a = &Impl{} // ERROR "&Impl{} escapes to heap$" + a = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + a.A() + } + { + a := A(&Impl{}) // ERROR "&Impl{} escapes to heap$" + a = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + a.A() + } + { + a := A(&Impl{}) // ERROR "&Impl{} escapes to heap$" + a.A() + a = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + } + { + a := A(&Impl{}) // ERROR "&Impl{} escapes to heap$" + a = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + var asAny any = a + asAny.(A).A() + } + { + a := A(&Impl{}) // ERROR "&Impl{} escapes to heap$" + var asAny any = a + asAny = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + asAny.(A).A() + } + { + a := A(&Impl{}) // ERROR "&Impl{} escapes to heap$" + var asAny any = a + asAny.(A).A() + asAny = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + var a A + a = &Impl{} // ERROR "&Impl{} escapes to heap$" + a = newImpl2() + a.A() + } + { + var a A + a = &Impl{} // ERROR "&Impl{} escapes to heap$" + _, a = newImpl2ret2() + a.A() + } +} + +func assignWithTypeAssert() { + { + var i1 A = &Impl{} // ERROR "&Impl{} does not escape$" + var i2 A = &Impl2{} // ERROR "&Impl2{} does not escape$" + i1 = i2.(*Impl) // this will panic + i1.A() // ERROR "devirtualizing i1.A to \*Impl$" "inlining call to \(\*Impl\).A" + i2.A() // ERROR "devirtualizing i2.A to \*Impl2$" "inlining call to \(\*Impl2\).A" + } + { + var i1 A = &Impl{} // ERROR "&Impl{} does not escape$" + var i2 A = &Impl2{} // ERROR "&Impl2{} does not escape$" + i1, _ = i2.(*Impl) // i1 is going to be nil + i1.A() // ERROR "devirtualizing i1.A to \*Impl$" "inlining call to \(\*Impl\).A" + i2.A() // ERROR "devirtualizing i2.A to \*Impl2$" "inlining call to \(\*Impl2\).A" + } +} + +func nilIface() { + { + var v A = &Impl{} // ERROR "&Impl{} does not escape$" + v = nil + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + var v A = &Impl{} // ERROR "&Impl{} does not escape$" + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + v = nil + } + { + var nilIface A + var v A = &Impl{} // ERROR "&Impl{} does not escape$" + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + v = nilIface + } + { + var nilIface A + var v A = &Impl{} // ERROR "&Impl{} does not escape$" + v = nilIface + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + var v A + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + v = &Impl{} // ERROR "&Impl{} does not escape$" + } + { + var v A + var v2 A = v + v2.A() // ERROR "devirtualizing v2.A to \*Impl$" "inlining call to \(\*Impl\).A" + v2 = &Impl{} // ERROR "&Impl{} does not escape$" + } + { + var v A + v.A() + } + { + var v A + var v2 A = v + v2.A() + } + { + var v A + var v2 A + v2 = v + v2.A() + } +} + +func longDevirtTest() { + var a interface { + M + A + } = &Impl{} // ERROR "&Impl{} does not escape$" + + { + var b A = a + b.A() // ERROR "devirtualizing b.A to \*Impl$" "inlining call to \(\*Impl\).A" + b.(M).M() // ERROR "devirtualizing b.\(M\).M to \*Impl$" "inlining call to \(\*Impl\).M" + } + { + var b M = a + b.M() // ERROR "devirtualizing b.M to \*Impl$" "inlining call to \(\*Impl\).M" + b.(A).A() // ERROR "devirtualizing b.\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + var b A = a.(M).(A) + b.A() // ERROR "devirtualizing b.A to 
\*Impl$" "inlining call to \(\*Impl\).A" + b.(M).M() // ERROR "devirtualizing b.\(M\).M to \*Impl$" "inlining call to \(\*Impl\).M" + } + { + var b M = a.(A).(M) + b.M() // ERROR "devirtualizing b.M to \*Impl$" "inlining call to \(\*Impl\).M" + b.(A).A() // ERROR "devirtualizing b.\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A" + } + + if v, ok := a.(A); ok { + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + + if v, ok := a.(M); ok { + v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M" + } + + { + var c A = a + + if v, ok := c.(A); ok { + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + + c = &Impl{} // ERROR "&Impl{} does not escape$" + + if v, ok := c.(M); ok { + v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M" + } + + if v, ok := c.(interface { + A + M + }); ok { + v.M() // ERROR "devirtualizing v.M to \*Impl$" "inlining call to \(\*Impl\).M" + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + } +} + +func deferDevirt() { + var a A + defer func() { // ERROR "can inline deferDevirt.func1$" "func literal does not escape$" + a = &Impl{} // ERROR "&Impl{} escapes to heap$" + }() + a = &Impl{} // ERROR "&Impl{} does not escape$" + a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A" +} + +func deferNoDevirt() { + var a A + defer func() { // ERROR "can inline deferNoDevirt.func1$" "func literal does not escape$" + a = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + }() + a = &Impl{} // ERROR "&Impl{} escapes to heap$" + a.A() +} + +//go:noinline +func closureDevirt() { + var a A + func() { // ERROR "func literal does not escape$" + // defer so that it does not lnline. + defer func() {}() // ERROR "can inline closureDevirt.func1.1$" "func literal does not escape$" + a = &Impl{} // ERROR "&Impl{} escapes to heap$" + }() + a = &Impl{} // ERROR "&Impl{} does not escape$" + a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A" +} + +//go:noinline +func closureNoDevirt() { + var a A + func() { // ERROR "func literal does not escape$" + // defer so that it does not lnline. 
+ defer func() {}() // ERROR "can inline closureNoDevirt.func1.1$" "func literal does not escape$" + a = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + }() + a = &Impl{} // ERROR "&Impl{} escapes to heap$" + a.A() +} + +var global = "1" + +func closureDevirt2() { + var a A + a = &Impl{} // ERROR "&Impl{} does not escape$" + c := func() { // ERROR "can inline closureDevirt2.func1$" "func literal does not escape$" + a = &Impl{} // ERROR "&Impl{} escapes to heap$" + } + if global == "1" { + c = func() { // ERROR "can inline closureDevirt2.func2$" "func literal does not escape$" + a = &Impl{} // ERROR "&Impl{} escapes to heap$" + } + } + a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A" + c() +} + +func closureNoDevirt2() { + var a A + a = &Impl{} // ERROR "&Impl{} escapes to heap$" + c := func() { // ERROR "can inline closureNoDevirt2.func1$" "func literal does not escape$" + a = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + } + if global == "1" { + c = func() { // ERROR "can inline closureNoDevirt2.func2$" "func literal does not escape$" + a = &Impl{} // ERROR "&Impl{} escapes to heap$" + } + } + a.A() + c() +} + +//go:noinline +func closureDevirt3() { + var a A = &Impl{} // ERROR "&Impl{} does not escape$" + func() { // ERROR "func literal does not escape$" + // defer so that it does not lnline. + defer func() {}() // ERROR "can inline closureDevirt3.func1.1$" "func literal does not escape$" + a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A" + }() + func() { // ERROR "can inline closureDevirt3.func2$" + a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A" + }() // ERROR "inlining call to closureDevirt3.func2" "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A" +} + +//go:noinline +func closureNoDevirt3() { + var a A = &Impl{} // ERROR "&Impl{} escapes to heap$" + func() { // ERROR "func literal does not escape$" + // defer so that it does not lnline. 
+ defer func() {}() // ERROR "can inline closureNoDevirt3.func1.1$" "func literal does not escape$" + a.A() + }() + func() { // ERROR "can inline closureNoDevirt3.func2$" + a.A() + }() // ERROR "inlining call to closureNoDevirt3.func2" + a = &Impl2{} // ERROR "&Impl2{} escapes to heap$" +} + +//go:noinline +func varDeclaredInClosureReferencesOuter() { + var a A = &Impl{} // ERROR "&Impl{} does not escape$" + func() { // ERROR "func literal does not escape$" + // defer for noinline + defer func() {}() // ERROR "can inline varDeclaredInClosureReferencesOuter.func1.1$" "func literal does not escape$" + var v A = a + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + }() + func() { // ERROR "func literal does not escape$" + // defer for noinline + defer func() {}() // ERROR "can inline varDeclaredInClosureReferencesOuter.func2.1$" "func literal does not escape$" + var v A = a + v = &Impl{} // ERROR "&Impl{} does not escape$" + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + }() + + var b A = &Impl{} // ERROR "&Impl{} escapes to heap$" + func() { // ERROR "func literal does not escape$" + // defer for noinline + defer func() {}() // ERROR "can inline varDeclaredInClosureReferencesOuter.func3.1$" "func literal does not escape$" + var v A = b + v = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + v.A() + }() + func() { // ERROR "func literal does not escape$" + // defer for noinline + defer func() {}() // ERROR "can inline varDeclaredInClosureReferencesOuter.func4.1$" "func literal does not escape$" + var v A = b + v.A() + v = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + }() +} + +//go:noinline +func testNamedReturn0() (v A) { + v = &Impl{} // ERROR "&Impl{} escapes to heap$" + v.A() + return +} + +//go:noinline +func testNamedReturn1() (v A) { + v = &Impl{} // ERROR "&Impl{} escapes to heap$" + v.A() + return &Impl{} // ERROR "&Impl{} escapes to heap$" +} + +func testNamedReturns3() (v A) { + v = &Impl{} // ERROR "&Impl{} escapes to heap$" + defer func() { // ERROR "can inline testNamedReturns3.func1$" "func literal does not escape$" + v.A() + }() + v.A() + return &Impl2{} // ERROR "&Impl2{} escapes to heap$" +} + +var ( + globalImpl = &Impl{} + globalImpl2 = &Impl2{} + globalA A = &Impl{} + globalM M = &Impl{} +) + +func globals() { + { + globalA.A() + globalA.(M).M() + globalM.M() + globalM.(A).A() + + a := globalA + a.A() + a.(M).M() + + m := globalM + m.M() + m.(A).A() + } + { + var a A = &Impl{} // ERROR "&Impl{} does not escape$" + a = globalImpl + a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + var a A = &Impl{} // ERROR "&Impl{} does not escape$" + a = A(globalImpl) + a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + var a A = &Impl{} // ERROR "&Impl{} does not escape$" + a = M(globalImpl).(A) + a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + var a A = &Impl{} // ERROR "&Impl{} does not escape$" + a = globalA.(*Impl) + a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A" + a = globalM.(*Impl) + a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + var a A = &Impl{} // ERROR "&Impl{} escapes to heap$" + a = globalImpl2 + a.A() + } + { + var a A = &Impl{} // ERROR "&Impl{} escapes to heap$" + a = globalA + a.A() + } + { + var a A = &Impl{} // ERROR "&Impl{} escapes to heap$" + a = globalM.(A) + a.A() + } +} + +func mapsDevirt() { + { + m := 
make(map[int]*Impl) // ERROR "make\(map\[int\]\*Impl\) does not escape$" + var v A = m[0] + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + v.(M).M() // ERROR "devirtualizing v.\(M\).M to \*Impl$" "inlining call to \(\*Impl\).M" + } + { + m := make(map[int]*Impl) // ERROR "make\(map\[int\]\*Impl\) does not escape$" + var v A + var ok bool + if v, ok = m[0]; ok { + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + m := make(map[int]*Impl) // ERROR "make\(map\[int\]\*Impl\) does not escape$" + var v A + v, _ = m[0] + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } +} + +func mapsNoDevirt() { + { + m := make(map[int]*Impl) // ERROR "make\(map\[int\]\*Impl\) does not escape$" + var v A = m[0] + v.A() + v = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + v.(M).M() + } + { + m := make(map[int]*Impl) // ERROR "make\(map\[int\]\*Impl\) does not escape$" + var v A + var ok bool + if v, ok = m[0]; ok { + v.A() + } + v = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + v.A() + } + { + m := make(map[int]*Impl) // ERROR "make\(map\[int\]\*Impl\) does not escape$" + var v A = &Impl{} // ERROR "&Impl{} escapes to heap$" + v, _ = m[0] + v = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + v.A() + } + + { + m := make(map[int]A) // ERROR "make\(map\[int\]A\) does not escape$" + var v A = &Impl{} // ERROR "&Impl{} escapes to heap$" + v = m[0] + v.A() + } + { + m := make(map[int]A) // ERROR "make\(map\[int\]A\) does not escape$" + var v A = &Impl{} // ERROR "&Impl{} escapes to heap$" + var ok bool + if v, ok = m[0]; ok { + v.A() + } + v.A() + } + { + m := make(map[int]A) // ERROR "make\(map\[int\]A\) does not escape$" + var v A = &Impl{} // ERROR "&Impl{} escapes to heap$" + v, _ = m[0] + v.A() + } +} + +func chanDevirt() { + { + m := make(chan *Impl) + var v A = <-m + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + m := make(chan *Impl) + var v A + v = <-m + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + m := make(chan *Impl) + var v A + v, _ = <-m + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + m := make(chan *Impl) + var v A + var ok bool + if v, ok = <-m; ok { + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + m := make(chan *Impl) + var v A + var ok bool + if v, ok = <-m; ok { + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + select { + case <-m: + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + case v = <-m: + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + case v, ok = <-m: + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + } +} + +func chanNoDevirt() { + { + m := make(chan *Impl) + var v A = <-m + v = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + v.A() + } + { + m := make(chan *Impl) + var v A + v = <-m + v = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + v.A() + } + { + m := make(chan *Impl) + var v A + v, _ = <-m + v = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + v.A() + } + { + m := make(chan *Impl) + var v A + var ok bool + if v, ok = <-m; ok { + v.A() + } + v = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + v.A() + } + { 
+ m := make(chan *Impl) + var v A = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + var ok bool + if v, ok = <-m; ok { + v.A() + } + } + { + m := make(chan *Impl) + var v A = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + select { + case v = <-m: + v.A() + } + v.A() + } + { + m := make(chan *Impl) + var v A = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + select { + case v, _ = <-m: + v.A() + } + v.A() + } + + { + m := make(chan A) + var v A = &Impl{} // ERROR "&Impl{} escapes to heap$" + v = <-m + v.A() + } + { + m := make(chan A) + var v A = &Impl{} // ERROR "&Impl{} escapes to heap$" + v, _ = <-m + v.A() + } + { + m := make(chan A) + var v A = &Impl{} // ERROR "&Impl{} escapes to heap$" + var ok bool + if v, ok = <-m; ok { + v.A() + } + } + { + m := make(chan A) + var v A = &Impl{} // ERROR "&Impl{} escapes to heap$" + select { + case v = <-m: + v.A() + } + v.A() + } + { + m := make(chan A) + var v A = &Impl{} // ERROR "&Impl{} escapes to heap$" + select { + case v, _ = <-m: + v.A() + } + v.A() + } +} + +func rangeDevirt() { + { + var v A + m := make(map[*Impl]struct{}) // ERROR "make\(map\[\*Impl\]struct {}\) does not escape$" + v = &Impl{} // ERROR "&Impl{} does not escape$" + for v = range m { + } + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + var v A + m := make(map[*Impl]*Impl) // ERROR "make\(map\[\*Impl\]\*Impl\) does not escape$" + v = &Impl{} // ERROR "&Impl{} does not escape$" + for v = range m { + } + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + var v A + m := make(map[*Impl]*Impl) // ERROR "make\(map\[\*Impl\]\*Impl\) does not escape$" + v = &Impl{} // ERROR "&Impl{} does not escape$" + for _, v = range m { + } + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + var v A + m := make(chan *Impl) + v = &Impl{} // ERROR "&Impl{} does not escape$" + for v = range m { + } + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + var v A + m := []*Impl{} // ERROR "\[\]\*Impl{} does not escape$" + v = &Impl{} // ERROR "&Impl{} does not escape$" + for _, v = range m { + } + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + var v A + v = &Impl{} // ERROR "&Impl{} does not escape$" + impl := &Impl{} // ERROR "&Impl{} does not escape$" + i := 0 + for v = impl; i < 10; i++ { + } + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + var v A + v = &Impl{} // ERROR "&Impl{} does not escape$" + impl := &Impl{} // ERROR "&Impl{} does not escape$" + i := 0 + for v = impl; i < 10; i++ { + } + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + var v A + m := [1]*Impl{&Impl{}} // ERROR "&Impl{} does not escape$" + v = &Impl{} // ERROR "&Impl{} does not escape$" + for _, v = range m { + } + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + var v A + m := [1]*Impl{&Impl{}} // ERROR "&Impl{} does not escape$" + v = &Impl{} // ERROR "&Impl{} does not escape$" + for _, v = range &m { + } + v.A() // ERROR "devirtualizing v.A to \*Impl$" "inlining call to \(\*Impl\).A" + } +} + +func rangeNoDevirt() { + { + var v A = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + m := make(map[*Impl]struct{}) // ERROR "make\(map\[\*Impl\]struct {}\) does not escape$" + for v = range m { + } + v.A() + } + { + var v A = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + m := make(map[*Impl]*Impl) // ERROR 
"make\(map\[\*Impl\]\*Impl\) does not escape$" + for v = range m { + } + v.A() + } + { + var v A = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + m := make(map[*Impl]*Impl) // ERROR "make\(map\[\*Impl\]\*Impl\) does not escape$" + for _, v = range m { + } + v.A() + } + { + var v A = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + m := make(chan *Impl) + for v = range m { + } + v.A() + } + { + var v A = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + m := []*Impl{} // ERROR "\[\]\*Impl{} does not escape$" + for _, v = range m { + } + v.A() + } + { + var v A + v = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + impl := &Impl{} // ERROR "&Impl{} escapes to heap$" + i := 0 + for v = impl; i < 10; i++ { + } + v.A() + } + { + var v A + v = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + impl := &Impl{} // ERROR "&Impl{} escapes to heap$" + i := 0 + for v = impl; i < 10; i++ { + } + v.A() + } + { + var v A + m := [1]*Impl{&Impl{}} // ERROR "&Impl{} escapes to heap$" + v = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + for _, v = range m { + } + v.A() + } + { + var v A + m := [1]*Impl{&Impl{}} // ERROR "&Impl{} escapes to heap$" + v = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + for _, v = range &m { + } + v.A() + } + + { + var v A = &Impl{} // ERROR "&Impl{} escapes to heap$" + m := make(map[A]struct{}) // ERROR "make\(map\[A\]struct {}\) does not escape$" + for v = range m { + } + v.A() + } + { + var v A = &Impl{} // ERROR "&Impl{} escapes to heap$" + m := make(map[A]A) // ERROR "make\(map\[A\]A\) does not escape$" + for v = range m { + } + v.A() + } + { + var v A = &Impl{} // ERROR "&Impl{} escapes to heap$" + m := make(map[A]A) // ERROR "make\(map\[A\]A\) does not escape$" + for _, v = range m { + } + v.A() + } + { + var v A = &Impl{} // ERROR "&Impl{} escapes to heap$" + m := make(chan A) + for v = range m { + } + v.A() + } + { + var v A = &Impl{} // ERROR "&Impl{} escapes to heap$" + m := []A{} // ERROR "\[\]A{} does not escape$" + for _, v = range m { + } + v.A() + } + + { + var v A + m := [1]A{&Impl{}} // ERROR "&Impl{} escapes to heap$" + v = &Impl{} // ERROR "&Impl{} escapes to heap$" + for _, v = range m { + } + v.A() + } + { + var v A + m := [1]A{&Impl{}} // ERROR "&Impl{} escapes to heap$" + v = &Impl{} // ERROR "&Impl{} escapes to heap$" + for _, v = range &m { + } + v.A() + } +} + +var globalInt = 1 + +func testIfInit() { + { + var a A = &Impl{} // ERROR "&Impl{} does not escape$" + var i = &Impl{} // ERROR "&Impl{} does not escape$" + if a = i; globalInt == 1 { + a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A" + a.(M).M() // ERROR "devirtualizing a.\(M\).M to \*Impl$" "inlining call to \(\*Impl\).M" + } + { + var a A = &Impl{} // ERROR "&Impl{} escapes to heap$" + var i2 = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + if a = i2; globalInt == 1 { + a.A() + } + a.A() + } +} + +func testSwitchInit() { + { + var a A = &Impl{} // ERROR "&Impl{} does not escape$" + var i = &Impl{} // ERROR "&Impl{} does not escape$" + switch a = i; globalInt { + case 12: + a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A" + a.(M).M() // ERROR "devirtualizing a.\(M\).M to \*Impl$" "inlining call to \(\*Impl\).M" + } + { + var a A = &Impl{} // ERROR "&Impl{} escapes to heap$" + var i2 = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + switch a = i2; globalInt { + case 12: + 
a.A() + } + a.A() + } +} + +type implWrapper Impl + +func (implWrapper) A() {} // ERROR "can inline implWrapper.A$" + +//go:noinline +func devirtWrapperType() { + { + i := &Impl{} // ERROR "&Impl{} does not escape$" + // This is an OCONVNOP, so we have to be careful, not to devirtualize it to Impl.A. + var a A = (*implWrapper)(i) + a.A() // ERROR "devirtualizing a.A to \*implWrapper$" "inlining call to implWrapper.A" + } + { + i := Impl{} + // This is an OCONVNOP, so we have to be careful, not to devirtualize it to Impl.A. + var a A = (implWrapper)(i) // ERROR "implWrapper\(i\) does not escape$" + a.A() // ERROR "devirtualizing a.A to implWrapper$" "inlining call to implWrapper.A" + } + { + type anyWrapper any + var foo any = &Impl{} // ERROR "&Impl\{\} does not escape" + var bar anyWrapper = foo + bar.(M).M() // ERROR "devirtualizing bar\.\(M\).M to \*Impl" "inlining call to \(\*Impl\)\.M" + } +} + +func selfAssigns() { + { + var a A = &Impl{} // ERROR "&Impl{} does not escape$" + a = a + a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + var a A = &Impl{} // ERROR "&Impl{} does not escape" + var asAny any = a + asAny = asAny + asAny.(A).A() // ERROR "devirtualizing asAny.\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + var a A = &Impl{} // ERROR "&Impl{} does not escape" + var asAny any = a + a = asAny.(A) + asAny.(A).A() // ERROR "devirtualizing asAny.\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A" + a.(A).A() // ERROR "devirtualizing a.\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A" + b := a + b.(A).A() // ERROR "devirtualizing b.\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + var a A = &Impl{} // ERROR "&Impl{} does not escape" + var asAny any = a + asAny = asAny + a = asAny.(A) + asAny = a + asAny.(A).A() // ERROR "devirtualizing asAny.\(A\).A to \*Impl$" "inlining call to \(\*Impl\).A" + asAny.(M).M() // ERROR "devirtualizing asAny.\(M\).M to \*Impl$" "inlining call to \(\*Impl\).M" + } + { + var a A = &Impl{} // ERROR "&Impl{} does not escape" + var asAny A = a + a = asAny.(A) + a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A" + } + { + var a, b, c A + c = &Impl{} // ERROR "&Impl{} does not escape$" + a = c + c = b + b = c + a = b + b = a + c = a + a.A() // ERROR "devirtualizing a.A to \*Impl$" "inlining call to \(\*Impl\).A" + } +} + +func boolNoDevirt() { + { + m := make(map[int]*Impl) // ERROR "make\(map\[int\]\*Impl\) does not escape$" + var v any = &Impl{} // ERROR "&Impl{} escapes to heap$" + _, v = m[0] // ERROR ".autotmp_[0-9]+ escapes to heap$" + v.(A).A() + } + { + m := make(chan *Impl) + var v any = &Impl{} // ERROR "&Impl{} escapes to heap$" + select { + case _, v = <-m: // ERROR ".autotmp_[0-9]+ escapes to heap$" + } + v.(A).A() + } + { + m := make(chan *Impl) + var v any = &Impl{} // ERROR "&Impl{} escapes to heap$" + _, v = <-m // ERROR ".autotmp_[0-9]+ escapes to heap$" + v.(A).A() + } + { + var a any = 4 // ERROR "4 does not escape$" + var v any = &Impl{} // ERROR "&Impl{} escapes to heap$" + _, v = a.(int) // ERROR ".autotmp_[0-9]+ escapes to heap$" + v.(A).A() + } +} + +func addrTaken() { + { + var a A = &Impl{} // ERROR "&Impl{} escapes to heap$" + var ptrA = &a + a.A() + _ = ptrA + } + { + var a A = &Impl{} // ERROR "&Impl{} escapes to heap$" + var ptrA = &a + *ptrA = &Impl{} // ERROR "&Impl{} escapes to heap$" + a.A() + } + { + var a A = &Impl{} // ERROR "&Impl{} escapes to heap$" + var ptrA = &a + *ptrA = &Impl2{} // ERROR "&Impl2{} escapes to heap$" + a.A() 
+ } +} + +func testInvalidAsserts() { + any(0).(interface{ A() }).A() // ERROR "any\(0\) escapes to heap$" + { + var a M = &Impl{} // ERROR "&Impl{} escapes to heap$" + a.(C).C() // this will panic + a.(any).(C).C() // this will panic + } + { + var a C = &CImpl{} // ERROR "&CImpl{} escapes to heap$" + a.(M).M() // this will panic + a.(any).(M).M() // this will panic + } + { + var a C = &CImpl{} // ERROR "&CImpl{} does not escape$" + + // this will panic + a.(M).(*Impl).M() // ERROR "inlining call to \(\*Impl\).M" + + // this will panic + a.(any).(M).(*Impl).M() // ERROR "inlining call to \(\*Impl\).M" + } +} + +type namedBool bool + +func (namedBool) M() {} // ERROR "can inline namedBool.M$" + +//go:noinline +func namedBoolTest() { + m := map[int]int{} // ERROR "map\[int\]int{} does not escape" + var ok namedBool + _, ok = m[5] + var i M = ok // ERROR "ok does not escape" + i.M() // ERROR "devirtualizing i.M to namedBool$" "inlining call to namedBool.M" +} diff --git a/test/devirtualization_nil_panics.go b/test/devirtualization_nil_panics.go new file mode 100644 index 00000000000000..59da454be7f910 --- /dev/null +++ b/test/devirtualization_nil_panics.go @@ -0,0 +1,100 @@ +// run + +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "runtime" + "strings" +) + +type A interface{ A() } + +type Impl struct{} + +func (*Impl) A() {} + +type Impl2 struct{} + +func (*Impl2) A() {} + +func main() { + shouldNilPanic(28, func() { + var v A + v.A() + v = &Impl{} + }) + shouldNilPanic(36, func() { + var v A + defer func() { + v = &Impl{} + }() + v.A() + }) + shouldNilPanic(43, func() { + var v A + f := func() { + v = &Impl{} + } + v.A() + f() + }) + + // Make sure that both devirtualized and non-devirtualized + // variants have the panic at the same line. + shouldNilPanic(55, func() { + var v A + defer func() { + v = &Impl{} + }() + v. // A() is on a separate line + A() + }) + shouldNilPanic(64, func() { + var v A + defer func() { + v = &Impl{} + v = &Impl2{} // assign a different type, such that the call below does not get devirtualized + }() + v. // A() is on a separate line + A() + }) +} + +var cnt = 0 + +func shouldNilPanic(wantLine int, f func()) { + cnt++ + defer func() { + p := recover() + if p == nil { + panic("no nil deref panic") + } + if strings.Contains(fmt.Sprintf("%s", p), "invalid memory address or nil pointer dereference") { + callers := make([]uintptr, 128) + n := runtime.Callers(0, callers) + callers = callers[:n] + + frames := runtime.CallersFrames(callers) + line := -1 + for f, next := frames.Next(); next; f, next = frames.Next() { + if f.Func.Name() == fmt.Sprintf("main.main.func%v", cnt) { + line = f.Line + break + } + } + + if line != wantLine { + panic(fmt.Sprintf("invalid line number in panic = %v; want = %v", line, wantLine)) + } + + return + } + panic(p) + }() + f() +} diff --git a/test/devirtualization_with_type_assertions_interleaved.go b/test/devirtualization_with_type_assertions_interleaved.go new file mode 100644 index 00000000000000..6bad3beb9aa0d2 --- /dev/null +++ b/test/devirtualization_with_type_assertions_interleaved.go @@ -0,0 +1,139 @@ +// errorcheck -0 -m + +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package escape + +type hashIface interface { + Sum() []byte +} + +type cloneableHashIface interface { + hashIface + Clone() hashIface +} + +type hash struct{ state [32]byte } + +func (h *hash) Sum() []byte { // ERROR "can inline \(\*hash\).Sum$" "h does not escape$" + return make([]byte, 32) // ERROR "make\(\[\]byte, 32\) escapes to heap$" +} + +func (h *hash) Clone() hashIface { // ERROR "can inline \(\*hash\).Clone$" "h does not escape$" + c := *h // ERROR "moved to heap: c$" + return &c +} + +type hash2 struct{ state [32]byte } + +func (h *hash2) Sum() []byte { // ERROR "can inline \(\*hash2\).Sum$" "h does not escape$" + return make([]byte, 32) // ERROR "make\(\[\]byte, 32\) escapes to heap$" +} + +func (h *hash2) Clone() hashIface { // ERROR "can inline \(\*hash2\).Clone$" "h does not escape$" + c := *h // ERROR "moved to heap: c$" + return &c +} + +func newHash() hashIface { // ERROR "can inline newHash$" + return &hash{} // ERROR "&hash{} escapes to heap$" +} + +func cloneHash1(h hashIface) hashIface { // ERROR "can inline cloneHash1$" "leaking param: h$" + if h, ok := h.(cloneableHashIface); ok { + return h.Clone() + } + return &hash{} // ERROR "&hash{} escapes to heap$" +} + +func cloneHash2(h hashIface) hashIface { // ERROR "can inline cloneHash2$" "leaking param: h$" + if h, ok := h.(cloneableHashIface); ok { + return h.Clone() + } + return nil +} + +func cloneHash3(h hashIface) hashIface { // ERROR "can inline cloneHash3$" "leaking param: h$" + if h, ok := h.(cloneableHashIface); ok { + return h.Clone() + } + return &hash2{} // ERROR "&hash2{} escapes to heap$" +} + +func cloneHashWithBool1(h hashIface) (hashIface, bool) { // ERROR "can inline cloneHashWithBool1$" "leaking param: h$" + if h, ok := h.(cloneableHashIface); ok { + return h.Clone(), true + } + return &hash{}, false // ERROR "&hash{} escapes to heap$" +} + +func cloneHashWithBool2(h hashIface) (hashIface, bool) { // ERROR "can inline cloneHashWithBool2$" "leaking param: h$" + if h, ok := h.(cloneableHashIface); ok { + return h.Clone(), true + } + return nil, false +} + +func cloneHashWithBool3(h hashIface) (hashIface, bool) { // ERROR "can inline cloneHashWithBool3$" "leaking param: h$" + if h, ok := h.(cloneableHashIface); ok { + return h.Clone(), true + } + return &hash2{}, false // ERROR "&hash2{} escapes to heap$" +} + +func interleavedWithTypeAssertions() { + h1 := newHash() // ERROR "&hash{} does not escape$" "inlining call to newHash" + _ = h1.Sum() // ERROR "devirtualizing h1.Sum to \*hash$" "inlining call to \(\*hash\).Sum" "make\(\[\]byte, 32\) does not escape$" + + h2 := cloneHash1(h1) // ERROR "&hash{} does not escape$" "devirtualizing h.Clone to \*hash$" "inlining call to \(\*hash\).Clone" "inlining call to cloneHash1" + _ = h2.Sum() // ERROR "devirtualizing h2.Sum to \*hash$" "inlining call to \(\*hash\).Sum" "make\(\[\]byte, 32\) does not escape$" + + h3 := cloneHash2(h1) // ERROR "devirtualizing h.Clone to \*hash$" "inlining call to \(\*hash\).Clone" "inlining call to cloneHash2" + _ = h3.Sum() // ERROR "devirtualizing h3.Sum to \*hash$" "inlining call to \(\*hash\).Sum" "make\(\[\]byte, 32\) does not escape$" + + h4 := cloneHash3(h1) // ERROR "&hash2{} escapes to heap$" "devirtualizing h.Clone to \*hash$" "inlining call to \(\*hash\).Clone" "inlining call to cloneHash3" "moved to heap: c$" + _ = h4.Sum() + + h5, _ := cloneHashWithBool1(h1) // ERROR "&hash{} does not escape$" "devirtualizing h.Clone to \*hash$" "inlining call to \(\*hash\).Clone" "inlining call to cloneHashWithBool1" + _ = 
h5.Sum() // ERROR "devirtualizing h5.Sum to \*hash$" "inlining call to \(\*hash\).Sum" "make\(\[\]byte, 32\) does not escape$" + + h6, _ := cloneHashWithBool2(h1) // ERROR "devirtualizing h.Clone to \*hash$" "inlining call to \(\*hash\).Clone" "inlining call to cloneHashWithBool2" + _ = h6.Sum() // ERROR "devirtualizing h6.Sum to \*hash$" "inlining call to \(\*hash\).Sum" "make\(\[\]byte, 32\) does not escape$" + + h7, _ := cloneHashWithBool3(h1) // ERROR "&hash2{} escapes to heap$" "devirtualizing h.Clone to \*hash$" "inlining call to \(\*hash\).Clone" "inlining call to cloneHashWithBool3" "moved to heap: c$" + _ = h7.Sum() +} + +type cloneableHashError interface { + hashIface + Clone() (hashIface, error) +} + +type hash3 struct{ state [32]byte } + +func newHash3() hashIface { // ERROR "can inline newHash3$" + return &hash3{} // ERROR "&hash3{} escapes to heap$" +} + +func (h *hash3) Sum() []byte { // ERROR "can inline \(\*hash3\).Sum$" "h does not escape$" + return make([]byte, 32) // ERROR "make\(\[\]byte, 32\) escapes to heap$" +} + +func (h *hash3) Clone() (hashIface, error) { // ERROR "can inline \(\*hash3\).Clone$" "h does not escape$" + c := *h // ERROR "moved to heap: c$" + return &c, nil +} + +func interleavedCloneableHashError() { + h1 := newHash3() // ERROR "&hash3{} does not escape$" "inlining call to newHash3" + _ = h1.Sum() // ERROR "devirtualizing h1.Sum to \*hash3$" "inlining call to \(\*hash3\).Sum" "make\(\[\]byte, 32\) does not escape$" + + if h1, ok := h1.(cloneableHashError); ok { + h2, err := h1.Clone() // ERROR "devirtualizing h1.Clone to \*hash3$" "inlining call to \(\*hash3\).Clone" + if err == nil { + _ = h2.Sum() // ERROR "devirtualizing h2.Sum to \*hash3$" "inlining call to \(\*hash3\).Sum" "make\(\[\]byte, 32\) does not escape$" + } + } +} diff --git a/test/escape_make_non_const.go b/test/escape_make_non_const.go index 7a9b28d5e37a7e..11854ac4f4731a 100644 --- a/test/escape_make_non_const.go +++ b/test/escape_make_non_const.go @@ -106,3 +106,18 @@ type m struct { func newM(l int) m { // ERROR "can inline" return m{make(map[string]int, l)} // ERROR "make.*escapes to heap" } + +//go:noinline +func testLenOfSliceLit() { + ints := []int{0, 1, 2, 3, 4, 5} // ERROR "\[\]int\{\.\.\.\} does not escape"' + _ = make([]int, len(ints)) // ERROR "make\(\[\]int, 6\) does not escape" + _ = allocLenOf(ints) // ERROR "inlining call", "make\(\[\]int, 6\) does not escape" + + _ = make([]int, 2, len(ints)) // ERROR "make\(\[\]int, 2, 6\) does not escape" + _ = make([]int, len(ints), 2) // ERROR "make\(\[\]int, len\(ints\), 2\) does not escape" + _ = make([]int, 10, len(ints)) // ERROR "make\(\[\]int, 10, 6\) does not escape" +} + +func allocLenOf(s []int) []int { // ERROR "can inline" "s does not escape" + return make([]int, len(s)) // ERROR "escapes to heap" +} diff --git a/test/fixedbugs/issue15747.go b/test/fixedbugs/issue15747.go index 92e762c4e923ac..743adb6a8ffb1f 100644 --- a/test/fixedbugs/issue15747.go +++ b/test/fixedbugs/issue15747.go @@ -19,7 +19,7 @@ type T struct{ M string } var b bool -func f1(q *Q, xx []byte) interface{} { // ERROR "live at call to newobject: xx$" "live at entry to f1: xx$" +func f1(q *Q, xx []byte) interface{} { // ERROR "live at call to mallocgcSmallScanNoHeaderSC[0-9]+: xx$" "live at entry to f1: xx$" // xx was copied from the stack to the heap on the previous line: // xx was live for the first two prints but then it switched to &xx // being live. We should not see plain xx again. 
@@ -36,7 +36,7 @@ func f1(q *Q, xx []byte) interface{} { // ERROR "live at call to newobject: xx$" //go:noinline func f2(d []byte, n int) (odata, res []byte, e interface{}) { // ERROR "live at entry to f2: d$" if n > len(d) { - return d, nil, &T{M: "hello"} // ERROR "live at call to newobject: d" + return d, nil, &T{M: "hello"} // ERROR "live at call to mallocgcSmallScanNoHeaderSC[0-9]+: d" } res = d[:n] odata = d[n:] diff --git a/test/fixedbugs/issue42284.dir/a.go b/test/fixedbugs/issue42284.dir/a.go index ccf54fad54a03c..e55f190d7ee571 100644 --- a/test/fixedbugs/issue42284.dir/a.go +++ b/test/fixedbugs/issue42284.dir/a.go @@ -22,9 +22,8 @@ func g() { h := E() // ERROR "inlining call to E" "T\(0\) does not escape" h.M() // ERROR "devirtualizing h.M to T" "inlining call to T.M" - // BAD: T(0) could be stack allocated. - i := F(T(0)) // ERROR "inlining call to F" "T\(0\) escapes to heap" + i := F(T(0)) // ERROR "inlining call to F" "T\(0\) does not escape" - // Testing that we do NOT devirtualize here: - i.M() + // It is fine that we devirtualize here, as we add an additional nilcheck. + i.M() // ERROR "devirtualizing i.M to T" "inlining call to T.M" } diff --git a/test/fixedbugs/issue42284.dir/b.go b/test/fixedbugs/issue42284.dir/b.go index 559de59184460a..4a0b7cea102e88 100644 --- a/test/fixedbugs/issue42284.dir/b.go +++ b/test/fixedbugs/issue42284.dir/b.go @@ -10,9 +10,8 @@ func g() { h := a.E() // ERROR "inlining call to a.E" "T\(0\) does not escape" h.M() // ERROR "devirtualizing h.M to a.T" "inlining call to a.T.M" - // BAD: T(0) could be stack allocated. - i := a.F(a.T(0)) // ERROR "inlining call to a.F" "a.T\(0\) escapes to heap" + i := a.F(a.T(0)) // ERROR "inlining call to a.F" "a.T\(0\) does not escape" - // Testing that we do NOT devirtualize here: - i.M() + // It is fine that we devirtualize here, as we add an additional nilcheck. + i.M() // ERROR "devirtualizing i.M to a.T" "inlining call to a.T.M" } diff --git a/test/heapsampling.go b/test/heapsampling.go index 741db74f894d45..db93b253c90bab 100644 --- a/test/heapsampling.go +++ b/test/heapsampling.go @@ -48,22 +48,23 @@ func testInterleavedAllocations() error { const iters = 50000 // Sizes of the allocations performed by each experiment. frames := []string{"main.allocInterleaved1", "main.allocInterleaved2", "main.allocInterleaved3"} + leafFrame := "main.allocInterleaved" // Pass if at least one of three experiments has no errors. Use a separate // function for each experiment to identify each experiment in the profile. allocInterleaved1(iters) - if checkAllocations(getMemProfileRecords(), frames[0:1], iters, allocInterleavedSizes) == nil { + if checkAllocations(getMemProfileRecords(), leafFrame, frames[0:1], iters, allocInterleavedSizes) == nil { // Passed on first try, report no error. return nil } allocInterleaved2(iters) - if checkAllocations(getMemProfileRecords(), frames[0:2], iters, allocInterleavedSizes) == nil { + if checkAllocations(getMemProfileRecords(), leafFrame, frames[0:2], iters, allocInterleavedSizes) == nil { // Passed on second try, report no error. return nil } allocInterleaved3(iters) // If it fails a third time, we may be onto something. 
- return checkAllocations(getMemProfileRecords(), frames[0:3], iters, allocInterleavedSizes) + return checkAllocations(getMemProfileRecords(), leafFrame, frames[0:3], iters, allocInterleavedSizes) } var allocInterleavedSizes = []int64{17 * 1024, 1024, 18 * 1024, 512, 16 * 1024, 256} @@ -108,22 +109,23 @@ func testSmallAllocations() error { // Sizes of the allocations performed by each experiment. sizes := []int64{1024, 512, 256} frames := []string{"main.allocSmall1", "main.allocSmall2", "main.allocSmall3"} + leafFrame := "main.allocSmall" // Pass if at least one of three experiments has no errors. Use a separate // function for each experiment to identify each experiment in the profile. allocSmall1(iters) - if checkAllocations(getMemProfileRecords(), frames[0:1], iters, sizes) == nil { + if checkAllocations(getMemProfileRecords(), leafFrame, frames[0:1], iters, sizes) == nil { // Passed on first try, report no error. return nil } allocSmall2(iters) - if checkAllocations(getMemProfileRecords(), frames[0:2], iters, sizes) == nil { + if checkAllocations(getMemProfileRecords(), leafFrame, frames[0:2], iters, sizes) == nil { // Passed on second try, report no error. return nil } allocSmall3(iters) // If it fails a third time, we may be onto something. - return checkAllocations(getMemProfileRecords(), frames[0:3], iters, sizes) + return checkAllocations(getMemProfileRecords(), leafFrame, frames[0:3], iters, sizes) } // allocSmall performs only small allocations for sanity testing. @@ -161,21 +163,21 @@ func allocSmall3(n int) { // Look only at samples that include the named frames, and group the // allocations by their line number. All these allocations are done from // the same leaf function, so their line numbers are the same. -func checkAllocations(records []runtime.MemProfileRecord, frames []string, count int64, size []int64) error { +func checkAllocations(records []runtime.MemProfileRecord, leafFrame string, frames []string, count int64, size []int64) error { objectsPerLine := map[int][]int64{} bytesPerLine := map[int][]int64{} totalCount := []int64{} // Compute the line number of the first allocation. All the // allocations are from the same leaf, so pick the first one. var firstLine int - for ln := range allocObjects(records, frames[0]) { + for ln := range allocObjects(records, leafFrame, frames[0]) { if firstLine == 0 || firstLine > ln { firstLine = ln } } for _, frame := range frames { var objectCount int64 - a := allocObjects(records, frame) + a := allocObjects(records, leafFrame, frame) for s := range size { // Allocations of size size[s] should be on line firstLine + s. ln := firstLine + s @@ -258,7 +260,7 @@ type allocStat struct { // allocObjects examines the profile records for samples including the // named function and returns the allocation stats aggregated by // source line number of the allocation (at the leaf frame). 
-func allocObjects(records []runtime.MemProfileRecord, function string) map[int]allocStat { +func allocObjects(records []runtime.MemProfileRecord, leafFrame, function string) map[int]allocStat { a := make(map[int]allocStat) for _, r := range records { var pcs []uintptr @@ -273,7 +275,7 @@ func allocObjects(records []runtime.MemProfileRecord, function string) map[int]a for { frame, more := frames.Next() name := frame.Function - if line == 0 { + if name == leafFrame && line == 0 { line = frame.Line } if name == function { diff --git a/test/live.go b/test/live.go index 6e1a60557c8ad4..56b78ccf8b4af2 100644 --- a/test/live.go +++ b/test/live.go @@ -467,9 +467,9 @@ func f27defer(b bool) { func f27go(b bool) { x := 0 if b { - go call27(func() { x++ }) // ERROR "live at call to newobject: &x$" "live at call to newobject: &x .autotmp_[0-9]+$" "live at call to newproc: &x$" // allocate two closures, the func literal, and the wrapper for go + go call27(func() { x++ }) // ERROR "live at call to mallocgcSmallScanNoHeaderSC[0-9]+: &x$" "live at call to mallocgcSmallScanNoHeaderSC[0-9]+: &x .autotmp_[0-9]+$" "live at call to newproc: &x$" // allocate two closures, the func literal, and the wrapper for go } - go call27(func() { x++ }) // ERROR "live at call to newobject: &x$" "live at call to newobject: .autotmp_[0-9]+$" // allocate two closures, the func literal, and the wrapper for go + go call27(func() { x++ }) // ERROR "live at call to mallocgcSmallScanNoHeaderSC[0-9]+: &x$" "live at call to mallocgcSmallScanNoHeaderSC[0-9]+: .autotmp_[0-9]+$" // allocate two closures, the func literal, and the wrapper for go printnl() } @@ -538,7 +538,7 @@ func f31(b1, b2, b3 bool) { g31(g18()) // ERROR "stack object .autotmp_[0-9]+ \[2\]string$" } if b2 { - h31(g18()) // ERROR "live at call to convT: .autotmp_[0-9]+$" "live at call to newobject: .autotmp_[0-9]+$" + h31(g18()) // ERROR "live at call to convT: .autotmp_[0-9]+$" "live at call to mallocgcSmallScanNoHeaderSC[0-9]+: .autotmp_[0-9]+$" } if b3 { panic(g18()) @@ -665,14 +665,14 @@ func f39a() (x []int) { func f39b() (x [10]*int) { x = [10]*int{} - x[0] = new(int) // ERROR "live at call to newobject: x$" + x[0] = new(int) // ERROR "live at call to mallocTiny[48]: x$" printnl() // ERROR "live at call to printnl: x$" return x } func f39c() (x [10]*int) { x = [10]*int{} - x[0] = new(int) // ERROR "live at call to newobject: x$" + x[0] = new(int) // ERROR "live at call to mallocTiny[48]: x$" printnl() // ERROR "live at call to printnl: x$" return } diff --git a/test/live_regabi.go b/test/live_regabi.go index d2565ba9130cd0..838cbdefad7c5b 100644 --- a/test/live_regabi.go +++ b/test/live_regabi.go @@ -465,9 +465,9 @@ func f27defer(b bool) { func f27go(b bool) { x := 0 if b { - go call27(func() { x++ }) // ERROR "live at call to newobject: &x$" "live at call to newobject: &x .autotmp_[0-9]+$" "live at call to newproc: &x$" // allocate two closures, the func literal, and the wrapper for go + go call27(func() { x++ }) // ERROR "live at call to mallocgcSmallScanNoHeaderSC[0-9]+: &x$" "live at call to mallocgcSmallScanNoHeaderSC[0-9]+: &x .autotmp_[0-9]+$" "live at call to newproc: &x$" // allocate two closures, the func literal, and the wrapper for go } - go call27(func() { x++ }) // ERROR "live at call to newobject: &x$" "live at call to newobject: .autotmp_[0-9]+$" // allocate two closures, the func literal, and the wrapper for go + go call27(func() { x++ }) // ERROR "live at call to mallocgcSmallScanNoHeaderSC[0-9]+: &x$" "live at call to 
mallocgcSmallScanNoHeaderSC[0-9]+: .autotmp_[0-9]+$" // allocate two closures, the func literal, and the wrapper for go printnl() } @@ -536,7 +536,7 @@ func f31(b1, b2, b3 bool) { g31(g18()) // ERROR "stack object .autotmp_[0-9]+ \[2\]string$" } if b2 { - h31(g18()) // ERROR "live at call to convT: .autotmp_[0-9]+$" "live at call to newobject: .autotmp_[0-9]+$" + h31(g18()) // ERROR "live at call to convT: .autotmp_[0-9]+$" "live at call to mallocgcSmallScanNoHeaderSC[0-9]+: .autotmp_[0-9]+$" } if b3 { panic(g18()) @@ -663,14 +663,14 @@ func f39a() (x []int) { func f39b() (x [10]*int) { x = [10]*int{} - x[0] = new(int) // ERROR "live at call to newobject: x$" + x[0] = new(int) // ERROR "live at call to mallocTiny[48]: x$" printnl() // ERROR "live at call to printnl: x$" return x } func f39c() (x [10]*int) { x = [10]*int{} - x[0] = new(int) // ERROR "live at call to newobject: x$" + x[0] = new(int) // ERROR "live at call to mallocTiny[48]: x$" printnl() // ERROR "live at call to printnl: x$" return } diff --git a/test/loopbce.go b/test/loopbce.go index 8bc44ece9455a0..8a58d942361221 100644 --- a/test/loopbce.go +++ b/test/loopbce.go @@ -9,7 +9,7 @@ import "math" func f0a(a []int) int { x := 0 for i := range a { // ERROR "Induction variable: limits \[0,\?\), increment 1$" - x += a[i] // ERROR "(\([0-9]+\) )?Proved IsInBounds$" + x += a[i] // ERROR "Proved IsInBounds$" } return x } @@ -17,7 +17,7 @@ func f0a(a []int) int { func f0b(a []int) int { x := 0 for i := range a { // ERROR "Induction variable: limits \[0,\?\), increment 1$" - b := a[i:] // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" + b := a[i:] // ERROR "Proved IsSliceInBounds$" x += b[0] } return x @@ -26,8 +26,8 @@ func f0b(a []int) int { func f0c(a []int) int { x := 0 for i := range a { // ERROR "Induction variable: limits \[0,\?\), increment 1$" - b := a[:i+1] // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" - x += b[0] // ERROR "(\([0-9]+\) )?Proved IsInBounds$" + b := a[:i+1] // ERROR "Proved IsSliceInBounds$" + x += b[0] // ERROR "Proved IsInBounds$" } return x } @@ -43,7 +43,7 @@ func f1(a []int) int { func f2(a []int) int { x := 0 for i := 1; i < len(a); i++ { // ERROR "Induction variable: limits \[1,\?\), increment 1$" - x += a[i] // ERROR "(\([0-9]+\) )?Proved IsInBounds$" + x += a[i] // ERROR "Proved IsInBounds$" } return x } @@ -51,7 +51,7 @@ func f2(a []int) int { func f4(a [10]int) int { x := 0 for i := 0; i < len(a); i += 2 { // ERROR "Induction variable: limits \[0,8\], increment 2$" - x += a[i] // ERROR "(\([0-9]+\) )?Proved IsInBounds$" + x += a[i] // ERROR "Proved IsInBounds$" } return x } @@ -91,7 +91,7 @@ func f5_int8(a [10]int) int { //go:noinline func f6(a []int) { for i := range a { // ERROR "Induction variable: limits \[0,\?\), increment 1$" - b := a[0:i] // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" + b := a[0:i] // ERROR "Proved IsSliceInBounds$" f6(b) } } @@ -99,7 +99,7 @@ func f6(a []int) { func g0a(a string) int { x := 0 for i := 0; i < len(a); i++ { // ERROR "Induction variable: limits \[0,\?\), increment 1$" - x += int(a[i]) // ERROR "(\([0-9]+\) )?Proved IsInBounds$" + x += int(a[i]) // ERROR "Proved IsInBounds$" } return x } @@ -107,7 +107,7 @@ func g0a(a string) int { func g0b(a string) int { x := 0 for i := 0; len(a) > i; i++ { // ERROR "Induction variable: limits \[0,\?\), increment 1$" - x += int(a[i]) // ERROR "(\([0-9]+\) )?Proved IsInBounds$" + x += int(a[i]) // ERROR "Proved IsInBounds$" } return x } @@ -115,7 +115,7 @@ func g0b(a string) int { func g0c(a string) int { x := 0 for i := 
len(a); i > 0; i-- { // ERROR "Induction variable: limits \(0,\?\], increment 1$"
-		x += int(a[i-1]) // ERROR "(\([0-9]+\) )?Proved IsInBounds$"
+		x += int(a[i-1]) // ERROR "Proved IsInBounds$"
 	}
 	return x
 }
@@ -123,7 +123,7 @@ func g0c(a string) int {
 func g0d(a string) int {
 	x := 0
 	for i := len(a); 0 < i; i-- { // ERROR "Induction variable: limits \(0,\?\], increment 1$"
-		x += int(a[i-1]) // ERROR "(\([0-9]+\) )?Proved IsInBounds$"
+		x += int(a[i-1]) // ERROR "Proved IsInBounds$"
 	}
 	return x
 }
@@ -131,7 +131,7 @@ func g0d(a string) int {
 func g0e(a string) int {
 	x := 0
 	for i := len(a) - 1; i >= 0; i-- { // ERROR "Induction variable: limits \[0,\?\], increment 1$"
-		x += int(a[i]) // ERROR "(\([0-9]+\) )?Proved IsInBounds$"
+		x += int(a[i]) // ERROR "Proved IsInBounds$"
 	}
 	return x
 }
@@ -139,7 +139,7 @@ func g0e(a string) int {
 func g0f(a string) int {
 	x := 0
 	for i := len(a) - 1; 0 <= i; i-- { // ERROR "Induction variable: limits \[0,\?\], increment 1$"
-		x += int(a[i]) // ERROR "(\([0-9]+\) )?Proved IsInBounds$"
+		x += int(a[i]) // ERROR "Proved IsInBounds$"
 	}
 	return x
 }
@@ -148,7 +148,7 @@ func g1() int {
 	a := "evenlength"
 	x := 0
 	for i := 0; i < len(a); i += 2 { // ERROR "Induction variable: limits \[0,8\], increment 2$"
-		x += int(a[i]) // ERROR "(\([0-9]+\) )?Proved IsInBounds$"
+		x += int(a[i]) // ERROR "Proved IsInBounds$"
 	}
 	return x
 }
@@ -158,7 +158,7 @@ func g2() int {
 	x := 0
 	for i := 0; i < len(a); i += 2 { // ERROR "Induction variable: limits \[0,8\], increment 2$"
 		j := i
-		if a[i] == 'e' { // ERROR "(\([0-9]+\) )?Proved IsInBounds$"
+		if a[i] == 'e' { // ERROR "Proved IsInBounds$"
 			j = j + 1
 		}
 		x += int(a[j])
@@ -169,29 +169,29 @@ func g2() int {
 func g3a() {
 	a := "this string has length 25"
 	for i := 0; i < len(a); i += 5 { // ERROR "Induction variable: limits \[0,20\], increment 5$"
-		useString(a[i:]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$"
-		useString(a[:i+3]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$"
-		useString(a[:i+5]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$"
+		useString(a[i:]) // ERROR "Proved IsSliceInBounds$" "Proved slicemask not needed \(by limit\)$"
+		useString(a[:i+3]) // ERROR "Proved IsSliceInBounds$"
+		useString(a[:i+5]) // ERROR "Proved IsSliceInBounds$"
 		useString(a[:i+6])
 	}
 }
 
 func g3b(a string) {
 	for i := 0; i < len(a); i++ { // ERROR "Induction variable: limits \[0,\?\), increment 1$"
-		useString(a[i+1:]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$"
+		useString(a[i+1:]) // ERROR "Proved IsSliceInBounds$"
 	}
 }
 
 func g3c(a string) {
 	for i := 0; i < len(a); i++ { // ERROR "Induction variable: limits \[0,\?\), increment 1$"
-		useString(a[:i+1]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$"
+		useString(a[:i+1]) // ERROR "Proved IsSliceInBounds$"
	}
 }
 
 func h1(a []byte) {
 	c := a[:128]
 	for i := range c { // ERROR "Induction variable: limits \[0,128\), increment 1$"
-		c[i] = byte(i) // ERROR "(\([0-9]+\) )?Proved IsInBounds$"
+		c[i] = byte(i) // ERROR "Proved IsInBounds$"
 	}
 }
 
@@ -208,11 +208,11 @@ func k0(a [100]int) [100]int {
 			continue
 		}
 		a[i-11] = i
-		a[i-10] = i // ERROR "(\([0-9]+\) )?Proved IsInBounds$"
-		a[i-5] = i // ERROR "(\([0-9]+\) )?Proved IsInBounds$"
-		a[i] = i // ERROR "(\([0-9]+\) )?Proved IsInBounds$"
-		a[i+5] = i // ERROR "(\([0-9]+\) )?Proved IsInBounds$"
-		a[i+10] = i // ERROR "(\([0-9]+\) )?Proved IsInBounds$"
+		a[i-10] = i // ERROR "Proved IsInBounds$"
+		a[i-5] = i // ERROR "Proved IsInBounds$"
+		a[i] = i // ERROR "Proved IsInBounds$"
+		a[i+5] = i // ERROR "Proved IsInBounds$"
+		a[i+10] = i // ERROR "Proved IsInBounds$"
 		a[i+11] = i
 	}
 	return a
@@ -225,12 +225,12 @@ func k1(a [100]int) [100]int {
 			continue
 		}
 		useSlice(a[:i-11])
-		useSlice(a[:i-10]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$"
-		useSlice(a[:i-5]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$"
-		useSlice(a[:i]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$"
-		useSlice(a[:i+5]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$"
-		useSlice(a[:i+10]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$"
-		useSlice(a[:i+11]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$"
+		useSlice(a[:i-10]) // ERROR "Proved IsSliceInBounds$"
+		useSlice(a[:i-5]) // ERROR "Proved IsSliceInBounds$"
+		useSlice(a[:i]) // ERROR "Proved IsSliceInBounds$"
+		useSlice(a[:i+5]) // ERROR "Proved IsSliceInBounds$"
+		useSlice(a[:i+10]) // ERROR "Proved IsSliceInBounds$"
+		useSlice(a[:i+11]) // ERROR "Proved IsSliceInBounds$"
 		useSlice(a[:i+12])
 
 	}
@@ -243,13 +243,13 @@ func k2(a [100]int) [100]int {
 			// This is a trick to prohibit sccp to optimize out the following out of bound check
 			continue
 		}
-		useSlice(a[i-11:])
-		useSlice(a[i-10:]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$"
-		useSlice(a[i-5:]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$"
-		useSlice(a[i:]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$"
-		useSlice(a[i+5:]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$"
-		useSlice(a[i+10:]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$"
-		useSlice(a[i+11:]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$"
+		useSlice(a[i-11:]) // ERROR "Proved slicemask not needed \(by limit\)$"
+		useSlice(a[i-10:]) // ERROR "Proved IsSliceInBounds$" "Proved slicemask not needed \(by limit\)$"
+		useSlice(a[i-5:]) // ERROR "Proved IsSliceInBounds$" "Proved slicemask not needed \(by limit\)$"
+		useSlice(a[i:]) // ERROR "Proved IsSliceInBounds$" "Proved slicemask not needed \(by limit\)$"
+		useSlice(a[i+5:]) // ERROR "Proved IsSliceInBounds$" "Proved slicemask not needed \(by limit\)$"
+		useSlice(a[i+10:]) // ERROR "Proved IsSliceInBounds$" "Proved slicemask not needed \(by limit\)$"
+		useSlice(a[i+11:]) // ERROR "Proved IsSliceInBounds$"
 		useSlice(a[i+12:])
 	}
 	return a
@@ -262,7 +262,7 @@ func k3(a [100]int) [100]int {
 			continue
 		}
 		a[i+9] = i
-		a[i+10] = i // ERROR "(\([0-9]+\) )?Proved IsInBounds$"
+		a[i+10] = i // ERROR "Proved IsInBounds$"
 		a[i+11] = i
 	}
 	return a
@@ -275,7 +275,7 @@ func k3neg(a [100]int) [100]int {
 			continue
 		}
 		a[i+9] = i
-		a[i+10] = i // ERROR "(\([0-9]+\) )?Proved IsInBounds$"
+		a[i+10] = i // ERROR "Proved IsInBounds$"
 		a[i+11] = i
 	}
 	return a
@@ -288,7 +288,7 @@ func k3neg2(a [100]int) [100]int {
 			continue
 		}
 		a[i+9] = i
-		a[i+10] = i // ERROR "(\([0-9]+\) )?Proved IsInBounds$"
+		a[i+10] = i // ERROR "Proved IsInBounds$"
 		a[i+11] = i
 	}
 	return a
@@ -299,7 +299,7 @@ func k4(a [100]int) [100]int {
 	// and it isn't worth adding that special case to prove.
 	min := (-1)<<63 + 1
 	for i := min; i < min+50; i++ { // ERROR "Induction variable: limits \[-9223372036854775807,-9223372036854775757\), increment 1$"
-		a[i-min] = i // ERROR "(\([0-9]+\) )?Proved IsInBounds$"
+		a[i-min] = i // ERROR "Proved IsInBounds$"
 	}
 	return a
 }
@@ -307,8 +307,8 @@ func k4(a [100]int) [100]int {
 func k5(a [100]int) [100]int {
 	max := (1 << 63) - 1
 	for i := max - 50; i < max; i++ { // ERROR "Induction variable: limits \[9223372036854775757,9223372036854775807\), increment 1$"
-		a[i-max+50] = i // ERROR "(\([0-9]+\) )?Proved IsInBounds$"
-		a[i-(max-70)] = i // ERROR "(\([0-9]+\) )?Proved IsInBounds$"
+		a[i-max+50] = i // ERROR "Proved IsInBounds$"
+		a[i-(max-70)] = i // ERROR "Proved IsInBounds$"
 	}
 	return a
 }
@@ -374,22 +374,22 @@ func d4() {
 }
 
 func d5() {
-	for i := int64(math.MinInt64 + 9); i > math.MinInt64+2; i -= 4 { // ERROR "Induction variable: limits \[-9223372036854775803,-9223372036854775799\], increment 4"
+	for i := int64(math.MinInt64 + 9); i > math.MinInt64+2; i -= 4 { // ERROR "Induction variable: limits \[-9223372036854775803,-9223372036854775799\], increment 4$"
 		useString("foo")
 	}
-	for i := int64(math.MinInt64 + 8); i > math.MinInt64+2; i -= 4 { // ERROR "Induction variable: limits \[-9223372036854775804,-9223372036854775800\], increment 4"
+	for i := int64(math.MinInt64 + 8); i > math.MinInt64+2; i -= 4 { // ERROR "Induction variable: limits \[-9223372036854775804,-9223372036854775800\], increment 4$"
 		useString("foo")
 	}
 	for i := int64(math.MinInt64 + 7); i > math.MinInt64+2; i -= 4 {
 		useString("foo")
 	}
-	for i := int64(math.MinInt64 + 6); i > math.MinInt64+2; i -= 4 { // ERROR "Induction variable: limits \[-9223372036854775802,-9223372036854775802\], increment 4"
+	for i := int64(math.MinInt64 + 6); i > math.MinInt64+2; i -= 4 { // ERROR "Induction variable: limits \[-9223372036854775802,-9223372036854775802\], increment 4$"
 		useString("foo")
 	}
-	for i := int64(math.MinInt64 + 9); i >= math.MinInt64+2; i -= 4 { // ERROR "Induction variable: limits \[-9223372036854775803,-9223372036854775799\], increment 4"
+	for i := int64(math.MinInt64 + 9); i >= math.MinInt64+2; i -= 4 { // ERROR "Induction variable: limits \[-9223372036854775803,-9223372036854775799\], increment 4$"
 		useString("foo")
 	}
-	for i := int64(math.MinInt64 + 8); i >= math.MinInt64+2; i -= 4 { // ERROR "Induction variable: limits \[-9223372036854775804,-9223372036854775800\], increment 4"
+	for i := int64(math.MinInt64 + 8); i >= math.MinInt64+2; i -= 4 { // ERROR "Induction variable: limits \[-9223372036854775804,-9223372036854775800\], increment 4$"
 		useString("foo")
 	}
 	for i := int64(math.MinInt64 + 7); i >= math.MinInt64+2; i -= 4 {
@@ -410,23 +410,23 @@ func bce1() {
 		panic("invalid test: modulos should differ")
 	}
 
-	for i := b; i < a; i += z { // ERROR "Induction variable: limits \[-1547,9223372036854772720\], increment 1337"
+	for i := b; i < a; i += z { // ERROR "Induction variable: limits \[-1547,9223372036854772720\], increment 1337$"
 		useString("foobar")
 	}
 }
 
 func nobce2(a string) {
 	for i := int64(0); i < int64(len(a)); i++ { // ERROR "Induction variable: limits \[0,\?\), increment 1$"
-		useString(a[i:]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$"
+		useString(a[i:]) // ERROR "Proved IsSliceInBounds$"
 	}
 	for i := int64(0); i < int64(len(a))-31337; i++ { // ERROR "Induction variable: limits \[0,\?\), increment 1$"
-		useString(a[i:]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$"
+		useString(a[i:]) // ERROR "Proved IsSliceInBounds$"
 	}
-	for i := int64(0); i < int64(len(a))+int64(-1<<63); i++ { // ERROR "Induction variable: limits \[0,\?\), increment 1$" "Disproved Less64"
+	for i := int64(0); i < int64(len(a))+int64(-1<<63); i++ { // ERROR "Disproved Less64$" "Induction variable: limits \[0,\?\), increment 1$"
 		useString(a[i:])
 	}
 	j := int64(len(a)) - 123
-	for i := int64(0); i < j+123+int64(-1<<63); i++ { // ERROR "Induction variable: limits \[0,\?\), increment 1$" "Disproved Less64"
+	for i := int64(0); i < j+123+int64(-1<<63); i++ { // ERROR "Disproved Less64$" "Induction variable: limits \[0,\?\), increment 1$"
 		useString(a[i:])
 	}
 	for i := int64(0); i < j+122+int64(-1<<63); i++ { // ERROR "Induction variable: limits \[0,\?\), increment 1$"
@@ -455,16 +455,16 @@ func issue26116a(a []int) {
 
 func stride1(x *[7]int) int {
 	s := 0
-	for i := 0; i <= 8; i += 3 { // ERROR "Induction variable: limits \[0,6\], increment 3"
-		s += x[i] // ERROR "Proved IsInBounds"
+	for i := 0; i <= 8; i += 3 { // ERROR "Induction variable: limits \[0,6\], increment 3$"
+		s += x[i] // ERROR "Proved IsInBounds$"
 	}
 	return s
 }
 
 func stride2(x *[7]int) int {
 	s := 0
-	for i := 0; i < 9; i += 3 { // ERROR "Induction variable: limits \[0,6\], increment 3"
-		s += x[i] // ERROR "Proved IsInBounds"
+	for i := 0; i < 9; i += 3 { // ERROR "Induction variable: limits \[0,6\], increment 3$"
+		s += x[i] // ERROR "Proved IsInBounds$"
 	}
 	return s
 }
diff --git a/test/prove.go b/test/prove.go
index 70a27865cfd7c3..c48280aa31e7b5 100644
--- a/test/prove.go
+++ b/test/prove.go
@@ -511,10 +511,10 @@ func f19() (e int64, err error) {
 
 func sm1(b []int, x int) {
 	// Test constant argument to slicemask.
-	useSlice(b[2:8]) // ERROR "Proved slicemask not needed$"
+	useSlice(b[2:8]) // optimized away earlier by rewrite
 	// Test non-constant argument with known limits.
 	if cap(b) > 10 {
-		useSlice(b[2:])
+		useSlice(b[2:]) // ERROR "Proved slicemask not needed$"
 	}
 }
 
@@ -773,8 +773,8 @@ func indexGT0(b []byte, n int) {
 func unrollUpExcl(a []int) int {
 	var i, x int
 	for i = 0; i < len(a)-1; i += 2 { // ERROR "Induction variable: limits \[0,\?\), increment 2$"
-		x += a[i] // ERROR "Proved IsInBounds$"
-		x += a[i+1]
+		x += a[i] // ERROR "Proved IsInBounds$"
+		x += a[i+1] // ERROR "Proved IsInBounds( for blocked indexing)?$"
 	}
 	if i == len(a)-1 {
 		x += a[i]
@@ -786,8 +786,8 @@ func unrollUpIncl(a []int) int {
 	var i, x int
 	for i = 0; i <= len(a)-2; i += 2 { // ERROR "Induction variable: limits \[0,\?\], increment 2$"
-		x += a[i] // ERROR "Proved IsInBounds$"
-		x += a[i+1]
+		x += a[i] // ERROR "Proved IsInBounds$"
+		x += a[i+1] // ERROR "Proved IsInBounds( for blocked indexing)?$"
 	}
 	if i == len(a)-1 {
 		x += a[i]
@@ -839,7 +839,7 @@ func unrollExclStepTooLarge(a []int) int {
 	var i, x int
 	for i = 0; i < len(a)-1; i += 3 {
 		x += a[i]
-		x += a[i+1]
+		x += a[i+1] // ERROR "Proved IsInBounds( for blocked indexing)?$"
 	}
 	if i == len(a)-1 {
 		x += a[i]
@@ -852,7 +852,7 @@ func unrollInclStepTooLarge(a []int) int {
 	var i, x int
 	for i = 0; i <= len(a)-2; i += 3 {
 		x += a[i]
-		x += a[i+1]
+		x += a[i+1] // ERROR "Proved IsInBounds( for blocked indexing)?$"
 	}
 	if i == len(a)-1 {
 		x += a[i]
@@ -2342,6 +2342,148 @@ func setCapMaxBasedOnElementSize(x []uint64) int {
 	return 0
 }
 
+func issue75144for(a, b []uint64) bool {
+	if len(a) == len(b) {
+		for len(a) > 4 {
+			a = a[4:] // ERROR "Proved slicemask not needed$" "Proved IsSliceInBounds$"
+			b = b[4:] // ERROR "Proved slicemask not needed$" "Proved IsSliceInBounds$"
+		}
+		if len(a) == len(b) { // ERROR "Proved Eq64$"
+			return true
+		}
+	}
+	return false
+}
+
+func issue75144if(a, b []uint64) bool {
+	if len(a) == len(b) {
+		if len(a) > 4 {
+			a = a[4:] // ERROR "Proved slicemask not needed$" "Proved IsSliceInBounds$"
+			b = b[4:] // ERROR "Proved slicemask not needed$" "Proved IsSliceInBounds$"
+		}
+		if len(a) == len(b) { // ERROR "Proved Eq64$"
+			return true
+		}
+	}
+	return false
+}
+
+func issue75144if2(a, b, c, d []uint64) (r bool) {
+	if len(a) != len(b) || len(c) != len(d) {
+		return
+	}
+	if len(a) <= 4 || len(c) <= 4 {
+		return
+	}
+	if len(a) < len(c) {
+		c = c[4:] // ERROR "Proved slicemask not needed$" "Proved IsSliceInBounds$"
+		d = d[4:] // ERROR "Proved slicemask not needed$" "Proved IsSliceInBounds$"
+	} else {
+		a = a[4:] // ERROR "Proved slicemask not needed$" "Proved IsSliceInBounds$"
+		b = b[4:] // ERROR "Proved slicemask not needed$" "Proved IsSliceInBounds$"
+	}
+	if len(a) == len(c) {
+		return
+	}
+	if len(a) == len(b) { // ERROR "Proved Eq64$"
+		r = true
+	}
+	if len(c) == len(d) { // ERROR "Proved Eq64$"
+		r = true
+	}
+	return
+}
+
+func issue75144forCannot(a, b []uint64) bool {
+	if len(a) == len(b) {
+		for len(a) > 4 {
+			a = a[4:] // ERROR "Proved slicemask not needed$" "Proved IsSliceInBounds$"
+			b = b[4:]
+			for len(a) > 2 {
+				a = a[2:] // ERROR "Proved slicemask not needed$" "Proved IsSliceInBounds$"
+				b = b[2:]
+			}
+		}
+		if len(a) == len(b) {
+			return true
+		}
+	}
+	return false
+}
+
+func issue75144ifCannot(a, b []uint64) bool {
+	if len(a) == len(b) {
+		if len(a) > 4 {
+			a = a[4:] // ERROR "Proved slicemask not needed$" "Proved IsSliceInBounds$"
+			b = b[4:] // ERROR "Proved slicemask not needed$" "Proved IsSliceInBounds$"
+			if len(a) > 2 {
+				a = a[2:] // ERROR "Proved slicemask not needed$" "Proved IsSliceInBounds$"
+				b = b[2:]
+			}
+		}
+		if len(a) == len(b) {
+			return true
+		}
+	}
+	return false
+}
+
+func issue75144ifCannot2(a, b []uint64) bool {
+	if len(a) == len(b) {
+		if len(a) > 4 {
+			a = a[4:] // ERROR "Proved slicemask not needed$" "Proved IsSliceInBounds$"
+			b = b[4:] // ERROR "Proved slicemask not needed$" "Proved IsSliceInBounds$"
+		} else if len(a) > 2 {
+			a = a[2:] // ERROR "Proved slicemask not needed$" "Proved IsSliceInBounds$"
+			b = b[2:] // ERROR "Proved slicemask not needed$" "Proved IsSliceInBounds$"
+		}
+		if len(a) == len(b) {
+			return true
+		}
+	}
+	return false
+}
+
+func issue75144forNot(a, b []uint64) bool {
+	if len(a) == len(b) {
+		for len(a) > 4 {
+			a = a[4:] // ERROR "Proved slicemask not needed$" "Proved IsSliceInBounds$"
+			b = b[3:]
+		}
+		if len(a) == len(b) {
+			return true
+		}
+	}
+	return false
+}
+
+func issue75144forNot2(a, b, c []uint64) bool {
+	if len(a) == len(b) {
+		for len(a) > 4 {
+			a = a[4:] // ERROR "Proved slicemask not needed$" "Proved IsSliceInBounds$"
+			b = c[4:]
+		}
+		if len(a) == len(b) {
+			return true
+		}
+	}
+	return false
+}
+
+func issue75144ifNot(a, b []uint64) bool {
+	if len(a) == len(b) {
+		if len(a) > 4 {
+			a = a[4:] // ERROR "Proved slicemask not needed$" "Proved IsSliceInBounds$"
+		} else {
+			b = b[4:]
+		}
+		if len(a) == len(b) {
+			return true
+		}
+	}
+	return false
+}
+
 //go:noinline
 func useInt(a int) {
 }
diff --git a/test/typeparam/issue46461.go b/test/typeparam/issue46461.go
index 363a87cfe08f10..7e35106c15b42e 100644
--- a/test/typeparam/issue46461.go
+++ b/test/typeparam/issue46461.go
@@ -1,4 +1,4 @@
-// errorcheck
+// compile
 
 // Copyright 2021 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
@@ -6,7 +6,7 @@
 
 package p
 
-type T[U interface{ M() T[U] }] int // ERROR "invalid recursive type: T refers to itself"
+type T[U interface{ M() T[U] }] int
 
 type X int
 
diff --git a/test/typeparam/issue46461b.dir/a.go b/test/typeparam/issue46461b.dir/a.go
index fcb414266d741c..0d53b3e20429cd 100644
--- a/test/typeparam/issue46461b.dir/a.go
+++ b/test/typeparam/issue46461b.dir/a.go
@@ -4,4 +4,4 @@
 
 package a
 
-type T[U interface{ M() int }] int
+type T[U interface{ M() T[U] }] int
diff --git a/test/typeparam/issue46461b.dir/b.go b/test/typeparam/issue46461b.dir/b.go
index a4583257ffd657..3393a375c20570 100644
--- a/test/typeparam/issue46461b.dir/b.go
+++ b/test/typeparam/issue46461b.dir/b.go
@@ -8,6 +8,4 @@ import "./a"
 
 type X int
 
-func (X) M() int { return 0 }
-
-type _ a.T[X]
+func (X) M() a.T[X] { return 0 }
diff --git a/test/typeparam/issue48280.dir/a.go b/test/typeparam/issue48280.dir/a.go
index f66fd30e34ee43..17859e6aa902f0 100644
--- a/test/typeparam/issue48280.dir/a.go
+++ b/test/typeparam/issue48280.dir/a.go
@@ -4,7 +4,7 @@
 
 package a
 
-type I[T any] interface {
+type I[T I[T]] interface {
 	F() T
 }
 
diff --git a/test/typeparam/issue48306.dir/a.go b/test/typeparam/issue48306.dir/a.go
index fdfd86cb6d4a5d..739750b20b35e6 100644
--- a/test/typeparam/issue48306.dir/a.go
+++ b/test/typeparam/issue48306.dir/a.go
@@ -4,6 +4,6 @@
 
 package a
 
-type I[T any] interface {
+type I[T I[T]] interface {
 	F() T
 }
diff --git a/test/uintptrescapes2.go b/test/uintptrescapes2.go
index 656286c0ff2bd7..e111d47fab95c4 100644
--- a/test/uintptrescapes2.go
+++ b/test/uintptrescapes2.go
@@ -33,8 +33,8 @@ func (T) M1(a uintptr) {} // ERROR "escaping uintptr"
 func (T) M2(a ...uintptr) {} // ERROR "escaping ...uintptr"
 
 func TestF1() {
-	var t int // ERROR "moved to heap"
-	F1(uintptr(unsafe.Pointer(&t))) // ERROR "live at call to F1: .?autotmp" "stack object .autotmp_[0-9]+ unsafe.Pointer$"
+	var t int // ERROR "moved to heap"
+	F1(uintptr(unsafe.Pointer(&t))) // ERROR "live at call to F1: .?autotmp" "stack object .autotmp_[0-9]+ unsafe.Pointer$"
 }
 
 func TestF3() {
@@ -49,17 +49,17 @@ func TestM1() {
 }
 
 func TestF2() {
-	var v int // ERROR "moved to heap"
-	F2(0, 1, uintptr(unsafe.Pointer(&v)), 2) // ERROR "live at call to newobject: .?autotmp" "live at call to F2: .?autotmp" "escapes to heap" "stack object .autotmp_[0-9]+ unsafe.Pointer$"
+	var v int // ERROR "moved to heap"
+	F2(0, 1, uintptr(unsafe.Pointer(&v)), 2) // ERROR "live at call to mallocgcSmallNoScanSC[0-9]+: .?autotmp" "live at call to F2: .?autotmp" "escapes to heap" "stack object .autotmp_[0-9]+ unsafe.Pointer$"
 }
 
 func TestF4() {
 	var v2 int // ERROR "moved to heap"
-	F4(0, 1, uintptr(unsafe.Pointer(&v2)), 2) // ERROR "live at call to newobject: .?autotmp" "live at call to F4: .?autotmp" "escapes to heap" "stack object .autotmp_[0-9]+ unsafe.Pointer$"
+	F4(0, 1, uintptr(unsafe.Pointer(&v2)), 2) // ERROR "live at call to mallocgcSmallNoScanSC[0-9]+: .?autotmp" "live at call to F4: .?autotmp" "escapes to heap" "stack object .autotmp_[0-9]+ unsafe.Pointer$"
 }
 
 func TestM2() {
 	var t T
 	var v int // ERROR "moved to heap"
-	t.M2(0, 1, uintptr(unsafe.Pointer(&v)), 2) // ERROR "live at call to newobject: .?autotmp" "live at call to T.M2: .?autotmp" "escapes to heap" "stack object .autotmp_[0-9]+ unsafe.Pointer$"
+	t.M2(0, 1, uintptr(unsafe.Pointer(&v)), 2) // ERROR "live at call to mallocgcSmallNoScanSC[0-9]+: .?autotmp" "live at call to T.M2: .?autotmp" "escapes to heap" "stack object .autotmp_[0-9]+ unsafe.Pointer$"
 }
diff --git a/test/wasmmemsize.dir/main.go b/test/wasmmemsize.dir/main.go
index e3aa5b5e92154c..c51e6b3b0476c9 100644
--- a/test/wasmmemsize.dir/main.go
+++ b/test/wasmmemsize.dir/main.go
@@ -9,17 +9,19 @@ import (
 	"io"
 )
 
-// Expect less than 3 MB of memory usage for a small wasm program.
-// This reflects the current allocator. If the allocator changes,
-// update this value.
-const want = 3 << 20
+// Wasm page size.
+const pageSize = 64 * 1024
+
+// Expect less than 3 MB + 1 page of memory usage for a small wasm
+// program. This reflects the current allocator. If the allocator
+// changes, update this value.
+const want = 3<<20 + pageSize
 
 var w = io.Discard
 
 func main() {
 	fmt.Fprintln(w, "hello world")
 
-	const pageSize = 64 * 1024
 	sz := uintptr(currentMemory()) * pageSize
 	if sz > want {
 		fmt.Printf("FAIL: unexpected memory size %d, want <= %d\n", sz, want)