diff --git a/api/next/49097.txt b/api/next/49097.txt
new file mode 100644
index 00000000000000..f7240954c66228
--- /dev/null
+++ b/api/next/49097.txt
@@ -0,0 +1,4 @@
+pkg net, method (*Dialer) DialIP(context.Context, string, netip.Addr, netip.Addr) (*IPConn, error) #49097
+pkg net, method (*Dialer) DialTCP(context.Context, string, netip.AddrPort, netip.AddrPort) (*TCPConn, error) #49097
+pkg net, method (*Dialer) DialUDP(context.Context, string, netip.AddrPort, netip.AddrPort) (*UDPConn, error) #49097
+pkg net, method (*Dialer) DialUnix(context.Context, string, *UnixAddr, *UnixAddr) (*UnixConn, error) #49097
diff --git a/api/next/67546.txt b/api/next/67546.txt
new file mode 100644
index 00000000000000..0b5b4b981c19a6
--- /dev/null
+++ b/api/next/67546.txt
@@ -0,0 +1,5 @@
+pkg database/sql/driver, type RowsColumnScanner interface { Close, Columns, Next, ScanColumn } #67546
+pkg database/sql/driver, type RowsColumnScanner interface, Close() error #67546
+pkg database/sql/driver, type RowsColumnScanner interface, Columns() []string #67546
+pkg database/sql/driver, type RowsColumnScanner interface, Next([]Value) error #67546
+pkg database/sql/driver, type RowsColumnScanner interface, ScanColumn(interface{}, int) error #67546
diff --git a/doc/go_mem.html b/doc/go_mem.html
index 7adc34828a739a..8db7d65e7342a8 100644
--- a/doc/go_mem.html
+++ b/doc/go_mem.html
@@ -231,7 +231,7 @@
Implementation Restrictions for Programs Containing Data R
A read of an array, struct, or complex number
-may by implemented as a read of each individual sub-value
+may be implemented as a read of each individual sub-value
(array element, struct field, or real/imaginary component),
in any order.
Similarly, a write of an array, struct, or complex number
diff --git a/doc/next/5-toolchain.md b/doc/next/5-toolchain.md
index 0f4a816479754c..cc32f30a521a6d 100644
--- a/doc/next/5-toolchain.md
+++ b/doc/next/5-toolchain.md
@@ -4,4 +4,9 @@
## Linker {#linker}
+## Bootstrap {#bootstrap}
+
+As mentioned in the [Go 1.24 release notes](/doc/go1.24#bootstrap), Go 1.26 now requires
+Go 1.24.6 or later for bootstrap.
+We expect that Go 1.28 will require a minor release of Go 1.26 or later for bootstrap.
diff --git a/doc/next/6-stdlib/99-minor/database/sql/driver/67546.md b/doc/next/6-stdlib/99-minor/database/sql/driver/67546.md
new file mode 100644
index 00000000000000..8cb9089583a89d
--- /dev/null
+++ b/doc/next/6-stdlib/99-minor/database/sql/driver/67546.md
@@ -0,0 +1 @@
+A database driver may implement [RowsColumnScanner] to entirely override `Scan` behavior.
diff --git a/doc/next/6-stdlib/99-minor/net/49097.md b/doc/next/6-stdlib/99-minor/net/49097.md
new file mode 100644
index 00000000000000..bb7947b0a11377
--- /dev/null
+++ b/doc/next/6-stdlib/99-minor/net/49097.md
@@ -0,0 +1 @@
+Added context-aware dial functions for TCP, UDP, IP and Unix networks.
diff --git a/doc/next/7-ports.md b/doc/next/7-ports.md
index 8bea3f8fbc33f9..07445454227bcb 100644
--- a/doc/next/7-ports.md
+++ b/doc/next/7-ports.md
@@ -1,2 +1,6 @@
## Ports {#ports}
+### Windows
+
+
+As [announced](/doc/go1.25#windows) in the Go 1.25 release notes, the [broken](/doc/go1.24#windows) 32-bit windows/arm port (`GOOS=windows` `GOARCH=arm`) is removed.
diff --git a/src/cmd/asm/internal/arch/loong64.go b/src/cmd/asm/internal/arch/loong64.go
index bf5175f4a0bad6..21263d34331513 100644
--- a/src/cmd/asm/internal/arch/loong64.go
+++ b/src/cmd/asm/internal/arch/loong64.go
@@ -23,18 +23,6 @@ func jumpLoong64(word string) bool {
return false
}
-// IsLoong64MUL reports whether the op (as defined by an loong64.A* constant) is
-// one of the MUL/DIV/REM instructions that require special handling.
-func IsLoong64MUL(op obj.As) bool {
- switch op {
- case loong64.AMUL, loong64.AMULU, loong64.AMULV, loong64.AMULVU,
- loong64.ADIV, loong64.ADIVU, loong64.ADIVV, loong64.ADIVVU,
- loong64.AREM, loong64.AREMU, loong64.AREMV, loong64.AREMVU:
- return true
- }
- return false
-}
-
// IsLoong64RDTIME reports whether the op (as defined by an loong64.A*
// constant) is one of the RDTIMELW/RDTIMEHW/RDTIMED instructions that
// require special handling.
diff --git a/src/cmd/asm/internal/asm/asm.go b/src/cmd/asm/internal/asm/asm.go
index 9a6e22cab2c282..6bdbcb9c1b7f0e 100644
--- a/src/cmd/asm/internal/asm/asm.go
+++ b/src/cmd/asm/internal/asm/asm.go
@@ -974,14 +974,6 @@ func (p *Parser) getConstant(prog *obj.Prog, op obj.As, addr *obj.Addr) int64 {
return addr.Offset
}
-// getImmediate checks that addr represents an immediate constant and returns its value.
-func (p *Parser) getImmediate(prog *obj.Prog, op obj.As, addr *obj.Addr) int64 {
- if addr.Type != obj.TYPE_CONST || addr.Name != 0 || addr.Reg != 0 || addr.Index != 0 {
- p.errorf("%s: expected immediate constant; found %s", op, obj.Dconv(prog, addr))
- }
- return addr.Offset
-}
-
// getRegister checks that addr represents a register and returns its value.
func (p *Parser) getRegister(prog *obj.Prog, op obj.As, addr *obj.Addr) int16 {
if addr.Type != obj.TYPE_REG || addr.Offset != 0 || addr.Name != 0 || addr.Index != 0 {
diff --git a/src/cmd/asm/internal/asm/testdata/loong64enc1.s b/src/cmd/asm/internal/asm/testdata/loong64enc1.s
index bfff555782e9f7..72e65734666c2a 100644
--- a/src/cmd/asm/internal/asm/testdata/loong64enc1.s
+++ b/src/cmd/asm/internal/asm/testdata/loong64enc1.s
@@ -376,6 +376,10 @@ lable2:
FTINTRNEVF F0, F2 // 02e41a01
FTINTRNEVD F0, F2 // 02e81a01
+ // FSEL instruction
+ FSEL FCC0, F1, F2, F3 // 4304000d
+ FSEL FCC1, F1, F2 // 4284000d
+
// LDX.{B,BU,H,HU,W,WU,D} instructions
MOVB (R14)(R13), R12 // cc350038
MOVBU (R14)(R13), R12 // cc352038
@@ -510,6 +514,16 @@ lable2:
VMOVQ V3.W[1], V7.W4 // 67e4f772
VMOVQ V4.V[0], V6.V2 // 86f0f772
+	// Load data from memory and broadcast to each element of a vector register: VMOVQ offset(Rj), Vd.<T>
+ VMOVQ (R4), V0.B16 // 80008030
+ VMOVQ 1(R4), V1.H8 // 81044030
+ VMOVQ 2(R4), V2.W4 // 82082030
+ VMOVQ 3(R4), V3.V2 // 830c1030
+ XVMOVQ (R4), X0.B32 // 80008032
+ XVMOVQ 1(R4), X1.H16 // 81044032
+ XVMOVQ 2(R4), X2.W8 // 82082032
+ XVMOVQ 3(R4), X3.V4 // 830c1032
+
// VSEQ{B,H,W,V}, XVSEQ{B,H,W,V} instruction
VSEQB V1, V2, V3 // 43040070
VSEQH V1, V2, V3 // 43840070
@@ -1035,3 +1049,58 @@ lable2:
PRELD (R4), $0 // 8000c02a
PRELD -1(R4), $8 // 88fcff2a
PRELD 8(R4), $31 // 9f20c02a
+
+ // [X]{VBITCLR/VBITSET/VBITREV}{B,H,W,V} instructions
+ VBITCLRB V1, V2, V3 // 43040c71
+ VBITCLRH V1, V2, V3 // 43840c71
+ VBITCLRW V1, V2, V3 // 43040d71
+ VBITCLRV V1, V2, V3 // 43840d71
+ VBITSETB V1, V2, V3 // 43040e71
+ VBITSETH V1, V2, V3 // 43840e71
+ VBITSETW V1, V2, V3 // 43040f71
+ VBITSETV V1, V2, V3 // 43840f71
+ VBITREVB V1, V2, V3 // 43041071
+ VBITREVH V1, V2, V3 // 43841071
+ VBITREVW V1, V2, V3 // 43041171
+ VBITREVV V1, V2, V3 // 43841171
+ XVBITCLRB X3, X2, X1 // 410c0c75
+ XVBITCLRH X3, X2, X1 // 418c0c75
+ XVBITCLRW X3, X2, X1 // 410c0d75
+ XVBITCLRV X3, X2, X1 // 418c0d75
+ XVBITSETB X3, X2, X1 // 410c0e75
+ XVBITSETH X3, X2, X1 // 418c0e75
+ XVBITSETW X3, X2, X1 // 410c0f75
+ XVBITSETV X3, X2, X1 // 418c0f75
+ XVBITREVB X3, X2, X1 // 410c1075
+ XVBITREVH X3, X2, X1 // 418c1075
+ XVBITREVW X3, X2, X1 // 410c1175
+ XVBITREVV X3, X2, X1 // 418c1175
+ VBITCLRB $7, V2, V3 // 433c1073
+ VBITCLRH $15, V2, V3 // 437c1073
+ VBITCLRW $31, V2, V3 // 43fc1073
+ VBITCLRV $63, V2, V3 // 43fc1173
+ VBITSETB $7, V2, V3 // 433c1473
+ VBITSETH $15, V2, V3 // 437c1473
+ VBITSETW $31, V2, V3 // 43fc1473
+ VBITSETV $63, V2, V3 // 43fc1573
+ VBITREVB $7, V2, V3 // 433c1873
+ VBITREVH $15, V2, V3 // 437c1873
+ VBITREVW $31, V2, V3 // 43fc1873
+ VBITREVV $63, V2, V3 // 43fc1973
+ XVBITCLRB $7, X2, X1 // 413c1077
+ XVBITCLRH $15, X2, X1 // 417c1077
+ XVBITCLRW $31, X2, X1 // 41fc1077
+ XVBITCLRV $63, X2, X1 // 41fc1177
+ XVBITSETB $7, X2, X1 // 413c1477
+ XVBITSETH $15, X2, X1 // 417c1477
+ XVBITSETW $31, X2, X1 // 41fc1477
+ XVBITSETV $63, X2, X1 // 41fc1577
+ XVBITREVB $7, X2, X1 // 413c1877
+ XVBITREVH $15, X2, X1 // 417c1877
+ XVBITREVW $31, X2, X1 // 41fc1877
+ XVBITREVV $63, X2, X1 // 41fc1977
+
+	// ALSL{W/WU/V}
+ ALSLW $4, R4, R5, R6 // 86940500
+ ALSLWU $4, R4, R5, R6 // 86940700
+ ALSLV $4, R4, R5, R6 // 86942d00
diff --git a/src/cmd/cgo/internal/test/test.go b/src/cmd/cgo/internal/test/test.go
index 844b2dd42c8cbf..fb4a8250a2666f 100644
--- a/src/cmd/cgo/internal/test/test.go
+++ b/src/cmd/cgo/internal/test/test.go
@@ -245,7 +245,7 @@ static void *thread(void *p) {
return NULL;
}
void testSendSIG() {
- const int N = 20;
+ enum { N = 20 };
int i;
pthread_t tid[N];
for (i = 0; i < N; i++) {
diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go
index 625e725fe34606..8c8c7d90274dcb 100644
--- a/src/cmd/compile/internal/amd64/ssa.go
+++ b/src/cmd/compile/internal/amd64/ssa.go
@@ -142,45 +142,6 @@ func memIdx(a *obj.Addr, v *ssa.Value) {
a.Index = i
}
-// DUFFZERO consists of repeated blocks of 4 MOVUPSs + LEAQ,
-// See runtime/mkduff.go.
-const (
- dzBlocks = 16 // number of MOV/ADD blocks
- dzBlockLen = 4 // number of clears per block
- dzBlockSize = 23 // size of instructions in a single block
- dzMovSize = 5 // size of single MOV instruction w/ offset
- dzLeaqSize = 4 // size of single LEAQ instruction
- dzClearStep = 16 // number of bytes cleared by each MOV instruction
-)
-
-func duffStart(size int64) int64 {
- x, _ := duff(size)
- return x
-}
-func duffAdj(size int64) int64 {
- _, x := duff(size)
- return x
-}
-
-// duff returns the offset (from duffzero, in bytes) and pointer adjust (in bytes)
-// required to use the duffzero mechanism for a block of the given size.
-func duff(size int64) (int64, int64) {
- if size < 32 || size > 1024 || size%dzClearStep != 0 {
- panic("bad duffzero size")
- }
- steps := size / dzClearStep
- blocks := steps / dzBlockLen
- steps %= dzBlockLen
- off := dzBlockSize * (dzBlocks - blocks)
- var adj int64
- if steps != 0 {
- off -= dzLeaqSize
- off -= dzMovSize * steps
- adj -= dzClearStep * (dzBlockLen - steps)
- }
- return off, adj
-}
-
func getgFromTLS(s *ssagen.State, r int16) {
// See the comments in cmd/internal/obj/x86/obj6.go
// near CanUse1InsnTLS for a detailed explanation of these instructions.
@@ -1104,20 +1065,110 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
zero16(off + n - 16)
}
- case ssa.OpAMD64DUFFCOPY:
- p := s.Prog(obj.ADUFFCOPY)
- p.To.Type = obj.TYPE_ADDR
- p.To.Sym = ir.Syms.Duffcopy
- if v.AuxInt%16 != 0 {
- v.Fatalf("bad DUFFCOPY AuxInt %v", v.AuxInt)
+ case ssa.OpAMD64LoweredMove:
+ dstReg := v.Args[0].Reg()
+ srcReg := v.Args[1].Reg()
+ if dstReg == srcReg {
+ break
+ }
+ tmpReg := int16(x86.REG_X14)
+ n := v.AuxInt
+ if n < 16 {
+ v.Fatalf("Move too small %d", n)
+ }
+ // move 16 bytes from srcReg+off to dstReg+off.
+ move16 := func(off int64) {
+ move16(s, srcReg, dstReg, tmpReg, off)
+ }
+
+ // Generate copying instructions.
+ var off int64
+ for n >= 16 {
+ move16(off)
+ off += 16
+ n -= 16
+ }
+ if n != 0 {
+ // use partially overlapped read/write.
+ // TODO: use smaller operations when we can?
+ move16(off + n - 16)
+ }
+
+ case ssa.OpAMD64LoweredMoveLoop:
+ dstReg := v.Args[0].Reg()
+ srcReg := v.Args[1].Reg()
+ if dstReg == srcReg {
+ break
+ }
+ countReg := v.RegTmp()
+ tmpReg := int16(x86.REG_X14)
+ n := v.AuxInt
+ loopSize := int64(64)
+ if n < 3*loopSize {
+ // - a loop count of 0 won't work.
+ // - a loop count of 1 is useless.
+ // - a loop count of 2 is a code size ~tie
+ // 4 instructions to implement the loop
+ // 4 instructions in the loop body
+ // vs
+ // 8 instructions in the straightline code
+ // Might as well use straightline code.
+			v.Fatalf("MoveLoop size too small %d", n)
+ }
+ // move 16 bytes from srcReg+off to dstReg+off.
+ move16 := func(off int64) {
+ move16(s, srcReg, dstReg, tmpReg, off)
+ }
+
+ // Put iteration count in a register.
+ // MOVL $n, countReg
+ p := s.Prog(x86.AMOVL)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = n / loopSize
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = countReg
+ cntInit := p
+
+ // Copy loopSize bytes starting at srcReg to dstReg.
+ for i := range loopSize / 16 {
+ move16(i * 16)
+ }
+ // ADDQ $loopSize, srcReg
+ p = s.Prog(x86.AADDQ)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = loopSize
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = srcReg
+ // ADDQ $loopSize, dstReg
+ p = s.Prog(x86.AADDQ)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = loopSize
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = dstReg
+ // DECL countReg
+ p = s.Prog(x86.ADECL)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = countReg
+ // Jump to loop header if we're not done yet.
+ // JNE head
+ p = s.Prog(x86.AJNE)
+ p.To.Type = obj.TYPE_BRANCH
+ p.To.SetTarget(cntInit.Link)
+
+ // Multiples of the loop size are now done.
+ n %= loopSize
+
+ // Copy any fractional portion.
+ var off int64
+ for n >= 16 {
+ move16(off)
+ off += 16
+ n -= 16
+ }
+ if n != 0 {
+ // Use partially-overlapping copy.
+ move16(off + n - 16)
}
- p.To.Offset = 14 * (64 - v.AuxInt/16)
- // 14 and 64 are magic constants. 14 is the number of bytes to encode:
- // MOVUPS (SI), X0
- // ADDQ $16, SI
- // MOVUPS X0, (DI)
- // ADDQ $16, DI
- // and 64 is the number of such blocks. See src/runtime/duff_amd64.s:duffcopy.
case ssa.OpCopy: // TODO: use MOVQreg for reg->reg copies instead of OpCopy?
if v.Type.IsMemory() {
@@ -1709,3 +1760,21 @@ func zero16(s *ssagen.State, reg int16, off int64) {
p.To.Reg = reg
p.To.Offset = off
}
+
+// move 16 bytes from src+off to dst+off using temporary register tmp.
+func move16(s *ssagen.State, src, dst, tmp int16, off int64) {
+ // MOVUPS off(srcReg), tmpReg
+ // MOVUPS tmpReg, off(dstReg)
+ p := s.Prog(x86.AMOVUPS)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = src
+ p.From.Offset = off
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = tmp
+ p = s.Prog(x86.AMOVUPS)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = tmp
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = dst
+ p.To.Offset = off
+}
diff --git a/src/cmd/compile/internal/arm64/ssa.go b/src/cmd/compile/internal/arm64/ssa.go
index be7887318a4424..cd0c2cdfaa7416 100644
--- a/src/cmd/compile/internal/arm64/ssa.go
+++ b/src/cmd/compile/internal/arm64/ssa.go
@@ -1050,33 +1050,118 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.From.Offset = int64(condCode)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
- case ssa.OpARM64DUFFZERO:
- // runtime.duffzero expects start address in R20
- p := s.Prog(obj.ADUFFZERO)
- p.To.Type = obj.TYPE_MEM
- p.To.Name = obj.NAME_EXTERN
- p.To.Sym = ir.Syms.Duffzero
- p.To.Offset = v.AuxInt
case ssa.OpARM64LoweredZero:
- // STP.P (ZR,ZR), 16(R16)
- // CMP Rarg1, R16
- // BLE -2(PC)
- // arg1 is the address of the last 16-byte unit to zero
- p := s.Prog(arm64.ASTP)
- p.Scond = arm64.C_XPOST
- p.From.Type = obj.TYPE_REGREG
- p.From.Reg = arm64.REGZERO
- p.From.Offset = int64(arm64.REGZERO)
- p.To.Type = obj.TYPE_MEM
- p.To.Reg = arm64.REG_R16
- p.To.Offset = 16
- p2 := s.Prog(arm64.ACMP)
- p2.From.Type = obj.TYPE_REG
- p2.From.Reg = v.Args[1].Reg()
- p2.Reg = arm64.REG_R16
- p3 := s.Prog(arm64.ABLE)
- p3.To.Type = obj.TYPE_BRANCH
- p3.To.SetTarget(p)
+ ptrReg := v.Args[0].Reg()
+ n := v.AuxInt
+ if n < 16 {
+ v.Fatalf("Zero too small %d", n)
+ }
+
+ // Generate zeroing instructions.
+ var off int64
+ for n >= 16 {
+ // STP (ZR, ZR), off(ptrReg)
+ zero16(s, ptrReg, off, false)
+ off += 16
+ n -= 16
+ }
+ // Write any fractional portion.
+ // An overlapping 16-byte write can't be used here
+ // because STP's offsets must be a multiple of 8.
+ if n > 8 {
+ // MOVD ZR, off(ptrReg)
+ zero8(s, ptrReg, off)
+ off += 8
+ n -= 8
+ }
+ if n != 0 {
+ // MOVD ZR, off+n-8(ptrReg)
+ // TODO: for n<=4 we could use a smaller write.
+ zero8(s, ptrReg, off+n-8)
+ }
+ case ssa.OpARM64LoweredZeroLoop:
+ ptrReg := v.Args[0].Reg()
+ countReg := v.RegTmp()
+ n := v.AuxInt
+ loopSize := int64(64)
+ if n < 3*loopSize {
+ // - a loop count of 0 won't work.
+ // - a loop count of 1 is useless.
+ // - a loop count of 2 is a code size ~tie
+ // 3 instructions to implement the loop
+ // 4 instructions in the loop body
+ // vs
+ // 8 instructions in the straightline code
+ // Might as well use straightline code.
+ v.Fatalf("ZeroLoop size too small %d", n)
+ }
+
+ // Put iteration count in a register.
+ // MOVD $n, countReg
+ p := s.Prog(arm64.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = n / loopSize
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = countReg
+ cntInit := p
+
+ // Zero loopSize bytes starting at ptrReg.
+ // Increment ptrReg by loopSize as a side effect.
+ for range loopSize / 16 {
+ // STP.P (ZR, ZR), 16(ptrReg)
+ zero16(s, ptrReg, 0, true)
+ // TODO: should we use the postincrement form,
+ // or use a separate += 64 instruction?
+ // postincrement saves an instruction, but maybe
+ // it requires more integer units to do the +=16s.
+ }
+ // Decrement loop count.
+ // SUB $1, countReg
+ p = s.Prog(arm64.ASUB)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = countReg
+ // Jump to loop header if we're not done yet.
+ // CBNZ head
+ p = s.Prog(arm64.ACBNZ)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = countReg
+ p.To.Type = obj.TYPE_BRANCH
+ p.To.SetTarget(cntInit.Link)
+
+ // Multiples of the loop size are now done.
+ n %= loopSize
+
+ // Write any fractional portion.
+ var off int64
+ for n >= 16 {
+ // STP (ZR, ZR), off(ptrReg)
+ zero16(s, ptrReg, off, false)
+ off += 16
+ n -= 16
+ }
+ if n > 8 {
+ // Note: an overlapping 16-byte write can't be used
+ // here because STP's offsets must be a multiple of 8.
+ // MOVD ZR, off(ptrReg)
+ zero8(s, ptrReg, off)
+ off += 8
+ n -= 8
+ }
+ if n != 0 {
+ // MOVD ZR, off+n-8(ptrReg)
+ // TODO: for n<=4 we could use a smaller write.
+ zero8(s, ptrReg, off+n-8)
+ }
+ // TODO: maybe we should use the count register to instead
+ // hold an end pointer and compare against that?
+ // ADD $n, ptrReg, endReg
+ // then
+ // CMP ptrReg, endReg
+ // BNE loop
+ // There's a past-the-end pointer here, any problem with that?
+
case ssa.OpARM64DUFFCOPY:
p := s.Prog(obj.ADUFFCOPY)
p.To.Type = obj.TYPE_MEM
@@ -1482,3 +1567,35 @@ func spillArgReg(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg in
p.Pos = p.Pos.WithNotStmt()
return p
}
+
+// zero16 zeroes 16 bytes at reg+off.
+// If postInc is true, increment reg by 16.
+func zero16(s *ssagen.State, reg int16, off int64, postInc bool) {
+ // STP (ZR, ZR), off(reg)
+ p := s.Prog(arm64.ASTP)
+ p.From.Type = obj.TYPE_REGREG
+ p.From.Reg = arm64.REGZERO
+ p.From.Offset = int64(arm64.REGZERO)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = reg
+ p.To.Offset = off
+ if postInc {
+ if off != 0 {
+ panic("can't postinc with non-zero offset")
+ }
+ // STP.P (ZR, ZR), 16(reg)
+ p.Scond = arm64.C_XPOST
+ p.To.Offset = 16
+ }
+}
+
+// zero8 zeroes 8 bytes at reg+off.
+func zero8(s *ssagen.State, reg int16, off int64) {
+ // MOVD ZR, off(reg)
+ p := s.Prog(arm64.AMOVD)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = arm64.REGZERO
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = reg
+ p.To.Offset = off
+}
diff --git a/src/cmd/compile/internal/importer/support.go b/src/cmd/compile/internal/importer/support.go
index a443b4d8621e54..6ce721557a052e 100644
--- a/src/cmd/compile/internal/importer/support.go
+++ b/src/cmd/compile/internal/importer/support.go
@@ -9,20 +9,14 @@ package importer
import (
"cmd/compile/internal/base"
"cmd/compile/internal/types2"
- "fmt"
"go/token"
"internal/pkgbits"
- "sync"
)
func assert(p bool) {
base.Assert(p)
}
-func errorf(format string, args ...interface{}) {
- panic(fmt.Sprintf(format, args...))
-}
-
const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go
// Synthesize a token.Pos
@@ -31,108 +25,6 @@ type fakeFileSet struct {
files map[string]*token.File
}
-func (s *fakeFileSet) pos(file string, line, column int) token.Pos {
- // TODO(mdempsky): Make use of column.
-
- // Since we don't know the set of needed file positions, we
- // reserve maxlines positions per file.
- const maxlines = 64 * 1024
- f := s.files[file]
- if f == nil {
- f = s.fset.AddFile(file, -1, maxlines)
- s.files[file] = f
- // Allocate the fake linebreak indices on first use.
- // TODO(adonovan): opt: save ~512KB using a more complex scheme?
- fakeLinesOnce.Do(func() {
- fakeLines = make([]int, maxlines)
- for i := range fakeLines {
- fakeLines[i] = i
- }
- })
- f.SetLines(fakeLines)
- }
-
- if line > maxlines {
- line = 1
- }
-
- // Treat the file as if it contained only newlines
- // and column=1: use the line number as the offset.
- return f.Pos(line - 1)
-}
-
-var (
- fakeLines []int
- fakeLinesOnce sync.Once
-)
-
-func chanDir(d int) types2.ChanDir {
- // tag values must match the constants in cmd/compile/internal/gc/go.go
- switch d {
- case 1 /* Crecv */ :
- return types2.RecvOnly
- case 2 /* Csend */ :
- return types2.SendOnly
- case 3 /* Cboth */ :
- return types2.SendRecv
- default:
- errorf("unexpected channel dir %d", d)
- return 0
- }
-}
-
-var predeclared = []types2.Type{
- // basic types
- types2.Typ[types2.Bool],
- types2.Typ[types2.Int],
- types2.Typ[types2.Int8],
- types2.Typ[types2.Int16],
- types2.Typ[types2.Int32],
- types2.Typ[types2.Int64],
- types2.Typ[types2.Uint],
- types2.Typ[types2.Uint8],
- types2.Typ[types2.Uint16],
- types2.Typ[types2.Uint32],
- types2.Typ[types2.Uint64],
- types2.Typ[types2.Uintptr],
- types2.Typ[types2.Float32],
- types2.Typ[types2.Float64],
- types2.Typ[types2.Complex64],
- types2.Typ[types2.Complex128],
- types2.Typ[types2.String],
-
- // basic type aliases
- types2.Universe.Lookup("byte").Type(),
- types2.Universe.Lookup("rune").Type(),
-
- // error
- types2.Universe.Lookup("error").Type(),
-
- // untyped types
- types2.Typ[types2.UntypedBool],
- types2.Typ[types2.UntypedInt],
- types2.Typ[types2.UntypedRune],
- types2.Typ[types2.UntypedFloat],
- types2.Typ[types2.UntypedComplex],
- types2.Typ[types2.UntypedString],
- types2.Typ[types2.UntypedNil],
-
- // package unsafe
- types2.Typ[types2.UnsafePointer],
-
- // invalid type
- types2.Typ[types2.Invalid], // only appears in packages with errors
-
- // used internally by gc; never used by this package or in .a files
- // not to be confused with the universe any
- anyType{},
-
- // comparable
- types2.Universe.Lookup("comparable").Type(),
-
- // "any" has special handling: see usage of predeclared.
-}
-
type anyType struct{}
func (t anyType) Underlying() types2.Type { return t }
diff --git a/src/cmd/compile/internal/inline/inl.go b/src/cmd/compile/internal/inline/inl.go
index c06f76fe9ff029..b39710548ebafa 100644
--- a/src/cmd/compile/internal/inline/inl.go
+++ b/src/cmd/compile/internal/inline/inl.go
@@ -1211,17 +1211,6 @@ func pruneUnusedAutos(ll []*ir.Name, vis *hairyVisitor) []*ir.Name {
return s
}
-// numNonClosures returns the number of functions in list which are not closures.
-func numNonClosures(list []*ir.Func) int {
- count := 0
- for _, fn := range list {
- if fn.OClosure == nil {
- count++
- }
- }
- return count
-}
-
func doList(list []ir.Node, do func(ir.Node) bool) bool {
for _, x := range list {
if x != nil {
diff --git a/src/cmd/compile/internal/inline/inlheur/scoring.go b/src/cmd/compile/internal/inline/inlheur/scoring.go
index 28fa643132952a..1396c4d800331b 100644
--- a/src/cmd/compile/internal/inline/inlheur/scoring.go
+++ b/src/cmd/compile/internal/inline/inlheur/scoring.go
@@ -399,14 +399,6 @@ func LargestNegativeScoreAdjustment(fn *ir.Func, props *FuncProps) int {
return score
}
-// LargestPositiveScoreAdjustment tries to estimate the largest possible
-// positive score adjustment that could be applied to a given callsite.
-// At the moment we don't have very many positive score adjustments, so
-// this is just hard-coded, not table-driven.
-func LargestPositiveScoreAdjustment(fn *ir.Func) int {
- return adjValues[panicPathAdj] + adjValues[initFuncAdj]
-}
-
// callSiteTab contains entries for each call in the function
// currently being processed by InlineCalls; this variable will either
// be set to 'cstabCache' below (for non-inlinable routines) or to the
diff --git a/src/cmd/compile/internal/ir/copy.go b/src/cmd/compile/internal/ir/copy.go
index d30f7bc6880938..4ec887d397ca68 100644
--- a/src/cmd/compile/internal/ir/copy.go
+++ b/src/cmd/compile/internal/ir/copy.go
@@ -32,12 +32,3 @@ func DeepCopy(pos src.XPos, n Node) Node {
}
return edit(n)
}
-
-// DeepCopyList returns a list of deep copies (using DeepCopy) of the nodes in list.
-func DeepCopyList(pos src.XPos, list []Node) []Node {
- var out []Node
- for _, n := range list {
- out = append(out, DeepCopy(pos, n))
- }
- return out
-}
diff --git a/src/cmd/compile/internal/ir/mini.go b/src/cmd/compile/internal/ir/mini.go
index 70897fc3f9c8de..6c91560e526441 100644
--- a/src/cmd/compile/internal/ir/mini.go
+++ b/src/cmd/compile/internal/ir/mini.go
@@ -34,15 +34,6 @@ type miniNode struct {
esc uint16
}
-// posOr returns pos if known, or else n.pos.
-// For use in DeepCopy.
-func (n *miniNode) posOr(pos src.XPos) src.XPos {
- if pos.IsKnown() {
- return pos
- }
- return n.pos
-}
-
// op can be read, but not written.
// An embedding implementation can provide a SetOp if desired.
// (The panicking SetOp is with the other panics below.)
diff --git a/src/cmd/compile/internal/ir/visit.go b/src/cmd/compile/internal/ir/visit.go
index 8dff11af335e23..c68bb5d0330337 100644
--- a/src/cmd/compile/internal/ir/visit.go
+++ b/src/cmd/compile/internal/ir/visit.go
@@ -155,19 +155,6 @@ func Any(n Node, cond func(Node) bool) bool {
return do(n)
}
-// AnyList calls Any(x, cond) for each node x in the list, in order.
-// If any call returns true, AnyList stops and returns true.
-// Otherwise, AnyList returns false after calling Any(x, cond)
-// for every x in the list.
-func AnyList(list Nodes, cond func(Node) bool) bool {
- for _, x := range list {
- if Any(x, cond) {
- return true
- }
- }
- return false
-}
-
// EditChildren edits the child nodes of n, replacing each child x with edit(x).
//
// Note that EditChildren(n, edit) only calls edit(x) for n's immediate children.
diff --git a/src/cmd/compile/internal/loong64/ssa.go b/src/cmd/compile/internal/loong64/ssa.go
index 2d986a5ff4e67f..c7fb903d5d6460 100644
--- a/src/cmd/compile/internal/loong64/ssa.go
+++ b/src/cmd/compile/internal/loong64/ssa.go
@@ -1065,6 +1065,17 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
{Type: obj.TYPE_CONST, Offset: int64((v.AuxInt >> 0) & 0x1f)},
})
+ case ssa.OpLOONG64ADDshiftLLV:
+ // ADDshiftLLV Rarg0, Rarg1, $shift
+ // ALSLV $shift, Rarg1, Rarg0, Rtmp
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.Reg = v.Args[1].Reg()
+ p.AddRestSourceReg(v.Args[0].Reg())
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
case ssa.OpClobber, ssa.OpClobberReg:
// TODO: implement for clobberdead experiment. Nop is ok for now.
default:
@@ -1075,8 +1086,8 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
var blockJump = map[ssa.BlockKind]struct {
asm, invasm obj.As
}{
- ssa.BlockLOONG64EQ: {loong64.ABEQ, loong64.ABNE},
- ssa.BlockLOONG64NE: {loong64.ABNE, loong64.ABEQ},
+ ssa.BlockLOONG64EQZ: {loong64.ABEQ, loong64.ABNE},
+ ssa.BlockLOONG64NEZ: {loong64.ABNE, loong64.ABEQ},
ssa.BlockLOONG64LTZ: {loong64.ABLTZ, loong64.ABGEZ},
ssa.BlockLOONG64GEZ: {loong64.ABGEZ, loong64.ABLTZ},
ssa.BlockLOONG64LEZ: {loong64.ABLEZ, loong64.ABGTZ},
@@ -1102,7 +1113,7 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
case ssa.BlockExit, ssa.BlockRetJmp:
case ssa.BlockRet:
s.Prog(obj.ARET)
- case ssa.BlockLOONG64EQ, ssa.BlockLOONG64NE,
+ case ssa.BlockLOONG64EQZ, ssa.BlockLOONG64NEZ,
ssa.BlockLOONG64LTZ, ssa.BlockLOONG64GEZ,
ssa.BlockLOONG64LEZ, ssa.BlockLOONG64GTZ,
ssa.BlockLOONG64BEQ, ssa.BlockLOONG64BNE,
@@ -1132,7 +1143,7 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
p.From.Type = obj.TYPE_REG
p.From.Reg = b.Controls[0].Reg()
p.Reg = b.Controls[1].Reg()
- case ssa.BlockLOONG64EQ, ssa.BlockLOONG64NE,
+ case ssa.BlockLOONG64EQZ, ssa.BlockLOONG64NEZ,
ssa.BlockLOONG64LTZ, ssa.BlockLOONG64GEZ,
ssa.BlockLOONG64LEZ, ssa.BlockLOONG64GTZ,
ssa.BlockLOONG64FPT, ssa.BlockLOONG64FPF:
diff --git a/src/cmd/compile/internal/noder/posmap.go b/src/cmd/compile/internal/noder/posmap.go
index 439daf454e6fc3..9b02765e95cfe7 100644
--- a/src/cmd/compile/internal/noder/posmap.go
+++ b/src/cmd/compile/internal/noder/posmap.go
@@ -23,7 +23,6 @@ type poser interface{ Pos() syntax.Pos }
type ender interface{ End() syntax.Pos }
func (m *posMap) pos(p poser) src.XPos { return m.makeXPos(p.Pos()) }
-func (m *posMap) end(p ender) src.XPos { return m.makeXPos(p.End()) }
func (m *posMap) makeXPos(pos syntax.Pos) src.XPos {
// Predeclared objects (e.g., the result parameter for error.Error)
diff --git a/src/cmd/compile/internal/noder/reader.go b/src/cmd/compile/internal/noder/reader.go
index 38b0bc1d8a4153..3cbc7989a74613 100644
--- a/src/cmd/compile/internal/noder/reader.go
+++ b/src/cmd/compile/internal/noder/reader.go
@@ -3681,17 +3681,6 @@ func expandInline(fn *ir.Func, pri pkgReaderIndex) {
typecheck.Target.Funcs = typecheck.Target.Funcs[:topdcls]
}
-// usedLocals returns a set of local variables that are used within body.
-func usedLocals(body []ir.Node) ir.NameSet {
- var used ir.NameSet
- ir.VisitList(body, func(n ir.Node) {
- if n, ok := n.(*ir.Name); ok && n.Op() == ir.ONAME && n.Class == ir.PAUTO {
- used.Add(n)
- }
- })
- return used
-}
-
// @@@ Method wrappers
//
// Here we handle constructing "method wrappers," alternative entry
diff --git a/src/cmd/compile/internal/noder/unified.go b/src/cmd/compile/internal/noder/unified.go
index 85982d7c189e14..05f4483d0d9f28 100644
--- a/src/cmd/compile/internal/noder/unified.go
+++ b/src/cmd/compile/internal/noder/unified.go
@@ -7,7 +7,6 @@ package noder
import (
"cmp"
"fmt"
- "internal/buildcfg"
"internal/pkgbits"
"internal/types/errors"
"io"
@@ -464,11 +463,8 @@ func readPackage(pr *pkgReader, importpkg *types.Pkg, localStub bool) {
// writeUnifiedExport writes to `out` the finalized, self-contained
// Unified IR export data file for the current compilation unit.
func writeUnifiedExport(out io.Writer) {
- // Use V2 as the encoded version aliastypeparams GOEXPERIMENT is enabled.
- version := pkgbits.V1
- if buildcfg.Experiment.AliasTypeParams {
- version = pkgbits.V2
- }
+ // Use V2 as the encoded version for aliastypeparams.
+ version := pkgbits.V2
l := linker{
pw: pkgbits.NewPkgEncoder(version, base.Debug.SyncFrames),
diff --git a/src/cmd/compile/internal/noder/writer.go b/src/cmd/compile/internal/noder/writer.go
index dd79c3ef4c87cb..54e5f1ea5f677f 100644
--- a/src/cmd/compile/internal/noder/writer.go
+++ b/src/cmd/compile/internal/noder/writer.go
@@ -96,11 +96,8 @@ type pkgWriter struct {
// newPkgWriter returns an initialized pkgWriter for the specified
// package.
func newPkgWriter(m posMap, pkg *types2.Package, info *types2.Info, otherInfo map[*syntax.FuncLit]bool) *pkgWriter {
- // Use V2 as the encoded version aliastypeparams GOEXPERIMENT is enabled.
- version := pkgbits.V1
- if buildcfg.Experiment.AliasTypeParams {
- version = pkgbits.V2
- }
+ // Use V2 as the encoded version for aliastypeparams.
+ version := pkgbits.V2
return &pkgWriter{
PkgEncoder: pkgbits.NewPkgEncoder(version, base.Debug.SyncFrames),
@@ -2413,11 +2410,6 @@ func (p posVar) String() string {
return p.pos.String() + ":" + p.var_.String()
}
-func (w *writer) exprList(expr syntax.Expr) {
- w.Sync(pkgbits.SyncExprList)
- w.exprs(syntax.UnpackListExpr(expr))
-}
-
func (w *writer) exprs(exprs []syntax.Expr) {
w.Sync(pkgbits.SyncExprs)
w.Len(len(exprs))
diff --git a/src/cmd/compile/internal/ppc64/ssa.go b/src/cmd/compile/internal/ppc64/ssa.go
index c1f2484bf4482a..1086a9ccbf22dc 100644
--- a/src/cmd/compile/internal/ppc64/ssa.go
+++ b/src/cmd/compile/internal/ppc64/ssa.go
@@ -14,6 +14,7 @@ import (
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/ppc64"
+ "internal/abi"
"internal/buildcfg"
"math"
"strings"
@@ -1913,12 +1914,90 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
// AuxInt encodes how many buffer entries we need.
p.To.Sym = ir.Syms.GCWriteBarrier[v.AuxInt-1]
- case ssa.OpPPC64LoweredPanicBoundsA, ssa.OpPPC64LoweredPanicBoundsB, ssa.OpPPC64LoweredPanicBoundsC:
- p := s.Prog(obj.ACALL)
+ case ssa.OpPPC64LoweredPanicBoundsRR, ssa.OpPPC64LoweredPanicBoundsRC, ssa.OpPPC64LoweredPanicBoundsCR, ssa.OpPPC64LoweredPanicBoundsCC:
+ // Compute the constant we put in the PCData entry for this call.
+ code, signed := ssa.BoundsKind(v.AuxInt).Code()
+ xIsReg := false
+ yIsReg := false
+ xVal := 0
+ yVal := 0
+ switch v.Op {
+ case ssa.OpPPC64LoweredPanicBoundsRR:
+ xIsReg = true
+ xVal = int(v.Args[0].Reg() - ppc64.REG_R3)
+ yIsReg = true
+ yVal = int(v.Args[1].Reg() - ppc64.REG_R3)
+ case ssa.OpPPC64LoweredPanicBoundsRC:
+ xIsReg = true
+ xVal = int(v.Args[0].Reg() - ppc64.REG_R3)
+ c := v.Aux.(ssa.PanicBoundsC).C
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ yVal = int(c)
+ } else {
+ // Move constant to a register
+ yIsReg = true
+ if yVal == xVal {
+ yVal = 1
+ }
+ p := s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_R3 + int16(yVal)
+ }
+ case ssa.OpPPC64LoweredPanicBoundsCR:
+ yIsReg = true
+			yVal = int(v.Args[0].Reg() - ppc64.REG_R3)
+ c := v.Aux.(ssa.PanicBoundsC).C
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ xVal = int(c)
+ } else {
+ // Move constant to a register
+ if xVal == yVal {
+ xVal = 1
+ }
+ p := s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_R3 + int16(xVal)
+ }
+ case ssa.OpPPC64LoweredPanicBoundsCC:
+ c := v.Aux.(ssa.PanicBoundsCC).Cx
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ xVal = int(c)
+ } else {
+ // Move constant to a register
+ xIsReg = true
+ p := s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_R3 + int16(xVal)
+ }
+ c = v.Aux.(ssa.PanicBoundsCC).Cy
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ yVal = int(c)
+ } else {
+ // Move constant to a register
+ yIsReg = true
+ yVal = 1
+ p := s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_R3 + int16(yVal)
+ }
+ }
+ c := abi.BoundsEncode(code, signed, xIsReg, yIsReg, xVal, yVal)
+
+ p := s.Prog(obj.APCDATA)
+ p.From.SetConst(abi.PCDATA_PanicBounds)
+ p.To.SetConst(int64(c))
+ p = s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
- p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
- s.UseArgs(16) // space used in callee args area by assembly stubs
+ p.To.Sym = ir.Syms.PanicBounds
case ssa.OpPPC64LoweredNilCheck:
if buildcfg.GOOS == "aix" {
diff --git a/src/cmd/compile/internal/reflectdata/reflect.go b/src/cmd/compile/internal/reflectdata/reflect.go
index 4d1d7801900a2e..c561d527a7d161 100644
--- a/src/cmd/compile/internal/reflectdata/reflect.go
+++ b/src/cmd/compile/internal/reflectdata/reflect.go
@@ -1468,10 +1468,3 @@ func MarkUsedIfaceMethod(n *ir.CallExpr) {
Add: InterfaceMethodOffset(ityp, midx),
})
}
-
-func deref(t *types.Type) *types.Type {
- if t.IsPtr() {
- return t.Elem()
- }
- return t
-}
diff --git a/src/cmd/compile/internal/riscv64/ssa.go b/src/cmd/compile/internal/riscv64/ssa.go
index 21edcabc58b3a8..3d2f65a75e6212 100644
--- a/src/cmd/compile/internal/riscv64/ssa.go
+++ b/src/cmd/compile/internal/riscv64/ssa.go
@@ -14,6 +14,7 @@ import (
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/riscv"
+ "internal/abi"
)
// ssaRegToReg maps ssa register numbers to obj register numbers.
@@ -416,9 +417,10 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.To.Type = obj.TYPE_REG
p.To.Reg = r
case ssa.OpRISCV64FSQRTS, ssa.OpRISCV64FNEGS, ssa.OpRISCV64FABSD, ssa.OpRISCV64FSQRTD, ssa.OpRISCV64FNEGD,
- ssa.OpRISCV64FMVSX, ssa.OpRISCV64FMVDX,
+ ssa.OpRISCV64FMVSX, ssa.OpRISCV64FMVXS, ssa.OpRISCV64FMVDX, ssa.OpRISCV64FMVXD,
ssa.OpRISCV64FCVTSW, ssa.OpRISCV64FCVTSL, ssa.OpRISCV64FCVTWS, ssa.OpRISCV64FCVTLS,
ssa.OpRISCV64FCVTDW, ssa.OpRISCV64FCVTDL, ssa.OpRISCV64FCVTWD, ssa.OpRISCV64FCVTLD, ssa.OpRISCV64FCVTDS, ssa.OpRISCV64FCVTSD,
+ ssa.OpRISCV64FCLASSS, ssa.OpRISCV64FCLASSD,
ssa.OpRISCV64NOT, ssa.OpRISCV64NEG, ssa.OpRISCV64NEGW, ssa.OpRISCV64CLZ, ssa.OpRISCV64CLZW, ssa.OpRISCV64CTZ, ssa.OpRISCV64CTZW,
ssa.OpRISCV64REV8, ssa.OpRISCV64CPOP, ssa.OpRISCV64CPOPW:
p := s.Prog(v.Op.Asm())
@@ -508,12 +510,91 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.To.Name = obj.NAME_EXTERN
// AuxInt encodes how many buffer entries we need.
p.To.Sym = ir.Syms.GCWriteBarrier[v.AuxInt-1]
- case ssa.OpRISCV64LoweredPanicBoundsA, ssa.OpRISCV64LoweredPanicBoundsB, ssa.OpRISCV64LoweredPanicBoundsC:
- p := s.Prog(obj.ACALL)
+
+ case ssa.OpRISCV64LoweredPanicBoundsRR, ssa.OpRISCV64LoweredPanicBoundsRC, ssa.OpRISCV64LoweredPanicBoundsCR, ssa.OpRISCV64LoweredPanicBoundsCC:
+ // Compute the constant we put in the PCData entry for this call.
+ code, signed := ssa.BoundsKind(v.AuxInt).Code()
+ xIsReg := false
+ yIsReg := false
+ xVal := 0
+ yVal := 0
+ switch v.Op {
+ case ssa.OpRISCV64LoweredPanicBoundsRR:
+ xIsReg = true
+ xVal = int(v.Args[0].Reg() - riscv.REG_X5)
+ yIsReg = true
+ yVal = int(v.Args[1].Reg() - riscv.REG_X5)
+ case ssa.OpRISCV64LoweredPanicBoundsRC:
+ xIsReg = true
+ xVal = int(v.Args[0].Reg() - riscv.REG_X5)
+ c := v.Aux.(ssa.PanicBoundsC).C
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ yVal = int(c)
+ } else {
+ // Move constant to a register
+ yIsReg = true
+ if yVal == xVal {
+ yVal = 1
+ }
+ p := s.Prog(riscv.AMOV)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = riscv.REG_X5 + int16(yVal)
+ }
+ case ssa.OpRISCV64LoweredPanicBoundsCR:
+ yIsReg = true
+			yVal = int(v.Args[0].Reg() - riscv.REG_X5)
+ c := v.Aux.(ssa.PanicBoundsC).C
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ xVal = int(c)
+ } else {
+ // Move constant to a register
+ if xVal == yVal {
+ xVal = 1
+ }
+ p := s.Prog(riscv.AMOV)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = riscv.REG_X5 + int16(xVal)
+ }
+ case ssa.OpRISCV64LoweredPanicBoundsCC:
+ c := v.Aux.(ssa.PanicBoundsCC).Cx
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ xVal = int(c)
+ } else {
+ // Move constant to a register
+ xIsReg = true
+ p := s.Prog(riscv.AMOV)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = riscv.REG_X5 + int16(xVal)
+ }
+ c = v.Aux.(ssa.PanicBoundsCC).Cy
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ yVal = int(c)
+ } else {
+ // Move constant to a register
+ yIsReg = true
+ yVal = 1
+ p := s.Prog(riscv.AMOV)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = riscv.REG_X5 + int16(yVal)
+ }
+ }
+ c := abi.BoundsEncode(code, signed, xIsReg, yIsReg, xVal, yVal)
+
+ p := s.Prog(obj.APCDATA)
+ p.From.SetConst(abi.PCDATA_PanicBounds)
+ p.To.SetConst(int64(c))
+ p = s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
- p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
- s.UseArgs(16) // space used in callee args area by assembly stubs
+ p.To.Sym = ir.Syms.PanicBounds
case ssa.OpRISCV64LoweredAtomicLoad8:
s.Prog(riscv.AFENCE)
diff --git a/src/cmd/compile/internal/s390x/ssa.go b/src/cmd/compile/internal/s390x/ssa.go
index ad66bfb5d85334..86efde4fa09b87 100644
--- a/src/cmd/compile/internal/s390x/ssa.go
+++ b/src/cmd/compile/internal/s390x/ssa.go
@@ -15,6 +15,7 @@ import (
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/s390x"
+ "internal/abi"
)
// ssaMarkMoves marks any MOVXconst ops that need to avoid clobbering flags.
@@ -573,12 +574,92 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.To.Name = obj.NAME_EXTERN
// AuxInt encodes how many buffer entries we need.
p.To.Sym = ir.Syms.GCWriteBarrier[v.AuxInt-1]
- case ssa.OpS390XLoweredPanicBoundsA, ssa.OpS390XLoweredPanicBoundsB, ssa.OpS390XLoweredPanicBoundsC:
- p := s.Prog(obj.ACALL)
+
+ case ssa.OpS390XLoweredPanicBoundsRR, ssa.OpS390XLoweredPanicBoundsRC, ssa.OpS390XLoweredPanicBoundsCR, ssa.OpS390XLoweredPanicBoundsCC:
+ // Compute the constant we put in the PCData entry for this call.
+ code, signed := ssa.BoundsKind(v.AuxInt).Code()
+ xIsReg := false
+ yIsReg := false
+ xVal := 0
+ yVal := 0
+ switch v.Op {
+ case ssa.OpS390XLoweredPanicBoundsRR:
+ xIsReg = true
+ xVal = int(v.Args[0].Reg() - s390x.REG_R0)
+ yIsReg = true
+ yVal = int(v.Args[1].Reg() - s390x.REG_R0)
+ case ssa.OpS390XLoweredPanicBoundsRC:
+ xIsReg = true
+ xVal = int(v.Args[0].Reg() - s390x.REG_R0)
+ c := v.Aux.(ssa.PanicBoundsC).C
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ yVal = int(c)
+ } else {
+ // Move constant to a register
+ yIsReg = true
+ if yVal == xVal {
+ yVal = 1
+ }
+ p := s.Prog(s390x.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = s390x.REG_R0 + int16(yVal)
+ }
+ case ssa.OpS390XLoweredPanicBoundsCR:
+ yIsReg = true
+			yVal = int(v.Args[0].Reg() - s390x.REG_R0)
+ c := v.Aux.(ssa.PanicBoundsC).C
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ xVal = int(c)
+ } else {
+ // Move constant to a register
+ if xVal == yVal {
+ xVal = 1
+ }
+ p := s.Prog(s390x.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = s390x.REG_R0 + int16(xVal)
+ }
+ case ssa.OpS390XLoweredPanicBoundsCC:
+ c := v.Aux.(ssa.PanicBoundsCC).Cx
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ xVal = int(c)
+ } else {
+ // Move constant to a register
+ xIsReg = true
+ p := s.Prog(s390x.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = s390x.REG_R0 + int16(xVal)
+ }
+ c = v.Aux.(ssa.PanicBoundsCC).Cy
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ yVal = int(c)
+ } else {
+ // Move constant to a register
+ yIsReg = true
+ yVal = 1
+ p := s.Prog(s390x.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = s390x.REG_R0 + int16(yVal)
+ }
+ }
+ c := abi.BoundsEncode(code, signed, xIsReg, yIsReg, xVal, yVal)
+
+ p := s.Prog(obj.APCDATA)
+ p.From.SetConst(abi.PCDATA_PanicBounds)
+ p.To.SetConst(int64(c))
+ p = s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
- p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
- s.UseArgs(16) // space used in callee args area by assembly stubs
+ p.To.Sym = ir.Syms.PanicBounds
+
case ssa.OpS390XFLOGR, ssa.OpS390XPOPCNT,
ssa.OpS390XNEG, ssa.OpS390XNEGW,
ssa.OpS390XMOVWBR, ssa.OpS390XMOVDBR:
diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64.rules b/src/cmd/compile/internal/ssa/_gen/AMD64.rules
index 95e63001269c2d..7d3efef5cdc837 100644
--- a/src/cmd/compile/internal/ssa/_gen/AMD64.rules
+++ b/src/cmd/compile/internal/ssa/_gen/AMD64.rules
@@ -264,24 +264,6 @@
(Move [8] dst src mem) => (MOVQstore dst (MOVQload src mem) mem)
(Move [16] dst src mem) => (MOVOstore dst (MOVOload src mem) mem)
-(Move [32] dst src mem) =>
- (Move [16]
- (OffPtr dst [16])
- (OffPtr src [16])
- (Move [16] dst src mem))
-
-(Move [48] dst src mem) =>
- (Move [32]
- (OffPtr dst [16])
- (OffPtr src [16])
- (Move [16] dst src mem))
-
-(Move [64] dst src mem) =>
- (Move [32]
- (OffPtr dst [32])
- (OffPtr src [32])
- (Move [32] dst src mem))
-
(Move [3] dst src mem) =>
(MOVBstore [2] dst (MOVBload [2] src mem)
(MOVWstore dst (MOVWload src mem) mem))
@@ -310,28 +292,19 @@
(MOVQstore [int32(s-8)] dst (MOVQload [int32(s-8)] src mem)
(MOVQstore dst (MOVQload src mem) mem))
-// Adjust moves to be a multiple of 16 bytes.
-(Move [s] dst src mem)
- && s > 16 && s%16 != 0 && s%16 <= 8 =>
- (Move [s-s%16]
- (OffPtr dst [s%16])
- (OffPtr src [s%16])
- (MOVQstore dst (MOVQload src mem) mem))
-(Move [s] dst src mem)
- && s > 16 && s%16 != 0 && s%16 > 8 =>
- (Move [s-s%16]
- (OffPtr dst [s%16])
- (OffPtr src [s%16])
- (MOVOstore dst (MOVOload src mem) mem))
-
-// Medium copying uses a duff device.
-(Move [s] dst src mem)
- && s > 64 && s <= 16*64 && s%16 == 0
- && logLargeCopy(v, s) =>
- (DUFFCOPY [s] dst src mem)
+// Copying up to 192 bytes uses straightline code.
+(Move [s] dst src mem) && s > 16 && s < 192 && logLargeCopy(v, s) => (LoweredMove [s] dst src mem)
+
+// Copying up to ~1KB uses a small loop.
+(Move [s] dst src mem) && s >= 192 && s <= repMoveThreshold && logLargeCopy(v, s) => (LoweredMoveLoop [s] dst src mem)
// Large copying uses REP MOVSQ.
-(Move [s] dst src mem) && s > 16*64 && s%8 == 0 && logLargeCopy(v, s) =>
+(Move [s] dst src mem) && s > repMoveThreshold && s%8 != 0 =>
+ (Move [s-s%8]
+ (OffPtr dst [s%8])
+ (OffPtr src [s%8])
+ (MOVQstore dst (MOVQload src mem) mem))
+(Move [s] dst src mem) && s > repMoveThreshold && s%8 == 0 && logLargeCopy(v, s) =>
(REPMOVSQ dst src (MOVQconst [s/8]) mem)
// Lowering Zero instructions
@@ -606,31 +579,31 @@
// mutandis, for UGE and SETAE, and CC and SETCC.
((NE|EQ) (TESTL (SHLL (MOVLconst [1]) x) y)) => ((ULT|UGE) (BTL x y))
((NE|EQ) (TESTQ (SHLQ (MOVQconst [1]) x) y)) => ((ULT|UGE) (BTQ x y))
-((NE|EQ) (TESTLconst [c] x)) && isUint32PowerOfTwo(int64(c))
- => ((ULT|UGE) (BTLconst [int8(log32(c))] x))
-((NE|EQ) (TESTQconst [c] x)) && isUint64PowerOfTwo(int64(c))
- => ((ULT|UGE) (BTQconst [int8(log32(c))] x))
-((NE|EQ) (TESTQ (MOVQconst [c]) x)) && isUint64PowerOfTwo(c)
- => ((ULT|UGE) (BTQconst [int8(log64(c))] x))
+((NE|EQ) (TESTLconst [c] x)) && isUnsignedPowerOfTwo(uint32(c))
+ => ((ULT|UGE) (BTLconst [int8(log32u(uint32(c)))] x))
+((NE|EQ) (TESTQconst [c] x)) && isUnsignedPowerOfTwo(uint64(c))
+ => ((ULT|UGE) (BTQconst [int8(log32u(uint32(c)))] x))
+((NE|EQ) (TESTQ (MOVQconst [c]) x)) && isUnsignedPowerOfTwo(uint64(c))
+ => ((ULT|UGE) (BTQconst [int8(log64u(uint64(c)))] x))
(SET(NE|EQ) (TESTL (SHLL (MOVLconst [1]) x) y)) => (SET(B|AE) (BTL x y))
(SET(NE|EQ) (TESTQ (SHLQ (MOVQconst [1]) x) y)) => (SET(B|AE) (BTQ x y))
-(SET(NE|EQ) (TESTLconst [c] x)) && isUint32PowerOfTwo(int64(c))
- => (SET(B|AE) (BTLconst [int8(log32(c))] x))
-(SET(NE|EQ) (TESTQconst [c] x)) && isUint64PowerOfTwo(int64(c))
- => (SET(B|AE) (BTQconst [int8(log32(c))] x))
-(SET(NE|EQ) (TESTQ (MOVQconst [c]) x)) && isUint64PowerOfTwo(c)
- => (SET(B|AE) (BTQconst [int8(log64(c))] x))
+(SET(NE|EQ) (TESTLconst [c] x)) && isUnsignedPowerOfTwo(uint32(c))
+ => (SET(B|AE) (BTLconst [int8(log32u(uint32(c)))] x))
+(SET(NE|EQ) (TESTQconst [c] x)) && isUnsignedPowerOfTwo(uint64(c))
+ => (SET(B|AE) (BTQconst [int8(log32u(uint32(c)))] x))
+(SET(NE|EQ) (TESTQ (MOVQconst [c]) x)) && isUnsignedPowerOfTwo(uint64(c))
+ => (SET(B|AE) (BTQconst [int8(log64u(uint64(c)))] x))
// SET..store variant
(SET(NE|EQ)store [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
=> (SET(B|AE)store [off] {sym} ptr (BTL x y) mem)
(SET(NE|EQ)store [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
=> (SET(B|AE)store [off] {sym} ptr (BTQ x y) mem)
-(SET(NE|EQ)store [off] {sym} ptr (TESTLconst [c] x) mem) && isUint32PowerOfTwo(int64(c))
- => (SET(B|AE)store [off] {sym} ptr (BTLconst [int8(log32(c))] x) mem)
-(SET(NE|EQ)store [off] {sym} ptr (TESTQconst [c] x) mem) && isUint64PowerOfTwo(int64(c))
- => (SET(B|AE)store [off] {sym} ptr (BTQconst [int8(log32(c))] x) mem)
-(SET(NE|EQ)store [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) && isUint64PowerOfTwo(c)
- => (SET(B|AE)store [off] {sym} ptr (BTQconst [int8(log64(c))] x) mem)
+(SET(NE|EQ)store [off] {sym} ptr (TESTLconst [c] x) mem) && isUnsignedPowerOfTwo(uint32(c))
+ => (SET(B|AE)store [off] {sym} ptr (BTLconst [int8(log32u(uint32(c)))] x) mem)
+(SET(NE|EQ)store [off] {sym} ptr (TESTQconst [c] x) mem) && isUnsignedPowerOfTwo(uint64(c))
+ => (SET(B|AE)store [off] {sym} ptr (BTQconst [int8(log32u(uint32(c)))] x) mem)
+(SET(NE|EQ)store [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) && isUnsignedPowerOfTwo(uint64(c))
+ => (SET(B|AE)store [off] {sym} ptr (BTQconst [int8(log64u(uint64(c)))] x) mem)
// Handle bit-testing in the form (a>>b)&1 != 0 by building the above rules
// and further combining shifts.
@@ -655,14 +628,14 @@
(XOR(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y) x) => (BTC(Q|L) x y)
// Note: only convert OR/XOR to BTS/BTC if the constant wouldn't fit in
// the constant field of the OR/XOR instruction. See issue 61694.
-((OR|XOR)Q (MOVQconst [c]) x) && isUint64PowerOfTwo(c) && uint64(c) >= 1<<31 => (BT(S|C)Qconst [int8(log64(c))] x)
+((OR|XOR)Q (MOVQconst [c]) x) && isUnsignedPowerOfTwo(uint64(c)) && uint64(c) >= 1<<31 => (BT(S|C)Qconst [int8(log64u(uint64(c)))] x)
 // Recognize bit clearing: a &^= 1<<b
 (AND(Q|L) (NOT(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y)) x) => (BTR(Q|L) x y)
(ANDN(Q|L) x (SHL(Q|L) (MOV(Q|L)const [1]) y)) => (BTR(Q|L) x y)
// Note: only convert AND to BTR if the constant wouldn't fit in
// the constant field of the AND instruction. See issue 61694.
-(ANDQ (MOVQconst [c]) x) && isUint64PowerOfTwo(^c) && uint64(^c) >= 1<<31 => (BTRQconst [int8(log64(^c))] x)
+(ANDQ (MOVQconst [c]) x) && isUnsignedPowerOfTwo(uint64(^c)) && uint64(^c) >= 1<<31 => (BTRQconst [int8(log64u(uint64(^c)))] x)
// Special-case bit patterns on first/last bit.
// generic.rules changes ANDs of high-part/low-part masks into a couple of shifts,
diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go
index b6c019f28aaf90..e42b54398db1b1 100644
--- a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go
+++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go
@@ -939,20 +939,38 @@ func init() {
// arg0 = destination pointer
// arg1 = source pointer
// arg2 = mem
- // auxint = # of bytes to copy, must be multiple of 16
+ // auxint = # of bytes to copy
// returns memory
{
- name: "DUFFCOPY",
+ name: "LoweredMove",
aux: "Int64",
argLength: 3,
reg: regInfo{
- inputs: []regMask{buildReg("DI"), buildReg("SI")},
- clobbers: buildReg("DI SI X0"), // uses X0 as a temporary
+ inputs: []regMask{gp, gp},
+ clobbers: buildReg("X14"), // uses X14 as a temporary
},
- clobberFlags: true,
- //faultOnNilArg0: true, // Note: removed for 73748. TODO: reenable at some point
- //faultOnNilArg1: true,
- unsafePoint: true, // FP maintenance around DUFFCOPY can be clobbered by interrupts
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ },
+ // arg0 = destination pointer
+ // arg1 = source pointer
+ // arg2 = mem
+ // auxint = # of bytes to copy
+ // returns memory
+ {
+ name: "LoweredMoveLoop",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{gp, gp},
+ clobbers: buildReg("X14"), // uses X14 as a temporary
+ clobbersArg0: true,
+ clobbersArg1: true,
+ },
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ needIntTemp: true,
},
// arg0 = destination pointer
diff --git a/src/cmd/compile/internal/ssa/_gen/ARM64.rules b/src/cmd/compile/internal/ssa/_gen/ARM64.rules
index 15ba10e216be19..197db974b2a501 100644
--- a/src/cmd/compile/internal/ssa/_gen/ARM64.rules
+++ b/src/cmd/compile/internal/ssa/_gen/ARM64.rules
@@ -392,44 +392,8 @@
(Zero [16] ptr mem) =>
(STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem)
-(Zero [32] ptr mem) =>
- (STP [16] ptr (MOVDconst [0]) (MOVDconst [0])
- (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem))
-
-(Zero [48] ptr mem) =>
- (STP [32] ptr (MOVDconst [0]) (MOVDconst [0])
- (STP [16] ptr (MOVDconst [0]) (MOVDconst [0])
- (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem)))
-
-(Zero [64] ptr mem) =>
- (STP [48] ptr (MOVDconst [0]) (MOVDconst [0])
- (STP [32] ptr (MOVDconst [0]) (MOVDconst [0])
- (STP [16] ptr (MOVDconst [0]) (MOVDconst [0])
- (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem))))
-
-// strip off fractional word zeroing
-(Zero [s] ptr mem) && s%16 != 0 && s%16 <= 8 && s > 16 =>
- (Zero [8]
- (OffPtr ptr [s-8])
- (Zero [s-s%16] ptr mem))
-(Zero [s] ptr mem) && s%16 != 0 && s%16 > 8 && s > 16 =>
- (Zero [16]
- (OffPtr ptr [s-16])
- (Zero [s-s%16] ptr mem))
-
-// medium zeroing uses a duff device
-// 4, 16, and 64 are magic constants, see runtime/mkduff.go
-(Zero [s] ptr mem)
- && s%16 == 0 && s > 64 && s <= 16*64 =>
- (DUFFZERO [4 * (64 - s/16)] ptr mem)
-
-// large zeroing uses a loop
-(Zero [s] ptr mem)
- && s%16 == 0 && s > 16*64 =>
- (LoweredZero
- ptr
- (ADDconst [s-16] ptr)
- mem)
+(Zero [s] ptr mem) && s > 16 && s < 192 => (LoweredZero [s] ptr mem)
+(Zero [s] ptr mem) && s >= 192 => (LoweredZeroLoop [s] ptr mem)
// moves
(Move [0] _ _ mem) => mem
diff --git a/src/cmd/compile/internal/ssa/_gen/ARM64Ops.go b/src/cmd/compile/internal/ssa/_gen/ARM64Ops.go
index 69db139ff027be..072cc2f4c83f3a 100644
--- a/src/cmd/compile/internal/ssa/_gen/ARM64Ops.go
+++ b/src/cmd/compile/internal/ssa/_gen/ARM64Ops.go
@@ -536,44 +536,36 @@ func init() {
{name: "LessThanNoov", argLength: 1, reg: readflags}, // bool, true flags encode signed x=y but without honoring overflow, false otherwise.
- // duffzero
+ // medium zeroing
// arg0 = address of memory to zero
// arg1 = mem
- // auxint = offset into duffzero code to start executing
+ // auxint = # of bytes to zero
// returns mem
- // R20 changed as side effect
- // R16 and R17 may be clobbered by linker trampoline.
{
- name: "DUFFZERO",
+ name: "LoweredZero",
aux: "Int64",
argLength: 2,
reg: regInfo{
- inputs: []regMask{buildReg("R20")},
- clobbers: buildReg("R16 R17 R20 R30"),
+ inputs: []regMask{gp},
},
- //faultOnNilArg0: true, // Note: removed for 73748. TODO: reenable at some point
- unsafePoint: true, // FP maintenance around DUFFZERO can be clobbered by interrupts
+ faultOnNilArg0: true,
},
// large zeroing
- // arg0 = address of memory to zero (in R16 aka arm64.REGRT1, changed as side effect)
- // arg1 = address of the last 16-byte unit to zero
- // arg2 = mem
+ // arg0 = address of memory to zero
+ // arg1 = mem
+ // auxint = # of bytes to zero
// returns mem
- // STP.P (ZR,ZR), 16(R16)
- // CMP Rarg1, R16
- // BLE -2(PC)
- // Note: the-end-of-the-memory may be not a valid pointer. it's a problem if it is spilled.
- // the-end-of-the-memory - 16 is with the area to zero, ok to spill.
{
- name: "LoweredZero",
- argLength: 3,
+ name: "LoweredZeroLoop",
+ aux: "Int64",
+ argLength: 2,
reg: regInfo{
- inputs: []regMask{buildReg("R16"), gp},
- clobbers: buildReg("R16"),
+ inputs: []regMask{gp},
+ clobbersArg0: true,
},
- clobberFlags: true,
faultOnNilArg0: true,
+ needIntTemp: true,
},
// duffcopy
diff --git a/src/cmd/compile/internal/ssa/_gen/LOONG64.rules b/src/cmd/compile/internal/ssa/_gen/LOONG64.rules
index 9d0ad0148fd010..efeeca652c81c7 100644
--- a/src/cmd/compile/internal/ssa/_gen/LOONG64.rules
+++ b/src/cmd/compile/internal/ssa/_gen/LOONG64.rules
@@ -517,7 +517,7 @@
(GetCallerSP ...) => (LoweredGetCallerSP ...)
(GetCallerPC ...) => (LoweredGetCallerPC ...)
-(If cond yes no) => (NE (MOVBUreg cond) yes no)
+(If cond yes no) => (NEZ (MOVBUreg cond) yes no)
(MOVBUreg x:((SGT|SGTU) _ _)) => x
(MOVBUreg x:(XOR (MOVVconst [1]) ((SGT|SGTU) _ _))) => x
@@ -755,6 +755,9 @@
(MULV x (MOVVconst [c])) && canMulStrengthReduce(config, c) => {mulStrengthReduce(v, x, c)}
+(MULV (NEGV x) (MOVVconst [c])) => (MULV x (MOVVconst [-c]))
+(MULV (NEGV x) (NEGV y)) => (MULV x y)
+
// div by constant
(DIVVU x (MOVVconst [1])) => x
(DIVVU x (MOVVconst [c])) && isPowerOfTwo(c) => (SRLVconst [log64(c)] x)
@@ -899,41 +902,46 @@
// Optimizations
// Absorb boolean tests into block
-(NE (FPFlagTrue cmp) yes no) => (FPT cmp yes no)
-(NE (FPFlagFalse cmp) yes no) => (FPF cmp yes no)
-(EQ (FPFlagTrue cmp) yes no) => (FPF cmp yes no)
-(EQ (FPFlagFalse cmp) yes no) => (FPT cmp yes no)
-(NE (XORconst [1] cmp:(SGT _ _)) yes no) => (EQ cmp yes no)
-(NE (XORconst [1] cmp:(SGTU _ _)) yes no) => (EQ cmp yes no)
-(NE (XORconst [1] cmp:(SGTconst _)) yes no) => (EQ cmp yes no)
-(NE (XORconst [1] cmp:(SGTUconst _)) yes no) => (EQ cmp yes no)
-(EQ (XORconst [1] cmp:(SGT _ _)) yes no) => (NE cmp yes no)
-(EQ (XORconst [1] cmp:(SGTU _ _)) yes no) => (NE cmp yes no)
-(EQ (XORconst [1] cmp:(SGTconst _)) yes no) => (NE cmp yes no)
-(EQ (XORconst [1] cmp:(SGTUconst _)) yes no) => (NE cmp yes no)
-(NE (SGTUconst [1] x) yes no) => (EQ x yes no)
-(EQ (SGTUconst [1] x) yes no) => (NE x yes no)
-(NE (SGTU x (MOVVconst [0])) yes no) => (NE x yes no)
-(EQ (SGTU x (MOVVconst [0])) yes no) => (EQ x yes no)
-(NE (SGTconst [0] x) yes no) => (LTZ x yes no)
-(EQ (SGTconst [0] x) yes no) => (GEZ x yes no)
-(NE (SGT x (MOVVconst [0])) yes no) => (GTZ x yes no)
-(EQ (SGT x (MOVVconst [0])) yes no) => (LEZ x yes no)
-
-(EQ (SGTU (MOVVconst [c]) y) yes no) && c >= -2048 && c <= 2047 => (EQ (SGTUconst [c] y) yes no)
-(NE (SGTU (MOVVconst [c]) y) yes no) && c >= -2048 && c <= 2047 => (NE (SGTUconst [c] y) yes no)
-(EQ (SUBV x y) yes no) => (BEQ x y yes no)
-(NE (SUBV x y) yes no) => (BNE x y yes no)
-(EQ (SGT x y) yes no) => (BGE y x yes no)
-(NE (SGT x y) yes no) => (BLT y x yes no)
-(EQ (SGTU x y) yes no) => (BGEU y x yes no)
-(NE (SGTU x y) yes no) => (BLTU y x yes no)
+(NEZ (FPFlagTrue cmp) yes no) => (FPT cmp yes no)
+(NEZ (FPFlagFalse cmp) yes no) => (FPF cmp yes no)
+(EQZ (FPFlagTrue cmp) yes no) => (FPF cmp yes no)
+(EQZ (FPFlagFalse cmp) yes no) => (FPT cmp yes no)
+(NEZ (XORconst [1] cmp:(SGT _ _)) yes no) => (EQZ cmp yes no)
+(NEZ (XORconst [1] cmp:(SGTU _ _)) yes no) => (EQZ cmp yes no)
+(NEZ (XORconst [1] cmp:(SGTconst _)) yes no) => (EQZ cmp yes no)
+(NEZ (XORconst [1] cmp:(SGTUconst _)) yes no) => (EQZ cmp yes no)
+(EQZ (XORconst [1] cmp:(SGT _ _)) yes no) => (NEZ cmp yes no)
+(EQZ (XORconst [1] cmp:(SGTU _ _)) yes no) => (NEZ cmp yes no)
+(EQZ (XORconst [1] cmp:(SGTconst _)) yes no) => (NEZ cmp yes no)
+(EQZ (XORconst [1] cmp:(SGTUconst _)) yes no) => (NEZ cmp yes no)
+(NEZ (SGTUconst [1] x) yes no) => (EQZ x yes no)
+(EQZ (SGTUconst [1] x) yes no) => (NEZ x yes no)
+(NEZ (SGTU x (MOVVconst [0])) yes no) => (NEZ x yes no)
+(EQZ (SGTU x (MOVVconst [0])) yes no) => (EQZ x yes no)
+(NEZ (SGTconst [0] x) yes no) => (LTZ x yes no)
+(EQZ (SGTconst [0] x) yes no) => (GEZ x yes no)
+(NEZ (SGT x (MOVVconst [0])) yes no) => (GTZ x yes no)
+(EQZ (SGT x (MOVVconst [0])) yes no) => (LEZ x yes no)
+
+// Convert EQZ/NEZ into more optimal branch conditions.
+(EQZ (SGTU (MOVVconst [c]) y) yes no) && c >= -2048 && c <= 2047 => (EQZ (SGTUconst [c] y) yes no)
+(NEZ (SGTU (MOVVconst [c]) y) yes no) && c >= -2048 && c <= 2047 => (NEZ (SGTUconst [c] y) yes no)
+(EQZ (SUBV x y) yes no) => (BEQ x y yes no)
+(NEZ (SUBV x y) yes no) => (BNE x y yes no)
+(EQZ (SGT x y) yes no) => (BGE y x yes no)
+(NEZ (SGT x y) yes no) => (BLT y x yes no)
+(EQZ (SGTU x y) yes no) => (BGEU y x yes no)
+(NEZ (SGTU x y) yes no) => (BLTU y x yes no)
+(EQZ (SGTconst [c] y) yes no) => (BGE y (MOVVconst [c]) yes no)
+(NEZ (SGTconst [c] y) yes no) => (BLT y (MOVVconst [c]) yes no)
+(EQZ (SGTUconst [c] y) yes no) => (BGEU y (MOVVconst [c]) yes no)
+(NEZ (SGTUconst [c] y) yes no) => (BLTU y (MOVVconst [c]) yes no)
// absorb constants into branches
-(EQ (MOVVconst [0]) yes no) => (First yes no)
-(EQ (MOVVconst [c]) yes no) && c != 0 => (First no yes)
-(NE (MOVVconst [0]) yes no) => (First no yes)
-(NE (MOVVconst [c]) yes no) && c != 0 => (First yes no)
+(EQZ (MOVVconst [0]) yes no) => (First yes no)
+(EQZ (MOVVconst [c]) yes no) && c != 0 => (First no yes)
+(NEZ (MOVVconst [0]) yes no) => (First no yes)
+(NEZ (MOVVconst [c]) yes no) && c != 0 => (First yes no)
(LTZ (MOVVconst [c]) yes no) && c < 0 => (First yes no)
(LTZ (MOVVconst [c]) yes no) && c >= 0 => (First no yes)
(LEZ (MOVVconst [c]) yes no) && c <= 0 => (First yes no)
@@ -943,6 +951,22 @@
(GEZ (MOVVconst [c]) yes no) && c >= 0 => (First yes no)
(GEZ (MOVVconst [c]) yes no) && c < 0 => (First no yes)
+// absorb NEGV into branches
+(EQZ (NEGV x) yes no) => (EQZ x yes no)
+(NEZ (NEGV x) yes no) => (NEZ x yes no)
+
+// Convert branch with zero to more optimal branch zero.
+(BEQ (MOVVconst [0]) cond yes no) => (EQZ cond yes no)
+(BEQ cond (MOVVconst [0]) yes no) => (EQZ cond yes no)
+(BNE (MOVVconst [0]) cond yes no) => (NEZ cond yes no)
+(BNE cond (MOVVconst [0]) yes no) => (NEZ cond yes no)
+(BLT (MOVVconst [0]) cond yes no) => (GTZ cond yes no)
+(BLT cond (MOVVconst [0]) yes no) => (LTZ cond yes no)
+(BLTU (MOVVconst [0]) cond yes no) => (NEZ cond yes no)
+(BGE (MOVVconst [0]) cond yes no) => (LEZ cond yes no)
+(BGE cond (MOVVconst [0]) yes no) => (GEZ cond yes no)
+(BGEU (MOVVconst [0]) cond yes no) => (EQZ cond yes no)
+
// Arch-specific inlining for small or disjoint runtime.memmove
// Match post-lowering calls, register version.
(SelectN [0] call:(CALLstatic {sym} dst src (MOVVconst [sz]) mem))
diff --git a/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go b/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go
index 0e204c4a3c4476..d6818e8592ee16 100644
--- a/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go
+++ b/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go
@@ -577,11 +577,13 @@ func init() {
// is $hint and bit[41:5] is $n.
{name: "PRELD", argLength: 2, aux: "Int64", reg: preldreg, asm: "PRELD", hasSideEffects: true},
{name: "PRELDX", argLength: 2, aux: "Int64", reg: preldreg, asm: "PRELDX", hasSideEffects: true},
+
+	{name: "ADDshiftLLV", argLength: 2, aux: "Int64", reg: gp21, asm: "ALSLV"}, // arg0 + arg1<<auxInt
 	}
 
 	blocks := []blockData{
-		{name: "EQ", controls: 1},
-		{name: "NE", controls: 1},
+		{name: "EQZ", controls: 1}, // controls[0] == 0
+		{name: "NEZ", controls: 1}, // controls[0] != 0
 		{name: "LTZ", controls: 1}, // controls[0] < 0
 		{name: "LEZ", controls: 1}, // controls[0] <= 0
 		{name: "GTZ", controls: 1}, // controls[0] > 0
@@ -589,7 +591,7 @@ func init() {
{name: "FPT", controls: 1}, // FP flag is true
{name: "FPF", controls: 1}, // FP flag is false
{name: "BEQ", controls: 2}, // controls[0] == controls[1]
- {name: "BNE", controls: 2}, // controls[0] == controls[1]
+ {name: "BNE", controls: 2}, // controls[0] != controls[1]
{name: "BGE", controls: 2}, // controls[0] >= controls[1]
{name: "BLT", controls: 2}, // controls[0] < controls[1]
{name: "BGEU", controls: 2}, // controls[0] >= controls[1], unsigned
diff --git a/src/cmd/compile/internal/ssa/_gen/LOONG64latelower.rules b/src/cmd/compile/internal/ssa/_gen/LOONG64latelower.rules
index 95844381c28aa4..44583e8e340926 100644
--- a/src/cmd/compile/internal/ssa/_gen/LOONG64latelower.rules
+++ b/src/cmd/compile/internal/ssa/_gen/LOONG64latelower.rules
@@ -4,3 +4,6 @@
// Prefer addition when shifting left by one.
(SLLVconst [1] x) => (ADDV x x)
+
+(EQZ (XOR x y) yes no) => (BEQ x y yes no)
+(NEZ (XOR x y) yes no) => (BNE x y yes no)
diff --git a/src/cmd/compile/internal/ssa/_gen/PPC64.rules b/src/cmd/compile/internal/ssa/_gen/PPC64.rules
index 1749811b849030..f5e381ac413fcf 100644
--- a/src/cmd/compile/internal/ssa/_gen/PPC64.rules
+++ b/src/cmd/compile/internal/ssa/_gen/PPC64.rules
@@ -553,9 +553,11 @@
// Publication barrier as intrinsic
(PubBarrier ...) => (LoweredPubBarrier ...)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
+(PanicBounds ...) => (LoweredPanicBoundsRR ...)
+(LoweredPanicBoundsRR [kind] x (MOVDconst [c]) mem) => (LoweredPanicBoundsRC [kind] x {PanicBoundsC{C:c}} mem)
+(LoweredPanicBoundsRR [kind] (MOVDconst [c]) y mem) => (LoweredPanicBoundsCR [kind] {PanicBoundsC{C:c}} y mem)
+(LoweredPanicBoundsRC [kind] {p} (MOVDconst [c]) mem) => (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:c, Cy:p.C}} mem)
+(LoweredPanicBoundsCR [kind] {p} (MOVDconst [c]) mem) => (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:p.C, Cy:c}} mem)
// Optimizations
// Note that PPC "logical" immediates come in 0:15 and 16:31 unsigned immediate forms,
diff --git a/src/cmd/compile/internal/ssa/_gen/PPC64Ops.go b/src/cmd/compile/internal/ssa/_gen/PPC64Ops.go
index 1dae76366b7ec7..3c06208f7e21cd 100644
--- a/src/cmd/compile/internal/ssa/_gen/PPC64Ops.go
+++ b/src/cmd/compile/internal/ssa/_gen/PPC64Ops.go
@@ -171,10 +171,7 @@ func init() {
fpstore = regInfo{inputs: []regMask{gp | sp | sb, fp}}
fpstoreidx = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb, fp}}
callerSave = regMask(gp | fp | gr | xer)
- r3 = buildReg("R3")
- r4 = buildReg("R4")
- r5 = buildReg("R5")
- r6 = buildReg("R6")
+ first7 = buildReg("R3 R4 R5 R6 R7 R8 R9")
)
ops := []opData{
{name: "ADD", argLength: 2, reg: gp21, asm: "ADD", commutative: true}, // arg0 + arg1
@@ -706,12 +703,16 @@ func init() {
{name: "LoweredWB", argLength: 1, reg: regInfo{clobbers: (callerSave &^ buildReg("R0 R3 R4 R5 R6 R7 R8 R9 R10 R14 R15 R16 R17 R20 R21 g")) | buildReg("R31"), outputs: []regMask{buildReg("R29")}}, clobberFlags: true, aux: "Int64"},
{name: "LoweredPubBarrier", argLength: 1, asm: "LWSYNC", hasSideEffects: true}, // Do data barrier. arg0=memory
- // There are three of these functions so that they can have three different register inputs.
- // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the
- // default registers to match so we don't need to copy registers around unnecessarily.
- {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r5, r6}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
- {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r4, r5}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
- {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r3, r4}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+
+ // LoweredPanicBoundsRR takes x and y, two values that caused a bounds check to fail.
+ // The RC and CR versions are used when one of the arguments is a constant. CC is used
+ // when both are constant (normally both 0, as prove derives the fact that a [0] bounds
+ // failure means the length must have also been 0).
+ // AuxInt contains a report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsRR", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{first7, first7}}, typ: "Mem", call: true}, // arg0=x, arg1=y, arg2=mem, returns memory.
+ {name: "LoweredPanicBoundsRC", argLength: 2, aux: "PanicBoundsC", reg: regInfo{inputs: []regMask{first7}}, typ: "Mem", call: true}, // arg0=x, arg1=mem, returns memory.
+ {name: "LoweredPanicBoundsCR", argLength: 2, aux: "PanicBoundsC", reg: regInfo{inputs: []regMask{first7}}, typ: "Mem", call: true}, // arg0=y, arg1=mem, returns memory.
+ {name: "LoweredPanicBoundsCC", argLength: 1, aux: "PanicBoundsCC", reg: regInfo{}, typ: "Mem", call: true}, // arg0=mem, returns memory.
// (InvertFlags (CMP a b)) == (CMP b a)
// So if we want (LessThan (CMP a b)) but we can't do that because a is a constant,
diff --git a/src/cmd/compile/internal/ssa/_gen/RISCV64.rules b/src/cmd/compile/internal/ssa/_gen/RISCV64.rules
index dc1cc97fb3cd05..9d79fc34e8732b 100644
--- a/src/cmd/compile/internal/ssa/_gen/RISCV64.rules
+++ b/src/cmd/compile/internal/ssa/_gen/RISCV64.rules
@@ -299,6 +299,11 @@
(base.Op != OpSB || !config.ctxt.Flag_dynlink) =>
(MOV(B|BU|H|HU|W|WU|D)load [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(FMOV(W|D)load [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) &&
+ is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) &&
+ (base.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (FMOV(W|D)load [off1+off2] {mergeSym(sym1,sym2)} base mem)
+
(MOV(B|H|W|D)store [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) &&
is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) &&
(base.Op != OpSB || !config.ctxt.Flag_dynlink) =>
@@ -309,15 +314,26 @@
(base.Op != OpSB || !config.ctxt.Flag_dynlink) =>
(MOV(B|H|W|D)storezero [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(FMOV(W|D)store [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) &&
+ is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) &&
+ (base.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (FMOV(W|D)store [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+
(MOV(B|BU|H|HU|W|WU|D)load [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
(MOV(B|BU|H|HU|W|WU|D)load [off1+int32(off2)] {sym} base mem)
+(FMOV(W|D)load [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
+ (FMOV(W|D)load [off1+int32(off2)] {sym} base mem)
+
(MOV(B|H|W|D)store [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
(MOV(B|H|W|D)store [off1+int32(off2)] {sym} base val mem)
(MOV(B|H|W|D)storezero [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
(MOV(B|H|W|D)storezero [off1+int32(off2)] {sym} base mem)
+(FMOV(W|D)store [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
+ (FMOV(W|D)store [off1+int32(off2)] {sym} base val mem)
+
// Similarly, fold ADDI into MOVaddr to avoid confusing live variable analysis
// with OffPtr -> ADDI.
(ADDI [c] (MOVaddr [d] {s} x)) && is32Bit(c+int64(d)) => (MOVaddr [int32(c)+d] {s} x)
@@ -407,9 +423,11 @@
// Publication barrier as intrinsic
(PubBarrier ...) => (LoweredPubBarrier ...)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
+(PanicBounds ...) => (LoweredPanicBoundsRR ...)
+(LoweredPanicBoundsRR [kind] x (MOVDconst [c]) mem) => (LoweredPanicBoundsRC [kind] x {PanicBoundsC{C:c}} mem)
+(LoweredPanicBoundsRR [kind] (MOVDconst [c]) y mem) => (LoweredPanicBoundsCR [kind] {PanicBoundsC{C:c}} y mem)
+(LoweredPanicBoundsRC [kind] {p} (MOVDconst [c]) mem) => (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:c, Cy:p.C}} mem)
+(LoweredPanicBoundsCR [kind] {p} (MOVDconst [c]) mem) => (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:p.C, Cy:c}} mem)
// Small moves
(Move [0] _ _ mem) => mem
@@ -699,6 +717,13 @@
(MOVHUreg x:(MOVHload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVHUload [off] {sym} ptr mem)
(MOVWUreg x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWUload [off] {sym} ptr mem)
+// Replace load from same location as preceding store with copy.
+(MOVDload [off] {sym} ptr1 (FMOVDstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (FMVXD x)
+(FMOVDload [off] {sym} ptr1 (MOVDstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (FMVDX x)
+(MOVWload [off] {sym} ptr1 (FMOVWstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (FMVXS x)
+(MOVWUload [off] {sym} ptr1 (FMOVWstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOVWUreg (FMVXS x))
+(FMOVWload [off] {sym} ptr1 (MOVWstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (FMVSX x)
+
// If a register move has only 1 use, just use the same register without emitting instruction
// MOVnop does not emit an instruction, only for ensuring the type.
(MOVDreg x) && x.Uses == 1 => (MOVDnop x)
@@ -837,6 +862,18 @@
(F(MADD|NMADD|MSUB|NMSUB)D neg:(FNEGD x) y z) && neg.Uses == 1 => (F(NMSUB|MSUB|NMADD|MADD)D x y z)
(F(MADD|NMADD|MSUB|NMSUB)D x y neg:(FNEGD z)) && neg.Uses == 1 => (F(MSUB|NMSUB|MADD|NMADD)D x y z)
+// Test for -∞ (bit 0) using 64 bit classify instruction.
+(FLTD x (FMVDX (MOVDconst [int64(math.Float64bits(-math.MaxFloat64))]))) => (ANDI [1] (FCLASSD x))
+(FLED (FMVDX (MOVDconst [int64(math.Float64bits(-math.MaxFloat64))])) x) => (SNEZ (ANDI [0xff &^ 1] (FCLASSD x)))
+(FEQD x (FMVDX (MOVDconst [int64(math.Float64bits(math.Inf(-1)))]))) => (ANDI [1] (FCLASSD x))
+(FNED x (FMVDX (MOVDconst [int64(math.Float64bits(math.Inf(-1)))]))) => (SEQZ (ANDI [1] (FCLASSD x)))
+
+// Test for +∞ (bit 7) using 64 bit classify instruction.
+(FLTD (FMVDX (MOVDconst [int64(math.Float64bits(math.MaxFloat64))])) x) => (SNEZ (ANDI [1<<7] (FCLASSD x)))
+(FLED x (FMVDX (MOVDconst [int64(math.Float64bits(math.MaxFloat64))]))) => (SNEZ (ANDI [0xff &^ (1<<7)] (FCLASSD x)))
+(FEQD x (FMVDX (MOVDconst [int64(math.Float64bits(math.Inf(1)))]))) => (SNEZ (ANDI [1<<7] (FCLASSD x)))
+(FNED x (FMVDX (MOVDconst [int64(math.Float64bits(math.Inf(1)))]))) => (SEQZ (ANDI [1<<7] (FCLASSD x)))
+
//
// Optimisations for rva22u64 and above.
//
diff --git a/src/cmd/compile/internal/ssa/_gen/RISCV64Ops.go b/src/cmd/compile/internal/ssa/_gen/RISCV64Ops.go
index 8cb042a604bee6..0bccaf63bc4182 100644
--- a/src/cmd/compile/internal/ssa/_gen/RISCV64Ops.go
+++ b/src/cmd/compile/internal/ssa/_gen/RISCV64Ops.go
@@ -49,7 +49,7 @@ func riscv64RegName(r int) string {
func init() {
var regNamesRISCV64 []string
- var gpMask, fpMask, gpgMask, gpspMask, gpspsbMask, gpspsbgMask regMask
+ var gpMask, fpMask, gpgMask, gpspMask, gpspsbMask, gpspsbgMask, first16Mask regMask
regNamed := make(map[string]regMask)
// Build the list of register names, creating an appropriately indexed
@@ -93,6 +93,9 @@ func init() {
gpspMask |= mask
gpspsbMask |= mask
gpspsbgMask |= mask
+ if r >= 5 && r < 5+16 {
+ first16Mask |= mask
+ }
}
}
@@ -429,12 +432,15 @@ func init() {
// Do data barrier. arg0=memorys
{name: "LoweredPubBarrier", argLength: 1, asm: "FENCE", hasSideEffects: true},
- // There are three of these functions so that they can have three different register inputs.
- // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the
- // default registers to match so we don't need to copy registers around unnecessarily.
- {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{regNamed["X7"], regNamed["X28"]}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
- {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{regNamed["X6"], regNamed["X7"]}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
- {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{regNamed["X5"], regNamed["X6"]}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ // LoweredPanicBoundsRR takes x and y, two values that caused a bounds check to fail.
+ // The RC and CR versions are used when one of the arguments is a constant. CC is used
+ // when both are constant (normally both 0, as prove derives the fact that a [0] bounds
+ // failure means the length must have also been 0).
+ // AuxInt contains a report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsRR", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{first16Mask, first16Mask}}, typ: "Mem", call: true}, // arg0=x, arg1=y, arg2=mem, returns memory.
+ {name: "LoweredPanicBoundsRC", argLength: 2, aux: "PanicBoundsC", reg: regInfo{inputs: []regMask{first16Mask}}, typ: "Mem", call: true}, // arg0=x, arg1=mem, returns memory.
+ {name: "LoweredPanicBoundsCR", argLength: 2, aux: "PanicBoundsC", reg: regInfo{inputs: []regMask{first16Mask}}, typ: "Mem", call: true}, // arg0=y, arg1=mem, returns memory.
+ {name: "LoweredPanicBoundsCC", argLength: 1, aux: "PanicBoundsCC", reg: regInfo{}, typ: "Mem", call: true}, // arg0=mem, returns memory.
// F extension.
{name: "FADDS", argLength: 2, reg: fp21, asm: "FADDS", commutative: true, typ: "Float32"}, // arg0 + arg1
@@ -447,7 +453,8 @@ func init() {
{name: "FNMSUBS", argLength: 3, reg: fp31, asm: "FNMSUBS", commutative: true, typ: "Float32"}, // -(arg0 * arg1) - arg2
{name: "FSQRTS", argLength: 1, reg: fp11, asm: "FSQRTS", typ: "Float32"}, // sqrt(arg0)
{name: "FNEGS", argLength: 1, reg: fp11, asm: "FNEGS", typ: "Float32"}, // -arg0
- {name: "FMVSX", argLength: 1, reg: gpfp, asm: "FMVSX", typ: "Float32"}, // reinterpret arg0 as float
+ {name: "FMVSX", argLength: 1, reg: gpfp, asm: "FMVSX", typ: "Float32"}, // reinterpret arg0 as float32
+ {name: "FMVXS", argLength: 1, reg: fpgp, asm: "FMVXS", typ: "Int32"}, // reinterpret arg0 as int32, sign extended to 64 bits
{name: "FCVTSW", argLength: 1, reg: gpfp, asm: "FCVTSW", typ: "Float32"}, // float32(low 32 bits of arg0)
{name: "FCVTSL", argLength: 1, reg: gpfp, asm: "FCVTSL", typ: "Float32"}, // float32(arg0)
{name: "FCVTWS", argLength: 1, reg: fpgp, asm: "FCVTWS", typ: "Int32"}, // int32(arg0)
@@ -474,7 +481,8 @@ func init() {
{name: "FNEGD", argLength: 1, reg: fp11, asm: "FNEGD", typ: "Float64"}, // -arg0
{name: "FABSD", argLength: 1, reg: fp11, asm: "FABSD", typ: "Float64"}, // abs(arg0)
{name: "FSGNJD", argLength: 2, reg: fp21, asm: "FSGNJD", typ: "Float64"}, // copy sign of arg1 to arg0
- {name: "FMVDX", argLength: 1, reg: gpfp, asm: "FMVDX", typ: "Float64"}, // reinterpret arg0 as float
+ {name: "FMVDX", argLength: 1, reg: gpfp, asm: "FMVDX", typ: "Float64"}, // reinterpret arg0 as float64
+ {name: "FMVXD", argLength: 1, reg: fpgp, asm: "FMVXD", typ: "Int64"}, // reinterpret arg0 as int64
{name: "FCVTDW", argLength: 1, reg: gpfp, asm: "FCVTDW", typ: "Float64"}, // float64(low 32 bits of arg0)
{name: "FCVTDL", argLength: 1, reg: gpfp, asm: "FCVTDL", typ: "Float64"}, // float64(arg0)
{name: "FCVTWD", argLength: 1, reg: fpgp, asm: "FCVTWD", typ: "Int32"}, // int32(arg0)
@@ -489,6 +497,27 @@ func init() {
{name: "FLED", argLength: 2, reg: fp2gp, asm: "FLED"}, // arg0 <= arg1
{name: "LoweredFMIND", argLength: 2, reg: fp21, resultNotInArgs: true, asm: "FMIND", commutative: true, typ: "Float64"}, // min(arg0, arg1)
{name: "LoweredFMAXD", argLength: 2, reg: fp21, resultNotInArgs: true, asm: "FMAXD", commutative: true, typ: "Float64"}, // max(arg0, arg1)
+
+ // Floating point classify (in the F and D extensions).
+ //
+ // The FCLASS instructions will always set exactly one bit in the output
+ // register, all other bits will be cleared.
+ //
+ // Bit | Class
+ // ====+=============================
+ // 0 | -∞
+ // 1 | a negative normal number
+ // 2 | a negative subnormal number
+ // 3 | -0
+ // 4 | +0
+ // 5 | a positive subnormal number
+ // 6 | a positive normal number
+ // 7 | +∞
+ // 8 | qNaN
+ // 9 | sNaN
+ // ====+=============================
+ {name: "FCLASSS", argLength: 1, reg: fpgp, asm: "FCLASSS", typ: "Int64"}, // classify float32
+ {name: "FCLASSD", argLength: 1, reg: fpgp, asm: "FCLASSD", typ: "Int64"}, // classify float64
}
RISCV64blocks := []blockData{
diff --git a/src/cmd/compile/internal/ssa/_gen/S390X.rules b/src/cmd/compile/internal/ssa/_gen/S390X.rules
index 80e12f8e29d6d2..664bf4a89c9024 100644
--- a/src/cmd/compile/internal/ssa/_gen/S390X.rules
+++ b/src/cmd/compile/internal/ssa/_gen/S390X.rules
@@ -458,9 +458,11 @@
// Write barrier.
(WB ...) => (LoweredWB ...)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
+(PanicBounds ...) => (LoweredPanicBoundsRR ...)
+(LoweredPanicBoundsRR [kind] x (MOVDconst [c]) mem) => (LoweredPanicBoundsRC [kind] x {PanicBoundsC{C:c}} mem)
+(LoweredPanicBoundsRR [kind] (MOVDconst [c]) y mem) => (LoweredPanicBoundsCR [kind] {PanicBoundsC{C:c}} y mem)
+(LoweredPanicBoundsRC [kind] {p} (MOVDconst [c]) mem) => (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:c, Cy:p.C}} mem)
+(LoweredPanicBoundsCR [kind] {p} (MOVDconst [c]) mem) => (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:p.C, Cy:c}} mem)
// ***************************
// Above: lowering rules
diff --git a/src/cmd/compile/internal/ssa/_gen/S390XOps.go b/src/cmd/compile/internal/ssa/_gen/S390XOps.go
index 38fb3cb0748932..c002d5bcc382a3 100644
--- a/src/cmd/compile/internal/ssa/_gen/S390XOps.go
+++ b/src/cmd/compile/internal/ssa/_gen/S390XOps.go
@@ -114,6 +114,7 @@ func init() {
sb = buildReg("SB")
r0 = buildReg("R0")
tmp = buildReg("R11") // R11 is used as a temporary in a small number of instructions.
+ lr = buildReg("R14")
// R10 is reserved by the assembler.
gp = buildReg("R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14")
@@ -518,12 +519,15 @@ func init() {
// Returns a pointer to a write barrier buffer in R9.
{name: "LoweredWB", argLength: 1, reg: regInfo{clobbers: (callerSave &^ gpg) | buildReg("R14") | r1, outputs: []regMask{r9}}, clobberFlags: true, aux: "Int64"},
- // There are three of these functions so that they can have three different register inputs.
- // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the
- // default registers to match so we don't need to copy registers around unnecessarily.
- {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r2, r3}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go).
- {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r1, r2}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go).
- {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r0, r1}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go).
+ // LoweredPanicBoundsRR takes x and y, two values that caused a bounds check to fail.
+ // The RC and CR versions are used when one of the arguments is a constant. CC is used
+ // when both are constant (normally both 0, as prove derives the fact that a [0] bounds
+ // failure means the length must have also been 0).
+ // AuxInt contains a report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsRR", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{gp &^ lr, gp &^ lr}}, typ: "Mem", call: true}, // arg0=x, arg1=y, arg2=mem, returns memory.
+ {name: "LoweredPanicBoundsRC", argLength: 2, aux: "PanicBoundsC", reg: regInfo{inputs: []regMask{gp &^ lr}}, typ: "Mem", call: true}, // arg0=x, arg1=mem, returns memory.
+ {name: "LoweredPanicBoundsCR", argLength: 2, aux: "PanicBoundsC", reg: regInfo{inputs: []regMask{gp &^ lr}}, typ: "Mem", call: true}, // arg0=y, arg1=mem, returns memory.
+ {name: "LoweredPanicBoundsCC", argLength: 1, aux: "PanicBoundsCC", reg: regInfo{}, typ: "Mem", call: true}, // arg0=mem, returns memory.
// Constant condition code values. The condition code can be 0, 1, 2 or 3.
{name: "FlagEQ"}, // CC=0 (equal)
diff --git a/src/cmd/compile/internal/ssa/biasedsparsemap.go b/src/cmd/compile/internal/ssa/biasedsparsemap.go
index 3032309b7a2f1b..25fbaf68625753 100644
--- a/src/cmd/compile/internal/ssa/biasedsparsemap.go
+++ b/src/cmd/compile/internal/ssa/biasedsparsemap.go
@@ -84,14 +84,6 @@ func (s *biasedSparseMap) getEntry(i int) (x uint, v int32) {
return
}
-// add inserts x->0 into s, provided that x is in the range of keys stored in s.
-func (s *biasedSparseMap) add(x uint) {
- if int(x) < s.first || int(x) >= s.cap() {
- return
- }
- s.s.set(ID(int(x)-s.first), 0)
-}
-
// add inserts x->v into s, provided that x is in the range of keys stored in s.
func (s *biasedSparseMap) set(x uint, v int32) {
if int(x) < s.first || int(x) >= s.cap() {
diff --git a/src/cmd/compile/internal/ssa/branchelim.go b/src/cmd/compile/internal/ssa/branchelim.go
index f16959dd572973..a7d339cad064ac 100644
--- a/src/cmd/compile/internal/ssa/branchelim.go
+++ b/src/cmd/compile/internal/ssa/branchelim.go
@@ -436,8 +436,15 @@ func canSpeculativelyExecute(b *Block) bool {
// don't fuse memory ops, Phi ops, divides (can panic),
// or anything else with side-effects
for _, v := range b.Values {
- if v.Op == OpPhi || isDivMod(v.Op) || isPtrArithmetic(v.Op) || v.Type.IsMemory() ||
- v.MemoryArg() != nil || opcodeTable[v.Op].hasSideEffects {
+ if v.Op == OpPhi || isDivMod(v.Op) || isPtrArithmetic(v.Op) ||
+ v.Type.IsMemory() || opcodeTable[v.Op].hasSideEffects {
+ return false
+ }
+
+ // Allow inlining markers to be speculatively executed
+ // even though they have a memory argument.
+ // See issue #74915.
+ if v.Op != OpInlMark && v.MemoryArg() != nil {
return false
}
}
diff --git a/src/cmd/compile/internal/ssa/config.go b/src/cmd/compile/internal/ssa/config.go
index 50ec2ec177a88b..f2097170f47e04 100644
--- a/src/cmd/compile/internal/ssa/config.go
+++ b/src/cmd/compile/internal/ssa/config.go
@@ -566,7 +566,7 @@ func (c *Config) buildRecipes(arch string) {
}
case "loong64":
// - multiply is 4 cycles.
- // - add/sub/shift are 1 cycle.
+ // - add/sub/shift/alsl are 1 cycle.
// On loong64, using a multiply also needs to load the constant into a register.
// TODO: figure out a happy medium.
mulCost = 45
@@ -601,6 +601,15 @@ func (c *Config) buildRecipes(arch string) {
return m.Block.NewValue1I(m.Pos, OpLOONG64SLLVconst, m.Type, int64(i), x)
})
}
+
+ // ADDshiftLLV
+ for i := 1; i < 5; i++ {
+ c := 10
+ r(1, 1< 0 {
+ heapSize := uint64(1)<<(uint64(f.Config.PtrSize)*8) - 1
+ maximumElementsFittingInHeap := heapSize / elemSize
+ lim = lim.unsignedMax(maximumElementsFittingInHeap)
+ }
+ fallthrough
+ case OpStringLen:
lim = lim.signedMin(0)
}
diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go
index fb9642cfedfbfd..f3c1d3bd968073 100644
--- a/src/cmd/compile/internal/ssa/regalloc.go
+++ b/src/cmd/compile/internal/ssa/regalloc.go
@@ -561,7 +561,14 @@ func (s *regAllocState) allocValToReg(v *Value, mask regMask, nospill bool, pos
pos = pos.WithNotStmt()
// Check if v is already in a requested register.
if mask&vi.regs != 0 {
- r := pickReg(mask & vi.regs)
+ mask &= vi.regs
+ r := pickReg(mask)
+ if mask.contains(s.SPReg) {
+ // Prefer the stack pointer if it is allowed.
+ // (Needed because the op might have an Aux symbol
+ // that needs SP as its base.)
+ r = s.SPReg
+ }
if !s.allocatable.contains(r) {
return v // v is in a fixed register
}
@@ -2476,7 +2483,7 @@ func (e *edgeState) processDest(loc Location, vid ID, splice **Value, pos src.XP
}
// Check if we're allowed to clobber the destination location.
- if len(e.cache[occupant.vid]) == 1 && !e.s.values[occupant.vid].rematerializeable {
+ if len(e.cache[occupant.vid]) == 1 && !e.s.values[occupant.vid].rematerializeable && !opcodeTable[e.s.orig[occupant.vid].Op].fixedReg {
// We can't overwrite the last copy
// of a value that needs to survive.
return false
@@ -2980,11 +2987,6 @@ type desiredStateEntry struct {
regs [4]register
}
-func (d *desiredState) clear() {
- d.entries = d.entries[:0]
- d.avoid = 0
-}
-
// get returns a list of desired registers for value vid.
func (d *desiredState) get(vid ID) [4]register {
for _, e := range d.entries {
diff --git a/src/cmd/compile/internal/ssa/regalloc_test.go b/src/cmd/compile/internal/ssa/regalloc_test.go
index e7ed416c507d51..0f69b852d12971 100644
--- a/src/cmd/compile/internal/ssa/regalloc_test.go
+++ b/src/cmd/compile/internal/ssa/regalloc_test.go
@@ -240,6 +240,30 @@ func TestClobbersArg0(t *testing.T) {
}
}
+func TestClobbersArg1(t *testing.T) {
+ c := testConfig(t)
+ f := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("src", OpArg, c.config.Types.Int64.PtrTo(), 0, c.Temp(c.config.Types.Int64.PtrTo())),
+ Valu("dst", OpArg, c.config.Types.Int64.PtrTo(), 0, c.Temp(c.config.Types.Int64.PtrTo())),
+ Valu("use1", OpArg, c.config.Types.Int64.PtrTo().PtrTo(), 0, c.Temp(c.config.Types.Int64.PtrTo().PtrTo())),
+ Valu("use2", OpArg, c.config.Types.Int64.PtrTo().PtrTo(), 0, c.Temp(c.config.Types.Int64.PtrTo().PtrTo())),
+ Valu("move", OpAMD64LoweredMoveLoop, types.TypeMem, 256, nil, "dst", "src", "mem"),
+ Valu("store1", OpAMD64MOVQstore, types.TypeMem, 0, nil, "use1", "src", "move"),
+ Valu("store2", OpAMD64MOVQstore, types.TypeMem, 0, nil, "use2", "dst", "store1"),
+ Exit("store2")))
+ flagalloc(f.f)
+ regalloc(f.f)
+ checkFunc(f.f)
+ // LoweredMoveLoop clobbers its arguments, so there must be a copy of "src" and "dst" somewhere
+ // so we still have that value available at the stores.
+ if n := numCopies(f.blocks["entry"]); n != 2 {
+ fmt.Printf("%s\n", f.f.String())
+ t.Errorf("got %d copies, want 2", n)
+ }
+}
+
func numSpills(b *Block) int {
return numOps(b, OpStoreReg)
}
diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go
index f9a35deecc3d0b..236a3f885adb4b 100644
--- a/src/cmd/compile/internal/ssa/rewrite.go
+++ b/src/cmd/compile/internal/ssa/rewrite.go
@@ -31,6 +31,7 @@ const (
removeDeadValues = true
repZeroThreshold = 1408 // size beyond which we use REP STOS for zeroing
+ repMoveThreshold = 1408 // size beyond which we use REP MOVS for copying
)
// deadcode indicates whether rewrite should try to remove any values that become dead.
@@ -504,18 +505,6 @@ func isUnsignedPowerOfTwo[T uint8 | uint16 | uint32 | uint64](n T) bool {
return n != 0 && n&(n-1) == 0
}
-// isUint64PowerOfTwo reports whether uint64(n) is a power of 2.
-func isUint64PowerOfTwo(in int64) bool {
- n := uint64(in)
- return n > 0 && n&(n-1) == 0
-}
-
-// isUint32PowerOfTwo reports whether uint32(n) is a power of 2.
-func isUint32PowerOfTwo(in int64) bool {
- n := uint64(uint32(in))
- return n > 0 && n&(n-1) == 0
-}
-
// is32Bit reports whether n can be represented as a signed 32 bit integer.
func is32Bit(n int64) bool {
return n == int64(int32(n))
@@ -637,51 +626,16 @@ func truncate64Fto32F(f float64) float32 {
return math.Float32frombits(r)
}
-// extend32Fto64F converts a float32 value to a float64 value preserving the bit
-// pattern of the mantissa.
-func extend32Fto64F(f float32) float64 {
- if !math.IsNaN(float64(f)) {
- return float64(f)
- }
- // NaN bit patterns aren't necessarily preserved across conversion
- // instructions so we need to do the conversion manually.
- b := uint64(math.Float32bits(f))
- // | sign | exponent | mantissa |
- r := ((b << 32) & (1 << 63)) | (0x7ff << 52) | ((b & 0x7fffff) << (52 - 23))
- return math.Float64frombits(r)
-}
-
// DivisionNeedsFixUp reports whether the division needs fix-up code.
func DivisionNeedsFixUp(v *Value) bool {
return v.AuxInt == 0
}
-// auxFrom64F encodes a float64 value so it can be stored in an AuxInt.
-func auxFrom64F(f float64) int64 {
- if f != f {
- panic("can't encode a NaN in AuxInt field")
- }
- return int64(math.Float64bits(f))
-}
-
-// auxFrom32F encodes a float32 value so it can be stored in an AuxInt.
-func auxFrom32F(f float32) int64 {
- if f != f {
- panic("can't encode a NaN in AuxInt field")
- }
- return int64(math.Float64bits(extend32Fto64F(f)))
-}
-
// auxTo32F decodes a float32 from the AuxInt value provided.
func auxTo32F(i int64) float32 {
return truncate64Fto32F(math.Float64frombits(uint64(i)))
}
-// auxTo64F decodes a float64 from the AuxInt value provided.
-func auxTo64F(i int64) float64 {
- return math.Float64frombits(uint64(i))
-}
-
func auxIntToBool(i int64) bool {
if i == 0 {
return false
@@ -715,12 +669,6 @@ func auxIntToValAndOff(i int64) ValAndOff {
func auxIntToArm64BitField(i int64) arm64BitField {
return arm64BitField(i)
}
-func auxIntToInt128(x int64) int128 {
- if x != 0 {
- panic("nonzero int128 not allowed")
- }
- return 0
-}
func auxIntToFlagConstant(x int64) flagConstant {
return flagConstant(x)
}
@@ -762,12 +710,6 @@ func valAndOffToAuxInt(v ValAndOff) int64 {
func arm64BitFieldToAuxInt(v arm64BitField) int64 {
return int64(v)
}
-func int128ToAuxInt(x int128) int64 {
- if x != 0 {
- panic("nonzero int128 not allowed")
- }
- return 0
-}
func flagConstantToAuxInt(x flagConstant) int64 {
return int64(x)
}
@@ -838,23 +780,6 @@ func uaddOvf(a, b int64) bool {
return uint64(a)+uint64(b) < uint64(a)
}
-// loadLSymOffset simulates reading a word at an offset into a
-// read-only symbol's runtime memory. If it would read a pointer to
-// another symbol, that symbol is returned. Otherwise, it returns nil.
-func loadLSymOffset(lsym *obj.LSym, offset int64) *obj.LSym {
- if lsym.Type != objabi.SRODATA {
- return nil
- }
-
- for _, r := range lsym.R {
- if int64(r.Off) == offset && r.Type&^objabi.R_WEAK == objabi.R_ADDR && r.Add == 0 {
- return r.Sym
- }
- }
-
- return nil
-}
-
func devirtLECall(v *Value, sym *obj.LSym) *Value {
v.Op = OpStaticLECall
auxcall := v.Aux.(*AuxCall)
@@ -1576,10 +1501,6 @@ func GetPPC64Shiftmb(auxint int64) int64 {
return int64(int8(auxint >> 8))
}
-func GetPPC64Shiftme(auxint int64) int64 {
- return int64(int8(auxint))
-}
-
// Test if this value can encoded as a mask for a rlwinm like
// operation. Masks can also extend from the msb and wrap to
// the lsb too. That is, the valid masks are 32 bit strings
diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go
index 3532d42b0cda6e..a7ee632ae1af72 100644
--- a/src/cmd/compile/internal/ssa/rewriteAMD64.go
+++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go
@@ -3108,8 +3108,8 @@ func rewriteValueAMD64_OpAMD64ANDQ(v *Value) bool {
break
}
// match: (ANDQ (MOVQconst [c]) x)
- // cond: isUint64PowerOfTwo(^c) && uint64(^c) >= 1<<31
- // result: (BTRQconst [int8(log64(^c))] x)
+ // cond: isUnsignedPowerOfTwo(uint64(^c)) && uint64(^c) >= 1<<31
+ // result: (BTRQconst [int8(log64u(uint64(^c)))] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpAMD64MOVQconst {
@@ -3117,11 +3117,11 @@ func rewriteValueAMD64_OpAMD64ANDQ(v *Value) bool {
}
c := auxIntToInt64(v_0.AuxInt)
x := v_1
- if !(isUint64PowerOfTwo(^c) && uint64(^c) >= 1<<31) {
+ if !(isUnsignedPowerOfTwo(uint64(^c)) && uint64(^c) >= 1<<31) {
continue
}
v.reset(OpAMD64BTRQconst)
- v.AuxInt = int8ToAuxInt(int8(log64(^c)))
+ v.AuxInt = int8ToAuxInt(int8(log64u(uint64(^c))))
v.AddArg(x)
return true
}
@@ -14431,8 +14431,8 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
break
}
// match: (ORQ (MOVQconst [c]) x)
- // cond: isUint64PowerOfTwo(c) && uint64(c) >= 1<<31
- // result: (BTSQconst [int8(log64(c))] x)
+ // cond: isUnsignedPowerOfTwo(uint64(c)) && uint64(c) >= 1<<31
+ // result: (BTSQconst [int8(log64u(uint64(c)))] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpAMD64MOVQconst {
@@ -14440,11 +14440,11 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
}
c := auxIntToInt64(v_0.AuxInt)
x := v_1
- if !(isUint64PowerOfTwo(c) && uint64(c) >= 1<<31) {
+ if !(isUnsignedPowerOfTwo(uint64(c)) && uint64(c) >= 1<<31) {
continue
}
v.reset(OpAMD64BTSQconst)
- v.AuxInt = int8ToAuxInt(int8(log64(c)))
+ v.AuxInt = int8ToAuxInt(int8(log64u(uint64(c))))
v.AddArg(x)
return true
}
@@ -17398,46 +17398,46 @@ func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool {
break
}
// match: (SETEQ (TESTLconst [c] x))
- // cond: isUint32PowerOfTwo(int64(c))
- // result: (SETAE (BTLconst [int8(log32(c))] x))
+ // cond: isUnsignedPowerOfTwo(uint32(c))
+ // result: (SETAE (BTLconst [int8(log32u(uint32(c)))] x))
for {
if v_0.Op != OpAMD64TESTLconst {
break
}
c := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
- if !(isUint32PowerOfTwo(int64(c))) {
+ if !(isUnsignedPowerOfTwo(uint32(c))) {
break
}
v.reset(OpAMD64SETAE)
v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
- v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AuxInt = int8ToAuxInt(int8(log32u(uint32(c))))
v0.AddArg(x)
v.AddArg(v0)
return true
}
// match: (SETEQ (TESTQconst [c] x))
- // cond: isUint64PowerOfTwo(int64(c))
- // result: (SETAE (BTQconst [int8(log32(c))] x))
+ // cond: isUnsignedPowerOfTwo(uint64(c))
+ // result: (SETAE (BTQconst [int8(log32u(uint32(c)))] x))
for {
if v_0.Op != OpAMD64TESTQconst {
break
}
c := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
- if !(isUint64PowerOfTwo(int64(c))) {
+ if !(isUnsignedPowerOfTwo(uint64(c))) {
break
}
v.reset(OpAMD64SETAE)
v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
- v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AuxInt = int8ToAuxInt(int8(log32u(uint32(c))))
v0.AddArg(x)
v.AddArg(v0)
return true
}
// match: (SETEQ (TESTQ (MOVQconst [c]) x))
- // cond: isUint64PowerOfTwo(c)
- // result: (SETAE (BTQconst [int8(log64(c))] x))
+ // cond: isUnsignedPowerOfTwo(uint64(c))
+ // result: (SETAE (BTQconst [int8(log64u(uint64(c)))] x))
for {
if v_0.Op != OpAMD64TESTQ {
break
@@ -17451,12 +17451,12 @@ func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool {
}
c := auxIntToInt64(v_0_0.AuxInt)
x := v_0_1
- if !(isUint64PowerOfTwo(c)) {
+ if !(isUnsignedPowerOfTwo(uint64(c))) {
continue
}
v.reset(OpAMD64SETAE)
v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
- v0.AuxInt = int8ToAuxInt(int8(log64(c)))
+ v0.AuxInt = int8ToAuxInt(int8(log64u(uint64(c))))
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -17875,8 +17875,8 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool {
break
}
// match: (SETEQstore [off] {sym} ptr (TESTLconst [c] x) mem)
- // cond: isUint32PowerOfTwo(int64(c))
- // result: (SETAEstore [off] {sym} ptr (BTLconst [int8(log32(c))] x) mem)
+ // cond: isUnsignedPowerOfTwo(uint32(c))
+ // result: (SETAEstore [off] {sym} ptr (BTLconst [int8(log32u(uint32(c)))] x) mem)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
@@ -17887,21 +17887,21 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool {
c := auxIntToInt32(v_1.AuxInt)
x := v_1.Args[0]
mem := v_2
- if !(isUint32PowerOfTwo(int64(c))) {
+ if !(isUnsignedPowerOfTwo(uint32(c))) {
break
}
v.reset(OpAMD64SETAEstore)
v.AuxInt = int32ToAuxInt(off)
v.Aux = symToAux(sym)
v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
- v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AuxInt = int8ToAuxInt(int8(log32u(uint32(c))))
v0.AddArg(x)
v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETEQstore [off] {sym} ptr (TESTQconst [c] x) mem)
- // cond: isUint64PowerOfTwo(int64(c))
- // result: (SETAEstore [off] {sym} ptr (BTQconst [int8(log32(c))] x) mem)
+ // cond: isUnsignedPowerOfTwo(uint64(c))
+ // result: (SETAEstore [off] {sym} ptr (BTQconst [int8(log32u(uint32(c)))] x) mem)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
@@ -17912,21 +17912,21 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool {
c := auxIntToInt32(v_1.AuxInt)
x := v_1.Args[0]
mem := v_2
- if !(isUint64PowerOfTwo(int64(c))) {
+ if !(isUnsignedPowerOfTwo(uint64(c))) {
break
}
v.reset(OpAMD64SETAEstore)
v.AuxInt = int32ToAuxInt(off)
v.Aux = symToAux(sym)
v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
- v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AuxInt = int8ToAuxInt(int8(log32u(uint32(c))))
v0.AddArg(x)
v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETEQstore [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem)
- // cond: isUint64PowerOfTwo(c)
- // result: (SETAEstore [off] {sym} ptr (BTQconst [int8(log64(c))] x) mem)
+ // cond: isUnsignedPowerOfTwo(uint64(c))
+ // result: (SETAEstore [off] {sym} ptr (BTQconst [int8(log64u(uint64(c)))] x) mem)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
@@ -17944,14 +17944,14 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool {
c := auxIntToInt64(v_1_0.AuxInt)
x := v_1_1
mem := v_2
- if !(isUint64PowerOfTwo(c)) {
+ if !(isUnsignedPowerOfTwo(uint64(c))) {
continue
}
v.reset(OpAMD64SETAEstore)
v.AuxInt = int32ToAuxInt(off)
v.Aux = symToAux(sym)
v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
- v0.AuxInt = int8ToAuxInt(int8(log64(c)))
+ v0.AuxInt = int8ToAuxInt(int8(log64u(uint64(c))))
v0.AddArg(x)
v.AddArg3(ptr, v0, mem)
return true
@@ -19444,46 +19444,46 @@ func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool {
break
}
// match: (SETNE (TESTLconst [c] x))
- // cond: isUint32PowerOfTwo(int64(c))
- // result: (SETB (BTLconst [int8(log32(c))] x))
+ // cond: isUnsignedPowerOfTwo(uint32(c))
+ // result: (SETB (BTLconst [int8(log32u(uint32(c)))] x))
for {
if v_0.Op != OpAMD64TESTLconst {
break
}
c := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
- if !(isUint32PowerOfTwo(int64(c))) {
+ if !(isUnsignedPowerOfTwo(uint32(c))) {
break
}
v.reset(OpAMD64SETB)
v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
- v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AuxInt = int8ToAuxInt(int8(log32u(uint32(c))))
v0.AddArg(x)
v.AddArg(v0)
return true
}
// match: (SETNE (TESTQconst [c] x))
- // cond: isUint64PowerOfTwo(int64(c))
- // result: (SETB (BTQconst [int8(log32(c))] x))
+ // cond: isUnsignedPowerOfTwo(uint64(c))
+ // result: (SETB (BTQconst [int8(log32u(uint32(c)))] x))
for {
if v_0.Op != OpAMD64TESTQconst {
break
}
c := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
- if !(isUint64PowerOfTwo(int64(c))) {
+ if !(isUnsignedPowerOfTwo(uint64(c))) {
break
}
v.reset(OpAMD64SETB)
v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
- v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AuxInt = int8ToAuxInt(int8(log32u(uint32(c))))
v0.AddArg(x)
v.AddArg(v0)
return true
}
// match: (SETNE (TESTQ (MOVQconst [c]) x))
- // cond: isUint64PowerOfTwo(c)
- // result: (SETB (BTQconst [int8(log64(c))] x))
+ // cond: isUnsignedPowerOfTwo(uint64(c))
+ // result: (SETB (BTQconst [int8(log64u(uint64(c)))] x))
for {
if v_0.Op != OpAMD64TESTQ {
break
@@ -19497,12 +19497,12 @@ func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool {
}
c := auxIntToInt64(v_0_0.AuxInt)
x := v_0_1
- if !(isUint64PowerOfTwo(c)) {
+ if !(isUnsignedPowerOfTwo(uint64(c))) {
continue
}
v.reset(OpAMD64SETB)
v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
- v0.AuxInt = int8ToAuxInt(int8(log64(c)))
+ v0.AuxInt = int8ToAuxInt(int8(log64u(uint64(c))))
v0.AddArg(x)
v.AddArg(v0)
return true
@@ -19921,8 +19921,8 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool {
break
}
// match: (SETNEstore [off] {sym} ptr (TESTLconst [c] x) mem)
- // cond: isUint32PowerOfTwo(int64(c))
- // result: (SETBstore [off] {sym} ptr (BTLconst [int8(log32(c))] x) mem)
+ // cond: isUnsignedPowerOfTwo(uint32(c))
+ // result: (SETBstore [off] {sym} ptr (BTLconst [int8(log32u(uint32(c)))] x) mem)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
@@ -19933,21 +19933,21 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool {
c := auxIntToInt32(v_1.AuxInt)
x := v_1.Args[0]
mem := v_2
- if !(isUint32PowerOfTwo(int64(c))) {
+ if !(isUnsignedPowerOfTwo(uint32(c))) {
break
}
v.reset(OpAMD64SETBstore)
v.AuxInt = int32ToAuxInt(off)
v.Aux = symToAux(sym)
v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
- v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AuxInt = int8ToAuxInt(int8(log32u(uint32(c))))
v0.AddArg(x)
v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETNEstore [off] {sym} ptr (TESTQconst [c] x) mem)
- // cond: isUint64PowerOfTwo(int64(c))
- // result: (SETBstore [off] {sym} ptr (BTQconst [int8(log32(c))] x) mem)
+ // cond: isUnsignedPowerOfTwo(uint64(c))
+ // result: (SETBstore [off] {sym} ptr (BTQconst [int8(log32u(uint32(c)))] x) mem)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
@@ -19958,21 +19958,21 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool {
c := auxIntToInt32(v_1.AuxInt)
x := v_1.Args[0]
mem := v_2
- if !(isUint64PowerOfTwo(int64(c))) {
+ if !(isUnsignedPowerOfTwo(uint64(c))) {
break
}
v.reset(OpAMD64SETBstore)
v.AuxInt = int32ToAuxInt(off)
v.Aux = symToAux(sym)
v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
- v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AuxInt = int8ToAuxInt(int8(log32u(uint32(c))))
v0.AddArg(x)
v.AddArg3(ptr, v0, mem)
return true
}
// match: (SETNEstore [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem)
- // cond: isUint64PowerOfTwo(c)
- // result: (SETBstore [off] {sym} ptr (BTQconst [int8(log64(c))] x) mem)
+ // cond: isUnsignedPowerOfTwo(uint64(c))
+ // result: (SETBstore [off] {sym} ptr (BTQconst [int8(log64u(uint64(c)))] x) mem)
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
@@ -19990,14 +19990,14 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool {
c := auxIntToInt64(v_1_0.AuxInt)
x := v_1_1
mem := v_2
- if !(isUint64PowerOfTwo(c)) {
+ if !(isUnsignedPowerOfTwo(uint64(c))) {
continue
}
v.reset(OpAMD64SETBstore)
v.AuxInt = int32ToAuxInt(off)
v.Aux = symToAux(sym)
v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
- v0.AuxInt = int8ToAuxInt(int8(log64(c)))
+ v0.AuxInt = int8ToAuxInt(int8(log64u(uint64(c))))
v0.AddArg(x)
v.AddArg3(ptr, v0, mem)
return true
@@ -23495,8 +23495,8 @@ func rewriteValueAMD64_OpAMD64XORQ(v *Value) bool {
break
}
// match: (XORQ (MOVQconst [c]) x)
- // cond: isUint64PowerOfTwo(c) && uint64(c) >= 1<<31
- // result: (BTCQconst [int8(log64(c))] x)
+ // cond: isUnsignedPowerOfTwo(uint64(c)) && uint64(c) >= 1<<31
+ // result: (BTCQconst [int8(log64u(uint64(c)))] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpAMD64MOVQconst {
@@ -23504,11 +23504,11 @@ func rewriteValueAMD64_OpAMD64XORQ(v *Value) bool {
}
c := auxIntToInt64(v_0.AuxInt)
x := v_1
- if !(isUint64PowerOfTwo(c) && uint64(c) >= 1<<31) {
+ if !(isUnsignedPowerOfTwo(uint64(c)) && uint64(c) >= 1<<31) {
continue
}
v.reset(OpAMD64BTCQconst)
- v.AuxInt = int8ToAuxInt(int8(log64(c)))
+ v.AuxInt = int8ToAuxInt(int8(log64u(uint64(c))))
v.AddArg(x)
return true
}
@@ -27307,75 +27307,6 @@ func rewriteValueAMD64_OpMove(v *Value) bool {
v.AddArg3(dst, v0, mem)
return true
}
- // match: (Move [32] dst src mem)
- // result: (Move [16] (OffPtr dst [16]) (OffPtr src [16]) (Move [16] dst src mem))
- for {
- if auxIntToInt64(v.AuxInt) != 32 {
- break
- }
- dst := v_0
- src := v_1
- mem := v_2
- v.reset(OpMove)
- v.AuxInt = int64ToAuxInt(16)
- v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
- v0.AuxInt = int64ToAuxInt(16)
- v0.AddArg(dst)
- v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
- v1.AuxInt = int64ToAuxInt(16)
- v1.AddArg(src)
- v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
- v2.AuxInt = int64ToAuxInt(16)
- v2.AddArg3(dst, src, mem)
- v.AddArg3(v0, v1, v2)
- return true
- }
- // match: (Move [48] dst src mem)
- // result: (Move [32] (OffPtr dst [16]) (OffPtr src [16]) (Move [16] dst src mem))
- for {
- if auxIntToInt64(v.AuxInt) != 48 {
- break
- }
- dst := v_0
- src := v_1
- mem := v_2
- v.reset(OpMove)
- v.AuxInt = int64ToAuxInt(32)
- v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
- v0.AuxInt = int64ToAuxInt(16)
- v0.AddArg(dst)
- v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
- v1.AuxInt = int64ToAuxInt(16)
- v1.AddArg(src)
- v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
- v2.AuxInt = int64ToAuxInt(16)
- v2.AddArg3(dst, src, mem)
- v.AddArg3(v0, v1, v2)
- return true
- }
- // match: (Move [64] dst src mem)
- // result: (Move [32] (OffPtr dst [32]) (OffPtr src [32]) (Move [32] dst src mem))
- for {
- if auxIntToInt64(v.AuxInt) != 64 {
- break
- }
- dst := v_0
- src := v_1
- mem := v_2
- v.reset(OpMove)
- v.AuxInt = int64ToAuxInt(32)
- v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
- v0.AuxInt = int64ToAuxInt(32)
- v0.AddArg(dst)
- v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
- v1.AuxInt = int64ToAuxInt(32)
- v1.AddArg(src)
- v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
- v2.AuxInt = int64ToAuxInt(32)
- v2.AddArg3(dst, src, mem)
- v.AddArg3(v0, v1, v2)
- return true
- }
// match: (Move [3] dst src mem)
// result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem))
for {
@@ -27568,82 +27499,72 @@ func rewriteValueAMD64_OpMove(v *Value) bool {
return true
}
// match: (Move [s] dst src mem)
- // cond: s > 16 && s%16 != 0 && s%16 <= 8
- // result: (Move [s-s%16] (OffPtr dst [s%16]) (OffPtr src [s%16]) (MOVQstore dst (MOVQload src mem) mem))
+ // cond: s > 16 && s < 192 && logLargeCopy(v, s)
+ // result: (LoweredMove [s] dst src mem)
for {
s := auxIntToInt64(v.AuxInt)
dst := v_0
src := v_1
mem := v_2
- if !(s > 16 && s%16 != 0 && s%16 <= 8) {
+ if !(s > 16 && s < 192 && logLargeCopy(v, s)) {
break
}
- v.reset(OpMove)
- v.AuxInt = int64ToAuxInt(s - s%16)
- v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
- v0.AuxInt = int64ToAuxInt(s % 16)
- v0.AddArg(dst)
- v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
- v1.AuxInt = int64ToAuxInt(s % 16)
- v1.AddArg(src)
- v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
- v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
- v3.AddArg2(src, mem)
- v2.AddArg3(dst, v3, mem)
- v.AddArg3(v0, v1, v2)
+ v.reset(OpAMD64LoweredMove)
+ v.AuxInt = int64ToAuxInt(s)
+ v.AddArg3(dst, src, mem)
return true
}
// match: (Move [s] dst src mem)
- // cond: s > 16 && s%16 != 0 && s%16 > 8
- // result: (Move [s-s%16] (OffPtr dst [s%16]) (OffPtr src [s%16]) (MOVOstore dst (MOVOload src mem) mem))
+ // cond: s >= 192 && s <= repMoveThreshold && logLargeCopy(v, s)
+ // result: (LoweredMoveLoop [s] dst src mem)
for {
s := auxIntToInt64(v.AuxInt)
dst := v_0
src := v_1
mem := v_2
- if !(s > 16 && s%16 != 0 && s%16 > 8) {
+ if !(s >= 192 && s <= repMoveThreshold && logLargeCopy(v, s)) {
break
}
- v.reset(OpMove)
- v.AuxInt = int64ToAuxInt(s - s%16)
- v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
- v0.AuxInt = int64ToAuxInt(s % 16)
- v0.AddArg(dst)
- v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
- v1.AuxInt = int64ToAuxInt(s % 16)
- v1.AddArg(src)
- v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
- v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
- v3.AddArg2(src, mem)
- v2.AddArg3(dst, v3, mem)
- v.AddArg3(v0, v1, v2)
+ v.reset(OpAMD64LoweredMoveLoop)
+ v.AuxInt = int64ToAuxInt(s)
+ v.AddArg3(dst, src, mem)
return true
}
// match: (Move [s] dst src mem)
- // cond: s > 64 && s <= 16*64 && s%16 == 0 && logLargeCopy(v, s)
- // result: (DUFFCOPY [s] dst src mem)
+ // cond: s > repMoveThreshold && s%8 != 0
+ // result: (Move [s-s%8] (OffPtr dst [s%8]) (OffPtr src [s%8]) (MOVQstore dst (MOVQload src mem) mem))
for {
s := auxIntToInt64(v.AuxInt)
dst := v_0
src := v_1
mem := v_2
- if !(s > 64 && s <= 16*64 && s%16 == 0 && logLargeCopy(v, s)) {
+ if !(s > repMoveThreshold && s%8 != 0) {
break
}
- v.reset(OpAMD64DUFFCOPY)
- v.AuxInt = int64ToAuxInt(s)
- v.AddArg3(dst, src, mem)
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(s - s%8)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
+ v0.AuxInt = int64ToAuxInt(s % 8)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
+ v1.AuxInt = int64ToAuxInt(s % 8)
+ v1.AddArg(src)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v3.AddArg2(src, mem)
+ v2.AddArg3(dst, v3, mem)
+ v.AddArg3(v0, v1, v2)
return true
}
// match: (Move [s] dst src mem)
- // cond: s > 16*64 && s%8 == 0 && logLargeCopy(v, s)
+ // cond: s > repMoveThreshold && s%8 == 0 && logLargeCopy(v, s)
// result: (REPMOVSQ dst src (MOVQconst [s/8]) mem)
for {
s := auxIntToInt64(v.AuxInt)
dst := v_0
src := v_1
mem := v_2
- if !(s > 16*64 && s%8 == 0 && logLargeCopy(v, s)) {
+ if !(s > repMoveThreshold && s%8 == 0 && logLargeCopy(v, s)) {
break
}
v.reset(OpAMD64REPMOVSQ)
@@ -30148,40 +30069,40 @@ func rewriteBlockAMD64(b *Block) bool {
break
}
// match: (EQ (TESTLconst [c] x))
- // cond: isUint32PowerOfTwo(int64(c))
- // result: (UGE (BTLconst [int8(log32(c))] x))
+ // cond: isUnsignedPowerOfTwo(uint32(c))
+ // result: (UGE (BTLconst [int8(log32u(uint32(c)))] x))
for b.Controls[0].Op == OpAMD64TESTLconst {
v_0 := b.Controls[0]
c := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
- if !(isUint32PowerOfTwo(int64(c))) {
+ if !(isUnsignedPowerOfTwo(uint32(c))) {
break
}
v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
- v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AuxInt = int8ToAuxInt(int8(log32u(uint32(c))))
v0.AddArg(x)
b.resetWithControl(BlockAMD64UGE, v0)
return true
}
// match: (EQ (TESTQconst [c] x))
- // cond: isUint64PowerOfTwo(int64(c))
- // result: (UGE (BTQconst [int8(log32(c))] x))
+ // cond: isUnsignedPowerOfTwo(uint64(c))
+ // result: (UGE (BTQconst [int8(log32u(uint32(c)))] x))
for b.Controls[0].Op == OpAMD64TESTQconst {
v_0 := b.Controls[0]
c := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
- if !(isUint64PowerOfTwo(int64(c))) {
+ if !(isUnsignedPowerOfTwo(uint64(c))) {
break
}
v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
- v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AuxInt = int8ToAuxInt(int8(log32u(uint32(c))))
v0.AddArg(x)
b.resetWithControl(BlockAMD64UGE, v0)
return true
}
// match: (EQ (TESTQ (MOVQconst [c]) x))
- // cond: isUint64PowerOfTwo(c)
- // result: (UGE (BTQconst [int8(log64(c))] x))
+ // cond: isUnsignedPowerOfTwo(uint64(c))
+ // result: (UGE (BTQconst [int8(log64u(uint64(c)))] x))
for b.Controls[0].Op == OpAMD64TESTQ {
v_0 := b.Controls[0]
_ = v_0.Args[1]
@@ -30193,11 +30114,11 @@ func rewriteBlockAMD64(b *Block) bool {
}
c := auxIntToInt64(v_0_0.AuxInt)
x := v_0_1
- if !(isUint64PowerOfTwo(c)) {
+ if !(isUnsignedPowerOfTwo(uint64(c))) {
continue
}
v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
- v0.AuxInt = int8ToAuxInt(int8(log64(c)))
+ v0.AuxInt = int8ToAuxInt(int8(log64u(uint64(c))))
v0.AddArg(x)
b.resetWithControl(BlockAMD64UGE, v0)
return true
@@ -31128,40 +31049,40 @@ func rewriteBlockAMD64(b *Block) bool {
break
}
// match: (NE (TESTLconst [c] x))
- // cond: isUint32PowerOfTwo(int64(c))
- // result: (ULT (BTLconst [int8(log32(c))] x))
+ // cond: isUnsignedPowerOfTwo(uint32(c))
+ // result: (ULT (BTLconst [int8(log32u(uint32(c)))] x))
for b.Controls[0].Op == OpAMD64TESTLconst {
v_0 := b.Controls[0]
c := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
- if !(isUint32PowerOfTwo(int64(c))) {
+ if !(isUnsignedPowerOfTwo(uint32(c))) {
break
}
v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
- v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AuxInt = int8ToAuxInt(int8(log32u(uint32(c))))
v0.AddArg(x)
b.resetWithControl(BlockAMD64ULT, v0)
return true
}
// match: (NE (TESTQconst [c] x))
- // cond: isUint64PowerOfTwo(int64(c))
- // result: (ULT (BTQconst [int8(log32(c))] x))
+ // cond: isUnsignedPowerOfTwo(uint64(c))
+ // result: (ULT (BTQconst [int8(log32u(uint32(c)))] x))
for b.Controls[0].Op == OpAMD64TESTQconst {
v_0 := b.Controls[0]
c := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
- if !(isUint64PowerOfTwo(int64(c))) {
+ if !(isUnsignedPowerOfTwo(uint64(c))) {
break
}
v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
- v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AuxInt = int8ToAuxInt(int8(log32u(uint32(c))))
v0.AddArg(x)
b.resetWithControl(BlockAMD64ULT, v0)
return true
}
// match: (NE (TESTQ (MOVQconst [c]) x))
- // cond: isUint64PowerOfTwo(c)
- // result: (ULT (BTQconst [int8(log64(c))] x))
+ // cond: isUnsignedPowerOfTwo(uint64(c))
+ // result: (ULT (BTQconst [int8(log64u(uint64(c)))] x))
for b.Controls[0].Op == OpAMD64TESTQ {
v_0 := b.Controls[0]
_ = v_0.Args[1]
@@ -31173,11 +31094,11 @@ func rewriteBlockAMD64(b *Block) bool {
}
c := auxIntToInt64(v_0_0.AuxInt)
x := v_0_1
- if !(isUint64PowerOfTwo(c)) {
+ if !(isUnsignedPowerOfTwo(uint64(c))) {
continue
}
v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
- v0.AuxInt = int8ToAuxInt(int8(log64(c)))
+ v0.AuxInt = int8ToAuxInt(int8(log64u(uint64(c))))
v0.AddArg(x)
b.resetWithControl(BlockAMD64ULT, v0)
return true
diff --git a/src/cmd/compile/internal/ssa/rewriteARM64.go b/src/cmd/compile/internal/ssa/rewriteARM64.go
index 32f0f554341f45..59d6fe64db5001 100644
--- a/src/cmd/compile/internal/ssa/rewriteARM64.go
+++ b/src/cmd/compile/internal/ssa/rewriteARM64.go
@@ -22321,141 +22321,34 @@ func rewriteValueARM64_OpZero(v *Value) bool {
v.AddArg4(ptr, v0, v0, mem)
return true
}
- // match: (Zero [32] ptr mem)
- // result: (STP [16] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem))
- for {
- if auxIntToInt64(v.AuxInt) != 32 {
- break
- }
- ptr := v_0
- mem := v_1
- v.reset(OpARM64STP)
- v.AuxInt = int32ToAuxInt(16)
- v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
- v0.AuxInt = int64ToAuxInt(0)
- v1 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem)
- v1.AuxInt = int32ToAuxInt(0)
- v1.AddArg4(ptr, v0, v0, mem)
- v.AddArg4(ptr, v0, v0, v1)
- return true
- }
- // match: (Zero [48] ptr mem)
- // result: (STP [32] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [16] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem)))
- for {
- if auxIntToInt64(v.AuxInt) != 48 {
- break
- }
- ptr := v_0
- mem := v_1
- v.reset(OpARM64STP)
- v.AuxInt = int32ToAuxInt(32)
- v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
- v0.AuxInt = int64ToAuxInt(0)
- v1 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem)
- v1.AuxInt = int32ToAuxInt(16)
- v2 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem)
- v2.AuxInt = int32ToAuxInt(0)
- v2.AddArg4(ptr, v0, v0, mem)
- v1.AddArg4(ptr, v0, v0, v2)
- v.AddArg4(ptr, v0, v0, v1)
- return true
- }
- // match: (Zero [64] ptr mem)
- // result: (STP [48] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [32] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [16] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem))))
- for {
- if auxIntToInt64(v.AuxInt) != 64 {
- break
- }
- ptr := v_0
- mem := v_1
- v.reset(OpARM64STP)
- v.AuxInt = int32ToAuxInt(48)
- v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
- v0.AuxInt = int64ToAuxInt(0)
- v1 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem)
- v1.AuxInt = int32ToAuxInt(32)
- v2 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem)
- v2.AuxInt = int32ToAuxInt(16)
- v3 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem)
- v3.AuxInt = int32ToAuxInt(0)
- v3.AddArg4(ptr, v0, v0, mem)
- v2.AddArg4(ptr, v0, v0, v3)
- v1.AddArg4(ptr, v0, v0, v2)
- v.AddArg4(ptr, v0, v0, v1)
- return true
- }
- // match: (Zero [s] ptr mem)
- // cond: s%16 != 0 && s%16 <= 8 && s > 16
- // result: (Zero [8] (OffPtr ptr [s-8]) (Zero [s-s%16] ptr mem))
- for {
- s := auxIntToInt64(v.AuxInt)
- ptr := v_0
- mem := v_1
- if !(s%16 != 0 && s%16 <= 8 && s > 16) {
- break
- }
- v.reset(OpZero)
- v.AuxInt = int64ToAuxInt(8)
- v0 := b.NewValue0(v.Pos, OpOffPtr, ptr.Type)
- v0.AuxInt = int64ToAuxInt(s - 8)
- v0.AddArg(ptr)
- v1 := b.NewValue0(v.Pos, OpZero, types.TypeMem)
- v1.AuxInt = int64ToAuxInt(s - s%16)
- v1.AddArg2(ptr, mem)
- v.AddArg2(v0, v1)
- return true
- }
- // match: (Zero [s] ptr mem)
- // cond: s%16 != 0 && s%16 > 8 && s > 16
- // result: (Zero [16] (OffPtr ptr [s-16]) (Zero [s-s%16] ptr mem))
- for {
- s := auxIntToInt64(v.AuxInt)
- ptr := v_0
- mem := v_1
- if !(s%16 != 0 && s%16 > 8 && s > 16) {
- break
- }
- v.reset(OpZero)
- v.AuxInt = int64ToAuxInt(16)
- v0 := b.NewValue0(v.Pos, OpOffPtr, ptr.Type)
- v0.AuxInt = int64ToAuxInt(s - 16)
- v0.AddArg(ptr)
- v1 := b.NewValue0(v.Pos, OpZero, types.TypeMem)
- v1.AuxInt = int64ToAuxInt(s - s%16)
- v1.AddArg2(ptr, mem)
- v.AddArg2(v0, v1)
- return true
- }
// match: (Zero [s] ptr mem)
- // cond: s%16 == 0 && s > 64 && s <= 16*64
- // result: (DUFFZERO [4 * (64 - s/16)] ptr mem)
+ // cond: s > 16 && s < 192
+ // result: (LoweredZero [s] ptr mem)
for {
s := auxIntToInt64(v.AuxInt)
ptr := v_0
mem := v_1
- if !(s%16 == 0 && s > 64 && s <= 16*64) {
+ if !(s > 16 && s < 192) {
break
}
- v.reset(OpARM64DUFFZERO)
- v.AuxInt = int64ToAuxInt(4 * (64 - s/16))
+ v.reset(OpARM64LoweredZero)
+ v.AuxInt = int64ToAuxInt(s)
v.AddArg2(ptr, mem)
return true
}
// match: (Zero [s] ptr mem)
- // cond: s%16 == 0 && s > 16*64
- // result: (LoweredZero ptr (ADDconst [s-16] ptr) mem)
+ // cond: s >= 192
+ // result: (LoweredZeroLoop [s] ptr mem)
for {
s := auxIntToInt64(v.AuxInt)
ptr := v_0
mem := v_1
- if !(s%16 == 0 && s > 16*64) {
+ if !(s >= 192) {
break
}
- v.reset(OpARM64LoweredZero)
- v0 := b.NewValue0(v.Pos, OpARM64ADDconst, ptr.Type)
- v0.AuxInt = int64ToAuxInt(s - 16)
- v0.AddArg(ptr)
- v.AddArg3(ptr, v0, mem)
+ v.reset(OpARM64LoweredZeroLoop)
+ v.AuxInt = int64ToAuxInt(s)
+ v.AddArg2(ptr, mem)
return true
}
return false
diff --git a/src/cmd/compile/internal/ssa/rewriteLOONG64.go b/src/cmd/compile/internal/ssa/rewriteLOONG64.go
index 83242413f06ab7..6f29588f9a51a3 100644
--- a/src/cmd/compile/internal/ssa/rewriteLOONG64.go
+++ b/src/cmd/compile/internal/ssa/rewriteLOONG64.go
@@ -5539,6 +5539,7 @@ func rewriteValueLOONG64_OpLOONG64MULV(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
config := b.Func.Config
+ typ := &b.Func.Config.Types
// match: (MULV _ (MOVVconst [0]))
// result: (MOVVconst [0])
for {
@@ -5583,6 +5584,44 @@ func rewriteValueLOONG64_OpLOONG64MULV(v *Value) bool {
}
break
}
+ // match: (MULV (NEGV x) (MOVVconst [c]))
+ // result: (MULV x (MOVVconst [-c]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLOONG64NEGV {
+ continue
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpLOONG64MOVVconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpLOONG64MULV)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(-c)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (MULV (NEGV x) (NEGV y))
+ // result: (MULV x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLOONG64NEGV {
+ continue
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpLOONG64NEGV {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpLOONG64MULV)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
// match: (MULV (MOVVconst [c]) (MOVVconst [d]))
// result: (MOVVconst [c*d])
for {
@@ -11440,8 +11479,124 @@ func rewriteValueLOONG64_OpZero(v *Value) bool {
func rewriteBlockLOONG64(b *Block) bool {
typ := &b.Func.Config.Types
switch b.Kind {
- case BlockLOONG64EQ:
- // match: (EQ (FPFlagTrue cmp) yes no)
+ case BlockLOONG64BEQ:
+ // match: (BEQ (MOVVconst [0]) cond yes no)
+ // result: (EQZ cond yes no)
+ for b.Controls[0].Op == OpLOONG64MOVVconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ cond := b.Controls[1]
+ b.resetWithControl(BlockLOONG64EQZ, cond)
+ return true
+ }
+ // match: (BEQ cond (MOVVconst [0]) yes no)
+ // result: (EQZ cond yes no)
+ for b.Controls[1].Op == OpLOONG64MOVVconst {
+ cond := b.Controls[0]
+ v_1 := b.Controls[1]
+ if auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockLOONG64EQZ, cond)
+ return true
+ }
+ case BlockLOONG64BGE:
+ // match: (BGE (MOVVconst [0]) cond yes no)
+ // result: (LEZ cond yes no)
+ for b.Controls[0].Op == OpLOONG64MOVVconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ cond := b.Controls[1]
+ b.resetWithControl(BlockLOONG64LEZ, cond)
+ return true
+ }
+ // match: (BGE cond (MOVVconst [0]) yes no)
+ // result: (GEZ cond yes no)
+ for b.Controls[1].Op == OpLOONG64MOVVconst {
+ cond := b.Controls[0]
+ v_1 := b.Controls[1]
+ if auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockLOONG64GEZ, cond)
+ return true
+ }
+ case BlockLOONG64BGEU:
+ // match: (BGEU (MOVVconst [0]) cond yes no)
+ // result: (EQZ cond yes no)
+ for b.Controls[0].Op == OpLOONG64MOVVconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ cond := b.Controls[1]
+ b.resetWithControl(BlockLOONG64EQZ, cond)
+ return true
+ }
+ case BlockLOONG64BLT:
+ // match: (BLT (MOVVconst [0]) cond yes no)
+ // result: (GTZ cond yes no)
+ for b.Controls[0].Op == OpLOONG64MOVVconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ cond := b.Controls[1]
+ b.resetWithControl(BlockLOONG64GTZ, cond)
+ return true
+ }
+ // match: (BLT cond (MOVVconst [0]) yes no)
+ // result: (LTZ cond yes no)
+ for b.Controls[1].Op == OpLOONG64MOVVconst {
+ cond := b.Controls[0]
+ v_1 := b.Controls[1]
+ if auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockLOONG64LTZ, cond)
+ return true
+ }
+ case BlockLOONG64BLTU:
+ // match: (BLTU (MOVVconst [0]) cond yes no)
+ // result: (NEZ cond yes no)
+ for b.Controls[0].Op == OpLOONG64MOVVconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ cond := b.Controls[1]
+ b.resetWithControl(BlockLOONG64NEZ, cond)
+ return true
+ }
+ case BlockLOONG64BNE:
+ // match: (BNE (MOVVconst [0]) cond yes no)
+ // result: (NEZ cond yes no)
+ for b.Controls[0].Op == OpLOONG64MOVVconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ cond := b.Controls[1]
+ b.resetWithControl(BlockLOONG64NEZ, cond)
+ return true
+ }
+ // match: (BNE cond (MOVVconst [0]) yes no)
+ // result: (NEZ cond yes no)
+ for b.Controls[1].Op == OpLOONG64MOVVconst {
+ cond := b.Controls[0]
+ v_1 := b.Controls[1]
+ if auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockLOONG64NEZ, cond)
+ return true
+ }
+ case BlockLOONG64EQZ:
+ // match: (EQZ (FPFlagTrue cmp) yes no)
// result: (FPF cmp yes no)
for b.Controls[0].Op == OpLOONG64FPFlagTrue {
v_0 := b.Controls[0]
@@ -11449,7 +11604,7 @@ func rewriteBlockLOONG64(b *Block) bool {
b.resetWithControl(BlockLOONG64FPF, cmp)
return true
}
- // match: (EQ (FPFlagFalse cmp) yes no)
+ // match: (EQZ (FPFlagFalse cmp) yes no)
// result: (FPT cmp yes no)
for b.Controls[0].Op == OpLOONG64FPFlagFalse {
v_0 := b.Controls[0]
@@ -11457,8 +11612,8 @@ func rewriteBlockLOONG64(b *Block) bool {
b.resetWithControl(BlockLOONG64FPT, cmp)
return true
}
- // match: (EQ (XORconst [1] cmp:(SGT _ _)) yes no)
- // result: (NE cmp yes no)
+ // match: (EQZ (XORconst [1] cmp:(SGT _ _)) yes no)
+ // result: (NEZ cmp yes no)
for b.Controls[0].Op == OpLOONG64XORconst {
v_0 := b.Controls[0]
if auxIntToInt64(v_0.AuxInt) != 1 {
@@ -11468,11 +11623,11 @@ func rewriteBlockLOONG64(b *Block) bool {
if cmp.Op != OpLOONG64SGT {
break
}
- b.resetWithControl(BlockLOONG64NE, cmp)
+ b.resetWithControl(BlockLOONG64NEZ, cmp)
return true
}
- // match: (EQ (XORconst [1] cmp:(SGTU _ _)) yes no)
- // result: (NE cmp yes no)
+ // match: (EQZ (XORconst [1] cmp:(SGTU _ _)) yes no)
+ // result: (NEZ cmp yes no)
for b.Controls[0].Op == OpLOONG64XORconst {
v_0 := b.Controls[0]
if auxIntToInt64(v_0.AuxInt) != 1 {
@@ -11482,11 +11637,11 @@ func rewriteBlockLOONG64(b *Block) bool {
if cmp.Op != OpLOONG64SGTU {
break
}
- b.resetWithControl(BlockLOONG64NE, cmp)
+ b.resetWithControl(BlockLOONG64NEZ, cmp)
return true
}
- // match: (EQ (XORconst [1] cmp:(SGTconst _)) yes no)
- // result: (NE cmp yes no)
+ // match: (EQZ (XORconst [1] cmp:(SGTconst _)) yes no)
+ // result: (NEZ cmp yes no)
for b.Controls[0].Op == OpLOONG64XORconst {
v_0 := b.Controls[0]
if auxIntToInt64(v_0.AuxInt) != 1 {
@@ -11496,11 +11651,11 @@ func rewriteBlockLOONG64(b *Block) bool {
if cmp.Op != OpLOONG64SGTconst {
break
}
- b.resetWithControl(BlockLOONG64NE, cmp)
+ b.resetWithControl(BlockLOONG64NEZ, cmp)
return true
}
- // match: (EQ (XORconst [1] cmp:(SGTUconst _)) yes no)
- // result: (NE cmp yes no)
+ // match: (EQZ (XORconst [1] cmp:(SGTUconst _)) yes no)
+ // result: (NEZ cmp yes no)
for b.Controls[0].Op == OpLOONG64XORconst {
v_0 := b.Controls[0]
if auxIntToInt64(v_0.AuxInt) != 1 {
@@ -11510,22 +11665,22 @@ func rewriteBlockLOONG64(b *Block) bool {
if cmp.Op != OpLOONG64SGTUconst {
break
}
- b.resetWithControl(BlockLOONG64NE, cmp)
+ b.resetWithControl(BlockLOONG64NEZ, cmp)
return true
}
- // match: (EQ (SGTUconst [1] x) yes no)
- // result: (NE x yes no)
+ // match: (EQZ (SGTUconst [1] x) yes no)
+ // result: (NEZ x yes no)
for b.Controls[0].Op == OpLOONG64SGTUconst {
v_0 := b.Controls[0]
if auxIntToInt64(v_0.AuxInt) != 1 {
break
}
x := v_0.Args[0]
- b.resetWithControl(BlockLOONG64NE, x)
+ b.resetWithControl(BlockLOONG64NEZ, x)
return true
}
- // match: (EQ (SGTU x (MOVVconst [0])) yes no)
- // result: (EQ x yes no)
+ // match: (EQZ (SGTU x (MOVVconst [0])) yes no)
+ // result: (EQZ x yes no)
for b.Controls[0].Op == OpLOONG64SGTU {
v_0 := b.Controls[0]
_ = v_0.Args[1]
@@ -11534,10 +11689,10 @@ func rewriteBlockLOONG64(b *Block) bool {
if v_0_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 {
break
}
- b.resetWithControl(BlockLOONG64EQ, x)
+ b.resetWithControl(BlockLOONG64EQZ, x)
return true
}
- // match: (EQ (SGTconst [0] x) yes no)
+ // match: (EQZ (SGTconst [0] x) yes no)
// result: (GEZ x yes no)
for b.Controls[0].Op == OpLOONG64SGTconst {
v_0 := b.Controls[0]
@@ -11548,7 +11703,7 @@ func rewriteBlockLOONG64(b *Block) bool {
b.resetWithControl(BlockLOONG64GEZ, x)
return true
}
- // match: (EQ (SGT x (MOVVconst [0])) yes no)
+ // match: (EQZ (SGT x (MOVVconst [0])) yes no)
// result: (LEZ x yes no)
for b.Controls[0].Op == OpLOONG64SGT {
v_0 := b.Controls[0]
@@ -11561,9 +11716,9 @@ func rewriteBlockLOONG64(b *Block) bool {
b.resetWithControl(BlockLOONG64LEZ, x)
return true
}
- // match: (EQ (SGTU (MOVVconst [c]) y) yes no)
+ // match: (EQZ (SGTU (MOVVconst [c]) y) yes no)
// cond: c >= -2048 && c <= 2047
- // result: (EQ (SGTUconst [c] y) yes no)
+ // result: (EQZ (SGTUconst [c] y) yes no)
for b.Controls[0].Op == OpLOONG64SGTU {
v_0 := b.Controls[0]
y := v_0.Args[1]
@@ -11578,10 +11733,10 @@ func rewriteBlockLOONG64(b *Block) bool {
v0 := b.NewValue0(v_0.Pos, OpLOONG64SGTUconst, typ.Bool)
v0.AuxInt = int64ToAuxInt(c)
v0.AddArg(y)
- b.resetWithControl(BlockLOONG64EQ, v0)
+ b.resetWithControl(BlockLOONG64EQZ, v0)
return true
}
- // match: (EQ (SUBV x y) yes no)
+ // match: (EQZ (SUBV x y) yes no)
// result: (BEQ x y yes no)
for b.Controls[0].Op == OpLOONG64SUBV {
v_0 := b.Controls[0]
@@ -11590,7 +11745,7 @@ func rewriteBlockLOONG64(b *Block) bool {
b.resetWithControl2(BlockLOONG64BEQ, x, y)
return true
}
- // match: (EQ (SGT x y) yes no)
+ // match: (EQZ (SGT x y) yes no)
// result: (BGE y x yes no)
for b.Controls[0].Op == OpLOONG64SGT {
v_0 := b.Controls[0]
@@ -11599,7 +11754,7 @@ func rewriteBlockLOONG64(b *Block) bool {
b.resetWithControl2(BlockLOONG64BGE, y, x)
return true
}
- // match: (EQ (SGTU x y) yes no)
+ // match: (EQZ (SGTU x y) yes no)
// result: (BGEU y x yes no)
for b.Controls[0].Op == OpLOONG64SGTU {
v_0 := b.Controls[0]
@@ -11608,7 +11763,29 @@ func rewriteBlockLOONG64(b *Block) bool {
b.resetWithControl2(BlockLOONG64BGEU, y, x)
return true
}
- // match: (EQ (MOVVconst [0]) yes no)
+ // match: (EQZ (SGTconst [c] y) yes no)
+ // result: (BGE y (MOVVconst [c]) yes no)
+ for b.Controls[0].Op == OpLOONG64SGTconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ y := v_0.Args[0]
+ v0 := b.NewValue0(b.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(c)
+ b.resetWithControl2(BlockLOONG64BGE, y, v0)
+ return true
+ }
+ // match: (EQZ (SGTUconst [c] y) yes no)
+ // result: (BGEU y (MOVVconst [c]) yes no)
+ for b.Controls[0].Op == OpLOONG64SGTUconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ y := v_0.Args[0]
+ v0 := b.NewValue0(b.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(c)
+ b.resetWithControl2(BlockLOONG64BGEU, y, v0)
+ return true
+ }
+ // match: (EQZ (MOVVconst [0]) yes no)
// result: (First yes no)
for b.Controls[0].Op == OpLOONG64MOVVconst {
v_0 := b.Controls[0]
@@ -11618,7 +11795,7 @@ func rewriteBlockLOONG64(b *Block) bool {
b.Reset(BlockFirst)
return true
}
- // match: (EQ (MOVVconst [c]) yes no)
+ // match: (EQZ (MOVVconst [c]) yes no)
// cond: c != 0
// result: (First no yes)
for b.Controls[0].Op == OpLOONG64MOVVconst {
@@ -11631,6 +11808,14 @@ func rewriteBlockLOONG64(b *Block) bool {
b.swapSuccessors()
return true
}
+ // match: (EQZ (NEGV x) yes no)
+ // result: (EQZ x yes no)
+ for b.Controls[0].Op == OpLOONG64NEGV {
+ v_0 := b.Controls[0]
+ x := v_0.Args[0]
+ b.resetWithControl(BlockLOONG64EQZ, x)
+ return true
+ }
case BlockLOONG64GEZ:
// match: (GEZ (MOVVconst [c]) yes no)
// cond: c >= 0
@@ -11685,12 +11870,12 @@ func rewriteBlockLOONG64(b *Block) bool {
}
case BlockIf:
// match: (If cond yes no)
- // result: (NE (MOVBUreg cond) yes no)
+ // result: (NEZ (MOVBUreg cond) yes no)
for {
cond := b.Controls[0]
v0 := b.NewValue0(cond.Pos, OpLOONG64MOVBUreg, typ.UInt64)
v0.AddArg(cond)
- b.resetWithControl(BlockLOONG64NE, v0)
+ b.resetWithControl(BlockLOONG64NEZ, v0)
return true
}
case BlockLOONG64LEZ:
@@ -11745,8 +11930,8 @@ func rewriteBlockLOONG64(b *Block) bool {
b.swapSuccessors()
return true
}
- case BlockLOONG64NE:
- // match: (NE (FPFlagTrue cmp) yes no)
+ case BlockLOONG64NEZ:
+ // match: (NEZ (FPFlagTrue cmp) yes no)
// result: (FPT cmp yes no)
for b.Controls[0].Op == OpLOONG64FPFlagTrue {
v_0 := b.Controls[0]
@@ -11754,7 +11939,7 @@ func rewriteBlockLOONG64(b *Block) bool {
b.resetWithControl(BlockLOONG64FPT, cmp)
return true
}
- // match: (NE (FPFlagFalse cmp) yes no)
+ // match: (NEZ (FPFlagFalse cmp) yes no)
// result: (FPF cmp yes no)
for b.Controls[0].Op == OpLOONG64FPFlagFalse {
v_0 := b.Controls[0]
@@ -11762,8 +11947,8 @@ func rewriteBlockLOONG64(b *Block) bool {
b.resetWithControl(BlockLOONG64FPF, cmp)
return true
}
- // match: (NE (XORconst [1] cmp:(SGT _ _)) yes no)
- // result: (EQ cmp yes no)
+ // match: (NEZ (XORconst [1] cmp:(SGT _ _)) yes no)
+ // result: (EQZ cmp yes no)
for b.Controls[0].Op == OpLOONG64XORconst {
v_0 := b.Controls[0]
if auxIntToInt64(v_0.AuxInt) != 1 {
@@ -11773,11 +11958,11 @@ func rewriteBlockLOONG64(b *Block) bool {
if cmp.Op != OpLOONG64SGT {
break
}
- b.resetWithControl(BlockLOONG64EQ, cmp)
+ b.resetWithControl(BlockLOONG64EQZ, cmp)
return true
}
- // match: (NE (XORconst [1] cmp:(SGTU _ _)) yes no)
- // result: (EQ cmp yes no)
+ // match: (NEZ (XORconst [1] cmp:(SGTU _ _)) yes no)
+ // result: (EQZ cmp yes no)
for b.Controls[0].Op == OpLOONG64XORconst {
v_0 := b.Controls[0]
if auxIntToInt64(v_0.AuxInt) != 1 {
@@ -11787,11 +11972,11 @@ func rewriteBlockLOONG64(b *Block) bool {
if cmp.Op != OpLOONG64SGTU {
break
}
- b.resetWithControl(BlockLOONG64EQ, cmp)
+ b.resetWithControl(BlockLOONG64EQZ, cmp)
return true
}
- // match: (NE (XORconst [1] cmp:(SGTconst _)) yes no)
- // result: (EQ cmp yes no)
+ // match: (NEZ (XORconst [1] cmp:(SGTconst _)) yes no)
+ // result: (EQZ cmp yes no)
for b.Controls[0].Op == OpLOONG64XORconst {
v_0 := b.Controls[0]
if auxIntToInt64(v_0.AuxInt) != 1 {
@@ -11801,11 +11986,11 @@ func rewriteBlockLOONG64(b *Block) bool {
if cmp.Op != OpLOONG64SGTconst {
break
}
- b.resetWithControl(BlockLOONG64EQ, cmp)
+ b.resetWithControl(BlockLOONG64EQZ, cmp)
return true
}
- // match: (NE (XORconst [1] cmp:(SGTUconst _)) yes no)
- // result: (EQ cmp yes no)
+ // match: (NEZ (XORconst [1] cmp:(SGTUconst _)) yes no)
+ // result: (EQZ cmp yes no)
for b.Controls[0].Op == OpLOONG64XORconst {
v_0 := b.Controls[0]
if auxIntToInt64(v_0.AuxInt) != 1 {
@@ -11815,22 +12000,22 @@ func rewriteBlockLOONG64(b *Block) bool {
if cmp.Op != OpLOONG64SGTUconst {
break
}
- b.resetWithControl(BlockLOONG64EQ, cmp)
+ b.resetWithControl(BlockLOONG64EQZ, cmp)
return true
}
- // match: (NE (SGTUconst [1] x) yes no)
- // result: (EQ x yes no)
+ // match: (NEZ (SGTUconst [1] x) yes no)
+ // result: (EQZ x yes no)
for b.Controls[0].Op == OpLOONG64SGTUconst {
v_0 := b.Controls[0]
if auxIntToInt64(v_0.AuxInt) != 1 {
break
}
x := v_0.Args[0]
- b.resetWithControl(BlockLOONG64EQ, x)
+ b.resetWithControl(BlockLOONG64EQZ, x)
return true
}
- // match: (NE (SGTU x (MOVVconst [0])) yes no)
- // result: (NE x yes no)
+ // match: (NEZ (SGTU x (MOVVconst [0])) yes no)
+ // result: (NEZ x yes no)
for b.Controls[0].Op == OpLOONG64SGTU {
v_0 := b.Controls[0]
_ = v_0.Args[1]
@@ -11839,10 +12024,10 @@ func rewriteBlockLOONG64(b *Block) bool {
if v_0_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 {
break
}
- b.resetWithControl(BlockLOONG64NE, x)
+ b.resetWithControl(BlockLOONG64NEZ, x)
return true
}
- // match: (NE (SGTconst [0] x) yes no)
+ // match: (NEZ (SGTconst [0] x) yes no)
// result: (LTZ x yes no)
for b.Controls[0].Op == OpLOONG64SGTconst {
v_0 := b.Controls[0]
@@ -11853,7 +12038,7 @@ func rewriteBlockLOONG64(b *Block) bool {
b.resetWithControl(BlockLOONG64LTZ, x)
return true
}
- // match: (NE (SGT x (MOVVconst [0])) yes no)
+ // match: (NEZ (SGT x (MOVVconst [0])) yes no)
// result: (GTZ x yes no)
for b.Controls[0].Op == OpLOONG64SGT {
v_0 := b.Controls[0]
@@ -11866,9 +12051,9 @@ func rewriteBlockLOONG64(b *Block) bool {
b.resetWithControl(BlockLOONG64GTZ, x)
return true
}
- // match: (NE (SGTU (MOVVconst [c]) y) yes no)
+ // match: (NEZ (SGTU (MOVVconst [c]) y) yes no)
// cond: c >= -2048 && c <= 2047
- // result: (NE (SGTUconst [c] y) yes no)
+ // result: (NEZ (SGTUconst [c] y) yes no)
for b.Controls[0].Op == OpLOONG64SGTU {
v_0 := b.Controls[0]
y := v_0.Args[1]
@@ -11883,10 +12068,10 @@ func rewriteBlockLOONG64(b *Block) bool {
v0 := b.NewValue0(v_0.Pos, OpLOONG64SGTUconst, typ.Bool)
v0.AuxInt = int64ToAuxInt(c)
v0.AddArg(y)
- b.resetWithControl(BlockLOONG64NE, v0)
+ b.resetWithControl(BlockLOONG64NEZ, v0)
return true
}
- // match: (NE (SUBV x y) yes no)
+ // match: (NEZ (SUBV x y) yes no)
// result: (BNE x y yes no)
for b.Controls[0].Op == OpLOONG64SUBV {
v_0 := b.Controls[0]
@@ -11895,7 +12080,7 @@ func rewriteBlockLOONG64(b *Block) bool {
b.resetWithControl2(BlockLOONG64BNE, x, y)
return true
}
- // match: (NE (SGT x y) yes no)
+ // match: (NEZ (SGT x y) yes no)
// result: (BLT y x yes no)
for b.Controls[0].Op == OpLOONG64SGT {
v_0 := b.Controls[0]
@@ -11904,7 +12089,7 @@ func rewriteBlockLOONG64(b *Block) bool {
b.resetWithControl2(BlockLOONG64BLT, y, x)
return true
}
- // match: (NE (SGTU x y) yes no)
+ // match: (NEZ (SGTU x y) yes no)
// result: (BLTU y x yes no)
for b.Controls[0].Op == OpLOONG64SGTU {
v_0 := b.Controls[0]
@@ -11913,7 +12098,29 @@ func rewriteBlockLOONG64(b *Block) bool {
b.resetWithControl2(BlockLOONG64BLTU, y, x)
return true
}
- // match: (NE (MOVVconst [0]) yes no)
+ // match: (NEZ (SGTconst [c] y) yes no)
+ // result: (BLT y (MOVVconst [c]) yes no)
+ for b.Controls[0].Op == OpLOONG64SGTconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ y := v_0.Args[0]
+ v0 := b.NewValue0(b.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(c)
+ b.resetWithControl2(BlockLOONG64BLT, y, v0)
+ return true
+ }
+ // match: (NEZ (SGTUconst [c] y) yes no)
+ // result: (BLTU y (MOVVconst [c]) yes no)
+ for b.Controls[0].Op == OpLOONG64SGTUconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ y := v_0.Args[0]
+ v0 := b.NewValue0(b.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(c)
+ b.resetWithControl2(BlockLOONG64BLTU, y, v0)
+ return true
+ }
+ // match: (NEZ (MOVVconst [0]) yes no)
// result: (First no yes)
for b.Controls[0].Op == OpLOONG64MOVVconst {
v_0 := b.Controls[0]
@@ -11924,7 +12131,7 @@ func rewriteBlockLOONG64(b *Block) bool {
b.swapSuccessors()
return true
}
- // match: (NE (MOVVconst [c]) yes no)
+ // match: (NEZ (MOVVconst [c]) yes no)
// cond: c != 0
// result: (First yes no)
for b.Controls[0].Op == OpLOONG64MOVVconst {
@@ -11936,6 +12143,14 @@ func rewriteBlockLOONG64(b *Block) bool {
b.Reset(BlockFirst)
return true
}
+ // match: (NEZ (NEGV x) yes no)
+ // result: (NEZ x yes no)
+ for b.Controls[0].Op == OpLOONG64NEGV {
+ v_0 := b.Controls[0]
+ x := v_0.Args[0]
+ b.resetWithControl(BlockLOONG64NEZ, x)
+ return true
+ }
}
return false
}
diff --git a/src/cmd/compile/internal/ssa/rewriteLOONG64latelower.go b/src/cmd/compile/internal/ssa/rewriteLOONG64latelower.go
index ef9b83192ccbe4..60ba120e48dad6 100644
--- a/src/cmd/compile/internal/ssa/rewriteLOONG64latelower.go
+++ b/src/cmd/compile/internal/ssa/rewriteLOONG64latelower.go
@@ -25,5 +25,37 @@ func rewriteValueLOONG64latelower_OpLOONG64SLLVconst(v *Value) bool {
return false
}
func rewriteBlockLOONG64latelower(b *Block) bool {
+ switch b.Kind {
+ case BlockLOONG64EQZ:
+ // match: (EQZ (XOR x y) yes no)
+ // result: (BEQ x y yes no)
+ for b.Controls[0].Op == OpLOONG64XOR {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ b.resetWithControl2(BlockLOONG64BEQ, x, y)
+ return true
+ }
+ }
+ case BlockLOONG64NEZ:
+ // match: (NEZ (XOR x y) yes no)
+ // result: (BNE x y yes no)
+ for b.Controls[0].Op == OpLOONG64XOR {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ b.resetWithControl2(BlockLOONG64BNE, x, y)
+ return true
+ }
+ }
+ }
return false
}
diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go
index e987ae9662884b..050ace83dea2d7 100644
--- a/src/cmd/compile/internal/ssa/rewritePPC64.go
+++ b/src/cmd/compile/internal/ssa/rewritePPC64.go
@@ -540,6 +540,12 @@ func rewriteValuePPC64(v *Value) bool {
return rewriteValuePPC64_OpPPC64LessEqual(v)
case OpPPC64LessThan:
return rewriteValuePPC64_OpPPC64LessThan(v)
+ case OpPPC64LoweredPanicBoundsCR:
+ return rewriteValuePPC64_OpPPC64LoweredPanicBoundsCR(v)
+ case OpPPC64LoweredPanicBoundsRC:
+ return rewriteValuePPC64_OpPPC64LoweredPanicBoundsRC(v)
+ case OpPPC64LoweredPanicBoundsRR:
+ return rewriteValuePPC64_OpPPC64LoweredPanicBoundsRR(v)
case OpPPC64MFVSRD:
return rewriteValuePPC64_OpPPC64MFVSRD(v)
case OpPPC64MOVBZload:
@@ -667,7 +673,8 @@ func rewriteValuePPC64(v *Value) bool {
case OpPPC64XORconst:
return rewriteValuePPC64_OpPPC64XORconst(v)
case OpPanicBounds:
- return rewriteValuePPC64_OpPanicBounds(v)
+ v.Op = OpPPC64LoweredPanicBoundsRR
+ return true
case OpPopCount16:
return rewriteValuePPC64_OpPopCount16(v)
case OpPopCount32:
@@ -6826,6 +6833,86 @@ func rewriteValuePPC64_OpPPC64LessThan(v *Value) bool {
return true
}
}
+func rewriteValuePPC64_OpPPC64LoweredPanicBoundsCR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredPanicBoundsCR [kind] {p} (MOVDconst [c]) mem)
+ // result: (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:p.C, Cy:c}} mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ p := auxToPanicBoundsC(v.Aux)
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ mem := v_1
+ v.reset(OpPPC64LoweredPanicBoundsCC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCCToAux(PanicBoundsCC{Cx: p.C, Cy: c})
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64LoweredPanicBoundsRC(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredPanicBoundsRC [kind] {p} (MOVDconst [c]) mem)
+ // result: (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:c, Cy:p.C}} mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ p := auxToPanicBoundsC(v.Aux)
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ mem := v_1
+ v.reset(OpPPC64LoweredPanicBoundsCC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCCToAux(PanicBoundsCC{Cx: c, Cy: p.C})
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64LoweredPanicBoundsRR(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredPanicBoundsRR [kind] x (MOVDconst [c]) mem)
+ // result: (LoweredPanicBoundsRC [kind] x {PanicBoundsC{C:c}} mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpPPC64LoweredPanicBoundsRC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCToAux(PanicBoundsC{C: c})
+ v.AddArg2(x, mem)
+ return true
+ }
+ // match: (LoweredPanicBoundsRR [kind] (MOVDconst [c]) y mem)
+ // result: (LoweredPanicBoundsCR [kind] {PanicBoundsC{C:c}} y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ y := v_1
+ mem := v_2
+ v.reset(OpPPC64LoweredPanicBoundsCR)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCToAux(PanicBoundsC{C: c})
+ v.AddArg2(y, mem)
+ return true
+ }
+ return false
+}
func rewriteValuePPC64_OpPPC64MFVSRD(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
@@ -12981,60 +13068,6 @@ func rewriteValuePPC64_OpPPC64XORconst(v *Value) bool {
}
return false
}
-func rewriteValuePPC64_OpPanicBounds(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- // match: (PanicBounds [kind] x y mem)
- // cond: boundsABI(kind) == 0
- // result: (LoweredPanicBoundsA [kind] x y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- mem := v_2
- if !(boundsABI(kind) == 0) {
- break
- }
- v.reset(OpPPC64LoweredPanicBoundsA)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg3(x, y, mem)
- return true
- }
- // match: (PanicBounds [kind] x y mem)
- // cond: boundsABI(kind) == 1
- // result: (LoweredPanicBoundsB [kind] x y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- mem := v_2
- if !(boundsABI(kind) == 1) {
- break
- }
- v.reset(OpPPC64LoweredPanicBoundsB)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg3(x, y, mem)
- return true
- }
- // match: (PanicBounds [kind] x y mem)
- // cond: boundsABI(kind) == 2
- // result: (LoweredPanicBoundsC [kind] x y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- mem := v_2
- if !(boundsABI(kind) == 2) {
- break
- }
- v.reset(OpPPC64LoweredPanicBoundsC)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg3(x, y, mem)
- return true
- }
- return false
-}
func rewriteValuePPC64_OpPopCount16(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
diff --git a/src/cmd/compile/internal/ssa/rewriteRISCV64.go b/src/cmd/compile/internal/ssa/rewriteRISCV64.go
index 95c6489a511be8..c78ae89561910e 100644
--- a/src/cmd/compile/internal/ssa/rewriteRISCV64.go
+++ b/src/cmd/compile/internal/ssa/rewriteRISCV64.go
@@ -486,7 +486,8 @@ func rewriteValueRISCV64(v *Value) bool {
v.Op = OpRISCV64OR
return true
case OpPanicBounds:
- return rewriteValueRISCV64_OpPanicBounds(v)
+ v.Op = OpRISCV64LoweredPanicBoundsRR
+ return true
case OpPopCount16:
return rewriteValueRISCV64_OpPopCount16(v)
case OpPopCount32:
@@ -512,14 +513,30 @@ func rewriteValueRISCV64(v *Value) bool {
return rewriteValueRISCV64_OpRISCV64FADDD(v)
case OpRISCV64FADDS:
return rewriteValueRISCV64_OpRISCV64FADDS(v)
+ case OpRISCV64FEQD:
+ return rewriteValueRISCV64_OpRISCV64FEQD(v)
+ case OpRISCV64FLED:
+ return rewriteValueRISCV64_OpRISCV64FLED(v)
+ case OpRISCV64FLTD:
+ return rewriteValueRISCV64_OpRISCV64FLTD(v)
case OpRISCV64FMADDD:
return rewriteValueRISCV64_OpRISCV64FMADDD(v)
case OpRISCV64FMADDS:
return rewriteValueRISCV64_OpRISCV64FMADDS(v)
+ case OpRISCV64FMOVDload:
+ return rewriteValueRISCV64_OpRISCV64FMOVDload(v)
+ case OpRISCV64FMOVDstore:
+ return rewriteValueRISCV64_OpRISCV64FMOVDstore(v)
+ case OpRISCV64FMOVWload:
+ return rewriteValueRISCV64_OpRISCV64FMOVWload(v)
+ case OpRISCV64FMOVWstore:
+ return rewriteValueRISCV64_OpRISCV64FMOVWstore(v)
case OpRISCV64FMSUBD:
return rewriteValueRISCV64_OpRISCV64FMSUBD(v)
case OpRISCV64FMSUBS:
return rewriteValueRISCV64_OpRISCV64FMSUBS(v)
+ case OpRISCV64FNED:
+ return rewriteValueRISCV64_OpRISCV64FNED(v)
case OpRISCV64FNMADDD:
return rewriteValueRISCV64_OpRISCV64FNMADDD(v)
case OpRISCV64FNMADDS:
@@ -532,6 +549,12 @@ func rewriteValueRISCV64(v *Value) bool {
return rewriteValueRISCV64_OpRISCV64FSUBD(v)
case OpRISCV64FSUBS:
return rewriteValueRISCV64_OpRISCV64FSUBS(v)
+ case OpRISCV64LoweredPanicBoundsCR:
+ return rewriteValueRISCV64_OpRISCV64LoweredPanicBoundsCR(v)
+ case OpRISCV64LoweredPanicBoundsRC:
+ return rewriteValueRISCV64_OpRISCV64LoweredPanicBoundsRC(v)
+ case OpRISCV64LoweredPanicBoundsRR:
+ return rewriteValueRISCV64_OpRISCV64LoweredPanicBoundsRR(v)
case OpRISCV64MOVBUload:
return rewriteValueRISCV64_OpRISCV64MOVBUload(v)
case OpRISCV64MOVBUreg:
@@ -3416,60 +3439,6 @@ func rewriteValueRISCV64_OpOffPtr(v *Value) bool {
return true
}
}
-func rewriteValueRISCV64_OpPanicBounds(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- // match: (PanicBounds [kind] x y mem)
- // cond: boundsABI(kind) == 0
- // result: (LoweredPanicBoundsA [kind] x y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- mem := v_2
- if !(boundsABI(kind) == 0) {
- break
- }
- v.reset(OpRISCV64LoweredPanicBoundsA)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg3(x, y, mem)
- return true
- }
- // match: (PanicBounds [kind] x y mem)
- // cond: boundsABI(kind) == 1
- // result: (LoweredPanicBoundsB [kind] x y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- mem := v_2
- if !(boundsABI(kind) == 1) {
- break
- }
- v.reset(OpRISCV64LoweredPanicBoundsB)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg3(x, y, mem)
- return true
- }
- // match: (PanicBounds [kind] x y mem)
- // cond: boundsABI(kind) == 2
- // result: (LoweredPanicBoundsC [kind] x y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- mem := v_2
- if !(boundsABI(kind) == 2) {
- break
- }
- v.reset(OpRISCV64LoweredPanicBoundsC)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg3(x, y, mem)
- return true
- }
- return false
-}
func rewriteValueRISCV64_OpPopCount16(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
@@ -3801,6 +3770,149 @@ func rewriteValueRISCV64_OpRISCV64FADDS(v *Value) bool {
}
return false
}
+func rewriteValueRISCV64_OpRISCV64FEQD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (FEQD x (FMVDX (MOVDconst [int64(math.Float64bits(math.Inf(-1)))])))
+ // result: (ANDI [1] (FCLASSD x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpRISCV64FMVDX {
+ continue
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpRISCV64MOVDconst || auxIntToInt64(v_1_0.AuxInt) != int64(math.Float64bits(math.Inf(-1))) {
+ continue
+ }
+ v.reset(OpRISCV64ANDI)
+ v.AuxInt = int64ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpRISCV64FCLASSD, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (FEQD x (FMVDX (MOVDconst [int64(math.Float64bits(math.Inf(1)))])))
+ // result: (SNEZ (ANDI [1<<7] (FCLASSD x)))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpRISCV64FMVDX {
+ continue
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpRISCV64MOVDconst || auxIntToInt64(v_1_0.AuxInt) != int64(math.Float64bits(math.Inf(1))) {
+ continue
+ }
+ v.reset(OpRISCV64SNEZ)
+ v0 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(1 << 7)
+ v1 := b.NewValue0(v.Pos, OpRISCV64FCLASSD, typ.Int64)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64FLED(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (FLED (FMVDX (MOVDconst [int64(math.Float64bits(-math.MaxFloat64))])) x)
+ // result: (SNEZ (ANDI [0xff &^ 1] (FCLASSD x)))
+ for {
+ if v_0.Op != OpRISCV64FMVDX {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpRISCV64MOVDconst || auxIntToInt64(v_0_0.AuxInt) != int64(math.Float64bits(-math.MaxFloat64)) {
+ break
+ }
+ x := v_1
+ v.reset(OpRISCV64SNEZ)
+ v0 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0xff &^ 1)
+ v1 := b.NewValue0(v.Pos, OpRISCV64FCLASSD, typ.Int64)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (FLED x (FMVDX (MOVDconst [int64(math.Float64bits(math.MaxFloat64))])))
+ // result: (SNEZ (ANDI [0xff &^ (1<<7)] (FCLASSD x)))
+ for {
+ x := v_0
+ if v_1.Op != OpRISCV64FMVDX {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpRISCV64MOVDconst || auxIntToInt64(v_1_0.AuxInt) != int64(math.Float64bits(math.MaxFloat64)) {
+ break
+ }
+ v.reset(OpRISCV64SNEZ)
+ v0 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0xff &^ (1 << 7))
+ v1 := b.NewValue0(v.Pos, OpRISCV64FCLASSD, typ.Int64)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64FLTD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (FLTD x (FMVDX (MOVDconst [int64(math.Float64bits(-math.MaxFloat64))])))
+ // result: (ANDI [1] (FCLASSD x))
+ for {
+ x := v_0
+ if v_1.Op != OpRISCV64FMVDX {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpRISCV64MOVDconst || auxIntToInt64(v_1_0.AuxInt) != int64(math.Float64bits(-math.MaxFloat64)) {
+ break
+ }
+ v.reset(OpRISCV64ANDI)
+ v.AuxInt = int64ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpRISCV64FCLASSD, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (FLTD (FMVDX (MOVDconst [int64(math.Float64bits(math.MaxFloat64))])) x)
+ // result: (SNEZ (ANDI [1<<7] (FCLASSD x)))
+ for {
+ if v_0.Op != OpRISCV64FMVDX {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpRISCV64MOVDconst || auxIntToInt64(v_0_0.AuxInt) != int64(math.Float64bits(math.MaxFloat64)) {
+ break
+ }
+ x := v_1
+ v.reset(OpRISCV64SNEZ)
+ v0 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(1 << 7)
+ v1 := b.NewValue0(v.Pos, OpRISCV64FCLASSD, typ.Int64)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
func rewriteValueRISCV64_OpRISCV64FMADDD(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
@@ -3891,6 +4003,250 @@ func rewriteValueRISCV64_OpRISCV64FMADDS(v *Value) bool {
}
return false
}
+func rewriteValueRISCV64_OpRISCV64FMOVDload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (FMOVDload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpRISCV64FMOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (FMOVDload [off1] {sym} (ADDI [off2] base) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (FMOVDload [off1+int32(off2)] {sym} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64FMOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (FMOVDload [off] {sym} ptr1 (MOVDstore [off] {sym} ptr2 x _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (FMVDX x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr1 := v_0
+ if v_1.Op != OpRISCV64MOVDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpRISCV64FMVDX)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64FMOVDstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (FMOVDstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpRISCV64FMOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (FMOVDstore [off1] {sym} (ADDI [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (FMOVDstore [off1+int32(off2)] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64FMOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64FMOVWload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (FMOVWload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (FMOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpRISCV64FMOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (FMOVWload [off1] {sym} (ADDI [off2] base) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (FMOVWload [off1+int32(off2)] {sym} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64FMOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (FMOVWload [off] {sym} ptr1 (MOVWstore [off] {sym} ptr2 x _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (FMVSX x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr1 := v_0
+ if v_1.Op != OpRISCV64MOVWstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpRISCV64FMVSX)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64FMOVWstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (FMOVWstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (FMOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpRISCV64FMOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (FMOVWstore [off1] {sym} (ADDI [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (FMOVWstore [off1+int32(off2)] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64FMOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
func rewriteValueRISCV64_OpRISCV64FMSUBD(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
@@ -3981,6 +4337,59 @@ func rewriteValueRISCV64_OpRISCV64FMSUBS(v *Value) bool {
}
return false
}
+func rewriteValueRISCV64_OpRISCV64FNED(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (FNED x (FMVDX (MOVDconst [int64(math.Float64bits(math.Inf(-1)))])))
+ // result: (SEQZ (ANDI [1] (FCLASSD x)))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpRISCV64FMVDX {
+ continue
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpRISCV64MOVDconst || auxIntToInt64(v_1_0.AuxInt) != int64(math.Float64bits(math.Inf(-1))) {
+ continue
+ }
+ v.reset(OpRISCV64SEQZ)
+ v0 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpRISCV64FCLASSD, typ.Int64)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (FNED x (FMVDX (MOVDconst [int64(math.Float64bits(math.Inf(1)))])))
+ // result: (SEQZ (ANDI [1<<7] (FCLASSD x)))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpRISCV64FMVDX {
+ continue
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpRISCV64MOVDconst || auxIntToInt64(v_1_0.AuxInt) != int64(math.Float64bits(math.Inf(1))) {
+ continue
+ }
+ v.reset(OpRISCV64SEQZ)
+ v0 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(1 << 7)
+ v1 := b.NewValue0(v.Pos, OpRISCV64FCLASSD, typ.Int64)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ return false
+}
func rewriteValueRISCV64_OpRISCV64FNMADDD(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
@@ -4239,6 +4648,86 @@ func rewriteValueRISCV64_OpRISCV64FSUBS(v *Value) bool {
}
return false
}
+func rewriteValueRISCV64_OpRISCV64LoweredPanicBoundsCR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredPanicBoundsCR [kind] {p} (MOVDconst [c]) mem)
+ // result: (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:p.C, Cy:c}} mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ p := auxToPanicBoundsC(v.Aux)
+ if v_0.Op != OpRISCV64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ mem := v_1
+ v.reset(OpRISCV64LoweredPanicBoundsCC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCCToAux(PanicBoundsCC{Cx: p.C, Cy: c})
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64LoweredPanicBoundsRC(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredPanicBoundsRC [kind] {p} (MOVDconst [c]) mem)
+ // result: (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:c, Cy:p.C}} mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ p := auxToPanicBoundsC(v.Aux)
+ if v_0.Op != OpRISCV64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ mem := v_1
+ v.reset(OpRISCV64LoweredPanicBoundsCC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCCToAux(PanicBoundsCC{Cx: c, Cy: p.C})
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64LoweredPanicBoundsRR(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredPanicBoundsRR [kind] x (MOVDconst [c]) mem)
+ // result: (LoweredPanicBoundsRC [kind] x {PanicBoundsC{C:c}} mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpRISCV64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpRISCV64LoweredPanicBoundsRC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCToAux(PanicBoundsC{C: c})
+ v.AddArg2(x, mem)
+ return true
+ }
+ // match: (LoweredPanicBoundsRR [kind] (MOVDconst [c]) y mem)
+ // result: (LoweredPanicBoundsCR [kind] {PanicBoundsC{C:c}} y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ y := v_1
+ mem := v_2
+ v.reset(OpRISCV64LoweredPanicBoundsCR)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCToAux(PanicBoundsC{C: c})
+ v.AddArg2(y, mem)
+ return true
+ }
+ return false
+}
func rewriteValueRISCV64_OpRISCV64MOVBUload(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
@@ -4944,6 +5433,25 @@ func rewriteValueRISCV64_OpRISCV64MOVDload(v *Value) bool {
v.AddArg2(base, mem)
return true
}
+ // match: (MOVDload [off] {sym} ptr1 (FMOVDstore [off] {sym} ptr2 x _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (FMVXD x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr1 := v_0
+ if v_1.Op != OpRISCV64FMOVDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpRISCV64FMVXD)
+ v.AddArg(x)
+ return true
+ }
return false
}
func rewriteValueRISCV64_OpRISCV64MOVDnop(v *Value) bool {
@@ -5625,6 +6133,7 @@ func rewriteValueRISCV64_OpRISCV64MOVWUload(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
config := b.Func.Config
+ typ := &b.Func.Config.Types
// match: (MOVWUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)
// result: (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
@@ -5668,6 +6177,27 @@ func rewriteValueRISCV64_OpRISCV64MOVWUload(v *Value) bool {
v.AddArg2(base, mem)
return true
}
+ // match: (MOVWUload [off] {sym} ptr1 (FMOVWstore [off] {sym} ptr2 x _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (MOVWUreg (FMVXS x))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr1 := v_0
+ if v_1.Op != OpRISCV64FMOVWstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVWUreg)
+ v0 := b.NewValue0(v_1.Pos, OpRISCV64FMVXS, typ.Int32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
return false
}
func rewriteValueRISCV64_OpRISCV64MOVWUreg(v *Value) bool {
@@ -5858,6 +6388,25 @@ func rewriteValueRISCV64_OpRISCV64MOVWload(v *Value) bool {
v.AddArg2(base, mem)
return true
}
+ // match: (MOVWload [off] {sym} ptr1 (FMOVWstore [off] {sym} ptr2 x _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (FMVXS x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr1 := v_0
+ if v_1.Op != OpRISCV64FMOVWstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpRISCV64FMVXS)
+ v.AddArg(x)
+ return true
+ }
return false
}
func rewriteValueRISCV64_OpRISCV64MOVWreg(v *Value) bool {
diff --git a/src/cmd/compile/internal/ssa/rewriteS390X.go b/src/cmd/compile/internal/ssa/rewriteS390X.go
index a7fde81c4789b2..07dbe7bf7a697c 100644
--- a/src/cmd/compile/internal/ssa/rewriteS390X.go
+++ b/src/cmd/compile/internal/ssa/rewriteS390X.go
@@ -477,7 +477,8 @@ func rewriteValueS390X(v *Value) bool {
v.Op = OpS390XORW
return true
case OpPanicBounds:
- return rewriteValueS390X_OpPanicBounds(v)
+ v.Op = OpS390XLoweredPanicBoundsRR
+ return true
case OpPopCount16:
return rewriteValueS390X_OpPopCount16(v)
case OpPopCount32:
@@ -644,6 +645,12 @@ func rewriteValueS390X(v *Value) bool {
return rewriteValueS390X_OpS390XLTDBR(v)
case OpS390XLTEBR:
return rewriteValueS390X_OpS390XLTEBR(v)
+ case OpS390XLoweredPanicBoundsCR:
+ return rewriteValueS390X_OpS390XLoweredPanicBoundsCR(v)
+ case OpS390XLoweredPanicBoundsRC:
+ return rewriteValueS390X_OpS390XLoweredPanicBoundsRC(v)
+ case OpS390XLoweredPanicBoundsRR:
+ return rewriteValueS390X_OpS390XLoweredPanicBoundsRR(v)
case OpS390XLoweredRound32F:
return rewriteValueS390X_OpS390XLoweredRound32F(v)
case OpS390XLoweredRound64F:
@@ -3971,60 +3978,6 @@ func rewriteValueS390X_OpOffPtr(v *Value) bool {
return true
}
}
-func rewriteValueS390X_OpPanicBounds(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- // match: (PanicBounds [kind] x y mem)
- // cond: boundsABI(kind) == 0
- // result: (LoweredPanicBoundsA [kind] x y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- mem := v_2
- if !(boundsABI(kind) == 0) {
- break
- }
- v.reset(OpS390XLoweredPanicBoundsA)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg3(x, y, mem)
- return true
- }
- // match: (PanicBounds [kind] x y mem)
- // cond: boundsABI(kind) == 1
- // result: (LoweredPanicBoundsB [kind] x y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- mem := v_2
- if !(boundsABI(kind) == 1) {
- break
- }
- v.reset(OpS390XLoweredPanicBoundsB)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg3(x, y, mem)
- return true
- }
- // match: (PanicBounds [kind] x y mem)
- // cond: boundsABI(kind) == 2
- // result: (LoweredPanicBoundsC [kind] x y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- mem := v_2
- if !(boundsABI(kind) == 2) {
- break
- }
- v.reset(OpS390XLoweredPanicBoundsC)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg3(x, y, mem)
- return true
- }
- return false
-}
func rewriteValueS390X_OpPopCount16(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
@@ -8147,6 +8100,86 @@ func rewriteValueS390X_OpS390XLTEBR(v *Value) bool {
}
return false
}
+func rewriteValueS390X_OpS390XLoweredPanicBoundsCR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredPanicBoundsCR [kind] {p} (MOVDconst [c]) mem)
+ // result: (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:p.C, Cy:c}} mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ p := auxToPanicBoundsC(v.Aux)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ mem := v_1
+ v.reset(OpS390XLoweredPanicBoundsCC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCCToAux(PanicBoundsCC{Cx: p.C, Cy: c})
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XLoweredPanicBoundsRC(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredPanicBoundsRC [kind] {p} (MOVDconst [c]) mem)
+ // result: (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:c, Cy:p.C}} mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ p := auxToPanicBoundsC(v.Aux)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ mem := v_1
+ v.reset(OpS390XLoweredPanicBoundsCC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCCToAux(PanicBoundsCC{Cx: c, Cy: p.C})
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XLoweredPanicBoundsRR(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredPanicBoundsRR [kind] x (MOVDconst [c]) mem)
+ // result: (LoweredPanicBoundsRC [kind] x {PanicBoundsC{C:c}} mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpS390XLoweredPanicBoundsRC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCToAux(PanicBoundsC{C: c})
+ v.AddArg2(x, mem)
+ return true
+ }
+ // match: (LoweredPanicBoundsRR [kind] (MOVDconst [c]) y mem)
+ // result: (LoweredPanicBoundsCR [kind] {PanicBoundsC{C:c}} y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ y := v_1
+ mem := v_2
+ v.reset(OpS390XLoweredPanicBoundsCR)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCToAux(PanicBoundsC{C: c})
+ v.AddArg2(y, mem)
+ return true
+ }
+ return false
+}
func rewriteValueS390X_OpS390XLoweredRound32F(v *Value) bool {
v_0 := v.Args[0]
// match: (LoweredRound32F x:(FMOVSconst))
diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go
index bce94d35f94841..1e2159579dfbf2 100644
--- a/src/cmd/compile/internal/ssagen/ssa.go
+++ b/src/cmd/compile/internal/ssagen/ssa.go
@@ -184,42 +184,6 @@ func InitConfig() {
BoundsCheckFunc[ssa.BoundsSlice3C] = typecheck.LookupRuntimeFunc("goPanicSlice3C")
BoundsCheckFunc[ssa.BoundsSlice3CU] = typecheck.LookupRuntimeFunc("goPanicSlice3CU")
BoundsCheckFunc[ssa.BoundsConvert] = typecheck.LookupRuntimeFunc("goPanicSliceConvert")
- } else {
- BoundsCheckFunc[ssa.BoundsIndex] = typecheck.LookupRuntimeFunc("panicIndex")
- BoundsCheckFunc[ssa.BoundsIndexU] = typecheck.LookupRuntimeFunc("panicIndexU")
- BoundsCheckFunc[ssa.BoundsSliceAlen] = typecheck.LookupRuntimeFunc("panicSliceAlen")
- BoundsCheckFunc[ssa.BoundsSliceAlenU] = typecheck.LookupRuntimeFunc("panicSliceAlenU")
- BoundsCheckFunc[ssa.BoundsSliceAcap] = typecheck.LookupRuntimeFunc("panicSliceAcap")
- BoundsCheckFunc[ssa.BoundsSliceAcapU] = typecheck.LookupRuntimeFunc("panicSliceAcapU")
- BoundsCheckFunc[ssa.BoundsSliceB] = typecheck.LookupRuntimeFunc("panicSliceB")
- BoundsCheckFunc[ssa.BoundsSliceBU] = typecheck.LookupRuntimeFunc("panicSliceBU")
- BoundsCheckFunc[ssa.BoundsSlice3Alen] = typecheck.LookupRuntimeFunc("panicSlice3Alen")
- BoundsCheckFunc[ssa.BoundsSlice3AlenU] = typecheck.LookupRuntimeFunc("panicSlice3AlenU")
- BoundsCheckFunc[ssa.BoundsSlice3Acap] = typecheck.LookupRuntimeFunc("panicSlice3Acap")
- BoundsCheckFunc[ssa.BoundsSlice3AcapU] = typecheck.LookupRuntimeFunc("panicSlice3AcapU")
- BoundsCheckFunc[ssa.BoundsSlice3B] = typecheck.LookupRuntimeFunc("panicSlice3B")
- BoundsCheckFunc[ssa.BoundsSlice3BU] = typecheck.LookupRuntimeFunc("panicSlice3BU")
- BoundsCheckFunc[ssa.BoundsSlice3C] = typecheck.LookupRuntimeFunc("panicSlice3C")
- BoundsCheckFunc[ssa.BoundsSlice3CU] = typecheck.LookupRuntimeFunc("panicSlice3CU")
- BoundsCheckFunc[ssa.BoundsConvert] = typecheck.LookupRuntimeFunc("panicSliceConvert")
- }
- if Arch.LinkArch.PtrSize == 4 {
- ExtendCheckFunc[ssa.BoundsIndex] = typecheck.LookupRuntimeVar("panicExtendIndex")
- ExtendCheckFunc[ssa.BoundsIndexU] = typecheck.LookupRuntimeVar("panicExtendIndexU")
- ExtendCheckFunc[ssa.BoundsSliceAlen] = typecheck.LookupRuntimeVar("panicExtendSliceAlen")
- ExtendCheckFunc[ssa.BoundsSliceAlenU] = typecheck.LookupRuntimeVar("panicExtendSliceAlenU")
- ExtendCheckFunc[ssa.BoundsSliceAcap] = typecheck.LookupRuntimeVar("panicExtendSliceAcap")
- ExtendCheckFunc[ssa.BoundsSliceAcapU] = typecheck.LookupRuntimeVar("panicExtendSliceAcapU")
- ExtendCheckFunc[ssa.BoundsSliceB] = typecheck.LookupRuntimeVar("panicExtendSliceB")
- ExtendCheckFunc[ssa.BoundsSliceBU] = typecheck.LookupRuntimeVar("panicExtendSliceBU")
- ExtendCheckFunc[ssa.BoundsSlice3Alen] = typecheck.LookupRuntimeVar("panicExtendSlice3Alen")
- ExtendCheckFunc[ssa.BoundsSlice3AlenU] = typecheck.LookupRuntimeVar("panicExtendSlice3AlenU")
- ExtendCheckFunc[ssa.BoundsSlice3Acap] = typecheck.LookupRuntimeVar("panicExtendSlice3Acap")
- ExtendCheckFunc[ssa.BoundsSlice3AcapU] = typecheck.LookupRuntimeVar("panicExtendSlice3AcapU")
- ExtendCheckFunc[ssa.BoundsSlice3B] = typecheck.LookupRuntimeVar("panicExtendSlice3B")
- ExtendCheckFunc[ssa.BoundsSlice3BU] = typecheck.LookupRuntimeVar("panicExtendSlice3BU")
- ExtendCheckFunc[ssa.BoundsSlice3C] = typecheck.LookupRuntimeVar("panicExtendSlice3C")
- ExtendCheckFunc[ssa.BoundsSlice3CU] = typecheck.LookupRuntimeVar("panicExtendSlice3CU")
}
// Wasm (all asm funcs with special ABIs)
@@ -1358,9 +1322,6 @@ func (s *state) constInt(t *types.Type, c int64) *ssa.Value {
}
return s.constInt32(t, int32(c))
}
-func (s *state) constOffPtrSP(t *types.Type, c int64) *ssa.Value {
- return s.f.ConstOffPtrSP(t, c, s.sp)
-}
// newValueOrSfCall* are wrappers around newValue*, which may create a call to a
// soft-float runtime function instead (when emitting soft-float code).
@@ -5418,26 +5379,6 @@ func (s *state) putArg(n ir.Node, t *types.Type) *ssa.Value {
return a
}
-func (s *state) storeArgWithBase(n ir.Node, t *types.Type, base *ssa.Value, off int64) {
- pt := types.NewPtr(t)
- var addr *ssa.Value
- if base == s.sp {
- // Use special routine that avoids allocation on duplicate offsets.
- addr = s.constOffPtrSP(pt, off)
- } else {
- addr = s.newValue1I(ssa.OpOffPtr, pt, off, base)
- }
-
- if !ssa.CanSSA(t) {
- a := s.addr(n)
- s.move(t, addr, a)
- return
- }
-
- a := s.expr(n)
- s.storeType(t, addr, a, 0, false)
-}
-
// slice computes the slice v[i:j:k] and returns ptr, len, and cap of result.
// i,j,k may be nil, in which case they are set to their default value.
// v may be a slice, string or pointer to an array.
@@ -7758,7 +7699,4 @@ func SpillSlotAddr(spill ssa.Spill, baseReg int16, extraOffset int64) obj.Addr {
}
}
-var (
- BoundsCheckFunc [ssa.BoundsKindCount]*obj.LSym
- ExtendCheckFunc [ssa.BoundsKindCount]*obj.LSym
-)
+var BoundsCheckFunc [ssa.BoundsKindCount]*obj.LSym
diff --git a/src/cmd/compile/internal/staticinit/sched.go b/src/cmd/compile/internal/staticinit/sched.go
index ce2e921771ed94..5e39bb512f45f4 100644
--- a/src/cmd/compile/internal/staticinit/sched.go
+++ b/src/cmd/compile/internal/staticinit/sched.go
@@ -622,12 +622,6 @@ func (s *Schedule) staticAssignInlinedCall(l *ir.Name, loff int64, call *ir.Inli
// INLCALL-ReturnVars
// . NAME-p.~R0 Class:PAUTO Offset:0 OnStack Used PTR-*T tc(1) # x.go:18:13
//
- // In non-unified IR, the tree is slightly different:
- // - if there are no arguments to the inlined function,
- // the INLCALL-init omits the AS2.
- // - the DCL inside BLOCK is on the AS2's init list,
- // not its own statement in the top level of the BLOCK.
- //
// If the init values are side-effect-free and each either only
// appears once in the function body or is safely repeatable,
// then we inline the value expressions into the return argument
@@ -647,39 +641,26 @@ func (s *Schedule) staticAssignInlinedCall(l *ir.Name, loff int64, call *ir.Inli
// is the most important case for us to get right.
init := call.Init()
- var as2init *ir.AssignListStmt
- if len(init) == 2 && init[0].Op() == ir.OAS2 && init[1].Op() == ir.OINLMARK {
- as2init = init[0].(*ir.AssignListStmt)
- } else if len(init) == 1 && init[0].Op() == ir.OINLMARK {
- as2init = new(ir.AssignListStmt)
- } else {
+ if len(init) != 2 || init[0].Op() != ir.OAS2 || init[1].Op() != ir.OINLMARK {
return false
}
+ as2init := init[0].(*ir.AssignListStmt)
+
if len(call.Body) != 2 || call.Body[0].Op() != ir.OBLOCK || call.Body[1].Op() != ir.OLABEL {
return false
}
label := call.Body[1].(*ir.LabelStmt).Label
block := call.Body[0].(*ir.BlockStmt)
list := block.List
- var dcl *ir.Decl
- if len(list) == 3 && list[0].Op() == ir.ODCL {
- dcl = list[0].(*ir.Decl)
- list = list[1:]
- }
- if len(list) != 2 ||
- list[0].Op() != ir.OAS2 ||
- list[1].Op() != ir.OGOTO ||
- list[1].(*ir.BranchStmt).Label != label {
+ if len(list) != 3 ||
+ list[0].Op() != ir.ODCL ||
+ list[1].Op() != ir.OAS2 ||
+ list[2].Op() != ir.OGOTO ||
+ list[2].(*ir.BranchStmt).Label != label {
return false
}
- as2body := list[0].(*ir.AssignListStmt)
- if dcl == nil {
- ainit := as2body.Init()
- if len(ainit) != 1 || ainit[0].Op() != ir.ODCL {
- return false
- }
- dcl = ainit[0].(*ir.Decl)
- }
+ dcl := list[0].(*ir.Decl)
+ as2body := list[1].(*ir.AssignListStmt)
if len(as2body.Lhs) != 1 || as2body.Lhs[0] != dcl.X {
return false
}
diff --git a/src/cmd/compile/internal/syntax/printer.go b/src/cmd/compile/internal/syntax/printer.go
index 1c0bfc190ecf29..d86d77e73f70ec 100644
--- a/src/cmd/compile/internal/syntax/printer.go
+++ b/src/cmd/compile/internal/syntax/printer.go
@@ -138,10 +138,6 @@ func impliesSemi(tok token) bool {
// TODO(gri) provide table of []byte values for all tokens to avoid repeated string conversion
-func lineComment(text string) bool {
- return strings.HasPrefix(text, "//")
-}
-
func (p *printer) addWhitespace(kind ctrlSymbol, text string) {
p.pending = append(p.pending, whitespace{p.lastTok, kind /*text*/})
switch kind {
diff --git a/src/cmd/compile/internal/test/bench_test.go b/src/cmd/compile/internal/test/bench_test.go
index 472460009170e2..7303f672fee2a2 100644
--- a/src/cmd/compile/internal/test/bench_test.go
+++ b/src/cmd/compile/internal/test/bench_test.go
@@ -122,3 +122,26 @@ func BenchmarkBitToggleConst(b *testing.B) {
}
}
}
+
+func BenchmarkMulNeg(b *testing.B) {
+ x := make([]int64, 1024)
+ for i := 0; i < b.N; i++ {
+ var s int64
+ for i := range x {
+ s = (-x[i]) * 11
+ }
+ globl = s
+ }
+}
+
+func BenchmarkMul2Neg(b *testing.B) {
+ x := make([]int64, 1024)
+ y := make([]int64, 1024)
+ for i := 0; i < b.N; i++ {
+ var s int64
+ for i := range x {
+ s = (-x[i]) * (-y[i])
+ }
+ globl = s
+ }
+}
diff --git a/src/cmd/compile/internal/test/float_test.go b/src/cmd/compile/internal/test/float_test.go
index c736f970f99417..9e61148c5297e5 100644
--- a/src/cmd/compile/internal/test/float_test.go
+++ b/src/cmd/compile/internal/test/float_test.go
@@ -523,6 +523,106 @@ func TestFloatSignalingNaNConversionConst(t *testing.T) {
}
}
+//go:noinline
+func isPosInf(x float64) bool {
+ return math.IsInf(x, 1)
+}
+
+//go:noinline
+func isPosInfEq(x float64) bool {
+ return x == math.Inf(1)
+}
+
+//go:noinline
+func isPosInfCmp(x float64) bool {
+ return x > math.MaxFloat64
+}
+
+//go:noinline
+func isNotPosInf(x float64) bool {
+ return !math.IsInf(x, 1)
+}
+
+//go:noinline
+func isNotPosInfEq(x float64) bool {
+ return x != math.Inf(1)
+}
+
+//go:noinline
+func isNotPosInfCmp(x float64) bool {
+ return x <= math.MaxFloat64
+}
+
+//go:noinline
+func isNegInf(x float64) bool {
+ return math.IsInf(x, -1)
+}
+
+//go:noinline
+func isNegInfEq(x float64) bool {
+ return x == math.Inf(-1)
+}
+
+//go:noinline
+func isNegInfCmp(x float64) bool {
+ return x < -math.MaxFloat64
+}
+
+//go:noinline
+func isNotNegInf(x float64) bool {
+ return !math.IsInf(x, -1)
+}
+
+//go:noinline
+func isNotNegInfEq(x float64) bool {
+ return x != math.Inf(-1)
+}
+
+//go:noinline
+func isNotNegInfCmp(x float64) bool {
+ return x >= -math.MaxFloat64
+}
+
+func TestInf(t *testing.T) {
+ tests := []struct {
+ value float64
+ isPosInf bool
+ isNegInf bool
+ isNaN bool
+ }{
+ {value: math.Inf(1), isPosInf: true},
+ {value: math.MaxFloat64},
+ {value: math.Inf(-1), isNegInf: true},
+ {value: -math.MaxFloat64},
+ {value: math.NaN(), isNaN: true},
+ }
+
+ check := func(name string, f func(x float64) bool, value float64, want bool) {
+ got := f(value)
+ if got != want {
+ t.Errorf("%v(%g): want %v, got %v", name, value, want, got)
+ }
+ }
+
+ for _, test := range tests {
+ check("isPosInf", isPosInf, test.value, test.isPosInf)
+ check("isPosInfEq", isPosInfEq, test.value, test.isPosInf)
+ check("isPosInfCmp", isPosInfCmp, test.value, test.isPosInf)
+
+ check("isNotPosInf", isNotPosInf, test.value, !test.isPosInf)
+ check("isNotPosInfEq", isNotPosInfEq, test.value, !test.isPosInf)
+ check("isNotPosInfCmp", isNotPosInfCmp, test.value, !test.isPosInf && !test.isNaN)
+
+ check("isNegInf", isNegInf, test.value, test.isNegInf)
+ check("isNegInfEq", isNegInfEq, test.value, test.isNegInf)
+ check("isNegInfCmp", isNegInfCmp, test.value, test.isNegInf)
+
+ check("isNotNegInf", isNotNegInf, test.value, !test.isNegInf)
+ check("isNotNegInfEq", isNotNegInfEq, test.value, !test.isNegInf)
+ check("isNotNegInfCmp", isNotNegInfCmp, test.value, !test.isNegInf && !test.isNaN)
+ }
+}
+
var sinkFloat float64
func BenchmarkMul2(b *testing.B) {
diff --git a/src/cmd/compile/internal/test/mulconst_test.go b/src/cmd/compile/internal/test/mulconst_test.go
index c4aed84432de50..1d1b351af19640 100644
--- a/src/cmd/compile/internal/test/mulconst_test.go
+++ b/src/cmd/compile/internal/test/mulconst_test.go
@@ -143,7 +143,7 @@ func BenchmarkMulconstI32(b *testing.B) {
}
mulSinkI32 = x
})
- // -120x = 8x - 120x
+ // -120x = 8x - 128x
b.Run("-120", func(b *testing.B) {
x := int32(1)
for i := 0; i < b.N; i++ {
@@ -202,7 +202,7 @@ func BenchmarkMulconstI64(b *testing.B) {
}
mulSinkI64 = x
})
- // -120x = 8x - 120x
+ // -120x = 8x - 128x
b.Run("-120", func(b *testing.B) {
x := int64(1)
for i := 0; i < b.N; i++ {
diff --git a/src/cmd/compile/internal/typecheck/iexport.go b/src/cmd/compile/internal/typecheck/iexport.go
index 29d6b2cc2dcb55..f3498f60090118 100644
--- a/src/cmd/compile/internal/typecheck/iexport.go
+++ b/src/cmd/compile/internal/typecheck/iexport.go
@@ -235,27 +235,7 @@
package typecheck
-import (
- "strings"
-)
-
const blankMarker = "$"
-// TparamName returns the real name of a type parameter, after stripping its
-// qualifying prefix and reverting blank-name encoding. See TparamExportName
-// for details.
-func TparamName(exportName string) string {
- // Remove the "path" from the type param name that makes it unique.
- ix := strings.LastIndex(exportName, ".")
- if ix < 0 {
- return ""
- }
- name := exportName[ix+1:]
- if strings.HasPrefix(name, blankMarker) {
- return "_"
- }
- return name
-}
-
// The name used for dictionary parameters or local variables.
const LocalDictName = ".dict"
diff --git a/src/cmd/compile/internal/typecheck/stmt.go b/src/cmd/compile/internal/typecheck/stmt.go
index ac49f251bb93a4..2ca8e7fb861626 100644
--- a/src/cmd/compile/internal/typecheck/stmt.go
+++ b/src/cmd/compile/internal/typecheck/stmt.go
@@ -19,9 +19,6 @@ func RangeExprType(t *types.Type) *types.Type {
return t
}
-func typecheckrangeExpr(n *ir.RangeStmt) {
-}
-
// type check assignment.
// if this assignment is the definition of a var on the left side,
// fill in the var's type.
diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go
index 1c7f0a19e9158d..6a3e9b512e72d5 100644
--- a/src/cmd/compile/internal/types/type.go
+++ b/src/cmd/compile/internal/types/type.go
@@ -1694,13 +1694,6 @@ func fieldsHasShape(fields []*Field) bool {
return false
}
-// newBasic returns a new basic type of the given kind.
-func newBasic(kind Kind, obj Object) *Type {
- t := newType(kind)
- t.obj = obj
- return t
-}
-
// NewInterface returns a new interface for the given methods and
// embedded types. Embedded types are specified as fields with no Sym.
func NewInterface(methods []*Field) *Type {
diff --git a/src/cmd/compile/internal/types2/api.go b/src/cmd/compile/internal/types2/api.go
index 49cc0e54ecf750..8752eff99212e6 100644
--- a/src/cmd/compile/internal/types2/api.go
+++ b/src/cmd/compile/internal/types2/api.go
@@ -187,10 +187,6 @@ type Config struct {
EnableAlias bool
}
-func srcimporter_setUsesCgo(conf *Config) {
- conf.go115UsesCgo = true
-}
-
// Info holds result type information for a type-checked package.
// Only the information for which a map is provided is collected.
// If the package has type errors, the collected information may
diff --git a/src/cmd/compile/internal/types2/check.go b/src/cmd/compile/internal/types2/check.go
index 31a1aa2abe9290..411a1719ce4bb5 100644
--- a/src/cmd/compile/internal/types2/check.go
+++ b/src/cmd/compile/internal/types2/check.go
@@ -22,7 +22,7 @@ var nopos syntax.Pos
const debug = false // leave on during development
// position tracing for panics during type checking
-const tracePos = false // TODO(markfreeman): check performance implications
+const tracePos = true
// _aliasAny changes the behavior of [Scope.Lookup] for "any" in the
// [Universe] scope.
diff --git a/src/cmd/compile/internal/types2/compilersupport.go b/src/cmd/compile/internal/types2/compilersupport.go
index 20a13642887a7f..d29241a2ed70b9 100644
--- a/src/cmd/compile/internal/types2/compilersupport.go
+++ b/src/cmd/compile/internal/types2/compilersupport.go
@@ -13,12 +13,6 @@ func AsPointer(t Type) *Pointer {
return u
}
-// If t is a signature, AsSignature returns that type, otherwise it returns nil.
-func AsSignature(t Type) *Signature {
- u, _ := t.Underlying().(*Signature)
- return u
-}
-
// If typ is a type parameter, CoreType returns the single underlying
// type of all types in the corresponding type constraint if it exists, or
// nil otherwise. If the type set contains only unrestricted and restricted
diff --git a/src/cmd/compile/internal/types2/decl.go b/src/cmd/compile/internal/types2/decl.go
index 64047aa84ff42c..34105816a65af3 100644
--- a/src/cmd/compile/internal/types2/decl.go
+++ b/src/cmd/compile/internal/types2/decl.go
@@ -8,7 +8,6 @@ import (
"cmd/compile/internal/syntax"
"fmt"
"go/constant"
- "internal/buildcfg"
. "internal/types/errors"
"slices"
)
@@ -525,10 +524,6 @@ func (check *Checker) typeDecl(obj *TypeName, tdecl *syntax.TypeDecl, def *TypeN
// handle type parameters even if not allowed (Alias type is supported)
if tparam0 != nil {
- if !versionErr && !buildcfg.Experiment.AliasTypeParams {
- check.error(tdecl, UnsupportedFeature, "generic type alias requires GOEXPERIMENT=aliastypeparams")
- versionErr = true
- }
check.openScope(tdecl, "type parameters")
defer check.closeScope()
check.collectTypeParams(&alias.tparams, tdecl.TParamList)
diff --git a/src/cmd/compile/internal/types2/instantiate.go b/src/cmd/compile/internal/types2/instantiate.go
index f7346cab46d3e8..1c8c12d07cfafa 100644
--- a/src/cmd/compile/internal/types2/instantiate.go
+++ b/src/cmd/compile/internal/types2/instantiate.go
@@ -11,7 +11,6 @@ import (
"cmd/compile/internal/syntax"
"errors"
"fmt"
- "internal/buildcfg"
. "internal/types/errors"
)
@@ -130,10 +129,6 @@ func (check *Checker) instance(pos syntax.Pos, orig genericType, targs []Type, e
res = check.newNamedInstance(pos, orig, targs, expanding) // substituted lazily
case *Alias:
- if !buildcfg.Experiment.AliasTypeParams {
- assert(expanding == nil) // Alias instances cannot be reached from Named types
- }
-
// verify type parameter count (see go.dev/issue/71198 for a test case)
tparams := orig.TypeParams()
if !check.validateTArgLen(pos, orig.obj.Name(), tparams.Len(), len(targs)) {
diff --git a/src/cmd/compile/internal/types2/object_test.go b/src/cmd/compile/internal/types2/object_test.go
index f19264e4db6d41..4f1a653ff3be27 100644
--- a/src/cmd/compile/internal/types2/object_test.go
+++ b/src/cmd/compile/internal/types2/object_test.go
@@ -99,8 +99,7 @@ var testObjects = []struct {
{"type t = struct{f int}", "t", "type p.t = struct{f int}", false},
{"type t = func(int)", "t", "type p.t = func(int)", false},
{"type A = B; type B = int", "A", "type p.A = p.B", true},
- {"type A[P ~int] = struct{}", "A", "type p.A[P ~int] = struct{}", true}, // requires GOEXPERIMENT=aliastypeparams
-
+ {"type A[P ~int] = struct{}", "A", "type p.A[P ~int] = struct{}", true},
{"var v int", "v", "var p.v int", false},
{"func f(int) string", "f", "func p.f(int) string", false},
@@ -114,10 +113,6 @@ func TestObjectString(t *testing.T) {
for i, test := range testObjects {
t.Run(fmt.Sprint(i), func(t *testing.T) {
- if test.alias {
- revert := setGOEXPERIMENT("aliastypeparams")
- defer revert()
- }
src := "package p; " + test.src
conf := Config{Error: func(error) {}, Importer: defaultImporter(), EnableAlias: test.alias}
pkg, err := typecheck(src, &conf, nil)
diff --git a/src/cmd/compile/internal/types2/stdlib_test.go b/src/cmd/compile/internal/types2/stdlib_test.go
index 35e15d814defbb..365bc97462683b 100644
--- a/src/cmd/compile/internal/types2/stdlib_test.go
+++ b/src/cmd/compile/internal/types2/stdlib_test.go
@@ -332,6 +332,8 @@ func TestStdFixed(t *testing.T) {
"issue49814.go", // go/types does not have constraints on array size
"issue56103.go", // anonymous interface cycles; will be a type checker error in 1.22
"issue52697.go", // types2 does not have constraints on stack size
+ "issue68054.go", // this test requires GODEBUG=gotypesalias=1
+ "issue68580.go", // this test requires GODEBUG=gotypesalias=1
"issue73309.go", // this test requires GODEBUG=gotypesalias=1
"issue73309b.go", // this test requires GODEBUG=gotypesalias=1
diff --git a/src/cmd/compile/internal/walk/expr.go b/src/cmd/compile/internal/walk/expr.go
index fbfc56a39c3071..b9e226b20741fc 100644
--- a/src/cmd/compile/internal/walk/expr.go
+++ b/src/cmd/compile/internal/walk/expr.go
@@ -131,6 +131,14 @@ func walkExpr1(n ir.Node, init *ir.Nodes) ir.Node {
n := n.(*ir.BinaryExpr)
n.X = walkExpr(n.X, init)
n.Y = walkExpr(n.Y, init)
+ if n.Op() == ir.OUNSAFEADD && ir.ShouldCheckPtr(ir.CurFunc, 1) {
+ // For unsafe.Add(p, n), just walk "unsafe.Pointer(uintptr(p)+uintptr(n))"
+ // for the side effects of validating unsafe.Pointer rules.
+ x := typecheck.ConvNop(n.X, types.Types[types.TUINTPTR])
+ y := typecheck.Conv(n.Y, types.Types[types.TUINTPTR])
+ conv := typecheck.ConvNop(ir.NewBinaryExpr(n.Pos(), ir.OADD, x, y), types.Types[types.TUNSAFEPTR])
+ walkExpr(conv, init)
+ }
return n
case ir.OUNSAFESLICE:
diff --git a/src/cmd/dist/README b/src/cmd/dist/README
index 78a6d42d0a80be..d6005200407dfd 100644
--- a/src/cmd/dist/README
+++ b/src/cmd/dist/README
@@ -4,18 +4,17 @@ As of Go 1.5, dist and other parts of the compiler toolchain are written
in Go, making bootstrapping a little more involved than in the past.
The approach is to build the current release of Go with an earlier one.
-The process to install Go 1.x, for x ≥ 24, is:
+The process to install Go 1.x, for x ≥ 26, is:
-1. Build cmd/dist with Go 1.22.6.
-2. Using dist, build Go 1.x compiler toolchain with Go 1.22.6.
+1. Build cmd/dist with Go 1.24.6.
+2. Using dist, build Go 1.x compiler toolchain with Go 1.24.6.
3. Using dist, rebuild Go 1.x compiler toolchain with itself.
4. Using dist, build Go 1.x cmd/go (as go_bootstrap) with Go 1.x compiler toolchain.
5. Using go_bootstrap, build the remaining Go 1.x standard library and commands.
-Because of backward compatibility, although the steps above say Go 1.22.6,
-in practice any release ≥ Go 1.22.6 but < Go 1.x will work as the bootstrap base.
+Because of backward compatibility, although the steps above say Go 1.24.6,
+in practice any release ≥ Go 1.24.6 but < Go 1.x will work as the bootstrap base.
Releases ≥ Go 1.x are very likely to work as well.
-See https://go.dev/s/go15bootstrap for more details about the original bootstrap
-and https://go.dev/issue/54265 for details about later bootstrap version bumps.
-
+See go.dev/s/go15bootstrap for more details about the original bootstrap
+and go.dev/issue/54265 for details about later bootstrap version bumps.
diff --git a/src/cmd/dist/build.go b/src/cmd/dist/build.go
index 024050c2dd70c4..fb70047dd0e81c 100644
--- a/src/cmd/dist/build.go
+++ b/src/cmd/dist/build.go
@@ -1819,7 +1819,6 @@ var cgoEnabled = map[string]bool{
"solaris/amd64": true,
"windows/386": true,
"windows/amd64": true,
- "windows/arm": false,
"windows/arm64": true,
}
@@ -1828,9 +1827,9 @@ var cgoEnabled = map[string]bool{
// get filtered out of cgoEnabled for 'dist list'.
// See go.dev/issue/56679.
var broken = map[string]bool{
- "linux/sparc64": true, // An incomplete port. See CL 132155.
- "openbsd/mips64": true, // Broken: go.dev/issue/58110.
- "windows/arm": true, // Broken: go.dev/issue/68552.
+ "freebsd/riscv64": true, // Broken: go.dev/issue/73568.
+ "linux/sparc64": true, // An incomplete port. See CL 132155.
+ "openbsd/mips64": true, // Broken: go.dev/issue/58110.
}
// List of platforms which are first class ports. See go.dev/issue/38874.
diff --git a/src/cmd/dist/buildtool.go b/src/cmd/dist/buildtool.go
index 013b769b90f0e2..b7e58919815c8a 100644
--- a/src/cmd/dist/buildtool.go
+++ b/src/cmd/dist/buildtool.go
@@ -121,7 +121,7 @@ var ignoreSuffixes = []string{
"~",
}
-const minBootstrap = "go1.22.6"
+const minBootstrap = "go1.24.6"
var tryDirs = []string{
"sdk/" + minBootstrap,
diff --git a/src/cmd/dist/imports.go b/src/cmd/dist/imports.go
index 05dd84d0f12a0a..0ec2b06c4f6e7a 100644
--- a/src/cmd/dist/imports.go
+++ b/src/cmd/dist/imports.go
@@ -205,18 +205,6 @@ func (r *importReader) readImport(imports *[]string) {
r.readString(imports)
}
-// readComments is like ioutil.ReadAll, except that it only reads the leading
-// block of comments in the file.
-func readComments(f io.Reader) ([]byte, error) {
- r := &importReader{b: bufio.NewReader(f)}
- r.peekByte(true)
- if r.err == nil && !r.eof {
- // Didn't reach EOF, so must have found a non-space byte. Remove it.
- r.buf = r.buf[:len(r.buf)-1]
- }
- return r.buf, r.err
-}
-
// readimports returns the imports found in the named file.
func readimports(file string) []string {
var imports []string
diff --git a/src/cmd/dist/notgo122.go b/src/cmd/dist/notgo124.go
similarity index 62%
rename from src/cmd/dist/notgo122.go
rename to src/cmd/dist/notgo124.go
index 229a26e757b20c..dc6ef4d8bd7fe1 100644
--- a/src/cmd/dist/notgo122.go
+++ b/src/cmd/dist/notgo124.go
@@ -2,20 +2,20 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Go 1.24 and later requires Go 1.22.6 as the bootstrap toolchain.
+// Go 1.26 and later require Go 1.24.6 as the minimum bootstrap toolchain.
// If cmd/dist is built using an earlier Go version, this file will be
// included in the build and cause an error like:
//
// % GOROOT_BOOTSTRAP=$HOME/sdk/go1.16 ./make.bash
// Building Go cmd/dist using /Users/rsc/sdk/go1.16. (go1.16 darwin/amd64)
-// found packages main (build.go) and building_Go_requires_Go_1_22_6_or_later (notgo122.go) in /Users/rsc/go/src/cmd/dist
+// found packages main (build.go) and building_Go_requires_Go_1_24_6_or_later (notgo124.go) in /Users/rsc/go/src/cmd/dist
// %
//
// which is the best we can do under the circumstances.
//
-// See go.dev/issue/44505 for more background on
-// why Go moved on from Go 1.4 for bootstrap.
+// See go.dev/issue/44505 and go.dev/issue/54265 for more
+// background on why Go moved on from Go 1.4 for bootstrap.
-//go:build !go1.22
+//go:build !go1.24
-package building_Go_requires_Go_1_22_6_or_later
+package building_Go_requires_Go_1_24_6_or_later
diff --git a/src/cmd/dist/sys_windows.go b/src/cmd/dist/sys_windows.go
index 37dffb8541447e..fbcbf6fb887048 100644
--- a/src/cmd/dist/sys_windows.go
+++ b/src/cmd/dist/sys_windows.go
@@ -33,7 +33,6 @@ type systeminfo struct {
const (
PROCESSOR_ARCHITECTURE_AMD64 = 9
PROCESSOR_ARCHITECTURE_INTEL = 0
- PROCESSOR_ARCHITECTURE_ARM = 5
PROCESSOR_ARCHITECTURE_ARM64 = 12
PROCESSOR_ARCHITECTURE_IA64 = 6
)
@@ -47,8 +46,6 @@ func sysinit() {
gohostarch = "amd64"
case PROCESSOR_ARCHITECTURE_INTEL:
gohostarch = "386"
- case PROCESSOR_ARCHITECTURE_ARM:
- gohostarch = "arm"
case PROCESSOR_ARCHITECTURE_ARM64:
gohostarch = "arm64"
default:
diff --git a/src/cmd/dist/util.go b/src/cmd/dist/util.go
index 4d5e3589dc27d3..7db06f86ea7297 100644
--- a/src/cmd/dist/util.go
+++ b/src/cmd/dist/util.go
@@ -362,16 +362,6 @@ func errprintf(format string, args ...interface{}) {
fmt.Fprintf(os.Stderr, format, args...)
}
-// xsamefile reports whether f1 and f2 are the same file (or dir).
-func xsamefile(f1, f2 string) bool {
- fi1, err1 := os.Stat(f1)
- fi2, err2 := os.Stat(f2)
- if err1 != nil || err2 != nil {
- return f1 == f2
- }
- return os.SameFile(fi1, fi2)
-}
-
func xgetgoarm() string {
// If we're building on an actual arm system, and not building
// a cross-compiling toolchain, try to exec ourselves
diff --git a/src/cmd/go.mod b/src/cmd/go.mod
index 5624b81cc51bac..46630f4ed83f05 100644
--- a/src/cmd/go.mod
+++ b/src/cmd/go.mod
@@ -1,21 +1,21 @@
module cmd
-go 1.25
+go 1.26
require (
- github.com/google/pprof v0.0.0-20250208200701-d0013a598941
- golang.org/x/arch v0.18.1-0.20250605182141-b2f4e2807dec
- golang.org/x/build v0.0.0-20250606033421-8c8ff6f34a83
- golang.org/x/mod v0.25.0
- golang.org/x/sync v0.15.0
- golang.org/x/sys v0.33.0
- golang.org/x/telemetry v0.0.0-20250606142133-60998feb31a8
- golang.org/x/term v0.32.0
- golang.org/x/tools v0.34.0
+ github.com/google/pprof v0.0.0-20250630185457-6e76a2b096b5
+ golang.org/x/arch v0.20.1-0.20250808194827-46ba08e3ae58
+ golang.org/x/build v0.0.0-20250806225920-b7c66c047964
+ golang.org/x/mod v0.27.0
+ golang.org/x/sync v0.16.0
+ golang.org/x/sys v0.35.0
+ golang.org/x/telemetry v0.0.0-20250807160809-1a19826ec488
+ golang.org/x/term v0.34.0
+ golang.org/x/tools v0.36.1-0.20250808220315-8866876b956f
)
require (
- github.com/ianlancetaylor/demangle v0.0.0-20240912202439-0a2b6291aafd // indirect
- golang.org/x/text v0.26.0 // indirect
+ github.com/ianlancetaylor/demangle v0.0.0-20250417193237-f615e6bd150b // indirect
+ golang.org/x/text v0.28.0 // indirect
rsc.io/markdown v0.0.0-20240306144322-0bf8f97ee8ef // indirect
)
diff --git a/src/cmd/go.sum b/src/cmd/go.sum
index d5d1553e5b7829..57c506936c4475 100644
--- a/src/cmd/go.sum
+++ b/src/cmd/go.sum
@@ -1,28 +1,28 @@
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/pprof v0.0.0-20250208200701-d0013a598941 h1:43XjGa6toxLpeksjcxs1jIoIyr+vUfOqY2c6HB4bpoc=
-github.com/google/pprof v0.0.0-20250208200701-d0013a598941/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
-github.com/ianlancetaylor/demangle v0.0.0-20240912202439-0a2b6291aafd h1:EVX1s+XNss9jkRW9K6XGJn2jL2lB1h5H804oKPsxOec=
-github.com/ianlancetaylor/demangle v0.0.0-20240912202439-0a2b6291aafd/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
+github.com/google/pprof v0.0.0-20250630185457-6e76a2b096b5 h1:xhMrHhTJ6zxu3gA4enFM9MLn9AY7613teCdFnlUVbSQ=
+github.com/google/pprof v0.0.0-20250630185457-6e76a2b096b5/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA=
+github.com/ianlancetaylor/demangle v0.0.0-20250417193237-f615e6bd150b h1:ogbOPx86mIhFy764gGkqnkFC8m5PJA7sPzlk9ppLVQA=
+github.com/ianlancetaylor/demangle v0.0.0-20250417193237-f615e6bd150b/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
github.com/yuin/goldmark v1.6.0 h1:boZcn2GTjpsynOsC0iJHnBWa4Bi0qzfJjthwauItG68=
github.com/yuin/goldmark v1.6.0/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
-golang.org/x/arch v0.18.1-0.20250605182141-b2f4e2807dec h1:fCOjXc18tBlkVy4m+VuL1WU8VTukYOGtAk7nC5QYPRY=
-golang.org/x/arch v0.18.1-0.20250605182141-b2f4e2807dec/go.mod h1:bdwinDaKcfZUGpH09BB7ZmOfhalA8lQdzl62l8gGWsk=
-golang.org/x/build v0.0.0-20250606033421-8c8ff6f34a83 h1:IiFSc399rOkpudtnsTDKdtfFEsvd+dGfNfl+ytV267c=
-golang.org/x/build v0.0.0-20250606033421-8c8ff6f34a83/go.mod h1:SDzKvZFXqZyl3tLink1AnKsAocWm0yFc3UfmxR6aIOw=
-golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w=
-golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
-golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8=
-golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
-golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
-golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
-golang.org/x/telemetry v0.0.0-20250606142133-60998feb31a8 h1:jBJ3nsFeGb1DzjhOg2ZgZTpnDnOZfHId7RNlBJUtkOM=
-golang.org/x/telemetry v0.0.0-20250606142133-60998feb31a8/go.mod h1:mUcjA5g0luJpMYCLjhH91f4t4RAUNp+zq9ZmUoqPD7M=
-golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg=
-golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ=
-golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=
-golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
-golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo=
-golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg=
+golang.org/x/arch v0.20.1-0.20250808194827-46ba08e3ae58 h1:uxPa6+/WsUfzikIAPMqpTho10y4qtYpINBurU+6NrHE=
+golang.org/x/arch v0.20.1-0.20250808194827-46ba08e3ae58/go.mod h1:bdwinDaKcfZUGpH09BB7ZmOfhalA8lQdzl62l8gGWsk=
+golang.org/x/build v0.0.0-20250806225920-b7c66c047964 h1:yRs1K51GKq7hsIO+YHJ8LsslrvwFceNPIv0tYjpcBd0=
+golang.org/x/build v0.0.0-20250806225920-b7c66c047964/go.mod h1:i9Vx7+aOQUpYJRxSO+OpRStVBCVL/9ccI51xblWm5WY=
+golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ=
+golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc=
+golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
+golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
+golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/telemetry v0.0.0-20250807160809-1a19826ec488 h1:3doPGa+Gg4snce233aCWnbZVFsyFMo/dR40KK/6skyE=
+golang.org/x/telemetry v0.0.0-20250807160809-1a19826ec488/go.mod h1:fGb/2+tgXXjhjHsTNdVEEMZNWA0quBnfrO+AfoDSAKw=
+golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4=
+golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw=
+golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
+golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
+golang.org/x/tools v0.36.1-0.20250808220315-8866876b956f h1:9m2Iptt9ZZU5llKDJy1XUl5d13PN1ZYV16KwOvE6jOw=
+golang.org/x/tools v0.36.1-0.20250808220315-8866876b956f/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s=
rsc.io/markdown v0.0.0-20240306144322-0bf8f97ee8ef h1:mqLYrXCXYEZOop9/Dbo6RPX11539nwiCNBb1icVPmw8=
rsc.io/markdown v0.0.0-20240306144322-0bf8f97ee8ef/go.mod h1:8xcPgWmwlZONN1D9bjxtHEjrUtSEa3fakVF8iaewYKQ=
diff --git a/src/cmd/go/alldocs.go b/src/cmd/go/alldocs.go
index 7403b92cd14a3c..e472f25eea8322 100644
--- a/src/cmd/go/alldocs.go
+++ b/src/cmd/go/alldocs.go
@@ -2527,7 +2527,7 @@
// The default is GOFIPS140=off, which makes no FIPS-140 changes at all.
// Other values enable FIPS-140 compliance measures and select alternate
// versions of the cryptography source code.
-// See https://go.dev/security/fips140 for details.
+// See https://go.dev/doc/security/fips140 for details.
// GO_EXTLINK_ENABLED
// Whether the linker should use external linking mode
// when using -linkmode=auto with code that uses cgo.
diff --git a/src/cmd/go/internal/gover/toolchain.go b/src/cmd/go/internal/gover/toolchain.go
index 43b117edcf0023..a24df98168056b 100644
--- a/src/cmd/go/internal/gover/toolchain.go
+++ b/src/cmd/go/internal/gover/toolchain.go
@@ -52,16 +52,6 @@ func maybeToolchainVersion(name string) string {
return FromToolchain(name)
}
-// ToolchainMax returns the maximum of x and y interpreted as toolchain names,
-// compared using Compare(FromToolchain(x), FromToolchain(y)).
-// If x and y compare equal, Max returns x.
-func ToolchainMax(x, y string) string {
- if Compare(FromToolchain(x), FromToolchain(y)) < 0 {
- return y
- }
- return x
-}
-
// Startup records the information that went into the startup-time version switch.
// It is initialized by switchGoToolchain.
var Startup struct {
diff --git a/src/cmd/go/internal/help/helpdoc.go b/src/cmd/go/internal/help/helpdoc.go
index 7f8565a3cbab82..9d44b862eec3e5 100644
--- a/src/cmd/go/internal/help/helpdoc.go
+++ b/src/cmd/go/internal/help/helpdoc.go
@@ -695,7 +695,7 @@ Special-purpose environment variables:
The default is GOFIPS140=off, which makes no FIPS-140 changes at all.
Other values enable FIPS-140 compliance measures and select alternate
versions of the cryptography source code.
- See https://go.dev/security/fips140 for details.
+ See https://go.dev/doc/security/fips140 for details.
GO_EXTLINK_ENABLED
Whether the linker should use external linking mode
when using -linkmode=auto with code that uses cgo.
diff --git a/src/cmd/go/internal/lockedfile/internal/filelock/filelock.go b/src/cmd/go/internal/lockedfile/internal/filelock/filelock.go
index d37331892d1efb..f0452f014777f9 100644
--- a/src/cmd/go/internal/lockedfile/internal/filelock/filelock.go
+++ b/src/cmd/go/internal/lockedfile/internal/filelock/filelock.go
@@ -8,7 +8,6 @@
package filelock
import (
- "errors"
"io/fs"
)
@@ -74,10 +73,3 @@ func (lt lockType) String() string {
return "Unlock"
}
}
-
-// IsNotSupported returns a boolean indicating whether the error is known to
-// report that a function is not supported (possibly for a specific input).
-// It is satisfied by errors.ErrUnsupported as well as some syscall errors.
-func IsNotSupported(err error) bool {
- return errors.Is(err, errors.ErrUnsupported)
-}
diff --git a/src/cmd/go/internal/modfetch/codehost/git.go b/src/cmd/go/internal/modfetch/codehost/git.go
index b445ac24862202..8a1c12b90a5c07 100644
--- a/src/cmd/go/internal/modfetch/codehost/git.go
+++ b/src/cmd/go/internal/modfetch/codehost/git.go
@@ -387,23 +387,6 @@ func (r *gitRepo) Latest(ctx context.Context) (*RevInfo, error) {
return info, nil
}
-// findRef finds some ref name for the given hash,
-// for use when the server requires giving a ref instead of a hash.
-// There may be multiple ref names for a given hash,
-// in which case this returns some name - it doesn't matter which.
-func (r *gitRepo) findRef(ctx context.Context, hash string) (ref string, ok bool) {
- refs, err := r.loadRefs(ctx)
- if err != nil {
- return "", false
- }
- for ref, h := range refs {
- if h == hash {
- return ref, true
- }
- }
- return "", false
-}
-
func (r *gitRepo) checkConfigSHA256(ctx context.Context) bool {
if hashType, sha256CfgErr := r.runGit(ctx, "git", "config", "extensions.objectformat"); sha256CfgErr == nil {
return "sha256" == strings.TrimSpace(string(hashType))
diff --git a/src/cmd/go/internal/modfetch/coderepo.go b/src/cmd/go/internal/modfetch/coderepo.go
index afed35c970975e..3df469d28525fa 100644
--- a/src/cmd/go/internal/modfetch/coderepo.go
+++ b/src/cmd/go/internal/modfetch/coderepo.go
@@ -1009,10 +1009,6 @@ func LegacyGoMod(modPath string) []byte {
return fmt.Appendf(nil, "module %s\n", modfile.AutoQuote(modPath))
}
-func (r *codeRepo) modPrefix(rev string) string {
- return r.modPath + "@" + rev
-}
-
func (r *codeRepo) retractedVersions(ctx context.Context) (func(string) bool, error) {
vs, err := r.Versions(ctx, "")
if err != nil {
diff --git a/src/cmd/go/internal/modget/get.go b/src/cmd/go/internal/modget/get.go
index 31e9244e2dce54..25dbf3972fd465 100644
--- a/src/cmd/go/internal/modget/get.go
+++ b/src/cmd/go/internal/modget/get.go
@@ -453,7 +453,7 @@ func updateTools(ctx context.Context, queries []*query, opts *modload.WriteOpts)
if queries[i].version == "none" {
opts.DropTools = append(opts.DropTools, m.Pkgs...)
} else {
- opts.AddTools = append(opts.DropTools, m.Pkgs...)
+ opts.AddTools = append(opts.AddTools, m.Pkgs...)
}
}
}
diff --git a/src/cmd/go/internal/modindex/build.go b/src/cmd/go/internal/modindex/build.go
index d7e09fed25f43a..761bda8d39b158 100644
--- a/src/cmd/go/internal/modindex/build.go
+++ b/src/cmd/go/internal/modindex/build.go
@@ -10,7 +10,6 @@ package modindex
import (
"bytes"
"cmd/go/internal/fsys"
- "cmd/go/internal/str"
"errors"
"fmt"
"go/ast"
@@ -118,96 +117,12 @@ func (ctxt *Context) joinPath(elem ...string) string {
return filepath.Join(elem...)
}
-// splitPathList calls ctxt.SplitPathList (if not nil) or else filepath.SplitList.
-func (ctxt *Context) splitPathList(s string) []string {
- if f := ctxt.SplitPathList; f != nil {
- return f(s)
- }
- return filepath.SplitList(s)
-}
-
-// isAbsPath calls ctxt.IsAbsPath (if not nil) or else filepath.IsAbs.
-func (ctxt *Context) isAbsPath(path string) bool {
- if f := ctxt.IsAbsPath; f != nil {
- return f(path)
- }
- return filepath.IsAbs(path)
-}
-
// isDir reports whether path is a directory.
func isDir(path string) bool {
fi, err := fsys.Stat(path)
return err == nil && fi.IsDir()
}
-// hasSubdir calls ctxt.HasSubdir (if not nil) or else uses
-// the local file system to answer the question.
-func (ctxt *Context) hasSubdir(root, dir string) (rel string, ok bool) {
- if f := ctxt.HasSubdir; f != nil {
- return f(root, dir)
- }
-
- // Try using paths we received.
- if rel, ok = hasSubdir(root, dir); ok {
- return
- }
-
- // Try expanding symlinks and comparing
- // expanded against unexpanded and
- // expanded against expanded.
- rootSym, _ := filepath.EvalSymlinks(root)
- dirSym, _ := filepath.EvalSymlinks(dir)
-
- if rel, ok = hasSubdir(rootSym, dir); ok {
- return
- }
- if rel, ok = hasSubdir(root, dirSym); ok {
- return
- }
- return hasSubdir(rootSym, dirSym)
-}
-
-// hasSubdir reports if dir is within root by performing lexical analysis only.
-func hasSubdir(root, dir string) (rel string, ok bool) {
- root = str.WithFilePathSeparator(filepath.Clean(root))
- dir = filepath.Clean(dir)
- if !strings.HasPrefix(dir, root) {
- return "", false
- }
- return filepath.ToSlash(dir[len(root):]), true
-}
-
-// gopath returns the list of Go path directories.
-func (ctxt *Context) gopath() []string {
- var all []string
- for _, p := range ctxt.splitPathList(ctxt.GOPATH) {
- if p == "" || p == ctxt.GOROOT {
- // Empty paths are uninteresting.
- // If the path is the GOROOT, ignore it.
- // People sometimes set GOPATH=$GOROOT.
- // Do not get confused by this common mistake.
- continue
- }
- if strings.HasPrefix(p, "~") {
- // Path segments starting with ~ on Unix are almost always
- // users who have incorrectly quoted ~ while setting GOPATH,
- // preventing it from expanding to $HOME.
- // The situation is made more confusing by the fact that
- // bash allows quoted ~ in $PATH (most shells do not).
- // Do not get confused by this, and do not try to use the path.
- // It does not exist, and printing errors about it confuses
- // those users even more, because they think "sure ~ exists!".
- // The go command diagnoses this situation and prints a
- // useful error.
- // On Windows, ~ is used in short names, such as c:\progra~1
- // for c:\program files.
- continue
- }
- all = append(all, p)
- }
- return all
-}
-
var defaultToolTags, defaultReleaseTags []string
// NoGoError is the error used by Import to describe a directory
@@ -266,114 +181,12 @@ func fileListForExt(p *build.Package, ext string) *[]string {
return nil
}
-var errNoModules = errors.New("not using modules")
-
-func findImportComment(data []byte) (s string, line int) {
- // expect keyword package
- word, data := parseWord(data)
- if string(word) != "package" {
- return "", 0
- }
-
- // expect package name
- _, data = parseWord(data)
-
- // now ready for import comment, a // or /* */ comment
- // beginning and ending on the current line.
- for len(data) > 0 && (data[0] == ' ' || data[0] == '\t' || data[0] == '\r') {
- data = data[1:]
- }
-
- var comment []byte
- switch {
- case bytes.HasPrefix(data, slashSlash):
- comment, _, _ = bytes.Cut(data[2:], newline)
- case bytes.HasPrefix(data, slashStar):
- var ok bool
- comment, _, ok = bytes.Cut(data[2:], starSlash)
- if !ok {
- // malformed comment
- return "", 0
- }
- if bytes.Contains(comment, newline) {
- return "", 0
- }
- }
- comment = bytes.TrimSpace(comment)
-
- // split comment into `import`, `"pkg"`
- word, arg := parseWord(comment)
- if string(word) != "import" {
- return "", 0
- }
-
- line = 1 + bytes.Count(data[:cap(data)-cap(arg)], newline)
- return strings.TrimSpace(string(arg)), line
-}
-
var (
slashSlash = []byte("//")
slashStar = []byte("/*")
starSlash = []byte("*/")
- newline = []byte("\n")
)
-// skipSpaceOrComment returns data with any leading spaces or comments removed.
-func skipSpaceOrComment(data []byte) []byte {
- for len(data) > 0 {
- switch data[0] {
- case ' ', '\t', '\r', '\n':
- data = data[1:]
- continue
- case '/':
- if bytes.HasPrefix(data, slashSlash) {
- i := bytes.Index(data, newline)
- if i < 0 {
- return nil
- }
- data = data[i+1:]
- continue
- }
- if bytes.HasPrefix(data, slashStar) {
- data = data[2:]
- i := bytes.Index(data, starSlash)
- if i < 0 {
- return nil
- }
- data = data[i+2:]
- continue
- }
- }
- break
- }
- return data
-}
-
-// parseWord skips any leading spaces or comments in data
-// and then parses the beginning of data as an identifier or keyword,
-// returning that word and what remains after the word.
-func parseWord(data []byte) (word, rest []byte) {
- data = skipSpaceOrComment(data)
-
- // Parse past leading word characters.
- rest = data
- for {
- r, size := utf8.DecodeRune(rest)
- if unicode.IsLetter(r) || '0' <= r && r <= '9' || r == '_' {
- rest = rest[size:]
- continue
- }
- break
- }
-
- word = data[:len(data)-len(rest)]
- if len(word) == 0 {
- return nil, nil
- }
-
- return word, rest
-}
-
var dummyPkg build.Package
// fileInfo records information learned about a file included in a build.
diff --git a/src/cmd/go/internal/modindex/read.go b/src/cmd/go/internal/modindex/read.go
index d87fb06b57e173..399e89eca3cf47 100644
--- a/src/cmd/go/internal/modindex/read.go
+++ b/src/cmd/go/internal/modindex/read.go
@@ -1039,11 +1039,6 @@ func (r *reader) string() string {
return r.d.stringTableAt(r.int())
}
-// bool reads the next bool.
-func (r *reader) bool() bool {
- return r.int() != 0
-}
-
// tokpos reads the next token.Position.
func (r *reader) tokpos() token.Position {
return token.Position{
diff --git a/src/cmd/go/internal/modload/buildlist.go b/src/cmd/go/internal/modload/buildlist.go
index eefc0083c93ddb..cd3ec4f102473b 100644
--- a/src/cmd/go/internal/modload/buildlist.go
+++ b/src/cmd/go/internal/modload/buildlist.go
@@ -658,11 +658,6 @@ func EditBuildList(ctx context.Context, add, mustSelect []module.Version) (chang
return changed, nil
}
-// OverrideRoots edits the global requirement roots by replacing the specific module versions.
-func OverrideRoots(ctx context.Context, replace []module.Version) {
- requirements = overrideRoots(ctx, requirements, replace)
-}
-
func overrideRoots(ctx context.Context, rs *Requirements, replace []module.Version) *Requirements {
drop := make(map[string]bool)
for _, m := range replace {
diff --git a/src/cmd/go/internal/modload/init.go b/src/cmd/go/internal/modload/init.go
index cb9d74df68cb5b..25151103edb579 100644
--- a/src/cmd/go/internal/modload/init.go
+++ b/src/cmd/go/internal/modload/init.go
@@ -305,30 +305,6 @@ func (mms *MainModuleSet) Godebugs() []*modfile.Godebug {
return nil
}
-// Toolchain returns the toolchain set on the single module, in module mode,
-// or the go.work file in workspace mode.
-func (mms *MainModuleSet) Toolchain() string {
- if inWorkspaceMode() {
- if mms.workFile != nil && mms.workFile.Toolchain != nil {
- return mms.workFile.Toolchain.Name
- }
- return "go" + mms.GoVersion()
- }
- if mms != nil && len(mms.versions) == 1 {
- f := mms.ModFile(mms.mustGetSingleMainModule())
- if f == nil {
- // Special case: we are outside a module, like 'go run x.go'.
- // Assume the local Go version.
- // TODO(#49228): Clean this up; see loadModFile.
- return gover.LocalToolchain()
- }
- if f.Toolchain != nil {
- return f.Toolchain.Name
- }
- }
- return "go" + mms.GoVersion()
-}
-
func (mms *MainModuleSet) WorkFileReplaceMap() map[module.Version]module.Version {
return mms.workFileReplaceMap
}
diff --git a/src/cmd/go/internal/work/gc.go b/src/cmd/go/internal/work/gc.go
index 39a1f5f74c228c..71c36e80cbaaab 100644
--- a/src/cmd/go/internal/work/gc.go
+++ b/src/cmd/go/internal/work/gc.go
@@ -6,7 +6,6 @@ package work
import (
"bufio"
- "bytes"
"fmt"
"internal/buildcfg"
"internal/platform"
@@ -438,32 +437,6 @@ func (gcToolchain) symabis(b *Builder, a *Action, sfiles []string) (string, erro
return symabis, nil
}
-// toolVerify checks that the command line args writes the same output file
-// if run using newTool instead.
-// Unused now but kept around for future use.
-func toolVerify(a *Action, b *Builder, p *load.Package, newTool string, ofile string, args []any) error {
- newArgs := make([]any, len(args))
- copy(newArgs, args)
- newArgs[1] = base.Tool(newTool)
- newArgs[3] = ofile + ".new" // x.6 becomes x.6.new
- if err := b.Shell(a).run(p.Dir, p.ImportPath, nil, newArgs...); err != nil {
- return err
- }
- data1, err := os.ReadFile(ofile)
- if err != nil {
- return err
- }
- data2, err := os.ReadFile(ofile + ".new")
- if err != nil {
- return err
- }
- if !bytes.Equal(data1, data2) {
- return fmt.Errorf("%s and %s produced different output files:\n%s\n%s", filepath.Base(args[1].(string)), newTool, strings.Join(str.StringList(args...), " "), strings.Join(str.StringList(newArgs...), " "))
- }
- os.Remove(ofile + ".new")
- return nil
-}
-
func (gcToolchain) pack(b *Builder, a *Action, afile string, ofiles []string) error {
absOfiles := make([]string, 0, len(ofiles))
for _, f := range ofiles {
diff --git a/src/cmd/go/testdata/script/mod_get_tool_issue74035.txt b/src/cmd/go/testdata/script/mod_get_tool_issue74035.txt
new file mode 100644
index 00000000000000..d6fa592c7b2726
--- /dev/null
+++ b/src/cmd/go/testdata/script/mod_get_tool_issue74035.txt
@@ -0,0 +1,25 @@
+# Regression test for https://go.dev/issue/74035.
+go get -tool example.com/foo/cmd/a example.com/foo/cmd/b
+cmp go.mod go.mod.want
+
+-- go.mod --
+module example.com/foo
+go 1.24
+-- go.mod.want --
+module example.com/foo
+
+go 1.24
+
+tool (
+ example.com/foo/cmd/a
+ example.com/foo/cmd/b
+)
+-- cmd/a/a.go --
+package a
+
+func main() {}
+
+-- cmd/b/b.go --
+package b
+
+func main() {}
diff --git a/src/cmd/internal/archive/archive.go b/src/cmd/internal/archive/archive.go
index 393034d7769f2d..b8abc0d4f648fd 100644
--- a/src/cmd/internal/archive/archive.go
+++ b/src/cmd/internal/archive/archive.go
@@ -498,20 +498,6 @@ func exactly16Bytes(s string) string {
// architecture-independent object file output
const HeaderSize = 60
-func ReadHeader(b *bufio.Reader, name string) int {
- var buf [HeaderSize]byte
- if _, err := io.ReadFull(b, buf[:]); err != nil {
- return -1
- }
- aname := strings.Trim(string(buf[0:16]), " ")
- if !strings.HasPrefix(aname, name) {
- return -1
- }
- asize := strings.Trim(string(buf[48:58]), " ")
- i, _ := strconv.Atoi(asize)
- return i
-}
-
func FormatHeader(arhdr []byte, name string, size int64) {
copy(arhdr[:], fmt.Sprintf("%-16s%-12d%-6d%-6d%-8o%-10d`\n", name, 0, 0, 0, 0644, size))
}
diff --git a/src/cmd/internal/gcprog/gcprog.go b/src/cmd/internal/gcprog/gcprog.go
index eeea53daf4a5e9..52505f3b20243d 100644
--- a/src/cmd/internal/gcprog/gcprog.go
+++ b/src/cmd/internal/gcprog/gcprog.go
@@ -56,11 +56,6 @@ func (w *Writer) Debug(out io.Writer) {
w.debug = out
}
-// BitIndex returns the number of bits written to the bit stream so far.
-func (w *Writer) BitIndex() int64 {
- return w.index
-}
-
// byte writes the byte x to the output.
func (w *Writer) byte(x byte) {
if w.debug != nil {
@@ -98,20 +93,6 @@ func (w *Writer) Ptr(index int64) {
w.lit(1)
}
-// ShouldRepeat reports whether it would be worthwhile to
-// use a Repeat to describe c elements of n bits each,
-// compared to just emitting c copies of the n-bit description.
-func (w *Writer) ShouldRepeat(n, c int64) bool {
- // Should we lay out the bits directly instead of
- // encoding them as a repetition? Certainly if count==1,
- // since there's nothing to repeat, but also if the total
- // size of the plain pointer bits for the type will fit in
- // 4 or fewer bytes, since using a repetition will require
- // flushing the current bits plus at least one byte for
- // the repeat size and one for the repeat count.
- return c > 1 && c*n > 4*8
-}
-
// Repeat emits an instruction to repeat the description
// of the last n words c times (including the initial description, c+1 times in total).
func (w *Writer) Repeat(n, c int64) {
@@ -163,36 +144,6 @@ func (w *Writer) ZeroUntil(index int64) {
w.Repeat(1, skip-1)
}
-// Append emits the given GC program into the current output.
-// The caller asserts that the program emits n bits (describes n words),
-// and Append panics if that is not true.
-func (w *Writer) Append(prog []byte, n int64) {
- w.flushlit()
- if w.debug != nil {
- fmt.Fprintf(w.debug, "gcprog: append prog for %d ptrs\n", n)
- fmt.Fprintf(w.debug, "\t")
- }
- n1 := progbits(prog)
- if n1 != n {
- panic("gcprog: wrong bit count in append")
- }
- // The last byte of the prog terminates the program.
- // Don't emit that, or else our own program will end.
- for i, x := range prog[:len(prog)-1] {
- if w.debug != nil {
- if i > 0 {
- fmt.Fprintf(w.debug, " ")
- }
- fmt.Fprintf(w.debug, "%02x", x)
- }
- w.byte(x)
- }
- if w.debug != nil {
- fmt.Fprintf(w.debug, "\n")
- }
- w.index += n
-}
-
// progbits returns the length of the bit stream encoded by the program p.
func progbits(p []byte) int64 {
var n int64
diff --git a/src/cmd/internal/goobj/objfile.go b/src/cmd/internal/goobj/objfile.go
index a9342427efbc09..38da67076d5bb2 100644
--- a/src/cmd/internal/goobj/objfile.go
+++ b/src/cmd/internal/goobj/objfile.go
@@ -635,29 +635,11 @@ func (r *Reader) uint64At(off uint32) uint64 {
return binary.LittleEndian.Uint64(b)
}
-func (r *Reader) int64At(off uint32) int64 {
- return int64(r.uint64At(off))
-}
-
func (r *Reader) uint32At(off uint32) uint32 {
b := r.BytesAt(off, 4)
return binary.LittleEndian.Uint32(b)
}
-func (r *Reader) int32At(off uint32) int32 {
- return int32(r.uint32At(off))
-}
-
-func (r *Reader) uint16At(off uint32) uint16 {
- b := r.BytesAt(off, 2)
- return binary.LittleEndian.Uint16(b)
-}
-
-func (r *Reader) uint8At(off uint32) uint8 {
- b := r.BytesAt(off, 1)
- return b[0]
-}
-
func (r *Reader) StringAt(off uint32, len uint32) string {
b := r.b[off : off+len]
if r.readonly {
diff --git a/src/cmd/internal/obj/arm64/asm7.go b/src/cmd/internal/obj/arm64/asm7.go
index 344b73e658f11e..0c9c70aa89aee1 100644
--- a/src/cmd/internal/obj/arm64/asm7.go
+++ b/src/cmd/internal/obj/arm64/asm7.go
@@ -1054,15 +1054,6 @@ var sysInstFields = map[SpecialOperand]struct {
// Used for padding NOOP instruction
const OP_NOOP = 0xd503201f
-// pcAlignPadLength returns the number of bytes required to align pc to alignedValue,
-// reporting an error if alignedValue is not a power of two or is out of range.
-func pcAlignPadLength(ctxt *obj.Link, pc int64, alignedValue int64) int {
- if !((alignedValue&(alignedValue-1) == 0) && 8 <= alignedValue && alignedValue <= 2048) {
- ctxt.Diag("alignment value of an instruction must be a power of two and in the range [8, 2048], got %d\n", alignedValue)
- }
- return int(-pc & (alignedValue - 1))
-}
-
// size returns the size of the sequence of machine instructions when p is encoded with o.
// Usually it just returns o.size directly, in some cases it checks whether the optimization
// conditions are met, and if so returns the size of the optimized instruction sequence.
@@ -1209,10 +1200,6 @@ type codeBuffer struct {
data *[]byte
}
-func (cb *codeBuffer) pc() int64 {
- return int64(len(*cb.data))
-}
-
// Write a sequence of opcodes into the code buffer.
func (cb *codeBuffer) emit(op ...uint32) {
for _, o := range op {
diff --git a/src/cmd/internal/obj/loong64/a.out.go b/src/cmd/internal/obj/loong64/a.out.go
index 193993ec4d9b4c..f5d20cfabe76d5 100644
--- a/src/cmd/internal/obj/loong64/a.out.go
+++ b/src/cmd/internal/obj/loong64/a.out.go
@@ -567,6 +567,11 @@ const (
AMOVVF
AMOVVD
+ // 2.2.1.3
+ AALSLW
+ AALSLWU
+ AALSLV
+
// 2.2.1.8
AORN
AANDN
@@ -743,6 +748,9 @@ const (
AFTINTRNEVF
AFTINTRNEVD
+ // 3.2.4.2
+ AFSEL
+
// LSX and LASX memory access instructions
AVMOVQ
AXVMOVQ
@@ -816,6 +824,31 @@ const (
AXVPCNTW
AXVPCNTV
+ AVBITCLRB
+ AVBITCLRH
+ AVBITCLRW
+ AVBITCLRV
+ AVBITSETB
+ AVBITSETH
+ AVBITSETW
+ AVBITSETV
+ AVBITREVB
+ AVBITREVH
+ AVBITREVW
+ AVBITREVV
+ AXVBITCLRB
+ AXVBITCLRH
+ AXVBITCLRW
+ AXVBITCLRV
+ AXVBITSETB
+ AXVBITSETH
+ AXVBITSETW
+ AXVBITSETV
+ AXVBITREVB
+ AXVBITREVH
+ AXVBITREVW
+ AXVBITREVV
+
// LSX and LASX integer comparison instruction
AVSEQB
AXVSEQB
diff --git a/src/cmd/internal/obj/loong64/anames.go b/src/cmd/internal/obj/loong64/anames.go
index bf9b0722cc39d7..67b5f2fc809927 100644
--- a/src/cmd/internal/obj/loong64/anames.go
+++ b/src/cmd/internal/obj/loong64/anames.go
@@ -125,6 +125,9 @@ var Anames = []string{
"MOVDV",
"MOVVF",
"MOVVD",
+ "ALSLW",
+ "ALSLWU",
+ "ALSLV",
"ORN",
"ANDN",
"AMSWAPB",
@@ -261,6 +264,7 @@ var Anames = []string{
"FTINTRNEWD",
"FTINTRNEVF",
"FTINTRNEVD",
+ "FSEL",
"VMOVQ",
"XVMOVQ",
"VADDB",
@@ -327,6 +331,30 @@ var Anames = []string{
"XVPCNTH",
"XVPCNTW",
"XVPCNTV",
+ "VBITCLRB",
+ "VBITCLRH",
+ "VBITCLRW",
+ "VBITCLRV",
+ "VBITSETB",
+ "VBITSETH",
+ "VBITSETW",
+ "VBITSETV",
+ "VBITREVB",
+ "VBITREVH",
+ "VBITREVW",
+ "VBITREVV",
+ "XVBITCLRB",
+ "XVBITCLRH",
+ "XVBITCLRW",
+ "XVBITCLRV",
+ "XVBITSETB",
+ "XVBITSETH",
+ "XVBITSETW",
+ "XVBITSETV",
+ "XVBITREVB",
+ "XVBITREVH",
+ "XVBITREVW",
+ "XVBITREVV",
"VSEQB",
"XVSEQB",
"VSEQH",
diff --git a/src/cmd/internal/obj/loong64/asm.go b/src/cmd/internal/obj/loong64/asm.go
index 6e09930183383c..ffd1177350b119 100644
--- a/src/cmd/internal/obj/loong64/asm.go
+++ b/src/cmd/internal/obj/loong64/asm.go
@@ -154,6 +154,9 @@ var optab = []Optab{
{AFMADDF, C_FREG, C_FREG, C_NONE, C_FREG, C_NONE, 37, 4, 0, 0},
{AFMADDF, C_FREG, C_FREG, C_FREG, C_FREG, C_NONE, 37, 4, 0, 0},
+ {AFSEL, C_FCCREG, C_FREG, C_FREG, C_FREG, C_NONE, 33, 4, 0, 0},
+ {AFSEL, C_FCCREG, C_FREG, C_NONE, C_FREG, C_NONE, 33, 4, 0, 0},
+
{AMOVW, C_REG, C_NONE, C_NONE, C_SAUTO, C_NONE, 7, 4, REGSP, 0},
{AMOVWU, C_REG, C_NONE, C_NONE, C_SAUTO, C_NONE, 7, 4, REGSP, 0},
{AMOVV, C_REG, C_NONE, C_NONE, C_SAUTO, C_NONE, 7, 4, REGSP, 0},
@@ -416,8 +419,13 @@ var optab = []Optab{
{AVMOVQ, C_ELEM, C_NONE, C_NONE, C_ARNG, C_NONE, 45, 4, 0, 0},
- {APRELD, C_SOREG, C_U5CON, C_NONE, C_NONE, C_NONE, 46, 4, 0, 0},
- {APRELDX, C_SOREG, C_DCON, C_U5CON, C_NONE, C_NONE, 47, 20, 0, 0},
+ {AVMOVQ, C_SOREG, C_NONE, C_NONE, C_ARNG, C_NONE, 46, 4, 0, 0},
+ {AXVMOVQ, C_SOREG, C_NONE, C_NONE, C_ARNG, C_NONE, 46, 4, 0, 0},
+
+ {APRELD, C_SOREG, C_U5CON, C_NONE, C_NONE, C_NONE, 47, 4, 0, 0},
+ {APRELDX, C_SOREG, C_DCON, C_U5CON, C_NONE, C_NONE, 48, 20, 0, 0},
+
+ {AALSLV, C_U3CON, C_REG, C_REG, C_REG, C_NONE, 64, 4, 0, 0},
{obj.APCALIGN, C_U12CON, C_NONE, C_NONE, C_NONE, C_NONE, 0, 0, 0, 0},
{obj.APCDATA, C_32CON, C_NONE, C_NONE, C_32CON, C_NONE, 0, 0, 0, 0},
@@ -726,10 +734,6 @@ func isint32(v int64) bool {
return int64(int32(v)) == v
}
-func isuint32(v uint64) bool {
- return uint64(uint32(v)) == v
-}
-
func (c *ctxt0) aclass(a *obj.Addr) int {
switch a.Type {
case obj.TYPE_NONE:
@@ -1493,6 +1497,10 @@ func buildop(ctxt *obj.Link) {
case ABFPT:
opset(ABFPF, r0)
+ case AALSLV:
+ opset(AALSLW, r0)
+ opset(AALSLWU, r0)
+
case AMOVW,
AMOVD,
AMOVF,
@@ -1512,6 +1520,7 @@ func buildop(ctxt *obj.Link) {
AWORD,
APRELD,
APRELDX,
+ AFSEL,
obj.ANOP,
obj.ATEXT,
obj.AFUNCDATA,
@@ -1830,21 +1839,33 @@ func buildop(ctxt *obj.Link) {
opset(AVSRLB, r0)
opset(AVSRAB, r0)
opset(AVROTRB, r0)
+ opset(AVBITCLRB, r0)
+ opset(AVBITSETB, r0)
+ opset(AVBITREVB, r0)
case AXVSLLB:
opset(AXVSRLB, r0)
opset(AXVSRAB, r0)
opset(AXVROTRB, r0)
+ opset(AXVBITCLRB, r0)
+ opset(AXVBITSETB, r0)
+ opset(AXVBITREVB, r0)
case AVSLLH:
opset(AVSRLH, r0)
opset(AVSRAH, r0)
opset(AVROTRH, r0)
+ opset(AVBITCLRH, r0)
+ opset(AVBITSETH, r0)
+ opset(AVBITREVH, r0)
case AXVSLLH:
opset(AXVSRLH, r0)
opset(AXVSRAH, r0)
opset(AXVROTRH, r0)
+ opset(AXVBITCLRH, r0)
+ opset(AXVBITSETH, r0)
+ opset(AXVBITREVH, r0)
case AVSLLW:
opset(AVSRLW, r0)
@@ -1858,6 +1879,9 @@ func buildop(ctxt *obj.Link) {
opset(AVSUBHU, r0)
opset(AVSUBWU, r0)
opset(AVSUBVU, r0)
+ opset(AVBITCLRW, r0)
+ opset(AVBITSETW, r0)
+ opset(AVBITREVW, r0)
case AXVSLLW:
opset(AXVSRLW, r0)
@@ -1871,16 +1895,25 @@ func buildop(ctxt *obj.Link) {
opset(AXVSUBHU, r0)
opset(AXVSUBWU, r0)
opset(AXVSUBVU, r0)
+ opset(AXVBITCLRW, r0)
+ opset(AXVBITSETW, r0)
+ opset(AXVBITREVW, r0)
case AVSLLV:
opset(AVSRLV, r0)
opset(AVSRAV, r0)
opset(AVROTRV, r0)
+ opset(AVBITCLRV, r0)
+ opset(AVBITSETV, r0)
+ opset(AVBITREVV, r0)
case AXVSLLV:
opset(AXVSRLV, r0)
opset(AXVSRAV, r0)
opset(AXVROTRV, r0)
+ opset(AXVBITCLRV, r0)
+ opset(AXVBITSETV, r0)
+ opset(AXVBITREVV, r0)
case AVSETEQV:
opset(AVSETNEV, r0)
@@ -1925,6 +1958,10 @@ func OP_RR(op uint32, r2 uint32, r3 uint32) uint32 {
return op | (r2&0x1F)<<5 | (r3&0x1F)<<0
}
+func OP_2IRRR(op uint32, i uint32, r2 uint32, r3 uint32, r4 uint32) uint32 {
+ return op | (i&0x3)<<15 | (r2&0x1F)<<10 | (r3&0x1F)<<5 | (r4&0x1F)<<0
+}
+
func OP_16IR_5I(op uint32, i uint32, r2 uint32) uint32 {
return op | (i&0xFFFF)<<10 | (r2&0x1F)<<5 | ((i >> 16) & 0x1F)
}
@@ -2354,6 +2391,16 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) {
}
o1 = OP_6IRR(c.opirr(p.As), uint32(v), uint32(r), uint32(p.To.Reg))
+ case 33: // fsel ca, fk, [fj], fd
+ ca := uint32(p.From.Reg)
+ fk := uint32(p.Reg)
+ fd := uint32(p.To.Reg)
+ fj := fd
+ if len(p.RestArgs) > 0 {
+ fj = uint32(p.GetFrom3().Reg)
+ }
+ o1 = 0x340<<18 | (ca&0x7)<<15 | (fk&0x1F)<<10 | (fj&0x1F)<<5 | (fd & 0x1F)
+
case 34: // mov $con,fr
v := c.regoff(&p.From)
a := AADDU
@@ -2395,7 +2442,7 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) {
o1 = uint32(c.regoff(&p.From))
case 39: // vmov Rn, Vd.[index]
- v, m := c.specialLsxMovInst(p.As, p.From.Reg, p.To.Reg)
+ v, m := c.specialLsxMovInst(p.As, p.From.Reg, p.To.Reg, false)
if v == 0 {
c.ctxt.Diag("illegal arng type combination: %v\n", p)
}
@@ -2407,7 +2454,7 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) {
o1 = v | (index << 10) | (Rj << 5) | Vd
case 40: // vmov Vd.[index], Rn
- v, m := c.specialLsxMovInst(p.As, p.From.Reg, p.To.Reg)
+ v, m := c.specialLsxMovInst(p.As, p.From.Reg, p.To.Reg, false)
if v == 0 {
c.ctxt.Diag("illegal arng type combination: %v\n", p)
}
@@ -2419,7 +2466,7 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) {
o1 = v | (index << 10) | (Vj << 5) | Rd
case 41: // vmov Rn, Vd.
- v, _ := c.specialLsxMovInst(p.As, p.From.Reg, p.To.Reg)
+ v, _ := c.specialLsxMovInst(p.As, p.From.Reg, p.To.Reg, false)
if v == 0 {
c.ctxt.Diag("illegal arng type combination: %v\n", p)
}
@@ -2429,7 +2476,7 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) {
o1 = v | (Rj << 5) | Vd
case 42: // vmov xj, xd.
- v, _ := c.specialLsxMovInst(p.As, p.From.Reg, p.To.Reg)
+ v, _ := c.specialLsxMovInst(p.As, p.From.Reg, p.To.Reg, false)
if v == 0 {
c.ctxt.Diag("illegal arng type combination: %v\n", p)
}
@@ -2439,7 +2486,7 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) {
o1 = v | (Xj << 5) | Xd
case 43: // vmov xj, xd.[index]
- v, m := c.specialLsxMovInst(p.As, p.From.Reg, p.To.Reg)
+ v, m := c.specialLsxMovInst(p.As, p.From.Reg, p.To.Reg, false)
if v == 0 {
c.ctxt.Diag("illegal arng type combination: %v\n", p)
}
@@ -2451,7 +2498,7 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) {
o1 = v | (index << 10) | (Xj << 5) | Xd
case 44: // vmov xj.[index], xd
- v, m := c.specialLsxMovInst(p.As, p.From.Reg, p.To.Reg)
+ v, m := c.specialLsxMovInst(p.As, p.From.Reg, p.To.Reg, false)
if v == 0 {
c.ctxt.Diag("illegal arng type combination: %v\n", p)
}
@@ -2463,7 +2510,7 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) {
o1 = v | (index << 10) | (Xj << 5) | Xd
case 45: // vmov vj.[index], vd.
- v, m := c.specialLsxMovInst(p.As, p.From.Reg, p.To.Reg)
+ v, m := c.specialLsxMovInst(p.As, p.From.Reg, p.To.Reg, false)
if v == 0 {
c.ctxt.Diag("illegal arng type combination: %v\n", p)
}
@@ -2474,12 +2521,23 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) {
c.checkindex(p, index, m)
o1 = v | (index << 10) | (vj << 5) | vd
- case 46: // preld offset(Rbase), $hint
+ case 46: // vmov offset(rj), vd.
+ v, _ := c.specialLsxMovInst(p.As, p.From.Reg, p.To.Reg, true)
+ if v == 0 {
+ c.ctxt.Diag("illegal arng type combination: %v\n", p)
+ }
+
+ si := c.regoff(&p.From)
+ Rj := uint32(p.From.Reg & EXT_REG_MASK)
+ Vd := uint32(p.To.Reg & EXT_REG_MASK)
+ o1 = v | uint32(si<<10) | (Rj << 5) | Vd
+
+ case 47: // preld offset(Rbase), $hint
offs := c.regoff(&p.From)
hint := p.GetFrom3().Offset
o1 = OP_12IR_5I(c.opiir(p.As), uint32(offs), uint32(p.From.Reg), uint32(hint))
- case 47: // preldx offset(Rbase), $n, $hint
+ case 48: // preldx offset(Rbase), $n, $hint
offs := c.regoff(&p.From)
hint := p.RestArgs[1].Offset
n := uint64(p.GetFrom3().Offset)
@@ -2683,6 +2741,14 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) {
case 62: // rdtimex rd, rj
o1 = OP_RR(c.oprr(p.As), uint32(p.To.Reg), uint32(p.RegTo2))
+ case 64: // alsl rd, rj, rk, sa2
+ sa := p.From.Offset - 1
+ if sa < 0 || sa > 3 {
+ c.ctxt.Diag("%v: shift amount out of range[1, 4].\n", p)
+ }
+ r := p.GetFrom3().Reg
+ o1 = OP_2IRRR(c.opirrr(p.As), uint32(sa), uint32(r), uint32(p.Reg), uint32(p.To.Reg))
+
case 65: // mov sym@GOT, r ==> pcalau12i + ld.d
o1 = OP_IR(c.opir(APCALAU12I), uint32(0), uint32(p.To.Reg))
c.cursym.AddRel(c.ctxt, obj.Reloc{
@@ -3504,6 +3570,54 @@ func (c *ctxt0) oprrr(a obj.As) uint32 {
return 0xea75 << 15 // xvfdiv.s
case AXVDIVD:
return 0xea76 << 15 // xvfdiv.d
+ case AVBITCLRB:
+ return 0xe218 << 15 // vbitclr.b
+ case AVBITCLRH:
+ return 0xe219 << 15 // vbitclr.h
+ case AVBITCLRW:
+ return 0xe21a << 15 // vbitclr.w
+ case AVBITCLRV:
+ return 0xe21b << 15 // vbitclr.d
+ case AVBITSETB:
+ return 0xe21c << 15 // vbitset.b
+ case AVBITSETH:
+ return 0xe21d << 15 // vbitset.h
+ case AVBITSETW:
+ return 0xe21e << 15 // vbitset.w
+ case AVBITSETV:
+ return 0xe21f << 15 // vbitset.d
+ case AVBITREVB:
+ return 0xe220 << 15 // vbitrev.b
+ case AVBITREVH:
+ return 0xe221 << 15 // vbitrev.h
+ case AVBITREVW:
+ return 0xe222 << 15 // vbitrev.w
+ case AVBITREVV:
+ return 0xe223 << 15 // vbitrev.d
+ case AXVBITCLRB:
+ return 0xea18 << 15 // xvbitclr.b
+ case AXVBITCLRH:
+ return 0xea19 << 15 // xvbitclr.h
+ case AXVBITCLRW:
+ return 0xea1a << 15 // xvbitclr.w
+ case AXVBITCLRV:
+ return 0xea1b << 15 // xvbitclr.d
+ case AXVBITSETB:
+ return 0xea1c << 15 // xvbitset.b
+ case AXVBITSETH:
+ return 0xea1d << 15 // xvbitset.h
+ case AXVBITSETW:
+ return 0xea1e << 15 // xvbitset.w
+ case AXVBITSETV:
+ return 0xea1f << 15 // xvbitset.d
+ case AXVBITREVB:
+ return 0xea20 << 15 // xvbitrev.b
+ case AXVBITREVH:
+ return 0xea21 << 15 // xvbitrev.h
+ case AXVBITREVW:
+ return 0xea22 << 15 // xvbitrev.w
+ case AXVBITREVV:
+ return 0xea23 << 15 // xvbitrev.d
}
if a < 0 {
@@ -4104,6 +4218,54 @@ func (c *ctxt0) opirr(a obj.As) uint32 {
return 0x1de6 << 18 // xvshuf4i.w
case AXVSHUF4IV:
return 0x1de7 << 18 // xvshuf4i.d
+ case AVBITCLRB:
+ return 0x1CC4<<18 | 0x1<<13 // vbitclri.b
+ case AVBITCLRH:
+ return 0x1CC4<<18 | 0x1<<14 // vbitclri.h
+ case AVBITCLRW:
+ return 0x1CC4<<18 | 0x1<<15 // vbitclri.w
+ case AVBITCLRV:
+ return 0x1CC4<<18 | 0x1<<16 // vbitclri.d
+ case AVBITSETB:
+ return 0x1CC5<<18 | 0x1<<13 // vbitseti.b
+ case AVBITSETH:
+ return 0x1CC5<<18 | 0x1<<14 // vbitseti.h
+ case AVBITSETW:
+ return 0x1CC5<<18 | 0x1<<15 // vbitseti.w
+ case AVBITSETV:
+ return 0x1CC5<<18 | 0x1<<16 // vbitseti.d
+ case AVBITREVB:
+ return 0x1CC6<<18 | 0x1<<13 // vbitrevi.b
+ case AVBITREVH:
+ return 0x1CC6<<18 | 0x1<<14 // vbitrevi.h
+ case AVBITREVW:
+ return 0x1CC6<<18 | 0x1<<15 // vbitrevi.w
+ case AVBITREVV:
+ return 0x1CC6<<18 | 0x1<<16 // vbitrevi.d
+ case AXVBITCLRB:
+ return 0x1DC4<<18 | 0x1<<13 // xvbitclri.b
+ case AXVBITCLRH:
+ return 0x1DC4<<18 | 0x1<<14 // xvbitclri.h
+ case AXVBITCLRW:
+ return 0x1DC4<<18 | 0x1<<15 // xvbitclri.w
+ case AXVBITCLRV:
+ return 0x1DC4<<18 | 0x1<<16 // xvbitclri.d
+ case AXVBITSETB:
+ return 0x1DC5<<18 | 0x1<<13 // xvbitseti.b
+ case AXVBITSETH:
+ return 0x1DC5<<18 | 0x1<<14 // xvbitseti.h
+ case AXVBITSETW:
+ return 0x1DC5<<18 | 0x1<<15 // xvbitseti.w
+ case AXVBITSETV:
+ return 0x1DC5<<18 | 0x1<<16 // xvbitseti.d
+ case AXVBITREVB:
+ return 0x1DC6<<18 | 0x1<<13 // xvbitrevi.b
+ case AXVBITREVH:
+ return 0x1DC6<<18 | 0x1<<14 // xvbitrevi.h
+ case AXVBITREVW:
+ return 0x1DC6<<18 | 0x1<<15 // xvbitrevi.w
+ case AXVBITREVV:
+ return 0x1DC6<<18 | 0x1<<16 // xvbitrevi.d
}
if a < 0 {
@@ -4114,6 +4276,19 @@ func (c *ctxt0) opirr(a obj.As) uint32 {
return 0
}
+func (c *ctxt0) opirrr(a obj.As) uint32 {
+ switch a {
+ case AALSLW:
+ return 0x2 << 17 // alsl.w
+ case AALSLWU:
+ return 0x3 << 17 // alsl.wu
+ case AALSLV:
+ return 0x16 << 17 // alsl.d
+ }
+
+ return 0
+}
+
func (c *ctxt0) opirir(a obj.As) uint32 {
switch a {
case ABSTRINSW:
@@ -4192,7 +4367,7 @@ func (c *ctxt0) specialFpMovInst(a obj.As, fclass int, tclass int) uint32 {
return 0
}
-func (c *ctxt0) specialLsxMovInst(a obj.As, fReg, tReg int16) (op_code, index_mask uint32) {
+func (c *ctxt0) specialLsxMovInst(a obj.As, fReg, tReg int16, offset_flag bool) (op_code, index_mask uint32) {
farng := (fReg >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK
tarng := (tReg >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK
fclass := c.rclass(fReg)
@@ -4258,29 +4433,58 @@ func (c *ctxt0) specialLsxMovInst(a obj.As, fReg, tReg int16) (op_code, index_ma
}
case C_REG | (C_ARNG << 16):
- // vmov Rn, Vd.
- switch a {
- case AVMOVQ:
- switch tarng {
- case ARNG_16B:
- return (0x1CA7C0 << 10), 0x0 // vreplgr2vr.b
- case ARNG_8H:
- return (0x1CA7C1 << 10), 0x0 // vreplgr2vr.h
- case ARNG_4W:
- return (0x1CA7C2 << 10), 0x0 // vreplgr2vr.w
- case ARNG_2V:
- return (0x1CA7C3 << 10), 0x0 // vreplgr2vr.d
+ switch {
+ case offset_flag:
+ // vmov offset(rj), vd.
+ switch a {
+ case AVMOVQ:
+ switch tarng {
+ case ARNG_16B:
+ return (0xC2 << 22), 0x0 // vldrepl.b
+ case ARNG_8H:
+ return (0x182 << 21), 0x0 // vldrepl.h
+ case ARNG_4W:
+ return (0x302 << 20), 0x0 // vldrepl.w
+ case ARNG_2V:
+ return (0x602 << 19), 0x0 // vldrepl.d
+ }
+ case AXVMOVQ:
+ switch tarng {
+ case ARNG_32B:
+ return (0xCA << 22), 0x0 // xvldrepl.b
+ case ARNG_16H:
+ return (0x192 << 21), 0x0 // xvldrepl.h
+ case ARNG_8W:
+ return (0x322 << 20), 0x0 // xvldrepl.w
+ case ARNG_4V:
+ return (0x642 << 19), 0x0 // xvldrepl.d
+ }
}
- case AXVMOVQ:
- switch tarng {
- case ARNG_32B:
- return (0x1DA7C0 << 10), 0x0 // xvreplgr2vr.b
- case ARNG_16H:
- return (0x1DA7C1 << 10), 0x0 // xvreplgr2vr.h
- case ARNG_8W:
- return (0x1DA7C2 << 10), 0x0 // xvreplgr2vr.w
- case ARNG_4V:
- return (0x1DA7C3 << 10), 0x0 // xvreplgr2vr.d
+ default:
+ // vmov Rn, Vd.
+ switch a {
+ case AVMOVQ:
+ switch tarng {
+ case ARNG_16B:
+ return (0x1CA7C0 << 10), 0x0 // vreplgr2vr.b
+ case ARNG_8H:
+ return (0x1CA7C1 << 10), 0x0 // vreplgr2vr.h
+ case ARNG_4W:
+ return (0x1CA7C2 << 10), 0x0 // vreplgr2vr.w
+ case ARNG_2V:
+ return (0x1CA7C3 << 10), 0x0 // vreplgr2vr.d
+ }
+ case AXVMOVQ:
+ switch tarng {
+ case ARNG_32B:
+ return (0x1DA7C0 << 10), 0x0 // xvreplgr2vr.b
+ case ARNG_16H:
+ return (0x1DA7C1 << 10), 0x0 // xvreplgr2vr.h
+ case ARNG_8W:
+ return (0x1DA7C2 << 10), 0x0 // xvreplgr2vr.w
+ case ARNG_4V:
+ return (0x1DA7C3 << 10), 0x0 // xvreplgr2vr.d
+ }
}
}
diff --git a/src/cmd/internal/obj/loong64/doc.go b/src/cmd/internal/obj/loong64/doc.go
index 0818389c8d9366..64bb41ae5a2219 100644
--- a/src/cmd/internal/obj/loong64/doc.go
+++ b/src/cmd/internal/obj/loong64/doc.go
@@ -203,6 +203,23 @@ Note: In the following sections 3.1 to 3.6, "ui4" (4-bit unsigned int immediate)
VMOVQ Vj.W[index], Vd.W4 | vreplvei.w vd, vj, ui2 | for i in range(4) : VR[vd].w[i] = VR[vj].w[ui2]
VMOVQ Vj.V[index], Vd.V2 | vreplvei.d vd, vj, ui1 | for i in range(2) : VR[vd].d[i] = VR[vj].d[ui1]
+3.7 Load data from memory and broadcast to each element of a vector register.
+
+ Instruction format:
+ VMOVQ offset(Rj), <Vd>.<T>
+
+ Mapping between Go and platform assembly:
+ Go assembly | platform assembly | semantics
+ -------------------------------------------------------------------------------------------------------------------------------------------------------
+ VMOVQ offset(Rj), Vd.B16 | vldrepl.b Vd, Rj, si12 | for i in range(16): VR[vd].b[i] = load 8 bit memory data from (GR[rj]+SignExtend(si12))
+ VMOVQ offset(Rj), Vd.H8 | vldrepl.h Vd, Rj, si11 | for i in range(8) : VR[vd].h[i] = load 16 bit memory data from (GR[rj]+SignExtend(si11<<1))
+ VMOVQ offset(Rj), Vd.W4 | vldrepl.w Vd, Rj, si10 | for i in range(4) : VR[vd].w[i] = load 32 bit memory data from (GR[rj]+SignExtend(si10<<2))
+ VMOVQ offset(Rj), Vd.V2 | vldrepl.d Vd, Rj, si9 | for i in range(2) : VR[vd].d[i] = load 64 bit memory data from (GR[rj]+SignExtend(si9<<3))
+ XVMOVQ offset(Rj), Xd.B32 | xvldrepl.b Xd, Rj, si12 | for i in range(32): XR[xd].b[i] = load 8 bit memory data from (GR[rj]+SignExtend(si12))
+ XVMOVQ offset(Rj), Xd.H16 | xvldrepl.h Xd, Rj, si11 | for i in range(16): XR[xd].h[i] = load 16 bit memory data from (GR[rj]+SignExtend(si11<<1))
+ XVMOVQ offset(Rj), Xd.W8 | xvldrepl.w Xd, Rj, si10 | for i in range(8) : XR[xd].w[i] = load 32 bit memory data from (GR[rj]+SignExtend(si10<<2))
+ XVMOVQ offset(Rj), Xd.V4 | xvldrepl.d Xd, Rj, si9 | for i in range(4) : XR[xd].d[i] = load 64 bit memory data from (GR[rj]+SignExtend(si9<<3))
+
# Special instruction encoding definition and description on LoongArch
1. DBAR hint encoding for LA664(Loongson 3A6000) and later micro-architectures, paraphrased
@@ -251,6 +268,27 @@ Note: In the following sections 3.1 to 3.6, "ui4" (4-bit unsigned int immediate)
bits[11:1]: block size, the value range is [16, 1024], and it must be an integer multiple of 16
bits[20:12]: block num, the value range is [1, 256]
bits[36:21]: stride, the value range is [0, 0xffff]
+
+4. ShiftAdd instructions
+ Mapping between Go and platform assembly:
+ Go assembly | platform assembly
+ ALSL.W/WU/V $Imm, Rj, Rk, Rd | alsl.w/wu/d rd, rj, rk, $imm
+
+ Instruction encoding format is as follows:
+
+ | 31 ~ 17 | 16 ~ 15 | 14 ~ 10 | 9 ~ 5 | 4 ~ 0 |
+ | opcode | sa2 | rk | rj | rd |
+
+ The alsl.w/wu/d series of instructions shift the data in rj left by sa2+1, add the value
+ in rk, and write the result to rd.
+
+ To allow programmers to directly write the desired shift amount in assembly code, we actually write
+ the value of sa2+1 in the assembly code and then include the value of sa2 in the instruction encoding.
+
+ For example:
+
+ Go assembly | instruction encoding
+ ALSLV $4, R4, R5, R6 | 002d9486
*/
package loong64
diff --git a/src/cmd/internal/obj/loong64/obj.go b/src/cmd/internal/obj/loong64/obj.go
index 79fbb23fef92e1..a1eb786da31067 100644
--- a/src/cmd/internal/obj/loong64/obj.go
+++ b/src/cmd/internal/obj/loong64/obj.go
@@ -771,14 +771,6 @@ func (c *ctxt0) stacksplit(p *obj.Prog, framesize int32) *obj.Prog {
return end
}
-func (c *ctxt0) addnop(p *obj.Prog) {
- q := c.newprog()
- q.As = ANOOP
- q.Pos = p.Pos
- q.Link = p.Link
- p.Link = q
-}
-
var Linkloong64 = obj.LinkArch{
Arch: sys.ArchLoong64,
Init: buildop,
diff --git a/src/cmd/internal/obj/ppc64/asm9.go b/src/cmd/internal/obj/ppc64/asm9.go
index 9cba8c33ced087..dcd3aa59a4690a 100644
--- a/src/cmd/internal/obj/ppc64/asm9.go
+++ b/src/cmd/internal/obj/ppc64/asm9.go
@@ -2137,10 +2137,6 @@ func OPVCC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
return o<<26 | xo<<1 | oe<<10 | rc&1
}
-func OPCC(o uint32, xo uint32, rc uint32) uint32 {
- return OPVCC(o, xo, 0, rc)
-}
-
/* Generate MD-form opcode */
func OPMD(o, xo, rc uint32) uint32 {
return o<<26 | xo<<2 | rc&1
diff --git a/src/cmd/internal/obj/riscv/anames.go b/src/cmd/internal/obj/riscv/anames.go
index a689f2de274a30..f0be8f6b875060 100644
--- a/src/cmd/internal/obj/riscv/anames.go
+++ b/src/cmd/internal/obj/riscv/anames.go
@@ -192,6 +192,47 @@ var Anames = []string{
"FLEQ",
"FLTQ",
"FCLASSQ",
+ "CLWSP",
+ "CFLWSP",
+ "CLDSP",
+ "CFLDSP",
+ "CSWSP",
+ "CSDSP",
+ "CFSWSP",
+ "CFSDSP",
+ "CLW",
+ "CLD",
+ "CFLW",
+ "CFLD",
+ "CSW",
+ "CSD",
+ "CFSW",
+ "CFSD",
+ "CJ",
+ "CJR",
+ "CJALR",
+ "CBEQZ",
+ "CBNEZ",
+ "CLI",
+ "CLUI",
+ "CADDI",
+ "CADDIW",
+ "CADDI16SP",
+ "CADDI4SPN",
+ "CSLLI",
+ "CSRLI",
+ "CSRAI",
+ "CANDI",
+ "CMV",
+ "CADD",
+ "CAND",
+ "COR",
+ "CXOR",
+ "CSUB",
+ "CADDW",
+ "CSUBW",
+ "CNOP",
+ "CEBREAK",
"ADDUW",
"SH1ADD",
"SH1ADDUW",
diff --git a/src/cmd/internal/obj/riscv/cpu.go b/src/cmd/internal/obj/riscv/cpu.go
index 382c08a9d96f5f..116ccb4ea4d7f0 100644
--- a/src/cmd/internal/obj/riscv/cpu.go
+++ b/src/cmd/internal/obj/riscv/cpu.go
@@ -551,7 +551,7 @@ const (
AFNMADDQ
AFNMSUBQ
- // 22.3 Quad-Precision Convert and Move Instructions
+ // 22.3: Quad-Precision Convert and Move Instructions
AFCVTWQ
AFCVTLQ
AFCVTSQ
@@ -568,14 +568,73 @@ const (
AFSGNJNQ
AFSGNJXQ
- // 22.4 Quad-Precision Floating-Point Compare Instructions
+ // 22.4: Quad-Precision Floating-Point Compare Instructions
AFEQQ
AFLEQ
AFLTQ
- // 22.5 Quad-Precision Floating-Point Classify Instruction
+ // 22.5: Quad-Precision Floating-Point Classify Instruction
AFCLASSQ
+ //
+ // "C" Extension for Compressed Instructions
+ //
+
+ // 26.3.1: Compressed Stack-Pointer-Based Loads and Stores
+ ACLWSP
+ ACFLWSP
+ ACLDSP
+ ACFLDSP
+ ACSWSP
+ ACSDSP
+ ACFSWSP
+ ACFSDSP
+
+ // 26.3.2: Compressed Register-Based Loads and Stores
+ ACLW
+ ACLD
+ ACFLW
+ ACFLD
+ ACSW
+ ACSD
+ ACFSW
+ ACFSD
+
+ // 26.4: Compressed Control Transfer Instructions
+ ACJ
+ ACJR
+ ACJALR
+ ACBEQZ
+ ACBNEZ
+
+ // 26.5.1: Compressed Integer Constant-Generation Instructions
+ ACLI
+ ACLUI
+ ACADDI
+ ACADDIW
+ ACADDI16SP
+ ACADDI4SPN
+ ACSLLI
+ ACSRLI
+ ACSRAI
+ ACANDI
+
+ // 26.5.3: Compressed Integer Register-Register Operations
+ ACMV
+ ACADD
+ ACAND
+ ACOR
+ ACXOR
+ ACSUB
+ ACADDW
+ ACSUBW
+
+ // 26.5.5: Compressed NOP Instruction
+ ACNOP
+
+ // 26.5.6: Compressed Breakpoint Instruction
+ ACEBREAK
+
//
// "B" Extension for Bit Manipulation, Version 1.0.0
//
diff --git a/src/cmd/internal/obj/riscv/inst.go b/src/cmd/internal/obj/riscv/inst.go
index 5ee5bda361f08e..16f2272b03f6f1 100644
--- a/src/cmd/internal/obj/riscv/inst.go
+++ b/src/cmd/internal/obj/riscv/inst.go
@@ -1,4 +1,4 @@
-// Code generated by ./parse.py -go rv64_a rv64_d rv64_f rv64_i rv64_m rv64_q rv64_zba rv64_zbb rv64_zbs rv_a rv_d rv_f rv_i rv_m rv_q rv_s rv_system rv_v rv_zba rv_zbb rv_zbs rv_zicsr; DO NOT EDIT.
+// Code generated by ./parse.py -go rv64_a rv64_c rv64_d rv64_f rv64_i rv64_m rv64_q rv64_zba rv64_zbb rv64_zbs rv_a rv_c rv_c_d rv_d rv_f rv_i rv_m rv_q rv_s rv_system rv_v rv_zba rv_zbb rv_zbs rv_zicsr; DO NOT EDIT.
package riscv
import "cmd/internal/obj"
@@ -96,6 +96,80 @@ func encode(a obj.As) *inst {
return &inst{0x33, 0x1, 0x0, 0x0, 640, 0x14}
case ABSETI:
return &inst{0x13, 0x1, 0x0, 0x0, 640, 0x14}
+ case ACADD:
+ return &inst{0x2, 0x1, 0x1, 0x0, 0, 0x0}
+ case ACADDI:
+ return &inst{0x1, 0x0, 0x0, 0x0, 0, 0x0}
+ case ACADDI16SP:
+ return &inst{0x1, 0x6, 0x0, 0x0, 0, 0x0}
+ case ACADDI4SPN:
+ return &inst{0x0, 0x0, 0x0, 0x0, 0, 0x0}
+ case ACADDIW:
+ return &inst{0x1, 0x2, 0x0, 0x0, 0, 0x0}
+ case ACADDW:
+ return &inst{0x21, 0x1, 0x1, 0x0, 0, 0x0}
+ case ACAND:
+ return &inst{0x61, 0x0, 0x1, 0x0, 0, 0x0}
+ case ACANDI:
+ return &inst{0x1, 0x0, 0x1, 0x0, 0, 0x0}
+ case ACBEQZ:
+ return &inst{0x1, 0x4, 0x1, 0x0, 0, 0x0}
+ case ACBNEZ:
+ return &inst{0x1, 0x6, 0x1, 0x0, 0, 0x0}
+ case ACEBREAK:
+ return &inst{0x2, 0x1, 0x1, 0x0, 0, 0x0}
+ case ACFLD:
+ return &inst{0x0, 0x2, 0x0, 0x0, 0, 0x0}
+ case ACFLDSP:
+ return &inst{0x2, 0x2, 0x0, 0x0, 0, 0x0}
+ case ACFSD:
+ return &inst{0x0, 0x2, 0x1, 0x0, 0, 0x0}
+ case ACFSDSP:
+ return &inst{0x2, 0x2, 0x1, 0x0, 0, 0x0}
+ case ACJ:
+ return &inst{0x1, 0x2, 0x1, 0x0, 0, 0x0}
+ case ACJALR:
+ return &inst{0x2, 0x1, 0x1, 0x0, 0, 0x0}
+ case ACJR:
+ return &inst{0x2, 0x0, 0x1, 0x0, 0, 0x0}
+ case ACLD:
+ return &inst{0x0, 0x6, 0x0, 0x0, 0, 0x0}
+ case ACLDSP:
+ return &inst{0x2, 0x6, 0x0, 0x0, 0, 0x0}
+ case ACLI:
+ return &inst{0x1, 0x4, 0x0, 0x0, 0, 0x0}
+ case ACLUI:
+ return &inst{0x1, 0x6, 0x0, 0x0, 0, 0x0}
+ case ACLW:
+ return &inst{0x0, 0x4, 0x0, 0x0, 0, 0x0}
+ case ACLWSP:
+ return &inst{0x2, 0x4, 0x0, 0x0, 0, 0x0}
+ case ACMV:
+ return &inst{0x2, 0x0, 0x1, 0x0, 0, 0x0}
+ case ACNOP:
+ return &inst{0x1, 0x0, 0x0, 0x0, 0, 0x0}
+ case ACOR:
+ return &inst{0x41, 0x0, 0x1, 0x0, 0, 0x0}
+ case ACSD:
+ return &inst{0x0, 0x6, 0x1, 0x0, 0, 0x0}
+ case ACSDSP:
+ return &inst{0x2, 0x6, 0x1, 0x0, 0, 0x0}
+ case ACSLLI:
+ return &inst{0x2, 0x0, 0x0, 0x0, 0, 0x0}
+ case ACSRAI:
+ return &inst{0x1, 0x0, 0x1, 0x0, 0, 0x0}
+ case ACSRLI:
+ return &inst{0x1, 0x0, 0x1, 0x0, 0, 0x0}
+ case ACSUB:
+ return &inst{0x1, 0x0, 0x1, 0x0, 0, 0x0}
+ case ACSUBW:
+ return &inst{0x1, 0x1, 0x1, 0x0, 0, 0x0}
+ case ACSW:
+ return &inst{0x0, 0x4, 0x1, 0x0, 0, 0x0}
+ case ACSWSP:
+ return &inst{0x2, 0x4, 0x1, 0x0, 0, 0x0}
+ case ACXOR:
+ return &inst{0x21, 0x0, 0x1, 0x0, 0, 0x0}
case ACLZ:
return &inst{0x13, 0x1, 0x0, 0x0, 1536, 0x30}
case ACLZW:
diff --git a/src/cmd/internal/obj/riscv/obj.go b/src/cmd/internal/obj/riscv/obj.go
index 078e81a2f7a317..44edb8d841837a 100644
--- a/src/cmd/internal/obj/riscv/obj.go
+++ b/src/cmd/internal/obj/riscv/obj.go
@@ -1072,24 +1072,6 @@ func regV(r uint32) uint32 {
return regVal(r, REG_V0, REG_V31)
}
-// regAddr extracts a register from an Addr.
-func regAddr(a obj.Addr, min, max uint32) uint32 {
- if a.Type != obj.TYPE_REG {
- panic(fmt.Sprintf("ill typed: %+v", a))
- }
- return regVal(uint32(a.Reg), min, max)
-}
-
-// regIAddr extracts the integer register from an Addr.
-func regIAddr(a obj.Addr) uint32 {
- return regAddr(a, REG_X0, REG_X31)
-}
-
-// regFAddr extracts the float register from an Addr.
-func regFAddr(a obj.Addr) uint32 {
- return regAddr(a, REG_F0, REG_F31)
-}
-
// immEven checks that the immediate is a multiple of two. If it
// is not, an error is returned.
func immEven(x int64) error {
diff --git a/src/cmd/internal/obj/s390x/asmz.go b/src/cmd/internal/obj/s390x/asmz.go
index 957222a1559388..97de5a4a0896d5 100644
--- a/src/cmd/internal/obj/s390x/asmz.go
+++ b/src/cmd/internal/obj/s390x/asmz.go
@@ -2677,20 +2677,6 @@ func (c *ctxtz) addrilreloc(sym *obj.LSym, add int64) {
})
}
-func (c *ctxtz) addrilrelocoffset(sym *obj.LSym, add, offset int64) {
- if sym == nil {
- c.ctxt.Diag("require symbol to apply relocation")
- }
- offset += int64(2) // relocation offset from start of instruction
- c.cursym.AddRel(c.ctxt, obj.Reloc{
- Type: objabi.R_PCRELDBL,
- Off: int32(c.pc + offset),
- Siz: 4,
- Sym: sym,
- Add: add + offset + 4,
- })
-}
-
// Add a CALL relocation for the immediate in a RIL style instruction.
// The addend will be adjusted as required.
func (c *ctxtz) addcallreloc(sym *obj.LSym, add int64) {
@@ -4745,16 +4731,6 @@ func zI(op, i1 uint32, asm *[]byte) {
*asm = append(*asm, uint8(op>>8), uint8(i1))
}
-func zMII(op, m1, ri2, ri3 uint32, asm *[]byte) {
- *asm = append(*asm,
- uint8(op>>8),
- (uint8(m1)<<4)|uint8((ri2>>8)&0x0F),
- uint8(ri2),
- uint8(ri3>>16),
- uint8(ri3>>8),
- uint8(ri3))
-}
-
func zRI(op, r1_m1, i2_ri2 uint32, asm *[]byte) {
*asm = append(*asm,
uint8(op>>8),
@@ -4807,16 +4783,6 @@ func zRIL(f form, op, r1_m1, i2_ri2 uint32, asm *[]byte) {
uint8(i2_ri2))
}
-func zRIS(op, r1, m3, b4, d4, i2 uint32, asm *[]byte) {
- *asm = append(*asm,
- uint8(op>>8),
- (uint8(r1)<<4)|uint8(m3&0x0F),
- (uint8(b4)<<4)|(uint8(d4>>8)&0x0F),
- uint8(d4),
- uint8(i2),
- uint8(op))
-}
-
func zRR(op, r1, r2 uint32, asm *[]byte) {
*asm = append(*asm, uint8(op>>8), (uint8(r1)<<4)|uint8(r2&0x0F))
}
@@ -4845,16 +4811,6 @@ func zRRF(op, r3_m3, m4, r1, r2 uint32, asm *[]byte) {
(uint8(r1)<<4)|uint8(r2&0x0F))
}
-func zRRS(op, r1, r2, b4, d4, m3 uint32, asm *[]byte) {
- *asm = append(*asm,
- uint8(op>>8),
- (uint8(r1)<<4)|uint8(r2&0x0F),
- (uint8(b4)<<4)|uint8((d4>>8)&0x0F),
- uint8(d4),
- uint8(m3)<<4,
- uint8(op))
-}
-
func zRS(op, r1, r3_m3, b2, d2 uint32, asm *[]byte) {
*asm = append(*asm,
uint8(op>>8),
@@ -4863,23 +4819,6 @@ func zRS(op, r1, r3_m3, b2, d2 uint32, asm *[]byte) {
uint8(d2))
}
-func zRSI(op, r1, r3, ri2 uint32, asm *[]byte) {
- *asm = append(*asm,
- uint8(op>>8),
- (uint8(r1)<<4)|uint8(r3&0x0F),
- uint8(ri2>>8),
- uint8(ri2))
-}
-
-func zRSL(op, l1, b2, d2 uint32, asm *[]byte) {
- *asm = append(*asm,
- uint8(op>>8),
- uint8(l1),
- (uint8(b2)<<4)|uint8((d2>>8)&0x0F),
- uint8(d2),
- uint8(op))
-}
-
func zRSY(op, r1, r3_m3, b2, d2 uint32, asm *[]byte) {
dl2 := uint16(d2) & 0x0FFF
*asm = append(*asm,
@@ -4909,16 +4848,6 @@ func zRXE(op, r1, x2, b2, d2, m3 uint32, asm *[]byte) {
uint8(op))
}
-func zRXF(op, r3, x2, b2, d2, m1 uint32, asm *[]byte) {
- *asm = append(*asm,
- uint8(op>>8),
- (uint8(r3)<<4)|uint8(x2&0x0F),
- (uint8(b2)<<4)|uint8((d2>>8)&0x0F),
- uint8(d2),
- uint8(m1)<<4,
- uint8(op))
-}
-
func zRXY(op, r1_m1, x2, b2, d2 uint32, asm *[]byte) {
dl2 := uint16(d2) & 0x0FFF
*asm = append(*asm,
@@ -4967,16 +4896,6 @@ func zSIY(op, i2, b1, d1 uint32, asm *[]byte) {
uint8(op))
}
-func zSMI(op, m1, b3, d3, ri2 uint32, asm *[]byte) {
- *asm = append(*asm,
- uint8(op>>8),
- uint8(m1)<<4,
- (uint8(b3)<<4)|uint8((d3>>8)&0x0F),
- uint8(d3),
- uint8(ri2>>8),
- uint8(ri2))
-}
-
// Expected argument values for the instruction formats.
//
// Format a1 a2 a3 a4 a5 a6
@@ -5006,26 +4925,6 @@ func zSS(f form, op, l1_r1, l2_i3_r3, b1_b2, d1_d2, b2_b4, d2_d4 uint32, asm *[]
uint8(d2_d4))
}
-func zSSE(op, b1, d1, b2, d2 uint32, asm *[]byte) {
- *asm = append(*asm,
- uint8(op>>8),
- uint8(op),
- (uint8(b1)<<4)|uint8((d1>>8)&0x0F),
- uint8(d1),
- (uint8(b2)<<4)|uint8((d2>>8)&0x0F),
- uint8(d2))
-}
-
-func zSSF(op, r3, b1, d1, b2, d2 uint32, asm *[]byte) {
- *asm = append(*asm,
- uint8(op>>8),
- (uint8(r3)<<4)|(uint8(op)&0x0F),
- (uint8(b1)<<4)|uint8((d1>>8)&0x0F),
- uint8(d1),
- (uint8(b2)<<4)|uint8((d2>>8)&0x0F),
- uint8(d2))
-}
-
func rxb(va, vb, vc, vd uint32) uint8 {
mask := uint8(0)
if va >= REG_V16 && va <= REG_V31 {
diff --git a/src/cmd/internal/obj/x86/asm6.go b/src/cmd/internal/obj/x86/asm6.go
index 33321341417764..0906f16eaae750 100644
--- a/src/cmd/internal/obj/x86/asm6.go
+++ b/src/cmd/internal/obj/x86/asm6.go
@@ -2037,23 +2037,6 @@ type nopPad struct {
n int32 // Size of the pad
}
-// requireAlignment ensures that the function alignment is at
-// least as high as a, which should be a power of two
-// and between 8 and 2048, inclusive.
-//
-// the boolean result indicates whether the alignment meets those constraints
-func requireAlignment(a int64, ctxt *obj.Link, cursym *obj.LSym) bool {
- if !((a&(a-1) == 0) && 8 <= a && a <= 2048) {
- ctxt.Diag("alignment value of an instruction must be a power of two and in the range [8, 2048], got %d\n", a)
- return false
- }
- // By default function alignment is 32 bytes for amd64
- if cursym.Func().Align < int32(a) {
- cursym.Func().Align = int32(a)
- }
- return true
-}
-
func span6(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) {
if ctxt.Retpoline && ctxt.Arch.Family == sys.I386 {
ctxt.Diag("-spectre=ret not supported on 386")
diff --git a/src/cmd/internal/obj/x86/obj6.go b/src/cmd/internal/obj/x86/obj6.go
index 7f308686c113eb..48287546b38769 100644
--- a/src/cmd/internal/obj/x86/obj6.go
+++ b/src/cmd/internal/obj/x86/obj6.go
@@ -852,11 +852,6 @@ func isZeroArgRuntimeCall(s *obj.LSym) bool {
return false
}
-func indir_cx(ctxt *obj.Link, a *obj.Addr) {
- a.Type = obj.TYPE_MEM
- a.Reg = REG_CX
-}
-
// loadG ensures the G is loaded into a register (either CX or REGG),
// appending instructions to p if necessary. It returns the new last
// instruction and the G register.
diff --git a/src/cmd/internal/objabi/pkgspecial.go b/src/cmd/internal/objabi/pkgspecial.go
index fe510160b31a05..94efa6883bd3e3 100644
--- a/src/cmd/internal/objabi/pkgspecial.go
+++ b/src/cmd/internal/objabi/pkgspecial.go
@@ -52,6 +52,7 @@ var runtimePkgs = []string{
"internal/runtime/cgroup",
"internal/runtime/exithook",
"internal/runtime/gc",
+ "internal/runtime/gc/scan",
"internal/runtime/maps",
"internal/runtime/math",
"internal/runtime/strconv",
diff --git a/src/cmd/internal/objfile/pe.go b/src/cmd/internal/objfile/pe.go
index 774760829c3d0a..c5c08264a9cdb0 100644
--- a/src/cmd/internal/objfile/pe.go
+++ b/src/cmd/internal/objfile/pe.go
@@ -174,8 +174,6 @@ func (f *peFile) goarch() string {
return "386"
case pe.IMAGE_FILE_MACHINE_AMD64:
return "amd64"
- case pe.IMAGE_FILE_MACHINE_ARMNT:
- return "arm"
case pe.IMAGE_FILE_MACHINE_ARM64:
return "arm64"
default:
diff --git a/src/cmd/internal/robustio/robustio.go b/src/cmd/internal/robustio/robustio.go
index 15b33773cf5f5b..73f88dcdd040af 100644
--- a/src/cmd/internal/robustio/robustio.go
+++ b/src/cmd/internal/robustio/robustio.go
@@ -37,17 +37,3 @@ func ReadFile(filename string) ([]byte, error) {
func RemoveAll(path string) error {
return removeAll(path)
}
-
-// IsEphemeralError reports whether err is one of the errors that the functions
-// in this package attempt to mitigate.
-//
-// Errors considered ephemeral include:
-// - syscall.ERROR_ACCESS_DENIED
-// - syscall.ERROR_FILE_NOT_FOUND
-// - internal/syscall/windows.ERROR_SHARING_VIOLATION
-//
-// This set may be expanded in the future; programs must not rely on the
-// non-ephemerality of any given error.
-func IsEphemeralError(err error) bool {
- return isEphemeralError(err)
-}
diff --git a/src/cmd/internal/script/engine.go b/src/cmd/internal/script/engine.go
index ba821712e5ec69..eb9344f6e2a1eb 100644
--- a/src/cmd/internal/script/engine.go
+++ b/src/cmd/internal/script/engine.go
@@ -72,14 +72,6 @@ type Engine struct {
Quiet bool
}
-// NewEngine returns an Engine configured with a basic set of commands and conditions.
-func NewEngine() *Engine {
- return &Engine{
- Cmds: DefaultCmds(),
- Conds: DefaultConds(),
- }
-}
-
// A Cmd is a command that is available to a script.
type Cmd interface {
// Run begins running the command.
diff --git a/src/cmd/internal/testdir/testdir_test.go b/src/cmd/internal/testdir/testdir_test.go
index 666645873bbc70..5781276afadba7 100644
--- a/src/cmd/internal/testdir/testdir_test.go
+++ b/src/cmd/internal/testdir/testdir_test.go
@@ -233,19 +233,23 @@ var stdlibImportcfgFile = sync.OnceValue(func() string {
return filename
})
-func linkFile(runcmd runCmd, goname string, importcfg string, ldflags []string) (err error) {
+// linkFile links infile with the given importcfg and ldflags, writes to outfile.
+// infile can be the name of an object file or a go source file.
+func linkFile(runcmd runCmd, outfile, infile string, importcfg string, ldflags []string) (err error) {
if importcfg == "" {
importcfg = stdlibImportcfgFile()
}
- pfile := strings.ReplaceAll(goname, ".go", ".o")
- cmd := []string{goTool, "tool", "link", "-w", "-o", "a.exe", "-importcfg=" + importcfg}
+ if strings.HasSuffix(infile, ".go") {
+ infile = infile[:len(infile)-3] + ".o"
+ }
+ cmd := []string{goTool, "tool", "link", "-s", "-w", "-buildid=test", "-o", outfile, "-importcfg=" + importcfg}
if *linkshared {
cmd = append(cmd, "-linkshared", "-installsuffix=dynlink")
}
if ldflags != nil {
cmd = append(cmd, ldflags...)
}
- cmd = append(cmd, pfile)
+ cmd = append(cmd, infile)
_, err = runcmd(cmd...)
return
}
@@ -853,7 +857,7 @@ func (t test) run() error {
}
if i == len(pkgs)-1 {
- err = linkFile(runcmd, pkg.files[0], importcfgfile, ldflags)
+ err = linkFile(runcmd, "a.exe", pkg.files[0], importcfgfile, ldflags)
if err != nil {
return err
}
@@ -974,8 +978,7 @@ func (t test) run() error {
if err != nil {
return err
}
- cmd = []string{goTool, "tool", "link", "-importcfg=" + stdlibImportcfgFile(), "-o", "a.exe", "all.a"}
- _, err = runcmd(cmd...)
+ err = linkFile(runcmd, "a.exe", "all.a", stdlibImportcfgFile(), nil)
if err != nil {
return err
}
@@ -1033,9 +1036,7 @@ func (t test) run() error {
return err
}
exe := filepath.Join(tempDir, "test.exe")
- cmd := []string{goTool, "tool", "link", "-s", "-w", "-importcfg=" + stdlibImportcfgFile()}
- cmd = append(cmd, "-o", exe, pkg)
- if _, err := runcmd(cmd...); err != nil {
+ if err := linkFile(runcmd, exe, pkg, stdlibImportcfgFile(), nil); err != nil {
return err
}
out, err = runcmd(append([]string{exe}, args...)...)
diff --git a/src/cmd/link/internal/amd64/asm.go b/src/cmd/link/internal/amd64/asm.go
index 7754cf9bfa58bb..b8127a2538ea18 100644
--- a/src/cmd/link/internal/amd64/asm.go
+++ b/src/cmd/link/internal/amd64/asm.go
@@ -40,10 +40,6 @@ import (
"log"
)
-func PADDR(x uint32) uint32 {
- return x &^ 0x80000000
-}
-
func gentext(ctxt *ld.Link, ldr *loader.Loader) {
initfunc, addmoduledata := ld.PrepareAddmoduledata(ctxt)
if initfunc == nil {
diff --git a/src/cmd/link/internal/arm/obj.go b/src/cmd/link/internal/arm/obj.go
index 3a1830ce10e117..c17dca40b30eab 100644
--- a/src/cmd/link/internal/arm/obj.go
+++ b/src/cmd/link/internal/arm/obj.go
@@ -105,9 +105,5 @@ func archinit(ctxt *ld.Link) {
if *ld.FlagTextAddr == -1 {
*ld.FlagTextAddr = ld.Rnd(0x10000, *ld.FlagRound) + int64(ld.HEADR)
}
-
- case objabi.Hwindows: /* PE executable */
- // ld.HEADR, ld.FlagTextAddr, ld.FlagRound are set in ld.Peinit
- return
}
}
diff --git a/src/cmd/link/internal/ld/config.go b/src/cmd/link/internal/ld/config.go
index b2d4ad7cb0e7f6..802fb35aee4e65 100644
--- a/src/cmd/link/internal/ld/config.go
+++ b/src/cmd/link/internal/ld/config.go
@@ -34,7 +34,7 @@ func (mode *BuildMode) Set(s string) error {
return fmt.Errorf("invalid buildmode: %q", s)
case "exe":
switch buildcfg.GOOS + "/" + buildcfg.GOARCH {
- case "darwin/arm64", "windows/arm", "windows/arm64": // On these platforms, everything is PIE
+ case "darwin/arm64", "windows/arm64": // On these platforms, everything is PIE
*mode = BuildModePIE
default:
*mode = BuildModeExe
diff --git a/src/cmd/link/internal/ld/decodesym.go b/src/cmd/link/internal/ld/decodesym.go
index 8c9fa8efab5a1e..949ea5590589ae 100644
--- a/src/cmd/link/internal/ld/decodesym.go
+++ b/src/cmd/link/internal/ld/decodesym.go
@@ -244,35 +244,6 @@ func decodetypeGcmask(ctxt *Link, s loader.Sym) []byte {
return ctxt.loader.Data(mask)
}
-// Type.commonType.gc
-func decodetypeGcprog(ctxt *Link, s loader.Sym) []byte {
- if ctxt.loader.SymType(s) == sym.SDYNIMPORT {
- symData := ctxt.loader.Data(s)
- addr := decodetypeGcprogShlib(ctxt, symData)
- sect := findShlibSection(ctxt, ctxt.loader.SymPkg(s), addr)
- if sect != nil {
- // A gcprog is a 4-byte uint32 indicating length, followed by
- // the actual program.
- progsize := make([]byte, 4)
- _, err := sect.ReadAt(progsize, int64(addr-sect.Addr))
- if err != nil {
- log.Fatal(err)
- }
- progbytes := make([]byte, ctxt.Arch.ByteOrder.Uint32(progsize))
- _, err = sect.ReadAt(progbytes, int64(addr-sect.Addr+4))
- if err != nil {
- log.Fatal(err)
- }
- return append(progsize, progbytes...)
- }
- Exitf("cannot find gcprog for %s", ctxt.loader.SymName(s))
- return nil
- }
- relocs := ctxt.loader.Relocs(s)
- rs := decodeRelocSym(ctxt.loader, s, &relocs, 2*int32(ctxt.Arch.PtrSize)+8+1*int32(ctxt.Arch.PtrSize))
- return ctxt.loader.Data(rs)
-}
-
// Find the elf.Section of a given shared library that contains a given address.
func findShlibSection(ctxt *Link, path string, addr uint64) *elf.Section {
for _, shlib := range ctxt.Shlibs {
diff --git a/src/cmd/link/internal/ld/fallocate_test.go b/src/cmd/link/internal/ld/fallocate_test.go
index d95fec788a616b..163ffc26e8406a 100644
--- a/src/cmd/link/internal/ld/fallocate_test.go
+++ b/src/cmd/link/internal/ld/fallocate_test.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build darwin || (freebsd && go1.21) || linux
+//go:build darwin || (freebsd && go1.21) || linux || (netbsd && go1.25)
package ld
diff --git a/src/cmd/link/internal/ld/main.go b/src/cmd/link/internal/ld/main.go
index 6a684890be0a03..cc6b2fd37a3d79 100644
--- a/src/cmd/link/internal/ld/main.go
+++ b/src/cmd/link/internal/ld/main.go
@@ -292,14 +292,6 @@ func Main(arch *sys.Arch, theArch Arch) {
interpreter = *flagInterpreter
- if *flagBuildid == "" && ctxt.Target.IsOpenbsd() {
- // TODO(jsing): Remove once direct syscalls are no longer in use.
- // OpenBSD 6.7 onwards will not permit direct syscalls from a
- // dynamically linked binary unless it identifies the binary
- // contains a .note.go.buildid ELF note. See issue #36435.
- *flagBuildid = "go-openbsd"
- }
-
if *flagHostBuildid == "" && *flagBuildid != "" {
*flagHostBuildid = "gobuildid"
}
diff --git a/src/cmd/link/internal/ld/outbuf_freebsd.go b/src/cmd/link/internal/ld/outbuf_bsd.go
similarity index 90%
rename from src/cmd/link/internal/ld/outbuf_freebsd.go
rename to src/cmd/link/internal/ld/outbuf_bsd.go
index 7e718c1408e730..5dce83fefd2d7c 100644
--- a/src/cmd/link/internal/ld/outbuf_freebsd.go
+++ b/src/cmd/link/internal/ld/outbuf_bsd.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build freebsd && go1.21
+//go:build (freebsd && go1.21) || (netbsd && go1.25)
package ld
diff --git a/src/cmd/link/internal/ld/outbuf_mmap.go b/src/cmd/link/internal/ld/outbuf_mmap.go
index b8b8dc5158e918..e92a06dcb25f80 100644
--- a/src/cmd/link/internal/ld/outbuf_mmap.go
+++ b/src/cmd/link/internal/ld/outbuf_mmap.go
@@ -28,7 +28,7 @@ func (out *OutBuf) Mmap(filesize uint64) (err error) {
// Some file systems do not support fallocate. We ignore that error as linking
// can still take place, but you might SIGBUS when you write to the mmapped
// area.
- if err != syscall.ENOTSUP && err != syscall.EPERM && err != errNoFallocate {
+ if err != syscall.ENOTSUP && err != syscall.EOPNOTSUPP && err != syscall.EPERM && err != errNoFallocate {
return err
}
}
diff --git a/src/cmd/link/internal/ld/outbuf_nofallocate.go b/src/cmd/link/internal/ld/outbuf_nofallocate.go
index 435be5e09fe5b9..9169379e23897b 100644
--- a/src/cmd/link/internal/ld/outbuf_nofallocate.go
+++ b/src/cmd/link/internal/ld/outbuf_nofallocate.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build !darwin && !(freebsd && go1.21) && !linux
+//go:build !darwin && !(freebsd && go1.21) && !linux && !(netbsd && go1.25)
package ld
diff --git a/src/cmd/link/internal/ld/pe.go b/src/cmd/link/internal/ld/pe.go
index fbfd928e873be5..c290410b0ecb74 100644
--- a/src/cmd/link/internal/ld/pe.go
+++ b/src/cmd/link/internal/ld/pe.go
@@ -913,8 +913,6 @@ func (f *peFile) writeFileHeader(ctxt *Link) {
fh.Machine = pe.IMAGE_FILE_MACHINE_AMD64
case sys.I386:
fh.Machine = pe.IMAGE_FILE_MACHINE_I386
- case sys.ARM:
- fh.Machine = pe.IMAGE_FILE_MACHINE_ARMNT
case sys.ARM64:
fh.Machine = pe.IMAGE_FILE_MACHINE_ARM64
}
diff --git a/src/cmd/pprof/doc.go b/src/cmd/pprof/doc.go
index 59e1a47cd19ff2..f55a50a60f196e 100644
--- a/src/cmd/pprof/doc.go
+++ b/src/cmd/pprof/doc.go
@@ -12,5 +12,5 @@
//
// go tool pprof -h
//
-// For an example, see https://blog.golang.org/profiling-go-programs.
+// For an example, see https://go.dev/blog/pprof.
package main
diff --git a/src/cmd/trace/viewer.go b/src/cmd/trace/viewer.go
index da83e81ab9327e..f8abcec2cb7df1 100644
--- a/src/cmd/trace/viewer.go
+++ b/src/cmd/trace/viewer.go
@@ -9,7 +9,6 @@ import (
"internal/trace"
"internal/trace/traceviewer"
"slices"
- "time"
)
// viewerFrames returns the frames of the stack of ev. The given frame slice is
@@ -40,7 +39,3 @@ func viewerGState(state trace.GoState, inMarkAssist bool) traceviewer.GState {
panic(fmt.Sprintf("unknown GoState: %s", state.String()))
}
}
-
-func viewerTime(t time.Duration) float64 {
- return float64(t) / float64(time.Microsecond)
-}
diff --git a/src/cmd/vendor/github.com/google/pprof/driver/driver.go b/src/cmd/vendor/github.com/google/pprof/driver/driver.go
index 989aac32ffef4c..657a6735214429 100644
--- a/src/cmd/vendor/github.com/google/pprof/driver/driver.go
+++ b/src/cmd/vendor/github.com/google/pprof/driver/driver.go
@@ -17,6 +17,7 @@ package driver
import (
"io"
+ "maps"
"net/http"
"regexp"
"time"
@@ -293,8 +294,6 @@ type internalSymbolizer struct {
func (s *internalSymbolizer) Symbolize(mode string, srcs plugin.MappingSources, prof *profile.Profile) error {
isrcs := MappingSources{}
- for m, s := range srcs {
- isrcs[m] = s
- }
+ maps.Copy(isrcs, srcs)
return s.Symbolizer.Symbolize(mode, isrcs, prof)
}
diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/config.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/config.go
index 090230e2a78775..184de397ef57e4 100644
--- a/src/cmd/vendor/github.com/google/pprof/internal/driver/config.go
+++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/config.go
@@ -4,6 +4,7 @@ import (
"fmt"
"net/url"
"reflect"
+ "slices"
"strconv"
"strings"
"sync"
@@ -226,11 +227,9 @@ func (cfg *config) set(f configField, value string) error {
case *string:
if len(f.choices) > 0 {
// Verify that value is one of the allowed choices.
- for _, choice := range f.choices {
- if choice == value {
- *ptr = value
- return nil
- }
+ if slices.Contains(f.choices, value) {
+ *ptr = value
+ return nil
}
return fmt.Errorf("invalid %q value %q", f.name, value)
}
diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/driver_focus.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/driver_focus.go
index fd05adb1469993..fb9c35c6010730 100644
--- a/src/cmd/vendor/github.com/google/pprof/internal/driver/driver_focus.go
+++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/driver_focus.go
@@ -17,6 +17,7 @@ package driver
import (
"fmt"
"regexp"
+ "slices"
"strconv"
"strings"
@@ -148,10 +149,8 @@ func compileTagFilter(name, value string, numLabelUnits map[string]string, ui pl
return func(s *profile.Sample) bool {
if vals, ok := s.Label[wantKey]; ok {
for _, rx := range rfx {
- for _, val := range vals {
- if rx.MatchString(val) {
- return true
- }
+ if slices.ContainsFunc(vals, rx.MatchString) {
+ return true
}
}
}
diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/fetch.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/fetch.go
index a94ddf6adb71e3..969c1dac11ef35 100644
--- a/src/cmd/vendor/github.com/google/pprof/internal/driver/fetch.go
+++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/fetch.go
@@ -174,10 +174,7 @@ func chunkedGrab(sources []profileSource, fetch plugin.Fetcher, obj plugin.ObjTo
var count int
for start := 0; start < len(sources); start += chunkSize {
- end := start + chunkSize
- if end > len(sources) {
- end = len(sources)
- }
+ end := min(start+chunkSize, len(sources))
chunkP, chunkMsrc, chunkSave, chunkCount, chunkErr := concurrentGrab(sources[start:end], fetch, obj, ui, tr)
switch {
case chunkErr != nil:
diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/html/common.css b/src/cmd/vendor/github.com/google/pprof/internal/driver/html/common.css
index 0a897ce2916927..8c7693d0a71c9e 100644
--- a/src/cmd/vendor/github.com/google/pprof/internal/driver/html/common.css
+++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/html/common.css
@@ -218,10 +218,12 @@ a {
}
#graph {
overflow: hidden;
+ width: 100%;
+ height: 100%;
}
#graph svg {
width: 100%;
- height: auto;
+ height: 100%;
padding: 10px;
}
#content.source .filename {
diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/html/stacks.js b/src/cmd/vendor/github.com/google/pprof/internal/driver/html/stacks.js
index 7db06996da499c..8ba2f35c373d8d 100644
--- a/src/cmd/vendor/github.com/google/pprof/internal/driver/html/stacks.js
+++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/html/stacks.js
@@ -579,7 +579,7 @@ function stackViewer(stacks, nodes) {
}
// percentText returns text that displays v in appropriate units alongside its
- // percentange.
+ // percentage.
function percentText(v) {
function percent(v, total) {
return Number(((100.0 * v) / total).toFixed(1)) + '%';
diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/webui.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/webui.go
index dd628f7c2dd7c3..ac7465c93e6c12 100644
--- a/src/cmd/vendor/github.com/google/pprof/internal/driver/webui.go
+++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/webui.go
@@ -19,11 +19,13 @@ import (
"fmt"
"html/template"
"io"
+ "maps"
"net"
"net/http"
gourl "net/url"
"os"
"os/exec"
+ "slices"
"strconv"
"strings"
"time"
@@ -107,9 +109,7 @@ func serveWebInterface(hostport string, p *profile.Profile, o *plugin.Options, d
for n, c := range pprofCommands {
ui.help[n] = c.description
}
- for n, help := range configHelp {
- ui.help[n] = help
- }
+ maps.Copy(ui.help, configHelp)
ui.help["details"] = "Show information about the profile and this view"
ui.help["graph"] = "Display profile as a directed graph"
ui.help["flamegraph"] = "Display profile as a flame graph"
@@ -227,12 +227,7 @@ func redirectWithQuery(path string, code int) http.HandlerFunc {
}
func isLocalhost(host string) bool {
- for _, v := range []string{"localhost", "127.0.0.1", "[::1]", "::1"} {
- if host == v {
- return true
- }
- }
- return false
+ return slices.Contains([]string{"localhost", "127.0.0.1", "[::1]", "::1"}, host)
}
func openBrowser(url string, o *plugin.Options) {
diff --git a/src/cmd/vendor/github.com/google/pprof/internal/elfexec/elfexec.go b/src/cmd/vendor/github.com/google/pprof/internal/elfexec/elfexec.go
index 3f5b09b5e707f8..37884033d3c8c6 100644
--- a/src/cmd/vendor/github.com/google/pprof/internal/elfexec/elfexec.go
+++ b/src/cmd/vendor/github.com/google/pprof/internal/elfexec/elfexec.go
@@ -230,7 +230,7 @@ func GetBase(fh *elf.FileHeader, loadSegment *elf.ProgHeader, stextOffset *uint6
}
if stextOffset == nil && start > 0 && start < 0x8000000000000000 {
// A regular user-mode executable. Compute the base offset using same
- // arithmetics as in ET_DYN case below, see the explanation there.
+ // arithmetic as in ET_DYN case below, see the explanation there.
// Ideally, the condition would just be "stextOffset == nil" as that
// represents the address of _stext symbol in the vmlinux image. Alas,
// the caller may skip reading it from the binary (it's expensive to scan
@@ -313,7 +313,7 @@ func ProgramHeadersForMapping(phdrs []elf.ProgHeader, mapOff, mapSz uint64) []*e
// value is dependent on the memory management unit of the CPU. The page
// size is 4KB virtually on all the architectures that we care about, so we
// define this metric as a constant. If we encounter architectures where
- // page sie is not 4KB, we must try to guess the page size on the system
+ // page size is not 4KB, we must try to guess the page size on the system
// where the profile was collected, possibly using the architecture
// specified in the ELF file header.
pageSize = 4096
diff --git a/src/cmd/vendor/github.com/google/pprof/internal/graph/graph.go b/src/cmd/vendor/github.com/google/pprof/internal/graph/graph.go
index 8abbd83f76513d..c4b0d4869f5316 100644
--- a/src/cmd/vendor/github.com/google/pprof/internal/graph/graph.go
+++ b/src/cmd/vendor/github.com/google/pprof/internal/graph/graph.go
@@ -336,12 +336,8 @@ func newGraph(prof *profile.Profile, o *Options) (*Graph, map[uint64]Nodes) {
if dw == 0 && w == 0 {
continue
}
- for k := range seenNode {
- delete(seenNode, k)
- }
- for k := range seenEdge {
- delete(seenEdge, k)
- }
+ clear(seenNode)
+ clear(seenEdge)
var parent *Node
// A residual edge goes over one or more nodes that were not kept.
residual := false
@@ -850,10 +846,7 @@ func (g *Graph) selectTopNodes(maxNodes int, visualMode bool) Nodes {
// If generating a visual graph, count tags as nodes. Update
// maxNodes to account for them.
for i, n := range g.Nodes {
- tags := countTags(n)
- if tags > maxNodelets {
- tags = maxNodelets
- }
+ tags := min(countTags(n), maxNodelets)
if count += tags + 1; count >= maxNodes {
maxNodes = i + 1
break
diff --git a/src/cmd/vendor/github.com/google/pprof/internal/measurement/measurement.go b/src/cmd/vendor/github.com/google/pprof/internal/measurement/measurement.go
index e5b7dbc6c4a88e..479235c0a615bd 100644
--- a/src/cmd/vendor/github.com/google/pprof/internal/measurement/measurement.go
+++ b/src/cmd/vendor/github.com/google/pprof/internal/measurement/measurement.go
@@ -18,6 +18,7 @@ package measurement
import (
"fmt"
"math"
+ "slices"
"strings"
"time"
@@ -197,16 +198,14 @@ type UnitType struct {
// nil if the unit with such alias is not found.
func (ut UnitType) findByAlias(alias string) *Unit {
for _, u := range ut.Units {
- for _, a := range u.aliases {
- if alias == a {
- return &u
- }
+ if slices.Contains(u.aliases, alias) {
+ return &u
}
}
return nil
}
-// sniffUnit simpifies the input alias and returns the unit associated with the
+// sniffUnit simplifies the input alias and returns the unit associated with the
// specified alias. It returns nil if the unit with such alias is not found.
func (ut UnitType) sniffUnit(unit string) *Unit {
unit = strings.ToLower(unit)
diff --git a/src/cmd/vendor/github.com/google/pprof/internal/report/report.go b/src/cmd/vendor/github.com/google/pprof/internal/report/report.go
index 9d52872b7d4383..ad8b84bf808638 100644
--- a/src/cmd/vendor/github.com/google/pprof/internal/report/report.go
+++ b/src/cmd/vendor/github.com/google/pprof/internal/report/report.go
@@ -569,7 +569,7 @@ func symbolsFromBinaries(prof *profile.Profile, g *graph.Graph, rx *regexp.Regex
return objSyms
}
-// objSym represents a symbol identified from a binary. It includes
+// objSymbol represents a symbol identified from a binary. It includes
// the SymbolInfo from the disasm package and the base that must be
// added to correspond to sample addresses
type objSymbol struct {
diff --git a/src/cmd/vendor/github.com/google/pprof/internal/report/source.go b/src/cmd/vendor/github.com/google/pprof/internal/report/source.go
index d2148607ea2d10..f17952faee1f0a 100644
--- a/src/cmd/vendor/github.com/google/pprof/internal/report/source.go
+++ b/src/cmd/vendor/github.com/google/pprof/internal/report/source.go
@@ -25,6 +25,7 @@ import (
"os"
"path/filepath"
"regexp"
+ "slices"
"sort"
"strconv"
"strings"
@@ -490,7 +491,7 @@ func (sp *sourcePrinter) addStack(addr uint64, frames []plugin.Frame) {
file.lines[f.Line] = append(file.lines[f.Line], sourceInst{addr, stack})
// Remember the first function name encountered per source line
- // and assume that that line belongs to that function.
+ // and assume that line belongs to that function.
if _, ok := file.funcName[f.Line]; !ok {
file.funcName[f.Line] = f.Func
}
@@ -553,7 +554,7 @@ func (sp *sourcePrinter) splitIntoRanges(prof *profile.Profile, addrMap map[uint
unprocessed = append(unprocessed, addr)
}
}
- sort.Slice(addrs, func(i, j int) bool { return addrs[i] < addrs[j] })
+ slices.Sort(addrs)
const expand = 500 // How much to expand range to pick up nearby addresses.
var result []addressRange
@@ -769,10 +770,7 @@ func (sp *sourcePrinter) functions(f *sourceFile) []sourceFunction {
}
} else {
// Find gap from predecessor and divide between predecessor and f.
- halfGap := (f.begin - funcs[i-1].end) / 2
- if halfGap > expand {
- halfGap = expand
- }
+ halfGap := min((f.begin-funcs[i-1].end)/2, expand)
funcs[i-1].end += halfGap
f.begin -= halfGap
}
diff --git a/src/cmd/vendor/github.com/google/pprof/internal/symbolizer/symbolizer.go b/src/cmd/vendor/github.com/google/pprof/internal/symbolizer/symbolizer.go
index 95c15b136655c7..3a279eca753d92 100644
--- a/src/cmd/vendor/github.com/google/pprof/internal/symbolizer/symbolizer.go
+++ b/src/cmd/vendor/github.com/google/pprof/internal/symbolizer/symbolizer.go
@@ -281,14 +281,11 @@ func demanglerModeToOptions(demanglerMode string) []demangle.Option {
panic(fmt.Sprintf("unknown demanglerMode %s", demanglerMode))
}
-func demangleSingleFunction(fn *profile.Function, options []demangle.Option) {
+func demangleSingleFunction(fn *profile.Function, opts []demangle.Option) {
if fn.Name != "" && fn.SystemName != fn.Name {
return // Already demangled.
}
- // Copy the options because they may be updated by the call.
- o := make([]demangle.Option, len(options))
- copy(o, options)
- if demangled := demangle.Filter(fn.SystemName, o...); demangled != fn.SystemName {
+ if demangled := demangle.Filter(fn.SystemName, opts...); demangled != fn.SystemName {
fn.Name = demangled
return
}
@@ -296,7 +293,7 @@ func demangleSingleFunction(fn *profile.Function, options []demangle.Option) {
// OSX has all the symbols prefixed with extra '_' so lets try
// once more without it
if strings.HasPrefix(fn.SystemName, "_") {
- if demangled := demangle.Filter(fn.SystemName[1:], o...); demangled != fn.SystemName {
+ if demangled := demangle.Filter(fn.SystemName[1:], opts...); demangled != fn.SystemName[1:] {
fn.Name = demangled
return
}
@@ -306,7 +303,7 @@ func demangleSingleFunction(fn *profile.Function, options []demangle.Option) {
// already demangled.
name := fn.SystemName
if looksLikeDemangledCPlusPlus(name) {
- for _, o := range options {
+ for _, o := range opts {
switch o {
case demangle.NoParams:
name = removeMatching(name, '(', ')')
diff --git a/src/cmd/vendor/github.com/google/pprof/profile/merge.go b/src/cmd/vendor/github.com/google/pprof/profile/merge.go
index ba4d746407c133..8a51690be44c59 100644
--- a/src/cmd/vendor/github.com/google/pprof/profile/merge.go
+++ b/src/cmd/vendor/github.com/google/pprof/profile/merge.go
@@ -17,6 +17,7 @@ package profile
import (
"encoding/binary"
"fmt"
+ "slices"
"sort"
"strconv"
"strings"
@@ -78,12 +79,10 @@ func Merge(srcs []*Profile) (*Profile, error) {
}
}
- for _, s := range p.Sample {
- if isZeroSample(s) {
- // If there are any zero samples, re-merge the profile to GC
- // them.
- return Merge([]*Profile{p})
- }
+ if slices.ContainsFunc(p.Sample, isZeroSample) {
+ // If there are any zero samples, re-merge the profile to GC
+ // them.
+ return Merge([]*Profile{p})
}
return p, nil
diff --git a/src/cmd/vendor/github.com/google/pprof/profile/profile.go b/src/cmd/vendor/github.com/google/pprof/profile/profile.go
index f47a243903e405..43f561d4455415 100644
--- a/src/cmd/vendor/github.com/google/pprof/profile/profile.go
+++ b/src/cmd/vendor/github.com/google/pprof/profile/profile.go
@@ -24,6 +24,7 @@ import (
"math"
"path/filepath"
"regexp"
+ "slices"
"sort"
"strings"
"sync"
@@ -734,12 +735,7 @@ func (p *Profile) RemoveLabel(key string) {
// HasLabel returns true if a sample has a label with indicated key and value.
func (s *Sample) HasLabel(key, value string) bool {
- for _, v := range s.Label[key] {
- if v == value {
- return true
- }
- }
- return false
+ return slices.Contains(s.Label[key], value)
}
// SetNumLabel sets the specified key to the specified value for all samples in the
@@ -852,7 +848,17 @@ func (p *Profile) HasFileLines() bool {
// "[vdso]", "[vsyscall]" and some others, see the code.
func (m *Mapping) Unsymbolizable() bool {
name := filepath.Base(m.File)
- return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/") || m.File == "//anon"
+ switch {
+ case strings.HasPrefix(name, "["):
+ case strings.HasPrefix(name, "linux-vdso"):
+ case strings.HasPrefix(m.File, "/dev/dri/"):
+ case m.File == "//anon":
+ case m.File == "":
+ case strings.HasPrefix(m.File, "/memfd:"):
+ default:
+ return false
+ }
+ return true
}
// Copy makes a fully independent copy of a profile.
diff --git a/src/cmd/vendor/github.com/google/pprof/profile/prune.go b/src/cmd/vendor/github.com/google/pprof/profile/prune.go
index b2f9fd54660d9a..7bba31e8ceadc0 100644
--- a/src/cmd/vendor/github.com/google/pprof/profile/prune.go
+++ b/src/cmd/vendor/github.com/google/pprof/profile/prune.go
@@ -19,6 +19,7 @@ package profile
import (
"fmt"
"regexp"
+ "slices"
"strings"
)
@@ -40,13 +41,7 @@ func simplifyFunc(f string) string {
// Account for unsimplified names -- try to remove the argument list by trimming
// starting from the first '(', but skipping reserved names that have '('.
for _, ind := range bracketRx.FindAllStringSubmatchIndex(funcName, -1) {
- foundReserved := false
- for _, res := range reservedNames {
- if funcName[ind[0]:ind[1]] == res {
- foundReserved = true
- break
- }
- }
+ foundReserved := slices.Contains(reservedNames, funcName[ind[0]:ind[1]])
if !foundReserved {
funcName = funcName[:ind[0]]
break
diff --git a/src/cmd/vendor/github.com/google/pprof/third_party/svgpan/svgpan.js b/src/cmd/vendor/github.com/google/pprof/third_party/svgpan/svgpan.js
index 2c4951ecd30f48..dda7f0f0417b18 100644
--- a/src/cmd/vendor/github.com/google/pprof/third_party/svgpan/svgpan.js
+++ b/src/cmd/vendor/github.com/google/pprof/third_party/svgpan/svgpan.js
@@ -3,7 +3,7 @@
* ======================
*
* Given an unique existing element with id "viewport" (or when missing, the
- * first g-element), including the the library into any SVG adds the following
+ * first g-element), including the library into any SVG adds the following
* capabilities:
*
* - Mouse panning
diff --git a/src/cmd/vendor/github.com/ianlancetaylor/demangle/demangle.go b/src/cmd/vendor/github.com/ianlancetaylor/demangle/demangle.go
index dc238e07734d27..b812d35ac2db9e 100644
--- a/src/cmd/vendor/github.com/ianlancetaylor/demangle/demangle.go
+++ b/src/cmd/vendor/github.com/ianlancetaylor/demangle/demangle.go
@@ -186,7 +186,7 @@ func ToAST(name string, options ...Option) (AST, error) {
i := 0
for i < len(options) {
if options[i] == NoParams {
- options = append(options[:i], options[i+1:]...)
+ options = append(options[:i:i], options[i+1:]...)
} else {
i++
}
diff --git a/src/cmd/vendor/golang.org/x/sync/errgroup/errgroup.go b/src/cmd/vendor/golang.org/x/sync/errgroup/errgroup.go
index cb6bb9ad3ba9f8..1d8cffae8cfc87 100644
--- a/src/cmd/vendor/golang.org/x/sync/errgroup/errgroup.go
+++ b/src/cmd/vendor/golang.org/x/sync/errgroup/errgroup.go
@@ -12,8 +12,6 @@ package errgroup
import (
"context"
"fmt"
- "runtime"
- "runtime/debug"
"sync"
)
@@ -33,10 +31,6 @@ type Group struct {
errOnce sync.Once
err error
-
- mu sync.Mutex
- panicValue any // = PanicError | PanicValue; non-nil if some Group.Go goroutine panicked.
- abnormal bool // some Group.Go goroutine terminated abnormally (panic or goexit).
}
func (g *Group) done() {
@@ -56,22 +50,13 @@ func WithContext(ctx context.Context) (*Group, context.Context) {
return &Group{cancel: cancel}, ctx
}
-// Wait blocks until all function calls from the Go method have returned
-// normally, then returns the first non-nil error (if any) from them.
-//
-// If any of the calls panics, Wait panics with a [PanicValue];
-// and if any of them calls [runtime.Goexit], Wait calls runtime.Goexit.
+// Wait blocks until all function calls from the Go method have returned, then
+// returns the first non-nil error (if any) from them.
func (g *Group) Wait() error {
g.wg.Wait()
if g.cancel != nil {
g.cancel(g.err)
}
- if g.panicValue != nil {
- panic(g.panicValue)
- }
- if g.abnormal {
- runtime.Goexit()
- }
return g.err
}
@@ -81,53 +66,31 @@ func (g *Group) Wait() error {
// It blocks until the new goroutine can be added without the number of
// goroutines in the group exceeding the configured limit.
//
-// The first goroutine in the group that returns a non-nil error, panics, or
-// invokes [runtime.Goexit] will cancel the associated Context, if any.
+// The first goroutine in the group that returns a non-nil error will
+// cancel the associated Context, if any. The error will be returned
+// by Wait.
func (g *Group) Go(f func() error) {
if g.sem != nil {
g.sem <- token{}
}
- g.add(f)
-}
-
-func (g *Group) add(f func() error) {
g.wg.Add(1)
go func() {
defer g.done()
- normalReturn := false
- defer func() {
- if normalReturn {
- return
- }
- v := recover()
- g.mu.Lock()
- defer g.mu.Unlock()
- if !g.abnormal {
- if g.cancel != nil {
- g.cancel(g.err)
- }
- g.abnormal = true
- }
- if v != nil && g.panicValue == nil {
- switch v := v.(type) {
- case error:
- g.panicValue = PanicError{
- Recovered: v,
- Stack: debug.Stack(),
- }
- default:
- g.panicValue = PanicValue{
- Recovered: v,
- Stack: debug.Stack(),
- }
- }
- }
- }()
- err := f()
- normalReturn = true
- if err != nil {
+ // It is tempting to propagate panics from f()
+ // up to the goroutine that calls Wait, but
+ // it creates more problems than it solves:
+ // - it delays panics arbitrarily,
+ // making bugs harder to detect;
+ // - it turns f's panic stack into a mere value,
+ // hiding it from crash-monitoring tools;
+ // - it risks deadlocks that hide the panic entirely,
+ // if f's panic leaves the program in a state
+ // that prevents the Wait call from being reached.
+ // See #53757, #74275, #74304, #74306.
+
+ if err := f(); err != nil {
g.errOnce.Do(func() {
g.err = err
if g.cancel != nil {
@@ -152,7 +115,19 @@ func (g *Group) TryGo(f func() error) bool {
}
}
- g.add(f)
+ g.wg.Add(1)
+ go func() {
+ defer g.done()
+
+ if err := f(); err != nil {
+ g.errOnce.Do(func() {
+ g.err = err
+ if g.cancel != nil {
+ g.cancel(g.err)
+ }
+ })
+ }
+ }()
return true
}
@@ -174,34 +149,3 @@ func (g *Group) SetLimit(n int) {
}
g.sem = make(chan token, n)
}
-
-// PanicError wraps an error recovered from an unhandled panic
-// when calling a function passed to Go or TryGo.
-type PanicError struct {
- Recovered error
- Stack []byte // result of call to [debug.Stack]
-}
-
-func (p PanicError) Error() string {
- if len(p.Stack) > 0 {
- return fmt.Sprintf("recovered from errgroup.Group: %v\n%s", p.Recovered, p.Stack)
- }
- return fmt.Sprintf("recovered from errgroup.Group: %v", p.Recovered)
-}
-
-func (p PanicError) Unwrap() error { return p.Recovered }
-
-// PanicValue wraps a value that does not implement the error interface,
-// recovered from an unhandled panic when calling a function passed to Go or
-// TryGo.
-type PanicValue struct {
- Recovered any
- Stack []byte // result of call to [debug.Stack]
-}
-
-func (p PanicValue) String() string {
- if len(p.Stack) > 0 {
- return fmt.Sprintf("recovered from errgroup.Group: %v\n%s", p.Recovered, p.Stack)
- }
- return fmt.Sprintf("recovered from errgroup.Group: %v", p.Recovered)
-}
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/mkerrors.sh b/src/cmd/vendor/golang.org/x/sys/unix/mkerrors.sh
index 6ab02b6c3122af..d1c8b2640ebd4b 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/mkerrors.sh
+++ b/src/cmd/vendor/golang.org/x/sys/unix/mkerrors.sh
@@ -349,6 +349,9 @@ struct ltchars {
#define _HIDIOCGRAWPHYS HIDIOCGRAWPHYS(_HIDIOCGRAWPHYS_LEN)
#define _HIDIOCGRAWUNIQ HIDIOCGRAWUNIQ(_HIDIOCGRAWUNIQ_LEN)
+// Renamed in v6.16, commit c6d732c38f93 ("net: ethtool: remove duplicate defines for family info")
+#define ETHTOOL_FAMILY_NAME ETHTOOL_GENL_NAME
+#define ETHTOOL_FAMILY_VERSION ETHTOOL_GENL_VERSION
'
includes_NetBSD='
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_darwin.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_darwin.go
index 798f61ad3bf97d..7838ca5db200b2 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_darwin.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_darwin.go
@@ -602,14 +602,9 @@ func Connectx(fd int, srcIf uint32, srcAddr, dstAddr Sockaddr, associd SaeAssocI
return
}
-// sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error)
const minIovec = 8
func Readv(fd int, iovs [][]byte) (n int, err error) {
- if !darwinKernelVersionMin(11, 0, 0) {
- return 0, ENOSYS
- }
-
iovecs := make([]Iovec, 0, minIovec)
iovecs = appendBytes(iovecs, iovs)
n, err = readv(fd, iovecs)
@@ -618,9 +613,6 @@ func Readv(fd int, iovs [][]byte) (n int, err error) {
}
func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) {
- if !darwinKernelVersionMin(11, 0, 0) {
- return 0, ENOSYS
- }
iovecs := make([]Iovec, 0, minIovec)
iovecs = appendBytes(iovecs, iovs)
n, err = preadv(fd, iovecs, offset)
@@ -629,10 +621,6 @@ func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) {
}
func Writev(fd int, iovs [][]byte) (n int, err error) {
- if !darwinKernelVersionMin(11, 0, 0) {
- return 0, ENOSYS
- }
-
iovecs := make([]Iovec, 0, minIovec)
iovecs = appendBytes(iovecs, iovs)
if raceenabled {
@@ -644,10 +632,6 @@ func Writev(fd int, iovs [][]byte) (n int, err error) {
}
func Pwritev(fd int, iovs [][]byte, offset int64) (n int, err error) {
- if !darwinKernelVersionMin(11, 0, 0) {
- return 0, ENOSYS
- }
-
iovecs := make([]Iovec, 0, minIovec)
iovecs = appendBytes(iovecs, iovs)
if raceenabled {
@@ -707,45 +691,7 @@ func readvRacedetect(iovecs []Iovec, n int, err error) {
}
}
-func darwinMajorMinPatch() (maj, min, patch int, err error) {
- var un Utsname
- err = Uname(&un)
- if err != nil {
- return
- }
-
- var mmp [3]int
- c := 0
-Loop:
- for _, b := range un.Release[:] {
- switch {
- case b >= '0' && b <= '9':
- mmp[c] = 10*mmp[c] + int(b-'0')
- case b == '.':
- c++
- if c > 2 {
- return 0, 0, 0, ENOTSUP
- }
- case b == 0:
- break Loop
- default:
- return 0, 0, 0, ENOTSUP
- }
- }
- if c != 2 {
- return 0, 0, 0, ENOTSUP
- }
- return mmp[0], mmp[1], mmp[2], nil
-}
-
-func darwinKernelVersionMin(maj, min, patch int) bool {
- actualMaj, actualMin, actualPatch, err := darwinMajorMinPatch()
- if err != nil {
- return false
- }
- return actualMaj > maj || actualMaj == maj && (actualMin > min || actualMin == min && actualPatch >= patch)
-}
-
+//sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error)
//sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error)
//sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error)
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux.go
index 4f432bfe8feeee..b6db27d937c8fa 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux.go
@@ -319,6 +319,7 @@ const (
AUDIT_INTEGRITY_POLICY_RULE = 0x70f
AUDIT_INTEGRITY_RULE = 0x70d
AUDIT_INTEGRITY_STATUS = 0x70a
+ AUDIT_INTEGRITY_USERSPACE = 0x710
AUDIT_IPC = 0x517
AUDIT_IPC_SET_PERM = 0x51f
AUDIT_IPE_ACCESS = 0x58c
@@ -327,6 +328,8 @@ const (
AUDIT_KERNEL = 0x7d0
AUDIT_KERNEL_OTHER = 0x524
AUDIT_KERN_MODULE = 0x532
+ AUDIT_LANDLOCK_ACCESS = 0x58f
+ AUDIT_LANDLOCK_DOMAIN = 0x590
AUDIT_LAST_FEATURE = 0x1
AUDIT_LAST_KERN_ANOM_MSG = 0x707
AUDIT_LAST_USER_MSG = 0x4af
@@ -491,6 +494,7 @@ const (
BPF_F_BEFORE = 0x8
BPF_F_ID = 0x20
BPF_F_NETFILTER_IP_DEFRAG = 0x1
+ BPF_F_PREORDER = 0x40
BPF_F_QUERY_EFFECTIVE = 0x1
BPF_F_REDIRECT_FLAGS = 0x19
BPF_F_REPLACE = 0x4
@@ -527,6 +531,7 @@ const (
BPF_LDX = 0x1
BPF_LEN = 0x80
BPF_LL_OFF = -0x200000
+ BPF_LOAD_ACQ = 0x100
BPF_LSH = 0x60
BPF_MAJOR_VERSION = 0x1
BPF_MAXINSNS = 0x1000
@@ -554,6 +559,7 @@ const (
BPF_RET = 0x6
BPF_RSH = 0x70
BPF_ST = 0x2
+ BPF_STORE_REL = 0x110
BPF_STX = 0x3
BPF_SUB = 0x10
BPF_TAG_SIZE = 0x8
@@ -843,9 +849,9 @@ const (
DM_UUID_FLAG = 0x4000
DM_UUID_LEN = 0x81
DM_VERSION = 0xc138fd00
- DM_VERSION_EXTRA = "-ioctl (2023-03-01)"
+ DM_VERSION_EXTRA = "-ioctl (2025-04-28)"
DM_VERSION_MAJOR = 0x4
- DM_VERSION_MINOR = 0x30
+ DM_VERSION_MINOR = 0x32
DM_VERSION_PATCHLEVEL = 0x0
DT_BLK = 0x6
DT_CHR = 0x2
@@ -936,11 +942,10 @@ const (
EPOLL_CTL_MOD = 0x3
EPOLL_IOC_TYPE = 0x8a
EROFS_SUPER_MAGIC_V1 = 0xe0f5e1e2
- ESP_V4_FLOW = 0xa
- ESP_V6_FLOW = 0xc
- ETHER_FLOW = 0x12
ETHTOOL_BUSINFO_LEN = 0x20
ETHTOOL_EROMVERS_LEN = 0x20
+ ETHTOOL_FAMILY_NAME = "ethtool"
+ ETHTOOL_FAMILY_VERSION = 0x1
ETHTOOL_FEC_AUTO = 0x2
ETHTOOL_FEC_BASER = 0x10
ETHTOOL_FEC_LLRS = 0x20
@@ -1203,13 +1208,18 @@ const (
FAN_DENY = 0x2
FAN_ENABLE_AUDIT = 0x40
FAN_EPIDFD = -0x2
+ FAN_ERRNO_BITS = 0x8
+ FAN_ERRNO_MASK = 0xff
+ FAN_ERRNO_SHIFT = 0x18
FAN_EVENT_INFO_TYPE_DFID = 0x3
FAN_EVENT_INFO_TYPE_DFID_NAME = 0x2
FAN_EVENT_INFO_TYPE_ERROR = 0x5
FAN_EVENT_INFO_TYPE_FID = 0x1
+ FAN_EVENT_INFO_TYPE_MNT = 0x7
FAN_EVENT_INFO_TYPE_NEW_DFID_NAME = 0xc
FAN_EVENT_INFO_TYPE_OLD_DFID_NAME = 0xa
FAN_EVENT_INFO_TYPE_PIDFD = 0x4
+ FAN_EVENT_INFO_TYPE_RANGE = 0x6
FAN_EVENT_METADATA_LEN = 0x18
FAN_EVENT_ON_CHILD = 0x8000000
FAN_FS_ERROR = 0x8000
@@ -1224,9 +1234,12 @@ const (
FAN_MARK_IGNORED_SURV_MODIFY = 0x40
FAN_MARK_IGNORE_SURV = 0x440
FAN_MARK_INODE = 0x0
+ FAN_MARK_MNTNS = 0x110
FAN_MARK_MOUNT = 0x10
FAN_MARK_ONLYDIR = 0x8
FAN_MARK_REMOVE = 0x2
+ FAN_MNT_ATTACH = 0x1000000
+ FAN_MNT_DETACH = 0x2000000
FAN_MODIFY = 0x2
FAN_MOVE = 0xc0
FAN_MOVED_FROM = 0x40
@@ -1240,6 +1253,7 @@ const (
FAN_OPEN_EXEC = 0x1000
FAN_OPEN_EXEC_PERM = 0x40000
FAN_OPEN_PERM = 0x10000
+ FAN_PRE_ACCESS = 0x100000
FAN_Q_OVERFLOW = 0x4000
FAN_RENAME = 0x10000000
FAN_REPORT_DFID_NAME = 0xc00
@@ -1247,6 +1261,7 @@ const (
FAN_REPORT_DIR_FID = 0x400
FAN_REPORT_FD_ERROR = 0x2000
FAN_REPORT_FID = 0x200
+ FAN_REPORT_MNT = 0x4000
FAN_REPORT_NAME = 0x800
FAN_REPORT_PIDFD = 0x80
FAN_REPORT_TARGET_FID = 0x1000
@@ -1266,6 +1281,7 @@ const (
FIB_RULE_PERMANENT = 0x1
FIB_RULE_UNRESOLVED = 0x4
FIDEDUPERANGE = 0xc0189436
+ FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED = 0x1
FSCRYPT_KEY_DESCRIPTOR_SIZE = 0x8
FSCRYPT_KEY_DESC_PREFIX = "fscrypt:"
FSCRYPT_KEY_DESC_PREFIX_SIZE = 0x8
@@ -1574,7 +1590,6 @@ const (
IPV6_DONTFRAG = 0x3e
IPV6_DROP_MEMBERSHIP = 0x15
IPV6_DSTOPTS = 0x3b
- IPV6_FLOW = 0x11
IPV6_FREEBIND = 0x4e
IPV6_HDRINCL = 0x24
IPV6_HOPLIMIT = 0x34
@@ -1625,7 +1640,6 @@ const (
IPV6_TRANSPARENT = 0x4b
IPV6_UNICAST_HOPS = 0x10
IPV6_UNICAST_IF = 0x4c
- IPV6_USER_FLOW = 0xe
IPV6_V6ONLY = 0x1a
IPV6_VERSION = 0x60
IPV6_VERSION_MASK = 0xf0
@@ -1687,7 +1701,6 @@ const (
IP_TTL = 0x2
IP_UNBLOCK_SOURCE = 0x25
IP_UNICAST_IF = 0x32
- IP_USER_FLOW = 0xd
IP_XFRM_POLICY = 0x11
ISOFS_SUPER_MAGIC = 0x9660
ISTRIP = 0x20
@@ -1809,7 +1822,11 @@ const (
LANDLOCK_ACCESS_FS_WRITE_FILE = 0x2
LANDLOCK_ACCESS_NET_BIND_TCP = 0x1
LANDLOCK_ACCESS_NET_CONNECT_TCP = 0x2
+ LANDLOCK_CREATE_RULESET_ERRATA = 0x2
LANDLOCK_CREATE_RULESET_VERSION = 0x1
+ LANDLOCK_RESTRICT_SELF_LOG_NEW_EXEC_ON = 0x2
+ LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF = 0x1
+ LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF = 0x4
LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET = 0x1
LANDLOCK_SCOPE_SIGNAL = 0x2
LINUX_REBOOT_CMD_CAD_OFF = 0x0
@@ -2485,6 +2502,10 @@ const (
PR_FP_EXC_UND = 0x40000
PR_FP_MODE_FR = 0x1
PR_FP_MODE_FRE = 0x2
+ PR_FUTEX_HASH = 0x4e
+ PR_FUTEX_HASH_GET_IMMUTABLE = 0x3
+ PR_FUTEX_HASH_GET_SLOTS = 0x2
+ PR_FUTEX_HASH_SET_SLOTS = 0x1
PR_GET_AUXV = 0x41555856
PR_GET_CHILD_SUBREAPER = 0x25
PR_GET_DUMPABLE = 0x3
@@ -2644,6 +2665,10 @@ const (
PR_TAGGED_ADDR_ENABLE = 0x1
PR_TASK_PERF_EVENTS_DISABLE = 0x1f
PR_TASK_PERF_EVENTS_ENABLE = 0x20
+ PR_TIMER_CREATE_RESTORE_IDS = 0x4d
+ PR_TIMER_CREATE_RESTORE_IDS_GET = 0x2
+ PR_TIMER_CREATE_RESTORE_IDS_OFF = 0x0
+ PR_TIMER_CREATE_RESTORE_IDS_ON = 0x1
PR_TIMING_STATISTICAL = 0x0
PR_TIMING_TIMESTAMP = 0x1
PR_TSC_ENABLE = 0x1
@@ -2724,6 +2749,7 @@ const (
PTRACE_SETREGSET = 0x4205
PTRACE_SETSIGINFO = 0x4203
PTRACE_SETSIGMASK = 0x420b
+ PTRACE_SET_SYSCALL_INFO = 0x4212
PTRACE_SET_SYSCALL_USER_DISPATCH_CONFIG = 0x4210
PTRACE_SINGLESTEP = 0x9
PTRACE_SYSCALL = 0x18
@@ -2787,7 +2813,7 @@ const (
RTAX_UNSPEC = 0x0
RTAX_WINDOW = 0x3
RTA_ALIGNTO = 0x4
- RTA_MAX = 0x1e
+ RTA_MAX = 0x1f
RTCF_DIRECTSRC = 0x4000000
RTCF_DOREDIRECT = 0x1000000
RTCF_LOG = 0x2000000
@@ -2864,10 +2890,12 @@ const (
RTM_DELACTION = 0x31
RTM_DELADDR = 0x15
RTM_DELADDRLABEL = 0x49
+ RTM_DELANYCAST = 0x3d
RTM_DELCHAIN = 0x65
RTM_DELLINK = 0x11
RTM_DELLINKPROP = 0x6d
RTM_DELMDB = 0x55
+ RTM_DELMULTICAST = 0x39
RTM_DELNEIGH = 0x1d
RTM_DELNETCONF = 0x51
RTM_DELNEXTHOP = 0x69
@@ -2917,11 +2945,13 @@ const (
RTM_NEWACTION = 0x30
RTM_NEWADDR = 0x14
RTM_NEWADDRLABEL = 0x48
+ RTM_NEWANYCAST = 0x3c
RTM_NEWCACHEREPORT = 0x60
RTM_NEWCHAIN = 0x64
RTM_NEWLINK = 0x10
RTM_NEWLINKPROP = 0x6c
RTM_NEWMDB = 0x54
+ RTM_NEWMULTICAST = 0x38
RTM_NEWNDUSEROPT = 0x44
RTM_NEWNEIGH = 0x1c
RTM_NEWNEIGHTBL = 0x40
@@ -2970,6 +3000,7 @@ const (
RTPROT_NTK = 0xf
RTPROT_OPENR = 0x63
RTPROT_OSPF = 0xbc
+ RTPROT_OVN = 0x54
RTPROT_RA = 0x9
RTPROT_REDIRECT = 0x1
RTPROT_RIP = 0xbd
@@ -2987,11 +3018,12 @@ const (
RUSAGE_THREAD = 0x1
RWF_APPEND = 0x10
RWF_ATOMIC = 0x40
+ RWF_DONTCACHE = 0x80
RWF_DSYNC = 0x2
RWF_HIPRI = 0x1
RWF_NOAPPEND = 0x20
RWF_NOWAIT = 0x8
- RWF_SUPPORTED = 0x7f
+ RWF_SUPPORTED = 0xff
RWF_SYNC = 0x4
RWF_WRITE_LIFE_NOT_SET = 0x0
SCHED_BATCH = 0x3
@@ -3271,6 +3303,7 @@ const (
STATX_BTIME = 0x800
STATX_CTIME = 0x80
STATX_DIOALIGN = 0x2000
+ STATX_DIO_READ_ALIGN = 0x20000
STATX_GID = 0x10
STATX_INO = 0x100
STATX_MNT_ID = 0x1000
@@ -3322,7 +3355,7 @@ const (
TASKSTATS_GENL_NAME = "TASKSTATS"
TASKSTATS_GENL_VERSION = 0x1
TASKSTATS_TYPE_MAX = 0x6
- TASKSTATS_VERSION = 0xe
+ TASKSTATS_VERSION = 0x10
TCIFLUSH = 0x0
TCIOFF = 0x2
TCIOFLUSH = 0x2
@@ -3392,8 +3425,6 @@ const (
TCP_TX_DELAY = 0x25
TCP_ULP = 0x1f
TCP_USER_TIMEOUT = 0x12
- TCP_V4_FLOW = 0x1
- TCP_V6_FLOW = 0x5
TCP_WINDOW_CLAMP = 0xa
TCP_ZEROCOPY_RECEIVE = 0x23
TFD_TIMER_ABSTIME = 0x1
@@ -3503,6 +3534,7 @@ const (
TP_STATUS_WRONG_FORMAT = 0x4
TRACEFS_MAGIC = 0x74726163
TS_COMM_LEN = 0x20
+ UBI_IOCECNFO = 0xc01c6f06
UDF_SUPER_MAGIC = 0x15013346
UDP_CORK = 0x1
UDP_ENCAP = 0x64
@@ -3515,8 +3547,6 @@ const (
UDP_NO_CHECK6_RX = 0x66
UDP_NO_CHECK6_TX = 0x65
UDP_SEGMENT = 0x67
- UDP_V4_FLOW = 0x2
- UDP_V6_FLOW = 0x6
UMOUNT_NOFOLLOW = 0x8
USBDEVICE_SUPER_MAGIC = 0x9fa2
UTIME_NOW = 0x3fffffff
@@ -3559,7 +3589,7 @@ const (
WDIOS_TEMPPANIC = 0x4
WDIOS_UNKNOWN = -0x1
WEXITED = 0x4
- WGALLOWEDIP_A_MAX = 0x3
+ WGALLOWEDIP_A_MAX = 0x4
WGDEVICE_A_MAX = 0x8
WGPEER_A_MAX = 0xa
WG_CMD_MAX = 0x1
@@ -3673,6 +3703,7 @@ const (
XDP_SHARED_UMEM = 0x1
XDP_STATISTICS = 0x7
XDP_TXMD_FLAGS_CHECKSUM = 0x2
+ XDP_TXMD_FLAGS_LAUNCH_TIME = 0x4
XDP_TXMD_FLAGS_TIMESTAMP = 0x1
XDP_TX_METADATA = 0x2
XDP_TX_RING = 0x3
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
index 75207613c785db..1c37f9fbc45c2f 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
@@ -68,6 +68,7 @@ const (
CS8 = 0x30
CSIZE = 0x30
CSTOPB = 0x40
+ DM_MPATH_PROBE_PATHS = 0xfd12
ECCGETLAYOUT = 0x81484d11
ECCGETSTATS = 0x80104d12
ECHOCTL = 0x200
@@ -360,6 +361,7 @@ const (
SO_OOBINLINE = 0xa
SO_PASSCRED = 0x10
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x11
@@ -372,6 +374,7 @@ const (
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x12
SO_RCVMARK = 0x4b
+ SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x14
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x14
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
index c68acda53522d1..6f54d34aefc9c2 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
@@ -68,6 +68,7 @@ const (
CS8 = 0x30
CSIZE = 0x30
CSTOPB = 0x40
+ DM_MPATH_PROBE_PATHS = 0xfd12
ECCGETLAYOUT = 0x81484d11
ECCGETSTATS = 0x80104d12
ECHOCTL = 0x200
@@ -361,6 +362,7 @@ const (
SO_OOBINLINE = 0xa
SO_PASSCRED = 0x10
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x11
@@ -373,6 +375,7 @@ const (
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x12
SO_RCVMARK = 0x4b
+ SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x14
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x14
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
index a8c607ab86b51b..783ec5c126f092 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
@@ -68,6 +68,7 @@ const (
CS8 = 0x30
CSIZE = 0x30
CSTOPB = 0x40
+ DM_MPATH_PROBE_PATHS = 0xfd12
ECCGETLAYOUT = 0x81484d11
ECCGETSTATS = 0x80104d12
ECHOCTL = 0x200
@@ -366,6 +367,7 @@ const (
SO_OOBINLINE = 0xa
SO_PASSCRED = 0x10
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x11
@@ -378,6 +380,7 @@ const (
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x12
SO_RCVMARK = 0x4b
+ SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x14
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x14
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
index 18563dd8d33a0f..ca83d3ba162c42 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
@@ -68,6 +68,7 @@ const (
CS8 = 0x30
CSIZE = 0x30
CSTOPB = 0x40
+ DM_MPATH_PROBE_PATHS = 0xfd12
ECCGETLAYOUT = 0x81484d11
ECCGETSTATS = 0x80104d12
ECHOCTL = 0x200
@@ -359,6 +360,7 @@ const (
SO_OOBINLINE = 0xa
SO_PASSCRED = 0x10
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x11
@@ -371,6 +373,7 @@ const (
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x12
SO_RCVMARK = 0x4b
+ SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x14
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x14
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
index 22912cdaa94483..607e611c0cbe35 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
@@ -68,6 +68,7 @@ const (
CS8 = 0x30
CSIZE = 0x30
CSTOPB = 0x40
+ DM_MPATH_PROBE_PATHS = 0xfd12
ECCGETLAYOUT = 0x81484d11
ECCGETSTATS = 0x80104d12
ECHOCTL = 0x200
@@ -353,6 +354,7 @@ const (
SO_OOBINLINE = 0xa
SO_PASSCRED = 0x10
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x11
@@ -365,6 +367,7 @@ const (
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x12
SO_RCVMARK = 0x4b
+ SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x14
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x14
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
index 29344eb37ab55a..b9cb5bd3c09e02 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
@@ -68,6 +68,7 @@ const (
CS8 = 0x30
CSIZE = 0x30
CSTOPB = 0x40
+ DM_MPATH_PROBE_PATHS = 0x2000fd12
ECCGETLAYOUT = 0x41484d11
ECCGETSTATS = 0x40104d12
ECHOCTL = 0x200
@@ -359,6 +360,7 @@ const (
SO_OOBINLINE = 0x100
SO_PASSCRED = 0x11
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x12
@@ -371,6 +373,7 @@ const (
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x1004
SO_RCVMARK = 0x4b
+ SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x1006
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x1006
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
index 20d51fb96a897f..65b078a6382e7b 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
@@ -68,6 +68,7 @@ const (
CS8 = 0x30
CSIZE = 0x30
CSTOPB = 0x40
+ DM_MPATH_PROBE_PATHS = 0x2000fd12
ECCGETLAYOUT = 0x41484d11
ECCGETSTATS = 0x40104d12
ECHOCTL = 0x200
@@ -359,6 +360,7 @@ const (
SO_OOBINLINE = 0x100
SO_PASSCRED = 0x11
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x12
@@ -371,6 +373,7 @@ const (
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x1004
SO_RCVMARK = 0x4b
+ SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x1006
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x1006
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
index 321b60902ae5cd..5298a3033d0a08 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
@@ -68,6 +68,7 @@ const (
CS8 = 0x30
CSIZE = 0x30
CSTOPB = 0x40
+ DM_MPATH_PROBE_PATHS = 0x2000fd12
ECCGETLAYOUT = 0x41484d11
ECCGETSTATS = 0x40104d12
ECHOCTL = 0x200
@@ -359,6 +360,7 @@ const (
SO_OOBINLINE = 0x100
SO_PASSCRED = 0x11
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x12
@@ -371,6 +373,7 @@ const (
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x1004
SO_RCVMARK = 0x4b
+ SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x1006
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x1006
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
index 9bacdf1e27910f..7bc557c8761837 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
@@ -68,6 +68,7 @@ const (
CS8 = 0x30
CSIZE = 0x30
CSTOPB = 0x40
+ DM_MPATH_PROBE_PATHS = 0x2000fd12
ECCGETLAYOUT = 0x41484d11
ECCGETSTATS = 0x40104d12
ECHOCTL = 0x200
@@ -359,6 +360,7 @@ const (
SO_OOBINLINE = 0x100
SO_PASSCRED = 0x11
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x12
@@ -371,6 +373,7 @@ const (
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x1004
SO_RCVMARK = 0x4b
+ SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x1006
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x1006
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
index c2242726156a94..152399bb04a1c0 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
@@ -68,6 +68,7 @@ const (
CS8 = 0x300
CSIZE = 0x300
CSTOPB = 0x400
+ DM_MPATH_PROBE_PATHS = 0x2000fd12
ECCGETLAYOUT = 0x41484d11
ECCGETSTATS = 0x40104d12
ECHOCTL = 0x40
@@ -414,6 +415,7 @@ const (
SO_OOBINLINE = 0xa
SO_PASSCRED = 0x14
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x15
@@ -426,6 +428,7 @@ const (
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x10
SO_RCVMARK = 0x4b
+ SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x12
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x12
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
index 6270c8ee13e3f5..1a1ce2409cf0d0 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
@@ -68,6 +68,7 @@ const (
CS8 = 0x300
CSIZE = 0x300
CSTOPB = 0x400
+ DM_MPATH_PROBE_PATHS = 0x2000fd12
ECCGETLAYOUT = 0x41484d11
ECCGETSTATS = 0x40104d12
ECHOCTL = 0x40
@@ -418,6 +419,7 @@ const (
SO_OOBINLINE = 0xa
SO_PASSCRED = 0x14
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x15
@@ -430,6 +432,7 @@ const (
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x10
SO_RCVMARK = 0x4b
+ SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x12
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x12
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
index 9966c1941f8301..4231a1fb5787f2 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
@@ -68,6 +68,7 @@ const (
CS8 = 0x300
CSIZE = 0x300
CSTOPB = 0x400
+ DM_MPATH_PROBE_PATHS = 0x2000fd12
ECCGETLAYOUT = 0x41484d11
ECCGETSTATS = 0x40104d12
ECHOCTL = 0x40
@@ -418,6 +419,7 @@ const (
SO_OOBINLINE = 0xa
SO_PASSCRED = 0x14
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x15
@@ -430,6 +432,7 @@ const (
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x10
SO_RCVMARK = 0x4b
+ SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x12
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x12
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
index 848e5fcc42e6f2..21c0e9526656fb 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
@@ -68,6 +68,7 @@ const (
CS8 = 0x30
CSIZE = 0x30
CSTOPB = 0x40
+ DM_MPATH_PROBE_PATHS = 0xfd12
ECCGETLAYOUT = 0x81484d11
ECCGETSTATS = 0x80104d12
ECHOCTL = 0x200
@@ -350,6 +351,7 @@ const (
SO_OOBINLINE = 0xa
SO_PASSCRED = 0x10
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x11
@@ -362,6 +364,7 @@ const (
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x12
SO_RCVMARK = 0x4b
+ SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x14
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x14
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
index 669b2adb80b778..f00d1cd7cf486d 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
@@ -68,6 +68,7 @@ const (
CS8 = 0x30
CSIZE = 0x30
CSTOPB = 0x40
+ DM_MPATH_PROBE_PATHS = 0xfd12
ECCGETLAYOUT = 0x81484d11
ECCGETSTATS = 0x80104d12
ECHOCTL = 0x200
@@ -422,6 +423,7 @@ const (
SO_OOBINLINE = 0xa
SO_PASSCRED = 0x10
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x11
@@ -434,6 +436,7 @@ const (
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x12
SO_RCVMARK = 0x4b
+ SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x14
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x14
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
index 4834e57514e44a..bc8d539e6af7d6 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
@@ -71,6 +71,7 @@ const (
CS8 = 0x30
CSIZE = 0x30
CSTOPB = 0x40
+ DM_MPATH_PROBE_PATHS = 0x2000fd12
ECCGETLAYOUT = 0x41484d11
ECCGETSTATS = 0x40104d12
ECHOCTL = 0x200
@@ -461,6 +462,7 @@ const (
SO_OOBINLINE = 0x100
SO_PASSCRED = 0x2
SO_PASSPIDFD = 0x55
+ SO_PASSRIGHTS = 0x5c
SO_PASSSEC = 0x1f
SO_PEEK_OFF = 0x26
SO_PEERCRED = 0x40
@@ -473,6 +475,7 @@ const (
SO_RCVBUFFORCE = 0x100b
SO_RCVLOWAT = 0x800
SO_RCVMARK = 0x54
+ SO_RCVPRIORITY = 0x5b
SO_RCVTIMEO = 0x2000
SO_RCVTIMEO_NEW = 0x44
SO_RCVTIMEO_OLD = 0x2000
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
index c79aaff306ae3e..aca56ee494747a 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
@@ -462,4 +462,5 @@ const (
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
+ SYS_OPEN_TREE_ATTR = 467
)
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
index 5eb450695e95a8..2ea1ef58c3ecc1 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
@@ -385,4 +385,5 @@ const (
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
+ SYS_OPEN_TREE_ATTR = 467
)
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
index 05e50297445861..d22c8af31968e9 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
@@ -426,4 +426,5 @@ const (
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
+ SYS_OPEN_TREE_ATTR = 467
)
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
index 38c53ec51bb3e6..5ee264ae974329 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
@@ -329,4 +329,5 @@ const (
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
+ SYS_OPEN_TREE_ATTR = 467
)
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go
index 31d2e71a18e17f..f9f03ebf5fa1b2 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go
@@ -325,4 +325,5 @@ const (
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
+ SYS_OPEN_TREE_ATTR = 467
)
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
index f4184a336b0e02..87c2118e84967b 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
@@ -446,4 +446,5 @@ const (
SYS_GETXATTRAT = 4464
SYS_LISTXATTRAT = 4465
SYS_REMOVEXATTRAT = 4466
+ SYS_OPEN_TREE_ATTR = 4467
)
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
index 05b9962278f276..391ad102fb68e4 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
@@ -376,4 +376,5 @@ const (
SYS_GETXATTRAT = 5464
SYS_LISTXATTRAT = 5465
SYS_REMOVEXATTRAT = 5466
+ SYS_OPEN_TREE_ATTR = 5467
)
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
index 43a256e9e67585..5656157757a9de 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
@@ -376,4 +376,5 @@ const (
SYS_GETXATTRAT = 5464
SYS_LISTXATTRAT = 5465
SYS_REMOVEXATTRAT = 5466
+ SYS_OPEN_TREE_ATTR = 5467
)
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
index eea5ddfc220774..0482b52e3c3866 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
@@ -446,4 +446,5 @@ const (
SYS_GETXATTRAT = 4464
SYS_LISTXATTRAT = 4465
SYS_REMOVEXATTRAT = 4466
+ SYS_OPEN_TREE_ATTR = 4467
)
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go
index 0d777bfbb1408e..71806f08f3870c 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go
@@ -453,4 +453,5 @@ const (
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
+ SYS_OPEN_TREE_ATTR = 467
)
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
index b44636502561e6..e35a7105829d47 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
@@ -425,4 +425,5 @@ const (
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
+ SYS_OPEN_TREE_ATTR = 467
)
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
index 0c7d21c1881653..2aea476705e1e3 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
@@ -425,4 +425,5 @@ const (
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
+ SYS_OPEN_TREE_ATTR = 467
)
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
index 8405391698787a..6c9bb4e5607816 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
@@ -330,4 +330,5 @@ const (
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
+ SYS_OPEN_TREE_ATTR = 467
)
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
index fcf1b790d6cfd3..680bc9915a314a 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
@@ -391,4 +391,5 @@ const (
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
+ SYS_OPEN_TREE_ATTR = 467
)
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
index 52d15b5f9d4597..620f271052f986 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
@@ -404,4 +404,5 @@ const (
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
+ SYS_OPEN_TREE_ATTR = 467
)
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux.go
index a46abe64720547..cd236443f64530 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux.go
@@ -114,8 +114,10 @@ type Statx_t struct {
Atomic_write_unit_min uint32
Atomic_write_unit_max uint32
Atomic_write_segments_max uint32
+ Dio_read_offset_align uint32
+ Atomic_write_unit_max_opt uint32
_ [1]uint32
- _ [9]uint64
+ _ [8]uint64
}
type Fsid struct {
@@ -199,7 +201,8 @@ type FscryptAddKeyArg struct {
Key_spec FscryptKeySpecifier
Raw_size uint32
Key_id uint32
- _ [8]uint32
+ Flags uint32
+ _ [7]uint32
}
type FscryptRemoveKeyArg struct {
@@ -2226,8 +2229,11 @@ const (
NFT_PAYLOAD_LL_HEADER = 0x0
NFT_PAYLOAD_NETWORK_HEADER = 0x1
NFT_PAYLOAD_TRANSPORT_HEADER = 0x2
+ NFT_PAYLOAD_INNER_HEADER = 0x3
+ NFT_PAYLOAD_TUN_HEADER = 0x4
NFT_PAYLOAD_CSUM_NONE = 0x0
NFT_PAYLOAD_CSUM_INET = 0x1
+ NFT_PAYLOAD_CSUM_SCTP = 0x2
NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 0x1
NFTA_PAYLOAD_UNSPEC = 0x0
NFTA_PAYLOAD_DREG = 0x1
@@ -2314,6 +2320,11 @@ const (
NFT_CT_AVGPKT = 0x10
NFT_CT_ZONE = 0x11
NFT_CT_EVENTMASK = 0x12
+ NFT_CT_SRC_IP = 0x13
+ NFT_CT_DST_IP = 0x14
+ NFT_CT_SRC_IP6 = 0x15
+ NFT_CT_DST_IP6 = 0x16
+ NFT_CT_ID = 0x17
NFTA_CT_UNSPEC = 0x0
NFTA_CT_DREG = 0x1
NFTA_CT_KEY = 0x2
@@ -2594,8 +2605,8 @@ const (
SOF_TIMESTAMPING_BIND_PHC = 0x8000
SOF_TIMESTAMPING_OPT_ID_TCP = 0x10000
- SOF_TIMESTAMPING_LAST = 0x20000
- SOF_TIMESTAMPING_MASK = 0x3ffff
+ SOF_TIMESTAMPING_LAST = 0x40000
+ SOF_TIMESTAMPING_MASK = 0x7ffff
SCM_TSTAMP_SND = 0x0
SCM_TSTAMP_SCHED = 0x1
@@ -3802,7 +3813,16 @@ const (
ETHTOOL_MSG_PSE_GET = 0x24
ETHTOOL_MSG_PSE_SET = 0x25
ETHTOOL_MSG_RSS_GET = 0x26
- ETHTOOL_MSG_USER_MAX = 0x2d
+ ETHTOOL_MSG_PLCA_GET_CFG = 0x27
+ ETHTOOL_MSG_PLCA_SET_CFG = 0x28
+ ETHTOOL_MSG_PLCA_GET_STATUS = 0x29
+ ETHTOOL_MSG_MM_GET = 0x2a
+ ETHTOOL_MSG_MM_SET = 0x2b
+ ETHTOOL_MSG_MODULE_FW_FLASH_ACT = 0x2c
+ ETHTOOL_MSG_PHY_GET = 0x2d
+ ETHTOOL_MSG_TSCONFIG_GET = 0x2e
+ ETHTOOL_MSG_TSCONFIG_SET = 0x2f
+ ETHTOOL_MSG_USER_MAX = 0x2f
ETHTOOL_MSG_KERNEL_NONE = 0x0
ETHTOOL_MSG_STRSET_GET_REPLY = 0x1
ETHTOOL_MSG_LINKINFO_GET_REPLY = 0x2
@@ -3842,7 +3862,17 @@ const (
ETHTOOL_MSG_MODULE_NTF = 0x24
ETHTOOL_MSG_PSE_GET_REPLY = 0x25
ETHTOOL_MSG_RSS_GET_REPLY = 0x26
- ETHTOOL_MSG_KERNEL_MAX = 0x2e
+ ETHTOOL_MSG_PLCA_GET_CFG_REPLY = 0x27
+ ETHTOOL_MSG_PLCA_GET_STATUS_REPLY = 0x28
+ ETHTOOL_MSG_PLCA_NTF = 0x29
+ ETHTOOL_MSG_MM_GET_REPLY = 0x2a
+ ETHTOOL_MSG_MM_NTF = 0x2b
+ ETHTOOL_MSG_MODULE_FW_FLASH_NTF = 0x2c
+ ETHTOOL_MSG_PHY_GET_REPLY = 0x2d
+ ETHTOOL_MSG_PHY_NTF = 0x2e
+ ETHTOOL_MSG_TSCONFIG_GET_REPLY = 0x2f
+ ETHTOOL_MSG_TSCONFIG_SET_REPLY = 0x30
+ ETHTOOL_MSG_KERNEL_MAX = 0x30
ETHTOOL_FLAG_COMPACT_BITSETS = 0x1
ETHTOOL_FLAG_OMIT_REPLY = 0x2
ETHTOOL_FLAG_STATS = 0x4
@@ -3949,7 +3979,12 @@ const (
ETHTOOL_A_RINGS_TCP_DATA_SPLIT = 0xb
ETHTOOL_A_RINGS_CQE_SIZE = 0xc
ETHTOOL_A_RINGS_TX_PUSH = 0xd
- ETHTOOL_A_RINGS_MAX = 0x10
+ ETHTOOL_A_RINGS_RX_PUSH = 0xe
+ ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN = 0xf
+ ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN_MAX = 0x10
+ ETHTOOL_A_RINGS_HDS_THRESH = 0x11
+ ETHTOOL_A_RINGS_HDS_THRESH_MAX = 0x12
+ ETHTOOL_A_RINGS_MAX = 0x12
ETHTOOL_A_CHANNELS_UNSPEC = 0x0
ETHTOOL_A_CHANNELS_HEADER = 0x1
ETHTOOL_A_CHANNELS_RX_MAX = 0x2
@@ -4015,7 +4050,9 @@ const (
ETHTOOL_A_TSINFO_TX_TYPES = 0x3
ETHTOOL_A_TSINFO_RX_FILTERS = 0x4
ETHTOOL_A_TSINFO_PHC_INDEX = 0x5
- ETHTOOL_A_TSINFO_MAX = 0x6
+ ETHTOOL_A_TSINFO_STATS = 0x6
+ ETHTOOL_A_TSINFO_HWTSTAMP_PROVIDER = 0x7
+ ETHTOOL_A_TSINFO_MAX = 0x9
ETHTOOL_A_CABLE_TEST_UNSPEC = 0x0
ETHTOOL_A_CABLE_TEST_HEADER = 0x1
ETHTOOL_A_CABLE_TEST_MAX = 0x1
@@ -4101,6 +4138,19 @@ const (
ETHTOOL_A_TUNNEL_INFO_MAX = 0x2
)
+const (
+ TCP_V4_FLOW = 0x1
+ UDP_V4_FLOW = 0x2
+ TCP_V6_FLOW = 0x5
+ UDP_V6_FLOW = 0x6
+ ESP_V4_FLOW = 0xa
+ ESP_V6_FLOW = 0xc
+ IP_USER_FLOW = 0xd
+ IPV6_USER_FLOW = 0xe
+ IPV6_FLOW = 0x11
+ ETHER_FLOW = 0x12
+)
+
const SPEED_UNKNOWN = -0x1
type EthtoolDrvinfo struct {
@@ -4613,6 +4663,7 @@ const (
NL80211_ATTR_AKM_SUITES = 0x4c
NL80211_ATTR_AP_ISOLATE = 0x60
NL80211_ATTR_AP_SETTINGS_FLAGS = 0x135
+ NL80211_ATTR_ASSOC_SPP_AMSDU = 0x14a
NL80211_ATTR_AUTH_DATA = 0x9c
NL80211_ATTR_AUTH_TYPE = 0x35
NL80211_ATTR_BANDS = 0xef
@@ -4623,6 +4674,7 @@ const (
NL80211_ATTR_BSS_BASIC_RATES = 0x24
NL80211_ATTR_BSS = 0x2f
NL80211_ATTR_BSS_CTS_PROT = 0x1c
+ NL80211_ATTR_BSS_DUMP_INCLUDE_USE_DATA = 0x147
NL80211_ATTR_BSS_HT_OPMODE = 0x6d
NL80211_ATTR_BSSID = 0xf5
NL80211_ATTR_BSS_SELECT = 0xe3
@@ -4682,6 +4734,7 @@ const (
NL80211_ATTR_DTIM_PERIOD = 0xd
NL80211_ATTR_DURATION = 0x57
NL80211_ATTR_EHT_CAPABILITY = 0x136
+ NL80211_ATTR_EMA_RNR_ELEMS = 0x145
NL80211_ATTR_EML_CAPABILITY = 0x13d
NL80211_ATTR_EXT_CAPA = 0xa9
NL80211_ATTR_EXT_CAPA_MASK = 0xaa
@@ -4717,6 +4770,7 @@ const (
NL80211_ATTR_HIDDEN_SSID = 0x7e
NL80211_ATTR_HT_CAPABILITY = 0x1f
NL80211_ATTR_HT_CAPABILITY_MASK = 0x94
+ NL80211_ATTR_HW_TIMESTAMP_ENABLED = 0x144
NL80211_ATTR_IE_ASSOC_RESP = 0x80
NL80211_ATTR_IE = 0x2a
NL80211_ATTR_IE_PROBE_RESP = 0x7f
@@ -4747,9 +4801,10 @@ const (
NL80211_ATTR_MAC_HINT = 0xc8
NL80211_ATTR_MAC_MASK = 0xd7
NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca
- NL80211_ATTR_MAX = 0x14d
+ NL80211_ATTR_MAX = 0x151
NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4
NL80211_ATTR_MAX_CSA_COUNTERS = 0xce
+ NL80211_ATTR_MAX_HW_TIMESTAMP_PEERS = 0x143
NL80211_ATTR_MAX_MATCH_SETS = 0x85
NL80211_ATTR_MAX_NUM_AKM_SUITES = 0x13c
NL80211_ATTR_MAX_NUM_PMKIDS = 0x56
@@ -4774,9 +4829,12 @@ const (
NL80211_ATTR_MGMT_SUBTYPE = 0x29
NL80211_ATTR_MLD_ADDR = 0x13a
NL80211_ATTR_MLD_CAPA_AND_OPS = 0x13e
+ NL80211_ATTR_MLO_LINK_DISABLED = 0x146
NL80211_ATTR_MLO_LINK_ID = 0x139
NL80211_ATTR_MLO_LINKS = 0x138
NL80211_ATTR_MLO_SUPPORT = 0x13b
+ NL80211_ATTR_MLO_TTLM_DLINK = 0x148
+ NL80211_ATTR_MLO_TTLM_ULINK = 0x149
NL80211_ATTR_MNTR_FLAGS = 0x17
NL80211_ATTR_MPATH_INFO = 0x1b
NL80211_ATTR_MPATH_NEXT_HOP = 0x1a
@@ -4809,12 +4867,14 @@ const (
NL80211_ATTR_PORT_AUTHORIZED = 0x103
NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN = 0x5
NL80211_ATTR_POWER_RULE_MAX_EIRP = 0x6
+ NL80211_ATTR_POWER_RULE_PSD = 0x8
NL80211_ATTR_PREV_BSSID = 0x4f
NL80211_ATTR_PRIVACY = 0x46
NL80211_ATTR_PROBE_RESP = 0x91
NL80211_ATTR_PROBE_RESP_OFFLOAD = 0x90
NL80211_ATTR_PROTOCOL_FEATURES = 0xad
NL80211_ATTR_PS_STATE = 0x5d
+ NL80211_ATTR_PUNCT_BITMAP = 0x142
NL80211_ATTR_QOS_MAP = 0xc7
NL80211_ATTR_RADAR_BACKGROUND = 0x134
NL80211_ATTR_RADAR_EVENT = 0xa8
@@ -4943,7 +5003,9 @@ const (
NL80211_ATTR_WIPHY_FREQ = 0x26
NL80211_ATTR_WIPHY_FREQ_HINT = 0xc9
NL80211_ATTR_WIPHY_FREQ_OFFSET = 0x122
+ NL80211_ATTR_WIPHY_INTERFACE_COMBINATIONS = 0x14c
NL80211_ATTR_WIPHY_NAME = 0x2
+ NL80211_ATTR_WIPHY_RADIOS = 0x14b
NL80211_ATTR_WIPHY_RETRY_LONG = 0x3e
NL80211_ATTR_WIPHY_RETRY_SHORT = 0x3d
NL80211_ATTR_WIPHY_RTS_THRESHOLD = 0x40
@@ -4978,6 +5040,8 @@ const (
NL80211_BAND_ATTR_IFTYPE_DATA = 0x9
NL80211_BAND_ATTR_MAX = 0xd
NL80211_BAND_ATTR_RATES = 0x2
+ NL80211_BAND_ATTR_S1G_CAPA = 0xd
+ NL80211_BAND_ATTR_S1G_MCS_NSS_SET = 0xc
NL80211_BAND_ATTR_VHT_CAPA = 0x8
NL80211_BAND_ATTR_VHT_MCS_SET = 0x7
NL80211_BAND_IFTYPE_ATTR_EHT_CAP_MAC = 0x8
@@ -5001,6 +5065,10 @@ const (
NL80211_BSS_BEACON_INTERVAL = 0x4
NL80211_BSS_BEACON_TSF = 0xd
NL80211_BSS_BSSID = 0x1
+ NL80211_BSS_CANNOT_USE_6GHZ_PWR_MISMATCH = 0x2
+ NL80211_BSS_CANNOT_USE_NSTR_NONPRIMARY = 0x1
+ NL80211_BSS_CANNOT_USE_REASONS = 0x18
+ NL80211_BSS_CANNOT_USE_UHB_PWR_MISMATCH = 0x2
NL80211_BSS_CAPABILITY = 0x5
NL80211_BSS_CHAIN_SIGNAL = 0x13
NL80211_BSS_CHAN_WIDTH_10 = 0x1
@@ -5032,6 +5100,9 @@ const (
NL80211_BSS_STATUS = 0x9
NL80211_BSS_STATUS_IBSS_JOINED = 0x2
NL80211_BSS_TSF = 0x3
+ NL80211_BSS_USE_FOR = 0x17
+ NL80211_BSS_USE_FOR_MLD_LINK = 0x2
+ NL80211_BSS_USE_FOR_NORMAL = 0x1
NL80211_CHAN_HT20 = 0x1
NL80211_CHAN_HT40MINUS = 0x2
NL80211_CHAN_HT40PLUS = 0x3
@@ -5117,7 +5188,8 @@ const (
NL80211_CMD_LEAVE_IBSS = 0x2c
NL80211_CMD_LEAVE_MESH = 0x45
NL80211_CMD_LEAVE_OCB = 0x6d
- NL80211_CMD_MAX = 0x9b
+ NL80211_CMD_LINKS_REMOVED = 0x9a
+ NL80211_CMD_MAX = 0x9d
NL80211_CMD_MICHAEL_MIC_FAILURE = 0x29
NL80211_CMD_MODIFY_LINK_STA = 0x97
NL80211_CMD_NAN_MATCH = 0x78
@@ -5161,6 +5233,7 @@ const (
NL80211_CMD_SET_COALESCE = 0x65
NL80211_CMD_SET_CQM = 0x3f
NL80211_CMD_SET_FILS_AAD = 0x92
+ NL80211_CMD_SET_HW_TIMESTAMP = 0x99
NL80211_CMD_SET_INTERFACE = 0x6
NL80211_CMD_SET_KEY = 0xa
NL80211_CMD_SET_MAC_ACL = 0x5d
@@ -5180,6 +5253,7 @@ const (
NL80211_CMD_SET_SAR_SPECS = 0x8c
NL80211_CMD_SET_STATION = 0x12
NL80211_CMD_SET_TID_CONFIG = 0x89
+ NL80211_CMD_SET_TID_TO_LINK_MAPPING = 0x9b
NL80211_CMD_SET_TX_BITRATE_MASK = 0x39
NL80211_CMD_SET_WDS_PEER = 0x42
NL80211_CMD_SET_WIPHY = 0x2
@@ -5247,6 +5321,7 @@ const (
NL80211_EXT_FEATURE_AIRTIME_FAIRNESS = 0x21
NL80211_EXT_FEATURE_AP_PMKSA_CACHING = 0x22
NL80211_EXT_FEATURE_AQL = 0x28
+ NL80211_EXT_FEATURE_AUTH_AND_DEAUTH_RANDOM_TA = 0x40
NL80211_EXT_FEATURE_BEACON_PROTECTION_CLIENT = 0x2e
NL80211_EXT_FEATURE_BEACON_PROTECTION = 0x29
NL80211_EXT_FEATURE_BEACON_RATE_HE = 0x36
@@ -5262,6 +5337,7 @@ const (
NL80211_EXT_FEATURE_CQM_RSSI_LIST = 0xd
NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT = 0x1b
NL80211_EXT_FEATURE_DEL_IBSS_STA = 0x2c
+ NL80211_EXT_FEATURE_DFS_CONCURRENT = 0x43
NL80211_EXT_FEATURE_DFS_OFFLOAD = 0x19
NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER = 0x20
NL80211_EXT_FEATURE_EXT_KEY_ID = 0x24
@@ -5281,9 +5357,12 @@ const (
NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION = 0x14
NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE = 0x13
NL80211_EXT_FEATURE_OPERATING_CHANNEL_VALIDATION = 0x31
+ NL80211_EXT_FEATURE_OWE_OFFLOAD_AP = 0x42
+ NL80211_EXT_FEATURE_OWE_OFFLOAD = 0x41
NL80211_EXT_FEATURE_POWERED_ADDR_CHANGE = 0x3d
NL80211_EXT_FEATURE_PROTECTED_TWT = 0x2b
NL80211_EXT_FEATURE_PROT_RANGE_NEGO_AND_MEASURE = 0x39
+ NL80211_EXT_FEATURE_PUNCT = 0x3e
NL80211_EXT_FEATURE_RADAR_BACKGROUND = 0x3c
NL80211_EXT_FEATURE_RRM = 0x1
NL80211_EXT_FEATURE_SAE_OFFLOAD_AP = 0x33
@@ -5295,8 +5374,10 @@ const (
NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD = 0x23
NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI = 0xc
NL80211_EXT_FEATURE_SECURE_LTF = 0x37
+ NL80211_EXT_FEATURE_SECURE_NAN = 0x3f
NL80211_EXT_FEATURE_SECURE_RTT = 0x38
NL80211_EXT_FEATURE_SET_SCAN_DWELL = 0x5
+ NL80211_EXT_FEATURE_SPP_AMSDU_SUPPORT = 0x44
NL80211_EXT_FEATURE_STA_TX_PWR = 0x25
NL80211_EXT_FEATURE_TXQS = 0x1c
NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP = 0x35
@@ -5343,7 +5424,10 @@ const (
NL80211_FREQUENCY_ATTR_2MHZ = 0x16
NL80211_FREQUENCY_ATTR_4MHZ = 0x17
NL80211_FREQUENCY_ATTR_8MHZ = 0x18
+ NL80211_FREQUENCY_ATTR_ALLOW_6GHZ_VLP_AP = 0x21
+ NL80211_FREQUENCY_ATTR_CAN_MONITOR = 0x20
NL80211_FREQUENCY_ATTR_DFS_CAC_TIME = 0xd
+ NL80211_FREQUENCY_ATTR_DFS_CONCURRENT = 0x1d
NL80211_FREQUENCY_ATTR_DFS_STATE = 0x7
NL80211_FREQUENCY_ATTR_DFS_TIME = 0x8
NL80211_FREQUENCY_ATTR_DISABLED = 0x2
@@ -5351,12 +5435,14 @@ const (
NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf
NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe
NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf
- NL80211_FREQUENCY_ATTR_MAX = 0x21
+ NL80211_FREQUENCY_ATTR_MAX = 0x22
NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6
NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11
NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc
NL80211_FREQUENCY_ATTR_NO_20MHZ = 0x10
NL80211_FREQUENCY_ATTR_NO_320MHZ = 0x1a
+ NL80211_FREQUENCY_ATTR_NO_6GHZ_AFC_CLIENT = 0x1f
+ NL80211_FREQUENCY_ATTR_NO_6GHZ_VLP_CLIENT = 0x1e
NL80211_FREQUENCY_ATTR_NO_80MHZ = 0xb
NL80211_FREQUENCY_ATTR_NO_EHT = 0x1b
NL80211_FREQUENCY_ATTR_NO_HE = 0x13
@@ -5364,8 +5450,11 @@ const (
NL80211_FREQUENCY_ATTR_NO_HT40_PLUS = 0xa
NL80211_FREQUENCY_ATTR_NO_IBSS = 0x3
NL80211_FREQUENCY_ATTR_NO_IR = 0x3
+ NL80211_FREQUENCY_ATTR_NO_UHB_AFC_CLIENT = 0x1f
+ NL80211_FREQUENCY_ATTR_NO_UHB_VLP_CLIENT = 0x1e
NL80211_FREQUENCY_ATTR_OFFSET = 0x14
NL80211_FREQUENCY_ATTR_PASSIVE_SCAN = 0x3
+ NL80211_FREQUENCY_ATTR_PSD = 0x1c
NL80211_FREQUENCY_ATTR_RADAR = 0x5
NL80211_FREQUENCY_ATTR_WMM = 0x12
NL80211_FTM_RESP_ATTR_CIVICLOC = 0x3
@@ -5430,6 +5519,7 @@ const (
NL80211_IFTYPE_STATION = 0x2
NL80211_IFTYPE_UNSPECIFIED = 0x0
NL80211_IFTYPE_WDS = 0x5
+ NL80211_KCK_EXT_LEN_32 = 0x20
NL80211_KCK_EXT_LEN = 0x18
NL80211_KCK_LEN = 0x10
NL80211_KEK_EXT_LEN = 0x20
@@ -5458,9 +5548,10 @@ const (
NL80211_MAX_SUPP_HT_RATES = 0x4d
NL80211_MAX_SUPP_RATES = 0x20
NL80211_MAX_SUPP_REG_RULES = 0x80
+ NL80211_MAX_SUPP_SELECTORS = 0x80
NL80211_MBSSID_CONFIG_ATTR_EMA = 0x5
NL80211_MBSSID_CONFIG_ATTR_INDEX = 0x3
- NL80211_MBSSID_CONFIG_ATTR_MAX = 0x5
+ NL80211_MBSSID_CONFIG_ATTR_MAX = 0x6
NL80211_MBSSID_CONFIG_ATTR_MAX_EMA_PROFILE_PERIODICITY = 0x2
NL80211_MBSSID_CONFIG_ATTR_MAX_INTERFACES = 0x1
NL80211_MBSSID_CONFIG_ATTR_TX_IFINDEX = 0x4
@@ -5703,11 +5794,16 @@ const (
NL80211_RADAR_PRE_CAC_EXPIRED = 0x4
NL80211_RATE_INFO_10_MHZ_WIDTH = 0xb
NL80211_RATE_INFO_160_MHZ_WIDTH = 0xa
+ NL80211_RATE_INFO_16_MHZ_WIDTH = 0x1d
+ NL80211_RATE_INFO_1_MHZ_WIDTH = 0x19
+ NL80211_RATE_INFO_2_MHZ_WIDTH = 0x1a
NL80211_RATE_INFO_320_MHZ_WIDTH = 0x12
NL80211_RATE_INFO_40_MHZ_WIDTH = 0x3
+ NL80211_RATE_INFO_4_MHZ_WIDTH = 0x1b
NL80211_RATE_INFO_5_MHZ_WIDTH = 0xc
NL80211_RATE_INFO_80_MHZ_WIDTH = 0x8
NL80211_RATE_INFO_80P80_MHZ_WIDTH = 0x9
+ NL80211_RATE_INFO_8_MHZ_WIDTH = 0x1c
NL80211_RATE_INFO_BITRATE32 = 0x5
NL80211_RATE_INFO_BITRATE = 0x1
NL80211_RATE_INFO_EHT_GI_0_8 = 0x0
@@ -5753,6 +5849,8 @@ const (
NL80211_RATE_INFO_HE_RU_ALLOC = 0x11
NL80211_RATE_INFO_MAX = 0x1d
NL80211_RATE_INFO_MCS = 0x2
+ NL80211_RATE_INFO_S1G_MCS = 0x17
+ NL80211_RATE_INFO_S1G_NSS = 0x18
NL80211_RATE_INFO_SHORT_GI = 0x4
NL80211_RATE_INFO_VHT_MCS = 0x6
NL80211_RATE_INFO_VHT_NSS = 0x7
@@ -5770,14 +5868,19 @@ const (
NL80211_REKEY_DATA_KEK = 0x1
NL80211_REKEY_DATA_REPLAY_CTR = 0x3
NL80211_REPLAY_CTR_LEN = 0x8
+ NL80211_RRF_ALLOW_6GHZ_VLP_AP = 0x1000000
NL80211_RRF_AUTO_BW = 0x800
NL80211_RRF_DFS = 0x10
+ NL80211_RRF_DFS_CONCURRENT = 0x200000
NL80211_RRF_GO_CONCURRENT = 0x1000
NL80211_RRF_IR_CONCURRENT = 0x1000
NL80211_RRF_NO_160MHZ = 0x10000
NL80211_RRF_NO_320MHZ = 0x40000
+ NL80211_RRF_NO_6GHZ_AFC_CLIENT = 0x800000
+ NL80211_RRF_NO_6GHZ_VLP_CLIENT = 0x400000
NL80211_RRF_NO_80MHZ = 0x8000
NL80211_RRF_NO_CCK = 0x2
+ NL80211_RRF_NO_EHT = 0x80000
NL80211_RRF_NO_HE = 0x20000
NL80211_RRF_NO_HT40 = 0x6000
NL80211_RRF_NO_HT40MINUS = 0x2000
@@ -5788,7 +5891,10 @@ const (
NL80211_RRF_NO_IR = 0x80
NL80211_RRF_NO_OFDM = 0x1
NL80211_RRF_NO_OUTDOOR = 0x8
+ NL80211_RRF_NO_UHB_AFC_CLIENT = 0x800000
+ NL80211_RRF_NO_UHB_VLP_CLIENT = 0x400000
NL80211_RRF_PASSIVE_SCAN = 0x80
+ NL80211_RRF_PSD = 0x100000
NL80211_RRF_PTMP_ONLY = 0x40
NL80211_RRF_PTP_ONLY = 0x20
NL80211_RXMGMT_FLAG_ANSWERED = 0x1
@@ -5849,6 +5955,7 @@ const (
NL80211_STA_FLAG_MAX_OLD_API = 0x6
NL80211_STA_FLAG_MFP = 0x4
NL80211_STA_FLAG_SHORT_PREAMBLE = 0x2
+ NL80211_STA_FLAG_SPP_AMSDU = 0x8
NL80211_STA_FLAG_TDLS_PEER = 0x6
NL80211_STA_FLAG_WME = 0x3
NL80211_STA_INFO_ACK_SIGNAL_AVG = 0x23
@@ -6007,6 +6114,13 @@ const (
NL80211_VHT_CAPABILITY_LEN = 0xc
NL80211_VHT_NSS_MAX = 0x8
NL80211_WIPHY_NAME_MAXLEN = 0x40
+ NL80211_WIPHY_RADIO_ATTR_FREQ_RANGE = 0x2
+ NL80211_WIPHY_RADIO_ATTR_INDEX = 0x1
+ NL80211_WIPHY_RADIO_ATTR_INTERFACE_COMBINATION = 0x3
+ NL80211_WIPHY_RADIO_ATTR_MAX = 0x4
+ NL80211_WIPHY_RADIO_FREQ_ATTR_END = 0x2
+ NL80211_WIPHY_RADIO_FREQ_ATTR_MAX = 0x2
+ NL80211_WIPHY_RADIO_FREQ_ATTR_START = 0x1
NL80211_WMMR_AIFSN = 0x3
NL80211_WMMR_CW_MAX = 0x2
NL80211_WMMR_CW_MIN = 0x1
@@ -6038,6 +6152,7 @@ const (
NL80211_WOWLAN_TRIG_PKT_PATTERN = 0x4
NL80211_WOWLAN_TRIG_RFKILL_RELEASE = 0x9
NL80211_WOWLAN_TRIG_TCP_CONNECTION = 0xe
+ NL80211_WOWLAN_TRIG_UNPROTECTED_DEAUTH_DISASSOC = 0x14
NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211 = 0xa
NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211_LEN = 0xb
NL80211_WOWLAN_TRIG_WAKEUP_PKT_8023 = 0xc
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
index fd402da43fce18..485f2d3a1bc8c5 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
@@ -282,7 +282,7 @@ type Taskstats struct {
Ac_exitcode uint32
Ac_flag uint8
Ac_nice uint8
- _ [4]byte
+ _ [6]byte
Cpu_count uint64
Cpu_delay_total uint64
Blkio_count uint64
@@ -338,6 +338,22 @@ type Taskstats struct {
Wpcopy_delay_total uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
+ Irq_delay_max uint64
+ Irq_delay_min uint64
}
type cpuMask uint32
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
index eb7a5e1864adae..ecbd1ad8bc54b5 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
@@ -351,6 +351,22 @@ type Taskstats struct {
Wpcopy_delay_total uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
+ Irq_delay_max uint64
+ Irq_delay_min uint64
}
type cpuMask uint64
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
index d78ac108b6c263..02f0463a44b241 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
@@ -91,7 +91,7 @@ type Stat_t struct {
Gid uint32
Rdev uint64
_ uint16
- _ [4]byte
+ _ [6]byte
Size int64
Blksize int32
_ [4]byte
@@ -273,7 +273,7 @@ type Taskstats struct {
Ac_exitcode uint32
Ac_flag uint8
Ac_nice uint8
- _ [4]byte
+ _ [6]byte
Cpu_count uint64
Cpu_delay_total uint64
Blkio_count uint64
@@ -329,6 +329,22 @@ type Taskstats struct {
Wpcopy_delay_total uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
+ Irq_delay_max uint64
+ Irq_delay_min uint64
}
type cpuMask uint32
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
index cd06d47f1f7c76..6f4d400d2417cc 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
@@ -330,6 +330,22 @@ type Taskstats struct {
Wpcopy_delay_total uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
+ Irq_delay_max uint64
+ Irq_delay_min uint64
}
type cpuMask uint64
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go
index 2f28fe26c1a533..cd532cfa558990 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go
@@ -331,6 +331,22 @@ type Taskstats struct {
Wpcopy_delay_total uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
+ Irq_delay_max uint64
+ Irq_delay_min uint64
}
type cpuMask uint64
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
index 71d6cac2f1aabd..41336208517524 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
@@ -278,7 +278,7 @@ type Taskstats struct {
Ac_exitcode uint32
Ac_flag uint8
Ac_nice uint8
- _ [4]byte
+ _ [6]byte
Cpu_count uint64
Cpu_delay_total uint64
Blkio_count uint64
@@ -334,6 +334,22 @@ type Taskstats struct {
Wpcopy_delay_total uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
+ Irq_delay_max uint64
+ Irq_delay_min uint64
}
type cpuMask uint32
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
index 8596d453563873..eaa37eb718e2d3 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
@@ -333,6 +333,22 @@ type Taskstats struct {
Wpcopy_delay_total uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
+ Irq_delay_max uint64
+ Irq_delay_min uint64
}
type cpuMask uint64
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
index cd60ea18662bfc..98ae6a1e4ac4d9 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
@@ -333,6 +333,22 @@ type Taskstats struct {
Wpcopy_delay_total uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
+ Irq_delay_max uint64
+ Irq_delay_min uint64
}
type cpuMask uint64
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
index b0ae420c489d6b..cae1961594d24b 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
@@ -278,7 +278,7 @@ type Taskstats struct {
Ac_exitcode uint32
Ac_flag uint8
Ac_nice uint8
- _ [4]byte
+ _ [6]byte
Cpu_count uint64
Cpu_delay_total uint64
Blkio_count uint64
@@ -334,6 +334,22 @@ type Taskstats struct {
Wpcopy_delay_total uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
+ Irq_delay_max uint64
+ Irq_delay_min uint64
}
type cpuMask uint32
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go
index 8359728759bcfd..6ce3b4e02830ee 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go
@@ -90,7 +90,7 @@ type Stat_t struct {
Gid uint32
Rdev uint64
_ uint16
- _ [4]byte
+ _ [6]byte
Size int64
Blksize int32
_ [4]byte
@@ -285,7 +285,7 @@ type Taskstats struct {
Ac_exitcode uint32
Ac_flag uint8
Ac_nice uint8
- _ [4]byte
+ _ [6]byte
Cpu_count uint64
Cpu_delay_total uint64
Blkio_count uint64
@@ -341,6 +341,22 @@ type Taskstats struct {
Wpcopy_delay_total uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
+ Irq_delay_max uint64
+ Irq_delay_min uint64
}
type cpuMask uint32
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
index 69eb6a5c6892f5..c7429c6a1461ea 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
@@ -340,6 +340,22 @@ type Taskstats struct {
Wpcopy_delay_total uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
+ Irq_delay_max uint64
+ Irq_delay_min uint64
}
type cpuMask uint64
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
index 5f583cb62bf304..4bf4baf4cac56c 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
@@ -340,6 +340,22 @@ type Taskstats struct {
Wpcopy_delay_total uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
+ Irq_delay_max uint64
+ Irq_delay_min uint64
}
type cpuMask uint64
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
index ad05b51a603649..e9709d70afbd87 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
@@ -358,6 +358,22 @@ type Taskstats struct {
Wpcopy_delay_total uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
+ Irq_delay_max uint64
+ Irq_delay_min uint64
}
type cpuMask uint64
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
index cf3ce90037704d..fb44268ca7da3d 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
@@ -353,6 +353,22 @@ type Taskstats struct {
Wpcopy_delay_total uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
+ Irq_delay_max uint64
+ Irq_delay_min uint64
}
type cpuMask uint64
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
index 590b56739c5b33..9c38265c74aff6 100644
--- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
+++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
@@ -335,6 +335,22 @@ type Taskstats struct {
Wpcopy_delay_total uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
+ Irq_delay_max uint64
+ Irq_delay_min uint64
}
type cpuMask uint64
diff --git a/src/cmd/vendor/golang.org/x/term/term_windows.go b/src/cmd/vendor/golang.org/x/term/term_windows.go
index df6bf948e14070..0ddd81c02a69cc 100644
--- a/src/cmd/vendor/golang.org/x/term/term_windows.go
+++ b/src/cmd/vendor/golang.org/x/term/term_windows.go
@@ -20,12 +20,14 @@ func isTerminal(fd int) bool {
return err == nil
}
+// This is intended to be used on a console input handle.
+// See https://learn.microsoft.com/en-us/windows/console/setconsolemode
func makeRaw(fd int) (*State, error) {
var st uint32
if err := windows.GetConsoleMode(windows.Handle(fd), &st); err != nil {
return nil, err
}
- raw := st &^ (windows.ENABLE_ECHO_INPUT | windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT | windows.ENABLE_PROCESSED_OUTPUT)
+ raw := st &^ (windows.ENABLE_ECHO_INPUT | windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT)
raw |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT
if err := windows.SetConsoleMode(windows.Handle(fd), raw); err != nil {
return nil, err
diff --git a/src/cmd/vendor/golang.org/x/term/terminal.go b/src/cmd/vendor/golang.org/x/term/terminal.go
index 13e9a64ad10a7e..bddb2e2aebd4a1 100644
--- a/src/cmd/vendor/golang.org/x/term/terminal.go
+++ b/src/cmd/vendor/golang.org/x/term/terminal.go
@@ -146,6 +146,7 @@ const (
keyCtrlD = 4
keyCtrlU = 21
keyEnter = '\r'
+ keyLF = '\n'
keyEscape = 27
keyBackspace = 127
keyUnknown = 0xd800 /* UTF-16 surrogate area */ + iota
@@ -497,7 +498,7 @@ func (t *Terminal) historyAdd(entry string) {
// handleKey processes the given key and, optionally, returns a line of text
// that the user has entered.
func (t *Terminal) handleKey(key rune) (line string, ok bool) {
- if t.pasteActive && key != keyEnter {
+ if t.pasteActive && key != keyEnter && key != keyLF {
t.addKeyToLine(key)
return
}
@@ -567,7 +568,7 @@ func (t *Terminal) handleKey(key rune) (line string, ok bool) {
t.setLine(runes, len(runes))
}
}
- case keyEnter:
+ case keyEnter, keyLF:
t.moveCursorToPos(len(t.line))
t.queue([]rune("\r\n"))
line = string(t.line)
@@ -812,6 +813,10 @@ func (t *Terminal) readLine() (line string, err error) {
if !t.pasteActive {
lineIsPasted = false
}
+ // If we have CR, consume LF if present (CRLF sequence) to avoid returning an extra empty line.
+ if key == keyEnter && len(rest) > 0 && rest[0] == keyLF {
+ rest = rest[1:]
+ }
line, lineOk = t.handleKey(key)
}
if len(rest) > 0 {
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/flags.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/flags.go
index 6aefef25815852..18e01c40def3aa 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/flags.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/flags.go
@@ -318,24 +318,32 @@ var vetLegacyFlags = map[string]string{
// If contextLines is nonnegative, it also prints the
// offending line plus this many lines of context.
func PrintPlain(out io.Writer, fset *token.FileSet, contextLines int, diag analysis.Diagnostic) {
- posn := fset.Position(diag.Pos)
- fmt.Fprintf(out, "%s: %s\n", posn, diag.Message)
-
- // show offending line plus N lines of context.
- if contextLines >= 0 {
- posn := fset.Position(diag.Pos)
- end := fset.Position(diag.End)
- if !end.IsValid() {
- end = posn
- }
- data, _ := os.ReadFile(posn.Filename)
- lines := strings.Split(string(data), "\n")
- for i := posn.Line - contextLines; i <= end.Line+contextLines; i++ {
- if 1 <= i && i <= len(lines) {
- fmt.Fprintf(out, "%d\t%s\n", i, lines[i-1])
+ print := func(pos, end token.Pos, message string) {
+ posn := fset.Position(pos)
+ fmt.Fprintf(out, "%s: %s\n", posn, message)
+
+ // show offending line plus N lines of context.
+ if contextLines >= 0 {
+ end := fset.Position(end)
+ if !end.IsValid() {
+ end = posn
+ }
+ // TODO(adonovan): highlight the portion of the line indicated
+ // by pos...end using ASCII art, terminal colors, etc?
+ data, _ := os.ReadFile(posn.Filename)
+ lines := strings.Split(string(data), "\n")
+ for i := posn.Line - contextLines; i <= end.Line+contextLines; i++ {
+ if 1 <= i && i <= len(lines) {
+ fmt.Fprintf(out, "%d\t%s\n", i, lines[i-1])
+ }
}
}
}
+
+ print(diag.Pos, diag.End, diag.Message)
+ for _, rel := range diag.Related {
+ print(rel.Pos, rel.End, "\t"+rel.Message)
+ }
}
// A JSONTree is a mapping from package ID to analysis name to result.
diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go
index 1413ee13d293e0..1914bb476168ba 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go
@@ -9,11 +9,11 @@ package assign
import (
_ "embed"
- "fmt"
"go/ast"
"go/token"
"go/types"
"reflect"
+ "strings"
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/inspect"
@@ -48,31 +48,84 @@ func run(pass *analysis.Pass) (any, error) {
// If LHS and RHS have different cardinality, they can't be the same.
return
}
+
+ // Delete redundant LHS, RHS pairs, taking care
+ // to include intervening commas.
+ var (
+ exprs []string // expressions appearing on both sides (x = x)
+ edits []analysis.TextEdit
+ runStartLHS, runStartRHS token.Pos // non-zero => within a run
+ )
for i, lhs := range stmt.Lhs {
rhs := stmt.Rhs[i]
- if analysisutil.HasSideEffects(pass.TypesInfo, lhs) ||
- analysisutil.HasSideEffects(pass.TypesInfo, rhs) ||
- isMapIndex(pass.TypesInfo, lhs) {
- continue // expressions may not be equal
- }
- if reflect.TypeOf(lhs) != reflect.TypeOf(rhs) {
- continue // short-circuit the heavy-weight gofmt check
+ isSelfAssign := false
+ var le string
+
+ if !analysisutil.HasSideEffects(pass.TypesInfo, lhs) &&
+ !analysisutil.HasSideEffects(pass.TypesInfo, rhs) &&
+ !isMapIndex(pass.TypesInfo, lhs) &&
+ reflect.TypeOf(lhs) == reflect.TypeOf(rhs) { // short-circuit the heavy-weight gofmt check
+
+ le = analysisinternal.Format(pass.Fset, lhs)
+ re := analysisinternal.Format(pass.Fset, rhs)
+ if le == re {
+ isSelfAssign = true
+ }
}
- le := analysisinternal.Format(pass.Fset, lhs)
- re := analysisinternal.Format(pass.Fset, rhs)
- if le == re {
- pass.Report(analysis.Diagnostic{
- Pos: stmt.Pos(), Message: fmt.Sprintf("self-assignment of %s to %s", re, le),
- SuggestedFixes: []analysis.SuggestedFix{{
- Message: "Remove self-assignment",
- TextEdits: []analysis.TextEdit{{
- Pos: stmt.Pos(),
- End: stmt.End(),
- }}},
- },
- })
+
+ if isSelfAssign {
+ exprs = append(exprs, le)
+ if !runStartLHS.IsValid() {
+ // Start of a new run of self-assignments.
+ if i > 0 {
+ runStartLHS = stmt.Lhs[i-1].End()
+ runStartRHS = stmt.Rhs[i-1].End()
+ } else {
+ runStartLHS = lhs.Pos()
+ runStartRHS = rhs.Pos()
+ }
+ }
+ } else if runStartLHS.IsValid() {
+ // End of a run of self-assignments.
+ endLHS, endRHS := stmt.Lhs[i-1].End(), stmt.Rhs[i-1].End()
+ if runStartLHS == stmt.Lhs[0].Pos() {
+ endLHS, endRHS = lhs.Pos(), rhs.Pos()
+ }
+ edits = append(edits,
+ analysis.TextEdit{Pos: runStartLHS, End: endLHS},
+ analysis.TextEdit{Pos: runStartRHS, End: endRHS},
+ )
+ runStartLHS, runStartRHS = 0, 0
}
}
+
+ // If a run of self-assignments continues to the end of the statement, close it.
+ if runStartLHS.IsValid() {
+ last := len(stmt.Lhs) - 1
+ edits = append(edits,
+ analysis.TextEdit{Pos: runStartLHS, End: stmt.Lhs[last].End()},
+ analysis.TextEdit{Pos: runStartRHS, End: stmt.Rhs[last].End()},
+ )
+ }
+
+ if len(exprs) == 0 {
+ return
+ }
+
+ if len(exprs) == len(stmt.Lhs) {
+ // If every part of the statement is a self-assignment,
+ // remove the whole statement.
+ edits = []analysis.TextEdit{{Pos: stmt.Pos(), End: stmt.End()}}
+ }
+
+ pass.Report(analysis.Diagnostic{
+ Pos: stmt.Pos(),
+ Message: "self-assignment of " + strings.Join(exprs, ", "),
+ SuggestedFixes: []analysis.SuggestedFix{{
+ Message: "Remove self-assignment",
+ TextEdits: edits,
+ }},
+ })
})
return nil, nil
diff --git a/src/cmd/vendor/golang.org/x/tools/go/ast/inspector/inspector.go b/src/cmd/vendor/golang.org/x/tools/go/ast/inspector/inspector.go
index bc44b2c8e7e47a..a703cdfcf9092e 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/ast/inspector/inspector.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/ast/inspector/inspector.go
@@ -85,6 +85,7 @@ type event struct {
// TODO: Experiment with storing only the second word of event.node (unsafe.Pointer).
// Type can be recovered from the sole bit in typ.
+// [Tried this, wasn't faster. --adonovan]
// Preorder visits all the nodes of the files supplied to New in
// depth-first order. It calls f(n) for each node n before it visits
diff --git a/src/cmd/vendor/golang.org/x/tools/go/ast/inspector/typeof.go b/src/cmd/vendor/golang.org/x/tools/go/ast/inspector/typeof.go
index e936c67c985a0b..9852331a3dbe3f 100644
--- a/src/cmd/vendor/golang.org/x/tools/go/ast/inspector/typeof.go
+++ b/src/cmd/vendor/golang.org/x/tools/go/ast/inspector/typeof.go
@@ -12,8 +12,6 @@ package inspector
import (
"go/ast"
"math"
-
- _ "unsafe"
)
const (
@@ -217,7 +215,6 @@ func typeOf(n ast.Node) uint64 {
return 0
}
-//go:linkname maskOf golang.org/x/tools/go/ast/inspector.maskOf
func maskOf(nodes []ast.Node) uint64 {
if len(nodes) == 0 {
return math.MaxUint64 // match all node types
diff --git a/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/typeindex/typeindex.go b/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/typeindex/typeindex.go
index e03deef4409716..01ad7b9cf76079 100644
--- a/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/typeindex/typeindex.go
+++ b/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/typeindex/typeindex.go
@@ -59,24 +59,59 @@ func New(inspect *inspector.Inspector, pkg *types.Package, info *types.Info) *In
addPackage(obj.Pkg())
}
- us, ok := ix.uses[obj]
- if !ok {
- us = &uses{}
- us.code = us.initial[:0]
- ix.uses[obj] = us
- }
- delta := cur.Index() - us.last
- if delta < 0 {
- panic("non-monotonic")
+ for {
+ us, ok := ix.uses[obj]
+ if !ok {
+ us = &uses{}
+ us.code = us.initial[:0]
+ ix.uses[obj] = us
+ }
+ delta := cur.Index() - us.last
+ if delta < 0 {
+ panic("non-monotonic")
+ }
+ us.code = binary.AppendUvarint(us.code, uint64(delta))
+ us.last = cur.Index()
+
+ // If n is a selection of a field or method of an instantiated
+ // type, also record a use of the generic field or method.
+ obj, ok = objectOrigin(obj)
+ if !ok {
+ break
+ }
}
- us.code = binary.AppendUvarint(us.code, uint64(delta))
- us.last = cur.Index()
}
}
}
return ix
}
+// objectOrigin returns the generic object for obj if it is a field or
+// method of an instantiated type; zero otherwise.
+//
+// (This operation is appropriate only for selections.
+// Lexically resolved references always resolve to the generic.
+// Although Named and Alias types also use Origin to express
+// an instance/generic distinction, that's in the domain
+// of Types; their TypeName objects always refer to the generic.)
+func objectOrigin(obj types.Object) (types.Object, bool) {
+ var origin types.Object
+ switch obj := obj.(type) {
+ case *types.Func:
+ if obj.Signature().Recv() != nil {
+ origin = obj.Origin() // G[int].method -> G[T].method
+ }
+ case *types.Var:
+ if obj.IsField() {
+ origin = obj.Origin() // G[int].field -> G[T].field
+ }
+ }
+ if origin != nil && origin != obj {
+ return origin, true
+ }
+ return nil, false
+}
+
// An Index holds an index mapping [types.Object] symbols to their syntax.
// In effect, it is the inverse of [types.Info].
type Index struct {
@@ -106,6 +141,10 @@ type uses struct {
// Uses returns the sequence of Cursors of [*ast.Ident]s in this package
// that refer to obj. If obj is nil, the sequence is empty.
+//
+// Uses, unlike the Uses field of [types.Info], records additional
+// entries mapping fields and methods of generic types to references
+// through their corresponding instantiated objects.
func (ix *Index) Uses(obj types.Object) iter.Seq[inspector.Cursor] {
return func(yield func(inspector.Cursor) bool) {
if uses := ix.uses[obj]; uses != nil {
diff --git a/src/cmd/vendor/modules.txt b/src/cmd/vendor/modules.txt
index 6e5783fdd4767a..457fc9d619f6ca 100644
--- a/src/cmd/vendor/modules.txt
+++ b/src/cmd/vendor/modules.txt
@@ -1,5 +1,5 @@
-# github.com/google/pprof v0.0.0-20250208200701-d0013a598941
-## explicit; go 1.22
+# github.com/google/pprof v0.0.0-20250630185457-6e76a2b096b5
+## explicit; go 1.23.0
github.com/google/pprof/driver
github.com/google/pprof/internal/binutils
github.com/google/pprof/internal/driver
@@ -13,10 +13,10 @@ github.com/google/pprof/internal/symbolz
github.com/google/pprof/internal/transport
github.com/google/pprof/profile
github.com/google/pprof/third_party/svgpan
-# github.com/ianlancetaylor/demangle v0.0.0-20240912202439-0a2b6291aafd
+# github.com/ianlancetaylor/demangle v0.0.0-20250417193237-f615e6bd150b
## explicit; go 1.13
github.com/ianlancetaylor/demangle
-# golang.org/x/arch v0.18.1-0.20250605182141-b2f4e2807dec
+# golang.org/x/arch v0.20.1-0.20250808194827-46ba08e3ae58
## explicit; go 1.23.0
golang.org/x/arch/arm/armasm
golang.org/x/arch/arm64/arm64asm
@@ -25,10 +25,10 @@ golang.org/x/arch/ppc64/ppc64asm
golang.org/x/arch/riscv64/riscv64asm
golang.org/x/arch/s390x/s390xasm
golang.org/x/arch/x86/x86asm
-# golang.org/x/build v0.0.0-20250606033421-8c8ff6f34a83
+# golang.org/x/build v0.0.0-20250806225920-b7c66c047964
## explicit; go 1.23.0
golang.org/x/build/relnote
-# golang.org/x/mod v0.25.0
+# golang.org/x/mod v0.27.0
## explicit; go 1.23.0
golang.org/x/mod/internal/lazyregexp
golang.org/x/mod/modfile
@@ -39,16 +39,16 @@ golang.org/x/mod/sumdb/dirhash
golang.org/x/mod/sumdb/note
golang.org/x/mod/sumdb/tlog
golang.org/x/mod/zip
-# golang.org/x/sync v0.15.0
+# golang.org/x/sync v0.16.0
## explicit; go 1.23.0
golang.org/x/sync/errgroup
golang.org/x/sync/semaphore
-# golang.org/x/sys v0.33.0
+# golang.org/x/sys v0.35.0
## explicit; go 1.23.0
golang.org/x/sys/plan9
golang.org/x/sys/unix
golang.org/x/sys/windows
-# golang.org/x/telemetry v0.0.0-20250606142133-60998feb31a8
+# golang.org/x/telemetry v0.0.0-20250807160809-1a19826ec488
## explicit; go 1.23.0
golang.org/x/telemetry
golang.org/x/telemetry/counter
@@ -60,10 +60,10 @@ golang.org/x/telemetry/internal/crashmonitor
golang.org/x/telemetry/internal/mmap
golang.org/x/telemetry/internal/telemetry
golang.org/x/telemetry/internal/upload
-# golang.org/x/term v0.32.0
+# golang.org/x/term v0.34.0
## explicit; go 1.23.0
golang.org/x/term
-# golang.org/x/text v0.26.0
+# golang.org/x/text v0.28.0
## explicit; go 1.23.0
golang.org/x/text/cases
golang.org/x/text/internal
@@ -73,7 +73,7 @@ golang.org/x/text/internal/tag
golang.org/x/text/language
golang.org/x/text/transform
golang.org/x/text/unicode/norm
-# golang.org/x/tools v0.34.0
+# golang.org/x/tools v0.36.1-0.20250808220315-8866876b956f
## explicit; go 1.23.0
golang.org/x/tools/cmd/bisect
golang.org/x/tools/cover
diff --git a/src/cmd/vet/testdata/assign/assign.go b/src/cmd/vet/testdata/assign/assign.go
index 112614e562c683..38e1b1ab0d9d80 100644
--- a/src/cmd/vet/testdata/assign/assign.go
+++ b/src/cmd/vet/testdata/assign/assign.go
@@ -15,11 +15,11 @@ type ST struct {
func (s *ST) SetX(x int, ch chan int) {
// Accidental self-assignment; it should be "s.x = x"
- x = x // ERROR "self-assignment of x to x"
+ x = x // ERROR "self-assignment of x"
// Another mistake
- s.x = s.x // ERROR "self-assignment of s.x to s.x"
+ s.x = s.x // ERROR "self-assignment of s.x"
- s.l[0] = s.l[0] // ERROR "self-assignment of s.l.0. to s.l.0."
+ s.l[0] = s.l[0] // ERROR "self-assignment of s.l.0."
// Bail on any potential side effects to avoid false positives
s.l[num()] = s.l[num()]
diff --git a/src/context/context.go b/src/context/context.go
index 4f150f6a1d6c7e..4fb537e23387ab 100644
--- a/src/context/context.go
+++ b/src/context/context.go
@@ -103,7 +103,7 @@ type Context interface {
// }
// }
//
- // See https://blog.golang.org/pipelines for more examples of how to use
+ // See https://go.dev/blog/pipelines for more examples of how to use
// a Done channel for cancellation.
Done() <-chan struct{}
diff --git a/src/crypto/internal/fips140/sha256/sha256block_loong64.s b/src/crypto/internal/fips140/sha256/sha256block_loong64.s
index e171d93e0ba5f0..ad03cd9931b7f5 100644
--- a/src/crypto/internal/fips140/sha256/sha256block_loong64.s
+++ b/src/crypto/internal/fips140/sha256/sha256block_loong64.s
@@ -52,6 +52,7 @@
#define REGTMP3 R18
#define REGTMP4 R7
#define REGTMP5 R6
+#define REG_KT R19
// W[i] = M[i]; for 0 <= i <= 15
#define LOAD0(index) \
@@ -89,8 +90,9 @@
// Ch(x, y, z) = (x AND y) XOR (NOT x AND z)
// = ((y XOR z) AND x) XOR z
// Calculate T1 in REGTMP4
-#define SHA256T1(const, e, f, g, h) \
- ADDV $const, h; \
+#define SHA256T1(index, e, f, g, h) \
+ MOVW (index*4)(REG_KT), REGTMP5; \
+ ADDV REGTMP5, h; \
ADD REGTMP4, h; \
ROTR $6, e, REGTMP5; \
ROTR $11, e, REGTMP; \
@@ -122,19 +124,19 @@
// Calculate T1 and T2, then e = d + T1 and a = T1 + T2.
// The values for e and a are stored in d and h, ready for rotation.
-#define SHA256ROUND(const, a, b, c, d, e, f, g, h) \
- SHA256T1(const, e, f, g, h); \
+#define SHA256ROUND(index, a, b, c, d, e, f, g, h) \
+ SHA256T1(index, e, f, g, h); \
SHA256T2(a, b, c); \
ADD REGTMP4, d; \
ADD REGTMP1, REGTMP4, h
-#define SHA256ROUND0(index, const, a, b, c, d, e, f, g, h) \
+#define SHA256ROUND0(index, a, b, c, d, e, f, g, h) \
LOAD0(index); \
- SHA256ROUND(const, a, b, c, d, e, f, g, h)
+ SHA256ROUND(index, a, b, c, d, e, f, g, h)
-#define SHA256ROUND1(index, const, a, b, c, d, e, f, g, h) \
+#define SHA256ROUND1(index, a, b, c, d, e, f, g, h) \
LOAD1(index); \
- SHA256ROUND(const, a, b, c, d, e, f, g, h)
+ SHA256ROUND(index, a, b, c, d, e, f, g, h)
// A stack frame size of 64 bytes is required here, because
// the frame size used for data expansion is 64 bytes.
@@ -147,6 +149,8 @@ TEXT ·block(SB),NOSPLIT,$64-32
AND $~63, R6
BEQ R6, end
+ MOVV $·_K(SB), REG_KT // const table
+
// p_len >= 64
MOVV dig+0(FP), R4
ADDV R5, R6, R25
@@ -160,71 +164,71 @@ TEXT ·block(SB),NOSPLIT,$64-32
MOVW (7*4)(R4), R15 // h = H7
loop:
- SHA256ROUND0(0, 0x428a2f98, R8, R9, R10, R11, R12, R13, R14, R15)
- SHA256ROUND0(1, 0x71374491, R15, R8, R9, R10, R11, R12, R13, R14)
- SHA256ROUND0(2, 0xb5c0fbcf, R14, R15, R8, R9, R10, R11, R12, R13)
- SHA256ROUND0(3, 0xe9b5dba5, R13, R14, R15, R8, R9, R10, R11, R12)
- SHA256ROUND0(4, 0x3956c25b, R12, R13, R14, R15, R8, R9, R10, R11)
- SHA256ROUND0(5, 0x59f111f1, R11, R12, R13, R14, R15, R8, R9, R10)
- SHA256ROUND0(6, 0x923f82a4, R10, R11, R12, R13, R14, R15, R8, R9)
- SHA256ROUND0(7, 0xab1c5ed5, R9, R10, R11, R12, R13, R14, R15, R8)
- SHA256ROUND0(8, 0xd807aa98, R8, R9, R10, R11, R12, R13, R14, R15)
- SHA256ROUND0(9, 0x12835b01, R15, R8, R9, R10, R11, R12, R13, R14)
- SHA256ROUND0(10, 0x243185be, R14, R15, R8, R9, R10, R11, R12, R13)
- SHA256ROUND0(11, 0x550c7dc3, R13, R14, R15, R8, R9, R10, R11, R12)
- SHA256ROUND0(12, 0x72be5d74, R12, R13, R14, R15, R8, R9, R10, R11)
- SHA256ROUND0(13, 0x80deb1fe, R11, R12, R13, R14, R15, R8, R9, R10)
- SHA256ROUND0(14, 0x9bdc06a7, R10, R11, R12, R13, R14, R15, R8, R9)
- SHA256ROUND0(15, 0xc19bf174, R9, R10, R11, R12, R13, R14, R15, R8)
+ SHA256ROUND0(0, R8, R9, R10, R11, R12, R13, R14, R15)
+ SHA256ROUND0(1, R15, R8, R9, R10, R11, R12, R13, R14)
+ SHA256ROUND0(2, R14, R15, R8, R9, R10, R11, R12, R13)
+ SHA256ROUND0(3, R13, R14, R15, R8, R9, R10, R11, R12)
+ SHA256ROUND0(4, R12, R13, R14, R15, R8, R9, R10, R11)
+ SHA256ROUND0(5, R11, R12, R13, R14, R15, R8, R9, R10)
+ SHA256ROUND0(6, R10, R11, R12, R13, R14, R15, R8, R9)
+ SHA256ROUND0(7, R9, R10, R11, R12, R13, R14, R15, R8)
+ SHA256ROUND0(8, R8, R9, R10, R11, R12, R13, R14, R15)
+ SHA256ROUND0(9, R15, R8, R9, R10, R11, R12, R13, R14)
+ SHA256ROUND0(10, R14, R15, R8, R9, R10, R11, R12, R13)
+ SHA256ROUND0(11, R13, R14, R15, R8, R9, R10, R11, R12)
+ SHA256ROUND0(12, R12, R13, R14, R15, R8, R9, R10, R11)
+ SHA256ROUND0(13, R11, R12, R13, R14, R15, R8, R9, R10)
+ SHA256ROUND0(14, R10, R11, R12, R13, R14, R15, R8, R9)
+ SHA256ROUND0(15, R9, R10, R11, R12, R13, R14, R15, R8)
- SHA256ROUND1(16, 0xe49b69c1, R8, R9, R10, R11, R12, R13, R14, R15)
- SHA256ROUND1(17, 0xefbe4786, R15, R8, R9, R10, R11, R12, R13, R14)
- SHA256ROUND1(18, 0x0fc19dc6, R14, R15, R8, R9, R10, R11, R12, R13)
- SHA256ROUND1(19, 0x240ca1cc, R13, R14, R15, R8, R9, R10, R11, R12)
- SHA256ROUND1(20, 0x2de92c6f, R12, R13, R14, R15, R8, R9, R10, R11)
- SHA256ROUND1(21, 0x4a7484aa, R11, R12, R13, R14, R15, R8, R9, R10)
- SHA256ROUND1(22, 0x5cb0a9dc, R10, R11, R12, R13, R14, R15, R8, R9)
- SHA256ROUND1(23, 0x76f988da, R9, R10, R11, R12, R13, R14, R15, R8)
- SHA256ROUND1(24, 0x983e5152, R8, R9, R10, R11, R12, R13, R14, R15)
- SHA256ROUND1(25, 0xa831c66d, R15, R8, R9, R10, R11, R12, R13, R14)
- SHA256ROUND1(26, 0xb00327c8, R14, R15, R8, R9, R10, R11, R12, R13)
- SHA256ROUND1(27, 0xbf597fc7, R13, R14, R15, R8, R9, R10, R11, R12)
- SHA256ROUND1(28, 0xc6e00bf3, R12, R13, R14, R15, R8, R9, R10, R11)
- SHA256ROUND1(29, 0xd5a79147, R11, R12, R13, R14, R15, R8, R9, R10)
- SHA256ROUND1(30, 0x06ca6351, R10, R11, R12, R13, R14, R15, R8, R9)
- SHA256ROUND1(31, 0x14292967, R9, R10, R11, R12, R13, R14, R15, R8)
- SHA256ROUND1(32, 0x27b70a85, R8, R9, R10, R11, R12, R13, R14, R15)
- SHA256ROUND1(33, 0x2e1b2138, R15, R8, R9, R10, R11, R12, R13, R14)
- SHA256ROUND1(34, 0x4d2c6dfc, R14, R15, R8, R9, R10, R11, R12, R13)
- SHA256ROUND1(35, 0x53380d13, R13, R14, R15, R8, R9, R10, R11, R12)
- SHA256ROUND1(36, 0x650a7354, R12, R13, R14, R15, R8, R9, R10, R11)
- SHA256ROUND1(37, 0x766a0abb, R11, R12, R13, R14, R15, R8, R9, R10)
- SHA256ROUND1(38, 0x81c2c92e, R10, R11, R12, R13, R14, R15, R8, R9)
- SHA256ROUND1(39, 0x92722c85, R9, R10, R11, R12, R13, R14, R15, R8)
- SHA256ROUND1(40, 0xa2bfe8a1, R8, R9, R10, R11, R12, R13, R14, R15)
- SHA256ROUND1(41, 0xa81a664b, R15, R8, R9, R10, R11, R12, R13, R14)
- SHA256ROUND1(42, 0xc24b8b70, R14, R15, R8, R9, R10, R11, R12, R13)
- SHA256ROUND1(43, 0xc76c51a3, R13, R14, R15, R8, R9, R10, R11, R12)
- SHA256ROUND1(44, 0xd192e819, R12, R13, R14, R15, R8, R9, R10, R11)
- SHA256ROUND1(45, 0xd6990624, R11, R12, R13, R14, R15, R8, R9, R10)
- SHA256ROUND1(46, 0xf40e3585, R10, R11, R12, R13, R14, R15, R8, R9)
- SHA256ROUND1(47, 0x106aa070, R9, R10, R11, R12, R13, R14, R15, R8)
- SHA256ROUND1(48, 0x19a4c116, R8, R9, R10, R11, R12, R13, R14, R15)
- SHA256ROUND1(49, 0x1e376c08, R15, R8, R9, R10, R11, R12, R13, R14)
- SHA256ROUND1(50, 0x2748774c, R14, R15, R8, R9, R10, R11, R12, R13)
- SHA256ROUND1(51, 0x34b0bcb5, R13, R14, R15, R8, R9, R10, R11, R12)
- SHA256ROUND1(52, 0x391c0cb3, R12, R13, R14, R15, R8, R9, R10, R11)
- SHA256ROUND1(53, 0x4ed8aa4a, R11, R12, R13, R14, R15, R8, R9, R10)
- SHA256ROUND1(54, 0x5b9cca4f, R10, R11, R12, R13, R14, R15, R8, R9)
- SHA256ROUND1(55, 0x682e6ff3, R9, R10, R11, R12, R13, R14, R15, R8)
- SHA256ROUND1(56, 0x748f82ee, R8, R9, R10, R11, R12, R13, R14, R15)
- SHA256ROUND1(57, 0x78a5636f, R15, R8, R9, R10, R11, R12, R13, R14)
- SHA256ROUND1(58, 0x84c87814, R14, R15, R8, R9, R10, R11, R12, R13)
- SHA256ROUND1(59, 0x8cc70208, R13, R14, R15, R8, R9, R10, R11, R12)
- SHA256ROUND1(60, 0x90befffa, R12, R13, R14, R15, R8, R9, R10, R11)
- SHA256ROUND1(61, 0xa4506ceb, R11, R12, R13, R14, R15, R8, R9, R10)
- SHA256ROUND1(62, 0xbef9a3f7, R10, R11, R12, R13, R14, R15, R8, R9)
- SHA256ROUND1(63, 0xc67178f2, R9, R10, R11, R12, R13, R14, R15, R8)
+ SHA256ROUND1(16, R8, R9, R10, R11, R12, R13, R14, R15)
+ SHA256ROUND1(17, R15, R8, R9, R10, R11, R12, R13, R14)
+ SHA256ROUND1(18, R14, R15, R8, R9, R10, R11, R12, R13)
+ SHA256ROUND1(19, R13, R14, R15, R8, R9, R10, R11, R12)
+ SHA256ROUND1(20, R12, R13, R14, R15, R8, R9, R10, R11)
+ SHA256ROUND1(21, R11, R12, R13, R14, R15, R8, R9, R10)
+ SHA256ROUND1(22, R10, R11, R12, R13, R14, R15, R8, R9)
+ SHA256ROUND1(23, R9, R10, R11, R12, R13, R14, R15, R8)
+ SHA256ROUND1(24, R8, R9, R10, R11, R12, R13, R14, R15)
+ SHA256ROUND1(25, R15, R8, R9, R10, R11, R12, R13, R14)
+ SHA256ROUND1(26, R14, R15, R8, R9, R10, R11, R12, R13)
+ SHA256ROUND1(27, R13, R14, R15, R8, R9, R10, R11, R12)
+ SHA256ROUND1(28, R12, R13, R14, R15, R8, R9, R10, R11)
+ SHA256ROUND1(29, R11, R12, R13, R14, R15, R8, R9, R10)
+ SHA256ROUND1(30, R10, R11, R12, R13, R14, R15, R8, R9)
+ SHA256ROUND1(31, R9, R10, R11, R12, R13, R14, R15, R8)
+ SHA256ROUND1(32, R8, R9, R10, R11, R12, R13, R14, R15)
+ SHA256ROUND1(33, R15, R8, R9, R10, R11, R12, R13, R14)
+ SHA256ROUND1(34, R14, R15, R8, R9, R10, R11, R12, R13)
+ SHA256ROUND1(35, R13, R14, R15, R8, R9, R10, R11, R12)
+ SHA256ROUND1(36, R12, R13, R14, R15, R8, R9, R10, R11)
+ SHA256ROUND1(37, R11, R12, R13, R14, R15, R8, R9, R10)
+ SHA256ROUND1(38, R10, R11, R12, R13, R14, R15, R8, R9)
+ SHA256ROUND1(39, R9, R10, R11, R12, R13, R14, R15, R8)
+ SHA256ROUND1(40, R8, R9, R10, R11, R12, R13, R14, R15)
+ SHA256ROUND1(41, R15, R8, R9, R10, R11, R12, R13, R14)
+ SHA256ROUND1(42, R14, R15, R8, R9, R10, R11, R12, R13)
+ SHA256ROUND1(43, R13, R14, R15, R8, R9, R10, R11, R12)
+ SHA256ROUND1(44, R12, R13, R14, R15, R8, R9, R10, R11)
+ SHA256ROUND1(45, R11, R12, R13, R14, R15, R8, R9, R10)
+ SHA256ROUND1(46, R10, R11, R12, R13, R14, R15, R8, R9)
+ SHA256ROUND1(47, R9, R10, R11, R12, R13, R14, R15, R8)
+ SHA256ROUND1(48, R8, R9, R10, R11, R12, R13, R14, R15)
+ SHA256ROUND1(49, R15, R8, R9, R10, R11, R12, R13, R14)
+ SHA256ROUND1(50, R14, R15, R8, R9, R10, R11, R12, R13)
+ SHA256ROUND1(51, R13, R14, R15, R8, R9, R10, R11, R12)
+ SHA256ROUND1(52, R12, R13, R14, R15, R8, R9, R10, R11)
+ SHA256ROUND1(53, R11, R12, R13, R14, R15, R8, R9, R10)
+ SHA256ROUND1(54, R10, R11, R12, R13, R14, R15, R8, R9)
+ SHA256ROUND1(55, R9, R10, R11, R12, R13, R14, R15, R8)
+ SHA256ROUND1(56, R8, R9, R10, R11, R12, R13, R14, R15)
+ SHA256ROUND1(57, R15, R8, R9, R10, R11, R12, R13, R14)
+ SHA256ROUND1(58, R14, R15, R8, R9, R10, R11, R12, R13)
+ SHA256ROUND1(59, R13, R14, R15, R8, R9, R10, R11, R12)
+ SHA256ROUND1(60, R12, R13, R14, R15, R8, R9, R10, R11)
+ SHA256ROUND1(61, R11, R12, R13, R14, R15, R8, R9, R10)
+ SHA256ROUND1(62, R10, R11, R12, R13, R14, R15, R8, R9)
+ SHA256ROUND1(63, R9, R10, R11, R12, R13, R14, R15, R8)
MOVW (0*4)(R4), REGTMP
MOVW (1*4)(R4), REGTMP1
diff --git a/src/crypto/internal/fips140/sha512/sha512block_loong64.s b/src/crypto/internal/fips140/sha512/sha512block_loong64.s
index f65d563ca34d82..751ab4e4f696e7 100644
--- a/src/crypto/internal/fips140/sha512/sha512block_loong64.s
+++ b/src/crypto/internal/fips140/sha512/sha512block_loong64.s
@@ -14,6 +14,7 @@
#define REGTMP3 R18
#define REGTMP4 R7
#define REGTMP5 R6
+#define REG_KT R19
// W[i] = M[i]; for 0 <= i <= 15
#define LOAD0(index) \
@@ -52,8 +53,9 @@
// Ch(x, y, z) = (x AND y) XOR (NOT x AND z)
// = ((y XOR z) AND x) XOR z
// Calculate T1 in REGTMP4
-#define SHA512T1(const, e, f, g, h) \
- ADDV $const, h; \
+#define SHA512T1(index, e, f, g, h) \
+ MOVV (index*8)(REG_KT), REGTMP5; \
+ ADDV REGTMP5, h; \
ADDV REGTMP4, h; \
ROTRV $14, e, REGTMP5; \
ROTRV $18, e, REGTMP; \
@@ -85,19 +87,19 @@
// Calculate T1 and T2, then e = d + T1 and a = T1 + T2.
// The values for e and a are stored in d and h, ready for rotation.
-#define SHA512ROUND(const, a, b, c, d, e, f, g, h) \
- SHA512T1(const, e, f, g, h); \
+#define SHA512ROUND(index, a, b, c, d, e, f, g, h) \
+ SHA512T1(index, e, f, g, h); \
SHA512T2(a, b, c); \
ADDV REGTMP4, d; \
ADDV REGTMP1, REGTMP4, h
-#define SHA512ROUND0(index, const, a, b, c, d, e, f, g, h) \
+#define SHA512ROUND0(index, a, b, c, d, e, f, g, h) \
LOAD0(index); \
- SHA512ROUND(const, a, b, c, d, e, f, g, h)
+ SHA512ROUND(index, a, b, c, d, e, f, g, h)
-#define SHA512ROUND1(index, const, a, b, c, d, e, f, g, h) \
+#define SHA512ROUND1(index, a, b, c, d, e, f, g, h) \
LOAD1(index); \
- SHA512ROUND(const, a, b, c, d, e, f, g, h)
+ SHA512ROUND(index, a, b, c, d, e, f, g, h)
// A stack frame size of 128 bytes is required here, because
// the frame size used for data expansion is 128 bytes.
@@ -110,6 +112,8 @@ TEXT ·block(SB),NOSPLIT,$128-32
AND $~127, R6
BEQ R6, end
+ MOVV $·_K(SB), REG_KT // const table
+
// p_len >= 128
MOVV dig+0(FP), R4
ADDV R5, R6, R25
@@ -123,87 +127,87 @@ TEXT ·block(SB),NOSPLIT,$128-32
MOVV (7*8)(R4), R15 // h = H7
loop:
- SHA512ROUND0( 0, 0x428a2f98d728ae22, R8, R9, R10, R11, R12, R13, R14, R15)
- SHA512ROUND0( 1, 0x7137449123ef65cd, R15, R8, R9, R10, R11, R12, R13, R14)
- SHA512ROUND0( 2, 0xb5c0fbcfec4d3b2f, R14, R15, R8, R9, R10, R11, R12, R13)
- SHA512ROUND0( 3, 0xe9b5dba58189dbbc, R13, R14, R15, R8, R9, R10, R11, R12)
- SHA512ROUND0( 4, 0x3956c25bf348b538, R12, R13, R14, R15, R8, R9, R10, R11)
- SHA512ROUND0( 5, 0x59f111f1b605d019, R11, R12, R13, R14, R15, R8, R9, R10)
- SHA512ROUND0( 6, 0x923f82a4af194f9b, R10, R11, R12, R13, R14, R15, R8, R9)
- SHA512ROUND0( 7, 0xab1c5ed5da6d8118, R9, R10, R11, R12, R13, R14, R15, R8)
- SHA512ROUND0( 8, 0xd807aa98a3030242, R8, R9, R10, R11, R12, R13, R14, R15)
- SHA512ROUND0( 9, 0x12835b0145706fbe, R15, R8, R9, R10, R11, R12, R13, R14)
- SHA512ROUND0(10, 0x243185be4ee4b28c, R14, R15, R8, R9, R10, R11, R12, R13)
- SHA512ROUND0(11, 0x550c7dc3d5ffb4e2, R13, R14, R15, R8, R9, R10, R11, R12)
- SHA512ROUND0(12, 0x72be5d74f27b896f, R12, R13, R14, R15, R8, R9, R10, R11)
- SHA512ROUND0(13, 0x80deb1fe3b1696b1, R11, R12, R13, R14, R15, R8, R9, R10)
- SHA512ROUND0(14, 0x9bdc06a725c71235, R10, R11, R12, R13, R14, R15, R8, R9)
- SHA512ROUND0(15, 0xc19bf174cf692694, R9, R10, R11, R12, R13, R14, R15, R8)
-
- SHA512ROUND1(16, 0xe49b69c19ef14ad2, R8, R9, R10, R11, R12, R13, R14, R15)
- SHA512ROUND1(17, 0xefbe4786384f25e3, R15, R8, R9, R10, R11, R12, R13, R14)
- SHA512ROUND1(18, 0x0fc19dc68b8cd5b5, R14, R15, R8, R9, R10, R11, R12, R13)
- SHA512ROUND1(19, 0x240ca1cc77ac9c65, R13, R14, R15, R8, R9, R10, R11, R12)
- SHA512ROUND1(20, 0x2de92c6f592b0275, R12, R13, R14, R15, R8, R9, R10, R11)
- SHA512ROUND1(21, 0x4a7484aa6ea6e483, R11, R12, R13, R14, R15, R8, R9, R10)
- SHA512ROUND1(22, 0x5cb0a9dcbd41fbd4, R10, R11, R12, R13, R14, R15, R8, R9)
- SHA512ROUND1(23, 0x76f988da831153b5, R9, R10, R11, R12, R13, R14, R15, R8)
- SHA512ROUND1(24, 0x983e5152ee66dfab, R8, R9, R10, R11, R12, R13, R14, R15)
- SHA512ROUND1(25, 0xa831c66d2db43210, R15, R8, R9, R10, R11, R12, R13, R14)
- SHA512ROUND1(26, 0xb00327c898fb213f, R14, R15, R8, R9, R10, R11, R12, R13)
- SHA512ROUND1(27, 0xbf597fc7beef0ee4, R13, R14, R15, R8, R9, R10, R11, R12)
- SHA512ROUND1(28, 0xc6e00bf33da88fc2, R12, R13, R14, R15, R8, R9, R10, R11)
- SHA512ROUND1(29, 0xd5a79147930aa725, R11, R12, R13, R14, R15, R8, R9, R10)
- SHA512ROUND1(30, 0x06ca6351e003826f, R10, R11, R12, R13, R14, R15, R8, R9)
- SHA512ROUND1(31, 0x142929670a0e6e70, R9, R10, R11, R12, R13, R14, R15, R8)
- SHA512ROUND1(32, 0x27b70a8546d22ffc, R8, R9, R10, R11, R12, R13, R14, R15)
- SHA512ROUND1(33, 0x2e1b21385c26c926, R15, R8, R9, R10, R11, R12, R13, R14)
- SHA512ROUND1(34, 0x4d2c6dfc5ac42aed, R14, R15, R8, R9, R10, R11, R12, R13)
- SHA512ROUND1(35, 0x53380d139d95b3df, R13, R14, R15, R8, R9, R10, R11, R12)
- SHA512ROUND1(36, 0x650a73548baf63de, R12, R13, R14, R15, R8, R9, R10, R11)
- SHA512ROUND1(37, 0x766a0abb3c77b2a8, R11, R12, R13, R14, R15, R8, R9, R10)
- SHA512ROUND1(38, 0x81c2c92e47edaee6, R10, R11, R12, R13, R14, R15, R8, R9)
- SHA512ROUND1(39, 0x92722c851482353b, R9, R10, R11, R12, R13, R14, R15, R8)
- SHA512ROUND1(40, 0xa2bfe8a14cf10364, R8, R9, R10, R11, R12, R13, R14, R15)
- SHA512ROUND1(41, 0xa81a664bbc423001, R15, R8, R9, R10, R11, R12, R13, R14)
- SHA512ROUND1(42, 0xc24b8b70d0f89791, R14, R15, R8, R9, R10, R11, R12, R13)
- SHA512ROUND1(43, 0xc76c51a30654be30, R13, R14, R15, R8, R9, R10, R11, R12)
- SHA512ROUND1(44, 0xd192e819d6ef5218, R12, R13, R14, R15, R8, R9, R10, R11)
- SHA512ROUND1(45, 0xd69906245565a910, R11, R12, R13, R14, R15, R8, R9, R10)
- SHA512ROUND1(46, 0xf40e35855771202a, R10, R11, R12, R13, R14, R15, R8, R9)
- SHA512ROUND1(47, 0x106aa07032bbd1b8, R9, R10, R11, R12, R13, R14, R15, R8)
- SHA512ROUND1(48, 0x19a4c116b8d2d0c8, R8, R9, R10, R11, R12, R13, R14, R15)
- SHA512ROUND1(49, 0x1e376c085141ab53, R15, R8, R9, R10, R11, R12, R13, R14)
- SHA512ROUND1(50, 0x2748774cdf8eeb99, R14, R15, R8, R9, R10, R11, R12, R13)
- SHA512ROUND1(51, 0x34b0bcb5e19b48a8, R13, R14, R15, R8, R9, R10, R11, R12)
- SHA512ROUND1(52, 0x391c0cb3c5c95a63, R12, R13, R14, R15, R8, R9, R10, R11)
- SHA512ROUND1(53, 0x4ed8aa4ae3418acb, R11, R12, R13, R14, R15, R8, R9, R10)
- SHA512ROUND1(54, 0x5b9cca4f7763e373, R10, R11, R12, R13, R14, R15, R8, R9)
- SHA512ROUND1(55, 0x682e6ff3d6b2b8a3, R9, R10, R11, R12, R13, R14, R15, R8)
- SHA512ROUND1(56, 0x748f82ee5defb2fc, R8, R9, R10, R11, R12, R13, R14, R15)
- SHA512ROUND1(57, 0x78a5636f43172f60, R15, R8, R9, R10, R11, R12, R13, R14)
- SHA512ROUND1(58, 0x84c87814a1f0ab72, R14, R15, R8, R9, R10, R11, R12, R13)
- SHA512ROUND1(59, 0x8cc702081a6439ec, R13, R14, R15, R8, R9, R10, R11, R12)
- SHA512ROUND1(60, 0x90befffa23631e28, R12, R13, R14, R15, R8, R9, R10, R11)
- SHA512ROUND1(61, 0xa4506cebde82bde9, R11, R12, R13, R14, R15, R8, R9, R10)
- SHA512ROUND1(62, 0xbef9a3f7b2c67915, R10, R11, R12, R13, R14, R15, R8, R9)
- SHA512ROUND1(63, 0xc67178f2e372532b, R9, R10, R11, R12, R13, R14, R15, R8)
- SHA512ROUND1(64, 0xca273eceea26619c, R8, R9, R10, R11, R12, R13, R14, R15)
- SHA512ROUND1(65, 0xd186b8c721c0c207, R15, R8, R9, R10, R11, R12, R13, R14)
- SHA512ROUND1(66, 0xeada7dd6cde0eb1e, R14, R15, R8, R9, R10, R11, R12, R13)
- SHA512ROUND1(67, 0xf57d4f7fee6ed178, R13, R14, R15, R8, R9, R10, R11, R12)
- SHA512ROUND1(68, 0x06f067aa72176fba, R12, R13, R14, R15, R8, R9, R10, R11)
- SHA512ROUND1(69, 0x0a637dc5a2c898a6, R11, R12, R13, R14, R15, R8, R9, R10)
- SHA512ROUND1(70, 0x113f9804bef90dae, R10, R11, R12, R13, R14, R15, R8, R9)
- SHA512ROUND1(71, 0x1b710b35131c471b, R9, R10, R11, R12, R13, R14, R15, R8)
- SHA512ROUND1(72, 0x28db77f523047d84, R8, R9, R10, R11, R12, R13, R14, R15)
- SHA512ROUND1(73, 0x32caab7b40c72493, R15, R8, R9, R10, R11, R12, R13, R14)
- SHA512ROUND1(74, 0x3c9ebe0a15c9bebc, R14, R15, R8, R9, R10, R11, R12, R13)
- SHA512ROUND1(75, 0x431d67c49c100d4c, R13, R14, R15, R8, R9, R10, R11, R12)
- SHA512ROUND1(76, 0x4cc5d4becb3e42b6, R12, R13, R14, R15, R8, R9, R10, R11)
- SHA512ROUND1(77, 0x597f299cfc657e2a, R11, R12, R13, R14, R15, R8, R9, R10)
- SHA512ROUND1(78, 0x5fcb6fab3ad6faec, R10, R11, R12, R13, R14, R15, R8, R9)
- SHA512ROUND1(79, 0x6c44198c4a475817, R9, R10, R11, R12, R13, R14, R15, R8)
+ SHA512ROUND0( 0, R8, R9, R10, R11, R12, R13, R14, R15)
+ SHA512ROUND0( 1, R15, R8, R9, R10, R11, R12, R13, R14)
+ SHA512ROUND0( 2, R14, R15, R8, R9, R10, R11, R12, R13)
+ SHA512ROUND0( 3, R13, R14, R15, R8, R9, R10, R11, R12)
+ SHA512ROUND0( 4, R12, R13, R14, R15, R8, R9, R10, R11)
+ SHA512ROUND0( 5, R11, R12, R13, R14, R15, R8, R9, R10)
+ SHA512ROUND0( 6, R10, R11, R12, R13, R14, R15, R8, R9)
+ SHA512ROUND0( 7, R9, R10, R11, R12, R13, R14, R15, R8)
+ SHA512ROUND0( 8, R8, R9, R10, R11, R12, R13, R14, R15)
+ SHA512ROUND0( 9, R15, R8, R9, R10, R11, R12, R13, R14)
+ SHA512ROUND0(10, R14, R15, R8, R9, R10, R11, R12, R13)
+ SHA512ROUND0(11, R13, R14, R15, R8, R9, R10, R11, R12)
+ SHA512ROUND0(12, R12, R13, R14, R15, R8, R9, R10, R11)
+ SHA512ROUND0(13, R11, R12, R13, R14, R15, R8, R9, R10)
+ SHA512ROUND0(14, R10, R11, R12, R13, R14, R15, R8, R9)
+ SHA512ROUND0(15, R9, R10, R11, R12, R13, R14, R15, R8)
+
+ SHA512ROUND1(16, R8, R9, R10, R11, R12, R13, R14, R15)
+ SHA512ROUND1(17, R15, R8, R9, R10, R11, R12, R13, R14)
+ SHA512ROUND1(18, R14, R15, R8, R9, R10, R11, R12, R13)
+ SHA512ROUND1(19, R13, R14, R15, R8, R9, R10, R11, R12)
+ SHA512ROUND1(20, R12, R13, R14, R15, R8, R9, R10, R11)
+ SHA512ROUND1(21, R11, R12, R13, R14, R15, R8, R9, R10)
+ SHA512ROUND1(22, R10, R11, R12, R13, R14, R15, R8, R9)
+ SHA512ROUND1(23, R9, R10, R11, R12, R13, R14, R15, R8)
+ SHA512ROUND1(24, R8, R9, R10, R11, R12, R13, R14, R15)
+ SHA512ROUND1(25, R15, R8, R9, R10, R11, R12, R13, R14)
+ SHA512ROUND1(26, R14, R15, R8, R9, R10, R11, R12, R13)
+ SHA512ROUND1(27, R13, R14, R15, R8, R9, R10, R11, R12)
+ SHA512ROUND1(28, R12, R13, R14, R15, R8, R9, R10, R11)
+ SHA512ROUND1(29, R11, R12, R13, R14, R15, R8, R9, R10)
+ SHA512ROUND1(30, R10, R11, R12, R13, R14, R15, R8, R9)
+ SHA512ROUND1(31, R9, R10, R11, R12, R13, R14, R15, R8)
+ SHA512ROUND1(32, R8, R9, R10, R11, R12, R13, R14, R15)
+ SHA512ROUND1(33, R15, R8, R9, R10, R11, R12, R13, R14)
+ SHA512ROUND1(34, R14, R15, R8, R9, R10, R11, R12, R13)
+ SHA512ROUND1(35, R13, R14, R15, R8, R9, R10, R11, R12)
+ SHA512ROUND1(36, R12, R13, R14, R15, R8, R9, R10, R11)
+ SHA512ROUND1(37, R11, R12, R13, R14, R15, R8, R9, R10)
+ SHA512ROUND1(38, R10, R11, R12, R13, R14, R15, R8, R9)
+ SHA512ROUND1(39, R9, R10, R11, R12, R13, R14, R15, R8)
+ SHA512ROUND1(40, R8, R9, R10, R11, R12, R13, R14, R15)
+ SHA512ROUND1(41, R15, R8, R9, R10, R11, R12, R13, R14)
+ SHA512ROUND1(42, R14, R15, R8, R9, R10, R11, R12, R13)
+ SHA512ROUND1(43, R13, R14, R15, R8, R9, R10, R11, R12)
+ SHA512ROUND1(44, R12, R13, R14, R15, R8, R9, R10, R11)
+ SHA512ROUND1(45, R11, R12, R13, R14, R15, R8, R9, R10)
+ SHA512ROUND1(46, R10, R11, R12, R13, R14, R15, R8, R9)
+ SHA512ROUND1(47, R9, R10, R11, R12, R13, R14, R15, R8)
+ SHA512ROUND1(48, R8, R9, R10, R11, R12, R13, R14, R15)
+ SHA512ROUND1(49, R15, R8, R9, R10, R11, R12, R13, R14)
+ SHA512ROUND1(50, R14, R15, R8, R9, R10, R11, R12, R13)
+ SHA512ROUND1(51, R13, R14, R15, R8, R9, R10, R11, R12)
+ SHA512ROUND1(52, R12, R13, R14, R15, R8, R9, R10, R11)
+ SHA512ROUND1(53, R11, R12, R13, R14, R15, R8, R9, R10)
+ SHA512ROUND1(54, R10, R11, R12, R13, R14, R15, R8, R9)
+ SHA512ROUND1(55, R9, R10, R11, R12, R13, R14, R15, R8)
+ SHA512ROUND1(56, R8, R9, R10, R11, R12, R13, R14, R15)
+ SHA512ROUND1(57, R15, R8, R9, R10, R11, R12, R13, R14)
+ SHA512ROUND1(58, R14, R15, R8, R9, R10, R11, R12, R13)
+ SHA512ROUND1(59, R13, R14, R15, R8, R9, R10, R11, R12)
+ SHA512ROUND1(60, R12, R13, R14, R15, R8, R9, R10, R11)
+ SHA512ROUND1(61, R11, R12, R13, R14, R15, R8, R9, R10)
+ SHA512ROUND1(62, R10, R11, R12, R13, R14, R15, R8, R9)
+ SHA512ROUND1(63, R9, R10, R11, R12, R13, R14, R15, R8)
+ SHA512ROUND1(64, R8, R9, R10, R11, R12, R13, R14, R15)
+ SHA512ROUND1(65, R15, R8, R9, R10, R11, R12, R13, R14)
+ SHA512ROUND1(66, R14, R15, R8, R9, R10, R11, R12, R13)
+ SHA512ROUND1(67, R13, R14, R15, R8, R9, R10, R11, R12)
+ SHA512ROUND1(68, R12, R13, R14, R15, R8, R9, R10, R11)
+ SHA512ROUND1(69, R11, R12, R13, R14, R15, R8, R9, R10)
+ SHA512ROUND1(70, R10, R11, R12, R13, R14, R15, R8, R9)
+ SHA512ROUND1(71, R9, R10, R11, R12, R13, R14, R15, R8)
+ SHA512ROUND1(72, R8, R9, R10, R11, R12, R13, R14, R15)
+ SHA512ROUND1(73, R15, R8, R9, R10, R11, R12, R13, R14)
+ SHA512ROUND1(74, R14, R15, R8, R9, R10, R11, R12, R13)
+ SHA512ROUND1(75, R13, R14, R15, R8, R9, R10, R11, R12)
+ SHA512ROUND1(76, R12, R13, R14, R15, R8, R9, R10, R11)
+ SHA512ROUND1(77, R11, R12, R13, R14, R15, R8, R9, R10)
+ SHA512ROUND1(78, R10, R11, R12, R13, R14, R15, R8, R9)
+ SHA512ROUND1(79, R9, R10, R11, R12, R13, R14, R15, R8)
MOVV (0*8)(R4), REGTMP
MOVV (1*8)(R4), REGTMP1
diff --git a/src/crypto/tls/quic.go b/src/crypto/tls/quic.go
index ba8a235d84ad93..ed70100d11f0e7 100644
--- a/src/crypto/tls/quic.go
+++ b/src/crypto/tls/quic.go
@@ -302,6 +302,9 @@ type QUICSessionTicketOptions struct {
// Currently, it can only be called once.
func (q *QUICConn) SendSessionTicket(opts QUICSessionTicketOptions) error {
c := q.conn
+ if c.config.SessionTicketsDisabled {
+ return nil
+ }
if !c.isHandshakeComplete.Load() {
return quicError(errors.New("tls: SendSessionTicket called before handshake completed"))
}
diff --git a/src/crypto/tls/quic_test.go b/src/crypto/tls/quic_test.go
index 51cd4ef765dd6c..f6e8c55d9d63e4 100644
--- a/src/crypto/tls/quic_test.go
+++ b/src/crypto/tls/quic_test.go
@@ -231,6 +231,18 @@ func TestQUICSessionResumption(t *testing.T) {
if !cli2.conn.ConnectionState().DidResume {
t.Errorf("second connection did not use session resumption")
}
+
+ clientConfig.TLSConfig.SessionTicketsDisabled = true
+ cli3 := newTestQUICClient(t, clientConfig)
+ cli3.conn.SetTransportParameters(nil)
+ srv3 := newTestQUICServer(t, serverConfig)
+ srv3.conn.SetTransportParameters(nil)
+ if err := runTestQUICConnection(context.Background(), cli3, srv3, nil); err != nil {
+ t.Fatalf("error during third connection handshake: %v", err)
+ }
+ if cli3.conn.ConnectionState().DidResume {
+ t.Errorf("third connection unexpectedly used session resumption")
+ }
}
func TestQUICFragmentaryData(t *testing.T) {
diff --git a/src/database/sql/convert.go b/src/database/sql/convert.go
index 65fdfe6fa8c3ad..26b139ababd178 100644
--- a/src/database/sql/convert.go
+++ b/src/database/sql/convert.go
@@ -335,7 +335,6 @@ func convertAssignRows(dest, src any, rows *Rows) error {
if rows == nil {
return errors.New("invalid context to convert cursor rows, missing parent *Rows")
}
- rows.closemu.Lock()
*d = Rows{
dc: rows.dc,
releaseConn: func(error) {},
@@ -351,7 +350,6 @@ func convertAssignRows(dest, src any, rows *Rows) error {
parentCancel()
}
}
- rows.closemu.Unlock()
return nil
}
}
diff --git a/src/database/sql/driver/driver.go b/src/database/sql/driver/driver.go
index d0892e80fc28d5..487870be63209e 100644
--- a/src/database/sql/driver/driver.go
+++ b/src/database/sql/driver/driver.go
@@ -515,6 +515,18 @@ type RowsColumnTypePrecisionScale interface {
ColumnTypePrecisionScale(index int) (precision, scale int64, ok bool)
}
+// RowsColumnScanner may be implemented by [Rows]. It allows the driver to completely
+// take responsibility for how values are scanned and replace the normal [database/sql]
+// scanning path. This allows drivers to directly support types that do not implement
+// [database/sql.Scanner].
+type RowsColumnScanner interface {
+ Rows
+
+ // ScanColumn copies the column in the current row into the value pointed at by
+ // dest. It returns [ErrSkip] to fall back to the normal [database/sql] scanning path.
+ ScanColumn(dest any, index int) error
+}
+
// Tx is a transaction.
type Tx interface {
Commit() error
diff --git a/src/database/sql/fakedb_test.go b/src/database/sql/fakedb_test.go
index 3dfcd447b52bca..003e6c62986f31 100644
--- a/src/database/sql/fakedb_test.go
+++ b/src/database/sql/fakedb_test.go
@@ -5,6 +5,7 @@
package sql
import (
+ "bytes"
"context"
"database/sql/driver"
"errors"
@@ -15,7 +16,6 @@ import (
"strconv"
"strings"
"sync"
- "sync/atomic"
"testing"
"time"
)
@@ -91,8 +91,6 @@ func (cc *fakeDriverCtx) OpenConnector(name string) (driver.Connector, error) {
type fakeDB struct {
name string
- useRawBytes atomic.Bool
-
mu sync.Mutex
tables map[string]*table
badConn bool
@@ -684,8 +682,6 @@ func (c *fakeConn) PrepareContext(ctx context.Context, query string) (driver.Stm
switch cmd {
case "WIPE":
// Nothing
- case "USE_RAWBYTES":
- c.db.useRawBytes.Store(true)
case "SELECT":
stmt, err = c.prepareSelect(stmt, parts)
case "CREATE":
@@ -789,9 +785,6 @@ func (s *fakeStmt) ExecContext(ctx context.Context, args []driver.NamedValue) (d
case "WIPE":
db.wipe()
return driver.ResultNoRows, nil
- case "USE_RAWBYTES":
- s.c.db.useRawBytes.Store(true)
- return driver.ResultNoRows, nil
case "CREATE":
if err := db.createTable(s.table, s.colName, s.colType); err != nil {
return nil, err
@@ -1076,10 +1069,9 @@ type rowsCursor struct {
errPos int
err error
- // a clone of slices to give out to clients, indexed by the
- // original slice's first byte address. we clone them
- // just so we're able to corrupt them on close.
- bytesClone map[*byte][]byte
+ // Data returned to clients.
+ // We clone and stash it here so it can be invalidated by Close and Next.
+ driverOwnedMemory [][]byte
// Every operation writes to line to enable the race detector
// check for data races.
@@ -1096,9 +1088,19 @@ func (rc *rowsCursor) touchMem() {
rc.line++
}
+func (rc *rowsCursor) invalidateDriverOwnedMemory() {
+ for _, buf := range rc.driverOwnedMemory {
+ for i := range buf {
+ buf[i] = 'x'
+ }
+ }
+ rc.driverOwnedMemory = nil
+}
+
func (rc *rowsCursor) Close() error {
rc.touchMem()
rc.parentMem.touchMem()
+ rc.invalidateDriverOwnedMemory()
rc.closed = true
return rc.closeErr
}
@@ -1129,6 +1131,8 @@ func (rc *rowsCursor) Next(dest []driver.Value) error {
if rc.posRow >= len(rc.rows[rc.posSet]) {
return io.EOF // per interface spec
}
+ // Corrupt any previously returned bytes.
+ rc.invalidateDriverOwnedMemory()
for i, v := range rc.rows[rc.posSet][rc.posRow].cols {
// TODO(bradfitz): convert to subset types? naah, I
// think the subset types should only be input to
@@ -1136,20 +1140,13 @@ func (rc *rowsCursor) Next(dest []driver.Value) error {
// a wider range of types coming out of drivers. all
// for ease of drivers, and to prevent drivers from
// messing up conversions or doing them differently.
- dest[i] = v
-
- if bs, ok := v.([]byte); ok && !rc.db.useRawBytes.Load() {
- if rc.bytesClone == nil {
- rc.bytesClone = make(map[*byte][]byte)
- }
- clone, ok := rc.bytesClone[&bs[0]]
- if !ok {
- clone = make([]byte, len(bs))
- copy(clone, bs)
- rc.bytesClone[&bs[0]] = clone
- }
- dest[i] = clone
+ if bs, ok := v.([]byte); ok {
+ // Clone []bytes and stash for later invalidation.
+ bs = bytes.Clone(bs)
+ rc.driverOwnedMemory = append(rc.driverOwnedMemory, bs)
+ v = bs
}
+ dest[i] = v
}
return nil
}
diff --git a/src/database/sql/sql.go b/src/database/sql/sql.go
index b0abcf7fcd408b..85b9ffc37d9445 100644
--- a/src/database/sql/sql.go
+++ b/src/database/sql/sql.go
@@ -3368,38 +3368,45 @@ func (rs *Rows) Scan(dest ...any) error {
// without calling Next.
return fmt.Errorf("sql: Scan called without calling Next (closemuScanHold)")
}
+
rs.closemu.RLock()
+ rs.raw = rs.raw[:0]
+ err := rs.scanLocked(dest...)
+ if err == nil && scanArgsContainRawBytes(dest) {
+ rs.closemuScanHold = true
+ } else {
+ rs.closemu.RUnlock()
+ }
+ return err
+}
+func (rs *Rows) scanLocked(dest ...any) error {
if rs.lasterr != nil && rs.lasterr != io.EOF {
- rs.closemu.RUnlock()
return rs.lasterr
}
if rs.closed {
- err := rs.lasterrOrErrLocked(errRowsClosed)
- rs.closemu.RUnlock()
- return err
- }
-
- if scanArgsContainRawBytes(dest) {
- rs.closemuScanHold = true
- rs.raw = rs.raw[:0]
- } else {
- rs.closemu.RUnlock()
+ return rs.lasterrOrErrLocked(errRowsClosed)
}
if rs.lastcols == nil {
- rs.closemuRUnlockIfHeldByScan()
return errors.New("sql: Scan called without calling Next")
}
if len(dest) != len(rs.lastcols) {
- rs.closemuRUnlockIfHeldByScan()
return fmt.Errorf("sql: expected %d destination arguments in Scan, not %d", len(rs.lastcols), len(dest))
}
for i, sv := range rs.lastcols {
- err := convertAssignRows(dest[i], sv, rs)
+ err := driver.ErrSkip
+
+ if rcs, ok := rs.rowsi.(driver.RowsColumnScanner); ok {
+ err = rcs.ScanColumn(dest[i], i)
+ }
+
+ if err == driver.ErrSkip {
+ err = convertAssignRows(dest[i], sv, rs)
+ }
+
if err != nil {
- rs.closemuRUnlockIfHeldByScan()
return fmt.Errorf(`sql: Scan error on column index %d, name %q: %w`, i, rs.rowsi.Columns()[i], err)
}
}
diff --git a/src/database/sql/sql_test.go b/src/database/sql/sql_test.go
index 74b9bf550249c7..f706610b87e85b 100644
--- a/src/database/sql/sql_test.go
+++ b/src/database/sql/sql_test.go
@@ -5,6 +5,7 @@
package sql
import (
+ "bytes"
"context"
"database/sql/driver"
"errors"
@@ -4200,6 +4201,102 @@ func TestNamedValueCheckerSkip(t *testing.T) {
}
}
+type rcsDriver struct {
+ fakeDriver
+}
+
+func (d *rcsDriver) Open(dsn string) (driver.Conn, error) {
+ c, err := d.fakeDriver.Open(dsn)
+ fc := c.(*fakeConn)
+ fc.db.allowAny = true
+ return &rcsConn{fc}, err
+}
+
+type rcsConn struct {
+ *fakeConn
+}
+
+func (c *rcsConn) PrepareContext(ctx context.Context, q string) (driver.Stmt, error) {
+ stmt, err := c.fakeConn.PrepareContext(ctx, q)
+ if err != nil {
+ return stmt, err
+ }
+ return &rcsStmt{stmt.(*fakeStmt)}, nil
+}
+
+type rcsStmt struct {
+ *fakeStmt
+}
+
+func (s *rcsStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
+ rows, err := s.fakeStmt.QueryContext(ctx, args)
+ if err != nil {
+ return rows, err
+ }
+ return &rcsRows{rows.(*rowsCursor)}, nil
+}
+
+type rcsRows struct {
+ *rowsCursor
+}
+
+func (r *rcsRows) ScanColumn(dest any, index int) error {
+ switch d := dest.(type) {
+ case *int64:
+ *d = 42
+ return nil
+ }
+
+ return driver.ErrSkip
+}
+
+func TestRowsColumnScanner(t *testing.T) {
+ Register("RowsColumnScanner", &rcsDriver{})
+ db, err := Open("RowsColumnScanner", "")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer db.Close()
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ _, err = db.ExecContext(ctx, "CREATE|t|str=string,n=int64")
+ if err != nil {
+ t.Fatal("exec create", err)
+ }
+
+ _, err = db.ExecContext(ctx, "INSERT|t|str=?,n=?", "foo", int64(1))
+ if err != nil {
+ t.Fatal("exec insert", err)
+ }
+ var (
+ str string
+ i64 int64
+ i int
+ f64 float64
+ ui uint
+ )
+ err = db.QueryRowContext(ctx, "SELECT|t|str,n,n,n,n|").Scan(&str, &i64, &i, &f64, &ui)
+ if err != nil {
+ t.Fatal("select", err)
+ }
+
+ list := []struct{ got, want any }{
+ {str, "foo"},
+ {i64, int64(42)},
+ {i, int(1)},
+ {f64, float64(1)},
+ {ui, uint(1)},
+ }
+
+ for index, item := range list {
+ if !reflect.DeepEqual(item.got, item.want) {
+ t.Errorf("got %#v wanted %#v for index %d", item.got, item.want, index)
+ }
+ }
+}
+
func TestOpenConnector(t *testing.T) {
Register("testctx", &fakeDriverCtx{})
db, err := Open("testctx", "people")
@@ -4434,10 +4531,6 @@ func testContextCancelDuringRawBytesScan(t *testing.T, mode string) {
db := newTestDB(t, "people")
defer closeDB(t, db)
- if _, err := db.Exec("USE_RAWBYTES"); err != nil {
- t.Fatal(err)
- }
-
// cancel used to call close asynchronously.
// This test checks that it waits so as not to interfere with RawBytes.
ctx, cancel := context.WithCancel(context.Background())
@@ -4529,6 +4622,61 @@ func TestContextCancelBetweenNextAndErr(t *testing.T) {
}
}
+type testScanner struct {
+ scanf func(src any) error
+}
+
+func (ts testScanner) Scan(src any) error { return ts.scanf(src) }
+
+func TestContextCancelDuringScan(t *testing.T) {
+ db := newTestDB(t, "people")
+ defer closeDB(t, db)
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ scanStart := make(chan any)
+ scanEnd := make(chan error)
+ scanner := &testScanner{
+ scanf: func(src any) error {
+ scanStart <- src
+ return <-scanEnd
+ },
+ }
+
+ // Start a query, and pause it mid-scan.
+ want := []byte("Alice")
+ r, err := db.QueryContext(ctx, "SELECT|people|name|name=?", string(want))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !r.Next() {
+ t.Fatalf("r.Next() = false, want true")
+ }
+ go func() {
+ r.Scan(scanner)
+ }()
+ got := <-scanStart
+ defer close(scanEnd)
+ gotBytes, ok := got.([]byte)
+ if !ok {
+ t.Fatalf("r.Scan returned %T, want []byte", got)
+ }
+ if !bytes.Equal(gotBytes, want) {
+ t.Fatalf("before cancel: r.Scan returned %q, want %q", gotBytes, want)
+ }
+
+ // Cancel the query.
+ // Sleep to give it a chance to finish canceling.
+ cancel()
+ time.Sleep(10 * time.Millisecond)
+
+	// Canceling the query should not have changed the result.
+ if !bytes.Equal(gotBytes, want) {
+ t.Fatalf("after cancel: r.Scan result is now %q, want %q", gotBytes, want)
+ }
+}
+
func TestNilErrorAfterClose(t *testing.T) {
db := newTestDB(t, "people")
defer closeDB(t, db)
@@ -4562,10 +4710,6 @@ func TestRawBytesReuse(t *testing.T) {
db := newTestDB(t, "people")
defer closeDB(t, db)
- if _, err := db.Exec("USE_RAWBYTES"); err != nil {
- t.Fatal(err)
- }
-
var raw RawBytes
// The RawBytes in this query aliases driver-owned memory.
diff --git a/src/debug/macho/file.go b/src/debug/macho/file.go
index fcf28c4b25edaf..52ff81750cc4c1 100644
--- a/src/debug/macho/file.go
+++ b/src/debug/macho/file.go
@@ -719,15 +719,28 @@ func (f *File) DWARF() (*dwarf.Data, error) {
// referred to by the binary f that are expected to be
// satisfied by other libraries at dynamic load time.
func (f *File) ImportedSymbols() ([]string, error) {
- if f.Dysymtab == nil || f.Symtab == nil {
+ if f.Symtab == nil {
return nil, &FormatError{0, "missing symbol table", nil}
}
st := f.Symtab
dt := f.Dysymtab
var all []string
- for _, s := range st.Syms[dt.Iundefsym : dt.Iundefsym+dt.Nundefsym] {
- all = append(all, s.Name)
+ if dt != nil {
+ for _, s := range st.Syms[dt.Iundefsym : dt.Iundefsym+dt.Nundefsym] {
+ all = append(all, s.Name)
+ }
+ } else {
+ // From Darwin's include/mach-o/nlist.h
+ const (
+ N_TYPE = 0x0e
+ N_UNDF = 0x0
+ )
+ for _, s := range st.Syms {
+ if s.Type&N_TYPE == N_UNDF && s.Sect == 0 {
+ all = append(all, s.Name)
+ }
+ }
}
return all, nil
}
diff --git a/src/debug/macho/file_test.go b/src/debug/macho/file_test.go
index 313c376c54a27f..fbcc7bdcb01e96 100644
--- a/src/debug/macho/file_test.go
+++ b/src/debug/macho/file_test.go
@@ -9,15 +9,17 @@ import (
"internal/obscuretestdata"
"io"
"reflect"
+ "slices"
"testing"
)
type fileTest struct {
- file string
- hdr FileHeader
- loads []any
- sections []*SectionHeader
- relocations map[string][]Reloc
+ file string
+ hdr FileHeader
+ loads []any
+ sections []*SectionHeader
+ relocations map[string][]Reloc
+ importedSyms []string
}
var fileTests = []fileTest{
@@ -46,6 +48,7 @@ var fileTests = []fileTest{
{"__jump_table", "__IMPORT", 0x3000, 0xa, 0x2000, 0x6, 0x0, 0x0, 0x4000008},
},
nil,
+ nil,
},
{
"testdata/gcc-amd64-darwin-exec.base64",
@@ -74,6 +77,7 @@ var fileTests = []fileTest{
{"__la_symbol_ptr", "__DATA", 0x100001058, 0x10, 0x1058, 0x2, 0x0, 0x0, 0x7},
},
nil,
+ nil,
},
{
"testdata/gcc-amd64-darwin-exec-debug.base64",
@@ -102,6 +106,7 @@ var fileTests = []fileTest{
{"__debug_str", "__DWARF", 0x10000215c, 0x60, 0x115c, 0x0, 0x0, 0x0, 0x0},
},
nil,
+ nil,
},
{
"testdata/clang-386-darwin-exec-with-rpath.base64",
@@ -126,6 +131,7 @@ var fileTests = []fileTest{
},
nil,
nil,
+ nil,
},
{
"testdata/clang-amd64-darwin-exec-with-rpath.base64",
@@ -150,6 +156,7 @@ var fileTests = []fileTest{
},
nil,
nil,
+ nil,
},
{
"testdata/clang-386-darwin.obj.base64",
@@ -185,6 +192,7 @@ var fileTests = []fileTest{
},
},
},
+ nil,
},
{
"testdata/clang-amd64-darwin.obj.base64",
@@ -221,6 +229,15 @@ var fileTests = []fileTest{
},
},
},
+ []string{"_printf"},
+ },
+ {
+ "testdata/clang-amd64-darwin-ld-r.obj.base64",
+ FileHeader{0xfeedfacf, CpuAmd64, 0x3, 0x1, 0x4, 0x1c0, 0x2000},
+ nil,
+ nil,
+ nil,
+ []string{"_printf"},
},
}
@@ -345,6 +362,17 @@ func TestOpen(t *testing.T) {
}
}
}
+
+ if tt.importedSyms != nil {
+ ss, err := f.ImportedSymbols()
+ if err != nil {
+ t.Errorf("open %s: fail to read imported symbols: %v", tt.file, err)
+ }
+ want := tt.importedSyms
+ if !slices.Equal(ss, want) {
+ t.Errorf("open %s: imported symbols differ:\n\thave %v\n\twant %v", tt.file, ss, want)
+ }
+ }
}
}
diff --git a/src/debug/macho/testdata/clang-amd64-darwin-ld-r.obj.base64 b/src/debug/macho/testdata/clang-amd64-darwin-ld-r.obj.base64
new file mode 100644
index 00000000000000..036b5746abe351
--- /dev/null
+++ b/src/debug/macho/testdata/clang-amd64-darwin-ld-r.obj.base64
@@ -0,0 +1 @@
+z/rt/gcAAAEDAAAAAQAAAAQAAADAAQAAACAAAAAAAAAZAAAAiAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAJgAAAAAAAAAAAIAAAAAAACYAAAAAAAAAAcAAAAHAAAABAAAAAAAAABfX3RleHQAAAAAAAAAAAAAX19URVhUAAAAAAAAAAAAAAAAAAAAAAAAKgAAAAAAAAAAAgAABAAAAJgCAAACAAAAAAQAgAAAAAAAAAAAAAAAAF9fY3N0cmluZwAAAAAAAABfX1RFWFQAAAAAAAAAAAAAKgAAAAAAAAAOAAAAAAAAACoCAAAAAAAAAAAAAAAAAAACAAAAAAAAAAAAAAAAAAAAX19laF9mcmFtZQAAAAAAAF9fVEVYVAAAAAAAAAAAAAA4AAAAAAAAAEAAAAAAAAAAOAIAAAMAAACoAgAABAAAAAAAAAAAAAAAAAAAAAAAAABfX2NvbXBhY3RfdW53aW5kX19MRAAAAAAAAAAAAAAAAHgAAAAAAAAAIAAAAAAAAAB4AgAAAwAAAMgCAAABAAAAAAAAAgAAAAAAAAAAAAAAAAIAAAAYAAAA0AIAAAUAAAAgAwAAKAAAACQAAAAQAAAAAAwKAAAAAAApAAAAEAAAANACAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABVSInlSIPsEEiNPQAAAADHRfwAAAAAsADoAAAAADHJiUX4ichIg8QQXcNoZWxsbywgd29ybGQKABQAAAAAAAAAAXpSAAF4EAEQDAcIkAEAACQAAAAEAAAA+P////////8qAAAAAAAAAABBDhCGAkMNBgAAAAAAAAAAAAAAAAAAACoAAAAAAAABAAAAAAAAAAAAAAAAAAAAABkAAAAEAAAtCwAAAAAAAB0cAAAAAQAAXBwAAAACAAAMIAAAAAIAAF4gAAAAAwAADgAAAAADAAAOEAAAAB4CAAAqAAAAAAAAABQAAAAOAwAAOAAAAAAAAAAeAAAADgMAAFAAAAAAAAAAAgAAAA8BAAAAAAAAAAAAAAgAAAABAAAAAAAAAAAAAAAgAF9tYWluAF9wcmludGYATEMxAEVIX0ZyYW1lMQBmdW5jLmVoAAAA
diff --git a/src/encoding/gob/doc.go b/src/encoding/gob/doc.go
index 0866ba1544666d..c746806887ab3d 100644
--- a/src/encoding/gob/doc.go
+++ b/src/encoding/gob/doc.go
@@ -274,7 +274,7 @@ released version, subject to issues such as security fixes. See the Go compatibi
document for background: https://golang.org/doc/go1compat
See "Gobs of data" for a design discussion of the gob wire format:
-https://blog.golang.org/gobs-of-data
+https://go.dev/blog/gob
# Security
diff --git a/src/encoding/json/scanner_test.go b/src/encoding/json/scanner_test.go
index fb64463599625e..a062e91243e477 100644
--- a/src/encoding/json/scanner_test.go
+++ b/src/encoding/json/scanner_test.go
@@ -74,6 +74,7 @@ func TestCompactAndIndent(t *testing.T) {
-5e+2
]`},
{Name(""), "{\"\":\"<>&\u2028\u2029\"}", "{\n\t\"\": \"<>&\u2028\u2029\"\n}"}, // See golang.org/issue/34070
+ {Name(""), `null`, "null \n\r\t"}, // See golang.org/issue/13520 and golang.org/issue/74806
}
var buf bytes.Buffer
for _, tt := range tests {
@@ -102,7 +103,7 @@ func TestCompactAndIndent(t *testing.T) {
buf.Reset()
if err := Indent(&buf, []byte(tt.compact), "", "\t"); err != nil {
t.Errorf("%s: Indent error: %v", tt.Where, err)
- } else if got := buf.String(); got != tt.indent {
+ } else if got := buf.String(); got != strings.TrimRight(tt.indent, " \n\r\t") {
t.Errorf("%s: Compact:\n\tgot: %s\n\twant: %s", tt.Where, indentNewlines(got), indentNewlines(tt.indent))
}
})
diff --git a/src/encoding/json/v2/arshal.go b/src/encoding/json/v2/arshal.go
index e2ce778d5ad96c..6b4bcb0c74cf7c 100644
--- a/src/encoding/json/v2/arshal.go
+++ b/src/encoding/json/v2/arshal.go
@@ -470,7 +470,7 @@ func unmarshalDecode(in *jsontext.Decoder, out any, uo *jsonopts.Struct, last bo
// was validated before attempting to unmarshal it.
if uo.Flags.Get(jsonflags.ReportErrorsWithLegacySemantics) {
if err := export.Decoder(in).CheckNextValue(last); err != nil {
- if err == io.EOF {
+ if err == io.EOF && last {
offset := in.InputOffset() + int64(len(in.UnreadBuffer()))
return &jsontext.SyntacticError{ByteOffset: offset, Err: io.ErrUnexpectedEOF}
}
@@ -487,7 +487,7 @@ func unmarshalDecode(in *jsontext.Decoder, out any, uo *jsonopts.Struct, last bo
if !uo.Flags.Get(jsonflags.AllowDuplicateNames) {
export.Decoder(in).Tokens.InvalidateDisabledNamespaces()
}
- if err == io.EOF {
+ if err == io.EOF && last {
offset := in.InputOffset() + int64(len(in.UnreadBuffer()))
return &jsontext.SyntacticError{ByteOffset: offset, Err: io.ErrUnexpectedEOF}
}
diff --git a/src/encoding/json/v2/arshal_any.go b/src/encoding/json/v2/arshal_any.go
index 3fb679d553e5dd..97a77e923766d9 100644
--- a/src/encoding/json/v2/arshal_any.go
+++ b/src/encoding/json/v2/arshal_any.go
@@ -8,6 +8,7 @@ package json
import (
"cmp"
+ "math"
"reflect"
"strconv"
@@ -35,20 +36,23 @@ func marshalValueAny(enc *jsontext.Encoder, val any, mo *jsonopts.Struct) error
case string:
return enc.WriteToken(jsontext.String(val))
case float64:
+ if math.IsNaN(val) || math.IsInf(val, 0) {
+ break // use default logic below
+ }
return enc.WriteToken(jsontext.Float(val))
case map[string]any:
return marshalObjectAny(enc, val, mo)
case []any:
return marshalArrayAny(enc, val, mo)
- default:
- v := newAddressableValue(reflect.TypeOf(val))
- v.Set(reflect.ValueOf(val))
- marshal := lookupArshaler(v.Type()).marshal
- if mo.Marshalers != nil {
- marshal, _ = mo.Marshalers.(*Marshalers).lookup(marshal, v.Type())
- }
- return marshal(enc, v, mo)
}
+
+ v := newAddressableValue(reflect.TypeOf(val))
+ v.Set(reflect.ValueOf(val))
+ marshal := lookupArshaler(v.Type()).marshal
+ if mo.Marshalers != nil {
+ marshal, _ = mo.Marshalers.(*Marshalers).lookup(marshal, v.Type())
+ }
+ return marshal(enc, v, mo)
}
// unmarshalValueAny unmarshals a JSON value as a Go any.
@@ -104,7 +108,7 @@ func marshalObjectAny(enc *jsontext.Encoder, obj map[string]any, mo *jsonopts.St
if xe.Tokens.Depth() > startDetectingCyclesAfter {
v := reflect.ValueOf(obj)
if err := visitPointer(&xe.SeenPointers, v); err != nil {
- return newMarshalErrorBefore(enc, anyType, err)
+ return newMarshalErrorBefore(enc, mapStringAnyType, err)
}
defer leavePointer(&xe.SeenPointers, v)
}
diff --git a/src/encoding/json/v2/arshal_default.go b/src/encoding/json/v2/arshal_default.go
index f3fc79beac0af4..c2307fa31d7fcc 100644
--- a/src/encoding/json/v2/arshal_default.go
+++ b/src/encoding/json/v2/arshal_default.go
@@ -128,7 +128,7 @@ func makeBoolArshaler(t reflect.Type) *arshaler {
fncs.marshal = func(enc *jsontext.Encoder, va addressableValue, mo *jsonopts.Struct) error {
xe := export.Encoder(enc)
if mo.Format != "" && mo.FormatDepth == xe.Tokens.Depth() {
- return newInvalidFormatError(enc, t, mo)
+ return newInvalidFormatError(enc, t)
}
// Optimize for marshaling without preceding whitespace.
@@ -153,7 +153,7 @@ func makeBoolArshaler(t reflect.Type) *arshaler {
fncs.unmarshal = func(dec *jsontext.Decoder, va addressableValue, uo *jsonopts.Struct) error {
xd := export.Decoder(dec)
if uo.Format != "" && uo.FormatDepth == xd.Tokens.Depth() {
- return newInvalidFormatError(dec, t, uo)
+ return newInvalidFormatError(dec, t)
}
tok, err := dec.ReadToken()
if err != nil {
@@ -190,7 +190,7 @@ func makeBoolArshaler(t reflect.Type) *arshaler {
return nil
}
}
- return newUnmarshalErrorAfterWithSkipping(dec, uo, t, nil)
+ return newUnmarshalErrorAfterWithSkipping(dec, t, nil)
}
return &fncs
}
@@ -200,7 +200,7 @@ func makeStringArshaler(t reflect.Type) *arshaler {
fncs.marshal = func(enc *jsontext.Encoder, va addressableValue, mo *jsonopts.Struct) error {
xe := export.Encoder(enc)
if mo.Format != "" && mo.FormatDepth == xe.Tokens.Depth() {
- return newInvalidFormatError(enc, t, mo)
+ return newInvalidFormatError(enc, t)
}
// Optimize for marshaling without preceding whitespace.
@@ -237,7 +237,7 @@ func makeStringArshaler(t reflect.Type) *arshaler {
fncs.unmarshal = func(dec *jsontext.Decoder, va addressableValue, uo *jsonopts.Struct) error {
xd := export.Decoder(dec)
if uo.Format != "" && uo.FormatDepth == xd.Tokens.Depth() {
- return newInvalidFormatError(dec, t, uo)
+ return newInvalidFormatError(dec, t)
}
var flags jsonwire.ValueFlags
val, err := xd.ReadValue(&flags)
@@ -327,7 +327,7 @@ func makeBytesArshaler(t reflect.Type, fncs *arshaler) *arshaler {
mo.Format = ""
return marshalArray(enc, va, mo)
default:
- return newInvalidFormatError(enc, t, mo)
+ return newInvalidFormatError(enc, t)
}
} else if mo.Flags.Get(jsonflags.FormatByteArrayAsArray) && va.Kind() == reflect.Array {
return marshalArray(enc, va, mo)
@@ -365,7 +365,7 @@ func makeBytesArshaler(t reflect.Type, fncs *arshaler) *arshaler {
uo.Format = ""
return unmarshalArray(dec, va, uo)
default:
- return newInvalidFormatError(dec, t, uo)
+ return newInvalidFormatError(dec, t)
}
} else if uo.Flags.Get(jsonflags.FormatByteArrayAsArray) && va.Kind() == reflect.Array {
return unmarshalArray(dec, va, uo)
@@ -433,7 +433,7 @@ func makeIntArshaler(t reflect.Type) *arshaler {
fncs.marshal = func(enc *jsontext.Encoder, va addressableValue, mo *jsonopts.Struct) error {
xe := export.Encoder(enc)
if mo.Format != "" && mo.FormatDepth == xe.Tokens.Depth() {
- return newInvalidFormatError(enc, t, mo)
+ return newInvalidFormatError(enc, t)
}
// Optimize for marshaling without preceding whitespace or string escaping.
@@ -454,7 +454,7 @@ func makeIntArshaler(t reflect.Type) *arshaler {
fncs.unmarshal = func(dec *jsontext.Decoder, va addressableValue, uo *jsonopts.Struct) error {
xd := export.Decoder(dec)
if uo.Format != "" && uo.FormatDepth == xd.Tokens.Depth() {
- return newInvalidFormatError(dec, t, uo)
+ return newInvalidFormatError(dec, t)
}
stringify := xd.Tokens.Last.NeedObjectName() || uo.Flags.Get(jsonflags.StringifyNumbers)
var flags jsonwire.ValueFlags
@@ -520,7 +520,7 @@ func makeUintArshaler(t reflect.Type) *arshaler {
fncs.marshal = func(enc *jsontext.Encoder, va addressableValue, mo *jsonopts.Struct) error {
xe := export.Encoder(enc)
if mo.Format != "" && mo.FormatDepth == xe.Tokens.Depth() {
- return newInvalidFormatError(enc, t, mo)
+ return newInvalidFormatError(enc, t)
}
// Optimize for marshaling without preceding whitespace or string escaping.
@@ -541,7 +541,7 @@ func makeUintArshaler(t reflect.Type) *arshaler {
fncs.unmarshal = func(dec *jsontext.Decoder, va addressableValue, uo *jsonopts.Struct) error {
xd := export.Decoder(dec)
if uo.Format != "" && uo.FormatDepth == xd.Tokens.Depth() {
- return newInvalidFormatError(dec, t, uo)
+ return newInvalidFormatError(dec, t)
}
stringify := xd.Tokens.Last.NeedObjectName() || uo.Flags.Get(jsonflags.StringifyNumbers)
var flags jsonwire.ValueFlags
@@ -602,7 +602,7 @@ func makeFloatArshaler(t reflect.Type) *arshaler {
if mo.Format == "nonfinite" {
allowNonFinite = true
} else {
- return newInvalidFormatError(enc, t, mo)
+ return newInvalidFormatError(enc, t)
}
}
@@ -637,7 +637,7 @@ func makeFloatArshaler(t reflect.Type) *arshaler {
if uo.Format == "nonfinite" {
allowNonFinite = true
} else {
- return newInvalidFormatError(dec, t, uo)
+ return newInvalidFormatError(dec, t)
}
}
stringify := xd.Tokens.Last.NeedObjectName() || uo.Flags.Get(jsonflags.StringifyNumbers)
@@ -737,7 +737,7 @@ func makeMapArshaler(t reflect.Type) *arshaler {
emitNull = false
mo.Format = ""
default:
- return newInvalidFormatError(enc, t, mo)
+ return newInvalidFormatError(enc, t)
}
}
@@ -882,7 +882,7 @@ func makeMapArshaler(t reflect.Type) *arshaler {
case "emitnull", "emitempty":
uo.Format = "" // only relevant for marshaling
default:
- return newInvalidFormatError(dec, t, uo)
+ return newInvalidFormatError(dec, t)
}
}
tok, err := dec.ReadToken()
@@ -992,7 +992,7 @@ func makeMapArshaler(t reflect.Type) *arshaler {
}
return errUnmarshal
}
- return newUnmarshalErrorAfterWithSkipping(dec, uo, t, nil)
+ return newUnmarshalErrorAfterWithSkipping(dec, t, nil)
}
return &fncs
}
@@ -1037,7 +1037,7 @@ func makeStructArshaler(t reflect.Type) *arshaler {
fncs.marshal = func(enc *jsontext.Encoder, va addressableValue, mo *jsonopts.Struct) error {
xe := export.Encoder(enc)
if mo.Format != "" && mo.FormatDepth == xe.Tokens.Depth() {
- return newInvalidFormatError(enc, t, mo)
+ return newInvalidFormatError(enc, t)
}
once.Do(init)
if errInit != nil && !mo.Flags.Get(jsonflags.ReportErrorsWithLegacySemantics) {
@@ -1199,7 +1199,7 @@ func makeStructArshaler(t reflect.Type) *arshaler {
fncs.unmarshal = func(dec *jsontext.Decoder, va addressableValue, uo *jsonopts.Struct) error {
xd := export.Decoder(dec)
if uo.Format != "" && uo.FormatDepth == xd.Tokens.Depth() {
- return newInvalidFormatError(dec, t, uo)
+ return newInvalidFormatError(dec, t)
}
tok, err := dec.ReadToken()
if err != nil {
@@ -1317,7 +1317,7 @@ func makeStructArshaler(t reflect.Type) *arshaler {
}
return errUnmarshal
}
- return newUnmarshalErrorAfterWithSkipping(dec, uo, t, nil)
+ return newUnmarshalErrorAfterWithSkipping(dec, t, nil)
}
return &fncs
}
@@ -1414,7 +1414,7 @@ func makeSliceArshaler(t reflect.Type) *arshaler {
emitNull = false
mo.Format = ""
default:
- return newInvalidFormatError(enc, t, mo)
+ return newInvalidFormatError(enc, t)
}
}
@@ -1462,7 +1462,7 @@ func makeSliceArshaler(t reflect.Type) *arshaler {
case "emitnull", "emitempty":
uo.Format = "" // only relevant for marshaling
default:
- return newInvalidFormatError(dec, t, uo)
+ return newInvalidFormatError(dec, t)
}
}
@@ -1518,7 +1518,7 @@ func makeSliceArshaler(t reflect.Type) *arshaler {
}
return errUnmarshal
}
- return newUnmarshalErrorAfterWithSkipping(dec, uo, t, nil)
+ return newUnmarshalErrorAfterWithSkipping(dec, t, nil)
}
return &fncs
}
@@ -1539,7 +1539,7 @@ func makeArrayArshaler(t reflect.Type) *arshaler {
fncs.marshal = func(enc *jsontext.Encoder, va addressableValue, mo *jsonopts.Struct) error {
xe := export.Encoder(enc)
if mo.Format != "" && mo.FormatDepth == xe.Tokens.Depth() {
- return newInvalidFormatError(enc, t, mo)
+ return newInvalidFormatError(enc, t)
}
once.Do(init)
if err := enc.WriteToken(jsontext.BeginArray); err != nil {
@@ -1563,7 +1563,7 @@ func makeArrayArshaler(t reflect.Type) *arshaler {
fncs.unmarshal = func(dec *jsontext.Decoder, va addressableValue, uo *jsonopts.Struct) error {
xd := export.Decoder(dec)
if uo.Format != "" && uo.FormatDepth == xd.Tokens.Depth() {
- return newInvalidFormatError(dec, t, uo)
+ return newInvalidFormatError(dec, t)
}
tok, err := dec.ReadToken()
if err != nil {
@@ -1616,7 +1616,7 @@ func makeArrayArshaler(t reflect.Type) *arshaler {
}
return errUnmarshal
}
- return newUnmarshalErrorAfterWithSkipping(dec, uo, t, nil)
+ return newUnmarshalErrorAfterWithSkipping(dec, t, nil)
}
return &fncs
}
@@ -1706,7 +1706,7 @@ func makeInterfaceArshaler(t reflect.Type) *arshaler {
fncs.marshal = func(enc *jsontext.Encoder, va addressableValue, mo *jsonopts.Struct) error {
xe := export.Encoder(enc)
if mo.Format != "" && mo.FormatDepth == xe.Tokens.Depth() {
- return newInvalidFormatError(enc, t, mo)
+ return newInvalidFormatError(enc, t)
}
if va.IsNil() {
return enc.WriteToken(jsontext.Null)
@@ -1746,7 +1746,7 @@ func makeInterfaceArshaler(t reflect.Type) *arshaler {
fncs.unmarshal = func(dec *jsontext.Decoder, va addressableValue, uo *jsonopts.Struct) error {
xd := export.Decoder(dec)
if uo.Format != "" && uo.FormatDepth == xd.Tokens.Depth() {
- return newInvalidFormatError(dec, t, uo)
+ return newInvalidFormatError(dec, t)
}
if uo.Flags.Get(jsonflags.MergeWithLegacySemantics) && !va.IsNil() {
// Legacy merge behavior is difficult to explain.
@@ -1795,7 +1795,7 @@ func makeInterfaceArshaler(t reflect.Type) *arshaler {
k := dec.PeekKind()
if !isAnyType(t) {
- return newUnmarshalErrorBeforeWithSkipping(dec, uo, t, internal.ErrNilInterface)
+ return newUnmarshalErrorBeforeWithSkipping(dec, t, internal.ErrNilInterface)
}
switch k {
case 'f', 't':
diff --git a/src/encoding/json/v2/arshal_inlined.go b/src/encoding/json/v2/arshal_inlined.go
index 6299cc4a428ae1..a25314450251f4 100644
--- a/src/encoding/json/v2/arshal_inlined.go
+++ b/src/encoding/json/v2/arshal_inlined.go
@@ -188,7 +188,7 @@ func unmarshalInlinedFallbackNext(dec *jsontext.Decoder, va addressableValue, uo
*b = append(*b, ',')
}
} else {
- return newUnmarshalErrorAfterWithSkipping(dec, uo, v.Type(), errRawInlinedNotObject)
+ return newUnmarshalErrorAfterWithSkipping(dec, v.Type(), errRawInlinedNotObject)
}
}
*b = append(*b, quotedName...)
diff --git a/src/encoding/json/v2/arshal_test.go b/src/encoding/json/v2/arshal_test.go
index f1ee2e2e3a7365..75093345a3b93e 100644
--- a/src/encoding/json/v2/arshal_test.go
+++ b/src/encoding/json/v2/arshal_test.go
@@ -3216,6 +3216,11 @@ func TestMarshal(t *testing.T) {
},
in: struct{ X any }{[8]byte{}},
want: `{"X":"called"}`,
+ }, {
+ name: jsontest.Name("Interfaces/Any/Float/NaN"),
+ in: struct{ X any }{math.NaN()},
+ want: `{"X"`,
+ wantErr: EM(fmt.Errorf("unsupported value: %v", math.NaN())).withType(0, reflect.TypeFor[float64]()).withPos(`{"X":`, "/X"),
}, {
name: jsontest.Name("Interfaces/Any/Maps/Nil"),
in: struct{ X any }{map[string]any(nil)},
@@ -3278,7 +3283,7 @@ func TestMarshal(t *testing.T) {
return struct{ X any }{m}
}(),
want: `{"X"` + strings.Repeat(`:{""`, startDetectingCyclesAfter),
- wantErr: EM(internal.ErrCycle).withPos(`{"X":`+strings.Repeat(`{"":`, startDetectingCyclesAfter), "/X"+jsontext.Pointer(strings.Repeat("/", startDetectingCyclesAfter))).withType(0, T[any]()),
+ wantErr: EM(internal.ErrCycle).withPos(`{"X":`+strings.Repeat(`{"":`, startDetectingCyclesAfter), "/X"+jsontext.Pointer(strings.Repeat("/", startDetectingCyclesAfter))).withType(0, T[map[string]any]()),
}, {
name: jsontest.Name("Interfaces/Any/Slices/Nil"),
in: struct{ X any }{[]any(nil)},
@@ -9413,6 +9418,51 @@ func TestUnmarshalDecodeOptions(t *testing.T) {
}
}
+func TestUnmarshalDecodeStream(t *testing.T) {
+ tests := []struct {
+ in string
+ want []any
+ err error
+ }{
+ {in: ``, err: io.EOF},
+ {in: `{`, err: &jsontext.SyntacticError{ByteOffset: len64(`{`), Err: io.ErrUnexpectedEOF}},
+ {in: `{"`, err: &jsontext.SyntacticError{ByteOffset: len64(`{"`), Err: io.ErrUnexpectedEOF}},
+ {in: `{"k"`, err: &jsontext.SyntacticError{ByteOffset: len64(`{"k"`), JSONPointer: "/k", Err: io.ErrUnexpectedEOF}},
+ {in: `{"k":`, err: &jsontext.SyntacticError{ByteOffset: len64(`{"k":`), JSONPointer: "/k", Err: io.ErrUnexpectedEOF}},
+ {in: `{"k",`, err: &jsontext.SyntacticError{ByteOffset: len64(`{"k"`), JSONPointer: "/k", Err: jsonwire.NewInvalidCharacterError(",", "after object name (expecting ':')")}},
+ {in: `{"k"}`, err: &jsontext.SyntacticError{ByteOffset: len64(`{"k"`), JSONPointer: "/k", Err: jsonwire.NewInvalidCharacterError("}", "after object name (expecting ':')")}},
+ {in: `[`, err: &jsontext.SyntacticError{ByteOffset: len64(`[`), Err: io.ErrUnexpectedEOF}},
+ {in: `[0`, err: &jsontext.SyntacticError{ByteOffset: len64(`[0`), Err: io.ErrUnexpectedEOF}},
+ {in: ` [0`, err: &jsontext.SyntacticError{ByteOffset: len64(` [0`), Err: io.ErrUnexpectedEOF}},
+ {in: `[0.`, err: &jsontext.SyntacticError{ByteOffset: len64(`[`), JSONPointer: "/0", Err: io.ErrUnexpectedEOF}},
+ {in: `[0. `, err: &jsontext.SyntacticError{ByteOffset: len64(`[0.`), JSONPointer: "/0", Err: jsonwire.NewInvalidCharacterError(" ", "in number (expecting digit)")}},
+ {in: `[0,`, err: &jsontext.SyntacticError{ByteOffset: len64(`[0,`), Err: io.ErrUnexpectedEOF}},
+ {in: `[0:`, err: &jsontext.SyntacticError{ByteOffset: len64(`[0`), Err: jsonwire.NewInvalidCharacterError(":", "after array element (expecting ',' or ']')")}},
+ {in: `n`, err: &jsontext.SyntacticError{ByteOffset: len64(`n`), Err: io.ErrUnexpectedEOF}},
+ {in: `nul`, err: &jsontext.SyntacticError{ByteOffset: len64(`nul`), Err: io.ErrUnexpectedEOF}},
+ {in: `fal `, err: &jsontext.SyntacticError{ByteOffset: len64(`fal`), Err: jsonwire.NewInvalidCharacterError(" ", "in literal false (expecting 's')")}},
+ {in: `false`, want: []any{false}, err: io.EOF},
+ {in: `false0.0[]null`, want: []any{false, 0.0, []any{}, nil}, err: io.EOF},
+ }
+ for _, tt := range tests {
+ d := jsontext.NewDecoder(strings.NewReader(tt.in))
+ var got []any
+ for {
+ var v any
+ if err := UnmarshalDecode(d, &v); err != nil {
+ if !reflect.DeepEqual(err, tt.err) {
+ t.Errorf("`%s`: UnmarshalDecode error = %v, want %v", tt.in, err, tt.err)
+ }
+ break
+ }
+ got = append(got, v)
+ }
+ if !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("`%s`: UnmarshalDecode = %v, want %v", tt.in, got, tt.want)
+ }
+ }
+}
+
// BenchmarkUnmarshalDecodeOptions is a minimal decode operation to measure
// the overhead options setup before the unmarshal operation.
func BenchmarkUnmarshalDecodeOptions(b *testing.B) {
diff --git a/src/encoding/json/v2/arshal_time.go b/src/encoding/json/v2/arshal_time.go
index 06fed03e05fbed..ac29abe5021ca9 100644
--- a/src/encoding/json/v2/arshal_time.go
+++ b/src/encoding/json/v2/arshal_time.go
@@ -48,7 +48,7 @@ func makeTimeArshaler(fncs *arshaler, t reflect.Type) *arshaler {
var m durationArshaler
if mo.Format != "" && mo.FormatDepth == xe.Tokens.Depth() {
if !m.initFormat(mo.Format) {
- return newInvalidFormatError(enc, t, mo)
+ return newInvalidFormatError(enc, t)
}
} else if mo.Flags.Get(jsonflags.FormatDurationAsNano) {
return marshalNano(enc, va, mo)
@@ -74,13 +74,13 @@ func makeTimeArshaler(fncs *arshaler, t reflect.Type) *arshaler {
var u durationArshaler
if uo.Format != "" && uo.FormatDepth == xd.Tokens.Depth() {
if !u.initFormat(uo.Format) {
- return newInvalidFormatError(dec, t, uo)
+ return newInvalidFormatError(dec, t)
}
} else if uo.Flags.Get(jsonflags.FormatDurationAsNano) {
return unmarshalNano(dec, va, uo)
} else {
// TODO(https://go.dev/issue/71631): Decide on default duration representation.
- return newUnmarshalErrorBeforeWithSkipping(dec, uo, t, errors.New("no default representation (see https://go.dev/issue/71631); specify an explicit format"))
+ return newUnmarshalErrorBeforeWithSkipping(dec, t, errors.New("no default representation (see https://go.dev/issue/71631); specify an explicit format"))
}
stringify := !u.isNumeric() || xd.Tokens.Last.NeedObjectName() || uo.Flags.Get(jsonflags.StringifyNumbers)
@@ -125,7 +125,7 @@ func makeTimeArshaler(fncs *arshaler, t reflect.Type) *arshaler {
var m timeArshaler
if mo.Format != "" && mo.FormatDepth == xe.Tokens.Depth() {
if !m.initFormat(mo.Format) {
- return newInvalidFormatError(enc, t, mo)
+ return newInvalidFormatError(enc, t)
}
}
@@ -148,7 +148,7 @@ func makeTimeArshaler(fncs *arshaler, t reflect.Type) *arshaler {
var u timeArshaler
if uo.Format != "" && uo.FormatDepth == xd.Tokens.Depth() {
if !u.initFormat(uo.Format) {
- return newInvalidFormatError(dec, t, uo)
+ return newInvalidFormatError(dec, t)
}
} else if uo.Flags.Get(jsonflags.ParseTimeWithLooseRFC3339) {
u.looseRFC3339 = true
diff --git a/src/encoding/json/v2/errors.go b/src/encoding/json/v2/errors.go
index 1f315058692381..940b720210eee6 100644
--- a/src/encoding/json/v2/errors.go
+++ b/src/encoding/json/v2/errors.go
@@ -88,7 +88,10 @@ type SemanticError struct {
}
// coder is implemented by [jsontext.Encoder] or [jsontext.Decoder].
-type coder interface{ StackPointer() jsontext.Pointer }
+type coder interface {
+ StackPointer() jsontext.Pointer
+ Options() Options
+}
// newInvalidFormatError wraps err in a SemanticError because
// the current type t cannot handle the provided options format.
@@ -97,13 +100,13 @@ type coder interface{ StackPointer() jsontext.Pointer }
// If [jsonflags.ReportErrorsWithLegacySemantics] is specified,
// then this automatically skips the next value when unmarshaling
// to ensure that the value is fully consumed.
-func newInvalidFormatError(c coder, t reflect.Type, o *jsonopts.Struct) error {
- err := fmt.Errorf("invalid format flag %q", o.Format)
+func newInvalidFormatError(c coder, t reflect.Type) error {
+ err := fmt.Errorf("invalid format flag %q", c.Options().(*jsonopts.Struct).Format)
switch c := c.(type) {
case *jsontext.Encoder:
err = newMarshalErrorBefore(c, t, err)
case *jsontext.Decoder:
- err = newUnmarshalErrorBeforeWithSkipping(c, o, t, err)
+ err = newUnmarshalErrorBeforeWithSkipping(c, t, err)
}
return err
}
@@ -136,9 +139,9 @@ func newUnmarshalErrorBefore(d *jsontext.Decoder, t reflect.Type, err error) err
// newUnmarshalErrorBeforeWithSkipping is like [newUnmarshalErrorBefore],
// but automatically skips the next value if
// [jsonflags.ReportErrorsWithLegacySemantics] is specified.
-func newUnmarshalErrorBeforeWithSkipping(d *jsontext.Decoder, o *jsonopts.Struct, t reflect.Type, err error) error {
+func newUnmarshalErrorBeforeWithSkipping(d *jsontext.Decoder, t reflect.Type, err error) error {
err = newUnmarshalErrorBefore(d, t, err)
- if o.Flags.Get(jsonflags.ReportErrorsWithLegacySemantics) {
+ if export.Decoder(d).Flags.Get(jsonflags.ReportErrorsWithLegacySemantics) {
if err2 := export.Decoder(d).SkipValue(); err2 != nil {
return err2
}
@@ -170,9 +173,9 @@ func newUnmarshalErrorAfterWithValue(d *jsontext.Decoder, t reflect.Type, err er
// newUnmarshalErrorAfterWithSkipping is like [newUnmarshalErrorAfter],
// but automatically skips the remainder of the current value if
// [jsonflags.ReportErrorsWithLegacySemantics] is specified.
-func newUnmarshalErrorAfterWithSkipping(d *jsontext.Decoder, o *jsonopts.Struct, t reflect.Type, err error) error {
+func newUnmarshalErrorAfterWithSkipping(d *jsontext.Decoder, t reflect.Type, err error) error {
err = newUnmarshalErrorAfter(d, t, err)
- if o.Flags.Get(jsonflags.ReportErrorsWithLegacySemantics) {
+ if export.Decoder(d).Flags.Get(jsonflags.ReportErrorsWithLegacySemantics) {
if err2 := export.Decoder(d).SkipValueRemainder(); err2 != nil {
return err2
}
diff --git a/src/encoding/json/v2_indent.go b/src/encoding/json/v2_indent.go
index 2655942b128b32..b2e8518471ba67 100644
--- a/src/encoding/json/v2_indent.go
+++ b/src/encoding/json/v2_indent.go
@@ -88,17 +88,8 @@ func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error {
}
func appendIndent(dst, src []byte, prefix, indent string) ([]byte, error) {
- // In v2, trailing whitespace is discarded, while v1 preserved it.
- dstLen := len(dst)
- if n := len(src) - len(bytes.TrimRight(src, " \n\r\t")); n > 0 {
- // Append the trailing whitespace afterwards.
- defer func() {
- if len(dst) > dstLen {
- dst = append(dst, src[len(src)-n:]...)
- }
- }()
- }
// In v2, only spaces and tabs are allowed, while v1 allowed any character.
+ dstLen := len(dst)
if len(strings.Trim(prefix, " \t"))+len(strings.Trim(indent, " \t")) > 0 {
// Use placeholder spaces of correct length, and replace afterwards.
invalidPrefix, invalidIndent := prefix, indent
@@ -129,5 +120,10 @@ func appendIndent(dst, src []byte, prefix, indent string) ([]byte, error) {
if err != nil {
return dst[:dstLen], transformSyntacticError(err)
}
+
+ // In v2, trailing whitespace is discarded, while v1 preserved it.
+ if n := len(src) - len(bytes.TrimRight(src, " \n\r\t")); n > 0 {
+ dst = append(dst, src[len(src)-n:]...)
+ }
return dst, nil
}
diff --git a/src/encoding/json/v2_scanner_test.go b/src/encoding/json/v2_scanner_test.go
index bec55212745b01..8885520e6d890a 100644
--- a/src/encoding/json/v2_scanner_test.go
+++ b/src/encoding/json/v2_scanner_test.go
@@ -74,6 +74,7 @@ func TestCompactAndIndent(t *testing.T) {
-5e+2
]`},
{Name(""), "{\"\":\"<>&\u2028\u2029\"}", "{\n\t\"\": \"<>&\u2028\u2029\"\n}"}, // See golang.org/issue/34070
+ {Name(""), `null`, "null \n\r\t"}, // See golang.org/issue/13520 and golang.org/issue/74806
}
var buf bytes.Buffer
for _, tt := range tests {
@@ -102,7 +103,7 @@ func TestCompactAndIndent(t *testing.T) {
buf.Reset()
if err := Indent(&buf, []byte(tt.compact), "", "\t"); err != nil {
t.Errorf("%s: Indent error: %v", tt.Where, err)
- } else if got := buf.String(); got != tt.indent {
+ } else if got := buf.String(); got != strings.TrimRight(tt.indent, " \n\r\t") {
t.Errorf("%s: Compact:\n\tgot: %s\n\twant: %s", tt.Where, indentNewlines(got), indentNewlines(tt.indent))
}
})
diff --git a/src/go.mod b/src/go.mod
index b81f19068aef23..51c38f3bc0310d 100644
--- a/src/go.mod
+++ b/src/go.mod
@@ -1,13 +1,13 @@
module std
-go 1.25
+go 1.26
require (
- golang.org/x/crypto v0.39.0
- golang.org/x/net v0.41.0
+ golang.org/x/crypto v0.41.0
+ golang.org/x/net v0.43.0
)
require (
- golang.org/x/sys v0.33.0 // indirect
- golang.org/x/text v0.26.0 // indirect
+ golang.org/x/sys v0.35.0 // indirect
+ golang.org/x/text v0.28.0 // indirect
)
diff --git a/src/go.sum b/src/go.sum
index 410eb8648a710a..3bc4cb53724095 100644
--- a/src/go.sum
+++ b/src/go.sum
@@ -1,8 +1,8 @@
-golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM=
-golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
-golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw=
-golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA=
-golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
-golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
-golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=
-golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
+golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4=
+golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc=
+golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE=
+golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
+golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
+golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
+golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
diff --git a/src/go/build/deps_test.go b/src/go/build/deps_test.go
index 00e6e562e54178..41dde20bf9cc9b 100644
--- a/src/go/build/deps_test.go
+++ b/src/go/build/deps_test.go
@@ -100,6 +100,7 @@ var depsRules = `
< internal/runtime/maps
< internal/runtime/strconv
< internal/runtime/cgroup
+ < internal/runtime/gc/scan
< runtime
< sync/atomic
< internal/sync
@@ -797,6 +798,20 @@ var depsRules = `
FMT, testing < internal/cgrouptest;
C, CGO < internal/runtime/cgobench;
+
+ # Generate-only packages can have anything they want
+ container/heap,
+ encoding/binary,
+ fmt,
+ hash/maphash,
+ io,
+ log,
+ math/bits,
+ os,
+ reflect,
+ strings,
+ sync
+ < internal/runtime/gc/internal/gen;
`
// listStdPkgs returns the same list of packages as "go list std".
diff --git a/src/go/parser/error_test.go b/src/go/parser/error_test.go
index a4e17dd6dbff4f..252325659cb752 100644
--- a/src/go/parser/error_test.go
+++ b/src/go/parser/error_test.go
@@ -88,7 +88,7 @@ func expectedErrors(fset *token.FileSet, filename string, src []byte) map[token.
s := errRx.FindStringSubmatch(lit)
if len(s) == 3 {
if s[1] == "HERE" {
- pos = here // start of comment
+ pos = here // position right after the previous token prior to comment
} else if s[1] == "AFTER" {
pos += token.Pos(len(lit)) // end of comment
} else {
diff --git a/src/go/parser/parser.go b/src/go/parser/parser.go
index 8a2f95976fc390..9ee1576a99e85d 100644
--- a/src/go/parser/parser.go
+++ b/src/go/parser/parser.go
@@ -455,25 +455,6 @@ var exprEnd = map[token.Token]bool{
token.RBRACE: true,
}
-// safePos returns a valid file position for a given position: If pos
-// is valid to begin with, safePos returns pos. If pos is out-of-range,
-// safePos returns the EOF position.
-//
-// This is hack to work around "artificial" end positions in the AST which
-// are computed by adding 1 to (presumably valid) token positions. If the
-// token positions are invalid due to parse errors, the resulting end position
-// may be past the file's EOF position, which would lead to panics if used
-// later on.
-func (p *parser) safePos(pos token.Pos) (res token.Pos) {
- defer func() {
- if recover() != nil {
- res = token.Pos(p.file.Base() + p.file.Size()) // EOF position
- }
- }()
- _ = p.file.Offset(pos) // trigger a panic if position is out-of-range
- return pos
-}
-
// ----------------------------------------------------------------------------
// Identifiers
@@ -2022,7 +2003,7 @@ func (p *parser) parseCallExpr(callType string) *ast.CallExpr {
}
if _, isBad := x.(*ast.BadExpr); !isBad {
// only report error if it's a new one
- p.error(p.safePos(x.End()), fmt.Sprintf("expression in %s must be function call", callType))
+ p.error(x.End(), fmt.Sprintf("expression in %s must be function call", callType))
}
return nil
}
@@ -2100,7 +2081,7 @@ func (p *parser) makeExpr(s ast.Stmt, want string) ast.Expr {
found = "assignment"
}
p.error(s.Pos(), fmt.Sprintf("expected %s, found %s (missing parentheses around composite literal?)", want, found))
- return &ast.BadExpr{From: s.Pos(), To: p.safePos(s.End())}
+ return &ast.BadExpr{From: s.Pos(), To: s.End()}
}
// parseIfHeader is an adjusted version of parser.header
@@ -2423,7 +2404,7 @@ func (p *parser) parseForStmt() ast.Stmt {
key, value = as.Lhs[0], as.Lhs[1]
default:
p.errorExpected(as.Lhs[len(as.Lhs)-1].Pos(), "at most 2 expressions")
- return &ast.BadStmt{From: pos, To: p.safePos(body.End())}
+ return &ast.BadStmt{From: pos, To: body.End()}
}
// parseSimpleStmt returned a right-hand side that
// is a single unary expression of the form "range x"
diff --git a/src/go/types/check.go b/src/go/types/check.go
index e4e8e95c9974b9..c9753280bf8685 100644
--- a/src/go/types/check.go
+++ b/src/go/types/check.go
@@ -25,7 +25,7 @@ var noposn = atPos(nopos)
const debug = false // leave on during development
// position tracing for panics during type checking
-const tracePos = false // TODO(markfreeman): check performance implications
+const tracePos = true
// gotypesalias controls the use of Alias types.
// As of Apr 16 2024 they are used by default.
diff --git a/src/go/types/decl.go b/src/go/types/decl.go
index f40a8e54b9bf13..42423d291cee8d 100644
--- a/src/go/types/decl.go
+++ b/src/go/types/decl.go
@@ -9,7 +9,6 @@ import (
"go/ast"
"go/constant"
"go/token"
- "internal/buildcfg"
. "internal/types/errors"
"slices"
)
@@ -600,10 +599,6 @@ func (check *Checker) typeDecl(obj *TypeName, tdecl *ast.TypeSpec, def *TypeName
// handle type parameters even if not allowed (Alias type is supported)
if tparam0 != nil {
- if !versionErr && !buildcfg.Experiment.AliasTypeParams {
- check.error(tdecl, UnsupportedFeature, "generic type alias requires GOEXPERIMENT=aliastypeparams")
- versionErr = true
- }
check.openScope(tdecl, "type parameters")
defer check.closeScope()
check.collectTypeParams(&alias.tparams, tdecl.TypeParams)
diff --git a/src/go/types/instantiate.go b/src/go/types/instantiate.go
index db270eb55634e8..eef473447da414 100644
--- a/src/go/types/instantiate.go
+++ b/src/go/types/instantiate.go
@@ -14,7 +14,6 @@ import (
"errors"
"fmt"
"go/token"
- "internal/buildcfg"
. "internal/types/errors"
)
@@ -133,10 +132,6 @@ func (check *Checker) instance(pos token.Pos, orig genericType, targs []Type, ex
res = check.newNamedInstance(pos, orig, targs, expanding) // substituted lazily
case *Alias:
- if !buildcfg.Experiment.AliasTypeParams {
- assert(expanding == nil) // Alias instances cannot be reached from Named types
- }
-
// verify type parameter count (see go.dev/issue/71198 for a test case)
tparams := orig.TypeParams()
if !check.validateTArgLen(pos, orig.obj.Name(), tparams.Len(), len(targs)) {
diff --git a/src/go/types/object_test.go b/src/go/types/object_test.go
index 0b4fce7bb161ce..fc165fb7a0160a 100644
--- a/src/go/types/object_test.go
+++ b/src/go/types/object_test.go
@@ -99,8 +99,7 @@ var testObjects = []struct {
{"type t = struct{f int}", "t", "type p.t = struct{f int}", false},
{"type t = func(int)", "t", "type p.t = func(int)", false},
{"type A = B; type B = int", "A", "type p.A = p.B", true},
- {"type A[P ~int] = struct{}", "A", "type p.A[P ~int] = struct{}", true}, // requires GOEXPERIMENT=aliastypeparams
-
+ {"type A[P ~int] = struct{}", "A", "type p.A[P ~int] = struct{}", true},
{"var v int", "v", "var p.v int", false},
{"func f(int) string", "f", "func p.f(int) string", false},
@@ -115,8 +114,6 @@ func TestObjectString(t *testing.T) {
for i, test := range testObjects {
t.Run(fmt.Sprint(i), func(t *testing.T) {
if test.alias {
- revert := setGOEXPERIMENT("aliastypeparams")
- defer revert()
t.Setenv("GODEBUG", "gotypesalias=1")
}
diff --git a/src/go/types/stdlib_test.go b/src/go/types/stdlib_test.go
index 8e95d23ec38375..79ccbc6fcfad92 100644
--- a/src/go/types/stdlib_test.go
+++ b/src/go/types/stdlib_test.go
@@ -334,6 +334,8 @@ func TestStdFixed(t *testing.T) {
"issue49814.go", // go/types does not have constraints on array size
"issue56103.go", // anonymous interface cycles; will be a type checker error in 1.22
"issue52697.go", // go/types does not have constraints on stack size
+ "issue68054.go", // this test requires GODEBUG=gotypesalias=1
+ "issue68580.go", // this test requires GODEBUG=gotypesalias=1
"issue73309.go", // this test requires GODEBUG=gotypesalias=1
"issue73309b.go", // this test requires GODEBUG=gotypesalias=1
diff --git a/src/internal/buildcfg/exp.go b/src/internal/buildcfg/exp.go
index df84e9fdf46feb..310226bc0146e5 100644
--- a/src/internal/buildcfg/exp.go
+++ b/src/internal/buildcfg/exp.go
@@ -79,10 +79,9 @@ func ParseGOEXPERIMENT(goos, goarch, goexp string) (*ExperimentFlags, error) {
dwarf5Supported := (goos != "darwin" && goos != "ios" && goos != "aix")
baseline := goexperiment.Flags{
- RegabiWrappers: regabiSupported,
- RegabiArgs: regabiSupported,
- AliasTypeParams: true,
- Dwarf5: dwarf5Supported,
+ RegabiWrappers: regabiSupported,
+ RegabiArgs: regabiSupported,
+ Dwarf5: dwarf5Supported,
}
// Start with the statically enabled set of experiments.
diff --git a/src/internal/bytealg/bytealg.go b/src/internal/bytealg/bytealg.go
index 711df74baf14cb..319ea54ba3c77f 100644
--- a/src/internal/bytealg/bytealg.go
+++ b/src/internal/bytealg/bytealg.go
@@ -11,16 +11,18 @@ import (
// Offsets into internal/cpu records for use in assembly.
const (
- offsetX86HasSSE42 = unsafe.Offsetof(cpu.X86.HasSSE42)
- offsetX86HasAVX2 = unsafe.Offsetof(cpu.X86.HasAVX2)
- offsetX86HasPOPCNT = unsafe.Offsetof(cpu.X86.HasPOPCNT)
+ offsetPPC64HasPOWER9 = unsafe.Offsetof(cpu.PPC64.IsPOWER9)
+
+ offsetRISCV64HasV = unsafe.Offsetof(cpu.RISCV64.HasV)
offsetLOONG64HasLSX = unsafe.Offsetof(cpu.Loong64.HasLSX)
offsetLOONG64HasLASX = unsafe.Offsetof(cpu.Loong64.HasLASX)
offsetS390xHasVX = unsafe.Offsetof(cpu.S390X.HasVX)
- offsetPPC64HasPOWER9 = unsafe.Offsetof(cpu.PPC64.IsPOWER9)
+ offsetX86HasSSE42 = unsafe.Offsetof(cpu.X86.HasSSE42)
+ offsetX86HasAVX2 = unsafe.Offsetof(cpu.X86.HasAVX2)
+ offsetX86HasPOPCNT = unsafe.Offsetof(cpu.X86.HasPOPCNT)
)
// MaxLen is the maximum length of the string to be searched for (argument b) in Index.
diff --git a/src/internal/bytealg/compare_riscv64.s b/src/internal/bytealg/compare_riscv64.s
index 6388fcd2095dda..3b1523dfbf7f3b 100644
--- a/src/internal/bytealg/compare_riscv64.s
+++ b/src/internal/bytealg/compare_riscv64.s
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+#include "asm_riscv64.h"
#include "go_asm.h"
#include "textflag.h"
@@ -35,6 +36,46 @@ TEXT compare<>(SB),NOSPLIT|NOFRAME,$0
MIN X11, X13, X5
BEQZ X5, cmp_len
+ MOV $16, X6
+ BLT X5, X6, check8_unaligned
+
+#ifndef hasV
+ MOVB internal∕cpu·RISCV64+const_offsetRISCV64HasV(SB), X6
+ BEQZ X6, compare_scalar
+#endif
+
+ // Use vector if not 8 byte aligned.
+ OR X10, X12, X6
+ AND $7, X6
+ BNEZ X6, vector_loop
+
+ // Use scalar if 8 byte aligned and <= 128 bytes.
+ SUB $128, X5, X6
+ BLEZ X6, compare_scalar_aligned
+
+ PCALIGN $16
+vector_loop:
+ VSETVLI X5, E8, M8, TA, MA, X6
+ VLE8V (X10), V8
+ VLE8V (X12), V16
+ VMSNEVV V8, V16, V0
+ VFIRSTM V0, X7
+ BGEZ X7, vector_not_eq
+ ADD X6, X10
+ ADD X6, X12
+ SUB X6, X5
+ BNEZ X5, vector_loop
+ JMP cmp_len
+
+vector_not_eq:
+ // Load first differing bytes in X8/X9.
+ ADD X7, X10
+ ADD X7, X12
+ MOVBU (X10), X8
+ MOVBU (X12), X9
+ JMP cmp
+
+compare_scalar:
MOV $32, X6
BLT X5, X6, check8_unaligned
@@ -57,9 +98,9 @@ align:
ADD $1, X12
BNEZ X7, align
-check32:
- // X6 contains $32
- BLT X5, X6, compare16
+compare_scalar_aligned:
+ MOV $32, X6
+ BLT X5, X6, check16
compare32:
MOV 0(X10), X15
MOV 0(X12), X16
diff --git a/src/internal/bytealg/equal_riscv64.s b/src/internal/bytealg/equal_riscv64.s
index 87b2d79302dc6a..58e033f8479b69 100644
--- a/src/internal/bytealg/equal_riscv64.s
+++ b/src/internal/bytealg/equal_riscv64.s
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+#include "asm_riscv64.h"
#include "go_asm.h"
#include "textflag.h"
@@ -28,6 +29,35 @@ length_check:
MOV $32, X23
BLT X12, X23, loop4_check
+#ifndef hasV
+ MOVB internal∕cpu·RISCV64+const_offsetRISCV64HasV(SB), X5
+ BEQZ X5, equal_scalar
+#endif
+
+ // Use vector if not 8 byte aligned.
+ OR X10, X11, X5
+ AND $7, X5
+ BNEZ X5, vector_loop
+
+ // Use scalar if 8 byte aligned and <= 64 bytes.
+ SUB $64, X12, X6
+ BLEZ X6, loop32_check
+
+ PCALIGN $16
+vector_loop:
+ VSETVLI X12, E8, M8, TA, MA, X5
+ VLE8V (X10), V8
+ VLE8V (X11), V16
+ VMSNEVV V8, V16, V0
+ VFIRSTM V0, X6
+ BGEZ X6, done
+ ADD X5, X10
+ ADD X5, X11
+ SUB X5, X12
+ BNEZ X12, vector_loop
+ JMP done
+
+equal_scalar:
// Check alignment - if alignment differs we have to do one byte at a time.
AND $7, X10, X9
AND $7, X11, X19
diff --git a/src/internal/bytealg/index_generic.go b/src/internal/bytealg/index_generic.go
index a59e32938e76ec..643bb59ab1edbb 100644
--- a/src/internal/bytealg/index_generic.go
+++ b/src/internal/bytealg/index_generic.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build !amd64 && !arm64 && !s390x && !ppc64le && !ppc64
+//go:build !amd64 && !arm64 && !loong64 && !s390x && !ppc64le && !ppc64
package bytealg
diff --git a/src/internal/bytealg/index_loong64.go b/src/internal/bytealg/index_loong64.go
new file mode 100644
index 00000000000000..ad574d66faee18
--- /dev/null
+++ b/src/internal/bytealg/index_loong64.go
@@ -0,0 +1,30 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bytealg
+
+import "internal/cpu"
+
+// Empirical data shows that using Index can get better
+// performance when len(s) <= 16.
+const MaxBruteForce = 16
+
+func init() {
+	// If SIMD is supported, optimize the cases where the substring length is less than 64 bytes;
+	// otherwise, optimize the cases where the length is less than 32 bytes.
+ if cpu.Loong64.HasLASX || cpu.Loong64.HasLSX {
+ MaxLen = 64
+ } else {
+ MaxLen = 32
+ }
+}
+
+// Cutover reports the number of failures of IndexByte we should tolerate
+// before switching over to Index.
+// n is the number of bytes processed so far.
+// See the bytes.Index implementation for details.
+func Cutover(n int) int {
+ // 1 error per 8 characters, plus a few slop to start.
+ return (n + 16) / 8
+}
diff --git a/src/internal/bytealg/index_loong64.s b/src/internal/bytealg/index_loong64.s
new file mode 100644
index 00000000000000..1016db738dee1b
--- /dev/null
+++ b/src/internal/bytealg/index_loong64.s
@@ -0,0 +1,303 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "textflag.h"
+
+TEXT ·Index(SB),NOSPLIT,$0-56
+ MOVV R7, R6 // R6 = separator pointer
+ MOVV R8, R7 // R7 = separator length
+ JMP indexbody<>(SB)
+
+TEXT ·IndexString(SB),NOSPLIT,$0-40
+ JMP indexbody<>(SB)
+
+// input:
+// R4 = string
+// R5 = length
+// R6 = separator pointer
+// R7 = separator length (2 <= len <= 64)
+TEXT indexbody<>(SB),NOSPLIT,$0
+ // main idea is to load 'sep' into separate register(s)
+	// to avoid repeatedly reloading it again and again
+	// for subsequent substring comparisons
+ SUBV R7, R5, R8
+ ADDV R4, R8 // R8 contains the start of last substring for comparison
+ ADDV $1, R4, R9 // store base for later
+
+ MOVV $8, R5
+ BGE R7, R5, len_gt_or_eq_8
+len_2_7:
+ AND $0x4, R7, R5
+ BNE R5, len_4_7
+
+len_2_3:
+ AND $0x1, R7, R5
+ BNE R5, len_3
+
+len_2:
+ MOVHU (R6), R10
+loop_2:
+ BLT R8, R4, not_found
+ MOVHU (R4), R11
+ ADDV $1, R4
+ BNE R10, R11, loop_2
+ JMP found
+
+len_3:
+ MOVHU (R6), R10
+ MOVBU 2(R6), R11
+loop_3:
+ BLT R8, R4, not_found
+ MOVHU (R4), R12
+ ADDV $1, R4
+ BNE R10, R12, loop_3
+ MOVBU 1(R4), R13
+ BNE R11, R13, loop_3
+ JMP found
+
+len_4_7:
+ AND $0x2, R7, R5
+ BNE R5, len_6_7
+ AND $0x1, R7, R5
+ BNE R5, len_5
+len_4:
+ MOVWU (R6), R10
+loop_4:
+ BLT R8, R4, not_found
+ MOVWU (R4), R11
+ ADDV $1, R4
+ BNE R10, R11, loop_4
+ JMP found
+
+len_5:
+ MOVWU (R6), R10
+ MOVBU 4(R6), R11
+loop_5:
+ BLT R8, R4, not_found
+ MOVWU (R4), R12
+ ADDV $1, R4
+ BNE R10, R12, loop_5
+ MOVBU 3(R4), R13
+ BNE R11, R13, loop_5
+ JMP found
+
+len_6_7:
+ AND $0x1, R7, R5
+ BNE R5, len_7
+len_6:
+ MOVWU (R6), R10
+ MOVHU 4(R6), R11
+loop_6:
+ BLT R8, R4, not_found
+ MOVWU (R4), R12
+ ADDV $1, R4
+ BNE R10, R12, loop_6
+ MOVHU 3(R4), R13
+ BNE R11, R13, loop_6
+ JMP found
+
+len_7:
+ MOVWU (R6), R10
+ MOVWU 3(R6), R11
+loop_7:
+ BLT R8, R4, not_found
+ MOVWU (R4), R12
+ ADDV $1, R4
+ BNE R10, R12, loop_7
+ MOVWU 2(R4), R13
+ BNE R11, R13, loop_7
+ JMP found
+
+len_gt_or_eq_8:
+ BEQ R5, R7, len_8
+ MOVV $17, R5
+ BGE R7, R5, len_gt_or_eq_17
+ JMP len_9_16
+len_8:
+ MOVV (R6), R10
+loop_8:
+ BLT R8, R4, not_found
+ MOVV (R4), R11
+ ADDV $1, R4
+ BNE R10, R11, loop_8
+ JMP found
+
+len_9_16:
+ MOVV (R6), R10
+ SUBV $8, R7
+ MOVV (R6)(R7), R11
+ SUBV $1, R7
+loop_9_16:
+ BLT R8, R4, not_found
+ MOVV (R4), R12
+ ADDV $1, R4
+ BNE R10, R12, loop_9_16
+ MOVV (R4)(R7), R13
+ BNE R11, R13, loop_9_16
+ JMP found
+
+len_gt_or_eq_17:
+ MOVV $25, R5
+ BGE R7, R5, len_gt_or_eq_25
+len_17_24:
+ MOVV 0(R6), R10
+ MOVV 8(R6), R11
+ SUBV $8, R7
+ MOVV (R6)(R7), R12
+ SUBV $1, R7
+loop_17_24:
+ BLT R8, R4, not_found
+ MOVV (R4), R13
+ ADDV $1, R4
+ BNE R10, R13, loop_17_24
+ MOVV 7(R4), R14
+ BNE R11, R14, loop_17_24
+ MOVV (R4)(R7), R15
+ BNE R12, R15, loop_17_24
+ JMP found
+
+len_gt_or_eq_25:
+ MOVV $33, R5
+ BGE R7, R5, len_gt_or_eq_33
+ MOVBU internal∕cpu·Loong64+const_offsetLOONG64HasLSX(SB), R10
+ BNE R10, lsx_len_25_32
+len_25_32:
+ MOVV 0(R6), R10
+ MOVV 8(R6), R11
+ MOVV 16(R6), R12
+ SUBV $8, R7
+ MOVV (R6)(R7), R13
+ SUBV $1, R7
+loop_25_32:
+ BLT R8, R4, not_found
+ MOVV (R4), R14
+ ADDV $1, R4
+ BNE R10, R14, loop_25_32
+ MOVV 7(R4), R15
+ BNE R11, R15, loop_25_32
+ MOVV 15(R4), R16
+ BNE R12, R16, loop_25_32
+ MOVV (R4)(R7), R17
+ BNE R13, R17, loop_25_32
+ JMP found
+
+ // On loong64, LSX is included if LASX is supported.
+lasx_len_25_32:
+lsx_len_25_32:
+ VMOVQ 0(R6), V0
+ SUBV $16, R7
+ VMOVQ (R6)(R7), V1
+ SUBV $1, R7
+lsx_loop_25_32:
+ BLT R8, R4, not_found
+ VMOVQ (R4), V2
+ ADDV $1, R4
+ VSEQV V0, V2, V2
+ VSETANYEQV V2, FCC0
+ BFPT FCC0, lsx_loop_25_32
+
+ VMOVQ (R4)(R7), V3
+ VSEQV V1, V3, V3
+ VSETANYEQV V3, FCC1
+ BFPT FCC1, lsx_loop_25_32
+ JMP found
+
+len_gt_or_eq_33:
+ MOVBU internal∕cpu·Loong64+const_offsetLOONG64HasLASX(SB), R10
+ MOVV $49, R5
+ BGE R7, R5, len_gt_or_eq_49
+len_33_48:
+ BNE R10, lasx_len_33_48
+ JMP lsx_len_33_48
+
+len_gt_or_eq_49:
+len_49_64:
+ BNE R10, lasx_len_49_64
+ JMP lsx_len_49_64
+
+lsx_len_33_48:
+ VMOVQ 0(R6), V0
+ VMOVQ 16(R6), V1
+ SUBV $16, R7
+ VMOVQ (R6)(R7), V2
+ SUBV $1, R7
+lsx_loop_33_48:
+ BLT R8, R4, not_found
+ VMOVQ 0(R4), V3
+ ADDV $1, R4
+ VSEQV V0, V3, V3
+ VSETANYEQV V3, FCC0
+ BFPT FCC0, lsx_loop_33_48
+
+ VMOVQ 15(R4), V4
+ VSEQV V1, V4, V4
+ VSETANYEQV V4, FCC1
+ BFPT FCC1, lsx_loop_33_48
+
+ VMOVQ (R4)(R7), V5
+ VSEQV V2, V5, V5
+ VSETANYEQV V5, FCC2
+ BFPT FCC2, lsx_loop_33_48
+ JMP found
+
+lsx_len_49_64:
+ VMOVQ 0(R6), V0
+ VMOVQ 16(R6), V1
+ VMOVQ 32(R6), V2
+ SUBV $16, R7
+ VMOVQ (R6)(R7), V3
+ SUBV $1, R7
+lsx_loop_49_64:
+ BLT R8, R4, not_found
+ VMOVQ 0(R4), V4
+ ADDV $1, R4
+ VSEQV V0, V4, V4
+ VSETANYEQV V4, FCC0
+ BFPT FCC0, lsx_loop_49_64
+
+ VMOVQ 15(R4), V5
+ VSEQV V1, V5, V5
+ VSETANYEQV V5, FCC1
+ BFPT FCC1, lsx_loop_49_64
+
+ VMOVQ 31(R4), V6
+ VSEQV V2, V6, V6
+ VSETANYEQV V6, FCC2
+ BFPT FCC2, lsx_loop_49_64
+
+ VMOVQ (R4)(R7), V7
+ VSEQV V3, V7, V7
+ VSETANYEQV V7, FCC3
+ BFPT FCC3, lsx_loop_49_64
+ JMP found
+
+lasx_len_33_48:
+lasx_len_49_64:
+lasx_len_33_64:
+ XVMOVQ (R6), X0
+ SUBV $32, R7
+ XVMOVQ (R6)(R7), X1
+ SUBV $1, R7
+lasx_loop_33_64:
+ BLT R8, R4, not_found
+ XVMOVQ (R4), X2
+ ADDV $1, R4
+ XVSEQV X0, X2, X3
+ XVSETANYEQV X3, FCC0
+ BFPT FCC0, lasx_loop_33_64
+
+ XVMOVQ (R4)(R7), X4
+ XVSEQV X1, X4, X5
+ XVSETANYEQV X5, FCC1
+ BFPT FCC1, lasx_loop_33_64
+ JMP found
+
+found:
+ SUBV R9, R4
+ RET
+
+not_found:
+ MOVV $-1, R4
+ RET
diff --git a/src/internal/bytealg/index_native.go b/src/internal/bytealg/index_native.go
index 59c93f9d126b90..f917c7a92adbf1 100644
--- a/src/internal/bytealg/index_native.go
+++ b/src/internal/bytealg/index_native.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build amd64 || arm64 || s390x || ppc64le || ppc64
+//go:build amd64 || arm64 || loong64 || s390x || ppc64le || ppc64
package bytealg
diff --git a/src/internal/bytealg/indexbyte_riscv64.s b/src/internal/bytealg/indexbyte_riscv64.s
index fde00da0eac7d9..527ae6d35ed55b 100644
--- a/src/internal/bytealg/indexbyte_riscv64.s
+++ b/src/internal/bytealg/indexbyte_riscv64.s
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+#include "asm_riscv64.h"
#include "go_asm.h"
#include "textflag.h"
@@ -11,12 +12,14 @@ TEXT ·IndexByte(SB),NOSPLIT,$0-40
// X12 = b_cap (unused)
// X13 = byte to find
AND $0xff, X13, X12 // x12 byte to look for
- MOV X10, X13 // store base for later
SLTI $24, X11, X14
- ADD X10, X11 // end
- BEQZ X14, bigBody
+ BNEZ X14, small
+ JMP indexByteBig<>(SB)
+small:
+ MOV X10, X13 // store base for later
+ ADD X10, X11 // end
SUB $1, X10
loop:
ADD $1, X10
@@ -31,21 +34,19 @@ notfound:
MOV $-1, X10
RET
-bigBody:
- JMP indexByteBig<>(SB)
-
TEXT ·IndexByteString(SB),NOSPLIT,$0-32
// X10 = b_base
// X11 = b_len
// X12 = byte to find
-
AND $0xff, X12 // x12 byte to look for
- MOV X10, X13 // store base for later
SLTI $24, X11, X14
- ADD X10, X11 // end
- BEQZ X14, bigBody
+ BNEZ X14, small
+ JMP indexByteBig<>(SB)
+small:
+ MOV X10, X13 // store base for later
+ ADD X10, X11 // end
SUB $1, X10
loop:
ADD $1, X10
@@ -60,20 +61,41 @@ notfound:
MOV $-1, X10
RET
-bigBody:
- JMP indexByteBig<>(SB)
-
TEXT indexByteBig<>(SB),NOSPLIT|NOFRAME,$0
- // On entry
+ // On entry:
// X10 = b_base
- // X11 = end
+ // X11 = b_len (at least 16 bytes)
// X12 = byte to find
- // X13 = b_base
- // X11 is at least 16 bytes > X10
-
- // On exit
+ // On exit:
// X10 = index of first instance of sought byte, if found, or -1 otherwise
+ MOV X10, X13 // store base for later
+
+#ifndef hasV
+ MOVB internal∕cpu·RISCV64+const_offsetRISCV64HasV(SB), X5
+ BEQZ X5, indexbyte_scalar
+#endif
+
+ PCALIGN $16
+vector_loop:
+ VSETVLI X11, E8, M8, TA, MA, X5
+ VLE8V (X10), V8
+ VMSEQVX X12, V8, V0
+ VFIRSTM V0, X6
+ BGEZ X6, vector_found
+ ADD X5, X10
+ SUB X5, X11
+ BNEZ X11, vector_loop
+ JMP notfound
+
+vector_found:
+ SUB X13, X10
+ ADD X6, X10
+ RET
+
+indexbyte_scalar:
+ ADD X10, X11 // end
+
// Process the first few bytes until we get to an 8 byte boundary
// No need to check for end here as we have at least 16 bytes in
// the buffer.
diff --git a/src/internal/chacha8rand/chacha8_loong64.s b/src/internal/chacha8rand/chacha8_loong64.s
index caa1426a054967..5e6857ed3a6598 100644
--- a/src/internal/chacha8rand/chacha8_loong64.s
+++ b/src/internal/chacha8rand/chacha8_loong64.s
@@ -49,35 +49,23 @@ lsx_chacha8:
MOVV $·chachaIncRot(SB), R11
// load contants
- // VLDREPL.W $0, R10, V0
- WORD $0x30200140
- // VLDREPL.W $1, R10, V1
- WORD $0x30200541
- // VLDREPL.W $2, R10, V2
- WORD $0x30200942
- // VLDREPL.W $3, R10, V3
- WORD $0x30200d43
+ VMOVQ (R10), V0.W4
+ VMOVQ 1(R10), V1.W4
+ VMOVQ 2(R10), V2.W4
+ VMOVQ 3(R10), V3.W4
// load 4-32bit data from incRotMatrix added to counter
VMOVQ (R11), V30
// load seed
- // VLDREPL.W $0, R4, V4
- WORD $0x30200084
- // VLDREPL.W $1, R4, V5
- WORD $0x30200485
- // VLDREPL.W $2, R4, V6
- WORD $0x30200886
- // VLDREPL.W $3, R4, V7
- WORD $0x30200c87
- // VLDREPL.W $4, R4, V8
- WORD $0x30201088
- // VLDREPL.W $5, R4, V9
- WORD $0x30201489
- // VLDREPL.W $6, R4, V10
- WORD $0x3020188a
- // VLDREPL.W $7, R4, V11
- WORD $0x30201c8b
+ VMOVQ (R4), V4.W4
+ VMOVQ 1(R4), V5.W4
+ VMOVQ 2(R4), V6.W4
+ VMOVQ 3(R4), V7.W4
+ VMOVQ 4(R4), V8.W4
+ VMOVQ 5(R4), V9.W4
+ VMOVQ 6(R4), V10.W4
+ VMOVQ 7(R4), V11.W4
// load counter and update counter
VMOVQ R6, V12.W4
diff --git a/src/internal/cpu/cpu.go b/src/internal/cpu/cpu.go
index 6017b1acc9fe96..e92c1851a21467 100644
--- a/src/internal/cpu/cpu.go
+++ b/src/internal/cpu/cpu.go
@@ -31,14 +31,22 @@ var X86 struct {
HasADX bool
HasAVX bool
HasAVX2 bool
+ HasAVX512 bool // Virtual feature: F+CD+BW+DQ+VL
HasAVX512F bool
+ HasAVX512CD bool
+ HasAVX512BITALG bool
HasAVX512BW bool
+ HasAVX512DQ bool
HasAVX512VL bool
+ HasAVX512VPCLMULQDQ bool
+ HasAVX512VBMI bool
+ HasAVX512VBMI2 bool
HasBMI1 bool
HasBMI2 bool
HasERMS bool
HasFSRM bool
HasFMA bool
+ HasGFNI bool
HasOSXSAVE bool
HasPCLMULQDQ bool
HasPOPCNT bool
@@ -48,7 +56,6 @@ var X86 struct {
HasSSSE3 bool
HasSSE41 bool
HasSSE42 bool
- HasAVX512VPCLMULQDQ bool
_ CacheLinePad
}
@@ -161,6 +168,10 @@ var RISCV64 struct {
//go:linkname S390X
//go:linkname RISCV64
+// doDerived, if non-nil, is called after processing GODEBUG to set "derived"
+// feature flags.
+var doDerived func()
+
// Initialize examines the processor and sets the relevant variables above.
// This is called by the runtime package early in program initialization,
// before normal init functions are run. env is set by runtime if the OS supports
@@ -168,6 +179,9 @@ var RISCV64 struct {
func Initialize(env string) {
doinit()
processOptions(env)
+ if doDerived != nil {
+ doDerived()
+ }
}
// options contains the cpu debug options that can be used in GODEBUG.
diff --git a/src/internal/cpu/cpu_x86.go b/src/internal/cpu/cpu_x86.go
index 69b9542ae2a1f5..6fa30b776310b3 100644
--- a/src/internal/cpu/cpu_x86.go
+++ b/src/internal/cpu/cpu_x86.go
@@ -18,7 +18,7 @@ func xgetbv() (eax, edx uint32)
func getGOAMD64level() int32
const (
- // ecx bits
+ // Bits returned in ECX for CPUID EAX=0x1 ECX=0x0
cpuid_SSE3 = 1 << 0
cpuid_PCLMULQDQ = 1 << 1
cpuid_SSSE3 = 1 << 9
@@ -30,19 +30,25 @@ const (
cpuid_OSXSAVE = 1 << 27
cpuid_AVX = 1 << 28
- // ebx bits
+ // "Extended Feature Flag" bits returned in EBX for CPUID EAX=0x7 ECX=0x0
cpuid_BMI1 = 1 << 3
cpuid_AVX2 = 1 << 5
cpuid_BMI2 = 1 << 8
cpuid_ERMS = 1 << 9
cpuid_AVX512F = 1 << 16
+ cpuid_AVX512DQ = 1 << 17
cpuid_ADX = 1 << 19
+ cpuid_AVX512CD = 1 << 28
cpuid_SHA = 1 << 29
cpuid_AVX512BW = 1 << 30
cpuid_AVX512VL = 1 << 31
- // ecx bits
+ // "Extended Feature Flag" bits returned in ECX for CPUID EAX=0x7 ECX=0x0
+ cpuid_AVX512_VBMI = 1 << 1
+ cpuid_AVX512_VBMI2 = 1 << 6
+ cpuid_GFNI = 1 << 8
cpuid_AVX512VPCLMULQDQ = 1 << 10
+ cpuid_AVX512_BITALG = 1 << 12
// edx bits
cpuid_FSRM = 1 << 4
@@ -89,7 +95,9 @@ func doinit() {
// they can be turned off.
options = append(options,
option{Name: "avx512f", Feature: &X86.HasAVX512F},
+ option{Name: "avx512cd", Feature: &X86.HasAVX512CD},
option{Name: "avx512bw", Feature: &X86.HasAVX512BW},
+ option{Name: "avx512dq", Feature: &X86.HasAVX512DQ},
option{Name: "avx512vl", Feature: &X86.HasAVX512VL},
)
}
@@ -154,9 +162,15 @@ func doinit() {
X86.HasAVX512F = isSet(ebx7, cpuid_AVX512F) && osSupportsAVX512
if X86.HasAVX512F {
+ X86.HasAVX512CD = isSet(ebx7, cpuid_AVX512CD)
X86.HasAVX512BW = isSet(ebx7, cpuid_AVX512BW)
+ X86.HasAVX512DQ = isSet(ebx7, cpuid_AVX512DQ)
X86.HasAVX512VL = isSet(ebx7, cpuid_AVX512VL)
X86.HasAVX512VPCLMULQDQ = isSet(ecx7, cpuid_AVX512VPCLMULQDQ)
+ X86.HasAVX512VBMI = isSet(ecx7, cpuid_AVX512_VBMI)
+ X86.HasAVX512VBMI2 = isSet(ecx7, cpuid_AVX512_VBMI2)
+ X86.HasGFNI = isSet(ecx7, cpuid_GFNI)
+ X86.HasAVX512BITALG = isSet(ecx7, cpuid_AVX512_BITALG)
}
X86.HasFSRM = isSet(edx7, cpuid_FSRM)
@@ -170,6 +184,17 @@ func doinit() {
_, _, _, edxExt1 := cpuid(0x80000001, 0)
X86.HasRDTSCP = isSet(edxExt1, cpuid_RDTSCP)
+
+ doDerived = func() {
+ // Rather than carefully gating on fundamental AVX-512 features, we have
+ // a virtual "AVX512" feature that captures F+CD+BW+DQ+VL. BW, DQ, and
+ // VL have a huge effect on which AVX-512 instructions are available,
+ // and these have all been supported on everything except the earliest
+ // Phi chips with AVX-512. No CPU has had CD without F, so we include
+ // it. GOAMD64=v4 also implies exactly this set, and these are all
+ // included in AVX10.1.
+ X86.HasAVX512 = X86.HasAVX512F && X86.HasAVX512CD && X86.HasAVX512BW && X86.HasAVX512DQ && X86.HasAVX512VL
+ }
}
func isSet(hwc uint32, value uint32) bool {
diff --git a/src/internal/cpu/datacache_unsupported.go b/src/internal/cpu/datacache_unsupported.go
new file mode 100644
index 00000000000000..44544aa8c9e5be
--- /dev/null
+++ b/src/internal/cpu/datacache_unsupported.go
@@ -0,0 +1,11 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !386 && !amd64
+
+package cpu
+
+func DataCacheSizes() []uintptr {
+ return nil
+}
diff --git a/src/internal/cpu/datacache_x86.go b/src/internal/cpu/datacache_x86.go
new file mode 100644
index 00000000000000..eb7b93b0a26392
--- /dev/null
+++ b/src/internal/cpu/datacache_x86.go
@@ -0,0 +1,121 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build 386 || amd64
+
+package cpu
+
+// DataCacheSizes returns the size of each data cache from lowest
+// level in the hierarchy to highest.
+//
+// Unlike other parts of this package's public API, it is not safe
+// to reference early in runtime initialization because it allocates.
+// It's intended for testing only.
+func DataCacheSizes() []uintptr {
+ maxFunctionInformation, ebx0, ecx0, edx0 := cpuid(0, 0)
+ if maxFunctionInformation < 1 {
+ return nil
+ }
+
+ switch {
+ // Check for "GenuineIntel"
+ case ebx0 == 0x756E6547 && ecx0 == 0x6C65746E && edx0 == 0x49656E69:
+ return getDataCacheSizesIntel(maxFunctionInformation)
+ // Check for "AuthenticAMD"
+ case ebx0 == 0x68747541 && ecx0 == 0x444D4163 && edx0 == 0x69746E65:
+ return getDataCacheSizesAMD()
+ }
+ return nil
+}
+
+func extractBits(arg uint32, l int, r int) uint32 {
+ if l > r {
+ panic("bad bit range")
+ }
+ return (arg >> l) & ((1 << (r - l + 1)) - 1)
+}
+
+func getDataCacheSizesIntel(maxID uint32) []uintptr {
+ // Constants for cache types
+ const (
+ noCache = 0
+ dataCache = 1
+ instructionCache = 2
+ unifiedCache = 3
+ )
+ if maxID < 4 {
+ return nil
+ }
+
+ // Iterate through CPUID leaf 4 (deterministic cache parameters)
+ var caches []uintptr
+ for i := uint32(0); i < 0xFFFF; i++ {
+ eax, ebx, ecx, _ := cpuid(4, i)
+
+ cacheType := eax & 0xF // EAX bits 4-0: Cache Type
+ if cacheType == 0 {
+ break
+ }
+
+ // Report only data caches.
+ if !(cacheType == dataCache || cacheType == unifiedCache) {
+ continue
+ }
+
+ // Guaranteed to always start counting from 1.
+ level := (eax >> 5) & 0x7
+
+ lineSize := extractBits(ebx, 0, 11) + 1 // Bits 11-0: Line size in bytes - 1
+ partitions := extractBits(ebx, 12, 21) + 1 // Bits 21-12: Physical line partitions - 1
+ ways := extractBits(ebx, 22, 31) + 1 // Bits 31-22: Ways of associativity - 1
+ sets := uint64(ecx) + 1 // Number of sets - 1
+ size := uint64(ways*partitions*lineSize) * sets // Calculate cache size in bytes
+
+ caches = append(caches, uintptr(size))
+
+ // If we see more than one cache described per level, or they appear
+ // out of order, crash.
+ //
+ // Going by the SDM, it's not clear whether this is actually possible,
+ // so this code is purely defensive.
+ if level != uint32(len(caches)) {
+ panic("expected levels to be in order and for there to be one data/unified cache per level")
+ }
+ }
+ return caches
+}
+
+func getDataCacheSizesAMD() []uintptr {
+ maxExtendedFunctionInformation, _, _, _ := cpuid(0x80000000, 0)
+ if maxExtendedFunctionInformation < 0x80000006 {
+ return nil
+ }
+
+ var caches []uintptr
+
+ _, _, ecx5, _ := cpuid(0x80000005, 0)
+ _, _, ecx6, edx6 := cpuid(0x80000006, 0)
+
+	// The size is returned in KB; convert it to bytes.
+ l1dSize := uintptr(extractBits(ecx5, 24, 31) << 10)
+ caches = append(caches, l1dSize)
+
+ // Check that L2 cache is present.
+ if l2Assoc := extractBits(ecx6, 12, 15); l2Assoc == 0 {
+ return caches
+ }
+ l2Size := uintptr(extractBits(ecx6, 16, 31) << 10)
+ caches = append(caches, l2Size)
+
+ // Check that L3 cache is present.
+ if l3Assoc := extractBits(edx6, 12, 15); l3Assoc == 0 {
+ return caches
+ }
+ // Specifies the L3 cache size is within the following range:
+ // (L3Size[31:18] * 512KB) <= L3 cache size < ((L3Size[31:18]+1) * 512KB).
+ l3Size := uintptr(extractBits(edx6, 18, 31) * (512 << 10))
+ caches = append(caches, l3Size)
+
+ return caches
+}
diff --git a/src/internal/cpu/datacache_x86_test.go b/src/internal/cpu/datacache_x86_test.go
new file mode 100644
index 00000000000000..425c525be099f0
--- /dev/null
+++ b/src/internal/cpu/datacache_x86_test.go
@@ -0,0 +1,26 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build 386 || amd64
+
+package cpu_test
+
+import (
+ "internal/cpu"
+ "testing"
+)
+
+// Tests fetching data cache sizes. This test only checks that DataCacheSizes
+// won't explode. Otherwise it's just informational, and dumps the current
+// data cache sizes.
+func TestDataCacheSizes(t *testing.T) {
+ // N.B. Don't try to check these values because we don't know what
+ // kind of environment we're running in. We don't want this test to
+ // fail on some random x86 chip that happens to not support the right
+ // CPUID bits for some reason.
+ caches := cpu.DataCacheSizes()
+ for i, size := range caches {
+ t.Logf("L%d: %d", i+1, size)
+ }
+}
diff --git a/src/internal/goarch/goarch.go b/src/internal/goarch/goarch.go
index f52fe6c42ec0fc..4da56dda9dadc1 100644
--- a/src/internal/goarch/goarch.go
+++ b/src/internal/goarch/goarch.go
@@ -34,6 +34,9 @@ const (
// It is also the size of the machine's native word size (that is, 4 on 32-bit systems, 8 on 64-bit).
const PtrSize = 4 << (^uintptr(0) >> 63)
+// PtrBits is the bit width of a pointer.
+const PtrBits = PtrSize * 8
+
// ArchFamily is the architecture family (AMD64, ARM, ...)
const ArchFamily ArchFamilyType = _ArchFamily
diff --git a/src/internal/goexperiment/exp_aliastypeparams_off.go b/src/internal/goexperiment/exp_aliastypeparams_off.go
deleted file mode 100644
index 620d34ec795a0f..00000000000000
--- a/src/internal/goexperiment/exp_aliastypeparams_off.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// Code generated by mkconsts.go. DO NOT EDIT.
-
-//go:build !goexperiment.aliastypeparams
-
-package goexperiment
-
-const AliasTypeParams = false
-const AliasTypeParamsInt = 0
diff --git a/src/internal/goexperiment/exp_aliastypeparams_on.go b/src/internal/goexperiment/exp_aliastypeparams_on.go
deleted file mode 100644
index 8f6872cdcd361b..00000000000000
--- a/src/internal/goexperiment/exp_aliastypeparams_on.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// Code generated by mkconsts.go. DO NOT EDIT.
-
-//go:build goexperiment.aliastypeparams
-
-package goexperiment
-
-const AliasTypeParams = true
-const AliasTypeParamsInt = 1
diff --git a/src/internal/goexperiment/flags.go b/src/internal/goexperiment/flags.go
index dd7a4f446c1a57..3144d3adfc7aba 100644
--- a/src/internal/goexperiment/flags.go
+++ b/src/internal/goexperiment/flags.go
@@ -100,11 +100,6 @@ type Flags struct {
// inlining phase within the Go compiler.
NewInliner bool
- // AliasTypeParams enables type parameters for alias types.
- // Requires that gotypesalias=1 is set with GODEBUG.
- // This flag will be removed with Go 1.25.
- AliasTypeParams bool
-
// Synctest enables the testing/synctest package.
Synctest bool
diff --git a/src/internal/platform/supported.go b/src/internal/platform/supported.go
index 7d25fd7ee990ec..a07b66d3947cce 100644
--- a/src/internal/platform/supported.go
+++ b/src/internal/platform/supported.go
@@ -194,7 +194,7 @@ func BuildModeSupported(compiler, buildmode, goos, goarch string) bool {
"ios/amd64", "ios/arm64",
"aix/ppc64",
"openbsd/arm64",
- "windows/386", "windows/amd64", "windows/arm", "windows/arm64":
+ "windows/386", "windows/amd64", "windows/arm64":
return true
}
return false
@@ -226,7 +226,7 @@ func InternalLinkPIESupported(goos, goarch string) bool {
case "android/arm64",
"darwin/amd64", "darwin/arm64",
"linux/amd64", "linux/arm64", "linux/loong64", "linux/ppc64le",
- "windows/386", "windows/amd64", "windows/arm", "windows/arm64":
+ "windows/386", "windows/amd64", "windows/arm64":
return true
}
return false
diff --git a/src/internal/platform/zosarch.go b/src/internal/platform/zosarch.go
index ebde978a230f74..a2f5b22ea9a656 100644
--- a/src/internal/platform/zosarch.go
+++ b/src/internal/platform/zosarch.go
@@ -57,7 +57,6 @@ var List = []OSArch{
{"wasip1", "wasm"},
{"windows", "386"},
{"windows", "amd64"},
- {"windows", "arm"},
{"windows", "arm64"},
}
@@ -74,7 +73,7 @@ var distInfo = map[OSArch]osArchInfo{
{"freebsd", "amd64"}: {CgoSupported: true},
{"freebsd", "arm"}: {CgoSupported: true},
{"freebsd", "arm64"}: {CgoSupported: true},
- {"freebsd", "riscv64"}: {CgoSupported: true},
+ {"freebsd", "riscv64"}: {CgoSupported: true, Broken: true},
{"illumos", "amd64"}: {CgoSupported: true},
{"ios", "amd64"}: {CgoSupported: true},
{"ios", "arm64"}: {CgoSupported: true},
@@ -111,6 +110,5 @@ var distInfo = map[OSArch]osArchInfo{
{"wasip1", "wasm"}: {},
{"windows", "386"}: {CgoSupported: true, FirstClass: true},
{"windows", "amd64"}: {CgoSupported: true, FirstClass: true},
- {"windows", "arm"}: {Broken: true},
{"windows", "arm64"}: {CgoSupported: true},
}
diff --git a/src/internal/runtime/gc/internal/gen/gen.go b/src/internal/runtime/gc/internal/gen/gen.go
new file mode 100644
index 00000000000000..0758f9b242b3a5
--- /dev/null
+++ b/src/internal/runtime/gc/internal/gen/gen.go
@@ -0,0 +1,537 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gen
+
+import (
+ "container/heap"
+ "encoding/binary"
+ "fmt"
+ "hash/maphash"
+ "io"
+ "log"
+ "os"
+ "reflect"
+ "strings"
+)
+
+const logCompile = true
+
+func fatalf(f string, args ...any) {
+ panic(fmt.Sprintf(f, args...))
+}
+
+type File struct {
+ w io.Writer
+ funcs []*Func
+ consts []fileConst
+}
+
+func NewFile(w io.Writer) *File {
+ return &File{w: w}
+}
+
+func (f *File) AddFunc(fn *Func) {
+ f.funcs = append(f.funcs, fn)
+}
+
+type fileConst struct {
+ name string
+ data any
+}
+
+func (f *File) AddConst(name string, data any) {
+ // TODO: It would be nice if this were unified with "const" ops, but the
+ // reason I added this was for []*Func consts, which would take an overhaul
+ // to represent in "const" ops.
+ f.consts = append(f.consts, fileConst{name, data})
+}
+
+type Func struct {
+ name string
+ nArgs int
+ idGen int
+ ops []*op
+}
+
+func NewFunc(name string) *Func {
+ fn := &Func{name: name}
+ return fn
+}
+
+// attach adds x to fn's op list. If x has any unattached arguments, this adds
+// those first (recursively).
+func (fn *Func) attach(x *op) {
+ // Make sure the arguments are attached to the function.
+ for _, arg := range x.args {
+ argFn := arg.fn
+ if argFn == nil {
+ fn.attach(arg)
+ } else if argFn != fn {
+ panic("ops from different functions")
+ }
+ }
+
+ x.fn = fn
+ x.id = fn.idGen
+ fn.idGen++
+ fn.ops = append(fn.ops, x)
+}
+
+func Arg[W wrap[T], T Word](fn *Func) T {
+ loc := locReg{cls: regClassGP, reg: fn.nArgs}
+ fn.nArgs++
+ var x W
+ o := &op{op: "arg", kind: x.kind(), c: loc}
+ fn.attach(o)
+ return x.wrap(o)
+}
+
+func Return(results ...Value) {
+ args := make([]*op, len(results))
+ for i, res := range results {
+ args[i] = res.getOp()
+ }
+ var x void
+ x.initOp(&op{op: "return", kind: voidKind, args: args})
+}
+
+type op struct {
+ op string
+ kind *kind
+ args []*op
+
+ id int
+ fn *Func
+
+ // c depends on "op".
+ //
+ // arg locReg - The register containing the argument value
+ // const any - The constant value
+ // deref int - Byte offset from args[0]
+ c any
+ name string
+}
+
+func (o *op) String() string {
+ return fmt.Sprintf("v%02d", o.id)
+}
+
+func imm(val any) *op {
+ return &op{op: "imm", c: val}
+}
+
+func (o *op) equalNoName(o2 *op) bool {
+ if o.op != o2.op || o.c != o2.c || len(o.args) != len(o2.args) {
+ return false
+ }
+ for i, arg := range o.args {
+ if o2.args[i] != arg {
+ return false
+ }
+ }
+ return true
+}
+
+func (o *op) write(w io.Writer) {
+ fmt.Fprintf(w, "v%02d = %s", o.id, o.op)
+ for _, arg := range o.args {
+ fmt.Fprintf(w, " v%02d", arg.id)
+ }
+ if o.c != nil {
+ fmt.Fprintf(w, " %v", o.c)
+ }
+ if o.name != "" {
+ fmt.Fprintf(w, " %q", o.name)
+ }
+ if o.kind != nil {
+ fmt.Fprintf(w, " [%s]", o.kind.typ)
+ }
+ fmt.Fprintf(w, "\n")
+}
+
+func (fn *Func) write(w io.Writer) {
+ fmt.Fprintf(w, "FUNC %s\n", fn.name)
+ for _, op := range fn.ops {
+ op.write(w)
+ }
+}
+
+func (f *File) Compile() {
+ // TODO: CSE constants across the whole file
+
+ fmt.Fprintf(f.w, `#include "go_asm.h"
+#include "textflag.h"
+
+`)
+
+ for _, c := range f.consts {
+ f.emitConst(c.name, c.data)
+ }
+
+ trace := func(fn *Func, step string) {
+ if !logCompile {
+ return
+ }
+ log.Printf("## Compiling %s: %s", fn.name, step)
+ fn.write(os.Stderr)
+ }
+
+ for _, fn := range f.funcs {
+ trace(fn, "initial")
+
+ for {
+ if fn.cse() {
+ trace(fn, "post cse")
+ continue
+ }
+ if fn.deadcode() {
+ trace(fn, "post deadcode")
+ continue
+ }
+ break
+ }
+ fn.addLoads()
+ trace(fn, "post addLoads")
+
+ // Assigning locations requires ops to be in dependency order.
+ fn.schedule()
+ trace(fn, "post schedule")
+
+ locs := fn.assignLocs()
+
+ fn.emit(f, locs)
+ }
+}
+
+// cse performs common subexpression elimination.
+func (fn *Func) cse() bool {
+ // Compute structural hashes
+ hashes := make(map[*op]uint64)
+ var h maphash.Hash
+ var bbuf [8]byte
+ for _, op := range fn.ops {
+ // We ignore the name for canonicalization.
+ h.Reset()
+ h.WriteString(op.op)
+ // TODO: Ideally we would hash o1.c, but we don't have a good way to do that.
+ for _, arg := range op.args {
+ if _, ok := hashes[arg]; !ok {
+ panic("ops not in dependency order")
+ }
+ binary.NativeEndian.PutUint64(bbuf[:], hashes[arg])
+ h.Write(bbuf[:])
+ }
+ hashes[op] = h.Sum64()
+ }
+
+ canon := make(map[uint64][]*op)
+ lookup := func(o *op) *op {
+ hash := hashes[o]
+ for _, o2 := range canon[hash] {
+ if o.equalNoName(o2) {
+ return o2
+ }
+ }
+ canon[hash] = append(canon[hash], o)
+ return o
+ }
+
+ // Canonicalize ops.
+ dirty := false
+ for _, op := range fn.ops {
+ for i, arg := range op.args {
+ newArg := lookup(arg)
+ if arg != newArg {
+ dirty = true
+ op.args[i] = newArg
+ }
+ }
+ }
+ return dirty
+}
+
+// deadcode eliminates unused ops.
+func (fn *Func) deadcode() bool {
+ marks := make(map[*op]bool)
+ var mark func(o *op)
+ mark = func(o *op) {
+ if marks[o] {
+ return
+ }
+ marks[o] = true
+ for _, arg := range o.args {
+ mark(arg)
+ }
+ }
+ // Mark operations that have a side-effect.
+ for _, op := range fn.ops {
+ switch op.op {
+ case "return":
+ mark(op)
+ }
+ }
+ // Discard unmarked operations
+ if len(marks) == len(fn.ops) {
+ return false
+ }
+ newOps := make([]*op, 0, len(marks))
+ for _, op := range fn.ops {
+ if marks[op] {
+ newOps = append(newOps, op)
+ }
+ }
+ fn.ops = newOps
+ return true
+}
+
+// canMem is a map from operation to a bitmap of which arguments can use a
+// direct memory reference.
+var canMem = map[string]uint64{
+ "VPERMB": 1 << 0,
+ "VPERMI2B": 1 << 0,
+ "VPERMT2B": 1 << 0,
+ "VGF2P8AFFINEQB": 1 << 0,
+ "VPORQ": 1 << 0,
+ "VPSUBQ": 1 << 0,
+ "VPSHUFBITQMB": 1 << 0,
+}
+
+// addLoads inserts load ops for ops that can't take memory inputs directly.
+func (fn *Func) addLoads() {
+ // A lot of operations can directly take memory locations. If there's only a
+ // single reference to a deref operation, and the operation can do the deref
+ // itself, eliminate the deref. If there's more than one reference, then we
+ // leave the load so we can share the value in the register.
+ nRefs := fn.opRefs()
+ loads := make(map[*op]*op) // deref -> load
+ for _, o := range fn.ops {
+ canMask := canMem[o.op]
+ for i, arg := range o.args {
+ // TODO: Many AVX-512 operations that support memory operands also
+ // support a ".BCST" suffix that performs a broadcasting memory
+ // load. If the const can be broadcast and all uses support
+ // broadcast load, it would be nice to use .BCST. I'm not sure if
+ // that belongs in this pass or a different one.
+ if arg.op == "deref" || arg.op == "const" {
+ // These produce memory locations.
+			if canMask&(1<<i) == 0 || nRefs[arg] > 1 {
+ // This argument needs to be loaded into a register.
+ load, ok := loads[arg]
+ if !ok {
+ load = makeLoad(arg)
+ fn.attach(load)
+ loads[arg] = load
+ }
+ o.args[i] = load
+ }
+ }
+ }
+ }
+}
+
+func (fn *Func) opRefs() map[*op]int {
+ refs := make(map[*op]int)
+ for _, o1 := range fn.ops {
+ for _, arg := range o1.args {
+ refs[arg]++
+ }
+ }
+ return refs
+}
+
+func makeLoad(deref *op) *op {
+ var inst string
+ switch deref.kind.reg {
+ default:
+ fatalf("don't know how to load %v", deref.kind.reg)
+ case regClassGP:
+ inst = "MOVQ"
+ case regClassZ:
+ inst = "VMOVDQU64"
+ }
+ // The load references deref rather than deref.args[0] because when we
+ // assign locations, the deref op gets the memory location to load from,
+ // while its argument has some other location (like a register). Also, the
+ // offset to deref is attached to the deref op.
+ return &op{op: inst, kind: deref.kind, args: []*op{deref}}
+}
+
+type opHeap []*op
+
+func (h opHeap) Len() int { return len(h) }
+func (h opHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
+func (h opHeap) Less(i, j int) bool {
+ priority := func(o *op) int {
+ if o.op == "deref" || o.op == "const" {
+ // Input to memory load
+ return 1
+ }
+ if len(o.args) > 0 && (o.args[0].op == "deref" || o.args[0].op == "const") {
+ // Memory load
+ return 2
+ }
+ return 100
+ }
+ if p1, p2 := priority(h[i]), priority(h[j]); p1 != p2 {
+ return p1 < p2
+ }
+ return h[i].id < h[j].id
+}
+
+func (h *opHeap) Push(x any) {
+ *h = append(*h, x.(*op))
+}
+
+func (h *opHeap) Pop() any {
+ old := *h
+ n := len(old)
+ x := old[n-1]
+ *h = old[0 : n-1]
+ return x
+}
+
+// schedule ensures fn's ops are in dependency order.
+func (fn *Func) schedule() {
+ // TODO: This tends to generate a huge amount of register pressure, mostly
+ // because it floats loads as early as possible and partly because it has no
+ // concept of rematerialization and CSE can make rematerializable values
+	// live for a very long time. In some sense it doesn't matter because we
+ // don't run out of registers for anything we need.
+
+ missing := make(map[*op]int)
+ uses := make(map[*op][]*op)
+ var h opHeap
+ for _, op := range fn.ops {
+ if len(op.args) == 0 {
+ h = append(h, op)
+ } else {
+ missing[op] = len(op.args)
+ }
+ for _, arg := range op.args {
+ uses[arg] = append(uses[arg], op)
+ }
+ }
+ heap.Init(&h)
+
+ newOps := make([]*op, 0, len(fn.ops))
+ for len(h) > 0 {
+ if false {
+ log.Printf("schedule: %s", h)
+ }
+ top := h[0]
+ newOps = append(newOps, top)
+ heap.Pop(&h)
+ for _, o := range uses[top] {
+ missing[o]--
+ if missing[o] == 0 {
+ heap.Push(&h, o)
+ }
+ }
+ }
+ if len(newOps) != len(fn.ops) {
+ log.Print("schedule didn't schedule all ops")
+ log.Print("before:")
+ fn.write(os.Stderr)
+ fn.ops = newOps
+ log.Print("after:")
+ fn.write(os.Stderr)
+ log.Fatal("bad schedule")
+ }
+
+ fn.ops = newOps
+}
+
+func (fn *Func) emit(f *File, locs map[*op]loc) {
+ w := f.w
+
+ // Emit constants first
+ for _, o := range fn.ops {
+ if o.op == "const" {
+ name := locs[o].(locMem).name
+ f.emitConst(name, o.c)
+ }
+ }
+
+ fmt.Fprintf(w, "TEXT %s(SB), NOSPLIT, $0-0\n", fn.name)
+
+ // Emit body
+ for _, o := range fn.ops {
+ switch o.op {
+ case "const", "arg", "return", "deref", "imm":
+ // Does not produce code
+ continue
+ }
+ switch o.op {
+ case "addConst":
+ fatalf("addConst not lowered")
+ }
+
+ opName := o.op
+ // A ".mask" suffix is used to distinguish AVX-512 ops that use the same
+ // mnemonic for regular and masked mode.
+ opName = strings.TrimSuffix(opName, ".mask")
+
+ fmt.Fprintf(w, "\t%s", opName)
+ if o.op == "VGF2P8AFFINEQB" {
+ // Hidden immediate, but always 0
+ //
+ // TODO: Replace this with an imm input.
+ fmt.Fprintf(w, " $0,")
+ }
+ for i, arg := range o.args {
+ if i == 0 {
+ fmt.Fprintf(w, " ")
+ } else {
+ fmt.Fprintf(w, ", ")
+ }
+ if arg.op == "imm" {
+ fmt.Fprintf(w, "$0x%x", arg.c)
+ } else {
+ fmt.Fprint(w, locs[arg].LocString())
+ }
+ }
+ if _, ok := opRMW[o.op]; ok {
+ // Read-modify-write instructions, so the output is already in the
+ // arguments above.
+ } else {
+ fmt.Fprintf(w, ", %s", locs[o].LocString())
+ }
+ fmt.Fprintf(w, "\n")
+ }
+ fmt.Fprintf(w, "\tRET\n")
+ fmt.Fprintf(w, "\n")
+}
+
+func (f *File) emitConst(name string, data any) {
+ switch data := data.(type) {
+ case []*Func:
+ fmt.Fprintf(f.w, "GLOBL %s(SB), RODATA, $%#x\n", name, len(data)*8)
+ for i, fn := range data {
+ fmt.Fprintf(f.w, "DATA %s+%#02x(SB)/8, ", name, 8*i)
+ if fn == nil {
+ fmt.Fprintf(f.w, "$0\n")
+ } else {
+ fmt.Fprintf(f.w, "$%s(SB)\n", fn.name)
+ }
+ }
+ fmt.Fprintf(f.w, "\n")
+ return
+ }
+
+ // Assume it's a numeric slice or array
+ rv := reflect.ValueOf(data)
+ sz := int(rv.Type().Elem().Size())
+ fmt.Fprintf(f.w, "GLOBL %s(SB), RODATA, $%#x\n", name, rv.Len()*sz)
+ for wi := 0; wi < sz*rv.Len()/8; wi++ { // Iterate over words
+ var word uint64
+ for j := 0; j < 8/sz; j++ { // Iterate over elements in this word
+ d := rv.Index(wi*8/sz + j).Uint()
+ word |= d << (j * sz * 8)
+ }
+ fmt.Fprintf(f.w, "DATA %s+%#02x(SB)/8, $%#016x\n", name, 8*wi, word)
+ }
+
+ fmt.Fprintf(f.w, "\n")
+}
diff --git a/src/internal/runtime/gc/internal/gen/gp.go b/src/internal/runtime/gc/internal/gen/gp.go
new file mode 100644
index 00000000000000..390d6e50eda178
--- /dev/null
+++ b/src/internal/runtime/gc/internal/gen/gp.go
@@ -0,0 +1,26 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gen
+
+type Uint64 struct {
+ valGP
+}
+
+var kindUint64 = &kind{typ: "Uint64", reg: regClassGP}
+
+func ConstUint64(c uint64, name string) (y Uint64) {
+ y.initOp(&op{op: "const", kind: y.kind(), c: c, name: name})
+ return y
+}
+
+func (Uint64) kind() *kind {
+ return kindUint64
+}
+
+func (Uint64) wrap(x *op) Uint64 {
+ var y Uint64
+ y.initOp(x)
+ return y
+}
diff --git a/src/internal/runtime/gc/internal/gen/regalloc.go b/src/internal/runtime/gc/internal/gen/regalloc.go
new file mode 100644
index 00000000000000..424a295afb8062
--- /dev/null
+++ b/src/internal/runtime/gc/internal/gen/regalloc.go
@@ -0,0 +1,338 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gen
+
+import (
+ "fmt"
+ "log"
+ "math/bits"
+ "strings"
+)
+
+const traceRegAlloc = true
+
+type regClass uint8
+
+const (
+ regClassFixed regClass = iota
+ regClassGP
+ regClassZ
+ regClassK
+
+ numRegClasses
+
+ regClassNone = ^regClass(0)
+)
+
+type locReg struct {
+ cls regClass
+ reg int
+}
+
+func (l locReg) LocString() string {
+ switch l.cls {
+ case regClassFixed:
+ return fixedRegs[l.reg]
+ case regClassGP:
+ return gpRegs[l.reg]
+ case regClassZ:
+ return fmt.Sprintf("Z%d", l.reg)
+ case regClassK:
+ return fmt.Sprintf("K%d", l.reg)
+ }
+ panic("bad register class")
+}
+
+func (l locReg) Deref(off int) (loc, error) {
+ return locMem{l, off, ""}, nil
+}
+
+func (l locReg) Reg() (locReg, bool) {
+ return l, true
+}
+
+type locMem struct {
+ base locReg
+ off int
+ name string
+}
+
+func (l locMem) LocString() string {
+ if l.base.cls == regClassFixed && l.base.reg == regSB && l.off == 0 {
+ return l.name + "(SB)"
+ }
+ if l.name != "" {
+ return fmt.Sprintf("%s+%d(%s)", l.name, l.off, l.base.LocString())
+ }
+ if l.off != 0 {
+ return fmt.Sprintf("%d(%s)", l.off, l.base.LocString())
+ }
+ return "(" + l.base.LocString() + ")"
+}
+
+func (l locMem) Deref(off int) (loc, error) {
+ return nil, fmt.Errorf("cannot dereference already memory address %s", l.LocString())
+}
+
+func (l locMem) Reg() (locReg, bool) {
+ if l.base.cls == regClassFixed {
+ return locReg{}, false
+ }
+ return l.base, true
+}
+
+type loc interface {
+ LocString() string // Return the assembly syntax for this location
+ Deref(off int) (loc, error) // Treat this location as an address and return a location with the contents of memory at that address
+ Reg() (locReg, bool) // Register used by this location
+}
+
+var opRMW = map[string]int{
+ "VPERMI2B": 2, // Overwrites third argument
+ "VPERMI2B.Z": 3, // Overwrites fourth argument
+ "VPERMI2B.mask": 3, // Overwrites fourth argument
+ "VPERMT2B": 1, // Overwrites second argument TODO: Check this. Unused for now.
+ "VPBROADCASTQ.mask": 2, // Overwrites last argument
+}
+
+// TODO: Should we have a general rule that all ".mask" instructions overwrite
+// their last argument?
+
+const (
+ regSB = iota
+ regFP
+)
+
+var fixedRegs = []string{regSB: "SB", regFP: "FP"}
+var gpRegs = []string{"AX", "BX", "CX", "DI", "SI", "R8", "R9", "R10", "R11"} // ABI argument order
+
+type regSet struct {
+ inUse [numRegClasses]uint32
+}
+
+func (s *regSet) used(o *op, l loc) {
+ if l == nil {
+ return
+ }
+ reg, ok := l.Reg()
+ if !ok {
+ return
+ }
+ if traceRegAlloc {
+ log.Printf(" alloc %s @ v%02d", reg.LocString(), o.id)
+ }
+	if s.inUse[reg.cls]&(1<<reg.reg) != 0 {
+		fatalf("v%02d: register %s already in use", o.id, reg.LocString())
+	}
+	s.inUse[reg.cls] |= 1 << reg.reg
+}
+
+// regAlloc assigns a location to every op in fn and returns the
+// assignment. Generated constants are named using nameBase.
+//
+// NOTE(review): this span was reconstructed from a garbled patch line;
+// confirm against the original CL before applying.
+func regAlloc(fn *Func, nameBase string) map[*op]loc {
+	if traceRegAlloc {
+		log.Printf("%s regalloc %s %s", strings.Repeat("=", 10), nameBase, strings.Repeat("=", 10))
+	}
+
+ // Create map from op -> fn.ops index
+ opIndexes := make(map[*op]int, len(fn.ops))
+ for i, o := range fn.ops {
+ opIndexes[o] = i
+ }
+
+ // Read-modify-write operations share a location with one of their inputs.
+ // Likewise, deref ops extend the lifetime of their input (but in a shared
+ // way, unlike RMW ops).
+ //
+ // Compute a map from each op to the earliest "canonical" op whose live
+ // range we'll use.
+ canon := make(map[*op]*op)
+ overwritten := make(map[*op]bool)
+ for _, o := range fn.ops {
+ // Check that this op doesn't use any overwritten inputs.
+ for _, arg := range o.args {
+ if overwritten[arg] {
+ // TODO: The solution to this is to insert copy ops.
+ fatalf("op %+v uses overwritten input %+v", o, arg)
+ }
+ }
+
+ // Record canonical op.
+ rmw, ok := opRMW[o.op]
+ if ok {
+ canon[o] = canon[o.args[rmw]]
+ // Record that the input is dead now and must not be referenced.
+ overwritten[o.args[rmw]] = true
+ } else if o.op == "deref" {
+ canon[o] = canon[o.args[0]]
+ } else {
+ canon[o] = o
+ }
+ }
+
+ // Compute live ranges of each canonical op.
+ //
+ // First, find the last use of each op.
+ lastUses := make(map[*op]*op) // Canonical creation op -> last use op
+ for _, op := range fn.ops {
+ for _, arg := range op.args {
+ lastUses[canon[arg]] = op
+ }
+ }
+ // Invert the last uses map to get a map from op to the (canonical) values
+ // that die at that op.
+ lastUseMap := make(map[*op][]*op) // op of last use -> (canonical) creation ops
+ for def, lastUse := range lastUses {
+ lastUseMap[lastUse] = append(lastUseMap[lastUse], def)
+ }
+
+ // Prepare for assignments
+ regUsed := make([]regSet, len(fn.ops)) // In-use registers at each op
+ for i := range regUsed {
+ // X15/Y15/Z15 is reserved by the Go ABI
+ regUsed[i].inUse[regClassZ] |= 1 << 15
+ // K0 is contextual (if used as an opmask, it means no mask). Too
+ // complicated, so just ignore it.
+ regUsed[i].inUse[regClassK] |= 1 << 0
+ }
+ locs := make(map[*op]loc)
+ assign := func(o *op, l loc) {
+ if have, ok := locs[o]; ok {
+ fatalf("op %+v already assigned location %v (new %v)", o, have, l)
+ return
+ }
+ if o == canon[o] {
+ // Mark this location used over o's live range
+ for i := opIndexes[o]; i < opIndexes[lastUses[o]]; i++ {
+ regUsed[i].used(fn.ops[i], l)
+ }
+ }
+ locs[o] = l
+ }
+
+ // Assign fixed locations
+ id := 0
+ for _, o := range fn.ops {
+ switch o.op {
+ case "arg":
+ if traceRegAlloc {
+ log.Printf("fixed op %+v", o)
+ }
+ assign(o, o.c.(locReg))
+ case "const":
+ if traceRegAlloc {
+ log.Printf("fixed op %+v", o)
+ }
+ name := o.name
+ if name == "" {
+ name = fmt.Sprintf("%s_%d<>", nameBase, id)
+ id++
+ } else if name[0] == '*' {
+ name = nameBase + name[1:]
+ }
+ assign(o, locMem{locReg{cls: regClassFixed, reg: regSB}, 0, name})
+ case "return":
+ if traceRegAlloc {
+ log.Printf("fixed op %+v", o)
+ }
+ assign(o, nil) // no location
+ // TODO: argZ should start at 0.
+ argGP, argZ := 0, 1
+ for _, arg := range o.args {
+ switch arg.kind.reg {
+ default:
+ fatalf("bad register class for return value")
+ case regClassGP:
+ assign(canon[arg], locReg{regClassGP, argGP})
+ argGP++
+ case regClassZ:
+ assign(canon[arg], locReg{regClassZ, argZ})
+ argZ++
+ }
+ }
+ case "imm":
+ assign(o, nil) // no location
+ }
+ }
+
+ // Assign locations.
+ for _, o := range fn.ops {
+ if traceRegAlloc {
+ log.Printf("assign %+v", o)
+ }
+
+ if _, ok := locs[o]; ok {
+ // Already assigned a fixed location above.
+ continue
+ }
+
+ if o.op == "deref" {
+ loc, err := locs[o.args[0]].Deref(o.c.(int))
+ if err != nil {
+ fatalf("%v", err)
+ }
+ // We don't "assign" here because we've already processed the
+ // canonical op, which marked loc's register as in-use for the whole
+ // live range.
+ locs[o] = loc
+ continue
+ }
+
+ if canon[o] != o {
+ // Copy the canonical op's location.
+ locs[o] = locs[canon[o]]
+ continue
+ }
+ // Below here we know that o is already a canonical op.
+
+ if _, ok := opRMW[o.op]; ok {
+ fatalf("read-modify-write op not canonicalized")
+ }
+
+ // Find a free register of the right class.
+ cls := o.kind.reg
+ var used uint32
+ for i := opIndexes[o]; i < opIndexes[lastUses[o]]; i++ {
+ used |= regUsed[i].inUse[cls]
+ }
+
+ // Assign result location.
+ num := bits.TrailingZeros32(^used)
+ switch cls {
+ default:
+ fatalf("unknown reg class %v", cls)
+ case regClassGP:
+ if num >= len(gpRegs) {
+ panic("out of GP regs")
+ }
+ case regClassZ:
+ if num >= 32 {
+ panic("out of Z regs")
+ }
+ case regClassK:
+ if num >= 8 {
+ panic("out of K regs")
+ }
+ }
+ loc := locReg{cls, num}
+ assign(o, loc)
+ }
+
+ return locs
+}
diff --git a/src/internal/runtime/gc/internal/gen/simd.go b/src/internal/runtime/gc/internal/gen/simd.go
new file mode 100644
index 00000000000000..0360aa4b06560f
--- /dev/null
+++ b/src/internal/runtime/gc/internal/gen/simd.go
@@ -0,0 +1,246 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gen
+
+type Uint8x64 struct {
+ valAny
+}
+
+var kindUint8x64 = &kind{typ: "Uint8x64", reg: regClassZ}
+
+func ConstUint8x64(c [64]uint8, name string) (y Uint8x64) {
+ y.initOp(&op{op: "const", kind: y.kind(), c: c, name: name})
+ return y
+}
+
+func (Uint8x64) kind() *kind {
+ return kindUint8x64
+}
+
+func (Uint8x64) wrap(x *op) Uint8x64 {
+ var y Uint8x64
+ y.initOp(x)
+ return y
+}
+
+func (x Uint8x64) ToUint64x8() (z Uint64x8) {
+ z.op = x.op
+ return z
+}
+
+func (x Uint8x64) Shuffle(shuf Uint8x64) (y Uint8x64) {
+ if shuf.op.op == "const" {
+ // TODO: There are often patterns we can take advantage of here. Sometimes
+ // we can do a broadcast. Sometimes we can at least do a quadword
+ // permutation instead of a full byte permutation.
+
+ // Range check the shuffle
+ for i, inp := range shuf.op.c.([64]uint8) {
+ // 0xff is a special "don't care" value
+ if !(inp == 0xff || inp < 64) {
+ fatalf("shuffle[%d] = %d out of range [0, %d) or 0xff", i, inp, 64)
+ }
+ }
+ }
+
+ args := []*op{x.op, shuf.op}
+ y.initOp(&op{op: "VPERMB", kind: y.kind(), args: args})
+ return y
+}
+
+func (x Uint8x64) ShuffleZeroed(shuf Uint8x64, mask Mask64) (y Uint8x64) {
+ args := []*op{x.op, shuf.op, mask.op}
+ y.initOp(&op{op: "VPERMB.Z", kind: y.kind(), args: args})
+ return y
+}
+
+func (x Uint8x64) ShuffleMasked(shuf Uint8x64, mask Mask64) (y Uint8x64) {
+ args := []*op{x.op, shuf.op, mask.op}
+ y.initOp(&op{op: "VPERMB.mask", kind: y.kind(), args: args})
+ return y
+}
+
+// TODO: The two-argument shuffle is a little weird. You almost want the
+// receiver to be the shuffle and the two arguments to be the two inputs, but
+// that's almost certainly *not* what you want for the single input shuffle.
+
+func (x Uint8x64) Shuffle2(y Uint8x64, shuf Uint8x64) (z Uint8x64) {
+ // Confusingly, the inputs are in the opposite order from what you'd expect.
+ args := []*op{y.op, x.op, shuf.op}
+ z.initOp(&op{op: "VPERMI2B", kind: z.kind(), args: args})
+ return z
+}
+
+func (x Uint8x64) Shuffle2Zeroed(y Uint8x64, shuf Uint8x64, mask Mask64) (z Uint8x64) {
+ // Confusingly, the inputs are in the opposite order from what you'd expect.
+ args := []*op{y.op, x.op, mask.op, shuf.op}
+ z.initOp(&op{op: "VPERMI2B.Z", kind: z.kind(), args: args})
+ return z
+}
+
+func (x Uint8x64) Shuffle2Masked(y Uint8x64, shuf Uint8x64, mask Mask64) (z Uint8x64) {
+ // Confusingly, the inputs are in the opposite order from what you'd expect.
+ args := []*op{y.op, x.op, mask.op, shuf.op}
+ z.initOp(&op{op: "VPERMI2B.mask", kind: z.kind(), args: args})
+ return z
+}
+
+type Uint64x8 struct {
+ valAny
+}
+
+var kindUint64x8 = &kind{typ: "Uint64x8", reg: regClassZ}
+
+func ConstUint64x8(c [8]uint64, name string) (y Uint64x8) {
+ // TODO: Sometimes these can be optimized into broadcast loads.
+ y.initOp(&op{op: "const", kind: y.kind(), c: c, name: name})
+ return y
+}
+
+func BroadcastUint64x8Zeroed(src Uint64, mask Mask8) (z Uint64x8) {
+ z.initOp(&op{op: "VPBROADCASTQ.Z", kind: z.kind(), args: []*op{src.op, mask.op}})
+ return z
+}
+
+func (x Uint64x8) BroadcastMasked(src Uint64, mask Mask8) (z Uint64x8) {
+ z.initOp(&op{op: "VPBROADCASTQ.mask", kind: z.kind(), args: []*op{src.op, mask.op, x.op}})
+ return z
+}
+
+func (Uint64x8) kind() *kind {
+ return kindUint64x8
+}
+
+func (Uint64x8) wrap(x *op) Uint64x8 {
+ var y Uint64x8
+ y.initOp(x)
+ return y
+}
+
+func (x Uint64x8) Or(y Uint64x8) (z Uint64x8) {
+ z.initOp(&op{op: "VPORQ", kind: z.kind(), args: []*op{y.op, x.op}})
+ return z
+}
+
+func (x Uint64x8) Sub(y Uint64x8) (z Uint64x8) {
+ // Arguments are backwards
+ z.initOp(&op{op: "VPSUBQ", kind: z.kind(), args: []*op{y.op, x.op}})
+ return z
+}
+
+func (x Uint64x8) ToUint8x64() (z Uint8x64) {
+ z.op = x.op
+ return z
+}
+
+func (x Uint64x8) GF2P8Affine(y Uint8x64) (z Uint8x64) {
+ // matrix, vector
+ z.initOp(&op{op: "VGF2P8AFFINEQB", kind: z.kind(), args: []*op{x.op, y.op}})
+ return z
+}
+
+func (x Uint64x8) ShuffleBits(y Uint8x64) (z Mask64) {
+ z.initOp(&op{op: "VPSHUFBITQMB", kind: z.kind(), args: []*op{y.op, x.op}})
+ return z
+}
+
+func (x Uint64x8) ShuffleBitsMasked(y Uint8x64, mask Mask64) (z Mask64) {
+ // This is always zeroing if the mask is provided.
+ z.initOp(&op{op: "VPSHUFBITQMB", kind: z.kind(), args: []*op{y.op, x.op, mask.op}})
+ return z
+}
+
+type Mask8 struct {
+ valAny
+}
+
+var kindMask8 = &kind{typ: "Mask8", reg: regClassK}
+
+func ConstMask8(c uint8) (y Mask8) {
+ var tmp Uint64
+ tmp.initOp(&op{op: "MOVQ", kind: tmp.kind(), args: []*op{imm(c)}})
+ y.initOp(&op{op: "KMOVB", kind: y.kind(), args: []*op{tmp.op}})
+ return y
+}
+
+func (Mask8) kind() *kind {
+ return kindMask8
+}
+
+func (Mask8) wrap(x *op) Mask8 {
+ var y Mask8
+ y.initOp(x)
+ return y
+}
+
+func (x Mask8) ToUint8() (z Uint64) {
+ z.initOp(&op{op: "KMOVB", kind: z.kind(), args: []*op{x.op}})
+ return z
+}
+
+func (x Mask8) Or(y Mask8) (z Mask8) {
+ z.initOp(&op{op: "KORQ", kind: z.kind(), args: []*op{y.op, x.op}})
+ return z
+}
+
+func (x Mask8) ShiftLeft(c uint8) (z Mask8) {
+ if c == 0 {
+ z = x
+ } else {
+ z.initOp(&op{op: "KSHIFTLB", kind: z.kind(), args: []*op{imm(c), x.op}})
+ }
+ return z
+}
+
+type Mask64 struct {
+ valAny
+}
+
+var kindMask64 = &kind{typ: "Mask64", reg: regClassK}
+
+func ConstMask64(c uint64) (y Mask64) {
+ var tmp Uint64
+ tmp.initOp(&op{op: "MOVQ", kind: tmp.kind(), args: []*op{imm(c)}})
+ y.initOp(&op{op: "KMOVQ", kind: y.kind(), args: []*op{tmp.op}})
+ return y
+}
+
+func (Mask64) kind() *kind {
+ return kindMask64
+}
+
+func (Mask64) wrap(x *op) Mask64 {
+ var y Mask64
+ y.initOp(x)
+ return y
+}
+
+func (x Mask64) ToUint64() (z Uint64) {
+ z.initOp(&op{op: "KMOVQ", kind: z.kind(), args: []*op{x.op}})
+ return z
+}
+
+func (x Mask64) Or(y Mask64) (z Mask64) {
+ z.initOp(&op{op: "KORQ", kind: z.kind(), args: []*op{y.op, x.op}})
+ return z
+}
+
+func (x Mask64) ShiftLeft(c uint8) (z Mask64) {
+ if c == 0 {
+ z = x
+ } else {
+ z.initOp(&op{op: "KSHIFTLQ", kind: z.kind(), args: []*op{imm(c), x.op}})
+ }
+ return z
+}
+
+func (x Mask64) ShiftRight(c uint8) (z Mask64) {
+ if c == 0 {
+ z = x
+ } else {
+ z.initOp(&op{op: "KSHIFTRQ", kind: z.kind(), args: []*op{imm(c), x.op}})
+ }
+ return z
+}
diff --git a/src/internal/runtime/gc/internal/gen/val.go b/src/internal/runtime/gc/internal/gen/val.go
new file mode 100644
index 00000000000000..24a843a62c4257
--- /dev/null
+++ b/src/internal/runtime/gc/internal/gen/val.go
@@ -0,0 +1,137 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gen
+
+import "sync"
+
+type Value interface {
+ kind() *kind
+ getOp() *op
+}
+
+type Word interface {
+ Value
+ isWord()
+}
+
+// wrap is an unfortunate necessity so that we can pass Value types around as
+// values (not pointers), but still have generic functions that can construct a
+// new Value. Ideally we would just have a method on Value to initialize its op,
+// but that needs to have a non-pointer receiver to satisfy the interface and
+// then it can't mutate the Value.
+type wrap[T Value] interface {
+ Value
+ wrap(x *op) T
+}
+
+type kind struct {
+ typ string
+ reg regClass
+}
+
+type void struct {
+ valAny
+}
+
+var voidKind = &kind{typ: "void", reg: regClassNone}
+
+func (void) kind() *kind { return voidKind }
+
+type Ptr[T Value] struct {
+ valGP
+}
+
+// Ptr is a Word
+var _ Word = Ptr[void]{}
+
+var ptrKinds = sync.Map{} // *kind -> *kind
+
+func (Ptr[T]) kind() *kind {
+ var x T
+ xk := x.kind()
+ pk, ok := ptrKinds.Load(xk)
+ if !ok {
+ k := &kind{typ: "Ptr[" + x.kind().typ + "]", reg: regClassGP}
+ pk, _ = ptrKinds.LoadOrStore(xk, k)
+ }
+ return pk.(*kind)
+}
+
+func (Ptr[T]) wrap(x *op) Ptr[T] {
+ var y Ptr[T]
+ y.initOp(x)
+ return y
+}
+
+func (x Ptr[T]) AddConst(off int) (y Ptr[T]) {
+ base := x.op
+ for base.op == "addConst" {
+ off += base.args[1].c.(int)
+ base = base.args[0]
+ }
+ y.initOp(&op{op: "addConst", kind: y.kind(), args: []*op{base, imm(off)}})
+ return y
+}
+
+func Deref[W wrap[T], T Value](ptr Ptr[W]) T {
+ var off int
+ base := ptr.op
+ for base.op == "addConst" {
+ off += base.args[1].c.(int)
+ base = base.args[0]
+ }
+
+ var y W
+ return y.wrap(&op{op: "deref", kind: y.kind(), args: []*op{base}, c: off})
+}
+
+type Array[T Value] struct {
+ valAny
+}
+
+func ConstArray[T Value](vals []T, name string) (y Array[T]) {
+ // TODO: This probably doesn't actually work because emitConst won't
+ // understand vals.
+ y.initOp(&op{op: "const", kind: y.kind(), c: vals, name: name})
+ return y
+}
+
+func (Array[T]) kind() *kind {
+ // TODO: Cache this like Ptr.kind.
+ var x T
+ return &kind{typ: "Array[" + x.kind().typ + "]", reg: regClassNone}
+}
+
+type valGP struct {
+ valAny
+}
+
+func (valGP) isWord() {}
+
+type valAny struct {
+ *op
+}
+
+func (v *valAny) initOp(x *op) {
+ if v.op != nil {
+ panic("double init of val")
+ }
+ if x.kind == nil {
+ panic("val missing kind")
+ }
+ v.op = x
+
+ // Figure out this value's function.
+ for _, arg := range x.args {
+ if fn := arg.fn; fn != nil {
+ fn.attach(x)
+ break
+ }
+ }
+}
+
+func (v valAny) getOp() *op {
+ return v.op
+}
diff --git a/src/internal/runtime/gc/malloc.go b/src/internal/runtime/gc/malloc.go
index bb54fff6869f9c..7c36a6bfbe942a 100644
--- a/src/internal/runtime/gc/malloc.go
+++ b/src/internal/runtime/gc/malloc.go
@@ -7,7 +7,8 @@ package gc
import "internal/goarch"
const (
- ptrBits = 8 * goarch.PtrSize
+ // PageWords is the number of pointer-words per page.
+ PageWords = PageSize / goarch.PtrSize
// A malloc header is functionally a single type pointer, but
// we need to use 8 here to ensure 8-byte alignment of allocations
@@ -43,7 +44,7 @@ const (
// would not be invariant to size-class rounding. Eschewing this property means a
// more complex check or possibly storing additional state to determine whether a
// span has malloc headers.
- MinSizeForMallocHeader = goarch.PtrSize * ptrBits
+ MinSizeForMallocHeader = goarch.PtrSize * goarch.PtrBits
// PageSize is the increment in which spans are managed.
PageSize = 1 << PageShift
diff --git a/src/internal/runtime/gc/mksizeclasses.go b/src/internal/runtime/gc/mksizeclasses.go
index ea48cda469c520..e7b848af023a08 100644
--- a/src/internal/runtime/gc/mksizeclasses.go
+++ b/src/internal/runtime/gc/mksizeclasses.go
@@ -52,7 +52,7 @@ func main() {
fmt.Fprintln(&b, "// Code generated by mksizeclasses.go; DO NOT EDIT.")
fmt.Fprintln(&b, "//go:generate go run mksizeclasses.go")
fmt.Fprintln(&b)
- fmt.Fprintln(&b, "package runtime")
+ fmt.Fprintln(&b, "package gc")
classes := makeClasses()
printComment(&b, classes)
@@ -287,6 +287,14 @@ func maxObjsPerSpan(classes []class) int {
return most
}
+func maxNPages(classes []class) int {
+ most := 0
+ for _, c := range classes[1:] {
+ most = max(most, c.npages)
+ }
+ return most
+}
+
func printClasses(w io.Writer, classes []class) {
fmt.Fprintln(w, "const (")
fmt.Fprintf(w, "MinHeapAlign = %d\n", minHeapAlign)
@@ -297,6 +305,7 @@ func printClasses(w io.Writer, classes []class) {
fmt.Fprintf(w, "NumSizeClasses = %d\n", len(classes))
fmt.Fprintf(w, "PageShift = %d\n", pageShift)
fmt.Fprintf(w, "MaxObjsPerSpan = %d\n", maxObjsPerSpan(classes))
+ fmt.Fprintf(w, "MaxSizeClassNPages = %d\n", maxNPages(classes))
fmt.Fprintln(w, ")")
fmt.Fprint(w, "var SizeClassToSize = [NumSizeClasses]uint16 {")
diff --git a/src/internal/runtime/gc/scan/expand_amd64.go b/src/internal/runtime/gc/scan/expand_amd64.go
new file mode 100644
index 00000000000000..9bea471abec6c5
--- /dev/null
+++ b/src/internal/runtime/gc/scan/expand_amd64.go
@@ -0,0 +1,22 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package scan
+
+import "internal/runtime/gc"
+
+// ExpandAVX512 expands each bit in packed into f consecutive bits in unpacked,
+// where f is the word size of objects in sizeClass.
+//
+// This is a testing entrypoint to the expanders used by scanSpanPacked*.
+//
+//go:noescape
+func ExpandAVX512(sizeClass int, packed *gc.ObjMask, unpacked *gc.PtrMask)
+
+// gcExpandersAVX512 holds the PCs of the expander functions. These cannot be
+// called directly, as they don't follow the Go ABI, but the table can be used
+// to check whether an expander exists for a given size class (a zero entry means none).
+//
+// It is defined in assembly.
+var gcExpandersAVX512 [len(gc.SizeClassToSize)]uintptr
diff --git a/src/internal/runtime/gc/scan/expand_amd64.s b/src/internal/runtime/gc/scan/expand_amd64.s
new file mode 100644
index 00000000000000..6b0be44cc10450
--- /dev/null
+++ b/src/internal/runtime/gc/scan/expand_amd64.s
@@ -0,0 +1,2631 @@
+// Code generated by mkasm.go. DO NOT EDIT.
+
+#include "go_asm.h"
+#include "textflag.h"
+
+GLOBL ·gcExpandersAVX512(SB), RODATA, $0x220
+DATA ·gcExpandersAVX512+0x00(SB)/8, $0
+DATA ·gcExpandersAVX512+0x08(SB)/8, $expandAVX512_1<>(SB)
+DATA ·gcExpandersAVX512+0x10(SB)/8, $expandAVX512_2<>(SB)
+DATA ·gcExpandersAVX512+0x18(SB)/8, $expandAVX512_3<>(SB)
+DATA ·gcExpandersAVX512+0x20(SB)/8, $expandAVX512_4<>(SB)
+DATA ·gcExpandersAVX512+0x28(SB)/8, $expandAVX512_6<>(SB)
+DATA ·gcExpandersAVX512+0x30(SB)/8, $expandAVX512_8<>(SB)
+DATA ·gcExpandersAVX512+0x38(SB)/8, $expandAVX512_10<>(SB)
+DATA ·gcExpandersAVX512+0x40(SB)/8, $expandAVX512_12<>(SB)
+DATA ·gcExpandersAVX512+0x48(SB)/8, $expandAVX512_14<>(SB)
+DATA ·gcExpandersAVX512+0x50(SB)/8, $expandAVX512_16<>(SB)
+DATA ·gcExpandersAVX512+0x58(SB)/8, $expandAVX512_18<>(SB)
+DATA ·gcExpandersAVX512+0x60(SB)/8, $expandAVX512_20<>(SB)
+DATA ·gcExpandersAVX512+0x68(SB)/8, $expandAVX512_22<>(SB)
+DATA ·gcExpandersAVX512+0x70(SB)/8, $expandAVX512_24<>(SB)
+DATA ·gcExpandersAVX512+0x78(SB)/8, $expandAVX512_26<>(SB)
+DATA ·gcExpandersAVX512+0x80(SB)/8, $expandAVX512_28<>(SB)
+DATA ·gcExpandersAVX512+0x88(SB)/8, $expandAVX512_30<>(SB)
+DATA ·gcExpandersAVX512+0x90(SB)/8, $expandAVX512_32<>(SB)
+DATA ·gcExpandersAVX512+0x98(SB)/8, $expandAVX512_36<>(SB)
+DATA ·gcExpandersAVX512+0xa0(SB)/8, $expandAVX512_40<>(SB)
+DATA ·gcExpandersAVX512+0xa8(SB)/8, $expandAVX512_44<>(SB)
+DATA ·gcExpandersAVX512+0xb0(SB)/8, $expandAVX512_48<>(SB)
+DATA ·gcExpandersAVX512+0xb8(SB)/8, $expandAVX512_52<>(SB)
+DATA ·gcExpandersAVX512+0xc0(SB)/8, $expandAVX512_56<>(SB)
+DATA ·gcExpandersAVX512+0xc8(SB)/8, $expandAVX512_60<>(SB)
+DATA ·gcExpandersAVX512+0xd0(SB)/8, $expandAVX512_64<>(SB)
+DATA ·gcExpandersAVX512+0xd8(SB)/8, $0
+DATA ·gcExpandersAVX512+0xe0(SB)/8, $0
+DATA ·gcExpandersAVX512+0xe8(SB)/8, $0
+DATA ·gcExpandersAVX512+0xf0(SB)/8, $0
+DATA ·gcExpandersAVX512+0xf8(SB)/8, $0
+DATA ·gcExpandersAVX512+0x100(SB)/8, $0
+DATA ·gcExpandersAVX512+0x108(SB)/8, $0
+DATA ·gcExpandersAVX512+0x110(SB)/8, $0
+DATA ·gcExpandersAVX512+0x118(SB)/8, $0
+DATA ·gcExpandersAVX512+0x120(SB)/8, $0
+DATA ·gcExpandersAVX512+0x128(SB)/8, $0
+DATA ·gcExpandersAVX512+0x130(SB)/8, $0
+DATA ·gcExpandersAVX512+0x138(SB)/8, $0
+DATA ·gcExpandersAVX512+0x140(SB)/8, $0
+DATA ·gcExpandersAVX512+0x148(SB)/8, $0
+DATA ·gcExpandersAVX512+0x150(SB)/8, $0
+DATA ·gcExpandersAVX512+0x158(SB)/8, $0
+DATA ·gcExpandersAVX512+0x160(SB)/8, $0
+DATA ·gcExpandersAVX512+0x168(SB)/8, $0
+DATA ·gcExpandersAVX512+0x170(SB)/8, $0
+DATA ·gcExpandersAVX512+0x178(SB)/8, $0
+DATA ·gcExpandersAVX512+0x180(SB)/8, $0
+DATA ·gcExpandersAVX512+0x188(SB)/8, $0
+DATA ·gcExpandersAVX512+0x190(SB)/8, $0
+DATA ·gcExpandersAVX512+0x198(SB)/8, $0
+DATA ·gcExpandersAVX512+0x1a0(SB)/8, $0
+DATA ·gcExpandersAVX512+0x1a8(SB)/8, $0
+DATA ·gcExpandersAVX512+0x1b0(SB)/8, $0
+DATA ·gcExpandersAVX512+0x1b8(SB)/8, $0
+DATA ·gcExpandersAVX512+0x1c0(SB)/8, $0
+DATA ·gcExpandersAVX512+0x1c8(SB)/8, $0
+DATA ·gcExpandersAVX512+0x1d0(SB)/8, $0
+DATA ·gcExpandersAVX512+0x1d8(SB)/8, $0
+DATA ·gcExpandersAVX512+0x1e0(SB)/8, $0
+DATA ·gcExpandersAVX512+0x1e8(SB)/8, $0
+DATA ·gcExpandersAVX512+0x1f0(SB)/8, $0
+DATA ·gcExpandersAVX512+0x1f8(SB)/8, $0
+DATA ·gcExpandersAVX512+0x200(SB)/8, $0
+DATA ·gcExpandersAVX512+0x208(SB)/8, $0
+DATA ·gcExpandersAVX512+0x210(SB)/8, $0
+DATA ·gcExpandersAVX512+0x218(SB)/8, $0
+
+TEXT expandAVX512_1<>(SB), NOSPLIT, $0-0
+ VMOVDQU64 (AX), Z1
+ VMOVDQU64 64(AX), Z2
+ RET
+
+GLOBL expandAVX512_2_inShuf0<>(SB), RODATA, $0x40
+DATA expandAVX512_2_inShuf0<>+0x00(SB)/8, $0x0706050403020100
+DATA expandAVX512_2_inShuf0<>+0x08(SB)/8, $0x0706050403020100
+DATA expandAVX512_2_inShuf0<>+0x10(SB)/8, $0x0f0e0d0c0b0a0908
+DATA expandAVX512_2_inShuf0<>+0x18(SB)/8, $0x0f0e0d0c0b0a0908
+DATA expandAVX512_2_inShuf0<>+0x20(SB)/8, $0x1716151413121110
+DATA expandAVX512_2_inShuf0<>+0x28(SB)/8, $0x1716151413121110
+DATA expandAVX512_2_inShuf0<>+0x30(SB)/8, $0x1f1e1d1c1b1a1918
+DATA expandAVX512_2_inShuf0<>+0x38(SB)/8, $0x1f1e1d1c1b1a1918
+
+GLOBL expandAVX512_2_mat0<>(SB), RODATA, $0x40
+DATA expandAVX512_2_mat0<>+0x00(SB)/8, $0x0101020204040808
+DATA expandAVX512_2_mat0<>+0x08(SB)/8, $0x1010202040408080
+DATA expandAVX512_2_mat0<>+0x10(SB)/8, $0x0101020204040808
+DATA expandAVX512_2_mat0<>+0x18(SB)/8, $0x1010202040408080
+DATA expandAVX512_2_mat0<>+0x20(SB)/8, $0x0101020204040808
+DATA expandAVX512_2_mat0<>+0x28(SB)/8, $0x1010202040408080
+DATA expandAVX512_2_mat0<>+0x30(SB)/8, $0x0101020204040808
+DATA expandAVX512_2_mat0<>+0x38(SB)/8, $0x1010202040408080
+
+GLOBL expandAVX512_2_inShuf1<>(SB), RODATA, $0x40
+DATA expandAVX512_2_inShuf1<>+0x00(SB)/8, $0x2726252423222120
+DATA expandAVX512_2_inShuf1<>+0x08(SB)/8, $0x2726252423222120
+DATA expandAVX512_2_inShuf1<>+0x10(SB)/8, $0x2f2e2d2c2b2a2928
+DATA expandAVX512_2_inShuf1<>+0x18(SB)/8, $0x2f2e2d2c2b2a2928
+DATA expandAVX512_2_inShuf1<>+0x20(SB)/8, $0x3736353433323130
+DATA expandAVX512_2_inShuf1<>+0x28(SB)/8, $0x3736353433323130
+DATA expandAVX512_2_inShuf1<>+0x30(SB)/8, $0x3f3e3d3c3b3a3938
+DATA expandAVX512_2_inShuf1<>+0x38(SB)/8, $0x3f3e3d3c3b3a3938
+
+GLOBL expandAVX512_2_outShufLo(SB), RODATA, $0x40
+DATA expandAVX512_2_outShufLo+0x00(SB)/8, $0x0b030a0209010800
+DATA expandAVX512_2_outShufLo+0x08(SB)/8, $0x0f070e060d050c04
+DATA expandAVX512_2_outShufLo+0x10(SB)/8, $0x1b131a1219111810
+DATA expandAVX512_2_outShufLo+0x18(SB)/8, $0x1f171e161d151c14
+DATA expandAVX512_2_outShufLo+0x20(SB)/8, $0x2b232a2229212820
+DATA expandAVX512_2_outShufLo+0x28(SB)/8, $0x2f272e262d252c24
+DATA expandAVX512_2_outShufLo+0x30(SB)/8, $0x3b333a3239313830
+DATA expandAVX512_2_outShufLo+0x38(SB)/8, $0x3f373e363d353c34
+
+TEXT expandAVX512_2<>(SB), NOSPLIT, $0-0
+ VMOVDQU64 expandAVX512_2_inShuf0<>(SB), Z0
+ VMOVDQU64 expandAVX512_2_mat0<>(SB), Z1
+ VMOVDQU64 expandAVX512_2_inShuf1<>(SB), Z2
+ VMOVDQU64 expandAVX512_2_outShufLo(SB), Z3
+ VMOVDQU64 (AX), Z4
+ VPERMB Z4, Z0, Z0
+ VGF2P8AFFINEQB $0, Z1, Z0, Z0
+ VPERMB Z4, Z2, Z2
+ VGF2P8AFFINEQB $0, Z1, Z2, Z2
+ VPERMB Z0, Z3, Z1
+ VPERMB Z2, Z3, Z2
+ RET
+
+GLOBL expandAVX512_3_inShuf0<>(SB), RODATA, $0x40
+DATA expandAVX512_3_inShuf0<>+0x00(SB)/8, $0x0706050403020100
+DATA expandAVX512_3_inShuf0<>+0x08(SB)/8, $0x0706050403020100
+DATA expandAVX512_3_inShuf0<>+0x10(SB)/8, $0x0706050403020100
+DATA expandAVX512_3_inShuf0<>+0x18(SB)/8, $0x0f0e0d0c0b0a0908
+DATA expandAVX512_3_inShuf0<>+0x20(SB)/8, $0x0f0e0d0c0b0a0908
+DATA expandAVX512_3_inShuf0<>+0x28(SB)/8, $0x0f0e0d0c0b0a0908
+DATA expandAVX512_3_inShuf0<>+0x30(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_3_inShuf0<>+0x38(SB)/8, $0xffffffffffffffff
+
+GLOBL expandAVX512_3_mat0<>(SB), RODATA, $0x40
+DATA expandAVX512_3_mat0<>+0x00(SB)/8, $0x0101010202020404
+DATA expandAVX512_3_mat0<>+0x08(SB)/8, $0x0408080810101020
+DATA expandAVX512_3_mat0<>+0x10(SB)/8, $0x2020404040808080
+DATA expandAVX512_3_mat0<>+0x18(SB)/8, $0x0101010202020404
+DATA expandAVX512_3_mat0<>+0x20(SB)/8, $0x0408080810101020
+DATA expandAVX512_3_mat0<>+0x28(SB)/8, $0x2020404040808080
+DATA expandAVX512_3_mat0<>+0x30(SB)/8, $0x0000000000000000
+DATA expandAVX512_3_mat0<>+0x38(SB)/8, $0x0000000000000000
+
+GLOBL expandAVX512_3_inShuf1<>(SB), RODATA, $0x40
+DATA expandAVX512_3_inShuf1<>+0x00(SB)/8, $0x1716151413121110
+DATA expandAVX512_3_inShuf1<>+0x08(SB)/8, $0x1716151413121110
+DATA expandAVX512_3_inShuf1<>+0x10(SB)/8, $0x1716151413121110
+DATA expandAVX512_3_inShuf1<>+0x18(SB)/8, $0x1f1e1d1c1b1a1918
+DATA expandAVX512_3_inShuf1<>+0x20(SB)/8, $0x1f1e1d1c1b1a1918
+DATA expandAVX512_3_inShuf1<>+0x28(SB)/8, $0x1f1e1d1c1b1a1918
+DATA expandAVX512_3_inShuf1<>+0x30(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_3_inShuf1<>+0x38(SB)/8, $0xffffffffffffffff
+
+GLOBL expandAVX512_3_inShuf2<>(SB), RODATA, $0x40
+DATA expandAVX512_3_inShuf2<>+0x00(SB)/8, $0x2726252423222120
+DATA expandAVX512_3_inShuf2<>+0x08(SB)/8, $0x2726252423222120
+DATA expandAVX512_3_inShuf2<>+0x10(SB)/8, $0x2726252423222120
+DATA expandAVX512_3_inShuf2<>+0x18(SB)/8, $0xffffffffff2a2928
+DATA expandAVX512_3_inShuf2<>+0x20(SB)/8, $0xffffffffff2a2928
+DATA expandAVX512_3_inShuf2<>+0x28(SB)/8, $0xffffffffffff2928
+DATA expandAVX512_3_inShuf2<>+0x30(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_3_inShuf2<>+0x38(SB)/8, $0xffffffffffffffff
+
+GLOBL expandAVX512_3_outShufLo(SB), RODATA, $0x40
+DATA expandAVX512_3_outShufLo+0x00(SB)/8, $0x0a02110901100800
+DATA expandAVX512_3_outShufLo+0x08(SB)/8, $0x05140c04130b0312
+DATA expandAVX512_3_outShufLo+0x10(SB)/8, $0x170f07160e06150d
+DATA expandAVX512_3_outShufLo+0x18(SB)/8, $0x221a292119282018
+DATA expandAVX512_3_outShufLo+0x20(SB)/8, $0x1d2c241c2b231b2a
+DATA expandAVX512_3_outShufLo+0x28(SB)/8, $0x2f271f2e261e2d25
+DATA expandAVX512_3_outShufLo+0x30(SB)/8, $0x4a42514941504840
+DATA expandAVX512_3_outShufLo+0x38(SB)/8, $0x45544c44534b4352
+
+GLOBL expandAVX512_3_outShufHi(SB), RODATA, $0x40
+DATA expandAVX512_3_outShufHi+0x00(SB)/8, $0x170f07160e06150d
+DATA expandAVX512_3_outShufHi+0x08(SB)/8, $0x221a292119282018
+DATA expandAVX512_3_outShufHi+0x10(SB)/8, $0x1d2c241c2b231b2a
+DATA expandAVX512_3_outShufHi+0x18(SB)/8, $0x2f271f2e261e2d25
+DATA expandAVX512_3_outShufHi+0x20(SB)/8, $0x4a42514941504840
+DATA expandAVX512_3_outShufHi+0x28(SB)/8, $0x45544c44534b4352
+DATA expandAVX512_3_outShufHi+0x30(SB)/8, $0x574f47564e46554d
+DATA expandAVX512_3_outShufHi+0x38(SB)/8, $0x625a696159686058
+
+TEXT expandAVX512_3<>(SB), NOSPLIT, $0-0
+ VMOVDQU64 expandAVX512_3_inShuf0<>(SB), Z0
+ VMOVDQU64 expandAVX512_3_mat0<>(SB), Z3
+ VMOVDQU64 expandAVX512_3_inShuf1<>(SB), Z4
+ VMOVDQU64 expandAVX512_3_inShuf2<>(SB), Z5
+ VMOVDQU64 expandAVX512_3_outShufLo(SB), Z1
+ VMOVDQU64 expandAVX512_3_outShufHi(SB), Z2
+ VMOVDQU64 (AX), Z6
+ VPERMB Z6, Z0, Z0
+ VGF2P8AFFINEQB $0, Z3, Z0, Z0
+ VPERMB Z6, Z4, Z4
+ VGF2P8AFFINEQB $0, Z3, Z4, Z4
+ VPERMB Z6, Z5, Z5
+ VGF2P8AFFINEQB $0, Z3, Z5, Z3
+ VPERMI2B Z4, Z0, Z1
+ VPERMI2B Z3, Z4, Z2
+ RET
+
+GLOBL expandAVX512_4_inShuf0<>(SB), RODATA, $0x40
+DATA expandAVX512_4_inShuf0<>+0x00(SB)/8, $0x0706050403020100
+DATA expandAVX512_4_inShuf0<>+0x08(SB)/8, $0x0706050403020100
+DATA expandAVX512_4_inShuf0<>+0x10(SB)/8, $0x0706050403020100
+DATA expandAVX512_4_inShuf0<>+0x18(SB)/8, $0x0706050403020100
+DATA expandAVX512_4_inShuf0<>+0x20(SB)/8, $0x0f0e0d0c0b0a0908
+DATA expandAVX512_4_inShuf0<>+0x28(SB)/8, $0x0f0e0d0c0b0a0908
+DATA expandAVX512_4_inShuf0<>+0x30(SB)/8, $0x0f0e0d0c0b0a0908
+DATA expandAVX512_4_inShuf0<>+0x38(SB)/8, $0x0f0e0d0c0b0a0908
+
+GLOBL expandAVX512_4_mat0<>(SB), RODATA, $0x40
+DATA expandAVX512_4_mat0<>+0x00(SB)/8, $0x0101010102020202
+DATA expandAVX512_4_mat0<>+0x08(SB)/8, $0x0404040408080808
+DATA expandAVX512_4_mat0<>+0x10(SB)/8, $0x1010101020202020
+DATA expandAVX512_4_mat0<>+0x18(SB)/8, $0x4040404080808080
+DATA expandAVX512_4_mat0<>+0x20(SB)/8, $0x0101010102020202
+DATA expandAVX512_4_mat0<>+0x28(SB)/8, $0x0404040408080808
+DATA expandAVX512_4_mat0<>+0x30(SB)/8, $0x1010101020202020
+DATA expandAVX512_4_mat0<>+0x38(SB)/8, $0x4040404080808080
+
+GLOBL expandAVX512_4_inShuf1<>(SB), RODATA, $0x40
+DATA expandAVX512_4_inShuf1<>+0x00(SB)/8, $0x1716151413121110
+DATA expandAVX512_4_inShuf1<>+0x08(SB)/8, $0x1716151413121110
+DATA expandAVX512_4_inShuf1<>+0x10(SB)/8, $0x1716151413121110
+DATA expandAVX512_4_inShuf1<>+0x18(SB)/8, $0x1716151413121110
+DATA expandAVX512_4_inShuf1<>+0x20(SB)/8, $0x1f1e1d1c1b1a1918
+DATA expandAVX512_4_inShuf1<>+0x28(SB)/8, $0x1f1e1d1c1b1a1918
+DATA expandAVX512_4_inShuf1<>+0x30(SB)/8, $0x1f1e1d1c1b1a1918
+DATA expandAVX512_4_inShuf1<>+0x38(SB)/8, $0x1f1e1d1c1b1a1918
+
+GLOBL expandAVX512_4_outShufLo(SB), RODATA, $0x40
+DATA expandAVX512_4_outShufLo+0x00(SB)/8, $0x1911090118100800
+DATA expandAVX512_4_outShufLo+0x08(SB)/8, $0x1b130b031a120a02
+DATA expandAVX512_4_outShufLo+0x10(SB)/8, $0x1d150d051c140c04
+DATA expandAVX512_4_outShufLo+0x18(SB)/8, $0x1f170f071e160e06
+DATA expandAVX512_4_outShufLo+0x20(SB)/8, $0x3931292138302820
+DATA expandAVX512_4_outShufLo+0x28(SB)/8, $0x3b332b233a322a22
+DATA expandAVX512_4_outShufLo+0x30(SB)/8, $0x3d352d253c342c24
+DATA expandAVX512_4_outShufLo+0x38(SB)/8, $0x3f372f273e362e26
+
+TEXT expandAVX512_4<>(SB), NOSPLIT, $0-0
+ VMOVDQU64 expandAVX512_4_inShuf0<>(SB), Z0
+ VMOVDQU64 expandAVX512_4_mat0<>(SB), Z1
+ VMOVDQU64 expandAVX512_4_inShuf1<>(SB), Z2
+ VMOVDQU64 expandAVX512_4_outShufLo(SB), Z3
+ VMOVDQU64 (AX), Z4
+ VPERMB Z4, Z0, Z0
+ VGF2P8AFFINEQB $0, Z1, Z0, Z0
+ VPERMB Z4, Z2, Z2
+ VGF2P8AFFINEQB $0, Z1, Z2, Z2
+ VPERMB Z0, Z3, Z1
+ VPERMB Z2, Z3, Z2
+ RET
+
+GLOBL expandAVX512_6_inShuf0<>(SB), RODATA, $0x40
+DATA expandAVX512_6_inShuf0<>+0x00(SB)/8, $0x0706050403020100
+DATA expandAVX512_6_inShuf0<>+0x08(SB)/8, $0x0706050403020100
+DATA expandAVX512_6_inShuf0<>+0x10(SB)/8, $0x0706050403020100
+DATA expandAVX512_6_inShuf0<>+0x18(SB)/8, $0x0706050403020100
+DATA expandAVX512_6_inShuf0<>+0x20(SB)/8, $0x0706050403020100
+DATA expandAVX512_6_inShuf0<>+0x28(SB)/8, $0x0706050403020100
+DATA expandAVX512_6_inShuf0<>+0x30(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_6_inShuf0<>+0x38(SB)/8, $0xffffffffffffffff
+
+GLOBL expandAVX512_6_mat0<>(SB), RODATA, $0x40
+DATA expandAVX512_6_mat0<>+0x00(SB)/8, $0x0101010101010202
+DATA expandAVX512_6_mat0<>+0x08(SB)/8, $0x0202020204040404
+DATA expandAVX512_6_mat0<>+0x10(SB)/8, $0x0404080808080808
+DATA expandAVX512_6_mat0<>+0x18(SB)/8, $0x1010101010102020
+DATA expandAVX512_6_mat0<>+0x20(SB)/8, $0x2020202040404040
+DATA expandAVX512_6_mat0<>+0x28(SB)/8, $0x4040808080808080
+DATA expandAVX512_6_mat0<>+0x30(SB)/8, $0x0000000000000000
+DATA expandAVX512_6_mat0<>+0x38(SB)/8, $0x0000000000000000
+
+GLOBL expandAVX512_6_inShuf1<>(SB), RODATA, $0x40
+DATA expandAVX512_6_inShuf1<>+0x00(SB)/8, $0x0f0e0d0c0b0a0908
+DATA expandAVX512_6_inShuf1<>+0x08(SB)/8, $0x0f0e0d0c0b0a0908
+DATA expandAVX512_6_inShuf1<>+0x10(SB)/8, $0x0f0e0d0c0b0a0908
+DATA expandAVX512_6_inShuf1<>+0x18(SB)/8, $0x0f0e0d0c0b0a0908
+DATA expandAVX512_6_inShuf1<>+0x20(SB)/8, $0x0f0e0d0c0b0a0908
+DATA expandAVX512_6_inShuf1<>+0x28(SB)/8, $0x0f0e0d0c0b0a0908
+DATA expandAVX512_6_inShuf1<>+0x30(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_6_inShuf1<>+0x38(SB)/8, $0xffffffffffffffff
+
+GLOBL expandAVX512_6_inShuf2<>(SB), RODATA, $0x40
+DATA expandAVX512_6_inShuf2<>+0x00(SB)/8, $0xffff151413121110
+DATA expandAVX512_6_inShuf2<>+0x08(SB)/8, $0xffff151413121110
+DATA expandAVX512_6_inShuf2<>+0x10(SB)/8, $0xffffff1413121110
+DATA expandAVX512_6_inShuf2<>+0x18(SB)/8, $0xffffff1413121110
+DATA expandAVX512_6_inShuf2<>+0x20(SB)/8, $0xffffff1413121110
+DATA expandAVX512_6_inShuf2<>+0x28(SB)/8, $0xffffff1413121110
+DATA expandAVX512_6_inShuf2<>+0x30(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_6_inShuf2<>+0x38(SB)/8, $0xffffffffffffffff
+
+GLOBL expandAVX512_6_outShufLo(SB), RODATA, $0x40
+DATA expandAVX512_6_outShufLo+0x00(SB)/8, $0x0901282018100800
+DATA expandAVX512_6_outShufLo+0x08(SB)/8, $0x1a120a0229211911
+DATA expandAVX512_6_outShufLo+0x10(SB)/8, $0x2b231b130b032a22
+DATA expandAVX512_6_outShufLo+0x18(SB)/8, $0x0d052c241c140c04
+DATA expandAVX512_6_outShufLo+0x20(SB)/8, $0x1e160e062d251d15
+DATA expandAVX512_6_outShufLo+0x28(SB)/8, $0x2f271f170f072e26
+DATA expandAVX512_6_outShufLo+0x30(SB)/8, $0x4941686058504840
+DATA expandAVX512_6_outShufLo+0x38(SB)/8, $0x5a524a4269615951
+
+GLOBL expandAVX512_6_outShufHi(SB), RODATA, $0x40
+DATA expandAVX512_6_outShufHi+0x00(SB)/8, $0x2b231b130b032a22
+DATA expandAVX512_6_outShufHi+0x08(SB)/8, $0x0d052c241c140c04
+DATA expandAVX512_6_outShufHi+0x10(SB)/8, $0x1e160e062d251d15
+DATA expandAVX512_6_outShufHi+0x18(SB)/8, $0x2f271f170f072e26
+DATA expandAVX512_6_outShufHi+0x20(SB)/8, $0x4941686058504840
+DATA expandAVX512_6_outShufHi+0x28(SB)/8, $0x5a524a4269615951
+DATA expandAVX512_6_outShufHi+0x30(SB)/8, $0x6b635b534b436a62
+DATA expandAVX512_6_outShufHi+0x38(SB)/8, $0x4d456c645c544c44
+
+TEXT expandAVX512_6<>(SB), NOSPLIT, $0-0
+ VMOVDQU64 expandAVX512_6_inShuf0<>(SB), Z0
+ VMOVDQU64 expandAVX512_6_mat0<>(SB), Z3
+ VMOVDQU64 expandAVX512_6_inShuf1<>(SB), Z4
+ VMOVDQU64 expandAVX512_6_inShuf2<>(SB), Z5
+ VMOVDQU64 expandAVX512_6_outShufLo(SB), Z1
+ VMOVDQU64 expandAVX512_6_outShufHi(SB), Z2
+ VMOVDQU64 (AX), Z6
+ VPERMB Z6, Z0, Z0
+ VGF2P8AFFINEQB $0, Z3, Z0, Z0
+ VPERMB Z6, Z4, Z4
+ VGF2P8AFFINEQB $0, Z3, Z4, Z4
+ VPERMB Z6, Z5, Z5
+ VGF2P8AFFINEQB $0, Z3, Z5, Z3
+ VPERMI2B Z4, Z0, Z1
+ VPERMI2B Z3, Z4, Z2
+ RET
+
+GLOBL expandAVX512_8_inShuf0<>(SB), RODATA, $0x40
+DATA expandAVX512_8_inShuf0<>+0x00(SB)/8, $0x0706050403020100
+DATA expandAVX512_8_inShuf0<>+0x08(SB)/8, $0x0706050403020100
+DATA expandAVX512_8_inShuf0<>+0x10(SB)/8, $0x0706050403020100
+DATA expandAVX512_8_inShuf0<>+0x18(SB)/8, $0x0706050403020100
+DATA expandAVX512_8_inShuf0<>+0x20(SB)/8, $0x0706050403020100
+DATA expandAVX512_8_inShuf0<>+0x28(SB)/8, $0x0706050403020100
+DATA expandAVX512_8_inShuf0<>+0x30(SB)/8, $0x0706050403020100
+DATA expandAVX512_8_inShuf0<>+0x38(SB)/8, $0x0706050403020100
+
+GLOBL expandAVX512_8_mat0<>(SB), RODATA, $0x40
+DATA expandAVX512_8_mat0<>+0x00(SB)/8, $0x0101010101010101
+DATA expandAVX512_8_mat0<>+0x08(SB)/8, $0x0202020202020202
+DATA expandAVX512_8_mat0<>+0x10(SB)/8, $0x0404040404040404
+DATA expandAVX512_8_mat0<>+0x18(SB)/8, $0x0808080808080808
+DATA expandAVX512_8_mat0<>+0x20(SB)/8, $0x1010101010101010
+DATA expandAVX512_8_mat0<>+0x28(SB)/8, $0x2020202020202020
+DATA expandAVX512_8_mat0<>+0x30(SB)/8, $0x4040404040404040
+DATA expandAVX512_8_mat0<>+0x38(SB)/8, $0x8080808080808080
+
+GLOBL expandAVX512_8_inShuf1<>(SB), RODATA, $0x40
+DATA expandAVX512_8_inShuf1<>+0x00(SB)/8, $0x0f0e0d0c0b0a0908
+DATA expandAVX512_8_inShuf1<>+0x08(SB)/8, $0x0f0e0d0c0b0a0908
+DATA expandAVX512_8_inShuf1<>+0x10(SB)/8, $0x0f0e0d0c0b0a0908
+DATA expandAVX512_8_inShuf1<>+0x18(SB)/8, $0x0f0e0d0c0b0a0908
+DATA expandAVX512_8_inShuf1<>+0x20(SB)/8, $0x0f0e0d0c0b0a0908
+DATA expandAVX512_8_inShuf1<>+0x28(SB)/8, $0x0f0e0d0c0b0a0908
+DATA expandAVX512_8_inShuf1<>+0x30(SB)/8, $0x0f0e0d0c0b0a0908
+DATA expandAVX512_8_inShuf1<>+0x38(SB)/8, $0x0f0e0d0c0b0a0908
+
+GLOBL expandAVX512_8_outShufLo(SB), RODATA, $0x40
+DATA expandAVX512_8_outShufLo+0x00(SB)/8, $0x3830282018100800
+DATA expandAVX512_8_outShufLo+0x08(SB)/8, $0x3931292119110901
+DATA expandAVX512_8_outShufLo+0x10(SB)/8, $0x3a322a221a120a02
+DATA expandAVX512_8_outShufLo+0x18(SB)/8, $0x3b332b231b130b03
+DATA expandAVX512_8_outShufLo+0x20(SB)/8, $0x3c342c241c140c04
+DATA expandAVX512_8_outShufLo+0x28(SB)/8, $0x3d352d251d150d05
+DATA expandAVX512_8_outShufLo+0x30(SB)/8, $0x3e362e261e160e06
+DATA expandAVX512_8_outShufLo+0x38(SB)/8, $0x3f372f271f170f07
+
+TEXT expandAVX512_8<>(SB), NOSPLIT, $0-0
+ VMOVDQU64 expandAVX512_8_inShuf0<>(SB), Z0
+ VMOVDQU64 expandAVX512_8_mat0<>(SB), Z1
+ VMOVDQU64 expandAVX512_8_inShuf1<>(SB), Z2
+ VMOVDQU64 expandAVX512_8_outShufLo(SB), Z3
+ VMOVDQU64 (AX), Z4
+ VPERMB Z4, Z0, Z0
+ VGF2P8AFFINEQB $0, Z1, Z0, Z0
+ VPERMB Z4, Z2, Z2
+ VGF2P8AFFINEQB $0, Z1, Z2, Z2
+ VPERMB Z0, Z3, Z1
+ VPERMB Z2, Z3, Z2
+ RET
+
+GLOBL expandAVX512_10_inShuf0<>(SB), RODATA, $0x40
+DATA expandAVX512_10_inShuf0<>+0x00(SB)/8, $0xff06050403020100
+DATA expandAVX512_10_inShuf0<>+0x08(SB)/8, $0xff06050403020100
+DATA expandAVX512_10_inShuf0<>+0x10(SB)/8, $0xff06050403020100
+DATA expandAVX512_10_inShuf0<>+0x18(SB)/8, $0xff06050403020100
+DATA expandAVX512_10_inShuf0<>+0x20(SB)/8, $0xffff050403020100
+DATA expandAVX512_10_inShuf0<>+0x28(SB)/8, $0xffff050403020100
+DATA expandAVX512_10_inShuf0<>+0x30(SB)/8, $0xffff050403020100
+DATA expandAVX512_10_inShuf0<>+0x38(SB)/8, $0xffff050403020100
+
+GLOBL expandAVX512_10_mat0<>(SB), RODATA, $0x40
+DATA expandAVX512_10_mat0<>+0x00(SB)/8, $0x0101010101010101
+DATA expandAVX512_10_mat0<>+0x08(SB)/8, $0x0101020202020202
+DATA expandAVX512_10_mat0<>+0x10(SB)/8, $0x0202020204040404
+DATA expandAVX512_10_mat0<>+0x18(SB)/8, $0x0404040404040808
+DATA expandAVX512_10_mat0<>+0x20(SB)/8, $0x0808080808080808
+DATA expandAVX512_10_mat0<>+0x28(SB)/8, $0x1010101010101010
+DATA expandAVX512_10_mat0<>+0x30(SB)/8, $0x1010202020202020
+DATA expandAVX512_10_mat0<>+0x38(SB)/8, $0x2020202040404040
+
+GLOBL expandAVX512_10_inShuf1<>(SB), RODATA, $0x40
+DATA expandAVX512_10_inShuf1<>+0x00(SB)/8, $0xffff050403020100
+DATA expandAVX512_10_inShuf1<>+0x08(SB)/8, $0xffff050403020100
+DATA expandAVX512_10_inShuf1<>+0x10(SB)/8, $0xff0c0b0a09080706
+DATA expandAVX512_10_inShuf1<>+0x18(SB)/8, $0xff0c0b0a09080706
+DATA expandAVX512_10_inShuf1<>+0x20(SB)/8, $0xff0c0b0a09080706
+DATA expandAVX512_10_inShuf1<>+0x28(SB)/8, $0xff0c0b0a09080706
+DATA expandAVX512_10_inShuf1<>+0x30(SB)/8, $0xffff0b0a09080706
+DATA expandAVX512_10_inShuf1<>+0x38(SB)/8, $0xffff0b0a09080706
+
+GLOBL expandAVX512_10_mat1<>(SB), RODATA, $0x40
+DATA expandAVX512_10_mat1<>+0x00(SB)/8, $0x4040404040408080
+DATA expandAVX512_10_mat1<>+0x08(SB)/8, $0x8080808080808080
+DATA expandAVX512_10_mat1<>+0x10(SB)/8, $0x0808080808080808
+DATA expandAVX512_10_mat1<>+0x18(SB)/8, $0x1010101010101010
+DATA expandAVX512_10_mat1<>+0x20(SB)/8, $0x1010202020202020
+DATA expandAVX512_10_mat1<>+0x28(SB)/8, $0x2020202040404040
+DATA expandAVX512_10_mat1<>+0x30(SB)/8, $0x4040404040408080
+DATA expandAVX512_10_mat1<>+0x38(SB)/8, $0x8080808080808080
+
+GLOBL expandAVX512_10_inShuf2<>(SB), RODATA, $0x40
+DATA expandAVX512_10_inShuf2<>+0x00(SB)/8, $0xffff0c0b0a090807
+DATA expandAVX512_10_inShuf2<>+0x08(SB)/8, $0xffff0c0b0a090807
+DATA expandAVX512_10_inShuf2<>+0x10(SB)/8, $0xffff0c0b0a090807
+DATA expandAVX512_10_inShuf2<>+0x18(SB)/8, $0xffff0c0b0a090807
+DATA expandAVX512_10_inShuf2<>+0x20(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_10_inShuf2<>+0x28(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_10_inShuf2<>+0x30(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_10_inShuf2<>+0x38(SB)/8, $0xffffffffffffffff
+
+GLOBL expandAVX512_10_mat2<>(SB), RODATA, $0x40
+DATA expandAVX512_10_mat2<>+0x00(SB)/8, $0x0101010101010101
+DATA expandAVX512_10_mat2<>+0x08(SB)/8, $0x0101020202020202
+DATA expandAVX512_10_mat2<>+0x10(SB)/8, $0x0202020204040404
+DATA expandAVX512_10_mat2<>+0x18(SB)/8, $0x0404040404040808
+DATA expandAVX512_10_mat2<>+0x20(SB)/8, $0x0000000000000000
+DATA expandAVX512_10_mat2<>+0x28(SB)/8, $0x0000000000000000
+DATA expandAVX512_10_mat2<>+0x30(SB)/8, $0x0000000000000000
+DATA expandAVX512_10_mat2<>+0x38(SB)/8, $0x0000000000000000
+
+GLOBL expandAVX512_10_outShufLo(SB), RODATA, $0x40
+DATA expandAVX512_10_outShufLo+0x00(SB)/8, $0x3830282018100800
+DATA expandAVX512_10_outShufLo+0x08(SB)/8, $0x2921191109014840
+DATA expandAVX512_10_outShufLo+0x10(SB)/8, $0x1a120a0249413931
+DATA expandAVX512_10_outShufLo+0x18(SB)/8, $0x0b034a423a322a22
+DATA expandAVX512_10_outShufLo+0x20(SB)/8, $0x4b433b332b231b13
+DATA expandAVX512_10_outShufLo+0x28(SB)/8, $0x3c342c241c140c04
+DATA expandAVX512_10_outShufLo+0x30(SB)/8, $0x2d251d150d054c44
+DATA expandAVX512_10_outShufLo+0x38(SB)/8, $0x1e160e064d453d35
+
+GLOBL expandAVX512_10_outShufHi(SB), RODATA, $0x40
+DATA expandAVX512_10_outShufHi+0x00(SB)/8, $0x4840383028201810
+DATA expandAVX512_10_outShufHi+0x08(SB)/8, $0x3931292119115850
+DATA expandAVX512_10_outShufHi+0x10(SB)/8, $0x2a221a1259514941
+DATA expandAVX512_10_outShufHi+0x18(SB)/8, $0x1b135a524a423a32
+DATA expandAVX512_10_outShufHi+0x20(SB)/8, $0x5b534b433b332b23
+DATA expandAVX512_10_outShufHi+0x28(SB)/8, $0x4c443c342c241c14
+DATA expandAVX512_10_outShufHi+0x30(SB)/8, $0x3d352d251d155c54
+DATA expandAVX512_10_outShufHi+0x38(SB)/8, $0x2e261e165d554d45
+
+TEXT expandAVX512_10<>(SB), NOSPLIT, $0-0
+ VMOVDQU64 expandAVX512_10_inShuf0<>(SB), Z0
+ VMOVDQU64 expandAVX512_10_inShuf1<>(SB), Z3
+ VMOVDQU64 expandAVX512_10_inShuf2<>(SB), Z4
+ VMOVDQU64 expandAVX512_10_outShufLo(SB), Z1
+ VMOVDQU64 expandAVX512_10_outShufHi(SB), Z2
+ VMOVDQU64 (AX), Z5
+ VPERMB Z5, Z0, Z0
+ VGF2P8AFFINEQB $0, expandAVX512_10_mat0<>(SB), Z0, Z0
+ VPERMB Z5, Z3, Z3
+ VGF2P8AFFINEQB $0, expandAVX512_10_mat1<>(SB), Z3, Z3
+ VPERMB Z5, Z4, Z4
+ VGF2P8AFFINEQB $0, expandAVX512_10_mat2<>(SB), Z4, Z4
+ VPERMI2B Z3, Z0, Z1
+ VPERMI2B Z4, Z3, Z2
+ RET
+
+GLOBL expandAVX512_12_inShuf0<>(SB), RODATA, $0x40
+DATA expandAVX512_12_inShuf0<>+0x00(SB)/8, $0xffff050403020100
+DATA expandAVX512_12_inShuf0<>+0x08(SB)/8, $0xffff050403020100
+DATA expandAVX512_12_inShuf0<>+0x10(SB)/8, $0xffff050403020100
+DATA expandAVX512_12_inShuf0<>+0x18(SB)/8, $0xffff050403020100
+DATA expandAVX512_12_inShuf0<>+0x20(SB)/8, $0xffffff0403020100
+DATA expandAVX512_12_inShuf0<>+0x28(SB)/8, $0xffffff0403020100
+DATA expandAVX512_12_inShuf0<>+0x30(SB)/8, $0xffffff0403020100
+DATA expandAVX512_12_inShuf0<>+0x38(SB)/8, $0xffffff0403020100
+
+GLOBL expandAVX512_12_mat0<>(SB), RODATA, $0x40
+DATA expandAVX512_12_mat0<>+0x00(SB)/8, $0x0101010101010101
+DATA expandAVX512_12_mat0<>+0x08(SB)/8, $0x0101010102020202
+DATA expandAVX512_12_mat0<>+0x10(SB)/8, $0x0202020202020202
+DATA expandAVX512_12_mat0<>+0x18(SB)/8, $0x0404040404040404
+DATA expandAVX512_12_mat0<>+0x20(SB)/8, $0x0404040408080808
+DATA expandAVX512_12_mat0<>+0x28(SB)/8, $0x0808080808080808
+DATA expandAVX512_12_mat0<>+0x30(SB)/8, $0x1010101010101010
+DATA expandAVX512_12_mat0<>+0x38(SB)/8, $0x1010101020202020
+
+GLOBL expandAVX512_12_inShuf1<>(SB), RODATA, $0x40
+DATA expandAVX512_12_inShuf1<>+0x00(SB)/8, $0xffffff0403020100
+DATA expandAVX512_12_inShuf1<>+0x08(SB)/8, $0xffffff0403020100
+DATA expandAVX512_12_inShuf1<>+0x10(SB)/8, $0xffffff0403020100
+DATA expandAVX512_12_inShuf1<>+0x18(SB)/8, $0xffffff0403020100
+DATA expandAVX512_12_inShuf1<>+0x20(SB)/8, $0xffff0a0908070605
+DATA expandAVX512_12_inShuf1<>+0x28(SB)/8, $0xffff0a0908070605
+DATA expandAVX512_12_inShuf1<>+0x30(SB)/8, $0xffff0a0908070605
+DATA expandAVX512_12_inShuf1<>+0x38(SB)/8, $0xffff0a0908070605
+
+GLOBL expandAVX512_12_mat1<>(SB), RODATA, $0x40
+DATA expandAVX512_12_mat1<>+0x00(SB)/8, $0x2020202020202020
+DATA expandAVX512_12_mat1<>+0x08(SB)/8, $0x4040404040404040
+DATA expandAVX512_12_mat1<>+0x10(SB)/8, $0x4040404080808080
+DATA expandAVX512_12_mat1<>+0x18(SB)/8, $0x8080808080808080
+DATA expandAVX512_12_mat1<>+0x20(SB)/8, $0x0404040408080808
+DATA expandAVX512_12_mat1<>+0x28(SB)/8, $0x0808080808080808
+DATA expandAVX512_12_mat1<>+0x30(SB)/8, $0x1010101010101010
+DATA expandAVX512_12_mat1<>+0x38(SB)/8, $0x1010101020202020
+
+GLOBL expandAVX512_12_inShuf2<>(SB), RODATA, $0x40
+DATA expandAVX512_12_inShuf2<>+0x00(SB)/8, $0xffffff0908070605
+DATA expandAVX512_12_inShuf2<>+0x08(SB)/8, $0xffffff0908070605
+DATA expandAVX512_12_inShuf2<>+0x10(SB)/8, $0xffffff0908070605
+DATA expandAVX512_12_inShuf2<>+0x18(SB)/8, $0xffffff0908070605
+DATA expandAVX512_12_inShuf2<>+0x20(SB)/8, $0xffffff0a09080706
+DATA expandAVX512_12_inShuf2<>+0x28(SB)/8, $0xffffff0a09080706
+DATA expandAVX512_12_inShuf2<>+0x30(SB)/8, $0xffffff0a09080706
+DATA expandAVX512_12_inShuf2<>+0x38(SB)/8, $0xffffff0a09080706
+
+GLOBL expandAVX512_12_mat2<>(SB), RODATA, $0x40
+DATA expandAVX512_12_mat2<>+0x00(SB)/8, $0x2020202020202020
+DATA expandAVX512_12_mat2<>+0x08(SB)/8, $0x4040404040404040
+DATA expandAVX512_12_mat2<>+0x10(SB)/8, $0x4040404080808080
+DATA expandAVX512_12_mat2<>+0x18(SB)/8, $0x8080808080808080
+DATA expandAVX512_12_mat2<>+0x20(SB)/8, $0x0101010101010101
+DATA expandAVX512_12_mat2<>+0x28(SB)/8, $0x0101010102020202
+DATA expandAVX512_12_mat2<>+0x30(SB)/8, $0x0202020202020202
+DATA expandAVX512_12_mat2<>+0x38(SB)/8, $0x0404040404040404
+
+GLOBL expandAVX512_12_outShufLo(SB), RODATA, $0x40
+DATA expandAVX512_12_outShufLo+0x00(SB)/8, $0x3830282018100800
+DATA expandAVX512_12_outShufLo+0x08(SB)/8, $0x1911090158504840
+DATA expandAVX512_12_outShufLo+0x10(SB)/8, $0x5951494139312921
+DATA expandAVX512_12_outShufLo+0x18(SB)/8, $0x3a322a221a120a02
+DATA expandAVX512_12_outShufLo+0x20(SB)/8, $0x1b130b035a524a42
+DATA expandAVX512_12_outShufLo+0x28(SB)/8, $0x5b534b433b332b23
+DATA expandAVX512_12_outShufLo+0x30(SB)/8, $0x3c342c241c140c04
+DATA expandAVX512_12_outShufLo+0x38(SB)/8, $0x1d150d055c544c44
+
+GLOBL expandAVX512_12_outShufHi(SB), RODATA, $0x40
+DATA expandAVX512_12_outShufHi+0x00(SB)/8, $0x5850484038302820
+DATA expandAVX512_12_outShufHi+0x08(SB)/8, $0x3931292178706860
+DATA expandAVX512_12_outShufHi+0x10(SB)/8, $0x7971696159514941
+DATA expandAVX512_12_outShufHi+0x18(SB)/8, $0x5a524a423a322a22
+DATA expandAVX512_12_outShufHi+0x20(SB)/8, $0x3b332b237a726a62
+DATA expandAVX512_12_outShufHi+0x28(SB)/8, $0x7b736b635b534b43
+DATA expandAVX512_12_outShufHi+0x30(SB)/8, $0x5c544c443c342c24
+DATA expandAVX512_12_outShufHi+0x38(SB)/8, $0x3d352d257c746c64
+
+TEXT expandAVX512_12<>(SB), NOSPLIT, $0-0
+ VMOVDQU64 expandAVX512_12_inShuf0<>(SB), Z0
+ VMOVDQU64 expandAVX512_12_inShuf1<>(SB), Z3
+ VMOVDQU64 expandAVX512_12_inShuf2<>(SB), Z4
+ VMOVDQU64 expandAVX512_12_outShufLo(SB), Z1
+ VMOVDQU64 expandAVX512_12_outShufHi(SB), Z2
+ VMOVDQU64 (AX), Z5
+ VPERMB Z5, Z0, Z0
+ VGF2P8AFFINEQB $0, expandAVX512_12_mat0<>(SB), Z0, Z0
+ VPERMB Z5, Z3, Z3
+ VGF2P8AFFINEQB $0, expandAVX512_12_mat1<>(SB), Z3, Z3
+ VPERMB Z5, Z4, Z4
+ VGF2P8AFFINEQB $0, expandAVX512_12_mat2<>(SB), Z4, Z4
+ VPERMI2B Z3, Z0, Z1
+ VPERMI2B Z4, Z3, Z2
+ RET
+
+GLOBL expandAVX512_14_inShuf0<>(SB), RODATA, $0x40
+DATA expandAVX512_14_inShuf0<>+0x00(SB)/8, $0xffffff0403020100
+DATA expandAVX512_14_inShuf0<>+0x08(SB)/8, $0xffffff0403020100
+DATA expandAVX512_14_inShuf0<>+0x10(SB)/8, $0xffffff0403020100
+DATA expandAVX512_14_inShuf0<>+0x18(SB)/8, $0xffffff0403020100
+DATA expandAVX512_14_inShuf0<>+0x20(SB)/8, $0xffffff0403020100
+DATA expandAVX512_14_inShuf0<>+0x28(SB)/8, $0xffffff0403020100
+DATA expandAVX512_14_inShuf0<>+0x30(SB)/8, $0xffffff0403020100
+DATA expandAVX512_14_inShuf0<>+0x38(SB)/8, $0xffffff0403020100
+
+GLOBL expandAVX512_14_mat0<>(SB), RODATA, $0x40
+DATA expandAVX512_14_mat0<>+0x00(SB)/8, $0x0101010101010101
+DATA expandAVX512_14_mat0<>+0x08(SB)/8, $0x0101010101010202
+DATA expandAVX512_14_mat0<>+0x10(SB)/8, $0x0202020202020202
+DATA expandAVX512_14_mat0<>+0x18(SB)/8, $0x0202020204040404
+DATA expandAVX512_14_mat0<>+0x20(SB)/8, $0x0404040404040404
+DATA expandAVX512_14_mat0<>+0x28(SB)/8, $0x0404080808080808
+DATA expandAVX512_14_mat0<>+0x30(SB)/8, $0x0808080808080808
+DATA expandAVX512_14_mat0<>+0x38(SB)/8, $0x1010101010101010
+
+GLOBL expandAVX512_14_inShuf1<>(SB), RODATA, $0x40
+DATA expandAVX512_14_inShuf1<>+0x00(SB)/8, $0xffffffff03020100
+DATA expandAVX512_14_inShuf1<>+0x08(SB)/8, $0xffffffff03020100
+DATA expandAVX512_14_inShuf1<>+0x10(SB)/8, $0xffffffff03020100
+DATA expandAVX512_14_inShuf1<>+0x18(SB)/8, $0xffffffff03020100
+DATA expandAVX512_14_inShuf1<>+0x20(SB)/8, $0xffffffff03020100
+DATA expandAVX512_14_inShuf1<>+0x28(SB)/8, $0xffffffff03020100
+DATA expandAVX512_14_inShuf1<>+0x30(SB)/8, $0xffffff0807060504
+DATA expandAVX512_14_inShuf1<>+0x38(SB)/8, $0xffffff0807060504
+
+GLOBL expandAVX512_14_mat1<>(SB), RODATA, $0x40
+DATA expandAVX512_14_mat1<>+0x00(SB)/8, $0x1010101010102020
+DATA expandAVX512_14_mat1<>+0x08(SB)/8, $0x2020202020202020
+DATA expandAVX512_14_mat1<>+0x10(SB)/8, $0x2020202040404040
+DATA expandAVX512_14_mat1<>+0x18(SB)/8, $0x4040404040404040
+DATA expandAVX512_14_mat1<>+0x20(SB)/8, $0x4040808080808080
+DATA expandAVX512_14_mat1<>+0x28(SB)/8, $0x8080808080808080
+DATA expandAVX512_14_mat1<>+0x30(SB)/8, $0x1010101010102020
+DATA expandAVX512_14_mat1<>+0x38(SB)/8, $0x2020202020202020
+
+GLOBL expandAVX512_14_inShuf2<>(SB), RODATA, $0x40
+DATA expandAVX512_14_inShuf2<>+0x00(SB)/8, $0xffffff0807060504
+DATA expandAVX512_14_inShuf2<>+0x08(SB)/8, $0xffffff0807060504
+DATA expandAVX512_14_inShuf2<>+0x10(SB)/8, $0xffffff0807060504
+DATA expandAVX512_14_inShuf2<>+0x18(SB)/8, $0xffffff0807060504
+DATA expandAVX512_14_inShuf2<>+0x20(SB)/8, $0xffffff0908070605
+DATA expandAVX512_14_inShuf2<>+0x28(SB)/8, $0xffffff0908070605
+DATA expandAVX512_14_inShuf2<>+0x30(SB)/8, $0xffffffff08070605
+DATA expandAVX512_14_inShuf2<>+0x38(SB)/8, $0xffffffff08070605
+
+GLOBL expandAVX512_14_mat2<>(SB), RODATA, $0x40
+DATA expandAVX512_14_mat2<>+0x00(SB)/8, $0x2020202040404040
+DATA expandAVX512_14_mat2<>+0x08(SB)/8, $0x4040404040404040
+DATA expandAVX512_14_mat2<>+0x10(SB)/8, $0x4040808080808080
+DATA expandAVX512_14_mat2<>+0x18(SB)/8, $0x8080808080808080
+DATA expandAVX512_14_mat2<>+0x20(SB)/8, $0x0101010101010101
+DATA expandAVX512_14_mat2<>+0x28(SB)/8, $0x0101010101010202
+DATA expandAVX512_14_mat2<>+0x30(SB)/8, $0x0202020202020202
+DATA expandAVX512_14_mat2<>+0x38(SB)/8, $0x0202020204040404
+
+GLOBL expandAVX512_14_inShuf3<>(SB), RODATA, $0x40
+DATA expandAVX512_14_inShuf3<>+0x00(SB)/8, $0xffffffff08070605
+DATA expandAVX512_14_inShuf3<>+0x08(SB)/8, $0xffffffff08070605
+DATA expandAVX512_14_inShuf3<>+0x10(SB)/8, $0xffffffff08070605
+DATA expandAVX512_14_inShuf3<>+0x18(SB)/8, $0xffffffff08070605
+DATA expandAVX512_14_inShuf3<>+0x20(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_14_inShuf3<>+0x28(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_14_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_14_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff
+
+GLOBL expandAVX512_14_mat3<>(SB), RODATA, $0x40
+DATA expandAVX512_14_mat3<>+0x00(SB)/8, $0x0404040404040404
+DATA expandAVX512_14_mat3<>+0x08(SB)/8, $0x0404080808080808
+DATA expandAVX512_14_mat3<>+0x10(SB)/8, $0x0808080808080808
+DATA expandAVX512_14_mat3<>+0x18(SB)/8, $0x1010101010101010
+DATA expandAVX512_14_mat3<>+0x20(SB)/8, $0x0000000000000000
+DATA expandAVX512_14_mat3<>+0x28(SB)/8, $0x0000000000000000
+DATA expandAVX512_14_mat3<>+0x30(SB)/8, $0x0000000000000000
+DATA expandAVX512_14_mat3<>+0x38(SB)/8, $0x0000000000000000
+
+GLOBL expandAVX512_14_outShufLo(SB), RODATA, $0x40
+DATA expandAVX512_14_outShufLo+0x00(SB)/8, $0x3830282018100800
+DATA expandAVX512_14_outShufLo+0x08(SB)/8, $0x0901686058504840
+DATA expandAVX512_14_outShufLo+0x10(SB)/8, $0x4941393129211911
+DATA expandAVX512_14_outShufLo+0x18(SB)/8, $0x1a120a0269615951
+DATA expandAVX512_14_outShufLo+0x20(SB)/8, $0x5a524a423a322a22
+DATA expandAVX512_14_outShufLo+0x28(SB)/8, $0x2b231b130b036a62
+DATA expandAVX512_14_outShufLo+0x30(SB)/8, $0x6b635b534b433b33
+DATA expandAVX512_14_outShufLo+0x38(SB)/8, $0x3c342c241c140c04
+
+GLOBL expandAVX512_14_outShufHi0(SB), RODATA, $0x40
+DATA expandAVX512_14_outShufHi0+0x00(SB)/8, $0x6860585048403830
+DATA expandAVX512_14_outShufHi0+0x08(SB)/8, $0x3931ffffffff7870
+DATA expandAVX512_14_outShufHi0+0x10(SB)/8, $0x7971696159514941
+DATA expandAVX512_14_outShufHi0+0x18(SB)/8, $0x4a423a32ffffffff
+DATA expandAVX512_14_outShufHi0+0x20(SB)/8, $0xffff7a726a625a52
+DATA expandAVX512_14_outShufHi0+0x28(SB)/8, $0x5b534b433b33ffff
+DATA expandAVX512_14_outShufHi0+0x30(SB)/8, $0xffffffff7b736b63
+DATA expandAVX512_14_outShufHi0+0x38(SB)/8, $0x6c645c544c443c34
+
+GLOBL expandAVX512_14_outShufHi1(SB), RODATA, $0x40
+DATA expandAVX512_14_outShufHi1+0x00(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_14_outShufHi1+0x08(SB)/8, $0xffff18100800ffff
+DATA expandAVX512_14_outShufHi1+0x10(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_14_outShufHi1+0x18(SB)/8, $0xffffffff19110901
+DATA expandAVX512_14_outShufHi1+0x20(SB)/8, $0x0a02ffffffffffff
+DATA expandAVX512_14_outShufHi1+0x28(SB)/8, $0xffffffffffff1a12
+DATA expandAVX512_14_outShufHi1+0x30(SB)/8, $0x1b130b03ffffffff
+DATA expandAVX512_14_outShufHi1+0x38(SB)/8, $0xffffffffffffffff
+
+TEXT expandAVX512_14<>(SB), NOSPLIT, $0-0
+ VMOVDQU64 expandAVX512_14_inShuf0<>(SB), Z0
+ VMOVDQU64 expandAVX512_14_inShuf1<>(SB), Z2
+ VMOVDQU64 expandAVX512_14_inShuf2<>(SB), Z3
+ VMOVDQU64 expandAVX512_14_inShuf3<>(SB), Z4
+ VMOVDQU64 expandAVX512_14_outShufLo(SB), Z1
+ VMOVDQU64 expandAVX512_14_outShufHi0(SB), Z5
+ VMOVDQU64 expandAVX512_14_outShufHi1(SB), Z6
+ VMOVDQU64 (AX), Z7
+ VPERMB Z7, Z0, Z0
+ VGF2P8AFFINEQB $0, expandAVX512_14_mat0<>(SB), Z0, Z0
+ VPERMB Z7, Z2, Z2
+ VGF2P8AFFINEQB $0, expandAVX512_14_mat1<>(SB), Z2, Z2
+ VPERMB Z7, Z3, Z3
+ VGF2P8AFFINEQB $0, expandAVX512_14_mat2<>(SB), Z3, Z3
+ VPERMB Z7, Z4, Z4
+ VGF2P8AFFINEQB $0, expandAVX512_14_mat3<>(SB), Z4, Z4
+ VPERMI2B Z2, Z0, Z1
+ MOVQ $0xff0ffc3ff0ffc3ff, AX
+ KMOVQ AX, K1
+ VPERMI2B.Z Z3, Z2, K1, Z5
+ MOVQ $0xf003c00f003c00, AX
+ KMOVQ AX, K1
+ VPERMB.Z Z4, Z6, K1, Z0
+ VPORQ Z0, Z5, Z2
+ RET
+
+GLOBL expandAVX512_16_inShuf0<>(SB), RODATA, $0x40
+DATA expandAVX512_16_inShuf0<>+0x00(SB)/8, $0x0303020201010000
+DATA expandAVX512_16_inShuf0<>+0x08(SB)/8, $0x0303020201010000
+DATA expandAVX512_16_inShuf0<>+0x10(SB)/8, $0x0303020201010000
+DATA expandAVX512_16_inShuf0<>+0x18(SB)/8, $0x0303020201010000
+DATA expandAVX512_16_inShuf0<>+0x20(SB)/8, $0x0303020201010000
+DATA expandAVX512_16_inShuf0<>+0x28(SB)/8, $0x0303020201010000
+DATA expandAVX512_16_inShuf0<>+0x30(SB)/8, $0x0303020201010000
+DATA expandAVX512_16_inShuf0<>+0x38(SB)/8, $0x0303020201010000
+
+GLOBL expandAVX512_16_mat0<>(SB), RODATA, $0x40
+DATA expandAVX512_16_mat0<>+0x00(SB)/8, $0x0101010101010101
+DATA expandAVX512_16_mat0<>+0x08(SB)/8, $0x0202020202020202
+DATA expandAVX512_16_mat0<>+0x10(SB)/8, $0x0404040404040404
+DATA expandAVX512_16_mat0<>+0x18(SB)/8, $0x0808080808080808
+DATA expandAVX512_16_mat0<>+0x20(SB)/8, $0x1010101010101010
+DATA expandAVX512_16_mat0<>+0x28(SB)/8, $0x2020202020202020
+DATA expandAVX512_16_mat0<>+0x30(SB)/8, $0x4040404040404040
+DATA expandAVX512_16_mat0<>+0x38(SB)/8, $0x8080808080808080
+
+GLOBL expandAVX512_16_inShuf1<>(SB), RODATA, $0x40
+DATA expandAVX512_16_inShuf1<>+0x00(SB)/8, $0x0707060605050404
+DATA expandAVX512_16_inShuf1<>+0x08(SB)/8, $0x0707060605050404
+DATA expandAVX512_16_inShuf1<>+0x10(SB)/8, $0x0707060605050404
+DATA expandAVX512_16_inShuf1<>+0x18(SB)/8, $0x0707060605050404
+DATA expandAVX512_16_inShuf1<>+0x20(SB)/8, $0x0707060605050404
+DATA expandAVX512_16_inShuf1<>+0x28(SB)/8, $0x0707060605050404
+DATA expandAVX512_16_inShuf1<>+0x30(SB)/8, $0x0707060605050404
+DATA expandAVX512_16_inShuf1<>+0x38(SB)/8, $0x0707060605050404
+
+GLOBL expandAVX512_16_outShufLo(SB), RODATA, $0x40
+DATA expandAVX512_16_outShufLo+0x00(SB)/8, $0x1918111009080100
+DATA expandAVX512_16_outShufLo+0x08(SB)/8, $0x3938313029282120
+DATA expandAVX512_16_outShufLo+0x10(SB)/8, $0x1b1a13120b0a0302
+DATA expandAVX512_16_outShufLo+0x18(SB)/8, $0x3b3a33322b2a2322
+DATA expandAVX512_16_outShufLo+0x20(SB)/8, $0x1d1c15140d0c0504
+DATA expandAVX512_16_outShufLo+0x28(SB)/8, $0x3d3c35342d2c2524
+DATA expandAVX512_16_outShufLo+0x30(SB)/8, $0x1f1e17160f0e0706
+DATA expandAVX512_16_outShufLo+0x38(SB)/8, $0x3f3e37362f2e2726
+
+TEXT expandAVX512_16<>(SB), NOSPLIT, $0-0
+ VMOVDQU64 expandAVX512_16_inShuf0<>(SB), Z0
+ VMOVDQU64 expandAVX512_16_mat0<>(SB), Z1
+ VMOVDQU64 expandAVX512_16_inShuf1<>(SB), Z2
+ VMOVDQU64 expandAVX512_16_outShufLo(SB), Z3
+ VMOVDQU64 (AX), Z4
+ VPERMB Z4, Z0, Z0
+ VGF2P8AFFINEQB $0, Z1, Z0, Z0
+ VPERMB Z4, Z2, Z2
+ VGF2P8AFFINEQB $0, Z1, Z2, Z2
+ VPERMB Z0, Z3, Z1
+ VPERMB Z2, Z3, Z2
+ RET
+
+GLOBL expandAVX512_18_inShuf0<>(SB), RODATA, $0x40
+DATA expandAVX512_18_inShuf0<>+0x00(SB)/8, $0x0303020201010000
+DATA expandAVX512_18_inShuf0<>+0x08(SB)/8, $0xffffffff03020100
+DATA expandAVX512_18_inShuf0<>+0x10(SB)/8, $0xffffffff03020100
+DATA expandAVX512_18_inShuf0<>+0x18(SB)/8, $0xffffffff03020100
+DATA expandAVX512_18_inShuf0<>+0x20(SB)/8, $0xffffffff03020100
+DATA expandAVX512_18_inShuf0<>+0x28(SB)/8, $0xffffffff03020100
+DATA expandAVX512_18_inShuf0<>+0x30(SB)/8, $0x0303020201010000
+DATA expandAVX512_18_inShuf0<>+0x38(SB)/8, $0xff03020201010000
+
+GLOBL expandAVX512_18_mat0<>(SB), RODATA, $0x40
+DATA expandAVX512_18_mat0<>+0x00(SB)/8, $0x0101010101010101
+DATA expandAVX512_18_mat0<>+0x08(SB)/8, $0x0101020202020202
+DATA expandAVX512_18_mat0<>+0x10(SB)/8, $0x0202020202020202
+DATA expandAVX512_18_mat0<>+0x18(SB)/8, $0x0202020204040404
+DATA expandAVX512_18_mat0<>+0x20(SB)/8, $0x0404040404040404
+DATA expandAVX512_18_mat0<>+0x28(SB)/8, $0x0404040404040808
+DATA expandAVX512_18_mat0<>+0x30(SB)/8, $0x0808080808080808
+DATA expandAVX512_18_mat0<>+0x38(SB)/8, $0x1010101010101010
+
+GLOBL expandAVX512_18_inShuf1<>(SB), RODATA, $0x40
+DATA expandAVX512_18_inShuf1<>+0x00(SB)/8, $0xffffffffff020100
+DATA expandAVX512_18_inShuf1<>+0x08(SB)/8, $0xffffffffff020100
+DATA expandAVX512_18_inShuf1<>+0x10(SB)/8, $0xffffffffff020100
+DATA expandAVX512_18_inShuf1<>+0x18(SB)/8, $0xffffffffff020100
+DATA expandAVX512_18_inShuf1<>+0x20(SB)/8, $0xffffffffff020100
+DATA expandAVX512_18_inShuf1<>+0x28(SB)/8, $0xffff020201010000
+DATA expandAVX512_18_inShuf1<>+0x30(SB)/8, $0xff06060505040403
+DATA expandAVX512_18_inShuf1<>+0x38(SB)/8, $0xffffffff06050403
+
+GLOBL expandAVX512_18_mat1<>(SB), RODATA, $0x40
+DATA expandAVX512_18_mat1<>+0x00(SB)/8, $0x1010202020202020
+DATA expandAVX512_18_mat1<>+0x08(SB)/8, $0x2020202020202020
+DATA expandAVX512_18_mat1<>+0x10(SB)/8, $0x2020202040404040
+DATA expandAVX512_18_mat1<>+0x18(SB)/8, $0x4040404040404040
+DATA expandAVX512_18_mat1<>+0x20(SB)/8, $0x4040404040408080
+DATA expandAVX512_18_mat1<>+0x28(SB)/8, $0x8080808080808080
+DATA expandAVX512_18_mat1<>+0x30(SB)/8, $0x1010101010101010
+DATA expandAVX512_18_mat1<>+0x38(SB)/8, $0x1010202020202020
+
+GLOBL expandAVX512_18_inShuf2<>(SB), RODATA, $0x40
+DATA expandAVX512_18_inShuf2<>+0x00(SB)/8, $0xffffffff06050403
+DATA expandAVX512_18_inShuf2<>+0x08(SB)/8, $0xffffffff06050403
+DATA expandAVX512_18_inShuf2<>+0x10(SB)/8, $0xffffffff06050403
+DATA expandAVX512_18_inShuf2<>+0x18(SB)/8, $0xffffffff06050403
+DATA expandAVX512_18_inShuf2<>+0x20(SB)/8, $0x0606050504040303
+DATA expandAVX512_18_inShuf2<>+0x28(SB)/8, $0x0707060605050404
+DATA expandAVX512_18_inShuf2<>+0x30(SB)/8, $0xffffffffff060504
+DATA expandAVX512_18_inShuf2<>+0x38(SB)/8, $0xffffffffff060504
+
+GLOBL expandAVX512_18_mat2<>(SB), RODATA, $0x40
+DATA expandAVX512_18_mat2<>+0x00(SB)/8, $0x2020202020202020
+DATA expandAVX512_18_mat2<>+0x08(SB)/8, $0x2020202040404040
+DATA expandAVX512_18_mat2<>+0x10(SB)/8, $0x4040404040404040
+DATA expandAVX512_18_mat2<>+0x18(SB)/8, $0x4040404040408080
+DATA expandAVX512_18_mat2<>+0x20(SB)/8, $0x8080808080808080
+DATA expandAVX512_18_mat2<>+0x28(SB)/8, $0x0101010101010101
+DATA expandAVX512_18_mat2<>+0x30(SB)/8, $0x0101020202020202
+DATA expandAVX512_18_mat2<>+0x38(SB)/8, $0x0202020202020202
+
+GLOBL expandAVX512_18_inShuf3<>(SB), RODATA, $0x40
+DATA expandAVX512_18_inShuf3<>+0x00(SB)/8, $0xffffffffff060504
+DATA expandAVX512_18_inShuf3<>+0x08(SB)/8, $0xffffffffff060504
+DATA expandAVX512_18_inShuf3<>+0x10(SB)/8, $0xffffffffff060504
+DATA expandAVX512_18_inShuf3<>+0x18(SB)/8, $0xffff060605050404
+DATA expandAVX512_18_inShuf3<>+0x20(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_18_inShuf3<>+0x28(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_18_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_18_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff
+
+GLOBL expandAVX512_18_mat3<>(SB), RODATA, $0x40
+DATA expandAVX512_18_mat3<>+0x00(SB)/8, $0x0202020204040404
+DATA expandAVX512_18_mat3<>+0x08(SB)/8, $0x0404040404040404
+DATA expandAVX512_18_mat3<>+0x10(SB)/8, $0x0404040404040808
+DATA expandAVX512_18_mat3<>+0x18(SB)/8, $0x0808080808080808
+DATA expandAVX512_18_mat3<>+0x20(SB)/8, $0x0000000000000000
+DATA expandAVX512_18_mat3<>+0x28(SB)/8, $0x0000000000000000
+DATA expandAVX512_18_mat3<>+0x30(SB)/8, $0x0000000000000000
+DATA expandAVX512_18_mat3<>+0x38(SB)/8, $0x0000000000000000
+
+GLOBL expandAVX512_18_outShufLo(SB), RODATA, $0x40
+DATA expandAVX512_18_outShufLo+0x00(SB)/8, $0x3028201810080100
+DATA expandAVX512_18_outShufLo+0x08(SB)/8, $0x6058504840393831
+DATA expandAVX512_18_outShufLo+0x10(SB)/8, $0x2119110903026968
+DATA expandAVX512_18_outShufLo+0x18(SB)/8, $0x5149413b3a333229
+DATA expandAVX512_18_outShufLo+0x20(SB)/8, $0x120a05046b6a6159
+DATA expandAVX512_18_outShufLo+0x28(SB)/8, $0x423d3c35342a221a
+DATA expandAVX512_18_outShufLo+0x30(SB)/8, $0x07066d6c625a524a
+DATA expandAVX512_18_outShufLo+0x38(SB)/8, $0x3e37362b231b130b
+
+GLOBL expandAVX512_18_outShufHi0(SB), RODATA, $0x40
+DATA expandAVX512_18_outShufHi0+0x00(SB)/8, $0x6160585048403830
+DATA expandAVX512_18_outShufHi0+0x08(SB)/8, $0xffffffff78706968
+DATA expandAVX512_18_outShufHi0+0x10(SB)/8, $0x59514941393231ff
+DATA expandAVX512_18_outShufHi0+0x18(SB)/8, $0xffff79716b6a6362
+DATA expandAVX512_18_outShufHi0+0x20(SB)/8, $0x4a423a3433ffffff
+DATA expandAVX512_18_outShufHi0+0x28(SB)/8, $0x7a726d6c65645a52
+DATA expandAVX512_18_outShufHi0+0x30(SB)/8, $0x3b3635ffffffffff
+DATA expandAVX512_18_outShufHi0+0x38(SB)/8, $0x6f6e67665b534b43
+
+GLOBL expandAVX512_18_outShufHi1(SB), RODATA, $0x40
+DATA expandAVX512_18_outShufHi1+0x00(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_18_outShufHi1+0x08(SB)/8, $0x18100800ffffffff
+DATA expandAVX512_18_outShufHi1+0x10(SB)/8, $0xffffffffffffff19
+DATA expandAVX512_18_outShufHi1+0x18(SB)/8, $0x0901ffffffffffff
+DATA expandAVX512_18_outShufHi1+0x20(SB)/8, $0xffffffffff1b1a11
+DATA expandAVX512_18_outShufHi1+0x28(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_18_outShufHi1+0x30(SB)/8, $0xffffff1d1c120a02
+DATA expandAVX512_18_outShufHi1+0x38(SB)/8, $0xffffffffffffffff
+
+TEXT expandAVX512_18<>(SB), NOSPLIT, $0-0
+ VMOVDQU64 expandAVX512_18_inShuf0<>(SB), Z0
+ VMOVDQU64 expandAVX512_18_inShuf1<>(SB), Z2
+ VMOVDQU64 expandAVX512_18_inShuf2<>(SB), Z3
+ VMOVDQU64 expandAVX512_18_inShuf3<>(SB), Z4
+ VMOVDQU64 expandAVX512_18_outShufLo(SB), Z1
+ VMOVDQU64 expandAVX512_18_outShufHi0(SB), Z5
+ VMOVDQU64 expandAVX512_18_outShufHi1(SB), Z6
+ VMOVDQU64 (AX), Z7
+ VPERMB Z7, Z0, Z0
+ VGF2P8AFFINEQB $0, expandAVX512_18_mat0<>(SB), Z0, Z0
+ VPERMB Z7, Z2, Z2
+ VGF2P8AFFINEQB $0, expandAVX512_18_mat1<>(SB), Z2, Z2
+ VPERMB Z7, Z3, Z3
+ VGF2P8AFFINEQB $0, expandAVX512_18_mat2<>(SB), Z3, Z3
+ VPERMB Z7, Z4, Z4
+ VGF2P8AFFINEQB $0, expandAVX512_18_mat3<>(SB), Z4, Z4
+ VPERMI2B Z2, Z0, Z1
+ MOVQ $0xffe0fff83ffe0fff, AX
+ KMOVQ AX, K1
+ VPERMI2B.Z Z3, Z2, K1, Z5
+ MOVQ $0x1f0007c001f000, AX
+ KMOVQ AX, K1
+ VPERMB.Z Z4, Z6, K1, Z0
+ VPORQ Z0, Z5, Z2
+ RET
+
+GLOBL expandAVX512_20_inShuf0<>(SB), RODATA, $0x40
+DATA expandAVX512_20_inShuf0<>+0x00(SB)/8, $0x0303020201010000
+DATA expandAVX512_20_inShuf0<>+0x08(SB)/8, $0xffffffff03020100
+DATA expandAVX512_20_inShuf0<>+0x10(SB)/8, $0xff03020201010000
+DATA expandAVX512_20_inShuf0<>+0x18(SB)/8, $0xffff020201010000
+DATA expandAVX512_20_inShuf0<>+0x20(SB)/8, $0xffffffffff020100
+DATA expandAVX512_20_inShuf0<>+0x28(SB)/8, $0xffff020201010000
+DATA expandAVX512_20_inShuf0<>+0x30(SB)/8, $0xffff020201010000
+DATA expandAVX512_20_inShuf0<>+0x38(SB)/8, $0xffffffffff020100
+
+GLOBL expandAVX512_20_mat0<>(SB), RODATA, $0x40
+DATA expandAVX512_20_mat0<>+0x00(SB)/8, $0x0101010101010101
+DATA expandAVX512_20_mat0<>+0x08(SB)/8, $0x0101010102020202
+DATA expandAVX512_20_mat0<>+0x10(SB)/8, $0x0202020202020202
+DATA expandAVX512_20_mat0<>+0x18(SB)/8, $0x0404040404040404
+DATA expandAVX512_20_mat0<>+0x20(SB)/8, $0x0404040408080808
+DATA expandAVX512_20_mat0<>+0x28(SB)/8, $0x0808080808080808
+DATA expandAVX512_20_mat0<>+0x30(SB)/8, $0x1010101010101010
+DATA expandAVX512_20_mat0<>+0x38(SB)/8, $0x1010101020202020
+
+GLOBL expandAVX512_20_inShuf1<>(SB), RODATA, $0x40
+DATA expandAVX512_20_inShuf1<>+0x00(SB)/8, $0xffff020201010000
+DATA expandAVX512_20_inShuf1<>+0x08(SB)/8, $0xffff020201010000
+DATA expandAVX512_20_inShuf1<>+0x10(SB)/8, $0xffffffffff020100
+DATA expandAVX512_20_inShuf1<>+0x18(SB)/8, $0xffff020201010000
+DATA expandAVX512_20_inShuf1<>+0x20(SB)/8, $0xff06060505040403
+DATA expandAVX512_20_inShuf1<>+0x28(SB)/8, $0x0606050504040303
+DATA expandAVX512_20_inShuf1<>+0x30(SB)/8, $0xffffffff06050403
+DATA expandAVX512_20_inShuf1<>+0x38(SB)/8, $0xffff050504040303
+
+GLOBL expandAVX512_20_mat1<>(SB), RODATA, $0x40
+DATA expandAVX512_20_mat1<>+0x00(SB)/8, $0x2020202020202020
+DATA expandAVX512_20_mat1<>+0x08(SB)/8, $0x4040404040404040
+DATA expandAVX512_20_mat1<>+0x10(SB)/8, $0x4040404080808080
+DATA expandAVX512_20_mat1<>+0x18(SB)/8, $0x8080808080808080
+DATA expandAVX512_20_mat1<>+0x20(SB)/8, $0x0202020202020202
+DATA expandAVX512_20_mat1<>+0x28(SB)/8, $0x0404040404040404
+DATA expandAVX512_20_mat1<>+0x30(SB)/8, $0x0404040408080808
+DATA expandAVX512_20_mat1<>+0x38(SB)/8, $0x0808080808080808
+
+GLOBL expandAVX512_20_inShuf2<>(SB), RODATA, $0x40
+DATA expandAVX512_20_inShuf2<>+0x00(SB)/8, $0xffff050504040303
+DATA expandAVX512_20_inShuf2<>+0x08(SB)/8, $0xffffffffff050403
+DATA expandAVX512_20_inShuf2<>+0x10(SB)/8, $0xffff050504040303
+DATA expandAVX512_20_inShuf2<>+0x18(SB)/8, $0xffff050504040303
+DATA expandAVX512_20_inShuf2<>+0x20(SB)/8, $0xffffffffff050403
+DATA expandAVX512_20_inShuf2<>+0x28(SB)/8, $0xffff050504040303
+DATA expandAVX512_20_inShuf2<>+0x30(SB)/8, $0xffff060605050404
+DATA expandAVX512_20_inShuf2<>+0x38(SB)/8, $0xffffffffff060504
+
+GLOBL expandAVX512_20_mat2<>(SB), RODATA, $0x40
+DATA expandAVX512_20_mat2<>+0x00(SB)/8, $0x1010101010101010
+DATA expandAVX512_20_mat2<>+0x08(SB)/8, $0x1010101020202020
+DATA expandAVX512_20_mat2<>+0x10(SB)/8, $0x2020202020202020
+DATA expandAVX512_20_mat2<>+0x18(SB)/8, $0x4040404040404040
+DATA expandAVX512_20_mat2<>+0x20(SB)/8, $0x4040404080808080
+DATA expandAVX512_20_mat2<>+0x28(SB)/8, $0x8080808080808080
+DATA expandAVX512_20_mat2<>+0x30(SB)/8, $0x0101010101010101
+DATA expandAVX512_20_mat2<>+0x38(SB)/8, $0x0101010102020202
+
+GLOBL expandAVX512_20_outShufLo(SB), RODATA, $0x40
+DATA expandAVX512_20_outShufLo+0x00(SB)/8, $0x2019181110080100
+DATA expandAVX512_20_outShufLo+0x08(SB)/8, $0x4841403831302928
+DATA expandAVX512_20_outShufLo+0x10(SB)/8, $0x1209030259585049
+DATA expandAVX512_20_outShufLo+0x18(SB)/8, $0x33322b2a211b1a13
+DATA expandAVX512_20_outShufLo+0x20(SB)/8, $0x5b5a514b4a434239
+DATA expandAVX512_20_outShufLo+0x28(SB)/8, $0x221d1c15140a0504
+DATA expandAVX512_20_outShufLo+0x30(SB)/8, $0x4c45443a35342d2c
+DATA expandAVX512_20_outShufLo+0x38(SB)/8, $0x160b07065d5c524d
+
+GLOBL expandAVX512_20_outShufHi(SB), RODATA, $0x40
+DATA expandAVX512_20_outShufHi+0x00(SB)/8, $0x4140393830292820
+DATA expandAVX512_20_outShufHi+0x08(SB)/8, $0x6968605958515048
+DATA expandAVX512_20_outShufHi+0x10(SB)/8, $0x312b2a2221787170
+DATA expandAVX512_20_outShufHi+0x18(SB)/8, $0x5a53524943423b3a
+DATA expandAVX512_20_outShufHi+0x20(SB)/8, $0x237973726b6a615b
+DATA expandAVX512_20_outShufHi+0x28(SB)/8, $0x45443d3c322d2c24
+DATA expandAVX512_20_outShufHi+0x30(SB)/8, $0x6d6c625d5c55544a
+DATA expandAVX512_20_outShufHi+0x38(SB)/8, $0x332f2e26257a7574
+
+TEXT expandAVX512_20<>(SB), NOSPLIT, $0-0
+ VMOVDQU64 expandAVX512_20_inShuf0<>(SB), Z0
+ VMOVDQU64 expandAVX512_20_inShuf1<>(SB), Z3
+ VMOVDQU64 expandAVX512_20_inShuf2<>(SB), Z4
+ VMOVDQU64 expandAVX512_20_outShufLo(SB), Z1
+ VMOVDQU64 expandAVX512_20_outShufHi(SB), Z2
+ VMOVDQU64 (AX), Z5
+ VPERMB Z5, Z0, Z0
+ VGF2P8AFFINEQB $0, expandAVX512_20_mat0<>(SB), Z0, Z0
+ VPERMB Z5, Z3, Z3
+ VGF2P8AFFINEQB $0, expandAVX512_20_mat1<>(SB), Z3, Z3
+ VPERMB Z5, Z4, Z4
+ VGF2P8AFFINEQB $0, expandAVX512_20_mat2<>(SB), Z4, Z4
+ VPERMI2B Z3, Z0, Z1
+ VPERMI2B Z4, Z3, Z2
+ RET
+
+GLOBL expandAVX512_22_inShuf0<>(SB), RODATA, $0x40
+DATA expandAVX512_22_inShuf0<>+0x00(SB)/8, $0xffff020201010000
+DATA expandAVX512_22_inShuf0<>+0x08(SB)/8, $0xffffffffff020100
+DATA expandAVX512_22_inShuf0<>+0x10(SB)/8, $0xffff020201010000
+DATA expandAVX512_22_inShuf0<>+0x18(SB)/8, $0xffffffffff020100
+DATA expandAVX512_22_inShuf0<>+0x20(SB)/8, $0xffff020201010000
+DATA expandAVX512_22_inShuf0<>+0x28(SB)/8, $0xffffffffff020100
+DATA expandAVX512_22_inShuf0<>+0x30(SB)/8, $0xffff020201010000
+DATA expandAVX512_22_inShuf0<>+0x38(SB)/8, $0xffff020201010000
+
+GLOBL expandAVX512_22_mat0<>(SB), RODATA, $0x40
+DATA expandAVX512_22_mat0<>+0x00(SB)/8, $0x0101010101010101
+DATA expandAVX512_22_mat0<>+0x08(SB)/8, $0x0101010101010202
+DATA expandAVX512_22_mat0<>+0x10(SB)/8, $0x0202020202020202
+DATA expandAVX512_22_mat0<>+0x18(SB)/8, $0x0202020204040404
+DATA expandAVX512_22_mat0<>+0x20(SB)/8, $0x0404040404040404
+DATA expandAVX512_22_mat0<>+0x28(SB)/8, $0x0404080808080808
+DATA expandAVX512_22_mat0<>+0x30(SB)/8, $0x0808080808080808
+DATA expandAVX512_22_mat0<>+0x38(SB)/8, $0x1010101010101010
+
+GLOBL expandAVX512_22_inShuf1<>(SB), RODATA, $0x40
+DATA expandAVX512_22_inShuf1<>+0x00(SB)/8, $0xffffffffff020100
+DATA expandAVX512_22_inShuf1<>+0x08(SB)/8, $0xffff020201010000
+DATA expandAVX512_22_inShuf1<>+0x10(SB)/8, $0xffffffffff020100
+DATA expandAVX512_22_inShuf1<>+0x18(SB)/8, $0xffff020201010000
+DATA expandAVX512_22_inShuf1<>+0x20(SB)/8, $0xffffffffff020100
+DATA expandAVX512_22_inShuf1<>+0x28(SB)/8, $0xffffffff01010000
+DATA expandAVX512_22_inShuf1<>+0x30(SB)/8, $0xffff040403030202
+DATA expandAVX512_22_inShuf1<>+0x38(SB)/8, $0xffff050504040303
+
+GLOBL expandAVX512_22_mat1<>(SB), RODATA, $0x40
+DATA expandAVX512_22_mat1<>+0x00(SB)/8, $0x1010101010102020
+DATA expandAVX512_22_mat1<>+0x08(SB)/8, $0x2020202020202020
+DATA expandAVX512_22_mat1<>+0x10(SB)/8, $0x2020202040404040
+DATA expandAVX512_22_mat1<>+0x18(SB)/8, $0x4040404040404040
+DATA expandAVX512_22_mat1<>+0x20(SB)/8, $0x4040808080808080
+DATA expandAVX512_22_mat1<>+0x28(SB)/8, $0x8080808080808080
+DATA expandAVX512_22_mat1<>+0x30(SB)/8, $0x8080808080808080
+DATA expandAVX512_22_mat1<>+0x38(SB)/8, $0x0101010101010101
+
+GLOBL expandAVX512_22_inShuf2<>(SB), RODATA, $0x40
+DATA expandAVX512_22_inShuf2<>+0x00(SB)/8, $0xffffffffff050403
+DATA expandAVX512_22_inShuf2<>+0x08(SB)/8, $0xffff050504040303
+DATA expandAVX512_22_inShuf2<>+0x10(SB)/8, $0xffffffffff050403
+DATA expandAVX512_22_inShuf2<>+0x18(SB)/8, $0xffff050504040303
+DATA expandAVX512_22_inShuf2<>+0x20(SB)/8, $0xffffffffff050403
+DATA expandAVX512_22_inShuf2<>+0x28(SB)/8, $0xffff050504040303
+DATA expandAVX512_22_inShuf2<>+0x30(SB)/8, $0xffff050504040303
+DATA expandAVX512_22_inShuf2<>+0x38(SB)/8, $0xffffffffff050403
+
+GLOBL expandAVX512_22_mat2<>(SB), RODATA, $0x40
+DATA expandAVX512_22_mat2<>+0x00(SB)/8, $0x0101010101010202
+DATA expandAVX512_22_mat2<>+0x08(SB)/8, $0x0202020202020202
+DATA expandAVX512_22_mat2<>+0x10(SB)/8, $0x0202020204040404
+DATA expandAVX512_22_mat2<>+0x18(SB)/8, $0x0404040404040404
+DATA expandAVX512_22_mat2<>+0x20(SB)/8, $0x0404080808080808
+DATA expandAVX512_22_mat2<>+0x28(SB)/8, $0x0808080808080808
+DATA expandAVX512_22_mat2<>+0x30(SB)/8, $0x1010101010101010
+DATA expandAVX512_22_mat2<>+0x38(SB)/8, $0x1010101010102020
+
+GLOBL expandAVX512_22_inShuf3<>(SB), RODATA, $0x40
+DATA expandAVX512_22_inShuf3<>+0x00(SB)/8, $0xffff050504040303
+DATA expandAVX512_22_inShuf3<>+0x08(SB)/8, $0xffffffffff050403
+DATA expandAVX512_22_inShuf3<>+0x10(SB)/8, $0xffffff0504040303
+DATA expandAVX512_22_inShuf3<>+0x18(SB)/8, $0xffffffffffff0403
+DATA expandAVX512_22_inShuf3<>+0x20(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_22_inShuf3<>+0x28(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_22_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_22_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff
+
+GLOBL expandAVX512_22_mat3<>(SB), RODATA, $0x40
+DATA expandAVX512_22_mat3<>+0x00(SB)/8, $0x2020202020202020
+DATA expandAVX512_22_mat3<>+0x08(SB)/8, $0x2020202040404040
+DATA expandAVX512_22_mat3<>+0x10(SB)/8, $0x4040404040404040
+DATA expandAVX512_22_mat3<>+0x18(SB)/8, $0x4040808080808080
+DATA expandAVX512_22_mat3<>+0x20(SB)/8, $0x0000000000000000
+DATA expandAVX512_22_mat3<>+0x28(SB)/8, $0x0000000000000000
+DATA expandAVX512_22_mat3<>+0x30(SB)/8, $0x0000000000000000
+DATA expandAVX512_22_mat3<>+0x38(SB)/8, $0x0000000000000000
+
+GLOBL expandAVX512_22_outShufLo(SB), RODATA, $0x40
+DATA expandAVX512_22_outShufLo+0x00(SB)/8, $0x2120181110080100
+DATA expandAVX512_22_outShufLo+0x08(SB)/8, $0x4948403938313028
+DATA expandAVX512_22_outShufLo+0x10(SB)/8, $0x0302696860595850
+DATA expandAVX512_22_outShufLo+0x18(SB)/8, $0x3229232219131209
+DATA expandAVX512_22_outShufLo+0x20(SB)/8, $0x5a514b4a413b3a33
+DATA expandAVX512_22_outShufLo+0x28(SB)/8, $0x140a05046b6a615b
+DATA expandAVX512_22_outShufLo+0x30(SB)/8, $0x3c35342a25241a15
+DATA expandAVX512_22_outShufLo+0x38(SB)/8, $0x625d5c524d4c423d
+
+GLOBL expandAVX512_22_outShufHi0(SB), RODATA, $0x40
+DATA expandAVX512_22_outShufHi0+0x00(SB)/8, $0x5049484039383130
+DATA expandAVX512_22_outShufHi0+0x08(SB)/8, $0x7871706968605958
+DATA expandAVX512_22_outShufHi0+0x10(SB)/8, $0x3332ffffffffffff
+DATA expandAVX512_22_outShufHi0+0x18(SB)/8, $0x5b5a514b4a413b3a
+DATA expandAVX512_22_outShufHi0+0x20(SB)/8, $0xffff7973726b6a61
+DATA expandAVX512_22_outShufHi0+0x28(SB)/8, $0x3d3c3534ffffffff
+DATA expandAVX512_22_outShufHi0+0x30(SB)/8, $0x6c625d5c524d4c42
+DATA expandAVX512_22_outShufHi0+0x38(SB)/8, $0xffffffff7a75746d
+
+GLOBL expandAVX512_22_outShufHi1(SB), RODATA, $0x40
+DATA expandAVX512_22_outShufHi1+0x00(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_22_outShufHi1+0x08(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_22_outShufHi1+0x10(SB)/8, $0xffff181110080100
+DATA expandAVX512_22_outShufHi1+0x18(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_22_outShufHi1+0x20(SB)/8, $0x0302ffffffffffff
+DATA expandAVX512_22_outShufHi1+0x28(SB)/8, $0xffffffff19131209
+DATA expandAVX512_22_outShufHi1+0x30(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_22_outShufHi1+0x38(SB)/8, $0x140a0504ffffffff
+
+TEXT expandAVX512_22<>(SB), NOSPLIT, $0-0
+ VMOVDQU64 expandAVX512_22_inShuf0<>(SB), Z0
+ VMOVDQU64 expandAVX512_22_inShuf1<>(SB), Z2
+ VMOVDQU64 expandAVX512_22_inShuf2<>(SB), Z3
+ VMOVDQU64 expandAVX512_22_inShuf3<>(SB), Z4
+ VMOVDQU64 expandAVX512_22_outShufLo(SB), Z1
+ VMOVDQU64 expandAVX512_22_outShufHi0(SB), Z5
+ VMOVDQU64 expandAVX512_22_outShufHi1(SB), Z6
+ VMOVDQU64 (AX), Z7
+ VPERMB Z7, Z0, Z0
+ VGF2P8AFFINEQB $0, expandAVX512_22_mat0<>(SB), Z0, Z0
+ VPERMB Z7, Z2, Z2
+ VGF2P8AFFINEQB $0, expandAVX512_22_mat1<>(SB), Z2, Z2
+ VPERMB Z7, Z3, Z3
+ VGF2P8AFFINEQB $0, expandAVX512_22_mat2<>(SB), Z3, Z3
+ VPERMB Z7, Z4, Z4
+ VGF2P8AFFINEQB $0, expandAVX512_22_mat3<>(SB), Z4, Z4
+ VPERMI2B Z2, Z0, Z1
+ MOVQ $0xffff03fffc0ffff, AX
+ KMOVQ AX, K1
+ VPERMI2B.Z Z3, Z2, K1, Z5
+ MOVQ $0xf0000fc0003f0000, AX
+ KMOVQ AX, K1
+ VPERMB.Z Z4, Z6, K1, Z0
+ VPORQ Z0, Z5, Z2
+ RET
+
+GLOBL expandAVX512_24_inShuf0<>(SB), RODATA, $0x40
+DATA expandAVX512_24_inShuf0<>+0x00(SB)/8, $0x0202010101000000
+DATA expandAVX512_24_inShuf0<>+0x08(SB)/8, $0x0202010101000000
+DATA expandAVX512_24_inShuf0<>+0x10(SB)/8, $0x0202010101000000
+DATA expandAVX512_24_inShuf0<>+0x18(SB)/8, $0x0202010101000000
+DATA expandAVX512_24_inShuf0<>+0x20(SB)/8, $0x0202010101000000
+DATA expandAVX512_24_inShuf0<>+0x28(SB)/8, $0xff02010101000000
+DATA expandAVX512_24_inShuf0<>+0x30(SB)/8, $0xffff010101000000
+DATA expandAVX512_24_inShuf0<>+0x38(SB)/8, $0xffff010101000000
+
+GLOBL expandAVX512_24_mat0<>(SB), RODATA, $0x40
+DATA expandAVX512_24_mat0<>+0x00(SB)/8, $0x0101010101010101
+DATA expandAVX512_24_mat0<>+0x08(SB)/8, $0x0202020202020202
+DATA expandAVX512_24_mat0<>+0x10(SB)/8, $0x0404040404040404
+DATA expandAVX512_24_mat0<>+0x18(SB)/8, $0x0808080808080808
+DATA expandAVX512_24_mat0<>+0x20(SB)/8, $0x1010101010101010
+DATA expandAVX512_24_mat0<>+0x28(SB)/8, $0x2020202020202020
+DATA expandAVX512_24_mat0<>+0x30(SB)/8, $0x4040404040404040
+DATA expandAVX512_24_mat0<>+0x38(SB)/8, $0x8080808080808080
+
+GLOBL expandAVX512_24_inShuf1<>(SB), RODATA, $0x40
+DATA expandAVX512_24_inShuf1<>+0x00(SB)/8, $0xffffffffffffff02
+DATA expandAVX512_24_inShuf1<>+0x08(SB)/8, $0xffffffffffffff02
+DATA expandAVX512_24_inShuf1<>+0x10(SB)/8, $0xffffffffffffff02
+DATA expandAVX512_24_inShuf1<>+0x18(SB)/8, $0xffffffffffffff02
+DATA expandAVX512_24_inShuf1<>+0x20(SB)/8, $0xffffffffffffff02
+DATA expandAVX512_24_inShuf1<>+0x28(SB)/8, $0x0404040303030202
+DATA expandAVX512_24_inShuf1<>+0x30(SB)/8, $0x0404030303020202
+DATA expandAVX512_24_inShuf1<>+0x38(SB)/8, $0x0404030303020202
+
+GLOBL expandAVX512_24_inShuf2<>(SB), RODATA, $0x40
+DATA expandAVX512_24_inShuf2<>+0x00(SB)/8, $0x0505040404030303
+DATA expandAVX512_24_inShuf2<>+0x08(SB)/8, $0x0505040404030303
+DATA expandAVX512_24_inShuf2<>+0x10(SB)/8, $0x0505040404030303
+DATA expandAVX512_24_inShuf2<>+0x18(SB)/8, $0xffff040404030303
+DATA expandAVX512_24_inShuf2<>+0x20(SB)/8, $0xffff040404030303
+DATA expandAVX512_24_inShuf2<>+0x28(SB)/8, $0xffffffffffffff04
+DATA expandAVX512_24_inShuf2<>+0x30(SB)/8, $0xffffffffffffff04
+DATA expandAVX512_24_inShuf2<>+0x38(SB)/8, $0xffffffffffffff05
+
+GLOBL expandAVX512_24_mat2<>(SB), RODATA, $0x40
+DATA expandAVX512_24_mat2<>+0x00(SB)/8, $0x0101010101010101
+DATA expandAVX512_24_mat2<>+0x08(SB)/8, $0x0202020202020202
+DATA expandAVX512_24_mat2<>+0x10(SB)/8, $0x0404040404040404
+DATA expandAVX512_24_mat2<>+0x18(SB)/8, $0x0808080808080808
+DATA expandAVX512_24_mat2<>+0x20(SB)/8, $0x1010101010101010
+DATA expandAVX512_24_mat2<>+0x28(SB)/8, $0x4040404040404040
+DATA expandAVX512_24_mat2<>+0x30(SB)/8, $0x8080808080808080
+DATA expandAVX512_24_mat2<>+0x38(SB)/8, $0x0101010101010101
+
+GLOBL expandAVX512_24_inShuf3<>(SB), RODATA, $0x40
+DATA expandAVX512_24_inShuf3<>+0x00(SB)/8, $0xffffffffffffff05
+DATA expandAVX512_24_inShuf3<>+0x08(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_24_inShuf3<>+0x10(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_24_inShuf3<>+0x18(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_24_inShuf3<>+0x20(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_24_inShuf3<>+0x28(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_24_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_24_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff
+
+GLOBL expandAVX512_24_mat3<>(SB), RODATA, $0x40
+DATA expandAVX512_24_mat3<>+0x00(SB)/8, $0x0202020202020202
+DATA expandAVX512_24_mat3<>+0x08(SB)/8, $0x0000000000000000
+DATA expandAVX512_24_mat3<>+0x10(SB)/8, $0x0000000000000000
+DATA expandAVX512_24_mat3<>+0x18(SB)/8, $0x0000000000000000
+DATA expandAVX512_24_mat3<>+0x20(SB)/8, $0x0000000000000000
+DATA expandAVX512_24_mat3<>+0x28(SB)/8, $0x0000000000000000
+DATA expandAVX512_24_mat3<>+0x30(SB)/8, $0x0000000000000000
+DATA expandAVX512_24_mat3<>+0x38(SB)/8, $0x0000000000000000
+
+GLOBL expandAVX512_24_outShufLo(SB), RODATA, $0x40
+DATA expandAVX512_24_outShufLo+0x00(SB)/8, $0x11100a0908020100
+DATA expandAVX512_24_outShufLo+0x08(SB)/8, $0x282221201a191812
+DATA expandAVX512_24_outShufLo+0x10(SB)/8, $0x3a39383231302a29
+DATA expandAVX512_24_outShufLo+0x18(SB)/8, $0x14130d0c0b050403
+DATA expandAVX512_24_outShufLo+0x20(SB)/8, $0x2b2524231d1c1b15
+DATA expandAVX512_24_outShufLo+0x28(SB)/8, $0x3d3c3b3534332d2c
+DATA expandAVX512_24_outShufLo+0x30(SB)/8, $0x1716480f0e400706
+DATA expandAVX512_24_outShufLo+0x38(SB)/8, $0x2e602726581f1e50
+
+GLOBL expandAVX512_24_outShufHi0(SB), RODATA, $0x40
+DATA expandAVX512_24_outShufHi0+0x00(SB)/8, $0x3a39383231302928
+DATA expandAVX512_24_outShufHi0+0x08(SB)/8, $0x51504a4948424140
+DATA expandAVX512_24_outShufHi0+0x10(SB)/8, $0x2a6261605a595852
+DATA expandAVX512_24_outShufHi0+0x18(SB)/8, $0x3d3c3b3534332c2b
+DATA expandAVX512_24_outShufHi0+0x20(SB)/8, $0x54534d4c4b454443
+DATA expandAVX512_24_outShufHi0+0x28(SB)/8, $0x2d6564635d5c5b55
+DATA expandAVX512_24_outShufHi0+0x30(SB)/8, $0x703f3e6837362f2e
+DATA expandAVX512_24_outShufHi0+0x38(SB)/8, $0x5756ff4f4e784746
+
+GLOBL expandAVX512_24_outShufHi1(SB), RODATA, $0x40
+DATA expandAVX512_24_outShufHi1+0x00(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_24_outShufHi1+0x08(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_24_outShufHi1+0x10(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_24_outShufHi1+0x18(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_24_outShufHi1+0x20(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_24_outShufHi1+0x28(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_24_outShufHi1+0x30(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_24_outShufHi1+0x38(SB)/8, $0xffff00ffffffffff
+
+TEXT expandAVX512_24<>(SB), NOSPLIT, $0-0
+ VMOVDQU64 expandAVX512_24_inShuf0<>(SB), Z0
+ VMOVDQU64 expandAVX512_24_mat0<>(SB), Z2
+ VMOVDQU64 expandAVX512_24_inShuf1<>(SB), Z3
+ VMOVDQU64 expandAVX512_24_inShuf2<>(SB), Z4
+ VMOVDQU64 expandAVX512_24_inShuf3<>(SB), Z5
+ VMOVDQU64 expandAVX512_24_outShufLo(SB), Z1
+ VMOVDQU64 expandAVX512_24_outShufHi0(SB), Z6
+ VMOVDQU64 expandAVX512_24_outShufHi1(SB), Z7
+ VMOVDQU64 (AX), Z8
+ VPERMB Z8, Z0, Z0
+ VGF2P8AFFINEQB $0, Z2, Z0, Z0
+ VPERMB Z8, Z3, Z3
+ VGF2P8AFFINEQB $0, Z2, Z3, Z2
+ VPERMB Z8, Z4, Z3
+ VGF2P8AFFINEQB $0, expandAVX512_24_mat2<>(SB), Z3, Z3
+ VPERMB Z8, Z5, Z4
+ VGF2P8AFFINEQB $0, expandAVX512_24_mat3<>(SB), Z4, Z4
+ VPERMI2B Z2, Z0, Z1
+ MOVQ $0xdfffffffffffffff, AX
+ KMOVQ AX, K1
+ VPERMI2B.Z Z3, Z2, K1, Z6
+ MOVQ $0x2000000000000000, AX
+ KMOVQ AX, K1
+ VPERMB.Z Z4, Z7, K1, Z0
+ VPORQ Z0, Z6, Z2
+ RET
+
+GLOBL expandAVX512_26_inShuf0<>(SB), RODATA, $0x40
+DATA expandAVX512_26_inShuf0<>+0x00(SB)/8, $0x0202010101000000
+DATA expandAVX512_26_inShuf0<>+0x08(SB)/8, $0xffffffffff020100
+DATA expandAVX512_26_inShuf0<>+0x10(SB)/8, $0xffff020201010000
+DATA expandAVX512_26_inShuf0<>+0x18(SB)/8, $0xffffffffff020100
+DATA expandAVX512_26_inShuf0<>+0x20(SB)/8, $0xffff020201010000
+DATA expandAVX512_26_inShuf0<>+0x28(SB)/8, $0xffffffffff020100
+DATA expandAVX512_26_inShuf0<>+0x30(SB)/8, $0x0202010101000000
+DATA expandAVX512_26_inShuf0<>+0x38(SB)/8, $0xffff010101000000
+
+GLOBL expandAVX512_26_mat0<>(SB), RODATA, $0x40
+DATA expandAVX512_26_mat0<>+0x00(SB)/8, $0x0101010101010101
+DATA expandAVX512_26_mat0<>+0x08(SB)/8, $0x0101020202020202
+DATA expandAVX512_26_mat0<>+0x10(SB)/8, $0x0202020202020202
+DATA expandAVX512_26_mat0<>+0x18(SB)/8, $0x0202020204040404
+DATA expandAVX512_26_mat0<>+0x20(SB)/8, $0x0404040404040404
+DATA expandAVX512_26_mat0<>+0x28(SB)/8, $0x0404040404040808
+DATA expandAVX512_26_mat0<>+0x30(SB)/8, $0x0808080808080808
+DATA expandAVX512_26_mat0<>+0x38(SB)/8, $0x1010101010101010
+
+GLOBL expandAVX512_26_inShuf1<>(SB), RODATA, $0x40
+DATA expandAVX512_26_inShuf1<>+0x00(SB)/8, $0xffffffffffff0100
+DATA expandAVX512_26_inShuf1<>+0x08(SB)/8, $0xffffffff01010000
+DATA expandAVX512_26_inShuf1<>+0x10(SB)/8, $0xffffffffffff0100
+DATA expandAVX512_26_inShuf1<>+0x18(SB)/8, $0xffffffff01010000
+DATA expandAVX512_26_inShuf1<>+0x20(SB)/8, $0xffffffffffff0100
+DATA expandAVX512_26_inShuf1<>+0x28(SB)/8, $0xffff010101000000
+DATA expandAVX512_26_inShuf1<>+0x30(SB)/8, $0xffffffffffffff02
+DATA expandAVX512_26_inShuf1<>+0x38(SB)/8, $0xff04040403030302
+
+GLOBL expandAVX512_26_mat1<>(SB), RODATA, $0x40
+DATA expandAVX512_26_mat1<>+0x00(SB)/8, $0x1010202020202020
+DATA expandAVX512_26_mat1<>+0x08(SB)/8, $0x2020202020202020
+DATA expandAVX512_26_mat1<>+0x10(SB)/8, $0x2020202040404040
+DATA expandAVX512_26_mat1<>+0x18(SB)/8, $0x4040404040404040
+DATA expandAVX512_26_mat1<>+0x20(SB)/8, $0x4040404040408080
+DATA expandAVX512_26_mat1<>+0x28(SB)/8, $0x8080808080808080
+DATA expandAVX512_26_mat1<>+0x30(SB)/8, $0x0101010101010101
+DATA expandAVX512_26_mat1<>+0x38(SB)/8, $0x0808080808080808
+
+GLOBL expandAVX512_26_inShuf2<>(SB), RODATA, $0x40
+DATA expandAVX512_26_inShuf2<>+0x00(SB)/8, $0x0404030303020202
+DATA expandAVX512_26_inShuf2<>+0x08(SB)/8, $0xffffffffff040302
+DATA expandAVX512_26_inShuf2<>+0x10(SB)/8, $0xffff040403030202
+DATA expandAVX512_26_inShuf2<>+0x18(SB)/8, $0xffffffffff040302
+DATA expandAVX512_26_inShuf2<>+0x20(SB)/8, $0xffff040403030202
+DATA expandAVX512_26_inShuf2<>+0x28(SB)/8, $0xffffffffff040302
+DATA expandAVX512_26_inShuf2<>+0x30(SB)/8, $0xff04030303020202
+DATA expandAVX512_26_inShuf2<>+0x38(SB)/8, $0xffff040404030303
+
+GLOBL expandAVX512_26_mat2<>(SB), RODATA, $0x40
+DATA expandAVX512_26_mat2<>+0x00(SB)/8, $0x1010101010101010
+DATA expandAVX512_26_mat2<>+0x08(SB)/8, $0x1010202020202020
+DATA expandAVX512_26_mat2<>+0x10(SB)/8, $0x2020202020202020
+DATA expandAVX512_26_mat2<>+0x18(SB)/8, $0x2020202040404040
+DATA expandAVX512_26_mat2<>+0x20(SB)/8, $0x4040404040404040
+DATA expandAVX512_26_mat2<>+0x28(SB)/8, $0x4040404040408080
+DATA expandAVX512_26_mat2<>+0x30(SB)/8, $0x8080808080808080
+DATA expandAVX512_26_mat2<>+0x38(SB)/8, $0x0101010101010101
+
+GLOBL expandAVX512_26_inShuf3<>(SB), RODATA, $0x40
+DATA expandAVX512_26_inShuf3<>+0x00(SB)/8, $0xffffffffffff0403
+DATA expandAVX512_26_inShuf3<>+0x08(SB)/8, $0xffffffff04040303
+DATA expandAVX512_26_inShuf3<>+0x10(SB)/8, $0xffffffffffff0403
+DATA expandAVX512_26_inShuf3<>+0x18(SB)/8, $0xffffffff04040303
+DATA expandAVX512_26_inShuf3<>+0x20(SB)/8, $0xffffffffffff0403
+DATA expandAVX512_26_inShuf3<>+0x28(SB)/8, $0xffffffffffffff04
+DATA expandAVX512_26_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_26_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff
+
+GLOBL expandAVX512_26_mat3<>(SB), RODATA, $0x40
+DATA expandAVX512_26_mat3<>+0x00(SB)/8, $0x0101020202020202
+DATA expandAVX512_26_mat3<>+0x08(SB)/8, $0x0202020202020202
+DATA expandAVX512_26_mat3<>+0x10(SB)/8, $0x0202020204040404
+DATA expandAVX512_26_mat3<>+0x18(SB)/8, $0x0404040404040404
+DATA expandAVX512_26_mat3<>+0x20(SB)/8, $0x0404040404040808
+DATA expandAVX512_26_mat3<>+0x28(SB)/8, $0x1010101010101010
+DATA expandAVX512_26_mat3<>+0x30(SB)/8, $0x0000000000000000
+DATA expandAVX512_26_mat3<>+0x38(SB)/8, $0x0000000000000000
+
+GLOBL expandAVX512_26_outShufLo(SB), RODATA, $0x40
+DATA expandAVX512_26_outShufLo+0x00(SB)/8, $0x2018111008020100
+DATA expandAVX512_26_outShufLo+0x08(SB)/8, $0x3a39383231302821
+DATA expandAVX512_26_outShufLo+0x10(SB)/8, $0x6860595850494840
+DATA expandAVX512_26_outShufLo+0x18(SB)/8, $0x1312090504036a69
+DATA expandAVX512_26_outShufLo+0x20(SB)/8, $0x3b35343329232219
+DATA expandAVX512_26_outShufLo+0x28(SB)/8, $0x5b5a514b4a413d3c
+DATA expandAVX512_26_outShufLo+0x30(SB)/8, $0x0a7007066d6c6b61
+DATA expandAVX512_26_outShufLo+0x38(SB)/8, $0x37362a25241a1514
+
+GLOBL expandAVX512_26_outShufHi0(SB), RODATA, $0x40
+DATA expandAVX512_26_outShufHi0+0x00(SB)/8, $0x5851504842414038
+DATA expandAVX512_26_outShufHi0+0x08(SB)/8, $0x7978727170686160
+DATA expandAVX512_26_outShufHi0+0x10(SB)/8, $0xffffffffffffff7a
+DATA expandAVX512_26_outShufHi0+0x18(SB)/8, $0x52494544433b3a39
+DATA expandAVX512_26_outShufHi0+0x20(SB)/8, $0x7574736963625953
+DATA expandAVX512_26_outShufHi0+0x28(SB)/8, $0xffffffffff7d7c7b
+DATA expandAVX512_26_outShufHi0+0x30(SB)/8, $0xff47463e3d3cffff
+DATA expandAVX512_26_outShufHi0+0x38(SB)/8, $0x766a65645a55544a
+
+GLOBL expandAVX512_26_outShufHi1(SB), RODATA, $0x40
+DATA expandAVX512_26_outShufHi1+0x00(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_26_outShufHi1+0x08(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_26_outShufHi1+0x10(SB)/8, $0x20191810090800ff
+DATA expandAVX512_26_outShufHi1+0x18(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_26_outShufHi1+0x20(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_26_outShufHi1+0x28(SB)/8, $0x1a110b0a01ffffff
+DATA expandAVX512_26_outShufHi1+0x30(SB)/8, $0x28ffffffffff211b
+DATA expandAVX512_26_outShufHi1+0x38(SB)/8, $0xffffffffffffffff
+
+TEXT expandAVX512_26<>(SB), NOSPLIT, $0-0
+ VMOVDQU64 expandAVX512_26_inShuf0<>(SB), Z0
+ VMOVDQU64 expandAVX512_26_inShuf1<>(SB), Z2
+ VMOVDQU64 expandAVX512_26_inShuf2<>(SB), Z3
+ VMOVDQU64 expandAVX512_26_inShuf3<>(SB), Z4
+ VMOVDQU64 expandAVX512_26_outShufLo(SB), Z1
+ VMOVDQU64 expandAVX512_26_outShufHi0(SB), Z5
+ VMOVDQU64 expandAVX512_26_outShufHi1(SB), Z6
+ VMOVDQU64 (AX), Z7
+ VPERMB Z7, Z0, Z0
+ VGF2P8AFFINEQB $0, expandAVX512_26_mat0<>(SB), Z0, Z0
+ VPERMB Z7, Z2, Z2
+ VGF2P8AFFINEQB $0, expandAVX512_26_mat1<>(SB), Z2, Z2
+ VPERMB Z7, Z3, Z3
+ VGF2P8AFFINEQB $0, expandAVX512_26_mat2<>(SB), Z3, Z3
+ VPERMB Z7, Z4, Z4
+ VGF2P8AFFINEQB $0, expandAVX512_26_mat3<>(SB), Z4, Z4
+ VPERMI2B Z2, Z0, Z1
+ MOVQ $0xff7c07ffff01ffff, AX
+ KMOVQ AX, K1
+ VPERMI2B.Z Z3, Z2, K1, Z5
+ MOVQ $0x83f80000fe0000, AX
+ KMOVQ AX, K1
+ VPERMB.Z Z4, Z6, K1, Z0
+ VPORQ Z0, Z5, Z2
+ RET
+
+GLOBL expandAVX512_28_inShuf0<>(SB), RODATA, $0x40
+DATA expandAVX512_28_inShuf0<>+0x00(SB)/8, $0x0202010101000000
+DATA expandAVX512_28_inShuf0<>+0x08(SB)/8, $0xffffffffff020100
+DATA expandAVX512_28_inShuf0<>+0x10(SB)/8, $0x0202010101000000
+DATA expandAVX512_28_inShuf0<>+0x18(SB)/8, $0xff02010101000000
+DATA expandAVX512_28_inShuf0<>+0x20(SB)/8, $0xffffffffffff0100
+DATA expandAVX512_28_inShuf0<>+0x28(SB)/8, $0xffff010101000000
+DATA expandAVX512_28_inShuf0<>+0x30(SB)/8, $0xffff010101000000
+DATA expandAVX512_28_inShuf0<>+0x38(SB)/8, $0xffffffffffff0100
+
+GLOBL expandAVX512_28_mat0<>(SB), RODATA, $0x40
+DATA expandAVX512_28_mat0<>+0x00(SB)/8, $0x0101010101010101
+DATA expandAVX512_28_mat0<>+0x08(SB)/8, $0x0101010102020202
+DATA expandAVX512_28_mat0<>+0x10(SB)/8, $0x0202020202020202
+DATA expandAVX512_28_mat0<>+0x18(SB)/8, $0x0404040404040404
+DATA expandAVX512_28_mat0<>+0x20(SB)/8, $0x0404040408080808
+DATA expandAVX512_28_mat0<>+0x28(SB)/8, $0x0808080808080808
+DATA expandAVX512_28_mat0<>+0x30(SB)/8, $0x1010101010101010
+DATA expandAVX512_28_mat0<>+0x38(SB)/8, $0x1010101020202020
+
+GLOBL expandAVX512_28_inShuf1<>(SB), RODATA, $0x40
+DATA expandAVX512_28_inShuf1<>+0x00(SB)/8, $0xffff010101000000
+DATA expandAVX512_28_inShuf1<>+0x08(SB)/8, $0xffff010101000000
+DATA expandAVX512_28_inShuf1<>+0x10(SB)/8, $0xffffffffffff0100
+DATA expandAVX512_28_inShuf1<>+0x18(SB)/8, $0xffff010101000000
+DATA expandAVX512_28_inShuf1<>+0x20(SB)/8, $0xffffffffffffff02
+DATA expandAVX512_28_inShuf1<>+0x28(SB)/8, $0xffffffffffffff02
+DATA expandAVX512_28_inShuf1<>+0x30(SB)/8, $0x0404040303030202
+DATA expandAVX512_28_inShuf1<>+0x38(SB)/8, $0xffffffffff040302
+
+GLOBL expandAVX512_28_mat1<>(SB), RODATA, $0x40
+DATA expandAVX512_28_mat1<>+0x00(SB)/8, $0x2020202020202020
+DATA expandAVX512_28_mat1<>+0x08(SB)/8, $0x4040404040404040
+DATA expandAVX512_28_mat1<>+0x10(SB)/8, $0x4040404080808080
+DATA expandAVX512_28_mat1<>+0x18(SB)/8, $0x8080808080808080
+DATA expandAVX512_28_mat1<>+0x20(SB)/8, $0x0101010101010101
+DATA expandAVX512_28_mat1<>+0x28(SB)/8, $0x0202020202020202
+DATA expandAVX512_28_mat1<>+0x30(SB)/8, $0x0404040404040404
+DATA expandAVX512_28_mat1<>+0x38(SB)/8, $0x0404040408080808
+
+GLOBL expandAVX512_28_inShuf2<>(SB), RODATA, $0x40
+DATA expandAVX512_28_inShuf2<>+0x00(SB)/8, $0x0404030303020202
+DATA expandAVX512_28_inShuf2<>+0x08(SB)/8, $0x0404030303020202
+DATA expandAVX512_28_inShuf2<>+0x10(SB)/8, $0xffffffffffff0302
+DATA expandAVX512_28_inShuf2<>+0x18(SB)/8, $0xffff030303020202
+DATA expandAVX512_28_inShuf2<>+0x20(SB)/8, $0xffff030303020202
+DATA expandAVX512_28_inShuf2<>+0x28(SB)/8, $0xffffffffffff0302
+DATA expandAVX512_28_inShuf2<>+0x30(SB)/8, $0xffff030303020202
+DATA expandAVX512_28_inShuf2<>+0x38(SB)/8, $0xffff040404030303
+
+GLOBL expandAVX512_28_mat2<>(SB), RODATA, $0x40
+DATA expandAVX512_28_mat2<>+0x00(SB)/8, $0x0808080808080808
+DATA expandAVX512_28_mat2<>+0x08(SB)/8, $0x1010101010101010
+DATA expandAVX512_28_mat2<>+0x10(SB)/8, $0x1010101020202020
+DATA expandAVX512_28_mat2<>+0x18(SB)/8, $0x2020202020202020
+DATA expandAVX512_28_mat2<>+0x20(SB)/8, $0x4040404040404040
+DATA expandAVX512_28_mat2<>+0x28(SB)/8, $0x4040404080808080
+DATA expandAVX512_28_mat2<>+0x30(SB)/8, $0x8080808080808080
+DATA expandAVX512_28_mat2<>+0x38(SB)/8, $0x0101010101010101
+
+GLOBL expandAVX512_28_inShuf3<>(SB), RODATA, $0x40
+DATA expandAVX512_28_inShuf3<>+0x00(SB)/8, $0xffffffffffff0403
+DATA expandAVX512_28_inShuf3<>+0x08(SB)/8, $0xffff040404030303
+DATA expandAVX512_28_inShuf3<>+0x10(SB)/8, $0xffffffffffffff04
+DATA expandAVX512_28_inShuf3<>+0x18(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_28_inShuf3<>+0x20(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_28_inShuf3<>+0x28(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_28_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_28_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff
+
+GLOBL expandAVX512_28_mat3<>(SB), RODATA, $0x40
+DATA expandAVX512_28_mat3<>+0x00(SB)/8, $0x0101010102020202
+DATA expandAVX512_28_mat3<>+0x08(SB)/8, $0x0202020202020202
+DATA expandAVX512_28_mat3<>+0x10(SB)/8, $0x0808080808080808
+DATA expandAVX512_28_mat3<>+0x18(SB)/8, $0x0000000000000000
+DATA expandAVX512_28_mat3<>+0x20(SB)/8, $0x0000000000000000
+DATA expandAVX512_28_mat3<>+0x28(SB)/8, $0x0000000000000000
+DATA expandAVX512_28_mat3<>+0x30(SB)/8, $0x0000000000000000
+DATA expandAVX512_28_mat3<>+0x38(SB)/8, $0x0000000000000000
+
+GLOBL expandAVX512_28_outShufLo(SB), RODATA, $0x40
+DATA expandAVX512_28_outShufLo+0x00(SB)/8, $0x1812111008020100
+DATA expandAVX512_28_outShufLo+0x08(SB)/8, $0x31302a2928201a19
+DATA expandAVX512_28_outShufLo+0x10(SB)/8, $0x4a49484241403832
+DATA expandAVX512_28_outShufLo+0x18(SB)/8, $0x090504035a595850
+DATA expandAVX512_28_outShufLo+0x20(SB)/8, $0x2b211d1c1b151413
+DATA expandAVX512_28_outShufLo+0x28(SB)/8, $0x4443393534332d2c
+DATA expandAVX512_28_outShufLo+0x30(SB)/8, $0x5d5c5b514d4c4b45
+DATA expandAVX512_28_outShufLo+0x38(SB)/8, $0x1e6817160a600706
+
+GLOBL expandAVX512_28_outShufHi0(SB), RODATA, $0x40
+DATA expandAVX512_28_outShufHi0+0x00(SB)/8, $0x4948424140383130
+DATA expandAVX512_28_outShufHi0+0x08(SB)/8, $0x6261605a5958504a
+DATA expandAVX512_28_outShufHi0+0x10(SB)/8, $0xff7a797872717068
+DATA expandAVX512_28_outShufHi0+0x18(SB)/8, $0x4339343332ffffff
+DATA expandAVX512_28_outShufHi0+0x20(SB)/8, $0x5c5b514d4c4b4544
+DATA expandAVX512_28_outShufHi0+0x28(SB)/8, $0x757473696564635d
+DATA expandAVX512_28_outShufHi0+0x30(SB)/8, $0x35ffffffff7d7c7b
+DATA expandAVX512_28_outShufHi0+0x38(SB)/8, $0x4f4eff47463a3736
+
+GLOBL expandAVX512_28_outShufHi1(SB), RODATA, $0x40
+DATA expandAVX512_28_outShufHi1+0x00(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_28_outShufHi1+0x08(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_28_outShufHi1+0x10(SB)/8, $0x00ffffffffffffff
+DATA expandAVX512_28_outShufHi1+0x18(SB)/8, $0xffffffffff0a0908
+DATA expandAVX512_28_outShufHi1+0x20(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_28_outShufHi1+0x28(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_28_outShufHi1+0x30(SB)/8, $0xff0d0c0b01ffffff
+DATA expandAVX512_28_outShufHi1+0x38(SB)/8, $0xffff10ffffffffff
+
+TEXT expandAVX512_28<>(SB), NOSPLIT, $0-0
+ VMOVDQU64 expandAVX512_28_inShuf0<>(SB), Z0
+ VMOVDQU64 expandAVX512_28_inShuf1<>(SB), Z2
+ VMOVDQU64 expandAVX512_28_inShuf2<>(SB), Z3
+ VMOVDQU64 expandAVX512_28_inShuf3<>(SB), Z4
+ VMOVDQU64 expandAVX512_28_outShufLo(SB), Z1
+ VMOVDQU64 expandAVX512_28_outShufHi0(SB), Z5
+ VMOVDQU64 expandAVX512_28_outShufHi1(SB), Z6
+ VMOVDQU64 (AX), Z7
+ VPERMB Z7, Z0, Z0
+ VGF2P8AFFINEQB $0, expandAVX512_28_mat0<>(SB), Z0, Z0
+ VPERMB Z7, Z2, Z2
+ VGF2P8AFFINEQB $0, expandAVX512_28_mat1<>(SB), Z2, Z2
+ VPERMB Z7, Z3, Z3
+ VGF2P8AFFINEQB $0, expandAVX512_28_mat2<>(SB), Z3, Z3
+ VPERMB Z7, Z4, Z4
+ VGF2P8AFFINEQB $0, expandAVX512_28_mat3<>(SB), Z4, Z4
+ VPERMI2B Z2, Z0, Z1
+ MOVQ $0xdf87fffff87fffff, AX
+ KMOVQ AX, K1
+ VPERMI2B.Z Z3, Z2, K1, Z5
+ MOVQ $0x2078000007800000, AX
+ KMOVQ AX, K1
+ VPERMB.Z Z4, Z6, K1, Z0
+ VPORQ Z0, Z5, Z2
+ RET
+
+GLOBL expandAVX512_30_inShuf0<>(SB), RODATA, $0x40
+DATA expandAVX512_30_inShuf0<>+0x00(SB)/8, $0x0202010101000000
+DATA expandAVX512_30_inShuf0<>+0x08(SB)/8, $0xffffffffff020100
+DATA expandAVX512_30_inShuf0<>+0x10(SB)/8, $0xffff010101000000
+DATA expandAVX512_30_inShuf0<>+0x18(SB)/8, $0xffffffffffff0100
+DATA expandAVX512_30_inShuf0<>+0x20(SB)/8, $0xffff010101000000
+DATA expandAVX512_30_inShuf0<>+0x28(SB)/8, $0xffffffffffff0100
+DATA expandAVX512_30_inShuf0<>+0x30(SB)/8, $0xffff010101000000
+DATA expandAVX512_30_inShuf0<>+0x38(SB)/8, $0xffff010101000000
+
+GLOBL expandAVX512_30_mat0<>(SB), RODATA, $0x40
+DATA expandAVX512_30_mat0<>+0x00(SB)/8, $0x0101010101010101
+DATA expandAVX512_30_mat0<>+0x08(SB)/8, $0x0101010101010202
+DATA expandAVX512_30_mat0<>+0x10(SB)/8, $0x0202020202020202
+DATA expandAVX512_30_mat0<>+0x18(SB)/8, $0x0202020204040404
+DATA expandAVX512_30_mat0<>+0x20(SB)/8, $0x0404040404040404
+DATA expandAVX512_30_mat0<>+0x28(SB)/8, $0x0404080808080808
+DATA expandAVX512_30_mat0<>+0x30(SB)/8, $0x0808080808080808
+DATA expandAVX512_30_mat0<>+0x38(SB)/8, $0x1010101010101010
+
+GLOBL expandAVX512_30_inShuf1<>(SB), RODATA, $0x40
+DATA expandAVX512_30_inShuf1<>+0x00(SB)/8, $0xffffffffffff0100
+DATA expandAVX512_30_inShuf1<>+0x08(SB)/8, $0xffff010101000000
+DATA expandAVX512_30_inShuf1<>+0x10(SB)/8, $0xffffffffffff0100
+DATA expandAVX512_30_inShuf1<>+0x18(SB)/8, $0xffff010101000000
+DATA expandAVX512_30_inShuf1<>+0x20(SB)/8, $0xffffffffffff0100
+DATA expandAVX512_30_inShuf1<>+0x28(SB)/8, $0xffff010101000000
+DATA expandAVX512_30_inShuf1<>+0x30(SB)/8, $0xffffffffffffff02
+DATA expandAVX512_30_inShuf1<>+0x38(SB)/8, $0x0404030303020202
+
+GLOBL expandAVX512_30_mat1<>(SB), RODATA, $0x40
+DATA expandAVX512_30_mat1<>+0x00(SB)/8, $0x1010101010102020
+DATA expandAVX512_30_mat1<>+0x08(SB)/8, $0x2020202020202020
+DATA expandAVX512_30_mat1<>+0x10(SB)/8, $0x2020202040404040
+DATA expandAVX512_30_mat1<>+0x18(SB)/8, $0x4040404040404040
+DATA expandAVX512_30_mat1<>+0x20(SB)/8, $0x4040808080808080
+DATA expandAVX512_30_mat1<>+0x28(SB)/8, $0x8080808080808080
+DATA expandAVX512_30_mat1<>+0x30(SB)/8, $0x0101010101010101
+DATA expandAVX512_30_mat1<>+0x38(SB)/8, $0x0202020202020202
+
+GLOBL expandAVX512_30_inShuf2<>(SB), RODATA, $0x40
+DATA expandAVX512_30_inShuf2<>+0x00(SB)/8, $0xffffffffff040302
+DATA expandAVX512_30_inShuf2<>+0x08(SB)/8, $0xffff030303020202
+DATA expandAVX512_30_inShuf2<>+0x10(SB)/8, $0xffffffffffff0302
+DATA expandAVX512_30_inShuf2<>+0x18(SB)/8, $0xffff030303020202
+DATA expandAVX512_30_inShuf2<>+0x20(SB)/8, $0xffff030303020202
+DATA expandAVX512_30_inShuf2<>+0x28(SB)/8, $0xffffffffffff0302
+DATA expandAVX512_30_inShuf2<>+0x30(SB)/8, $0xffff030303020202
+DATA expandAVX512_30_inShuf2<>+0x38(SB)/8, $0xffffffffffff0302
+
+GLOBL expandAVX512_30_mat2<>(SB), RODATA, $0x40
+DATA expandAVX512_30_mat2<>+0x00(SB)/8, $0x0202020204040404
+DATA expandAVX512_30_mat2<>+0x08(SB)/8, $0x0404040404040404
+DATA expandAVX512_30_mat2<>+0x10(SB)/8, $0x0404080808080808
+DATA expandAVX512_30_mat2<>+0x18(SB)/8, $0x0808080808080808
+DATA expandAVX512_30_mat2<>+0x20(SB)/8, $0x1010101010101010
+DATA expandAVX512_30_mat2<>+0x28(SB)/8, $0x1010101010102020
+DATA expandAVX512_30_mat2<>+0x30(SB)/8, $0x2020202020202020
+DATA expandAVX512_30_mat2<>+0x38(SB)/8, $0x2020202040404040
+
+GLOBL expandAVX512_30_inShuf3<>(SB), RODATA, $0x40
+DATA expandAVX512_30_inShuf3<>+0x00(SB)/8, $0xffff030303020202
+DATA expandAVX512_30_inShuf3<>+0x08(SB)/8, $0xffffffffffff0302
+DATA expandAVX512_30_inShuf3<>+0x10(SB)/8, $0xffff030303020202
+DATA expandAVX512_30_inShuf3<>+0x18(SB)/8, $0xffff040404030303
+DATA expandAVX512_30_inShuf3<>+0x20(SB)/8, $0xffffffffffff0403
+DATA expandAVX512_30_inShuf3<>+0x28(SB)/8, $0xffffffffffffff04
+DATA expandAVX512_30_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_30_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff
+
+GLOBL expandAVX512_30_mat3<>(SB), RODATA, $0x40
+DATA expandAVX512_30_mat3<>+0x00(SB)/8, $0x4040404040404040
+DATA expandAVX512_30_mat3<>+0x08(SB)/8, $0x4040808080808080
+DATA expandAVX512_30_mat3<>+0x10(SB)/8, $0x8080808080808080
+DATA expandAVX512_30_mat3<>+0x18(SB)/8, $0x0101010101010101
+DATA expandAVX512_30_mat3<>+0x20(SB)/8, $0x0101010101010202
+DATA expandAVX512_30_mat3<>+0x28(SB)/8, $0x0202020202020202
+DATA expandAVX512_30_mat3<>+0x30(SB)/8, $0x0000000000000000
+DATA expandAVX512_30_mat3<>+0x38(SB)/8, $0x0000000000000000
+
+GLOBL expandAVX512_30_outShufLo(SB), RODATA, $0x40
+DATA expandAVX512_30_outShufLo+0x00(SB)/8, $0x1812111008020100
+DATA expandAVX512_30_outShufLo+0x08(SB)/8, $0x3832313028222120
+DATA expandAVX512_30_outShufLo+0x10(SB)/8, $0x58504a4948403a39
+DATA expandAVX512_30_outShufLo+0x18(SB)/8, $0x04036a6968605a59
+DATA expandAVX512_30_outShufLo+0x20(SB)/8, $0x2423191514130905
+DATA expandAVX512_30_outShufLo+0x28(SB)/8, $0x3d3c3b3534332925
+DATA expandAVX512_30_outShufLo+0x30(SB)/8, $0x5d5c5b514d4c4b41
+DATA expandAVX512_30_outShufLo+0x38(SB)/8, $0x0a7007066d6c6b61
+
+GLOBL expandAVX512_30_outShufHi0(SB), RODATA, $0x40
+DATA expandAVX512_30_outShufHi0+0x00(SB)/8, $0x504a4948403a3938
+DATA expandAVX512_30_outShufHi0+0x08(SB)/8, $0x70686261605a5958
+DATA expandAVX512_30_outShufHi0+0x10(SB)/8, $0xffffffffff787271
+DATA expandAVX512_30_outShufHi0+0x18(SB)/8, $0x3c3bffffffffffff
+DATA expandAVX512_30_outShufHi0+0x20(SB)/8, $0x5c5b514d4c4b413d
+DATA expandAVX512_30_outShufHi0+0x28(SB)/8, $0x757473696564635d
+DATA expandAVX512_30_outShufHi0+0x30(SB)/8, $0xffffffffffffff79
+DATA expandAVX512_30_outShufHi0+0x38(SB)/8, $0x42ff3f3effffffff
+
+GLOBL expandAVX512_30_outShufHi1(SB), RODATA, $0x40
+DATA expandAVX512_30_outShufHi1+0x00(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_30_outShufHi1+0x08(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_30_outShufHi1+0x10(SB)/8, $0x1008020100ffffff
+DATA expandAVX512_30_outShufHi1+0x18(SB)/8, $0xffff201a19181211
+DATA expandAVX512_30_outShufHi1+0x20(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_30_outShufHi1+0x28(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_30_outShufHi1+0x30(SB)/8, $0x15141309050403ff
+DATA expandAVX512_30_outShufHi1+0x38(SB)/8, $0xff28ffff211d1c1b
+
+TEXT expandAVX512_30<>(SB), NOSPLIT, $0-0
+ VMOVDQU64 expandAVX512_30_inShuf0<>(SB), Z0
+ VMOVDQU64 expandAVX512_30_inShuf1<>(SB), Z2
+ VMOVDQU64 expandAVX512_30_inShuf2<>(SB), Z3
+ VMOVDQU64 expandAVX512_30_inShuf3<>(SB), Z4
+ VMOVDQU64 expandAVX512_30_outShufLo(SB), Z1
+ VMOVDQU64 expandAVX512_30_outShufHi0(SB), Z5
+ VMOVDQU64 expandAVX512_30_outShufHi1(SB), Z6
+ VMOVDQU64 (AX), Z7
+ VPERMB Z7, Z0, Z0
+ VGF2P8AFFINEQB $0, expandAVX512_30_mat0<>(SB), Z0, Z0
+ VPERMB Z7, Z2, Z2
+ VGF2P8AFFINEQB $0, expandAVX512_30_mat1<>(SB), Z2, Z2
+ VPERMB Z7, Z3, Z3
+ VGF2P8AFFINEQB $0, expandAVX512_30_mat2<>(SB), Z3, Z3
+ VPERMB Z7, Z4, Z4
+ VGF2P8AFFINEQB $0, expandAVX512_30_mat3<>(SB), Z4, Z4
+ VPERMI2B Z2, Z0, Z1
+ MOVQ $0xb001ffffc007ffff, AX
+ KMOVQ AX, K1
+ VPERMI2B.Z Z3, Z2, K1, Z5
+ MOVQ $0x4ffe00003ff80000, AX
+ KMOVQ AX, K1
+ VPERMB.Z Z4, Z6, K1, Z0
+ VPORQ Z0, Z5, Z2
+ RET
+
+GLOBL expandAVX512_32_inShuf0<>(SB), RODATA, $0x40
+DATA expandAVX512_32_inShuf0<>+0x00(SB)/8, $0x0101010100000000
+DATA expandAVX512_32_inShuf0<>+0x08(SB)/8, $0x0101010100000000
+DATA expandAVX512_32_inShuf0<>+0x10(SB)/8, $0x0101010100000000
+DATA expandAVX512_32_inShuf0<>+0x18(SB)/8, $0x0101010100000000
+DATA expandAVX512_32_inShuf0<>+0x20(SB)/8, $0x0101010100000000
+DATA expandAVX512_32_inShuf0<>+0x28(SB)/8, $0x0101010100000000
+DATA expandAVX512_32_inShuf0<>+0x30(SB)/8, $0x0101010100000000
+DATA expandAVX512_32_inShuf0<>+0x38(SB)/8, $0x0101010100000000
+
+GLOBL expandAVX512_32_mat0<>(SB), RODATA, $0x40
+DATA expandAVX512_32_mat0<>+0x00(SB)/8, $0x0101010101010101
+DATA expandAVX512_32_mat0<>+0x08(SB)/8, $0x0202020202020202
+DATA expandAVX512_32_mat0<>+0x10(SB)/8, $0x0404040404040404
+DATA expandAVX512_32_mat0<>+0x18(SB)/8, $0x0808080808080808
+DATA expandAVX512_32_mat0<>+0x20(SB)/8, $0x1010101010101010
+DATA expandAVX512_32_mat0<>+0x28(SB)/8, $0x2020202020202020
+DATA expandAVX512_32_mat0<>+0x30(SB)/8, $0x4040404040404040
+DATA expandAVX512_32_mat0<>+0x38(SB)/8, $0x8080808080808080
+
+GLOBL expandAVX512_32_inShuf1<>(SB), RODATA, $0x40
+DATA expandAVX512_32_inShuf1<>+0x00(SB)/8, $0x0303030302020202
+DATA expandAVX512_32_inShuf1<>+0x08(SB)/8, $0x0303030302020202
+DATA expandAVX512_32_inShuf1<>+0x10(SB)/8, $0x0303030302020202
+DATA expandAVX512_32_inShuf1<>+0x18(SB)/8, $0x0303030302020202
+DATA expandAVX512_32_inShuf1<>+0x20(SB)/8, $0x0303030302020202
+DATA expandAVX512_32_inShuf1<>+0x28(SB)/8, $0x0303030302020202
+DATA expandAVX512_32_inShuf1<>+0x30(SB)/8, $0x0303030302020202
+DATA expandAVX512_32_inShuf1<>+0x38(SB)/8, $0x0303030302020202
+
+GLOBL expandAVX512_32_outShufLo(SB), RODATA, $0x40
+DATA expandAVX512_32_outShufLo+0x00(SB)/8, $0x0b0a090803020100
+DATA expandAVX512_32_outShufLo+0x08(SB)/8, $0x1b1a191813121110
+DATA expandAVX512_32_outShufLo+0x10(SB)/8, $0x2b2a292823222120
+DATA expandAVX512_32_outShufLo+0x18(SB)/8, $0x3b3a393833323130
+DATA expandAVX512_32_outShufLo+0x20(SB)/8, $0x0f0e0d0c07060504
+DATA expandAVX512_32_outShufLo+0x28(SB)/8, $0x1f1e1d1c17161514
+DATA expandAVX512_32_outShufLo+0x30(SB)/8, $0x2f2e2d2c27262524
+DATA expandAVX512_32_outShufLo+0x38(SB)/8, $0x3f3e3d3c37363534
+
+TEXT expandAVX512_32<>(SB), NOSPLIT, $0-0
+ VMOVDQU64 expandAVX512_32_inShuf0<>(SB), Z0
+ VMOVDQU64 expandAVX512_32_mat0<>(SB), Z1
+ VMOVDQU64 expandAVX512_32_inShuf1<>(SB), Z2
+ VMOVDQU64 expandAVX512_32_outShufLo(SB), Z3
+ VMOVDQU64 (AX), Z4
+ VPERMB Z4, Z0, Z0
+ VGF2P8AFFINEQB $0, Z1, Z0, Z0
+ VPERMB Z4, Z2, Z2
+ VGF2P8AFFINEQB $0, Z1, Z2, Z2
+ VPERMB Z0, Z3, Z1
+ VPERMB Z2, Z3, Z2
+ RET
+
+GLOBL expandAVX512_36_inShuf0<>(SB), RODATA, $0x40
+DATA expandAVX512_36_inShuf0<>+0x00(SB)/8, $0x0101010100000000
+DATA expandAVX512_36_inShuf0<>+0x08(SB)/8, $0xffffffffffff0100
+DATA expandAVX512_36_inShuf0<>+0x10(SB)/8, $0x0101010100000000
+DATA expandAVX512_36_inShuf0<>+0x18(SB)/8, $0x0101010100000000
+DATA expandAVX512_36_inShuf0<>+0x20(SB)/8, $0xffffffffffff0100
+DATA expandAVX512_36_inShuf0<>+0x28(SB)/8, $0x0101010100000000
+DATA expandAVX512_36_inShuf0<>+0x30(SB)/8, $0x0101010100000000
+DATA expandAVX512_36_inShuf0<>+0x38(SB)/8, $0xffffffffffff0100
+
+GLOBL expandAVX512_36_mat0<>(SB), RODATA, $0x40
+DATA expandAVX512_36_mat0<>+0x00(SB)/8, $0x0101010101010101
+DATA expandAVX512_36_mat0<>+0x08(SB)/8, $0x0101010102020202
+DATA expandAVX512_36_mat0<>+0x10(SB)/8, $0x0202020202020202
+DATA expandAVX512_36_mat0<>+0x18(SB)/8, $0x0404040404040404
+DATA expandAVX512_36_mat0<>+0x20(SB)/8, $0x0404040408080808
+DATA expandAVX512_36_mat0<>+0x28(SB)/8, $0x0808080808080808
+DATA expandAVX512_36_mat0<>+0x30(SB)/8, $0x1010101010101010
+DATA expandAVX512_36_mat0<>+0x38(SB)/8, $0x1010101020202020
+
+GLOBL expandAVX512_36_inShuf1<>(SB), RODATA, $0x40
+DATA expandAVX512_36_inShuf1<>+0x00(SB)/8, $0x0101010100000000
+DATA expandAVX512_36_inShuf1<>+0x08(SB)/8, $0xffffff0100000000
+DATA expandAVX512_36_inShuf1<>+0x10(SB)/8, $0xffffffffffffff00
+DATA expandAVX512_36_inShuf1<>+0x18(SB)/8, $0xffffffff00000000
+DATA expandAVX512_36_inShuf1<>+0x20(SB)/8, $0xff02020202010101
+DATA expandAVX512_36_inShuf1<>+0x28(SB)/8, $0xffffffffffff0201
+DATA expandAVX512_36_inShuf1<>+0x30(SB)/8, $0x0202020201010101
+DATA expandAVX512_36_inShuf1<>+0x38(SB)/8, $0x0303030302020202
+
+GLOBL expandAVX512_36_mat1<>(SB), RODATA, $0x40
+DATA expandAVX512_36_mat1<>+0x00(SB)/8, $0x2020202020202020
+DATA expandAVX512_36_mat1<>+0x08(SB)/8, $0x4040404040404040
+DATA expandAVX512_36_mat1<>+0x10(SB)/8, $0x4040404080808080
+DATA expandAVX512_36_mat1<>+0x18(SB)/8, $0x8080808080808080
+DATA expandAVX512_36_mat1<>+0x20(SB)/8, $0x4040404040404040
+DATA expandAVX512_36_mat1<>+0x28(SB)/8, $0x4040404080808080
+DATA expandAVX512_36_mat1<>+0x30(SB)/8, $0x8080808080808080
+DATA expandAVX512_36_mat1<>+0x38(SB)/8, $0x0101010101010101
+
+GLOBL expandAVX512_36_inShuf2<>(SB), RODATA, $0x40
+DATA expandAVX512_36_inShuf2<>+0x00(SB)/8, $0xffffffffffff0302
+DATA expandAVX512_36_inShuf2<>+0x08(SB)/8, $0x0303030302020202
+DATA expandAVX512_36_inShuf2<>+0x10(SB)/8, $0x0303030302020202
+DATA expandAVX512_36_inShuf2<>+0x18(SB)/8, $0xffffffffffff0302
+DATA expandAVX512_36_inShuf2<>+0x20(SB)/8, $0x0303030302020202
+DATA expandAVX512_36_inShuf2<>+0x28(SB)/8, $0xffff030302020202
+DATA expandAVX512_36_inShuf2<>+0x30(SB)/8, $0xffffffffffffff02
+DATA expandAVX512_36_inShuf2<>+0x38(SB)/8, $0xffffffff02020202
+
+GLOBL expandAVX512_36_mat2<>(SB), RODATA, $0x40
+DATA expandAVX512_36_mat2<>+0x00(SB)/8, $0x0101010102020202
+DATA expandAVX512_36_mat2<>+0x08(SB)/8, $0x0202020202020202
+DATA expandAVX512_36_mat2<>+0x10(SB)/8, $0x0404040404040404
+DATA expandAVX512_36_mat2<>+0x18(SB)/8, $0x0404040408080808
+DATA expandAVX512_36_mat2<>+0x20(SB)/8, $0x0808080808080808
+DATA expandAVX512_36_mat2<>+0x28(SB)/8, $0x1010101010101010
+DATA expandAVX512_36_mat2<>+0x30(SB)/8, $0x1010101020202020
+DATA expandAVX512_36_mat2<>+0x38(SB)/8, $0x2020202020202020
+
+GLOBL expandAVX512_36_outShufLo(SB), RODATA, $0x40
+DATA expandAVX512_36_outShufLo+0x00(SB)/8, $0x1211100803020100
+DATA expandAVX512_36_outShufLo+0x08(SB)/8, $0x2928201b1a191813
+DATA expandAVX512_36_outShufLo+0x10(SB)/8, $0x4038333231302b2a
+DATA expandAVX512_36_outShufLo+0x18(SB)/8, $0x504b4a4948434241
+DATA expandAVX512_36_outShufLo+0x20(SB)/8, $0x070605045b5a5958
+DATA expandAVX512_36_outShufLo+0x28(SB)/8, $0x1e1d1c1716151409
+DATA expandAVX512_36_outShufLo+0x30(SB)/8, $0x35342f2e2d2c211f
+DATA expandAVX512_36_outShufLo+0x38(SB)/8, $0x4c47464544393736
+
+GLOBL expandAVX512_36_outShufHi(SB), RODATA, $0x40
+DATA expandAVX512_36_outShufHi+0x00(SB)/8, $0x3332313028222120
+DATA expandAVX512_36_outShufHi+0x08(SB)/8, $0x4a4948403b3a3938
+DATA expandAVX512_36_outShufHi+0x10(SB)/8, $0x616058535251504b
+DATA expandAVX512_36_outShufHi+0x18(SB)/8, $0x78706b6a69686362
+DATA expandAVX512_36_outShufHi+0x20(SB)/8, $0x29262524237b7a79
+DATA expandAVX512_36_outShufHi+0x28(SB)/8, $0x3f3e3d3c37363534
+DATA expandAVX512_36_outShufHi+0x30(SB)/8, $0x5655544f4e4d4c41
+DATA expandAVX512_36_outShufHi+0x38(SB)/8, $0x6d6c676665645957
+
+TEXT expandAVX512_36<>(SB), NOSPLIT, $0-0
+ VMOVDQU64 expandAVX512_36_inShuf0<>(SB), Z0
+ VMOVDQU64 expandAVX512_36_inShuf1<>(SB), Z3
+ VMOVDQU64 expandAVX512_36_inShuf2<>(SB), Z4
+ VMOVDQU64 expandAVX512_36_outShufLo(SB), Z1
+ VMOVDQU64 expandAVX512_36_outShufHi(SB), Z2
+ VMOVDQU64 (AX), Z5
+ VPERMB Z5, Z0, Z0
+ VGF2P8AFFINEQB $0, expandAVX512_36_mat0<>(SB), Z0, Z0
+ VPERMB Z5, Z3, Z3
+ VGF2P8AFFINEQB $0, expandAVX512_36_mat1<>(SB), Z3, Z3
+ VPERMB Z5, Z4, Z4
+ VGF2P8AFFINEQB $0, expandAVX512_36_mat2<>(SB), Z4, Z4
+ VPERMI2B Z3, Z0, Z1
+ VPERMI2B Z4, Z3, Z2
+ RET
+
+GLOBL expandAVX512_40_inShuf0<>(SB), RODATA, $0x40
+DATA expandAVX512_40_inShuf0<>+0x00(SB)/8, $0x0101010000000000
+DATA expandAVX512_40_inShuf0<>+0x08(SB)/8, $0x0101010000000000
+DATA expandAVX512_40_inShuf0<>+0x10(SB)/8, $0x0101010000000000
+DATA expandAVX512_40_inShuf0<>+0x18(SB)/8, $0x0101010000000000
+DATA expandAVX512_40_inShuf0<>+0x20(SB)/8, $0x0101010000000000
+DATA expandAVX512_40_inShuf0<>+0x28(SB)/8, $0xffffff0000000000
+DATA expandAVX512_40_inShuf0<>+0x30(SB)/8, $0xffffff0000000000
+DATA expandAVX512_40_inShuf0<>+0x38(SB)/8, $0xffffff0000000000
+
+GLOBL expandAVX512_40_mat0<>(SB), RODATA, $0x40
+DATA expandAVX512_40_mat0<>+0x00(SB)/8, $0x0101010101010101
+DATA expandAVX512_40_mat0<>+0x08(SB)/8, $0x0202020202020202
+DATA expandAVX512_40_mat0<>+0x10(SB)/8, $0x0404040404040404
+DATA expandAVX512_40_mat0<>+0x18(SB)/8, $0x0808080808080808
+DATA expandAVX512_40_mat0<>+0x20(SB)/8, $0x1010101010101010
+DATA expandAVX512_40_mat0<>+0x28(SB)/8, $0x2020202020202020
+DATA expandAVX512_40_mat0<>+0x30(SB)/8, $0x4040404040404040
+DATA expandAVX512_40_mat0<>+0x38(SB)/8, $0x8080808080808080
+
+GLOBL expandAVX512_40_inShuf1<>(SB), RODATA, $0x40
+DATA expandAVX512_40_inShuf1<>+0x00(SB)/8, $0xffffffffffff0101
+DATA expandAVX512_40_inShuf1<>+0x08(SB)/8, $0xffffffffffff0101
+DATA expandAVX512_40_inShuf1<>+0x10(SB)/8, $0xffffffffffff0101
+DATA expandAVX512_40_inShuf1<>+0x18(SB)/8, $0xffffffffffff0101
+DATA expandAVX512_40_inShuf1<>+0x20(SB)/8, $0xffffffffffffff01
+DATA expandAVX512_40_inShuf1<>+0x28(SB)/8, $0xffff020202020201
+DATA expandAVX512_40_inShuf1<>+0x30(SB)/8, $0x0202020101010101
+DATA expandAVX512_40_inShuf1<>+0x38(SB)/8, $0x0202020101010101
+
+GLOBL expandAVX512_40_mat1<>(SB), RODATA, $0x40
+DATA expandAVX512_40_mat1<>+0x00(SB)/8, $0x0101010101010101
+DATA expandAVX512_40_mat1<>+0x08(SB)/8, $0x0202020202020202
+DATA expandAVX512_40_mat1<>+0x10(SB)/8, $0x0404040404040404
+DATA expandAVX512_40_mat1<>+0x18(SB)/8, $0x0808080808080808
+DATA expandAVX512_40_mat1<>+0x20(SB)/8, $0x1010101010101010
+DATA expandAVX512_40_mat1<>+0x28(SB)/8, $0x1010101010101010
+DATA expandAVX512_40_mat1<>+0x30(SB)/8, $0x2020202020202020
+DATA expandAVX512_40_mat1<>+0x38(SB)/8, $0x4040404040404040
+
+GLOBL expandAVX512_40_inShuf2<>(SB), RODATA, $0x40
+DATA expandAVX512_40_inShuf2<>+0x00(SB)/8, $0x0202020101010101
+DATA expandAVX512_40_inShuf2<>+0x08(SB)/8, $0x0303030202020202
+DATA expandAVX512_40_inShuf2<>+0x10(SB)/8, $0x0303030202020202
+DATA expandAVX512_40_inShuf2<>+0x18(SB)/8, $0xffffff0202020202
+DATA expandAVX512_40_inShuf2<>+0x20(SB)/8, $0xffffff0202020202
+DATA expandAVX512_40_inShuf2<>+0x28(SB)/8, $0xffffffffffff0202
+DATA expandAVX512_40_inShuf2<>+0x30(SB)/8, $0xffffffffffff0202
+DATA expandAVX512_40_inShuf2<>+0x38(SB)/8, $0xffffffffffff0202
+
+GLOBL expandAVX512_40_mat2<>(SB), RODATA, $0x40
+DATA expandAVX512_40_mat2<>+0x00(SB)/8, $0x8080808080808080
+DATA expandAVX512_40_mat2<>+0x08(SB)/8, $0x0101010101010101
+DATA expandAVX512_40_mat2<>+0x10(SB)/8, $0x0202020202020202
+DATA expandAVX512_40_mat2<>+0x18(SB)/8, $0x0404040404040404
+DATA expandAVX512_40_mat2<>+0x20(SB)/8, $0x0808080808080808
+DATA expandAVX512_40_mat2<>+0x28(SB)/8, $0x2020202020202020
+DATA expandAVX512_40_mat2<>+0x30(SB)/8, $0x4040404040404040
+DATA expandAVX512_40_mat2<>+0x38(SB)/8, $0x8080808080808080
+
+GLOBL expandAVX512_40_inShuf3<>(SB), RODATA, $0x40
+DATA expandAVX512_40_inShuf3<>+0x00(SB)/8, $0xffffffffffff0303
+DATA expandAVX512_40_inShuf3<>+0x08(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_40_inShuf3<>+0x10(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_40_inShuf3<>+0x18(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_40_inShuf3<>+0x20(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_40_inShuf3<>+0x28(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_40_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_40_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff
+
+GLOBL expandAVX512_40_mat3<>(SB), RODATA, $0x40
+DATA expandAVX512_40_mat3<>+0x00(SB)/8, $0x0101010101010101
+DATA expandAVX512_40_mat3<>+0x08(SB)/8, $0x0000000000000000
+DATA expandAVX512_40_mat3<>+0x10(SB)/8, $0x0000000000000000
+DATA expandAVX512_40_mat3<>+0x18(SB)/8, $0x0000000000000000
+DATA expandAVX512_40_mat3<>+0x20(SB)/8, $0x0000000000000000
+DATA expandAVX512_40_mat3<>+0x28(SB)/8, $0x0000000000000000
+DATA expandAVX512_40_mat3<>+0x30(SB)/8, $0x0000000000000000
+DATA expandAVX512_40_mat3<>+0x38(SB)/8, $0x0000000000000000
+
+GLOBL expandAVX512_40_outShufLo(SB), RODATA, $0x40
+DATA expandAVX512_40_outShufLo+0x00(SB)/8, $0x0a09080403020100
+DATA expandAVX512_40_outShufLo+0x08(SB)/8, $0x1814131211100c0b
+DATA expandAVX512_40_outShufLo+0x10(SB)/8, $0x232221201c1b1a19
+DATA expandAVX512_40_outShufLo+0x18(SB)/8, $0x31302c2b2a292824
+DATA expandAVX512_40_outShufLo+0x20(SB)/8, $0x3c3b3a3938343332
+DATA expandAVX512_40_outShufLo+0x28(SB)/8, $0x0f0e0d4140070605
+DATA expandAVX512_40_outShufLo+0x30(SB)/8, $0x1d51501716154948
+DATA expandAVX512_40_outShufLo+0x38(SB)/8, $0x6027262559581f1e
+
+GLOBL expandAVX512_40_outShufHi0(SB), RODATA, $0x40
+DATA expandAVX512_40_outShufHi0+0x00(SB)/8, $0x3938343332313028
+DATA expandAVX512_40_outShufHi0+0x08(SB)/8, $0x44434241403c3b3a
+DATA expandAVX512_40_outShufHi0+0x10(SB)/8, $0x5251504c4b4a4948
+DATA expandAVX512_40_outShufHi0+0x18(SB)/8, $0x605c5b5a59585453
+DATA expandAVX512_40_outShufHi0+0x20(SB)/8, $0x2c2b2a2964636261
+DATA expandAVX512_40_outShufHi0+0x28(SB)/8, $0x3e3d69683736352d
+DATA expandAVX512_40_outShufHi0+0x30(SB)/8, $0x797847464571703f
+DATA expandAVX512_40_outShufHi0+0x38(SB)/8, $0x575655ffff4f4e4d
+
+GLOBL expandAVX512_40_outShufHi1(SB), RODATA, $0x40
+DATA expandAVX512_40_outShufHi1+0x00(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_40_outShufHi1+0x08(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_40_outShufHi1+0x10(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_40_outShufHi1+0x18(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_40_outShufHi1+0x20(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_40_outShufHi1+0x28(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_40_outShufHi1+0x30(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_40_outShufHi1+0x38(SB)/8, $0xffffff0100ffffff
+
+TEXT expandAVX512_40<>(SB), NOSPLIT, $0-0
+ VMOVDQU64 expandAVX512_40_inShuf0<>(SB), Z0
+ VMOVDQU64 expandAVX512_40_inShuf1<>(SB), Z2
+ VMOVDQU64 expandAVX512_40_inShuf2<>(SB), Z3
+ VMOVDQU64 expandAVX512_40_inShuf3<>(SB), Z4
+ VMOVDQU64 expandAVX512_40_outShufLo(SB), Z1
+ VMOVDQU64 expandAVX512_40_outShufHi0(SB), Z5
+ VMOVDQU64 expandAVX512_40_outShufHi1(SB), Z6
+ VMOVDQU64 (AX), Z7
+ VPERMB Z7, Z0, Z0
+ VGF2P8AFFINEQB $0, expandAVX512_40_mat0<>(SB), Z0, Z0
+ VPERMB Z7, Z2, Z2
+ VGF2P8AFFINEQB $0, expandAVX512_40_mat1<>(SB), Z2, Z2
+ VPERMB Z7, Z3, Z3
+ VGF2P8AFFINEQB $0, expandAVX512_40_mat2<>(SB), Z3, Z3
+ VPERMB Z7, Z4, Z4
+ VGF2P8AFFINEQB $0, expandAVX512_40_mat3<>(SB), Z4, Z4
+ VPERMI2B Z2, Z0, Z1
+ MOVQ $0xe7ffffffffffffff, AX
+ KMOVQ AX, K1
+ VPERMI2B.Z Z3, Z2, K1, Z5
+ MOVQ $0x1800000000000000, AX
+ KMOVQ AX, K1
+ VPERMB.Z Z4, Z6, K1, Z0
+ VPORQ Z0, Z5, Z2
+ RET
+
+GLOBL expandAVX512_44_inShuf0<>(SB), RODATA, $0x40
+DATA expandAVX512_44_inShuf0<>+0x00(SB)/8, $0x0101010000000000
+DATA expandAVX512_44_inShuf0<>+0x08(SB)/8, $0xffffffffffff0100
+DATA expandAVX512_44_inShuf0<>+0x10(SB)/8, $0x0101010000000000
+DATA expandAVX512_44_inShuf0<>+0x18(SB)/8, $0x0101010000000000
+DATA expandAVX512_44_inShuf0<>+0x20(SB)/8, $0xffffffffffff0100
+DATA expandAVX512_44_inShuf0<>+0x28(SB)/8, $0x0101010000000000
+DATA expandAVX512_44_inShuf0<>+0x30(SB)/8, $0xffffff0000000000
+DATA expandAVX512_44_inShuf0<>+0x38(SB)/8, $0xffffffffffffff00
+
+GLOBL expandAVX512_44_mat0<>(SB), RODATA, $0x40
+DATA expandAVX512_44_mat0<>+0x00(SB)/8, $0x0101010101010101
+DATA expandAVX512_44_mat0<>+0x08(SB)/8, $0x0101010102020202
+DATA expandAVX512_44_mat0<>+0x10(SB)/8, $0x0202020202020202
+DATA expandAVX512_44_mat0<>+0x18(SB)/8, $0x0404040404040404
+DATA expandAVX512_44_mat0<>+0x20(SB)/8, $0x0404040408080808
+DATA expandAVX512_44_mat0<>+0x28(SB)/8, $0x0808080808080808
+DATA expandAVX512_44_mat0<>+0x30(SB)/8, $0x1010101010101010
+DATA expandAVX512_44_mat0<>+0x38(SB)/8, $0x1010101020202020
+
+GLOBL expandAVX512_44_inShuf1<>(SB), RODATA, $0x40
+DATA expandAVX512_44_inShuf1<>+0x00(SB)/8, $0xffffff0000000000
+DATA expandAVX512_44_inShuf1<>+0x08(SB)/8, $0xffffff0000000000
+DATA expandAVX512_44_inShuf1<>+0x10(SB)/8, $0xffffffffffffff00
+DATA expandAVX512_44_inShuf1<>+0x18(SB)/8, $0xffffff0000000000
+DATA expandAVX512_44_inShuf1<>+0x20(SB)/8, $0xffffffffffff0101
+DATA expandAVX512_44_inShuf1<>+0x28(SB)/8, $0xffffffffffff0101
+DATA expandAVX512_44_inShuf1<>+0x30(SB)/8, $0xffffffffffff0101
+DATA expandAVX512_44_inShuf1<>+0x38(SB)/8, $0xff02020202020101
+
+GLOBL expandAVX512_44_mat1<>(SB), RODATA, $0x40
+DATA expandAVX512_44_mat1<>+0x00(SB)/8, $0x2020202020202020
+DATA expandAVX512_44_mat1<>+0x08(SB)/8, $0x4040404040404040
+DATA expandAVX512_44_mat1<>+0x10(SB)/8, $0x4040404080808080
+DATA expandAVX512_44_mat1<>+0x18(SB)/8, $0x8080808080808080
+DATA expandAVX512_44_mat1<>+0x20(SB)/8, $0x0101010101010101
+DATA expandAVX512_44_mat1<>+0x28(SB)/8, $0x0202020202020202
+DATA expandAVX512_44_mat1<>+0x30(SB)/8, $0x0404040404040404
+DATA expandAVX512_44_mat1<>+0x38(SB)/8, $0x0808080808080808
+
+GLOBL expandAVX512_44_inShuf2<>(SB), RODATA, $0x40
+DATA expandAVX512_44_inShuf2<>+0x00(SB)/8, $0x0202020101010101
+DATA expandAVX512_44_inShuf2<>+0x08(SB)/8, $0xffffffffffff0201
+DATA expandAVX512_44_inShuf2<>+0x10(SB)/8, $0x0202020101010101
+DATA expandAVX512_44_inShuf2<>+0x18(SB)/8, $0x0202020101010101
+DATA expandAVX512_44_inShuf2<>+0x20(SB)/8, $0xffffffffffff0201
+DATA expandAVX512_44_inShuf2<>+0x28(SB)/8, $0xffff020101010101
+DATA expandAVX512_44_inShuf2<>+0x30(SB)/8, $0xffffff0202020202
+DATA expandAVX512_44_inShuf2<>+0x38(SB)/8, $0xffffffffffffff02
+
+GLOBL expandAVX512_44_mat2<>(SB), RODATA, $0x40
+DATA expandAVX512_44_mat2<>+0x00(SB)/8, $0x1010101010101010
+DATA expandAVX512_44_mat2<>+0x08(SB)/8, $0x1010101020202020
+DATA expandAVX512_44_mat2<>+0x10(SB)/8, $0x2020202020202020
+DATA expandAVX512_44_mat2<>+0x18(SB)/8, $0x4040404040404040
+DATA expandAVX512_44_mat2<>+0x20(SB)/8, $0x4040404080808080
+DATA expandAVX512_44_mat2<>+0x28(SB)/8, $0x8080808080808080
+DATA expandAVX512_44_mat2<>+0x30(SB)/8, $0x0101010101010101
+DATA expandAVX512_44_mat2<>+0x38(SB)/8, $0x0101010102020202
+
+GLOBL expandAVX512_44_inShuf3<>(SB), RODATA, $0x40
+DATA expandAVX512_44_inShuf3<>+0x00(SB)/8, $0xffffff0202020202
+DATA expandAVX512_44_inShuf3<>+0x08(SB)/8, $0xffffff0202020202
+DATA expandAVX512_44_inShuf3<>+0x10(SB)/8, $0xffffffffffffff02
+DATA expandAVX512_44_inShuf3<>+0x18(SB)/8, $0xffffffffffff0202
+DATA expandAVX512_44_inShuf3<>+0x20(SB)/8, $0xffffffffffff0202
+DATA expandAVX512_44_inShuf3<>+0x28(SB)/8, $0xffffffffffff0202
+DATA expandAVX512_44_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_44_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff
+
+GLOBL expandAVX512_44_mat3<>(SB), RODATA, $0x40
+DATA expandAVX512_44_mat3<>+0x00(SB)/8, $0x0202020202020202
+DATA expandAVX512_44_mat3<>+0x08(SB)/8, $0x0404040404040404
+DATA expandAVX512_44_mat3<>+0x10(SB)/8, $0x0404040408080808
+DATA expandAVX512_44_mat3<>+0x18(SB)/8, $0x1010101010101010
+DATA expandAVX512_44_mat3<>+0x20(SB)/8, $0x2020202020202020
+DATA expandAVX512_44_mat3<>+0x28(SB)/8, $0x4040404040404040
+DATA expandAVX512_44_mat3<>+0x30(SB)/8, $0x0000000000000000
+DATA expandAVX512_44_mat3<>+0x38(SB)/8, $0x0000000000000000
+
+GLOBL expandAVX512_44_outShufLo(SB), RODATA, $0x40
+DATA expandAVX512_44_outShufLo+0x00(SB)/8, $0x1110080403020100
+DATA expandAVX512_44_outShufLo+0x08(SB)/8, $0x1c1b1a1918141312
+DATA expandAVX512_44_outShufLo+0x10(SB)/8, $0x31302c2b2a292820
+DATA expandAVX512_44_outShufLo+0x18(SB)/8, $0x4342414038343332
+DATA expandAVX512_44_outShufLo+0x20(SB)/8, $0x58504c4b4a494844
+DATA expandAVX512_44_outShufLo+0x28(SB)/8, $0x600706055c5b5a59
+DATA expandAVX512_44_outShufLo+0x30(SB)/8, $0x1d69681716150961
+DATA expandAVX512_44_outShufLo+0x38(SB)/8, $0x2f2e2d2171701f1e
+
+GLOBL expandAVX512_44_outShufHi0(SB), RODATA, $0x40
+DATA expandAVX512_44_outShufHi0+0x00(SB)/8, $0x4844434241403938
+DATA expandAVX512_44_outShufHi0+0x08(SB)/8, $0x5a59585453525150
+DATA expandAVX512_44_outShufHi0+0x10(SB)/8, $0x6c6b6a6968605c5b
+DATA expandAVX512_44_outShufHi0+0x18(SB)/8, $0xffff787473727170
+DATA expandAVX512_44_outShufHi0+0x20(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_44_outShufHi0+0x28(SB)/8, $0x46453e3d3c3b3aff
+DATA expandAVX512_44_outShufHi0+0x30(SB)/8, $0xff57565549ffff47
+DATA expandAVX512_44_outShufHi0+0x38(SB)/8, $0x6d61ffff5f5e5dff
+
+GLOBL expandAVX512_44_outShufHi1(SB), RODATA, $0x40
+DATA expandAVX512_44_outShufHi1+0x00(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_44_outShufHi1+0x08(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_44_outShufHi1+0x10(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_44_outShufHi1+0x18(SB)/8, $0x0100ffffffffffff
+DATA expandAVX512_44_outShufHi1+0x20(SB)/8, $0x0c0b0a0908040302
+DATA expandAVX512_44_outShufHi1+0x28(SB)/8, $0xffffffffffffff10
+DATA expandAVX512_44_outShufHi1+0x30(SB)/8, $0x20ffffffff1918ff
+DATA expandAVX512_44_outShufHi1+0x38(SB)/8, $0xffff2928ffffff21
+
+TEXT expandAVX512_44<>(SB), NOSPLIT, $0-0
+ VMOVDQU64 expandAVX512_44_inShuf0<>(SB), Z0
+ VMOVDQU64 expandAVX512_44_inShuf1<>(SB), Z2
+ VMOVDQU64 expandAVX512_44_inShuf2<>(SB), Z3
+ VMOVDQU64 expandAVX512_44_inShuf3<>(SB), Z4
+ VMOVDQU64 expandAVX512_44_outShufLo(SB), Z1
+ VMOVDQU64 expandAVX512_44_outShufHi0(SB), Z5
+ VMOVDQU64 expandAVX512_44_outShufHi1(SB), Z6
+ VMOVDQU64 (AX), Z7
+ VPERMB Z7, Z0, Z0
+ VGF2P8AFFINEQB $0, expandAVX512_44_mat0<>(SB), Z0, Z0
+ VPERMB Z7, Z2, Z2
+ VGF2P8AFFINEQB $0, expandAVX512_44_mat1<>(SB), Z2, Z2
+ VPERMB Z7, Z3, Z3
+ VGF2P8AFFINEQB $0, expandAVX512_44_mat2<>(SB), Z3, Z3
+ VPERMB Z7, Z4, Z4
+ VGF2P8AFFINEQB $0, expandAVX512_44_mat3<>(SB), Z4, Z4
+ VPERMI2B Z2, Z0, Z1
+ MOVQ $0xce79fe003fffffff, AX
+ KMOVQ AX, K1
+ VPERMI2B.Z Z3, Z2, K1, Z5
+ MOVQ $0x318601ffc0000000, AX
+ KMOVQ AX, K1
+ VPERMB.Z Z4, Z6, K1, Z0
+ VPORQ Z0, Z5, Z2
+ RET
+
+GLOBL expandAVX512_48_inShuf0<>(SB), RODATA, $0x40
+DATA expandAVX512_48_inShuf0<>+0x00(SB)/8, $0x0101000000000000
+DATA expandAVX512_48_inShuf0<>+0x08(SB)/8, $0x0101000000000000
+DATA expandAVX512_48_inShuf0<>+0x10(SB)/8, $0x0101000000000000
+DATA expandAVX512_48_inShuf0<>+0x18(SB)/8, $0xffff000000000000
+DATA expandAVX512_48_inShuf0<>+0x20(SB)/8, $0xffff000000000000
+DATA expandAVX512_48_inShuf0<>+0x28(SB)/8, $0xffff000000000000
+DATA expandAVX512_48_inShuf0<>+0x30(SB)/8, $0xffff000000000000
+DATA expandAVX512_48_inShuf0<>+0x38(SB)/8, $0xffff000000000000
+
+GLOBL expandAVX512_48_mat0<>(SB), RODATA, $0x40
+DATA expandAVX512_48_mat0<>+0x00(SB)/8, $0x0101010101010101
+DATA expandAVX512_48_mat0<>+0x08(SB)/8, $0x0202020202020202
+DATA expandAVX512_48_mat0<>+0x10(SB)/8, $0x0404040404040404
+DATA expandAVX512_48_mat0<>+0x18(SB)/8, $0x0808080808080808
+DATA expandAVX512_48_mat0<>+0x20(SB)/8, $0x1010101010101010
+DATA expandAVX512_48_mat0<>+0x28(SB)/8, $0x2020202020202020
+DATA expandAVX512_48_mat0<>+0x30(SB)/8, $0x4040404040404040
+DATA expandAVX512_48_mat0<>+0x38(SB)/8, $0x8080808080808080
+
+GLOBL expandAVX512_48_inShuf1<>(SB), RODATA, $0x40
+DATA expandAVX512_48_inShuf1<>+0x00(SB)/8, $0xffffffff01010101
+DATA expandAVX512_48_inShuf1<>+0x08(SB)/8, $0xffffffff01010101
+DATA expandAVX512_48_inShuf1<>+0x10(SB)/8, $0xffffffffffff0101
+DATA expandAVX512_48_inShuf1<>+0x18(SB)/8, $0x0202020202020101
+DATA expandAVX512_48_inShuf1<>+0x20(SB)/8, $0x0202010101010101
+DATA expandAVX512_48_inShuf1<>+0x28(SB)/8, $0x0202010101010101
+DATA expandAVX512_48_inShuf1<>+0x30(SB)/8, $0x0202010101010101
+DATA expandAVX512_48_inShuf1<>+0x38(SB)/8, $0xffff010101010101
+
+GLOBL expandAVX512_48_mat1<>(SB), RODATA, $0x40
+DATA expandAVX512_48_mat1<>+0x00(SB)/8, $0x0101010101010101
+DATA expandAVX512_48_mat1<>+0x08(SB)/8, $0x0202020202020202
+DATA expandAVX512_48_mat1<>+0x10(SB)/8, $0x0404040404040404
+DATA expandAVX512_48_mat1<>+0x18(SB)/8, $0x0404040404040404
+DATA expandAVX512_48_mat1<>+0x20(SB)/8, $0x0808080808080808
+DATA expandAVX512_48_mat1<>+0x28(SB)/8, $0x1010101010101010
+DATA expandAVX512_48_mat1<>+0x30(SB)/8, $0x2020202020202020
+DATA expandAVX512_48_mat1<>+0x38(SB)/8, $0x4040404040404040
+
+GLOBL expandAVX512_48_inShuf2<>(SB), RODATA, $0x40
+DATA expandAVX512_48_inShuf2<>+0x00(SB)/8, $0xffff010101010101
+DATA expandAVX512_48_inShuf2<>+0x08(SB)/8, $0xffff020202020202
+DATA expandAVX512_48_inShuf2<>+0x10(SB)/8, $0xffff020202020202
+DATA expandAVX512_48_inShuf2<>+0x18(SB)/8, $0xffffffff02020202
+DATA expandAVX512_48_inShuf2<>+0x20(SB)/8, $0xffffffff02020202
+DATA expandAVX512_48_inShuf2<>+0x28(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_48_inShuf2<>+0x30(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_48_inShuf2<>+0x38(SB)/8, $0xffffffffffffffff
+
+GLOBL expandAVX512_48_mat2<>(SB), RODATA, $0x40
+DATA expandAVX512_48_mat2<>+0x00(SB)/8, $0x8080808080808080
+DATA expandAVX512_48_mat2<>+0x08(SB)/8, $0x0101010101010101
+DATA expandAVX512_48_mat2<>+0x10(SB)/8, $0x0202020202020202
+DATA expandAVX512_48_mat2<>+0x18(SB)/8, $0x0808080808080808
+DATA expandAVX512_48_mat2<>+0x20(SB)/8, $0x1010101010101010
+DATA expandAVX512_48_mat2<>+0x28(SB)/8, $0x0000000000000000
+DATA expandAVX512_48_mat2<>+0x30(SB)/8, $0x0000000000000000
+DATA expandAVX512_48_mat2<>+0x38(SB)/8, $0x0000000000000000
+
+GLOBL expandAVX512_48_outShufLo(SB), RODATA, $0x40
+DATA expandAVX512_48_outShufLo+0x00(SB)/8, $0x0908050403020100
+DATA expandAVX512_48_outShufLo+0x08(SB)/8, $0x131211100d0c0b0a
+DATA expandAVX512_48_outShufLo+0x10(SB)/8, $0x1d1c1b1a19181514
+DATA expandAVX512_48_outShufLo+0x18(SB)/8, $0x2928252423222120
+DATA expandAVX512_48_outShufLo+0x20(SB)/8, $0x333231302d2c2b2a
+DATA expandAVX512_48_outShufLo+0x28(SB)/8, $0x3d3c3b3a39383534
+DATA expandAVX512_48_outShufLo+0x30(SB)/8, $0x0f0e434241400706
+DATA expandAVX512_48_outShufLo+0x38(SB)/8, $0x515017164b4a4948
+
+GLOBL expandAVX512_48_outShufHi(SB), RODATA, $0x40
+DATA expandAVX512_48_outShufHi+0x00(SB)/8, $0x2524232221201918
+DATA expandAVX512_48_outShufHi+0x08(SB)/8, $0x31302d2c2b2a2928
+DATA expandAVX512_48_outShufHi+0x10(SB)/8, $0x3b3a393835343332
+DATA expandAVX512_48_outShufHi+0x18(SB)/8, $0x4544434241403d3c
+DATA expandAVX512_48_outShufHi+0x20(SB)/8, $0x51504d4c4b4a4948
+DATA expandAVX512_48_outShufHi+0x28(SB)/8, $0x1d1c1b1a55545352
+DATA expandAVX512_48_outShufHi+0x30(SB)/8, $0x5b5a595827261f1e
+DATA expandAVX512_48_outShufHi+0x38(SB)/8, $0x3736636261602f2e
+
+TEXT expandAVX512_48<>(SB), NOSPLIT, $0-0
+ VMOVDQU64 expandAVX512_48_inShuf0<>(SB), Z0
+ VMOVDQU64 expandAVX512_48_inShuf1<>(SB), Z3
+ VMOVDQU64 expandAVX512_48_inShuf2<>(SB), Z4
+ VMOVDQU64 expandAVX512_48_outShufLo(SB), Z1
+ VMOVDQU64 expandAVX512_48_outShufHi(SB), Z2
+ VMOVDQU64 (AX), Z5
+ VPERMB Z5, Z0, Z0
+ VGF2P8AFFINEQB $0, expandAVX512_48_mat0<>(SB), Z0, Z0
+ VPERMB Z5, Z3, Z3
+ VGF2P8AFFINEQB $0, expandAVX512_48_mat1<>(SB), Z3, Z3
+ VPERMB Z5, Z4, Z4
+ VGF2P8AFFINEQB $0, expandAVX512_48_mat2<>(SB), Z4, Z4
+ VPERMI2B Z3, Z0, Z1
+ VPERMI2B Z4, Z3, Z2
+ RET
+
+GLOBL expandAVX512_52_inShuf0<>(SB), RODATA, $0x40
+DATA expandAVX512_52_inShuf0<>+0x00(SB)/8, $0x0101000000000000
+DATA expandAVX512_52_inShuf0<>+0x08(SB)/8, $0xffffffffffff0100
+DATA expandAVX512_52_inShuf0<>+0x10(SB)/8, $0x0101000000000000
+DATA expandAVX512_52_inShuf0<>+0x18(SB)/8, $0xffff000000000000
+DATA expandAVX512_52_inShuf0<>+0x20(SB)/8, $0xffffffffffffff00
+DATA expandAVX512_52_inShuf0<>+0x28(SB)/8, $0xffff000000000000
+DATA expandAVX512_52_inShuf0<>+0x30(SB)/8, $0xffff000000000000
+DATA expandAVX512_52_inShuf0<>+0x38(SB)/8, $0xffffffffffffff00
+
+GLOBL expandAVX512_52_mat0<>(SB), RODATA, $0x40
+DATA expandAVX512_52_mat0<>+0x00(SB)/8, $0x0101010101010101
+DATA expandAVX512_52_mat0<>+0x08(SB)/8, $0x0101010102020202
+DATA expandAVX512_52_mat0<>+0x10(SB)/8, $0x0202020202020202
+DATA expandAVX512_52_mat0<>+0x18(SB)/8, $0x0404040404040404
+DATA expandAVX512_52_mat0<>+0x20(SB)/8, $0x0404040408080808
+DATA expandAVX512_52_mat0<>+0x28(SB)/8, $0x0808080808080808
+DATA expandAVX512_52_mat0<>+0x30(SB)/8, $0x1010101010101010
+DATA expandAVX512_52_mat0<>+0x38(SB)/8, $0x1010101020202020
+
+GLOBL expandAVX512_52_inShuf1<>(SB), RODATA, $0x40
+DATA expandAVX512_52_inShuf1<>+0x00(SB)/8, $0xffff000000000000
+DATA expandAVX512_52_inShuf1<>+0x08(SB)/8, $0xffff000000000000
+DATA expandAVX512_52_inShuf1<>+0x10(SB)/8, $0xffffffffffffff00
+DATA expandAVX512_52_inShuf1<>+0x18(SB)/8, $0xffff000000000000
+DATA expandAVX512_52_inShuf1<>+0x20(SB)/8, $0xffffffff01010101
+DATA expandAVX512_52_inShuf1<>+0x28(SB)/8, $0xffffffffff010101
+DATA expandAVX512_52_inShuf1<>+0x30(SB)/8, $0xff02020202020201
+DATA expandAVX512_52_inShuf1<>+0x38(SB)/8, $0x0202010101010101
+
+GLOBL expandAVX512_52_mat1<>(SB), RODATA, $0x40
+DATA expandAVX512_52_mat1<>+0x00(SB)/8, $0x2020202020202020
+DATA expandAVX512_52_mat1<>+0x08(SB)/8, $0x4040404040404040
+DATA expandAVX512_52_mat1<>+0x10(SB)/8, $0x4040404080808080
+DATA expandAVX512_52_mat1<>+0x18(SB)/8, $0x8080808080808080
+DATA expandAVX512_52_mat1<>+0x20(SB)/8, $0x0101010101010101
+DATA expandAVX512_52_mat1<>+0x28(SB)/8, $0x0202020202020202
+DATA expandAVX512_52_mat1<>+0x30(SB)/8, $0x0202020202020202
+DATA expandAVX512_52_mat1<>+0x38(SB)/8, $0x0404040404040404
+
+GLOBL expandAVX512_52_inShuf2<>(SB), RODATA, $0x40
+DATA expandAVX512_52_inShuf2<>+0x00(SB)/8, $0xffffffffffff0201
+DATA expandAVX512_52_inShuf2<>+0x08(SB)/8, $0x0202010101010101
+DATA expandAVX512_52_inShuf2<>+0x10(SB)/8, $0xffff010101010101
+DATA expandAVX512_52_inShuf2<>+0x18(SB)/8, $0xffffffffffffff01
+DATA expandAVX512_52_inShuf2<>+0x20(SB)/8, $0xffff010101010101
+DATA expandAVX512_52_inShuf2<>+0x28(SB)/8, $0xffff010101010101
+DATA expandAVX512_52_inShuf2<>+0x30(SB)/8, $0xffffffffffffff01
+DATA expandAVX512_52_inShuf2<>+0x38(SB)/8, $0xffff010101010101
+
+GLOBL expandAVX512_52_mat2<>(SB), RODATA, $0x40
+DATA expandAVX512_52_mat2<>+0x00(SB)/8, $0x0404040408080808
+DATA expandAVX512_52_mat2<>+0x08(SB)/8, $0x0808080808080808
+DATA expandAVX512_52_mat2<>+0x10(SB)/8, $0x1010101010101010
+DATA expandAVX512_52_mat2<>+0x18(SB)/8, $0x1010101020202020
+DATA expandAVX512_52_mat2<>+0x20(SB)/8, $0x2020202020202020
+DATA expandAVX512_52_mat2<>+0x28(SB)/8, $0x4040404040404040
+DATA expandAVX512_52_mat2<>+0x30(SB)/8, $0x4040404080808080
+DATA expandAVX512_52_mat2<>+0x38(SB)/8, $0x8080808080808080
+
+GLOBL expandAVX512_52_inShuf3<>(SB), RODATA, $0x40
+DATA expandAVX512_52_inShuf3<>+0x00(SB)/8, $0xffff020202020202
+DATA expandAVX512_52_inShuf3<>+0x08(SB)/8, $0xffffffffffffff02
+DATA expandAVX512_52_inShuf3<>+0x10(SB)/8, $0xffffffff02020202
+DATA expandAVX512_52_inShuf3<>+0x18(SB)/8, $0xffffffffffff0202
+DATA expandAVX512_52_inShuf3<>+0x20(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_52_inShuf3<>+0x28(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_52_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_52_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff
+
+GLOBL expandAVX512_52_mat3<>(SB), RODATA, $0x40
+DATA expandAVX512_52_mat3<>+0x00(SB)/8, $0x0101010101010101
+DATA expandAVX512_52_mat3<>+0x08(SB)/8, $0x0101010102020202
+DATA expandAVX512_52_mat3<>+0x10(SB)/8, $0x0404040404040404
+DATA expandAVX512_52_mat3<>+0x18(SB)/8, $0x0808080808080808
+DATA expandAVX512_52_mat3<>+0x20(SB)/8, $0x0000000000000000
+DATA expandAVX512_52_mat3<>+0x28(SB)/8, $0x0000000000000000
+DATA expandAVX512_52_mat3<>+0x30(SB)/8, $0x0000000000000000
+DATA expandAVX512_52_mat3<>+0x38(SB)/8, $0x0000000000000000
+
+GLOBL expandAVX512_52_outShufLo(SB), RODATA, $0x40
+DATA expandAVX512_52_outShufLo+0x00(SB)/8, $0x1008050403020100
+DATA expandAVX512_52_outShufLo+0x08(SB)/8, $0x1a19181514131211
+DATA expandAVX512_52_outShufLo+0x10(SB)/8, $0x2b2a2928201d1c1b
+DATA expandAVX512_52_outShufLo+0x18(SB)/8, $0x3534333231302d2c
+DATA expandAVX512_52_outShufLo+0x20(SB)/8, $0x4845444342414038
+DATA expandAVX512_52_outShufLo+0x28(SB)/8, $0x5958504d4c4b4a49
+DATA expandAVX512_52_outShufLo+0x30(SB)/8, $0x616007065d5c5b5a
+DATA expandAVX512_52_outShufLo+0x38(SB)/8, $0x6a69681716096362
+
+GLOBL expandAVX512_52_outShufHi0(SB), RODATA, $0x40
+DATA expandAVX512_52_outShufHi0+0x00(SB)/8, $0x403d3c3b3a393830
+DATA expandAVX512_52_outShufHi0+0x08(SB)/8, $0x51504d4c4b4a4948
+DATA expandAVX512_52_outShufHi0+0x10(SB)/8, $0x6261605855545352
+DATA expandAVX512_52_outShufHi0+0x18(SB)/8, $0x6c6b6a6968656463
+DATA expandAVX512_52_outShufHi0+0x20(SB)/8, $0x7d7c7b7a7978706d
+DATA expandAVX512_52_outShufHi0+0x28(SB)/8, $0x31ffffffffffffff
+DATA expandAVX512_52_outShufHi0+0x30(SB)/8, $0xff3f3e3635343332
+DATA expandAVX512_52_outShufHi0+0x38(SB)/8, $0xffff4f4e41ffffff
+
+GLOBL expandAVX512_52_outShufHi1(SB), RODATA, $0x40
+DATA expandAVX512_52_outShufHi1+0x00(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_52_outShufHi1+0x08(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_52_outShufHi1+0x10(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_52_outShufHi1+0x18(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_52_outShufHi1+0x20(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_52_outShufHi1+0x28(SB)/8, $0xff08050403020100
+DATA expandAVX512_52_outShufHi1+0x30(SB)/8, $0x10ffffffffffffff
+DATA expandAVX512_52_outShufHi1+0x38(SB)/8, $0x1918ffffff131211
+
+TEXT expandAVX512_52<>(SB), NOSPLIT, $0-0
+ VMOVDQU64 expandAVX512_52_inShuf0<>(SB), Z0
+ VMOVDQU64 expandAVX512_52_inShuf1<>(SB), Z2
+ VMOVDQU64 expandAVX512_52_inShuf2<>(SB), Z3
+ VMOVDQU64 expandAVX512_52_inShuf3<>(SB), Z4
+ VMOVDQU64 expandAVX512_52_outShufLo(SB), Z1
+ VMOVDQU64 expandAVX512_52_outShufHi0(SB), Z5
+ VMOVDQU64 expandAVX512_52_outShufHi1(SB), Z6
+ VMOVDQU64 (AX), Z7
+ VPERMB Z7, Z0, Z0
+ VGF2P8AFFINEQB $0, expandAVX512_52_mat0<>(SB), Z0, Z0
+ VPERMB Z7, Z2, Z2
+ VGF2P8AFFINEQB $0, expandAVX512_52_mat1<>(SB), Z2, Z2
+ VPERMB Z7, Z3, Z3
+ VGF2P8AFFINEQB $0, expandAVX512_52_mat2<>(SB), Z3, Z3
+ VPERMB Z7, Z4, Z4
+ VGF2P8AFFINEQB $0, expandAVX512_52_mat3<>(SB), Z4, Z4
+ VPERMI2B Z2, Z0, Z1
+ MOVQ $0x387f80ffffffffff, AX
+ KMOVQ AX, K1
+ VPERMI2B.Z Z3, Z2, K1, Z5
+ MOVQ $0xc7807f0000000000, AX
+ KMOVQ AX, K1
+ VPERMB.Z Z4, Z6, K1, Z0
+ VPORQ Z0, Z5, Z2
+ RET
+
+GLOBL expandAVX512_56_inShuf0<>(SB), RODATA, $0x40
+DATA expandAVX512_56_inShuf0<>+0x00(SB)/8, $0x0100000000000000
+DATA expandAVX512_56_inShuf0<>+0x08(SB)/8, $0x0100000000000000
+DATA expandAVX512_56_inShuf0<>+0x10(SB)/8, $0xff00000000000000
+DATA expandAVX512_56_inShuf0<>+0x18(SB)/8, $0xff00000000000000
+DATA expandAVX512_56_inShuf0<>+0x20(SB)/8, $0xff00000000000000
+DATA expandAVX512_56_inShuf0<>+0x28(SB)/8, $0xff00000000000000
+DATA expandAVX512_56_inShuf0<>+0x30(SB)/8, $0xff00000000000000
+DATA expandAVX512_56_inShuf0<>+0x38(SB)/8, $0xff00000000000000
+
+GLOBL expandAVX512_56_mat0<>(SB), RODATA, $0x40
+DATA expandAVX512_56_mat0<>+0x00(SB)/8, $0x0101010101010101
+DATA expandAVX512_56_mat0<>+0x08(SB)/8, $0x0202020202020202
+DATA expandAVX512_56_mat0<>+0x10(SB)/8, $0x0404040404040404
+DATA expandAVX512_56_mat0<>+0x18(SB)/8, $0x0808080808080808
+DATA expandAVX512_56_mat0<>+0x20(SB)/8, $0x1010101010101010
+DATA expandAVX512_56_mat0<>+0x28(SB)/8, $0x2020202020202020
+DATA expandAVX512_56_mat0<>+0x30(SB)/8, $0x4040404040404040
+DATA expandAVX512_56_mat0<>+0x38(SB)/8, $0x8080808080808080
+
+GLOBL expandAVX512_56_inShuf1<>(SB), RODATA, $0x40
+DATA expandAVX512_56_inShuf1<>+0x00(SB)/8, $0xffff010101010101
+DATA expandAVX512_56_inShuf1<>+0x08(SB)/8, $0x0202010101010101
+DATA expandAVX512_56_inShuf1<>+0x10(SB)/8, $0x0201010101010101
+DATA expandAVX512_56_inShuf1<>+0x18(SB)/8, $0xff01010101010101
+DATA expandAVX512_56_inShuf1<>+0x20(SB)/8, $0xff01010101010101
+DATA expandAVX512_56_inShuf1<>+0x28(SB)/8, $0xff01010101010101
+DATA expandAVX512_56_inShuf1<>+0x30(SB)/8, $0xff01010101010101
+DATA expandAVX512_56_inShuf1<>+0x38(SB)/8, $0xff01010101010101
+
+GLOBL expandAVX512_56_inShuf2<>(SB), RODATA, $0x40
+DATA expandAVX512_56_inShuf2<>+0x00(SB)/8, $0xff02020202020202
+DATA expandAVX512_56_inShuf2<>+0x08(SB)/8, $0xffffff0202020202
+DATA expandAVX512_56_inShuf2<>+0x10(SB)/8, $0xffffffffffffff02
+DATA expandAVX512_56_inShuf2<>+0x18(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_56_inShuf2<>+0x20(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_56_inShuf2<>+0x28(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_56_inShuf2<>+0x30(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_56_inShuf2<>+0x38(SB)/8, $0xffffffffffffffff
+
+GLOBL expandAVX512_56_mat2<>(SB), RODATA, $0x40
+DATA expandAVX512_56_mat2<>+0x00(SB)/8, $0x0101010101010101
+DATA expandAVX512_56_mat2<>+0x08(SB)/8, $0x0202020202020202
+DATA expandAVX512_56_mat2<>+0x10(SB)/8, $0x0404040404040404
+DATA expandAVX512_56_mat2<>+0x18(SB)/8, $0x0000000000000000
+DATA expandAVX512_56_mat2<>+0x20(SB)/8, $0x0000000000000000
+DATA expandAVX512_56_mat2<>+0x28(SB)/8, $0x0000000000000000
+DATA expandAVX512_56_mat2<>+0x30(SB)/8, $0x0000000000000000
+DATA expandAVX512_56_mat2<>+0x38(SB)/8, $0x0000000000000000
+
+GLOBL expandAVX512_56_outShufLo(SB), RODATA, $0x40
+DATA expandAVX512_56_outShufLo+0x00(SB)/8, $0x0806050403020100
+DATA expandAVX512_56_outShufLo+0x08(SB)/8, $0x11100e0d0c0b0a09
+DATA expandAVX512_56_outShufLo+0x10(SB)/8, $0x1a19181615141312
+DATA expandAVX512_56_outShufLo+0x18(SB)/8, $0x232221201e1d1c1b
+DATA expandAVX512_56_outShufLo+0x20(SB)/8, $0x2c2b2a2928262524
+DATA expandAVX512_56_outShufLo+0x28(SB)/8, $0x3534333231302e2d
+DATA expandAVX512_56_outShufLo+0x30(SB)/8, $0x3e3d3c3b3a393836
+DATA expandAVX512_56_outShufLo+0x38(SB)/8, $0x0f45444342414007
+
+GLOBL expandAVX512_56_outShufHi(SB), RODATA, $0x40
+DATA expandAVX512_56_outShufHi+0x00(SB)/8, $0x11100d0c0b0a0908
+DATA expandAVX512_56_outShufHi+0x08(SB)/8, $0x1a19181615141312
+DATA expandAVX512_56_outShufHi+0x10(SB)/8, $0x232221201e1d1c1b
+DATA expandAVX512_56_outShufHi+0x18(SB)/8, $0x2c2b2a2928262524
+DATA expandAVX512_56_outShufHi+0x20(SB)/8, $0x3534333231302e2d
+DATA expandAVX512_56_outShufHi+0x28(SB)/8, $0x3e3d3c3b3a393836
+DATA expandAVX512_56_outShufHi+0x30(SB)/8, $0x0e46454443424140
+DATA expandAVX512_56_outShufHi+0x38(SB)/8, $0x50174c4b4a49480f
+
+TEXT expandAVX512_56<>(SB), NOSPLIT, $0-0
+ VMOVDQU64 expandAVX512_56_inShuf0<>(SB), Z0
+ VMOVDQU64 expandAVX512_56_mat0<>(SB), Z3
+ VMOVDQU64 expandAVX512_56_inShuf1<>(SB), Z4
+ VMOVDQU64 expandAVX512_56_inShuf2<>(SB), Z5
+ VMOVDQU64 expandAVX512_56_outShufLo(SB), Z1
+ VMOVDQU64 expandAVX512_56_outShufHi(SB), Z2
+ VMOVDQU64 (AX), Z6
+ VPERMB Z6, Z0, Z0
+ VGF2P8AFFINEQB $0, Z3, Z0, Z0
+ VPERMB Z6, Z4, Z4
+ VGF2P8AFFINEQB $0, Z3, Z4, Z3
+ VPERMB Z6, Z5, Z4
+ VGF2P8AFFINEQB $0, expandAVX512_56_mat2<>(SB), Z4, Z4
+ VPERMI2B Z3, Z0, Z1
+ VPERMI2B Z4, Z3, Z2
+ RET
+
+GLOBL expandAVX512_60_inShuf0<>(SB), RODATA, $0x40
+DATA expandAVX512_60_inShuf0<>+0x00(SB)/8, $0x0100000000000000
+DATA expandAVX512_60_inShuf0<>+0x08(SB)/8, $0xffffffffffffff00
+DATA expandAVX512_60_inShuf0<>+0x10(SB)/8, $0xff00000000000000
+DATA expandAVX512_60_inShuf0<>+0x18(SB)/8, $0xff00000000000000
+DATA expandAVX512_60_inShuf0<>+0x20(SB)/8, $0xffffffffffffff00
+DATA expandAVX512_60_inShuf0<>+0x28(SB)/8, $0xff00000000000000
+DATA expandAVX512_60_inShuf0<>+0x30(SB)/8, $0xff00000000000000
+DATA expandAVX512_60_inShuf0<>+0x38(SB)/8, $0xffffffffffffff00
+
+GLOBL expandAVX512_60_mat0<>(SB), RODATA, $0x40
+DATA expandAVX512_60_mat0<>+0x00(SB)/8, $0x0101010101010101
+DATA expandAVX512_60_mat0<>+0x08(SB)/8, $0x0101010102020202
+DATA expandAVX512_60_mat0<>+0x10(SB)/8, $0x0202020202020202
+DATA expandAVX512_60_mat0<>+0x18(SB)/8, $0x0404040404040404
+DATA expandAVX512_60_mat0<>+0x20(SB)/8, $0x0404040408080808
+DATA expandAVX512_60_mat0<>+0x28(SB)/8, $0x0808080808080808
+DATA expandAVX512_60_mat0<>+0x30(SB)/8, $0x1010101010101010
+DATA expandAVX512_60_mat0<>+0x38(SB)/8, $0x1010101020202020
+
+GLOBL expandAVX512_60_inShuf1<>(SB), RODATA, $0x40
+DATA expandAVX512_60_inShuf1<>+0x00(SB)/8, $0xff00000000000000
+DATA expandAVX512_60_inShuf1<>+0x08(SB)/8, $0xff00000000000000
+DATA expandAVX512_60_inShuf1<>+0x10(SB)/8, $0xffffffffffffff00
+DATA expandAVX512_60_inShuf1<>+0x18(SB)/8, $0xff00000000000000
+DATA expandAVX512_60_inShuf1<>+0x20(SB)/8, $0xffffffffff010101
+DATA expandAVX512_60_inShuf1<>+0x28(SB)/8, $0x0202020202010101
+DATA expandAVX512_60_inShuf1<>+0x30(SB)/8, $0xffffffffffff0201
+DATA expandAVX512_60_inShuf1<>+0x38(SB)/8, $0xff01010101010101
+
+GLOBL expandAVX512_60_mat1<>(SB), RODATA, $0x40
+DATA expandAVX512_60_mat1<>+0x00(SB)/8, $0x2020202020202020
+DATA expandAVX512_60_mat1<>+0x08(SB)/8, $0x4040404040404040
+DATA expandAVX512_60_mat1<>+0x10(SB)/8, $0x4040404080808080
+DATA expandAVX512_60_mat1<>+0x18(SB)/8, $0x8080808080808080
+DATA expandAVX512_60_mat1<>+0x20(SB)/8, $0x0101010101010101
+DATA expandAVX512_60_mat1<>+0x28(SB)/8, $0x0101010101010101
+DATA expandAVX512_60_mat1<>+0x30(SB)/8, $0x0101010102020202
+DATA expandAVX512_60_mat1<>+0x38(SB)/8, $0x0202020202020202
+
+GLOBL expandAVX512_60_inShuf2<>(SB), RODATA, $0x40
+DATA expandAVX512_60_inShuf2<>+0x00(SB)/8, $0xff01010101010101
+DATA expandAVX512_60_inShuf2<>+0x08(SB)/8, $0xffffffffffffff01
+DATA expandAVX512_60_inShuf2<>+0x10(SB)/8, $0xff01010101010101
+DATA expandAVX512_60_inShuf2<>+0x18(SB)/8, $0xff01010101010101
+DATA expandAVX512_60_inShuf2<>+0x20(SB)/8, $0xffffffffffffff01
+DATA expandAVX512_60_inShuf2<>+0x28(SB)/8, $0xff01010101010101
+DATA expandAVX512_60_inShuf2<>+0x30(SB)/8, $0xff01010101010101
+DATA expandAVX512_60_inShuf2<>+0x38(SB)/8, $0xffffffffffffff01
+
+GLOBL expandAVX512_60_mat2<>(SB), RODATA, $0x40
+DATA expandAVX512_60_mat2<>+0x00(SB)/8, $0x0404040404040404
+DATA expandAVX512_60_mat2<>+0x08(SB)/8, $0x0404040408080808
+DATA expandAVX512_60_mat2<>+0x10(SB)/8, $0x0808080808080808
+DATA expandAVX512_60_mat2<>+0x18(SB)/8, $0x1010101010101010
+DATA expandAVX512_60_mat2<>+0x20(SB)/8, $0x1010101020202020
+DATA expandAVX512_60_mat2<>+0x28(SB)/8, $0x2020202020202020
+DATA expandAVX512_60_mat2<>+0x30(SB)/8, $0x4040404040404040
+DATA expandAVX512_60_mat2<>+0x38(SB)/8, $0x4040404080808080
+
+GLOBL expandAVX512_60_inShuf3<>(SB), RODATA, $0x40
+DATA expandAVX512_60_inShuf3<>+0x00(SB)/8, $0xff01010101010101
+DATA expandAVX512_60_inShuf3<>+0x08(SB)/8, $0xffffffffffff0202
+DATA expandAVX512_60_inShuf3<>+0x10(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_60_inShuf3<>+0x18(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_60_inShuf3<>+0x20(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_60_inShuf3<>+0x28(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_60_inShuf3<>+0x30(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_60_inShuf3<>+0x38(SB)/8, $0xffffffffffffffff
+
+GLOBL expandAVX512_60_mat3<>(SB), RODATA, $0x40
+DATA expandAVX512_60_mat3<>+0x00(SB)/8, $0x8080808080808080
+DATA expandAVX512_60_mat3<>+0x08(SB)/8, $0x0101010101010101
+DATA expandAVX512_60_mat3<>+0x10(SB)/8, $0x0000000000000000
+DATA expandAVX512_60_mat3<>+0x18(SB)/8, $0x0000000000000000
+DATA expandAVX512_60_mat3<>+0x20(SB)/8, $0x0000000000000000
+DATA expandAVX512_60_mat3<>+0x28(SB)/8, $0x0000000000000000
+DATA expandAVX512_60_mat3<>+0x30(SB)/8, $0x0000000000000000
+DATA expandAVX512_60_mat3<>+0x38(SB)/8, $0x0000000000000000
+
+GLOBL expandAVX512_60_outShufLo(SB), RODATA, $0x40
+DATA expandAVX512_60_outShufLo+0x00(SB)/8, $0x0806050403020100
+DATA expandAVX512_60_outShufLo+0x08(SB)/8, $0x1816151413121110
+DATA expandAVX512_60_outShufLo+0x10(SB)/8, $0x28201e1d1c1b1a19
+DATA expandAVX512_60_outShufLo+0x18(SB)/8, $0x31302e2d2c2b2a29
+DATA expandAVX512_60_outShufLo+0x20(SB)/8, $0x4140383635343332
+DATA expandAVX512_60_outShufLo+0x28(SB)/8, $0x4a49484645444342
+DATA expandAVX512_60_outShufLo+0x30(SB)/8, $0x5a5958504e4d4c4b
+DATA expandAVX512_60_outShufLo+0x38(SB)/8, $0x626160075e5d5c5b
+
+GLOBL expandAVX512_60_outShufHi0(SB), RODATA, $0x40
+DATA expandAVX512_60_outShufHi0+0x00(SB)/8, $0x3b3a3938302a2928
+DATA expandAVX512_60_outShufHi0+0x08(SB)/8, $0x44434241403e3d3c
+DATA expandAVX512_60_outShufHi0+0x10(SB)/8, $0x5453525150484645
+DATA expandAVX512_60_outShufHi0+0x18(SB)/8, $0x5d5c5b5a59585655
+DATA expandAVX512_60_outShufHi0+0x20(SB)/8, $0x6d6c6b6a6968605e
+DATA expandAVX512_60_outShufHi0+0x28(SB)/8, $0x767574737271706e
+DATA expandAVX512_60_outShufHi0+0x30(SB)/8, $0xffffffffffffff78
+DATA expandAVX512_60_outShufHi0+0x38(SB)/8, $0x31ffff2f2e2d2c2b
+
+GLOBL expandAVX512_60_outShufHi1(SB), RODATA, $0x40
+DATA expandAVX512_60_outShufHi1+0x00(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_60_outShufHi1+0x08(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_60_outShufHi1+0x10(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_60_outShufHi1+0x18(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_60_outShufHi1+0x20(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_60_outShufHi1+0x28(SB)/8, $0xffffffffffffffff
+DATA expandAVX512_60_outShufHi1+0x30(SB)/8, $0x06050403020100ff
+DATA expandAVX512_60_outShufHi1+0x38(SB)/8, $0xff0908ffffffffff
+
+TEXT expandAVX512_60<>(SB), NOSPLIT, $0-0
+ VMOVDQU64 expandAVX512_60_inShuf0<>(SB), Z0
+ VMOVDQU64 expandAVX512_60_inShuf1<>(SB), Z2
+ VMOVDQU64 expandAVX512_60_inShuf2<>(SB), Z3
+ VMOVDQU64 expandAVX512_60_inShuf3<>(SB), Z4
+ VMOVDQU64 expandAVX512_60_outShufLo(SB), Z1
+ VMOVDQU64 expandAVX512_60_outShufHi0(SB), Z5
+ VMOVDQU64 expandAVX512_60_outShufHi1(SB), Z6
+ VMOVDQU64 (AX), Z7
+ VPERMB Z7, Z0, Z0
+ VGF2P8AFFINEQB $0, expandAVX512_60_mat0<>(SB), Z0, Z0
+ VPERMB Z7, Z2, Z2
+ VGF2P8AFFINEQB $0, expandAVX512_60_mat1<>(SB), Z2, Z2
+ VPERMB Z7, Z3, Z3
+ VGF2P8AFFINEQB $0, expandAVX512_60_mat2<>(SB), Z3, Z3
+ VPERMB Z7, Z4, Z4
+ VGF2P8AFFINEQB $0, expandAVX512_60_mat3<>(SB), Z4, Z4
+ VPERMI2B Z2, Z0, Z1
+ MOVQ $0x9f01ffffffffffff, AX
+ KMOVQ AX, K1
+ VPERMI2B.Z Z3, Z2, K1, Z5
+ MOVQ $0x60fe000000000000, AX
+ KMOVQ AX, K1
+ VPERMB.Z Z4, Z6, K1, Z0
+ VPORQ Z0, Z5, Z2
+ RET
+
+GLOBL expandAVX512_64_inShuf0<>(SB), RODATA, $0x40
+DATA expandAVX512_64_inShuf0<>+0x00(SB)/8, $0x0000000000000000
+DATA expandAVX512_64_inShuf0<>+0x08(SB)/8, $0x0000000000000000
+DATA expandAVX512_64_inShuf0<>+0x10(SB)/8, $0x0000000000000000
+DATA expandAVX512_64_inShuf0<>+0x18(SB)/8, $0x0000000000000000
+DATA expandAVX512_64_inShuf0<>+0x20(SB)/8, $0x0000000000000000
+DATA expandAVX512_64_inShuf0<>+0x28(SB)/8, $0x0000000000000000
+DATA expandAVX512_64_inShuf0<>+0x30(SB)/8, $0x0000000000000000
+DATA expandAVX512_64_inShuf0<>+0x38(SB)/8, $0x0000000000000000
+
+GLOBL expandAVX512_64_mat0<>(SB), RODATA, $0x40
+DATA expandAVX512_64_mat0<>+0x00(SB)/8, $0x0101010101010101
+DATA expandAVX512_64_mat0<>+0x08(SB)/8, $0x0202020202020202
+DATA expandAVX512_64_mat0<>+0x10(SB)/8, $0x0404040404040404
+DATA expandAVX512_64_mat0<>+0x18(SB)/8, $0x0808080808080808
+DATA expandAVX512_64_mat0<>+0x20(SB)/8, $0x1010101010101010
+DATA expandAVX512_64_mat0<>+0x28(SB)/8, $0x2020202020202020
+DATA expandAVX512_64_mat0<>+0x30(SB)/8, $0x4040404040404040
+DATA expandAVX512_64_mat0<>+0x38(SB)/8, $0x8080808080808080
+
+GLOBL expandAVX512_64_inShuf1<>(SB), RODATA, $0x40
+DATA expandAVX512_64_inShuf1<>+0x00(SB)/8, $0x0101010101010101
+DATA expandAVX512_64_inShuf1<>+0x08(SB)/8, $0x0101010101010101
+DATA expandAVX512_64_inShuf1<>+0x10(SB)/8, $0x0101010101010101
+DATA expandAVX512_64_inShuf1<>+0x18(SB)/8, $0x0101010101010101
+DATA expandAVX512_64_inShuf1<>+0x20(SB)/8, $0x0101010101010101
+DATA expandAVX512_64_inShuf1<>+0x28(SB)/8, $0x0101010101010101
+DATA expandAVX512_64_inShuf1<>+0x30(SB)/8, $0x0101010101010101
+DATA expandAVX512_64_inShuf1<>+0x38(SB)/8, $0x0101010101010101
+
+GLOBL expandAVX512_64_outShufLo(SB), RODATA, $0x40
+DATA expandAVX512_64_outShufLo+0x00(SB)/8, $0x0706050403020100
+DATA expandAVX512_64_outShufLo+0x08(SB)/8, $0x0f0e0d0c0b0a0908
+DATA expandAVX512_64_outShufLo+0x10(SB)/8, $0x1716151413121110
+DATA expandAVX512_64_outShufLo+0x18(SB)/8, $0x1f1e1d1c1b1a1918
+DATA expandAVX512_64_outShufLo+0x20(SB)/8, $0x2726252423222120
+DATA expandAVX512_64_outShufLo+0x28(SB)/8, $0x2f2e2d2c2b2a2928
+DATA expandAVX512_64_outShufLo+0x30(SB)/8, $0x3736353433323130
+DATA expandAVX512_64_outShufLo+0x38(SB)/8, $0x3f3e3d3c3b3a3938
+
+TEXT expandAVX512_64<>(SB), NOSPLIT, $0-0
+ VMOVDQU64 expandAVX512_64_inShuf0<>(SB), Z0
+ VMOVDQU64 expandAVX512_64_mat0<>(SB), Z1
+ VMOVDQU64 expandAVX512_64_inShuf1<>(SB), Z2
+ VMOVDQU64 expandAVX512_64_outShufLo(SB), Z3
+ VMOVDQU64 (AX), Z4
+ VPERMB Z4, Z0, Z0
+ VGF2P8AFFINEQB $0, Z1, Z0, Z0
+ VPERMB Z4, Z2, Z2
+ VGF2P8AFFINEQB $0, Z1, Z2, Z2
+ VPERMB Z0, Z3, Z1
+ VPERMB Z2, Z3, Z2
+ RET
+
diff --git a/src/internal/runtime/gc/scan/expand_amd64_test.go b/src/internal/runtime/gc/scan/expand_amd64_test.go
new file mode 100644
index 00000000000000..a8f5b88c5cb3a6
--- /dev/null
+++ b/src/internal/runtime/gc/scan/expand_amd64_test.go
@@ -0,0 +1,19 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build amd64
+
+package scan_test
+
+import (
+ "internal/runtime/gc/scan"
+ "testing"
+)
+
+func TestExpandAVX512(t *testing.T) {
+ if !scan.CanAVX512() {
+ t.Skip("no AVX512")
+ }
+ testExpand(t, scan.ExpandAVX512)
+}
diff --git a/src/internal/runtime/gc/scan/expand_reference.go b/src/internal/runtime/gc/scan/expand_reference.go
new file mode 100644
index 00000000000000..45446528d7846c
--- /dev/null
+++ b/src/internal/runtime/gc/scan/expand_reference.go
@@ -0,0 +1,39 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package scan
+
+import (
+ "internal/goarch"
+ "internal/runtime/gc"
+)
+
+// ExpandReference is a reference implementation of an expander function
+// that translates object mark bits into a bitmap of one bit per word of
+// marked object, assuming the object is of the provided size class.
+func ExpandReference(sizeClass int, packed *gc.ObjMask, unpacked *gc.PtrMask) {
+ // Look up the size and derive the number of objects in a span.
+ // We're only concerned with small objects in single-page spans,
+ // and gc.PtrMask enforces this by being statically sized to
+	// accommodate only such spans.
+ size := uintptr(gc.SizeClassToSize[sizeClass])
+ nObj := uintptr(gc.SizeClassToNPages[sizeClass]) * gc.PageSize / size
+
+ // f is the expansion factor. For example, if our objects are of size 48,
+ // then each mark bit will translate into 6 (48/8 = 6) set bits in the
+ // pointer bitmap.
+ f := size / goarch.PtrSize
+ for i := range nObj {
+ // Check if the object is marked.
+ if packed[i/goarch.PtrBits]&(uintptr(1)<<(i%goarch.PtrBits)) == 0 {
+ continue
+ }
+	// Propagate that mark into the destination as one bit per word of the
+	// expansion factor f, starting at the object's offset within the span.
+ for j := range f {
+ b := i*f + j // i*f is the start bit for the object, j indexes into each corresponding word after.
+ unpacked[b/goarch.PtrBits] |= uintptr(1) << (b % goarch.PtrBits)
+ }
+ }
+}
diff --git a/src/internal/runtime/gc/scan/expand_test.go b/src/internal/runtime/gc/scan/expand_test.go
new file mode 100644
index 00000000000000..692817d8b2bd96
--- /dev/null
+++ b/src/internal/runtime/gc/scan/expand_test.go
@@ -0,0 +1,37 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package scan_test
+
+import (
+ "internal/goarch"
+ "internal/runtime/gc"
+ "internal/runtime/gc/scan"
+ "testing"
+)
+
+type expandFunc func(sizeClass int, packed *gc.ObjMask, unpacked *gc.PtrMask)
+
+func testExpand(t *testing.T, expF expandFunc) {
+ expR := scan.ExpandReference
+
+ testObjs(t, func(t *testing.T, sizeClass int, objs *gc.ObjMask) {
+ var want, got gc.PtrMask
+ expR(sizeClass, objs, &want)
+ expF(sizeClass, objs, &got)
+
+ for i := range want {
+ if got[i] != want[i] {
+ t.Errorf("expansion differs from reference at bit %d", i*goarch.PtrSize)
+ if goarch.PtrSize == 4 {
+ t.Logf("got: %032b", got[i])
+ t.Logf("want: %032b", want[i])
+ } else {
+ t.Logf("got: %064b", got[i])
+ t.Logf("want: %064b", want[i])
+ }
+ }
+ }
+ })
+}
diff --git a/src/internal/runtime/gc/scan/filter.go b/src/internal/runtime/gc/scan/filter.go
new file mode 100644
index 00000000000000..63cee9abf066a9
--- /dev/null
+++ b/src/internal/runtime/gc/scan/filter.go
@@ -0,0 +1,35 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package scan
+
+import "unsafe"
+
+// FilterNil packs non-nil (non-zero) values in bufp together
+// at the beginning of bufp, returning the length of the
+// packed buffer. It treats bufp as an array of size n.
+//
+// TODO(mknyszek): Add a faster SIMD-based implementation.
+func FilterNil(bufp *uintptr, n int32) int32 {
+ buf := unsafe.Slice(bufp, int(n))
+ lo := 0
+ hi := len(buf) - 1
+ for lo < hi {
+ for lo < hi && buf[hi] == 0 {
+ hi--
+ }
+ for lo < hi && buf[lo] != 0 {
+ lo++
+ }
+ if lo >= hi {
+ break
+ }
+ buf[lo] = buf[hi]
+ hi--
+ }
+ if hi >= 0 && buf[hi] == 0 {
+ hi--
+ }
+ return int32(hi) + 1
+}
diff --git a/src/internal/runtime/gc/scan/filter_test.go b/src/internal/runtime/gc/scan/filter_test.go
new file mode 100644
index 00000000000000..115fbfb8bcf348
--- /dev/null
+++ b/src/internal/runtime/gc/scan/filter_test.go
@@ -0,0 +1,94 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package scan_test
+
+import (
+ "internal/runtime/gc/scan"
+ "testing"
+)
+
+// TestFilterNil covers the empty buffer, single values, and zeros at
+// the beginning, middle, and end of the buffer.
+func TestFilterNil(t *testing.T) {
+	cases := []struct {
+		name string
+		in   []uintptr
+		want []uintptr
+	}{
+		{"empty", []uintptr{}, []uintptr{}},
+		{"one", []uintptr{4}, []uintptr{4}},
+		{"elimOne", []uintptr{0}, []uintptr{}},
+		{"oneElimBegin", []uintptr{0, 4}, []uintptr{4}},
+		{"oneElimEnd", []uintptr{4, 0}, []uintptr{4}},
+		{"oneElimMultiBegin", []uintptr{0, 0, 0, 4}, []uintptr{4}},
+		{"oneElimMultiEnd", []uintptr{4, 0, 0, 0}, []uintptr{4}},
+		{"oneElimMulti", []uintptr{0, 0, 0, 4, 0}, []uintptr{4}},
+		{"two", []uintptr{5, 12}, []uintptr{5, 12}},
+		{"twoElimBegin", []uintptr{0, 5, 12}, []uintptr{5, 12}},
+		{"twoElimMid", []uintptr{5, 0, 12}, []uintptr{5, 12}},
+		{"twoElimEnd", []uintptr{5, 12, 0}, []uintptr{5, 12}},
+		{"twoElimMulti", []uintptr{0, 5, 0, 12, 0}, []uintptr{5, 12}},
+		{"Multi", []uintptr{1, 5, 5, 0, 0, 0, 12, 0, 121, 5, 0}, []uintptr{1, 5, 5, 12, 121, 5}},
+	}
+	for _, tc := range cases {
+		t.Run(tc.name, func(t *testing.T) {
+			testFilterNil(t, tc.in, tc.want)
+		})
+	}
+}
+
+// testFilterNil runs scan.FilterNil on buf and checks that the
+// surviving prefix is exactly the multiset want. FilterNil may reorder
+// the survivors, so the comparison is order-insensitive.
+func testFilterNil(t *testing.T, buf, want []uintptr) {
+	var bufp *uintptr
+	if len(buf) != 0 {
+		bufp = &buf[0]
+	}
+	n := scan.FilterNil(bufp, int32(len(buf)))
+	if n > int32(len(buf)) {
+		t.Errorf("bogus new length returned: %d > %d", n, len(buf))
+		return
+	}
+	buf = buf[:n]
+	if len(buf) != len(want) {
+		t.Errorf("lengths differ: got %d, want %d", len(buf), len(want))
+	}
+
+	// Compare multisets via value -> count maps.
+	count := func(s []uintptr) map[uintptr]int {
+		m := make(map[uintptr]int)
+		for _, p := range s {
+			m[p]++
+		}
+		return m
+	}
+	wantMap, gotMap := count(want), count(buf)
+	for p, nWant := range wantMap {
+		nGot, ok := gotMap[p]
+		switch {
+		case !ok:
+			t.Errorf("want %d, but missing from output", p)
+		case nGot != nWant:
+			t.Errorf("want %d copies of %d, but got %d", nWant, p, nGot)
+		}
+	}
+	for p := range gotMap {
+		if _, ok := wantMap[p]; !ok {
+			t.Errorf("got %d, but didn't want it", p)
+		}
+	}
+	t.Logf("got: %v", buf)
+	t.Logf("want: %v", want)
+}
diff --git a/src/internal/runtime/gc/scan/mem_nounix_test.go b/src/internal/runtime/gc/scan/mem_nounix_test.go
new file mode 100644
index 00000000000000..f4d21d8a8504ea
--- /dev/null
+++ b/src/internal/runtime/gc/scan/mem_nounix_test.go
@@ -0,0 +1,16 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !unix
+
+package scan_test
+
+import (
+ "testing"
+)
+
+// makeMem is the stub used on platforms without mmap. It skips the
+// calling test, which needs raw page-aligned scratch memory to run.
+func makeMem(t testing.TB, nPages int) ([]uintptr, func()) {
+	t.Skip("mmap unsupported")
+	return nil, nil
+}
diff --git a/src/internal/runtime/gc/scan/mem_unix_test.go b/src/internal/runtime/gc/scan/mem_unix_test.go
new file mode 100644
index 00000000000000..03f0bd5dd08090
--- /dev/null
+++ b/src/internal/runtime/gc/scan/mem_unix_test.go
@@ -0,0 +1,25 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix
+
+package scan_test
+
+import (
+ "internal/runtime/gc"
+ "syscall"
+ "testing"
+ "unsafe"
+)
+
+// makeMem maps nPages pages of zeroed, page-aligned anonymous memory
+// for scan tests and returns it as a []uintptr, along with a function
+// that releases the mapping.
+func makeMem(t testing.TB, nPages int) ([]uintptr, func()) {
+	mem, err := syscall.Mmap(-1, 0, int(gc.PageSize*nPages), syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_PRIVATE|syscall.MAP_ANON)
+	if err != nil {
+		t.Fatalf("mmap failed: %s", err)
+	}
+	free := func() {
+		syscall.Munmap(mem)
+	}
+	// Divide by the size of a uintptr rather than a hard-coded 8 so the
+	// word count is also correct on 32-bit unix platforms.
+	words := len(mem) / int(unsafe.Sizeof(uintptr(0)))
+	return unsafe.Slice((*uintptr)(unsafe.Pointer(unsafe.SliceData(mem))), words), free
+}
diff --git a/src/internal/runtime/gc/scan/mkasm.go b/src/internal/runtime/gc/scan/mkasm.go
new file mode 100644
index 00000000000000..e36defb2e18056
--- /dev/null
+++ b/src/internal/runtime/gc/scan/mkasm.go
@@ -0,0 +1,412 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "log"
+ "os"
+ "slices"
+ "strconv"
+
+ "internal/runtime/gc"
+ "internal/runtime/gc/internal/gen"
+)
+
+// header marks the output as machine-generated so tools and readers
+// know not to edit it by hand.
+const header = "// Code generated by mkasm.go. DO NOT EDIT.\n\n"
+
+func main() {
+	generate("expand_amd64.s", genExpanders)
+}
+
+// generate writes the assembly produced by genFunc to fileName, echoing
+// a copy of everything to stdout for debugging.
+func generate(fileName string, genFunc func(*gen.File)) {
+	var buf bytes.Buffer
+	tee := io.MultiWriter(&buf, os.Stdout)
+
+	file := gen.NewFile(tee)
+
+	genFunc(file)
+
+	// Emit the DO NOT EDIT header, then compile the recorded functions,
+	// which writes their assembly to tee. Use Fprint, not Fprintf:
+	// header is plain text, not a format string, and must not be
+	// reinterpreted if it ever grows a '%'.
+	fmt.Fprint(tee, header)
+	file.Compile()
+
+	// os.WriteFile reports errors from the final close as well, which a
+	// deferred f.Close would silently drop. 0666 matches os.Create.
+	if err := os.WriteFile(fileName, buf.Bytes(), 0666); err != nil {
+		log.Fatal(err)
+	}
+}
+
+// genExpanders emits one bitmap-expansion function for every size class
+// that has a packed pointer/scalar bitmap, plus the assembly table
+// ·gcExpandersAVX512 mapping size class to its expander.
+func genExpanders(file *gen.File) {
+	gcExpandersAVX512 := make([]*gen.Func, len(gc.SizeClassToSize))
+	for sc, ob := range gc.SizeClassToSize {
+		if gc.SizeClassToNPages[sc] != 1 {
+			// These functions all produce a bitmap that covers exactly one
+			// page.
+			continue
+		}
+		if ob > gc.MinSizeForMallocHeader {
+			// This size class is too big to have a packed pointer/scalar bitmap.
+			break
+		}
+
+		// xf is the expansion factor: one mark bit per object becomes
+		// ob/8 consecutive output bits (one per 8-byte word).
+		xf := int(ob) / 8
+		log.Printf("size class %d bytes, expansion %dx", ob, xf)
+
+		fn := gen.NewFunc(fmt.Sprintf("expandAVX512_%d<>", xf))
+		ptrObjBits := gen.Arg[gen.Ptr[gen.Uint8x64]](fn)
+
+		if xf == 1 {
+			expandIdentity(ptrObjBits)
+		} else {
+			ok := gfExpander(xf, ptrObjBits)
+			if !ok {
+				// NOTE(review): fn is still added to the table below even on
+				// failure; presumably a failure is a generator bug caught by
+				// inspecting this log output — confirm.
+				log.Printf("failed to generate expander for size class %d", sc)
+			}
+		}
+		file.AddFunc(fn)
+		gcExpandersAVX512[sc] = fn
+	}
+
+	// Generate table mapping size class to expander PC
+	file.AddConst("·gcExpandersAVX512", gcExpandersAVX512)
+}
+
+// mat8x8 is an 8x8 bit matrix, stored one byte per row.
+type mat8x8 struct {
+	mat [8]uint8
+}
+
+// matGroupToVec packs eight 8x8 bit matrixes into the eight 64-bit
+// lanes of a vector constant, with rows reversed within each lane.
+func matGroupToVec(mats *[8]mat8x8) [8]uint64 {
+	var out [8]uint64
+	for i := range mats {
+		for j := range mats[i].mat {
+			// For some reason, Intel flips the rows.
+			out[i] |= uint64(mats[i].mat[j]) << (8 * (7 - j))
+		}
+	}
+	return out
+}
+
+// expandIdentity implements 1x expansion (that is, no expansion): it
+// loads the two 64-byte halves of the input bitmap and returns them
+// unchanged.
+func expandIdentity(ptrObjBits gen.Ptr[gen.Uint8x64]) {
+	objBitsLo := gen.Deref(ptrObjBits)
+	objBitsHi := gen.Deref(ptrObjBits.AddConst(64))
+	gen.Return(objBitsLo, objBitsHi)
+}
+
+// gfExpander produces a function that expands each bit in an input bitmap into
+// f consecutive bits in an output bitmap, using GF(2) 8x8 bit-matrix
+// multiplies (GF2P8Affine) followed by byte shuffles. It reports whether
+// generation succeeded; on failure the emitted function is incomplete.
+//
+// The input is
+//
+//	AX *[8]uint64 = A pointer to floor(1024/f) bits (f >= 2, so at most 512 bits)
+//
+// The output is
+//
+//	Z1 [64]uint8 = The bottom 512 bits of the expanded bitmap
+//	Z2 [64]uint8 = The top 512 bits of the expanded bitmap
+//
+// TODO(austin): This should be Z0/Z1.
+func gfExpander(f int, ptrObjBits gen.Ptr[gen.Uint8x64]) bool {
+	// TODO(austin): For powers of 2 >= 8, we can use mask expansion ops to make this much simpler.
+
+	// TODO(austin): For f >= 8, I suspect there are better ways to do this.
+	//
+	// For example, we could use a mask expansion to get a full byte for each
+	// input bit, and separately create the bytes that blend adjacent bits, then
+	// shuffle those bytes together. Certainly for f >= 16 this makes sense
+	// because each of those bytes will be used, possibly more than once.
+
+	objBits := gen.Deref(ptrObjBits)
+
+	// A term describes how one output byte is computed: multiply input
+	// byte iByte by the 8x8 bit matrix mat to produce output byte oByte.
+	type term struct {
+		iByte, oByte int
+		mat          mat8x8
+	}
+	var terms []term
+
+	// Iterate over all output bytes and construct the 8x8 GF2 matrix to compute
+	// the output byte from the appropriate input byte. Gather all of these into
+	// "terms".
+	for oByte := 0; oByte < 1024/8; oByte++ {
+		var byteMat mat8x8
+		iByte := -1
+		for oBit := oByte * 8; oBit < oByte*8+8; oBit++ {
+			iBit := oBit / f
+			if iByte == -1 {
+				iByte = iBit / 8
+			} else if iByte != iBit/8 {
+				// Each output byte must be a function of a single input
+				// byte for this scheme to work.
+				log.Printf("output byte %d straddles input bytes %d and %d", oByte, iByte, iBit/8)
+				return false
+			}
+			// One way to view this is that the i'th row of the matrix will be
+			// ANDed with the input byte, and the parity of the result will set
+			// the i'th bit in the output. We use a simple 1 bit mask, so the
+			// parity is irrelevant beyond selecting out that one bit.
+			byteMat.mat[oBit%8] = 1 << (iBit % 8)
+		}
+		terms = append(terms, term{iByte, oByte, byteMat})
+	}
+
+	// Disabled debugging aid.
+	if false {
+		// Print input byte -> output byte as a matrix
+		maxIByte, maxOByte := 0, 0
+		for _, term := range terms {
+			maxIByte = max(maxIByte, term.iByte)
+			maxOByte = max(maxOByte, term.oByte)
+		}
+		iToO := make([][]rune, maxIByte+1)
+		for i := range iToO {
+			iToO[i] = make([]rune, maxOByte+1)
+		}
+		matMap := make(map[mat8x8]int)
+		for _, term := range terms {
+			i, ok := matMap[term.mat]
+			if !ok {
+				i = len(matMap)
+				matMap[term.mat] = i
+			}
+			iToO[term.iByte][term.oByte] = 'A' + rune(i)
+		}
+		for o := range maxOByte + 1 {
+			fmt.Printf("%d", o)
+			for i := range maxIByte + 1 {
+				fmt.Printf(",")
+				if mat := iToO[i][o]; mat != 0 {
+					fmt.Printf("%c", mat)
+				}
+			}
+			fmt.Println()
+		}
+	}
+
+	// In hardware, each (8 byte) matrix applies to 8 bytes of data in parallel,
+	// and we get to operate on up to 8 matrixes in parallel (or 64 values). That is:
+	//
+	//	abcdefgh ijklmnop qrstuvwx yzABCDEF GHIJKLMN OPQRSTUV WXYZ0123 456789_+
+	//	  mat0     mat1     mat2     mat3     mat4     mat5     mat6     mat7
+
+	// Group the terms by matrix, but limit each group to 8 terms.
+	const termsPerGroup = 8      // Number of terms we can multiply by the same matrix.
+	const groupsPerSuperGroup = 8 // Number of matrixes we can fit in a vector.
+
+	matMap := make(map[mat8x8]int)
+	allMats := make(map[mat8x8]bool)
+	var termGroups [][]term
+	for _, term := range terms {
+		allMats[term.mat] = true
+
+		i, ok := matMap[term.mat]
+		if ok && f > groupsPerSuperGroup {
+			// The output is ultimately produced in two [64]uint8 registers.
+			// Getting every byte in the right place of each of these requires a
+			// final permutation that often requires more than one source.
+			//
+			// Up to 8x expansion, we can get a really nice grouping so we can use
+			// the same 8 matrix vector several times, without producing
+			// permutations that require more than two sources.
+			//
+			// Above 8x, however, we can't get nice matrixes anyway, so we
+			// instead prefer reducing the complexity of the permutations we
+			// need to produce the final outputs. To do this, avoid grouping
+			// together terms that are split across the two registers.
+			outRegister := termGroups[i][0].oByte / 64
+			if term.oByte/64 != outRegister {
+				ok = false
+			}
+		}
+		if !ok {
+			// Start a new term group.
+			i = len(termGroups)
+			matMap[term.mat] = i
+			termGroups = append(termGroups, nil)
+		}
+
+		termGroups[i] = append(termGroups[i], term)
+
+		if len(termGroups[i]) == termsPerGroup {
+			// This term group is full.
+			delete(matMap, term.mat)
+		}
+	}
+
+	for i, termGroup := range termGroups {
+		log.Printf("term group %d:", i)
+		for _, term := range termGroup {
+			log.Printf("  %+v", term)
+		}
+	}
+
+	// We can do 8 matrix multiplies in parallel, which is 8 term groups. Pack
+	// as many term groups as we can into each super-group to minimize the
+	// number of matrix multiplies.
+	//
+	// Ideally, we use the same matrix in each super-group, which might mean
+	// doing fewer than 8 multiplies at a time. That's fine because it never
+	// increases the total number of matrix multiplies.
+	//
+	// TODO: Packing the matrixes less densely may let us use more broadcast
+	// loads instead of general permutations, though. That replaces a load of
+	// the permutation with a load of the matrix, but is probably still slightly
+	// better.
+	var sgSize, nSuperGroups int
+	oneMatVec := f <= groupsPerSuperGroup
+	if oneMatVec {
+		// We can use the same matrix in each multiply by doing sgSize
+		// multiplies at a time.
+		sgSize = groupsPerSuperGroup / len(allMats) * len(allMats)
+		nSuperGroups = (len(termGroups) + sgSize - 1) / sgSize
+	} else {
+		// We can't use the same matrix for each multiply. Just do as many at a
+		// time as we can.
+		//
+		// TODO: This is going to produce several distinct matrixes, when we
+		// probably only need two. Be smarter about how we create super-groups
+		// in this case. Maybe we build up an array of super-groups and then the
+		// loop below just turns them into ops?
+		sgSize = 8
+		nSuperGroups = (len(termGroups) + groupsPerSuperGroup - 1) / groupsPerSuperGroup
+	}
+
+	// Construct each super-group.
+	var matGroup [8]mat8x8
+	var matMuls []gen.Uint8x64
+	// perm maps each output byte to matMuls[perm[i]/64], byte perm[i]%64.
+	var perm [128]int
+	for sgi := range nSuperGroups {
+		var iperm [64]uint8
+		for i := range iperm {
+			iperm[i] = 0xff // "Don't care"
+		}
+		// Pick off sgSize term groups.
+		superGroup := termGroups[:min(len(termGroups), sgSize)]
+		termGroups = termGroups[len(superGroup):]
+		// Build the matrix and permutations for this super-group.
+		var thisMatGroup [8]mat8x8
+		for i, termGroup := range superGroup {
+			// All terms in this group have the same matrix. Pick one.
+			thisMatGroup[i] = termGroup[0].mat
+			for j, term := range termGroup {
+				// Build the input permutation.
+				iperm[i*termsPerGroup+j] = uint8(term.iByte)
+				// Build the output permutation.
+				perm[term.oByte] = sgi*groupsPerSuperGroup*termsPerGroup + i*termsPerGroup + j
+			}
+		}
+		log.Printf("input permutation %d: %v", sgi, iperm)
+
+		// Check that we're not making more distinct matrixes than expected.
+		if oneMatVec {
+			if sgi == 0 {
+				matGroup = thisMatGroup
+			} else if matGroup != thisMatGroup {
+				log.Printf("super-groups have different matrixes:\n%+v\n%+v", matGroup, thisMatGroup)
+				return false
+			}
+		}
+
+		// Emit matrix op.
+		matConst := gen.ConstUint64x8(matGroupToVec(&thisMatGroup), fmt.Sprintf("*_mat%d<>", sgi))
+		inOp := objBits.Shuffle(gen.ConstUint8x64(iperm, fmt.Sprintf("*_inShuf%d<>", sgi)))
+		matMul := matConst.GF2P8Affine(inOp)
+		matMuls = append(matMuls, matMul)
+	}
+
+	log.Printf("output permutation: %v", perm)
+
+	outLo, ok := genShuffle("*_outShufLo", (*[64]int)(perm[:64]), matMuls...)
+	if !ok {
+		log.Printf("bad number of inputs to final shuffle: %d != 1, 2, or 4", len(matMuls))
+		return false
+	}
+	outHi, ok := genShuffle("*_outShufHi", (*[64]int)(perm[64:]), matMuls...)
+	if !ok {
+		log.Printf("bad number of inputs to final shuffle: %d != 1, 2, or 4", len(matMuls))
+		return false
+	}
+	gen.Return(outLo, outHi)
+
+	return true
+}
+
+// genShuffle emits a byte shuffle producing, from args, the 64-byte
+// vector described by perm: output byte i comes from byte perm[i]%64 of
+// args[perm[i]/64]. It fails (returning false) when perm draws from
+// more than four distinct inputs.
+func genShuffle(name string, perm *[64]int, args ...gen.Uint8x64) (gen.Uint8x64, bool) {
+	// Construct flattened permutation.
+	var vperm [64]byte
+
+	// Get the inputs used by this permutation.
+	var inputs []int
+	for i, src := range perm {
+		inputIdx := slices.Index(inputs, src/64)
+		if inputIdx == -1 {
+			inputIdx = len(inputs)
+			inputs = append(inputs, src/64)
+		}
+		// Low 6 bits select the byte, bits 6+ select the input.
+		vperm[i] = byte(src%64 | (inputIdx << 6))
+	}
+
+	// Emit instructions for easy cases.
+	switch len(inputs) {
+	case 1:
+		constOp := gen.ConstUint8x64(vperm, name)
+		return args[inputs[0]].Shuffle(constOp), true
+	case 2:
+		constOp := gen.ConstUint8x64(vperm, name)
+		return args[inputs[0]].Shuffle2(args[inputs[1]], constOp), true
+	}
+
+	// Harder case, we need to shuffle in from up to 2 more tables.
+	//
+	// Perform two shuffles. One shuffle will get its data from the first
+	// two inputs, the other shuffle will get its data from the other one
+	// or two inputs. All values each shuffle doesn't care about will
+	// be zeroed, so the two results can be ORed together below.
+	var vperms [2][64]byte
+	var masks [2]uint64
+	for j, idx := range vperm {
+		for i := range vperms {
+			vperms[i][j] = 0xff // "Don't care"
+		}
+		if idx == 0xff {
+			continue
+		}
+		// idx/128 picks which of the two shuffles serves byte j.
+		vperms[idx/128][j] = idx % 128
+		masks[idx/128] |= uint64(1) << j
+	}
+
+	// Validate that the masks are fully disjoint (and together cover all
+	// 64 output bytes).
+	if masks[0]^masks[1] != ^uint64(0) {
+		panic("bad shuffle!")
+	}
+
+	// Generate constants.
+	constOps := make([]gen.Uint8x64, len(vperms))
+	for i, v := range vperms {
+		constOps[i] = gen.ConstUint8x64(v, name+strconv.Itoa(i))
+	}
+
+	// Generate shuffles.
+	switch len(inputs) {
+	case 3:
+		r0 := args[inputs[0]].Shuffle2Zeroed(args[inputs[1]], constOps[0], gen.ConstMask64(masks[0]))
+		r1 := args[inputs[2]].ShuffleZeroed(constOps[1], gen.ConstMask64(masks[1]))
+		return r0.ToUint64x8().Or(r1.ToUint64x8()).ToUint8x64(), true
+	case 4:
+		r0 := args[inputs[0]].Shuffle2Zeroed(args[inputs[1]], constOps[0], gen.ConstMask64(masks[0]))
+		r1 := args[inputs[2]].Shuffle2Zeroed(args[inputs[3]], constOps[1], gen.ConstMask64(masks[1]))
+		return r0.ToUint64x8().Or(r1.ToUint64x8()).ToUint8x64(), true
+	}
+
+	// Too many inputs. To support more, we'd need to separate tables much earlier.
+	// Right now all the indices fit in a byte, but with >4 inputs they might not (>256 bytes).
+	return args[0], false
+}
diff --git a/src/internal/runtime/gc/scan/scan_amd64.go b/src/internal/runtime/gc/scan/scan_amd64.go
new file mode 100644
index 00000000000000..2ac181f97e5b66
--- /dev/null
+++ b/src/internal/runtime/gc/scan/scan_amd64.go
@@ -0,0 +1,41 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package scan
+
+import (
+ "internal/cpu"
+ "internal/runtime/gc"
+ "unsafe"
+)
+
+// ScanSpanPacked dispatches to the fastest available implementation of
+// the packed span scan for this CPU.
+func ScanSpanPacked(mem unsafe.Pointer, bufp *uintptr, objMarks *gc.ObjMask, sizeClass uintptr, ptrMask *gc.PtrMask) (count int32) {
+	if !CanAVX512() {
+		panic("not implemented")
+	}
+	return ScanSpanPackedAVX512(mem, bufp, objMarks, sizeClass, ptrMask)
+}
+
+// HasFastScanSpanPacked reports whether ScanSpanPacked is backed by an
+// accelerated implementation on this CPU.
+func HasFastScanSpanPacked() bool {
+	return avx512ScanPackedReqsMet
+}
+
+// -- AVX512 --
+
+// CanAVX512 reports whether the CPU has all the AVX-512 extensions the
+// vectorized scan path requires.
+func CanAVX512() bool {
+	return avx512ScanPackedReqsMet
+}
+
+// ScanSpanPackedAVX512 runs the AVX-512 assembly scan kernel and then
+// compacts away any nil (zero) words the kernel copied into the buffer.
+func ScanSpanPackedAVX512(mem unsafe.Pointer, bufp *uintptr, objMarks *gc.ObjMask, sizeClass uintptr, ptrMask *gc.PtrMask) (count int32) {
+	return FilterNil(bufp, scanSpanPackedAVX512(mem, bufp, objMarks, sizeClass, ptrMask))
+}
+
+// scanSpanPackedAVX512 is implemented in assembly (scan_amd64.s).
+//
+//go:noescape
+func scanSpanPackedAVX512(mem unsafe.Pointer, bufp *uintptr, objMarks *gc.ObjMask, sizeClass uintptr, ptrMask *gc.PtrMask) (count int32)
+
+// avx512ScanPackedReqsMet reports whether the CPU has every AVX-512
+// extension the scan kernel and generated expanders rely on (BITALG for
+// VPOPCNTB and GFNI for the GF2P8Affine-based expansion, among others).
+var avx512ScanPackedReqsMet = cpu.X86.HasAVX512VL &&
+	cpu.X86.HasAVX512BW &&
+	cpu.X86.HasGFNI &&
+	cpu.X86.HasAVX512BITALG &&
+	cpu.X86.HasAVX512VBMI
diff --git a/src/internal/runtime/gc/scan/scan_amd64.s b/src/internal/runtime/gc/scan/scan_amd64.s
new file mode 100644
index 00000000000000..055995fa38c987
--- /dev/null
+++ b/src/internal/runtime/gc/scan/scan_amd64.s
@@ -0,0 +1,103 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "textflag.h"
+
+// Test-only. Expands the packed object bitmap for sizeClass into
+// unpacked via the generated expander table: the expander leaves its
+// result in Z1/Z2, and the 128-byte result is stored to unpacked.
+TEXT ·ExpandAVX512(SB), NOSPLIT, $0-24
+	MOVQ sizeClass+0(FP), CX
+	MOVQ packed+8(FP), AX
+
+	// Call the expander for this size class
+	LEAQ ·gcExpandersAVX512(SB), BX
+	CALL (BX)(CX*8)
+
+	MOVQ unpacked+16(FP), DI // Expanded output bitmap pointer
+	VMOVDQU64 Z1, 0(DI)
+	VMOVDQU64 Z2, 64(DI)
+	VZEROUPPER
+	RET
+
+// scanSpanPackedAVX512 is the assembly kernel behind ScanSpanPackedAVX512.
+// It copies each word of mem selected by both the expanded objMarks and
+// ptrMask into bufp and returns the number of words written. Zero (nil)
+// words are copied too; the Go wrapper removes them with FilterNil.
+// The 256-byte frame holds the 128-byte scan mask and the 128 per-frame
+// popcounts computed up front.
+TEXT ·scanSpanPackedAVX512(SB), NOSPLIT, $256-44
+	// Z1+Z2 = Expand the grey object mask into a grey word mask
+	MOVQ objMarks+16(FP), AX
+	MOVQ sizeClass+24(FP), CX
+	LEAQ ·gcExpandersAVX512(SB), BX
+	CALL (BX)(CX*8)
+
+	// Z3+Z4 = Load the pointer mask
+	MOVQ ptrMask+32(FP), AX
+	VMOVDQU64 0(AX), Z3
+	VMOVDQU64 64(AX), Z4
+
+	// Z1+Z2 = Combine the grey word mask with the pointer mask to get the scan mask
+	VPANDQ Z1, Z3, Z1
+	VPANDQ Z2, Z4, Z2
+
+	// Now each bit of Z1+Z2 represents one word of the span.
+	// Thus, each byte covers 64 bytes of memory, which is also how
+	// much we can fit in a Z register.
+	//
+	// We do a load/compress for each 64 byte frame.
+	//
+	// Z3+Z4 [128]uint8 = Number of memory words to scan in each 64 byte frame
+	VPOPCNTB Z1, Z3 // Requires BITALG
+	VPOPCNTB Z2, Z4
+
+	// Store the scan mask and word counts at 0(SP) and 128(SP).
+	//
+	// TODO: Is it better to read directly from the registers?
+	VMOVDQU64 Z1, 0(SP)
+	VMOVDQU64 Z2, 64(SP)
+	VMOVDQU64 Z3, 128(SP)
+	VMOVDQU64 Z4, 192(SP)
+
+	// SI = Current address in span
+	MOVQ mem+0(FP), SI
+	// DI = Scan buffer base
+	MOVQ bufp+8(FP), DI
+	// DX = Index in scan buffer, (DI)(DX*8) = Current position in scan buffer
+	MOVQ $0, DX
+
+	// AX = address in scan mask, 128(AX) = address in popcount
+	LEAQ 0(SP), AX
+
+	// Loop over the 64 byte frames in this span.
+	// BX = 1 past the end of the scan mask
+	LEAQ 128(SP), BX
+
+	// Align loop to a cache line so that performance is less sensitive
+	// to how this function ends up laid out in memory. This is a hot
+	// function in the GC, and this is a tight loop. We don't want
+	// performance to waver wildly due to unrelated changes.
+	PCALIGN $64
+loop:
+	// CX = Fetch the mask of words to load from this frame.
+	MOVBQZX 0(AX), CX
+	// Skip empty frames.
+	TESTQ CX, CX
+	JZ skip
+
+	// Load the 64 byte frame. (The load is unconditional; CX only masks
+	// the compress below.)
+	KMOVB CX, K1
+	VMOVDQA64 0(SI), Z1
+
+	// Collect just the pointers from the greyed objects into the scan buffer,
+	// i.e., copy the word indices in the mask from Z1 into contiguous memory.
+	VPCOMPRESSQ Z1, K1, (DI)(DX*8)
+	// Advance the scan buffer position by the number of pointers.
+	MOVBQZX 128(AX), CX
+	ADDQ CX, DX
+
+skip:
+	ADDQ $64, SI
+	ADDQ $1, AX
+	CMPQ AX, BX
+	JB loop
+
+end:
+	MOVL DX, count+40(FP)
+	VZEROUPPER
+	RET
diff --git a/src/internal/runtime/gc/scan/scan_amd64_test.go b/src/internal/runtime/gc/scan/scan_amd64_test.go
new file mode 100644
index 00000000000000..a914b4f4d7a64f
--- /dev/null
+++ b/src/internal/runtime/gc/scan/scan_amd64_test.go
@@ -0,0 +1,19 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build amd64
+
+package scan_test
+
+import (
+ "internal/runtime/gc/scan"
+ "testing"
+)
+
+// TestScanSpanPackedAVX512 checks the AVX-512 scan kernel against the
+// reference implementation, skipping on CPUs without the needed features.
+func TestScanSpanPackedAVX512(t *testing.T) {
+	if !scan.CanAVX512() {
+		t.Skip("no AVX512")
+	}
+	testScanSpanPacked(t, scan.ScanSpanPackedAVX512)
+}
diff --git a/src/internal/runtime/gc/scan/scan_generic.go b/src/internal/runtime/gc/scan/scan_generic.go
new file mode 100644
index 00000000000000..a4d51827cc6a20
--- /dev/null
+++ b/src/internal/runtime/gc/scan/scan_generic.go
@@ -0,0 +1,23 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !amd64
+
+package scan
+
+import (
+ "internal/runtime/gc"
+ "unsafe"
+)
+
+// HasFastScanSpanPacked reports whether an accelerated ScanSpanPacked
+// implementation is available. On this platform it never is.
+func HasFastScanSpanPacked() bool {
+	// N.B. ScanSpanPackedGeneric isn't actually fast enough to serve as a general-purpose implementation.
+	// The runtime's alternative of jumping between each object is still substantially better, even at
+	// relatively high object densities.
+	return false
+}
+
+// ScanSpanPacked falls back to the portable Go implementation.
+func ScanSpanPacked(mem unsafe.Pointer, bufp *uintptr, objMarks *gc.ObjMask, sizeClass uintptr, ptrMask *gc.PtrMask) (count int32) {
+	return ScanSpanPackedGo(mem, bufp, objMarks, sizeClass, ptrMask)
+}
diff --git a/src/internal/runtime/gc/scan/scan_generic_test.go b/src/internal/runtime/gc/scan/scan_generic_test.go
new file mode 100644
index 00000000000000..250135eca482dc
--- /dev/null
+++ b/src/internal/runtime/gc/scan/scan_generic_test.go
@@ -0,0 +1,14 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package scan_test
+
+import (
+ "internal/runtime/gc/scan"
+ "testing"
+)
+
+// TestScanSpanPackedGo checks the portable Go scan implementation
+// against the reference implementation.
+func TestScanSpanPackedGo(t *testing.T) {
+	testScanSpanPacked(t, scan.ScanSpanPackedGo)
+}
diff --git a/src/internal/runtime/gc/scan/scan_go.go b/src/internal/runtime/gc/scan/scan_go.go
new file mode 100644
index 00000000000000..9a2985a3ccd220
--- /dev/null
+++ b/src/internal/runtime/gc/scan/scan_go.go
@@ -0,0 +1,104 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package scan
+
+import (
+ "internal/goarch"
+ "internal/runtime/gc"
+ "internal/runtime/sys"
+ "unsafe"
+)
+
+// ScanSpanPackedGo is an optimized pure Go implementation of ScanSpanPacked.
+//
+// It writes to bufp the value of every word of mem that belongs to a
+// marked object in objMarks and whose ptrMask bit is set, skipping
+// values below 4096 that cannot be useful pointers, and returns the
+// number of words written. bufp must have room for gc.PageWords words.
+func ScanSpanPackedGo(mem unsafe.Pointer, bufp *uintptr, objMarks *gc.ObjMask, sizeClass uintptr, ptrMask *gc.PtrMask) (count int32) {
+	buf := newUnsafeBuf(bufp)
+	objBytes := uintptr(gc.SizeClassToSize[sizeClass])
+	// TODO(austin): Trim objMarks to the number of objects in this size class?
+	for markI, markWord := range objMarks {
+		// Visit each set bit (marked object) of markWord, clearing bits
+		// as we go.
+		for range sys.OnesCount64(uint64(markWord)) {
+			bitI := sys.TrailingZeros64(uint64(markWord))
+			markWord &^= 1 << bitI
+
+			objIndex := markI*goarch.PtrBits + bitI
+
+			// objStartInSpan is the index of the word from mem where the
+			// object starts. objEndInSpan points to the next object, i.e.
+			// it's an exclusive upper bound.
+			objStartInSpan := objBytes * uintptr(objIndex) / goarch.PtrSize
+			objEndInSpan := objStartInSpan + objBytes/goarch.PtrSize
+
+			// TODO: Another way to do this would be to extract the pointer mask
+			// for this object (it's at most 64 bits) and do a bit iteration
+			// over that.
+
+			for wordI := objStartInSpan; wordI < objEndInSpan; wordI++ {
+				val := *(*uintptr)(unsafe.Add(mem, wordI*goarch.PtrSize))
+				// Check if we should enqueue this word.
+				//
+				// We load the word before the check because, even though this
+				// can lead to loading much more than necessary, it's faster.
+				// Most likely this is because it warms up the hardware
+				// prefetcher much better, and gives us more time before we need
+				// the value.
+				//
+				// We discard values that can't possibly be useful pointers
+				// here, too, because this filters out a lot of words and does
+				// so with as little processing as possible.
+				//
+				// TODO: This is close to, but not entirely branchless.
+				isPtr := bool2int(ptrMask[wordI/goarch.PtrBits]&(1<<(wordI%goarch.PtrBits)) != 0)
+				isNonNil := bool2int(val >= 4096)
+				pred := isPtr&isNonNil != 0
+				buf.addIf(val, pred)
+			}
+		}
+	}
+	// We don't know the true size of bufp, but we can at least catch obvious errors
+	// in this function by making sure we didn't write more than gc.PageWords pointers
+	// into the buffer.
+	buf.check(gc.PageWords)
+	return int32(buf.n)
+}
+
+// unsafeBuf allows for appending to a buffer without bounds-checks or branches.
+// Callers are responsible for ensuring the backing memory is large
+// enough; see addIf and check.
+type unsafeBuf[T any] struct {
+	base *T  // first element of the backing array
+	n    int // number of elements appended so far
+}
+
+// newUnsafeBuf returns an empty unsafeBuf backed by the array starting
+// at base.
+func newUnsafeBuf[T any](base *T) unsafeBuf[T] {
+	return unsafeBuf[T]{base, 0}
+}
+
+// addIf appends a value to the buffer if the predicate is true.
+//
+// addIf speculatively writes to the next index of the buffer, so the caller
+// must be certain that such a write will still be in-bounds with respect
+// to the buffer's true capacity.
+func (b *unsafeBuf[T]) addIf(val T, pred bool) {
+	// Store unconditionally at index n, then advance n only when pred
+	// is true; this avoids branching on pred.
+	*(*T)(unsafe.Add(unsafe.Pointer(b.base), b.n*int(unsafe.Sizeof(val)))) = val
+	b.n += bool2int(pred)
+}
+
+// check performs a bounds check on speculative writes into the buffer.
+// Calling this shortly after a series of addIf calls is important to
+// catch any misuse as fast as possible. Separating the bounds check from
+// the append is more efficient, but one check to cover several appends is
+// still efficient and much more memory safe.
+func (b unsafeBuf[T]) check(cap int) {
+	// We fail even if b.n == cap because addIf speculatively writes one past b.n.
+	if b.n >= cap {
+		panic("unsafeBuf overflow")
+	}
+}
+
+// bool2int returns 1 if x is true and 0 otherwise.
+func bool2int(x bool) int {
+	// This particular pattern gets optimized by the compiler.
+	var v int
+	if x {
+		v = 1
+	}
+	return v
+}
diff --git a/src/internal/runtime/gc/scan/scan_reference.go b/src/internal/runtime/gc/scan/scan_reference.go
new file mode 100644
index 00000000000000..05eca98df7abaf
--- /dev/null
+++ b/src/internal/runtime/gc/scan/scan_reference.go
@@ -0,0 +1,40 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package scan
+
+import (
+ "internal/goarch"
+ "internal/runtime/gc"
+ "unsafe"
+)
+
+// ScanSpanPackedReference is the reference implementation of ScanSpanPacked. It prioritizes clarity over performance.
+//
+// Concretely, ScanSpanPacked functions read pointers from mem, assumed to be gc.PageSize-aligned and gc.PageSize in size,
+// and writes them to bufp, which is large enough to guarantee that even if every pointer-word of mem is a pointer, it will fit.
+// Therefore bufp is always at least gc.PageSize in size.
+//
+// ScanSpanPacked is supposed to identify pointers by first filtering words by objMarks, where each bit of the mask
+// represents gc.SizeClassToSize[sizeClass] bytes of memory, and then filtering again by the bits in ptrMask.
+func ScanSpanPackedReference(mem unsafe.Pointer, bufp *uintptr, objMarks *gc.ObjMask, sizeClass uintptr, ptrMask *gc.PtrMask) (count int32) {
+	buf := unsafe.Slice(bufp, gc.PageWords)
+	// expandBy is the number of words each object (and hence each mark
+	// bit) covers.
+	expandBy := uintptr(gc.SizeClassToSize[sizeClass]) / goarch.PtrSize
+	for word := range gc.PageWords {
+		objI := uintptr(word) / expandBy
+		// Skip words of unmarked objects.
+		if objMarks[objI/goarch.PtrBits]&(1<<(objI%goarch.PtrBits)) == 0 {
+			continue
+		}
+		// Skip non-pointer words.
+		if ptrMask[word/goarch.PtrBits]&(1<<(word%goarch.PtrBits)) == 0 {
+			continue
+		}
+		ptr := *(*uintptr)(unsafe.Add(mem, word*goarch.PtrSize))
+		// Skip nil pointers.
+		if ptr == 0 {
+			continue
+		}
+		buf[count] = ptr
+		count++
+	}
+	return count
+}
diff --git a/src/internal/runtime/gc/scan/scan_test.go b/src/internal/runtime/gc/scan/scan_test.go
new file mode 100644
index 00000000000000..14a0f6f7f48a47
--- /dev/null
+++ b/src/internal/runtime/gc/scan/scan_test.go
@@ -0,0 +1,257 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package scan_test
+
+import (
+ "fmt"
+ "internal/cpu"
+ "internal/goarch"
+ "internal/runtime/gc"
+ "internal/runtime/gc/scan"
+ "math/bits"
+ "math/rand/v2"
+ "slices"
+ "sync"
+ "testing"
+ "unsafe"
+)
+
+type scanFunc func(mem unsafe.Pointer, bufp *uintptr, objMarks *gc.ObjMask, sizeClass uintptr, ptrMask *gc.PtrMask) (count int32)
+
+func testScanSpanPacked(t *testing.T, scanF scanFunc) {
+ scanR := scan.ScanSpanPackedReference
+
+ // Construct a fake memory
+ mem, free := makeMem(t, 1)
+ defer free()
+ for i := range mem {
+		// Use values > gc.PageSize because a scan function can discard
+		// pointers smaller than this.
+ mem[i] = uintptr(int(gc.PageSize) + i + 1)
+ }
+
+ // Construct a random pointer mask
+ rnd := rand.New(rand.NewPCG(42, 42))
+ var ptrs gc.PtrMask
+ for i := range ptrs {
+ ptrs[i] = uintptr(rnd.Uint64())
+ }
+
+ bufF := make([]uintptr, gc.PageWords)
+ bufR := make([]uintptr, gc.PageWords)
+ testObjs(t, func(t *testing.T, sizeClass int, objs *gc.ObjMask) {
+ nF := scanF(unsafe.Pointer(&mem[0]), &bufF[0], objs, uintptr(sizeClass), &ptrs)
+ nR := scanR(unsafe.Pointer(&mem[0]), &bufR[0], objs, uintptr(sizeClass), &ptrs)
+
+ if nR != nF {
+ t.Errorf("want %d count, got %d", nR, nF)
+ } else if !slices.Equal(bufF[:nF], bufR[:nR]) {
+ t.Errorf("want scanned pointers %d, got %d", bufR[:nR], bufF[:nF])
+ }
+ })
+}
+
+func testObjs(t *testing.T, f func(t *testing.T, sizeClass int, objMask *gc.ObjMask)) {
+ for sizeClass := range gc.NumSizeClasses {
+ if sizeClass == 0 {
+ continue
+ }
+ size := uintptr(gc.SizeClassToSize[sizeClass])
+ if size > gc.MinSizeForMallocHeader {
+ break // Pointer/scalar metadata is not packed for larger sizes.
+ }
+ t.Run(fmt.Sprintf("size=%d", size), func(t *testing.T) {
+ // Scan a few objects near i to test boundary conditions.
+ const objMask = 0x101
+ nObj := uintptr(gc.SizeClassToNPages[sizeClass]) * gc.PageSize / size
+ for i := range nObj - uintptr(bits.Len(objMask)-1) {
+ t.Run(fmt.Sprintf("objs=0x%x<<%d", objMask, i), func(t *testing.T) {
+ var objs gc.ObjMask
+ objs[i/goarch.PtrBits] = objMask << (i % goarch.PtrBits)
+ f(t, sizeClass, &objs)
+ })
+ }
+ })
+ }
+}
+
+var dataCacheSizes = sync.OnceValue(func() []uintptr {
+ cs := cpu.DataCacheSizes()
+ for i, c := range cs {
+ fmt.Printf("# L%d cache: %d (%d Go pages)\n", i+1, c, c/gc.PageSize)
+ }
+ return cs
+})
+
+func BenchmarkScanSpanPacked(b *testing.B) {
+ benchmarkCacheSizes(b, benchmarkScanSpanPackedAllSizeClasses)
+}
+
+func benchmarkCacheSizes(b *testing.B, fn func(b *testing.B, heapPages int)) {
+ cacheSizes := dataCacheSizes()
+ b.Run("cache=tiny/pages=1", func(b *testing.B) {
+ fn(b, 1)
+ })
+ for i, cacheBytes := range cacheSizes {
+ pages := int(cacheBytes*3/4) / gc.PageSize
+ b.Run(fmt.Sprintf("cache=L%d/pages=%d", i+1, pages), func(b *testing.B) {
+ fn(b, pages)
+ })
+ }
+ if len(cacheSizes) == 0 {
+ return
+ }
+ ramPages := int(cacheSizes[len(cacheSizes)-1]*3/2) / gc.PageSize
+ b.Run(fmt.Sprintf("cache=ram/pages=%d", ramPages), func(b *testing.B) {
+ fn(b, ramPages)
+ })
+}
+
+func benchmarkScanSpanPackedAllSizeClasses(b *testing.B, nPages int) {
+	for sc := range gc.NumSizeClasses {
+		if sc == 0 {
+			continue
+		}
+		if uintptr(gc.SizeClassToSize[sc]) > gc.MinSizeForMallocHeader {
+			break // Pointer/scalar metadata is not packed for larger sizes; mirrors the bound in testObjs.
+		}
+		b.Run(fmt.Sprintf("sizeclass=%d", sc), func(b *testing.B) {
+			benchmarkScanSpanPacked(b, nPages, sc)
+		})
+	}
+}
+
+func benchmarkScanSpanPacked(b *testing.B, nPages int, sizeClass int) {
+ rnd := rand.New(rand.NewPCG(42, 42))
+
+ // Construct a fake memory
+ mem, free := makeMem(b, nPages)
+ defer free()
+ for i := range mem {
+		// Use values > gc.PageSize because a scan function can discard
+		// pointers smaller than this.
+ mem[i] = uintptr(int(gc.PageSize) + i + 1)
+ }
+
+ // Construct a random pointer mask
+ ptrs := make([]gc.PtrMask, nPages)
+ for i := range ptrs {
+ for j := range ptrs[i] {
+ ptrs[i][j] = uintptr(rnd.Uint64())
+ }
+ }
+
+ // Visit the pages in a random order
+ pageOrder := rnd.Perm(nPages)
+
+ // Create the scan buffer.
+ buf := make([]uintptr, gc.PageWords)
+
+ // Sweep from 0 marks to all marks. We'll use the same marks for each page
+ // because I don't think that predictability matters.
+ objBytes := uintptr(gc.SizeClassToSize[sizeClass])
+ nObj := gc.PageSize / objBytes
+ markOrder := rnd.Perm(int(nObj))
+ const steps = 11
+ for i := 0; i < steps; i++ {
+ frac := float64(i) / float64(steps-1)
+ // Set frac marks.
+ nMarks := int(float64(len(markOrder))*frac + 0.5)
+ var objMarks gc.ObjMask
+ for _, mark := range markOrder[:nMarks] {
+ objMarks[mark/goarch.PtrBits] |= 1 << (mark % goarch.PtrBits)
+ }
+ greyClusters := 0
+ for page := range ptrs {
+ greyClusters += countGreyClusters(sizeClass, &objMarks, &ptrs[page])
+ }
+
+ // Report MB/s of how much memory they're actually hitting. This assumes
+ // 64 byte cache lines (TODO: Should it assume 128 byte cache lines?)
+ // and expands each access to the whole cache line. This is useful for
+ // comparing against memory bandwidth.
+ //
+ // TODO: Add a benchmark that just measures single core memory bandwidth
+ // for comparison. (See runtime memcpy benchmarks.)
+ //
+ // TODO: Should there be a separate measure where we don't expand to
+ // cache lines?
+ avgBytes := int64(greyClusters) * int64(cpu.CacheLineSize) / int64(len(ptrs))
+
+ b.Run(fmt.Sprintf("pct=%d", int(100*frac)), func(b *testing.B) {
+ b.Run("impl=Reference", func(b *testing.B) {
+ b.SetBytes(avgBytes)
+ for i := range b.N {
+ page := pageOrder[i%len(pageOrder)]
+ scan.ScanSpanPackedReference(unsafe.Pointer(&mem[gc.PageWords*page]), &buf[0], &objMarks, uintptr(sizeClass), &ptrs[page])
+ }
+ })
+ b.Run("impl=Go", func(b *testing.B) {
+ b.SetBytes(avgBytes)
+ for i := range b.N {
+ page := pageOrder[i%len(pageOrder)]
+ scan.ScanSpanPackedGo(unsafe.Pointer(&mem[gc.PageWords*page]), &buf[0], &objMarks, uintptr(sizeClass), &ptrs[page])
+ }
+ })
+ if scan.HasFastScanSpanPacked() {
+ b.Run("impl=Platform", func(b *testing.B) {
+ b.SetBytes(avgBytes)
+ for i := range b.N {
+ page := pageOrder[i%len(pageOrder)]
+ scan.ScanSpanPacked(unsafe.Pointer(&mem[gc.PageWords*page]), &buf[0], &objMarks, uintptr(sizeClass), &ptrs[page])
+ }
+ })
+ }
+ })
+ }
+}
+
+func countGreyClusters(sizeClass int, objMarks *gc.ObjMask, ptrMask *gc.PtrMask) int {
+ clusters := 0
+ lastCluster := -1
+
+ expandBy := uintptr(gc.SizeClassToSize[sizeClass]) / goarch.PtrSize
+ for word := range gc.PageWords {
+ objI := uintptr(word) / expandBy
+ if objMarks[objI/goarch.PtrBits]&(1<<(objI%goarch.PtrBits)) == 0 {
+ continue
+ }
+ if ptrMask[word/goarch.PtrBits]&(1<<(word%goarch.PtrBits)) == 0 {
+ continue
+ }
+ c := word * 8 / goarch.PtrBits
+ if c != lastCluster {
+ lastCluster = c
+ clusters++
+ }
+ }
+ return clusters
+}
+
+func BenchmarkScanMaxBandwidth(b *testing.B) {
+ // Measure the theoretical "maximum" bandwidth of scanning by reproducing
+ // the memory access pattern of a full page scan, but using memcpy as the
+ // kernel instead of scanning.
+ benchmarkCacheSizes(b, func(b *testing.B, heapPages int) {
+ mem, free := makeMem(b, heapPages)
+ defer free()
+ for i := range mem {
+ mem[i] = uintptr(int(gc.PageSize) + i + 1)
+ }
+ buf := make([]uintptr, gc.PageWords)
+
+ // Visit the pages in a random order
+ rnd := rand.New(rand.NewPCG(42, 42))
+ pageOrder := rnd.Perm(heapPages)
+
+ b.SetBytes(int64(gc.PageSize))
+
+ b.ResetTimer()
+ for i := range b.N {
+ page := pageOrder[i%len(pageOrder)]
+ copy(buf, mem[gc.PageWords*page:])
+ }
+ })
+}
diff --git a/src/internal/runtime/gc/sizeclasses.go b/src/internal/runtime/gc/sizeclasses.go
index d2cca1cef13b28..e5d562f943adae 100644
--- a/src/internal/runtime/gc/sizeclasses.go
+++ b/src/internal/runtime/gc/sizeclasses.go
@@ -82,14 +82,15 @@ package gc
// 8192 13 32768
const (
- MinHeapAlign = 8
- MaxSmallSize = 32768
- SmallSizeDiv = 8
- SmallSizeMax = 1024
- LargeSizeDiv = 128
- NumSizeClasses = 68
- PageShift = 13
- MaxObjsPerSpan = 1024
+ MinHeapAlign = 8
+ MaxSmallSize = 32768
+ SmallSizeDiv = 8
+ SmallSizeMax = 1024
+ LargeSizeDiv = 128
+ NumSizeClasses = 68
+ PageShift = 13
+ MaxObjsPerSpan = 1024
+ MaxSizeClassNPages = 10
)
var SizeClassToSize = [NumSizeClasses]uint16{0, 8, 16, 24, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192, 208, 224, 240, 256, 288, 320, 352, 384, 416, 448, 480, 512, 576, 640, 704, 768, 896, 1024, 1152, 1280, 1408, 1536, 1792, 2048, 2304, 2688, 3072, 3200, 3456, 4096, 4864, 5376, 6144, 6528, 6784, 6912, 8192, 9472, 9728, 10240, 10880, 12288, 13568, 14336, 16384, 18432, 19072, 20480, 21760, 24576, 27264, 28672, 32768}
diff --git a/src/internal/runtime/maps/group.go b/src/internal/runtime/maps/group.go
index b23ff76f983146..c8d38ba27c8c5d 100644
--- a/src/internal/runtime/maps/group.go
+++ b/src/internal/runtime/maps/group.go
@@ -22,10 +22,9 @@ const (
ctrlEmpty ctrl = 0b10000000
ctrlDeleted ctrl = 0b11111110
- bitsetLSB = 0x0101010101010101
- bitsetMSB = 0x8080808080808080
- bitsetEmpty = bitsetLSB * uint64(ctrlEmpty)
- bitsetDeleted = bitsetLSB * uint64(ctrlDeleted)
+ bitsetLSB = 0x0101010101010101
+ bitsetMSB = 0x8080808080808080
+ bitsetEmpty = bitsetLSB * uint64(ctrlEmpty)
)
// bitset represents a set of slots within a group.
diff --git a/src/internal/runtime/maps/runtime.go b/src/internal/runtime/maps/runtime.go
index ff8a7482494aef..8bba23f07003bd 100644
--- a/src/internal/runtime/maps/runtime.go
+++ b/src/internal/runtime/maps/runtime.go
@@ -94,10 +94,11 @@ func runtime_mapaccess1(typ *abi.MapType, m *Map, key unsafe.Pointer) unsafe.Poi
// Probe table.
seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
+ h2Hash := h2(hash)
for ; ; seq = seq.next() {
g := t.groups.group(typ, seq.offset)
- match := g.ctrls().matchH2(h2(hash))
+ match := g.ctrls().matchH2(h2Hash)
for match != 0 {
i := match.first()
@@ -168,10 +169,11 @@ func runtime_mapaccess2(typ *abi.MapType, m *Map, key unsafe.Pointer) (unsafe.Po
// Probe table.
seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
+ h2Hash := h2(hash)
for ; ; seq = seq.next() {
g := t.groups.group(typ, seq.offset)
- match := g.ctrls().matchH2(h2(hash))
+ match := g.ctrls().matchH2(h2Hash)
for match != 0 {
i := match.first()
@@ -262,9 +264,10 @@ outer:
var firstDeletedGroup groupReference
var firstDeletedSlot uintptr
+ h2Hash := h2(hash)
for ; ; seq = seq.next() {
g := t.groups.group(typ, seq.offset)
- match := g.ctrls().matchH2(h2(hash))
+ match := g.ctrls().matchH2(h2Hash)
// Look for an existing slot containing this key.
for match != 0 {
@@ -329,7 +332,7 @@ outer:
slotElem = emem
}
- g.ctrls().set(i, ctrl(h2(hash)))
+ g.ctrls().set(i, ctrl(h2Hash))
t.growthLeft--
t.used++
m.used++
diff --git a/src/internal/runtime/maps/runtime_fast32.go b/src/internal/runtime/maps/runtime_fast32.go
index beed67ce286aa1..d5be04afd450c0 100644
--- a/src/internal/runtime/maps/runtime_fast32.go
+++ b/src/internal/runtime/maps/runtime_fast32.go
@@ -55,10 +55,11 @@ func runtime_mapaccess1_fast32(typ *abi.MapType, m *Map, key uint32) unsafe.Poin
// Probe table.
seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
+ h2Hash := h2(hash)
for ; ; seq = seq.next() {
g := t.groups.group(typ, seq.offset)
- match := g.ctrls().matchH2(h2(hash))
+ match := g.ctrls().matchH2(h2Hash)
for match != 0 {
i := match.first()
@@ -124,10 +125,11 @@ func runtime_mapaccess2_fast32(typ *abi.MapType, m *Map, key uint32) (unsafe.Poi
// Probe table.
seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
+ h2Hash := h2(hash)
for ; ; seq = seq.next() {
g := t.groups.group(typ, seq.offset)
- match := g.ctrls().matchH2(h2(hash))
+ match := g.ctrls().matchH2(h2Hash)
for match != 0 {
i := match.first()
@@ -245,9 +247,10 @@ outer:
var firstDeletedGroup groupReference
var firstDeletedSlot uintptr
+ h2Hash := h2(hash)
for ; ; seq = seq.next() {
g := t.groups.group(typ, seq.offset)
- match := g.ctrls().matchH2(h2(hash))
+ match := g.ctrls().matchH2(h2Hash)
// Look for an existing slot containing this key.
for match != 0 {
@@ -302,7 +305,7 @@ outer:
slotElem = g.elem(typ, i)
- g.ctrls().set(i, ctrl(h2(hash)))
+ g.ctrls().set(i, ctrl(h2Hash))
t.growthLeft--
t.used++
m.used++
@@ -383,9 +386,10 @@ outer:
var firstDeletedGroup groupReference
var firstDeletedSlot uintptr
+ h2Hash := h2(hash)
for ; ; seq = seq.next() {
g := t.groups.group(typ, seq.offset)
- match := g.ctrls().matchH2(h2(hash))
+ match := g.ctrls().matchH2(h2Hash)
// Look for an existing slot containing this key.
for match != 0 {
@@ -435,7 +439,7 @@ outer:
slotElem = g.elem(typ, i)
- g.ctrls().set(i, ctrl(h2(hash)))
+ g.ctrls().set(i, ctrl(h2Hash))
t.growthLeft--
t.used++
m.used++
diff --git a/src/internal/runtime/maps/runtime_fast64.go b/src/internal/runtime/maps/runtime_fast64.go
index 2f9cf28daafdb4..2bee2d4be019b3 100644
--- a/src/internal/runtime/maps/runtime_fast64.go
+++ b/src/internal/runtime/maps/runtime_fast64.go
@@ -55,10 +55,11 @@ func runtime_mapaccess1_fast64(typ *abi.MapType, m *Map, key uint64) unsafe.Poin
// Probe table.
seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
+ h2Hash := h2(hash)
for ; ; seq = seq.next() {
g := t.groups.group(typ, seq.offset)
- match := g.ctrls().matchH2(h2(hash))
+ match := g.ctrls().matchH2(h2Hash)
for match != 0 {
i := match.first()
@@ -124,10 +125,12 @@ func runtime_mapaccess2_fast64(typ *abi.MapType, m *Map, key uint64) (unsafe.Poi
// Probe table.
seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
+
+ h2Hash := h2(hash)
for ; ; seq = seq.next() {
g := t.groups.group(typ, seq.offset)
- match := g.ctrls().matchH2(h2(hash))
+ match := g.ctrls().matchH2(h2Hash)
for match != 0 {
i := match.first()
@@ -245,9 +248,10 @@ outer:
var firstDeletedGroup groupReference
var firstDeletedSlot uintptr
+ h2Hash := h2(hash)
for ; ; seq = seq.next() {
g := t.groups.group(typ, seq.offset)
- match := g.ctrls().matchH2(h2(hash))
+ match := g.ctrls().matchH2(h2Hash)
// Look for an existing slot containing this key.
for match != 0 {
@@ -302,7 +306,7 @@ outer:
slotElem = g.elem(typ, i)
- g.ctrls().set(i, ctrl(h2(hash)))
+ g.ctrls().set(i, ctrl(h2Hash))
t.growthLeft--
t.used++
m.used++
@@ -422,9 +426,10 @@ outer:
var firstDeletedGroup groupReference
var firstDeletedSlot uintptr
+ h2Hash := h2(hash)
for ; ; seq = seq.next() {
g := t.groups.group(typ, seq.offset)
- match := g.ctrls().matchH2(h2(hash))
+ match := g.ctrls().matchH2(h2Hash)
// Look for an existing slot containing this key.
for match != 0 {
@@ -474,7 +479,7 @@ outer:
slotElem = g.elem(typ, i)
- g.ctrls().set(i, ctrl(h2(hash)))
+ g.ctrls().set(i, ctrl(h2Hash))
t.growthLeft--
t.used++
m.used++
diff --git a/src/internal/runtime/maps/runtime_faststr.go b/src/internal/runtime/maps/runtime_faststr.go
index ddac7eacc52ece..374468b66438a6 100644
--- a/src/internal/runtime/maps/runtime_faststr.go
+++ b/src/internal/runtime/maps/runtime_faststr.go
@@ -131,10 +131,11 @@ func runtime_mapaccess1_faststr(typ *abi.MapType, m *Map, key string) unsafe.Poi
// Probe table.
seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
+ h2Hash := h2(hash)
for ; ; seq = seq.next() {
g := t.groups.group(typ, seq.offset)
- match := g.ctrls().matchH2(h2(hash))
+ match := g.ctrls().matchH2(h2Hash)
for match != 0 {
i := match.first()
@@ -190,10 +191,11 @@ func runtime_mapaccess2_faststr(typ *abi.MapType, m *Map, key string) (unsafe.Po
// Probe table.
seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
+ h2Hash := h2(hash)
for ; ; seq = seq.next() {
g := t.groups.group(typ, seq.offset)
- match := g.ctrls().matchH2(h2(hash))
+ match := g.ctrls().matchH2(h2Hash)
for match != 0 {
i := match.first()
@@ -313,9 +315,10 @@ outer:
var firstDeletedGroup groupReference
var firstDeletedSlot uintptr
+ h2Hash := h2(hash)
for ; ; seq = seq.next() {
g := t.groups.group(typ, seq.offset)
- match := g.ctrls().matchH2(h2(hash))
+ match := g.ctrls().matchH2(h2Hash)
// Look for an existing slot containing this key.
for match != 0 {
@@ -373,7 +376,7 @@ outer:
slotElem = g.elem(typ, i)
- g.ctrls().set(i, ctrl(h2(hash)))
+ g.ctrls().set(i, ctrl(h2Hash))
t.growthLeft--
t.used++
m.used++
diff --git a/src/internal/runtime/maps/table.go b/src/internal/runtime/maps/table.go
index d4b9276b57078f..7e2c6e31bcaa1e 100644
--- a/src/internal/runtime/maps/table.go
+++ b/src/internal/runtime/maps/table.go
@@ -192,10 +192,11 @@ func (t *table) getWithKey(typ *abi.MapType, hash uintptr, key unsafe.Pointer) (
// load factors, k is less than 32, meaning that the number of false
// positive comparisons we must perform is less than 1/8 per find.
seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
+ h2Hash := h2(hash)
for ; ; seq = seq.next() {
g := t.groups.group(typ, seq.offset)
- match := g.ctrls().matchH2(h2(hash))
+ match := g.ctrls().matchH2(h2Hash)
for match != 0 {
i := match.first()
@@ -225,10 +226,11 @@ func (t *table) getWithKey(typ *abi.MapType, hash uintptr, key unsafe.Pointer) (
func (t *table) getWithoutKey(typ *abi.MapType, hash uintptr, key unsafe.Pointer) (unsafe.Pointer, bool) {
seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
+ h2Hash := h2(hash)
for ; ; seq = seq.next() {
g := t.groups.group(typ, seq.offset)
- match := g.ctrls().matchH2(h2(hash))
+ match := g.ctrls().matchH2(h2Hash)
for match != 0 {
i := match.first()
@@ -271,9 +273,10 @@ func (t *table) PutSlot(typ *abi.MapType, m *Map, hash uintptr, key unsafe.Point
var firstDeletedGroup groupReference
var firstDeletedSlot uintptr
+ h2Hash := h2(hash)
for ; ; seq = seq.next() {
g := t.groups.group(typ, seq.offset)
- match := g.ctrls().matchH2(h2(hash))
+ match := g.ctrls().matchH2(h2Hash)
// Look for an existing slot containing this key.
for match != 0 {
@@ -348,7 +351,7 @@ func (t *table) PutSlot(typ *abi.MapType, m *Map, hash uintptr, key unsafe.Point
slotElem = emem
}
- g.ctrls().set(i, ctrl(h2(hash)))
+ g.ctrls().set(i, ctrl(h2Hash))
t.growthLeft--
t.used++
m.used++
@@ -420,9 +423,10 @@ func (t *table) uncheckedPutSlot(typ *abi.MapType, hash uintptr, key, elem unsaf
// Delete returns true if it put a tombstone in t.
func (t *table) Delete(typ *abi.MapType, m *Map, hash uintptr, key unsafe.Pointer) bool {
seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
+ h2Hash := h2(hash)
for ; ; seq = seq.next() {
g := t.groups.group(typ, seq.offset)
- match := g.ctrls().matchH2(h2(hash))
+ match := g.ctrls().matchH2(h2Hash)
for match != 0 {
i := match.first()
diff --git a/src/internal/sync/hashtriemap.go b/src/internal/sync/hashtriemap.go
index 6f5e0b437fea23..db832974278f16 100644
--- a/src/internal/sync/hashtriemap.go
+++ b/src/internal/sync/hashtriemap.go
@@ -178,7 +178,7 @@ func (ht *HashTrieMap[K, V]) expand(oldEntry, newEntry *entry[K, V], newHash uin
top := newIndirect
for {
if hashShift == 0 {
- panic("internal/sync.HashTrieMap: ran out of hash bits while inserting")
+ panic("internal/sync.HashTrieMap: ran out of hash bits while inserting (incorrect use of unsafe or cgo, or data race?)")
}
hashShift -= nChildrenLog2 // hashShift is for the level parent is at. We need to go deeper.
oi := (oldHash >> hashShift) & nChildrenMask
@@ -196,8 +196,8 @@ func (ht *HashTrieMap[K, V]) expand(oldEntry, newEntry *entry[K, V], newHash uin
}
// Store sets the value for a key.
-func (ht *HashTrieMap[K, V]) Store(key K, old V) {
- _, _ = ht.Swap(key, old)
+func (ht *HashTrieMap[K, V]) Store(key K, new V) {
+ _, _ = ht.Swap(key, new)
}
// Swap swaps the value for a key and returns the previous value if any.
diff --git a/src/internal/synctest/synctest_test.go b/src/internal/synctest/synctest_test.go
index 6cebf86c31f416..307eee62e2b9aa 100644
--- a/src/internal/synctest/synctest_test.go
+++ b/src/internal/synctest/synctest_test.go
@@ -383,57 +383,59 @@ func TestChannelMovedOutOfBubble(t *testing.T) {
for _, test := range []struct {
desc string
f func(chan struct{})
- wantPanic string
+ wantFatal string
}{{
desc: "receive",
f: func(ch chan struct{}) {
<-ch
},
- wantPanic: "receive on synctest channel from outside bubble",
+ wantFatal: "receive on synctest channel from outside bubble",
}, {
desc: "send",
f: func(ch chan struct{}) {
ch <- struct{}{}
},
- wantPanic: "send on synctest channel from outside bubble",
+ wantFatal: "send on synctest channel from outside bubble",
}, {
desc: "close",
f: func(ch chan struct{}) {
close(ch)
},
- wantPanic: "close of synctest channel from outside bubble",
+ wantFatal: "close of synctest channel from outside bubble",
}} {
t.Run(test.desc, func(t *testing.T) {
// Bubbled channel accessed from outside any bubble.
t.Run("outside_bubble", func(t *testing.T) {
- donec := make(chan struct{})
- ch := make(chan chan struct{})
- go func() {
- defer close(donec)
- defer wantPanic(t, test.wantPanic)
- test.f(<-ch)
- }()
- synctest.Run(func() {
- ch <- make(chan struct{})
+ wantFatal(t, test.wantFatal, func() {
+ donec := make(chan struct{})
+ ch := make(chan chan struct{})
+ go func() {
+ defer close(donec)
+ test.f(<-ch)
+ }()
+ synctest.Run(func() {
+ ch <- make(chan struct{})
+ })
+ <-donec
})
- <-donec
})
// Bubbled channel accessed from a different bubble.
t.Run("different_bubble", func(t *testing.T) {
- donec := make(chan struct{})
- ch := make(chan chan struct{})
- go func() {
- defer close(donec)
- c := <-ch
+ wantFatal(t, test.wantFatal, func() {
+ donec := make(chan struct{})
+ ch := make(chan chan struct{})
+ go func() {
+ defer close(donec)
+ c := <-ch
+ synctest.Run(func() {
+ test.f(c)
+ })
+ }()
synctest.Run(func() {
- defer wantPanic(t, test.wantPanic)
- test.f(c)
+ ch <- make(chan struct{})
})
- }()
- synctest.Run(func() {
- ch <- make(chan struct{})
+ <-donec
})
- <-donec
})
})
}
@@ -443,39 +445,40 @@ func TestTimerFromInsideBubble(t *testing.T) {
for _, test := range []struct {
desc string
f func(tm *time.Timer)
- wantPanic string
+ wantFatal string
}{{
desc: "read channel",
f: func(tm *time.Timer) {
<-tm.C
},
- wantPanic: "receive on synctest channel from outside bubble",
+ wantFatal: "receive on synctest channel from outside bubble",
}, {
desc: "Reset",
f: func(tm *time.Timer) {
tm.Reset(1 * time.Second)
},
- wantPanic: "reset of synctest timer from outside bubble",
+ wantFatal: "reset of synctest timer from outside bubble",
}, {
desc: "Stop",
f: func(tm *time.Timer) {
tm.Stop()
},
- wantPanic: "stop of synctest timer from outside bubble",
+ wantFatal: "stop of synctest timer from outside bubble",
}} {
t.Run(test.desc, func(t *testing.T) {
- donec := make(chan struct{})
- ch := make(chan *time.Timer)
- go func() {
- defer close(donec)
- defer wantPanic(t, test.wantPanic)
- test.f(<-ch)
- }()
- synctest.Run(func() {
- tm := time.NewTimer(1 * time.Second)
- ch <- tm
+ wantFatal(t, test.wantFatal, func() {
+ donec := make(chan struct{})
+ ch := make(chan *time.Timer)
+ go func() {
+ defer close(donec)
+ test.f(<-ch)
+ }()
+ synctest.Run(func() {
+ tm := time.NewTimer(1 * time.Second)
+ ch <- tm
+ })
+ <-donec
})
- <-donec
})
}
}
diff --git a/src/internal/syscall/unix/at_sysnum_netbsd.go b/src/internal/syscall/unix/at_sysnum_netbsd.go
index b59b5e0cf96d0d..db17852b748e32 100644
--- a/src/internal/syscall/unix/at_sysnum_netbsd.go
+++ b/src/internal/syscall/unix/at_sysnum_netbsd.go
@@ -7,16 +7,17 @@ package unix
import "syscall"
const (
- unlinkatTrap uintptr = syscall.SYS_UNLINKAT
- openatTrap uintptr = syscall.SYS_OPENAT
- fstatatTrap uintptr = syscall.SYS_FSTATAT
- readlinkatTrap uintptr = syscall.SYS_READLINKAT
- mkdiratTrap uintptr = syscall.SYS_MKDIRAT
- fchmodatTrap uintptr = syscall.SYS_FCHMODAT
- fchownatTrap uintptr = syscall.SYS_FCHOWNAT
- renameatTrap uintptr = syscall.SYS_RENAMEAT
- linkatTrap uintptr = syscall.SYS_LINKAT
- symlinkatTrap uintptr = syscall.SYS_SYMLINKAT
+ unlinkatTrap uintptr = syscall.SYS_UNLINKAT
+ openatTrap uintptr = syscall.SYS_OPENAT
+ fstatatTrap uintptr = syscall.SYS_FSTATAT
+ readlinkatTrap uintptr = syscall.SYS_READLINKAT
+ mkdiratTrap uintptr = syscall.SYS_MKDIRAT
+ fchmodatTrap uintptr = syscall.SYS_FCHMODAT
+ fchownatTrap uintptr = syscall.SYS_FCHOWNAT
+ renameatTrap uintptr = syscall.SYS_RENAMEAT
+ linkatTrap uintptr = syscall.SYS_LINKAT
+ symlinkatTrap uintptr = syscall.SYS_SYMLINKAT
+ posixFallocateTrap uintptr = 479
)
const (
diff --git a/src/internal/syscall/unix/fallocate_freebsd_386.go b/src/internal/syscall/unix/fallocate_bsd_386.go
similarity index 85%
rename from src/internal/syscall/unix/fallocate_freebsd_386.go
rename to src/internal/syscall/unix/fallocate_bsd_386.go
index 535b23dbc5b7eb..1dcdff4a5391d0 100644
--- a/src/internal/syscall/unix/fallocate_freebsd_386.go
+++ b/src/internal/syscall/unix/fallocate_bsd_386.go
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:build (freebsd || netbsd) && 386
+
package unix
import "syscall"
@@ -9,6 +11,7 @@ import "syscall"
func PosixFallocate(fd int, off int64, size int64) error {
// If successful, posix_fallocate() returns zero. It returns an error on failure, without
// setting errno. See https://man.freebsd.org/cgi/man.cgi?query=posix_fallocate&sektion=2&n=1
+ // and https://man.netbsd.org/posix_fallocate.2#RETURN%20VALUES
r1, _, _ := syscall.Syscall6(posixFallocateTrap, uintptr(fd), uintptr(off), uintptr(off>>32), uintptr(size), uintptr(size>>32), 0)
if r1 != 0 {
return syscall.Errno(r1)
diff --git a/src/internal/syscall/unix/fallocate_freebsd_64bit.go b/src/internal/syscall/unix/fallocate_bsd_64bit.go
similarity index 82%
rename from src/internal/syscall/unix/fallocate_freebsd_64bit.go
rename to src/internal/syscall/unix/fallocate_bsd_64bit.go
index a9d52283f06a9b..177bb48382d54c 100644
--- a/src/internal/syscall/unix/fallocate_freebsd_64bit.go
+++ b/src/internal/syscall/unix/fallocate_bsd_64bit.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build freebsd && (amd64 || arm64 || riscv64)
+//go:build (freebsd || netbsd) && (amd64 || arm64 || riscv64)
package unix
@@ -11,6 +11,7 @@ import "syscall"
func PosixFallocate(fd int, off int64, size int64) error {
// If successful, posix_fallocate() returns zero. It returns an error on failure, without
// setting errno. See https://man.freebsd.org/cgi/man.cgi?query=posix_fallocate&sektion=2&n=1
+ // and https://man.netbsd.org/posix_fallocate.2#RETURN%20VALUES
r1, _, _ := syscall.Syscall(posixFallocateTrap, uintptr(fd), uintptr(off), uintptr(size))
if r1 != 0 {
return syscall.Errno(r1)
diff --git a/src/internal/syscall/unix/fallocate_freebsd_arm.go b/src/internal/syscall/unix/fallocate_bsd_arm.go
similarity index 90%
rename from src/internal/syscall/unix/fallocate_freebsd_arm.go
rename to src/internal/syscall/unix/fallocate_bsd_arm.go
index 1ded50f3b9a168..15e99d02b1c790 100644
--- a/src/internal/syscall/unix/fallocate_freebsd_arm.go
+++ b/src/internal/syscall/unix/fallocate_bsd_arm.go
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:build (freebsd || netbsd) && arm
+
package unix
import "syscall"
@@ -9,6 +11,7 @@ import "syscall"
func PosixFallocate(fd int, off int64, size int64) error {
// If successful, posix_fallocate() returns zero. It returns an error on failure, without
// setting errno. See https://man.freebsd.org/cgi/man.cgi?query=posix_fallocate&sektion=2&n=1
+ // and https://man.netbsd.org/posix_fallocate.2#RETURN%20VALUES
//
// The padding 0 argument is needed because the ARM calling convention requires that if an
// argument (off in this case) needs double-word alignment (8-byte), the NCRN (next core
diff --git a/src/internal/trace/event.go b/src/internal/trace/event.go
index f31412e35d889a..0204c2b5f525dd 100644
--- a/src/internal/trace/event.go
+++ b/src/internal/trace/event.go
@@ -489,7 +489,6 @@ func (e Event) Range() Range {
} else {
r.Scope.id = int64(e.Proc())
}
- r.Scope.id = int64(e.Proc())
case tracev2.EvGCMarkAssistBegin, tracev2.EvGCMarkAssistActive, tracev2.EvGCMarkAssistEnd:
r.Name = "GC mark assist"
r.Scope = ResourceID{Kind: ResourceGoroutine}
@@ -625,7 +624,6 @@ func (e Event) StateTransition() StateTransition {
s = goStateTransition(GoID(e.base.args[0]), GoRunnable, GoRunning)
case tracev2.EvGoDestroy:
s = goStateTransition(e.ctx.G, GoRunning, GoNotExist)
- s.Stack = e.Stack() // This event references the resource the event happened on.
case tracev2.EvGoDestroySyscall:
s = goStateTransition(e.ctx.G, GoSyscall, GoNotExist)
case tracev2.EvGoStop:
@@ -646,10 +644,8 @@ func (e Event) StateTransition() StateTransition {
s.Stack = e.Stack() // This event references the resource the event happened on.
case tracev2.EvGoSyscallEnd:
s = goStateTransition(e.ctx.G, GoSyscall, GoRunning)
- s.Stack = e.Stack() // This event references the resource the event happened on.
case tracev2.EvGoSyscallEndBlocked:
s = goStateTransition(e.ctx.G, GoSyscall, GoRunnable)
- s.Stack = e.Stack() // This event references the resource the event happened on.
case tracev2.EvGoStatus, tracev2.EvGoStatusStack:
packedStatus := e.base.args[2]
from, to := packedStatus>>32, packedStatus&((1<<32)-1)
@@ -690,9 +686,11 @@ type Sync struct {
// N indicates that this is the Nth sync event in the trace.
N int
- // ClockSnapshot is a snapshot of different clocks taken in close in time
- // that can be used to correlate trace events with data captured by other
- // tools. May be nil for older trace versions.
+ // ClockSnapshot represents a near-simultaneous clock reading of several
+ // different system clocks. The snapshot can be used as a reference to
+ // convert timestamps to different clocks, which is helpful for correlating
+ // timestamps with data captured by other tools. The value is nil for traces
+ // before go1.25.
ClockSnapshot *ClockSnapshot
// ExperimentalBatches contain all the unparsed batches of data for a given experiment.
diff --git a/src/internal/trace/internal/testgen/trace.go b/src/internal/trace/internal/testgen/trace.go
index 415acaccae33bd..38d2febb43b426 100644
--- a/src/internal/trace/internal/testgen/trace.go
+++ b/src/internal/trace/internal/testgen/trace.go
@@ -295,7 +295,7 @@ func (g *Generation) writeEventsTo(tw *raw.TextWriter) {
b.RawEvent(tracev2.EvStacks, nil)
for stk, id := range g.stacks {
stk := stk.stk[:stk.len]
- args := []uint64{id}
+ args := []uint64{id, uint64(len(stk))}
for _, f := range stk {
args = append(args, f.PC, g.String(f.Func), g.String(f.File), f.Line)
}
diff --git a/src/internal/trace/resources.go b/src/internal/trace/resources.go
index f49696f91c56bd..24db2f8d77a66a 100644
--- a/src/internal/trace/resources.go
+++ b/src/internal/trace/resources.go
@@ -224,7 +224,6 @@ type StateTransition struct {
// The actual transition data. Stored in a neutral form so that
// we don't need fields for every kind of resource.
- id int64
oldState uint8
newState uint8
}
diff --git a/src/internal/trace/tracev2/events.go b/src/internal/trace/tracev2/events.go
index bfbbdec00f9639..eab5a146261fc9 100644
--- a/src/internal/trace/tracev2/events.go
+++ b/src/internal/trace/tracev2/events.go
@@ -45,10 +45,10 @@ const (
EvGoSyscallBegin // syscall enter [timestamp, P seq, stack ID]
EvGoSyscallEnd // syscall exit [timestamp]
EvGoSyscallEndBlocked // syscall exit and it blocked at some point [timestamp]
- EvGoStatus // goroutine status at the start of a generation [timestamp, goroutine ID, thread ID, status]
+ EvGoStatus // goroutine status at the start of a generation [timestamp, goroutine ID, M ID, status]
// STW.
- EvSTWBegin // STW start [timestamp, kind]
+ EvSTWBegin // STW start [timestamp, kind, stack ID]
EvSTWEnd // STW done [timestamp]
// GC events.
diff --git a/src/internal/types/testdata/fixedbugs/issue67683.go b/src/internal/types/testdata/fixedbugs/issue67683.go
index f7c9bcdd0114c6..c9ad5f6788bec4 100644
--- a/src/internal/types/testdata/fixedbugs/issue67683.go
+++ b/src/internal/types/testdata/fixedbugs/issue67683.go
@@ -1,4 +1,4 @@
-// -goexperiment=aliastypeparams -gotypesalias=1
+// -gotypesalias=1
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
diff --git a/src/internal/types/testdata/fixedbugs/issue69576.go b/src/internal/types/testdata/fixedbugs/issue69576.go
index 97e03dfab4b2cd..fc436bbfd38424 100644
--- a/src/internal/types/testdata/fixedbugs/issue69576.go
+++ b/src/internal/types/testdata/fixedbugs/issue69576.go
@@ -1,4 +1,4 @@
-// -goexperiment=aliastypeparams -gotypesalias=1
+// -gotypesalias=1
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
diff --git a/src/internal/types/testdata/spec/typeAliases1.23b.go b/src/internal/types/testdata/spec/typeAliases1.23b.go
index c92c3d3a7e0ea9..8a09899066fe18 100644
--- a/src/internal/types/testdata/spec/typeAliases1.23b.go
+++ b/src/internal/types/testdata/spec/typeAliases1.23b.go
@@ -1,4 +1,4 @@
-// -lang=go1.23 -gotypesalias=1 -goexperiment=aliastypeparams
+// -lang=go1.23 -gotypesalias=1
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
diff --git a/src/make.bash b/src/make.bash
index b67ae1529fa0ac..d4e927dfda7c43 100755
--- a/src/make.bash
+++ b/src/make.bash
@@ -64,14 +64,14 @@
# timing information to this file. Useful for profiling where the
# time goes when these scripts run.
#
-# GOROOT_BOOTSTRAP: A working Go tree >= Go 1.22.6 for bootstrap.
+# GOROOT_BOOTSTRAP: A working Go tree >= Go 1.24.6 for bootstrap.
# If $GOROOT_BOOTSTRAP/bin/go is missing, $(go env GOROOT) is
-# tried for all "go" in $PATH. By default, one of $HOME/go1.22.6,
-# $HOME/sdk/go1.22.6, or $HOME/go1.4, whichever exists, in that order.
+# tried for all "go" in $PATH. By default, one of $HOME/go1.24.6,
+# $HOME/sdk/go1.24.6, or $HOME/go1.4, whichever exists, in that order.
# We still check $HOME/go1.4 to allow for build scripts that still hard-code
# that name even though they put newer Go toolchains there.
-bootgo=1.22.6
+bootgo=1.24.6
set -e
diff --git a/src/make.bat b/src/make.bat
index d9f686452e8974..29105cd8a54d98 100644
--- a/src/make.bat
+++ b/src/make.bat
@@ -71,7 +71,7 @@ for /f "tokens=*" %%g in ('where go 2^>nul') do (
)
)
-set bootgo=1.22.6
+set bootgo=1.24.6
if "x%GOROOT_BOOTSTRAP%"=="x" if exist "%HOMEDRIVE%%HOMEPATH%\go%bootgo%" set GOROOT_BOOTSTRAP=%HOMEDRIVE%%HOMEPATH%\go%bootgo%
if "x%GOROOT_BOOTSTRAP%"=="x" if exist "%HOMEDRIVE%%HOMEPATH%\sdk\go%bootgo%" set GOROOT_BOOTSTRAP=%HOMEDRIVE%%HOMEPATH%\sdk\go%bootgo%
if "x%GOROOT_BOOTSTRAP%"=="x" set GOROOT_BOOTSTRAP=%HOMEDRIVE%%HOMEPATH%\Go1.4
diff --git a/src/make.rc b/src/make.rc
index b3beb75660d1e4..9ba2b7d76d7018 100755
--- a/src/make.rc
+++ b/src/make.rc
@@ -48,7 +48,7 @@ fn bootstrapenv {
GOROOT=$GOROOT_BOOTSTRAP GO111MODULE=off GOENV=off GOOS=() GOARCH=() GOEXPERIMENT=() GOFLAGS=() $*
}
-bootgo = 1.22.6
+bootgo = 1.24.6
GOROOT = `{cd .. && pwd}
goroot_bootstrap_set = 'true'
if(~ $"GOROOT_BOOTSTRAP ''){
diff --git a/src/math/exp.go b/src/math/exp.go
index 050e0ee9d88239..029a4f8163698f 100644
--- a/src/math/exp.go
+++ b/src/math/exp.go
@@ -109,13 +109,11 @@ func exp(x float64) float64 {
// special cases
switch {
- case IsNaN(x) || IsInf(x, 1):
+ case IsNaN(x):
return x
- case IsInf(x, -1):
- return 0
- case x > Overflow:
+ case x > Overflow: // handles case where x is +∞
return Inf(1)
- case x < Underflow:
+ case x < Underflow: // handles case where x is -∞
return 0
case -NearZero < x && x < NearZero:
return 1 + x
@@ -157,13 +155,11 @@ func exp2(x float64) float64 {
// special cases
switch {
- case IsNaN(x) || IsInf(x, 1):
+ case IsNaN(x):
return x
- case IsInf(x, -1):
- return 0
- case x > Overflow:
+ case x > Overflow: // handles case where x is +∞
return Inf(1)
- case x < Underflow:
+ case x < Underflow: // handles case where x is -∞
return 0
}
diff --git a/src/net/dial.go b/src/net/dial.go
index 6264984ceca182..a87c57603a813c 100644
--- a/src/net/dial.go
+++ b/src/net/dial.go
@@ -9,6 +9,7 @@ import (
"internal/bytealg"
"internal/godebug"
"internal/nettrace"
+ "net/netip"
"syscall"
"time"
)
@@ -523,30 +524,8 @@ func (d *Dialer) Dial(network, address string) (Conn, error) {
// See func [Dial] for a description of the network and address
// parameters.
func (d *Dialer) DialContext(ctx context.Context, network, address string) (Conn, error) {
- if ctx == nil {
- panic("nil context")
- }
- deadline := d.deadline(ctx, time.Now())
- if !deadline.IsZero() {
- testHookStepTime()
- if d, ok := ctx.Deadline(); !ok || deadline.Before(d) {
- subCtx, cancel := context.WithDeadline(ctx, deadline)
- defer cancel()
- ctx = subCtx
- }
- }
- if oldCancel := d.Cancel; oldCancel != nil {
- subCtx, cancel := context.WithCancel(ctx)
- defer cancel()
- go func() {
- select {
- case <-oldCancel:
- cancel()
- case <-subCtx.Done():
- }
- }()
- ctx = subCtx
- }
+ ctx, cancel := d.dialCtx(ctx)
+ defer cancel()
// Shadow the nettrace (if any) during resolve so Connect events don't fire for DNS lookups.
resolveCtx := ctx
@@ -578,6 +557,98 @@ func (d *Dialer) DialContext(ctx context.Context, network, address string) (Conn
	return sd.dialParallel(ctx, primaries, fallbacks)
}
+func (d *Dialer) dialCtx(ctx context.Context) (context.Context, context.CancelFunc) {
+	if ctx == nil {
+		panic("nil context")
+	}
+	deadline := d.deadline(ctx, time.Now())
+	var cancel1, cancel2 context.CancelFunc
+	if !deadline.IsZero() {
+		testHookStepTime()
+		if d, ok := ctx.Deadline(); !ok || deadline.Before(d) {
+			var subCtx context.Context
+			subCtx, cancel1 = context.WithDeadline(ctx, deadline)
+			ctx = subCtx
+		}
+	}
+	if oldCancel := d.Cancel; oldCancel != nil {
+		var subCtx context.Context
+		subCtx, cancel2 = context.WithCancel(ctx)
+ go func() {
+ select {
+ case <-oldCancel:
+ cancel2()
+ case <-subCtx.Done():
+ }
+ }()
+ ctx = subCtx
+ }
+ return ctx, func() {
+ if cancel1 != nil {
+ cancel1()
+ }
+ if cancel2 != nil {
+ cancel2()
+ }
+ }
+}
+
+// DialTCP acts like Dial for TCP networks using the provided context.
+//
+// The provided Context must be non-nil. If the context expires before
+// the connection is complete, an error is returned. Once successfully
+// connected, any expiration of the context will not affect the
+// connection.
+//
+// The network must be a TCP network name; see func Dial for details.
+func (d *Dialer) DialTCP(ctx context.Context, network string, laddr netip.AddrPort, raddr netip.AddrPort) (*TCPConn, error) {
+ ctx, cancel := d.dialCtx(ctx)
+ defer cancel()
+ return dialTCP(ctx, d, network, TCPAddrFromAddrPort(laddr), TCPAddrFromAddrPort(raddr))
+}
+
+// DialUDP acts like Dial for UDP networks using the provided context.
+//
+// The provided Context must be non-nil. If the context expires before
+// the connection is complete, an error is returned. Once successfully
+// connected, any expiration of the context will not affect the
+// connection.
+//
+// The network must be a UDP network name; see func Dial for details.
+func (d *Dialer) DialUDP(ctx context.Context, network string, laddr netip.AddrPort, raddr netip.AddrPort) (*UDPConn, error) {
+ ctx, cancel := d.dialCtx(ctx)
+ defer cancel()
+ return dialUDP(ctx, d, network, UDPAddrFromAddrPort(laddr), UDPAddrFromAddrPort(raddr))
+}
+
+// DialIP acts like Dial for IP networks using the provided context.
+//
+// The provided Context must be non-nil. If the context expires before
+// the connection is complete, an error is returned. Once successfully
+// connected, any expiration of the context will not affect the
+// connection.
+//
+// The network must be an IP network name; see func Dial for details.
+func (d *Dialer) DialIP(ctx context.Context, network string, laddr netip.Addr, raddr netip.Addr) (*IPConn, error) {
+ ctx, cancel := d.dialCtx(ctx)
+ defer cancel()
+ return dialIP(ctx, d, network, ipAddrFromAddr(laddr), ipAddrFromAddr(raddr))
+}
+
+// DialUnix acts like Dial for Unix networks using the provided context.
+//
+// The provided Context must be non-nil. If the context expires before
+// the connection is complete, an error is returned. Once successfully
+// connected, any expiration of the context will not affect the
+// connection.
+//
+// The network must be a Unix network name; see func Dial for details.
+func (d *Dialer) DialUnix(ctx context.Context, network string, laddr *UnixAddr, raddr *UnixAddr) (*UnixConn, error) {
+ ctx, cancel := d.dialCtx(ctx)
+ defer cancel()
+ return dialUnix(ctx, d, network, laddr, raddr)
+}
+
// dialParallel races two copies of dialSerial, giving the first a
// head start. It returns the first established connection and
// closes the others. Otherwise it returns an error from the first
diff --git a/src/net/dial_test.go b/src/net/dial_test.go
index b3bedb2fa275c3..829b80c33a198d 100644
--- a/src/net/dial_test.go
+++ b/src/net/dial_test.go
@@ -11,6 +11,7 @@ import (
"fmt"
"internal/testenv"
"io"
+ "net/netip"
"os"
"runtime"
"strings"
@@ -1064,6 +1065,99 @@ func TestDialerControlContext(t *testing.T) {
})
}
+func TestDialContext(t *testing.T) {
+ switch runtime.GOOS {
+ case "plan9":
+ t.Skipf("not supported on %s", runtime.GOOS)
+ case "js", "wasip1":
+ t.Skipf("skipping: fake net does not support Dialer.ControlContext")
+ }
+
+ t.Run("StreamDial", func(t *testing.T) {
+ var err error
+ for i, network := range []string{"tcp", "tcp4", "tcp6", "unix", "unixpacket"} {
+ if !testableNetwork(network) {
+ continue
+ }
+ ln := newLocalListener(t, network)
+ defer ln.Close()
+ var id int
+ d := Dialer{ControlContext: func(ctx context.Context, network string, address string, c syscall.RawConn) error {
+ id = ctx.Value("id").(int)
+ return controlOnConnSetup(network, address, c)
+ }}
+ var c Conn
+ switch network {
+ case "tcp", "tcp4", "tcp6":
+			raddr, perr := netip.ParseAddrPort(ln.Addr().String())
+			if perr != nil {
+				t.Error(perr)
+ continue
+ }
+ c, err = d.DialTCP(context.WithValue(context.Background(), "id", i+1), network, (*TCPAddr)(nil).AddrPort(), raddr)
+ case "unix", "unixpacket":
+			raddr, perr := ResolveUnixAddr(network, ln.Addr().String())
+			if perr != nil {
+				t.Error(perr)
+ continue
+ }
+ c, err = d.DialUnix(context.WithValue(context.Background(), "id", i+1), network, nil, raddr)
+ }
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+ if id != i+1 {
+ t.Errorf("%s: got id %d, want %d", network, id, i+1)
+ }
+ c.Close()
+ }
+ })
+ t.Run("PacketDial", func(t *testing.T) {
+ var err error
+ for i, network := range []string{"udp", "udp4", "udp6", "unixgram"} {
+ if !testableNetwork(network) {
+ continue
+ }
+ c1 := newLocalPacketListener(t, network)
+ if network == "unixgram" {
+ defer os.Remove(c1.LocalAddr().String())
+ }
+ defer c1.Close()
+ var id int
+ d := Dialer{ControlContext: func(ctx context.Context, network string, address string, c syscall.RawConn) error {
+ id = ctx.Value("id").(int)
+ return controlOnConnSetup(network, address, c)
+ }}
+ var c2 Conn
+ switch network {
+ case "udp", "udp4", "udp6":
+			raddr, perr := netip.ParseAddrPort(c1.LocalAddr().String())
+			if perr != nil {
+				t.Error(perr)
+ continue
+ }
+ c2, err = d.DialUDP(context.WithValue(context.Background(), "id", i+1), network, (*UDPAddr)(nil).AddrPort(), raddr)
+ case "unixgram":
+			raddr, perr := ResolveUnixAddr(network, c1.LocalAddr().String())
+			if perr != nil {
+				t.Error(perr)
+ continue
+ }
+ c2, err = d.DialUnix(context.WithValue(context.Background(), "id", i+1), network, nil, raddr)
+ }
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+ if id != i+1 {
+ t.Errorf("%s: got id %d, want %d", network, id, i+1)
+ }
+ c2.Close()
+ }
+ })
+}
+
// mustHaveExternalNetwork is like testenv.MustHaveExternalNetwork
// except on non-Linux, non-mobile builders it permits the test to
// run in -short mode.
diff --git a/src/net/http/example_test.go b/src/net/http/example_test.go
index f40273f14a2a24..acb96bba5178c8 100644
--- a/src/net/http/example_test.go
+++ b/src/net/http/example_test.go
@@ -12,6 +12,7 @@ import (
"net/http"
"os"
"os/signal"
+ "time"
)
func ExampleHijacker() {
@@ -221,3 +222,22 @@ func ExampleProtocols_http1or2() {
}
res.Body.Close()
}
+
+func ExampleCrossOriginProtection() {
+ mux := http.NewServeMux()
+
+ mux.HandleFunc("/hello", func(w http.ResponseWriter, req *http.Request) {
+ io.WriteString(w, "request allowed\n")
+ })
+
+ srv := http.Server{
+ Addr: ":8080",
+ ReadTimeout: 15 * time.Second,
+ WriteTimeout: 15 * time.Second,
+ // Use CrossOriginProtection.Handler to block all non-safe cross-origin
+ // browser requests to mux.
+ Handler: http.NewCrossOriginProtection().Handler(mux),
+ }
+
+ log.Fatal(srv.ListenAndServe())
+}
diff --git a/src/net/http/h2_bundle.go b/src/net/http/h2_bundle.go
index 7ca0b13b3dc084..a57d6131343638 100644
--- a/src/net/http/h2_bundle.go
+++ b/src/net/http/h2_bundle.go
@@ -13,8 +13,6 @@
//
// See https://http2.github.io/ for more information on HTTP/2.
//
-// See https://http2.golang.org/ for a test server running this code.
-//
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/src/net/http/httptrace/trace.go b/src/net/http/httptrace/trace.go
index 706a4329578ef7..cee13d2da8345d 100644
--- a/src/net/http/httptrace/trace.go
+++ b/src/net/http/httptrace/trace.go
@@ -76,7 +76,7 @@ func WithClientTrace(ctx context.Context, trace *ClientTrace) context.Context {
// during a single round trip and has no hooks that span a series
// of redirected requests.
//
-// See https://blog.golang.org/http-tracing for more.
+// See https://go.dev/blog/http-tracing for more.
type ClientTrace struct {
// GetConn is called before a connection is created or
// retrieved from an idle pool. The hostPort is the
diff --git a/src/net/http/httputil/dump.go b/src/net/http/httputil/dump.go
index 2edb9bc98d3bc8..23918e2ad4bbd5 100644
--- a/src/net/http/httputil/dump.go
+++ b/src/net/http/httputil/dump.go
@@ -147,7 +147,6 @@ func DumpRequestOut(req *http.Request, body bool) ([]byte, error) {
req.Body = save
if err != nil {
- pw.Close()
dr.err = err
close(quitReadCh)
return nil, err
diff --git a/src/net/http/pprof/pprof.go b/src/net/http/pprof/pprof.go
index 6ba6b2c8e033b9..635d3ad9d9f132 100644
--- a/src/net/http/pprof/pprof.go
+++ b/src/net/http/pprof/pprof.go
@@ -67,7 +67,7 @@
// in your browser.
//
// For a study of the facility in action, visit
-// https://blog.golang.org/2011/06/profiling-go-programs.html.
+// https://go.dev/blog/pprof.
package pprof
import (
diff --git a/src/net/http/transport.go b/src/net/http/transport.go
index 07b3a9e1e72ba6..b860eb95b043da 100644
--- a/src/net/http/transport.go
+++ b/src/net/http/transport.go
@@ -722,7 +722,7 @@ func (t *Transport) roundTrip(req *Request) (_ *Response, err error) {
if e, ok := err.(transportReadFromServerError); ok {
err = e.err
}
- if b, ok := req.Body.(*readTrackingBody); ok && !b.didClose {
+ if b, ok := req.Body.(*readTrackingBody); ok && !b.didClose.Load() {
// Issue 49621: Close the request body if pconn.roundTrip
// didn't do so already. This can happen if the pconn
// write loop exits without reading the write request.
@@ -752,8 +752,8 @@ var errCannotRewind = errors.New("net/http: cannot rewind body after connection
type readTrackingBody struct {
io.ReadCloser
- didRead bool
- didClose bool
+ didRead bool // not atomic.Bool because only one goroutine (the user's) should be accessing
+ didClose atomic.Bool
}
func (r *readTrackingBody) Read(data []byte) (int, error) {
@@ -762,7 +762,9 @@ func (r *readTrackingBody) Read(data []byte) (int, error) {
}
func (r *readTrackingBody) Close() error {
- r.didClose = true
+ if !r.didClose.CompareAndSwap(false, true) {
+ return nil
+ }
return r.ReadCloser.Close()
}
@@ -784,10 +786,10 @@ func setupRewindBody(req *Request) *Request {
// rewindBody takes care of closing req.Body when appropriate
// (in all cases except when rewindBody returns req unmodified).
func rewindBody(req *Request) (rewound *Request, err error) {
- if req.Body == nil || req.Body == NoBody || (!req.Body.(*readTrackingBody).didRead && !req.Body.(*readTrackingBody).didClose) {
+ if req.Body == nil || req.Body == NoBody || (!req.Body.(*readTrackingBody).didRead && !req.Body.(*readTrackingBody).didClose.Load()) {
return req, nil // nothing to rewind
}
- if !req.Body.(*readTrackingBody).didClose {
+ if !req.Body.(*readTrackingBody).didClose.Load() {
req.closeBody()
}
if req.GetBody == nil {
diff --git a/src/net/iprawsock.go b/src/net/iprawsock.go
index 76dded9ca16e12..80a80fef7d3e4a 100644
--- a/src/net/iprawsock.go
+++ b/src/net/iprawsock.go
@@ -6,6 +6,7 @@ package net
import (
"context"
+ "net/netip"
"syscall"
)
@@ -24,6 +25,13 @@ import (
// BUG(mikio): On JS and Plan 9, methods and functions related
// to IPConn are not implemented.
+func ipAddrFromAddr(addr netip.Addr) *IPAddr {
+ return &IPAddr{
+ IP: addr.AsSlice(),
+ Zone: addr.Zone(),
+ }
+}
+
// IPAddr represents the address of an IP end point.
type IPAddr struct {
IP IP
@@ -206,11 +214,18 @@ func newIPConn(fd *netFD) *IPConn { return &IPConn{conn{fd}} }
// If the IP field of raddr is nil or an unspecified IP address, the
// local system is assumed.
func DialIP(network string, laddr, raddr *IPAddr) (*IPConn, error) {
+ return dialIP(context.Background(), nil, network, laddr, raddr)
+}
+
+func dialIP(ctx context.Context, dialer *Dialer, network string, laddr, raddr *IPAddr) (*IPConn, error) {
if raddr == nil {
return nil, &OpError{Op: "dial", Net: network, Source: laddr.opAddr(), Addr: nil, Err: errMissingAddress}
}
sd := &sysDialer{network: network, address: raddr.String()}
- c, err := sd.dialIP(context.Background(), laddr, raddr)
+ if dialer != nil {
+ sd.Dialer = *dialer
+ }
+ c, err := sd.dialIP(ctx, laddr, raddr)
if err != nil {
return nil, &OpError{Op: "dial", Net: network, Source: laddr.opAddr(), Addr: raddr.opAddr(), Err: err}
}
diff --git a/src/net/ipsock_posix.go b/src/net/ipsock_posix.go
index 2aeabd44873f22..52712f932f7530 100644
--- a/src/net/ipsock_posix.go
+++ b/src/net/ipsock_posix.go
@@ -237,8 +237,12 @@ func ipToSockaddr(family int, ip IP, port int, zone string) (syscall.Sockaddr, e
func addrPortToSockaddrInet4(ap netip.AddrPort) (syscall.SockaddrInet4, error) {
// ipToSockaddrInet4 has special handling here for zero length slices.
// We do not, because netip has no concept of a generic zero IP address.
+ //
+ // addr is allowed to be an IPv4-mapped IPv6 address.
+ // As4 will unmap it to an IPv4 address.
+ // The error message is kept consistent with ipToSockaddrInet4.
addr := ap.Addr()
- if !addr.Is4() {
+ if !addr.Is4() && !addr.Is4In6() {
return syscall.SockaddrInet4{}, &AddrError{Err: "non-IPv4 address", Addr: addr.String()}
}
sa := syscall.SockaddrInet4{
diff --git a/src/net/net_windows_test.go b/src/net/net_windows_test.go
index 671de7678008ed..0a5c77f032527e 100644
--- a/src/net/net_windows_test.go
+++ b/src/net/net_windows_test.go
@@ -302,7 +302,7 @@ func TestInterfacesWithNetsh(t *testing.T) {
}
slices.Sort(want)
- if strings.Join(want, "/") != strings.Join(have, "/") {
+ if !slices.Equal(want, have) {
t.Fatalf("unexpected interface list %q, want %q", have, want)
}
}
@@ -487,7 +487,7 @@ func TestInterfaceAddrsWithNetsh(t *testing.T) {
want = append(want, wantIPv6...)
slices.Sort(want)
- if strings.Join(want, "/") != strings.Join(have, "/") {
+ if !slices.Equal(want, have) {
t.Errorf("%s: unexpected addresses list %q, want %q", ifi.Name, have, want)
}
}
diff --git a/src/net/tcpsock.go b/src/net/tcpsock.go
index 9d215db1b2eec3..376bf238c70d07 100644
--- a/src/net/tcpsock.go
+++ b/src/net/tcpsock.go
@@ -315,6 +315,10 @@ func newTCPConn(fd *netFD, keepAliveIdle time.Duration, keepAliveCfg KeepAliveCo
// If the IP field of raddr is nil or an unspecified IP address, the
// local system is assumed.
func DialTCP(network string, laddr, raddr *TCPAddr) (*TCPConn, error) {
+ return dialTCP(context.Background(), nil, network, laddr, raddr)
+}
+
+func dialTCP(ctx context.Context, dialer *Dialer, network string, laddr, raddr *TCPAddr) (*TCPConn, error) {
switch network {
case "tcp", "tcp4", "tcp6":
default:
@@ -328,10 +332,13 @@ func DialTCP(network string, laddr, raddr *TCPAddr) (*TCPConn, error) {
c *TCPConn
err error
)
+ if dialer != nil {
+ sd.Dialer = *dialer
+ }
if sd.MultipathTCP() {
- c, err = sd.dialMPTCP(context.Background(), laddr, raddr)
+ c, err = sd.dialMPTCP(ctx, laddr, raddr)
} else {
- c, err = sd.dialTCP(context.Background(), laddr, raddr)
+ c, err = sd.dialTCP(ctx, laddr, raddr)
}
if err != nil {
return nil, &OpError{Op: "dial", Net: network, Source: laddr.opAddr(), Addr: raddr.opAddr(), Err: err}
diff --git a/src/net/udpsock.go b/src/net/udpsock.go
index 35da018c307afb..f9a3bee867d340 100644
--- a/src/net/udpsock.go
+++ b/src/net/udpsock.go
@@ -285,6 +285,10 @@ func newUDPConn(fd *netFD) *UDPConn { return &UDPConn{conn{fd}} }
// If the IP field of raddr is nil or an unspecified IP address, the
// local system is assumed.
func DialUDP(network string, laddr, raddr *UDPAddr) (*UDPConn, error) {
+ return dialUDP(context.Background(), nil, network, laddr, raddr)
+}
+
+func dialUDP(ctx context.Context, dialer *Dialer, network string, laddr, raddr *UDPAddr) (*UDPConn, error) {
switch network {
case "udp", "udp4", "udp6":
default:
@@ -294,7 +298,10 @@ func DialUDP(network string, laddr, raddr *UDPAddr) (*UDPConn, error) {
return nil, &OpError{Op: "dial", Net: network, Source: laddr.opAddr(), Addr: nil, Err: errMissingAddress}
}
sd := &sysDialer{network: network, address: raddr.String()}
- c, err := sd.dialUDP(context.Background(), laddr, raddr)
+ if dialer != nil {
+ sd.Dialer = *dialer
+ }
+ c, err := sd.dialUDP(ctx, laddr, raddr)
if err != nil {
return nil, &OpError{Op: "dial", Net: network, Source: laddr.opAddr(), Addr: raddr.opAddr(), Err: err}
}
diff --git a/src/net/udpsock_test.go b/src/net/udpsock_test.go
index 6dacc81df6e059..7ad8a585b07e33 100644
--- a/src/net/udpsock_test.go
+++ b/src/net/udpsock_test.go
@@ -705,3 +705,35 @@ func TestIPv6WriteMsgUDPAddrPortTargetAddrIPVersion(t *testing.T) {
t.Fatal(err)
}
}
+
+// TestIPv4WriteMsgUDPAddrPortTargetAddrIPVersion verifies that
+// WriteMsgUDPAddrPort accepts IPv4 and IPv4-mapped IPv6 destination addresses,
+// and rejects IPv6 destination addresses on a "udp4" connection.
+func TestIPv4WriteMsgUDPAddrPortTargetAddrIPVersion(t *testing.T) {
+ if !testableNetwork("udp4") {
+ t.Skipf("skipping: udp4 not available")
+ }
+
+ conn, err := ListenUDP("udp4", &UDPAddr{IP: IPv4(127, 0, 0, 1)})
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer conn.Close()
+
+ daddr4 := netip.AddrPortFrom(netip.MustParseAddr("127.0.0.1"), 12345)
+ daddr4in6 := netip.AddrPortFrom(netip.MustParseAddr("::ffff:127.0.0.1"), 12345)
+ daddr6 := netip.AddrPortFrom(netip.MustParseAddr("::1"), 12345)
+ buf := make([]byte, 8)
+
+ if _, _, err = conn.WriteMsgUDPAddrPort(buf, nil, daddr4); err != nil {
+ t.Errorf("conn.WriteMsgUDPAddrPort(buf, nil, daddr4) failed: %v", err)
+ }
+
+ if _, _, err = conn.WriteMsgUDPAddrPort(buf, nil, daddr4in6); err != nil {
+ t.Errorf("conn.WriteMsgUDPAddrPort(buf, nil, daddr4in6) failed: %v", err)
+ }
+
+ if _, _, err = conn.WriteMsgUDPAddrPort(buf, nil, daddr6); err == nil {
+ t.Errorf("conn.WriteMsgUDPAddrPort(buf, nil, daddr6) should have failed, but got no error")
+ }
+}
diff --git a/src/net/unixsock.go b/src/net/unixsock.go
index c93ef91d5730e6..0ee79f35dec8a4 100644
--- a/src/net/unixsock.go
+++ b/src/net/unixsock.go
@@ -201,13 +201,20 @@ func newUnixConn(fd *netFD) *UnixConn { return &UnixConn{conn{fd}} }
// If laddr is non-nil, it is used as the local address for the
// connection.
func DialUnix(network string, laddr, raddr *UnixAddr) (*UnixConn, error) {
+ return dialUnix(context.Background(), nil, network, laddr, raddr)
+}
+
+func dialUnix(ctx context.Context, dialer *Dialer, network string, laddr, raddr *UnixAddr) (*UnixConn, error) {
switch network {
case "unix", "unixgram", "unixpacket":
default:
return nil, &OpError{Op: "dial", Net: network, Source: laddr.opAddr(), Addr: raddr.opAddr(), Err: UnknownNetworkError(network)}
}
sd := &sysDialer{network: network, address: raddr.String()}
- c, err := sd.dialUnix(context.Background(), laddr, raddr)
+ if dialer != nil {
+ sd.Dialer = *dialer
+ }
+ c, err := sd.dialUnix(ctx, laddr, raddr)
if err != nil {
return nil, &OpError{Op: "dial", Net: network, Source: laddr.opAddr(), Addr: raddr.opAddr(), Err: err}
}
diff --git a/src/os/exec/lp_plan9.go b/src/os/exec/lp_plan9.go
index 0430af9eefeb42..f713a6905cfbdc 100644
--- a/src/os/exec/lp_plan9.go
+++ b/src/os/exec/lp_plan9.go
@@ -36,7 +36,7 @@ func findExecutable(file string) error {
// As of Go 1.19, LookPath will instead return that path along with an error satisfying
// [errors.Is](err, [ErrDot]). See the package documentation for more details.
func LookPath(file string) (string, error) {
- if err := validateLookPath(file); err != nil {
+ if err := validateLookPath(filepath.Clean(file)); err != nil {
return "", &Error{file, err}
}
diff --git a/src/os/os_windows_test.go b/src/os/os_windows_test.go
index 515d1c135901e0..5a479051ee1e30 100644
--- a/src/os/os_windows_test.go
+++ b/src/os/os_windows_test.go
@@ -663,7 +663,7 @@ func TestOpenVolumeName(t *testing.T) {
}
slices.Sort(have)
- if strings.Join(want, "/") != strings.Join(have, "/") {
+ if !slices.Equal(want, have) {
t.Fatalf("unexpected file list %q, want %q", have, want)
}
}
diff --git a/src/path/filepath/match_test.go b/src/path/filepath/match_test.go
index f415b0408820af..2ae79980c753ee 100644
--- a/src/path/filepath/match_test.go
+++ b/src/path/filepath/match_test.go
@@ -231,7 +231,7 @@ func (test *globTest) globAbs(root, rootPattern string) error {
}
slices.Sort(have)
want := test.buildWant(root + `\`)
- if strings.Join(want, "_") == strings.Join(have, "_") {
+ if slices.Equal(want, have) {
return nil
}
return fmt.Errorf("Glob(%q) returns %q, but %q expected", p, have, want)
@@ -245,12 +245,12 @@ func (test *globTest) globRel(root string) error {
}
slices.Sort(have)
want := test.buildWant(root)
- if strings.Join(want, "_") == strings.Join(have, "_") {
+ if slices.Equal(want, have) {
return nil
}
// try also matching version without root prefix
wantWithNoRoot := test.buildWant("")
- if strings.Join(wantWithNoRoot, "_") == strings.Join(have, "_") {
+ if slices.Equal(wantWithNoRoot, have) {
return nil
}
return fmt.Errorf("Glob(%q) returns %q, but %q expected", p, have, want)
diff --git a/src/runtime/asm_arm.s b/src/runtime/asm_arm.s
index 742b97f888514f..d371e80d8484ac 100644
--- a/src/runtime/asm_arm.s
+++ b/src/runtime/asm_arm.s
@@ -794,9 +794,6 @@ TEXT setg<>(SB),NOSPLIT|NOFRAME,$0-0
MOVW R0, g
// Save g to thread-local storage.
-#ifdef GOOS_windows
- B runtime·save_g(SB)
-#else
#ifdef GOOS_openbsd
B runtime·save_g(SB)
#else
@@ -808,7 +805,6 @@ TEXT setg<>(SB),NOSPLIT|NOFRAME,$0-0
MOVW g, R0
RET
#endif
-#endif
TEXT runtime·emptyfunc(SB),0,$0-0
RET
diff --git a/src/runtime/asm_loong64.s b/src/runtime/asm_loong64.s
index 46ef00bab8aa35..ee7f825e1f6681 100644
--- a/src/runtime/asm_loong64.s
+++ b/src/runtime/asm_loong64.s
@@ -70,8 +70,9 @@ nocgo:
// start this M
JAL runtime·mstart(SB)
- // Prevent dead-code elimination of debugCallV2, which is
+ // Prevent dead-code elimination of debugCallV2 and debugPinnerV1, which are
// intended to be called by debuggers.
+ MOVV $runtime·debugPinnerV1(SB), R0
MOVV $runtime·debugCallV2(SB), R0
MOVV R0, 1(R0)
diff --git a/src/runtime/asm_ppc64x.s b/src/runtime/asm_ppc64x.s
index 96c87afac8c92f..fc70fa82046056 100644
--- a/src/runtime/asm_ppc64x.s
+++ b/src/runtime/asm_ppc64x.s
@@ -1349,67 +1349,29 @@ TEXT runtime·debugCallPanicked(SB),NOSPLIT,$32-16
TW $31, R0, R0
RET
#endif
-// Note: these functions use a special calling convention to save generated code space.
-// Arguments are passed in registers, but the space for those arguments are allocated
-// in the caller's stack frame. These stubs write the args into that stack space and
-// then tail call to the corresponding runtime handler.
-// The tail call makes these stubs disappear in backtraces.
-TEXT runtime·panicIndex(SB),NOSPLIT,$0-16
- JMP runtime·goPanicIndex(SB)
-TEXT runtime·panicIndexU(SB),NOSPLIT,$0-16
- JMP runtime·goPanicIndexU(SB)
-TEXT runtime·panicSliceAlen(SB),NOSPLIT,$0-16
- MOVD R4, R3
- MOVD R5, R4
- JMP runtime·goPanicSliceAlen(SB)
-TEXT runtime·panicSliceAlenU(SB),NOSPLIT,$0-16
- MOVD R4, R3
- MOVD R5, R4
- JMP runtime·goPanicSliceAlenU(SB)
-TEXT runtime·panicSliceAcap(SB),NOSPLIT,$0-16
- MOVD R4, R3
- MOVD R5, R4
- JMP runtime·goPanicSliceAcap(SB)
-TEXT runtime·panicSliceAcapU(SB),NOSPLIT,$0-16
- MOVD R4, R3
- MOVD R5, R4
- JMP runtime·goPanicSliceAcapU(SB)
-TEXT runtime·panicSliceB(SB),NOSPLIT,$0-16
- JMP runtime·goPanicSliceB(SB)
-TEXT runtime·panicSliceBU(SB),NOSPLIT,$0-16
- JMP runtime·goPanicSliceBU(SB)
-TEXT runtime·panicSlice3Alen(SB),NOSPLIT,$0-16
- MOVD R5, R3
- MOVD R6, R4
- JMP runtime·goPanicSlice3Alen(SB)
-TEXT runtime·panicSlice3AlenU(SB),NOSPLIT,$0-16
- MOVD R5, R3
- MOVD R6, R4
- JMP runtime·goPanicSlice3AlenU(SB)
-TEXT runtime·panicSlice3Acap(SB),NOSPLIT,$0-16
- MOVD R5, R3
- MOVD R6, R4
- JMP runtime·goPanicSlice3Acap(SB)
-TEXT runtime·panicSlice3AcapU(SB),NOSPLIT,$0-16
- MOVD R5, R3
- MOVD R6, R4
- JMP runtime·goPanicSlice3AcapU(SB)
-TEXT runtime·panicSlice3B(SB),NOSPLIT,$0-16
- MOVD R4, R3
- MOVD R5, R4
- JMP runtime·goPanicSlice3B(SB)
-TEXT runtime·panicSlice3BU(SB),NOSPLIT,$0-16
- MOVD R4, R3
- MOVD R5, R4
- JMP runtime·goPanicSlice3BU(SB)
-TEXT runtime·panicSlice3C(SB),NOSPLIT,$0-16
- JMP runtime·goPanicSlice3C(SB)
-TEXT runtime·panicSlice3CU(SB),NOSPLIT,$0-16
- JMP runtime·goPanicSlice3CU(SB)
-TEXT runtime·panicSliceConvert(SB),NOSPLIT,$0-16
- MOVD R5, R3
- MOVD R6, R4
- JMP runtime·goPanicSliceConvert(SB)
+
+TEXT runtime·panicBounds(SB),NOSPLIT,$88-0
+ // Note: frame size is 16 bytes larger than necessary
+ // in order to pacify vet. Vet doesn't understand ppc64
+ // layout properly.
+ NO_LOCAL_POINTERS
+ // Save all 7 int registers that could have an index in them.
+ // They may be pointers, but if so they are dead.
+ // Skip R0 aka ZERO, R1 aka SP, R2 aka SB
+ MOVD R3, 48(R1)
+ MOVD R4, 56(R1)
+ MOVD R5, 64(R1)
+ MOVD R6, 72(R1)
+ MOVD R7, 80(R1)
+ MOVD R8, 88(R1)
+ MOVD R9, 96(R1)
+ // Note: we only save 7 registers to keep under nosplit stack limit
+ // Also, R11 is clobbered in dynamic linking situations
+
+ MOVD LR, R3 // PC immediately after call to panicBounds
+ ADD $48, R1, R4 // pointer to save area
+ CALL runtime·panicBounds64(SB)
+ RET
// These functions are used when internal linking cgo with external
// objects compiled with the -Os on gcc. They reduce prologue/epilogue
diff --git a/src/runtime/asm_riscv64.s b/src/runtime/asm_riscv64.s
index 4031cdde9ee6b5..6b16d03c9a8070 100644
--- a/src/runtime/asm_riscv64.s
+++ b/src/runtime/asm_riscv64.s
@@ -884,80 +884,32 @@ TEXT runtime·gcWriteBarrier8(SB),NOSPLIT,$0
MOV $64, X24
JMP gcWriteBarrier<>(SB)
-// Note: these functions use a special calling convention to save generated code space.
-// Arguments are passed in registers (ssa/gen/RISCV64Ops.go), but the space for those
-// arguments are allocated in the caller's stack frame.
-// These stubs write the args into that stack space and then tail call to the
-// corresponding runtime handler.
-// The tail call makes these stubs disappear in backtraces.
-TEXT runtime·panicIndex(SB),NOSPLIT,$0-16
- MOV T0, X10
- MOV T1, X11
- JMP runtime·goPanicIndex(SB)
-TEXT runtime·panicIndexU(SB),NOSPLIT,$0-16
- MOV T0, X10
- MOV T1, X11
- JMP runtime·goPanicIndexU(SB)
-TEXT runtime·panicSliceAlen(SB),NOSPLIT,$0-16
- MOV T1, X10
- MOV T2, X11
- JMP runtime·goPanicSliceAlen(SB)
-TEXT runtime·panicSliceAlenU(SB),NOSPLIT,$0-16
- MOV T1, X10
- MOV T2, X11
- JMP runtime·goPanicSliceAlenU(SB)
-TEXT runtime·panicSliceAcap(SB),NOSPLIT,$0-16
- MOV T1, X10
- MOV T2, X11
- JMP runtime·goPanicSliceAcap(SB)
-TEXT runtime·panicSliceAcapU(SB),NOSPLIT,$0-16
- MOV T1, X10
- MOV T2, X11
- JMP runtime·goPanicSliceAcapU(SB)
-TEXT runtime·panicSliceB(SB),NOSPLIT,$0-16
- MOV T0, X10
- MOV T1, X11
- JMP runtime·goPanicSliceB(SB)
-TEXT runtime·panicSliceBU(SB),NOSPLIT,$0-16
- MOV T0, X10
- MOV T1, X11
- JMP runtime·goPanicSliceBU(SB)
-TEXT runtime·panicSlice3Alen(SB),NOSPLIT,$0-16
- MOV T2, X10
- MOV T3, X11
- JMP runtime·goPanicSlice3Alen(SB)
-TEXT runtime·panicSlice3AlenU(SB),NOSPLIT,$0-16
- MOV T2, X10
- MOV T3, X11
- JMP runtime·goPanicSlice3AlenU(SB)
-TEXT runtime·panicSlice3Acap(SB),NOSPLIT,$0-16
- MOV T2, X10
- MOV T3, X11
- JMP runtime·goPanicSlice3Acap(SB)
-TEXT runtime·panicSlice3AcapU(SB),NOSPLIT,$0-16
- MOV T2, X10
- MOV T3, X11
- JMP runtime·goPanicSlice3AcapU(SB)
-TEXT runtime·panicSlice3B(SB),NOSPLIT,$0-16
- MOV T1, X10
- MOV T2, X11
- JMP runtime·goPanicSlice3B(SB)
-TEXT runtime·panicSlice3BU(SB),NOSPLIT,$0-16
- MOV T1, X10
- MOV T2, X11
- JMP runtime·goPanicSlice3BU(SB)
-TEXT runtime·panicSlice3C(SB),NOSPLIT,$0-16
- MOV T0, X10
- MOV T1, X11
- JMP runtime·goPanicSlice3C(SB)
-TEXT runtime·panicSlice3CU(SB),NOSPLIT,$0-16
- MOV T0, X10
- MOV T1, X11
- JMP runtime·goPanicSlice3CU(SB)
-TEXT runtime·panicSliceConvert(SB),NOSPLIT,$0-16
- MOV T2, X10
- MOV T3, X11
- JMP runtime·goPanicSliceConvert(SB)
+TEXT runtime·panicBounds(SB),NOSPLIT,$144-0
+ NO_LOCAL_POINTERS
+ // Save all 16 int registers that could have an index in them.
+	// They may be pointers, but if they are, they are dead.
+ // Skip X0 aka ZERO, X1 aka LR, X2 aka SP, X3 aka GP, X4 aka TP.
+ MOV X5, 24(X2)
+ MOV X6, 32(X2)
+ MOV X7, 40(X2)
+ MOV X8, 48(X2)
+ MOV X9, 56(X2)
+ MOV X10, 64(X2)
+ MOV X11, 72(X2)
+ MOV X12, 80(X2)
+ MOV X13, 88(X2)
+ MOV X14, 96(X2)
+ MOV X15, 104(X2)
+ MOV X16, 112(X2)
+ MOV X17, 120(X2)
+ MOV X18, 128(X2)
+ MOV X19, 136(X2)
+ MOV X20, 144(X2)
+
+ MOV X1, X10 // PC immediately after call to panicBounds
+ ADD $24, X2, X11 // pointer to save area
+ CALL runtime·panicBounds64(SB)
+ RET
DATA runtime·mainPC+0(SB)/8,$runtime·main(SB)
GLOBL runtime·mainPC(SB),RODATA,$8
diff --git a/src/runtime/asm_s390x.s b/src/runtime/asm_s390x.s
index 7fc88009e88a85..4cc1c0eb104886 100644
--- a/src/runtime/asm_s390x.s
+++ b/src/runtime/asm_s390x.s
@@ -892,76 +892,18 @@ TEXT runtime·gcWriteBarrier8(SB),NOSPLIT,$0
MOVD $64, R9
JMP gcWriteBarrier<>(SB)
-// Note: these functions use a special calling convention to save generated code space.
-// Arguments are passed in registers, but the space for those arguments are allocated
-// in the caller's stack frame. These stubs write the args into that stack space and
-// then tail call to the corresponding runtime handler.
-// The tail call makes these stubs disappear in backtraces.
-TEXT runtime·panicIndex(SB),NOSPLIT,$0-16
- MOVD R0, x+0(FP)
- MOVD R1, y+8(FP)
- JMP runtime·goPanicIndex(SB)
-TEXT runtime·panicIndexU(SB),NOSPLIT,$0-16
- MOVD R0, x+0(FP)
- MOVD R1, y+8(FP)
- JMP runtime·goPanicIndexU(SB)
-TEXT runtime·panicSliceAlen(SB),NOSPLIT,$0-16
- MOVD R1, x+0(FP)
- MOVD R2, y+8(FP)
- JMP runtime·goPanicSliceAlen(SB)
-TEXT runtime·panicSliceAlenU(SB),NOSPLIT,$0-16
- MOVD R1, x+0(FP)
- MOVD R2, y+8(FP)
- JMP runtime·goPanicSliceAlenU(SB)
-TEXT runtime·panicSliceAcap(SB),NOSPLIT,$0-16
- MOVD R1, x+0(FP)
- MOVD R2, y+8(FP)
- JMP runtime·goPanicSliceAcap(SB)
-TEXT runtime·panicSliceAcapU(SB),NOSPLIT,$0-16
- MOVD R1, x+0(FP)
- MOVD R2, y+8(FP)
- JMP runtime·goPanicSliceAcapU(SB)
-TEXT runtime·panicSliceB(SB),NOSPLIT,$0-16
- MOVD R0, x+0(FP)
- MOVD R1, y+8(FP)
- JMP runtime·goPanicSliceB(SB)
-TEXT runtime·panicSliceBU(SB),NOSPLIT,$0-16
- MOVD R0, x+0(FP)
- MOVD R1, y+8(FP)
- JMP runtime·goPanicSliceBU(SB)
-TEXT runtime·panicSlice3Alen(SB),NOSPLIT,$0-16
- MOVD R2, x+0(FP)
- MOVD R3, y+8(FP)
- JMP runtime·goPanicSlice3Alen(SB)
-TEXT runtime·panicSlice3AlenU(SB),NOSPLIT,$0-16
- MOVD R2, x+0(FP)
- MOVD R3, y+8(FP)
- JMP runtime·goPanicSlice3AlenU(SB)
-TEXT runtime·panicSlice3Acap(SB),NOSPLIT,$0-16
- MOVD R2, x+0(FP)
- MOVD R3, y+8(FP)
- JMP runtime·goPanicSlice3Acap(SB)
-TEXT runtime·panicSlice3AcapU(SB),NOSPLIT,$0-16
- MOVD R2, x+0(FP)
- MOVD R3, y+8(FP)
- JMP runtime·goPanicSlice3AcapU(SB)
-TEXT runtime·panicSlice3B(SB),NOSPLIT,$0-16
- MOVD R1, x+0(FP)
- MOVD R2, y+8(FP)
- JMP runtime·goPanicSlice3B(SB)
-TEXT runtime·panicSlice3BU(SB),NOSPLIT,$0-16
- MOVD R1, x+0(FP)
- MOVD R2, y+8(FP)
- JMP runtime·goPanicSlice3BU(SB)
-TEXT runtime·panicSlice3C(SB),NOSPLIT,$0-16
- MOVD R0, x+0(FP)
- MOVD R1, y+8(FP)
- JMP runtime·goPanicSlice3C(SB)
-TEXT runtime·panicSlice3CU(SB),NOSPLIT,$0-16
- MOVD R0, x+0(FP)
- MOVD R1, y+8(FP)
- JMP runtime·goPanicSlice3CU(SB)
-TEXT runtime·panicSliceConvert(SB),NOSPLIT,$0-16
- MOVD R2, x+0(FP)
- MOVD R3, y+8(FP)
- JMP runtime·goPanicSliceConvert(SB)
+TEXT runtime·panicBounds(SB),NOSPLIT,$144-0
+ NO_LOCAL_POINTERS
+	// Save the 13 int registers R0-R12 that could have an index in them.
+	// They may be pointers, but if they are, they are dead.
+ STMG R0, R12, 24(R15)
+ // Note that R10 @ 104 is not needed, it is an assembler temp
+ // skip R13 aka G @ 128
+ // skip R14 aka LR @ 136
+ // skip R15 aka SP @ 144
+
+ MOVD R14, 8(R15) // PC immediately after call to panicBounds
+ ADD $24, R15, R0 // pointer to save area
+ MOVD R0, 16(R15)
+ CALL runtime·panicBounds64(SB)
+ RET
diff --git a/src/runtime/chan.go b/src/runtime/chan.go
index bb554ebfdb1f3a..639d29dc8337f0 100644
--- a/src/runtime/chan.go
+++ b/src/runtime/chan.go
@@ -191,7 +191,7 @@ func chansend(c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool {
}
if c.bubble != nil && getg().bubble != c.bubble {
- panic(plainError("send on synctest channel from outside bubble"))
+ fatal("send on synctest channel from outside bubble")
}
// Fast path: check for failed non-blocking operation without acquiring the lock.
@@ -318,7 +318,7 @@ func chansend(c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool {
func send(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int) {
if c.bubble != nil && getg().bubble != c.bubble {
unlockf()
- panic(plainError("send on synctest channel from outside bubble"))
+ fatal("send on synctest channel from outside bubble")
}
if raceenabled {
if c.dataqsiz == 0 {
@@ -416,7 +416,7 @@ func closechan(c *hchan) {
panic(plainError("close of nil channel"))
}
if c.bubble != nil && getg().bubble != c.bubble {
- panic(plainError("close of synctest channel from outside bubble"))
+ fatal("close of synctest channel from outside bubble")
}
lock(&c.lock)
@@ -538,7 +538,7 @@ func chanrecv(c *hchan, ep unsafe.Pointer, block bool) (selected, received bool)
}
if c.bubble != nil && getg().bubble != c.bubble {
- panic(plainError("receive on synctest channel from outside bubble"))
+ fatal("receive on synctest channel from outside bubble")
}
if c.timer != nil {
@@ -702,7 +702,7 @@ func chanrecv(c *hchan, ep unsafe.Pointer, block bool) (selected, received bool)
func recv(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int) {
if c.bubble != nil && getg().bubble != c.bubble {
unlockf()
- panic(plainError("receive on synctest channel from outside bubble"))
+ fatal("receive on synctest channel from outside bubble")
}
if c.dataqsiz == 0 {
if raceenabled {
diff --git a/src/runtime/checkptr_test.go b/src/runtime/checkptr_test.go
index 811c0f03553420..119708be7f52ac 100644
--- a/src/runtime/checkptr_test.go
+++ b/src/runtime/checkptr_test.go
@@ -35,6 +35,7 @@ func TestCheckPtr(t *testing.T) {
{"CheckPtrAlignmentNilPtr", ""},
{"CheckPtrArithmetic", "fatal error: checkptr: pointer arithmetic result points to invalid allocation\n"},
{"CheckPtrArithmetic2", "fatal error: checkptr: pointer arithmetic result points to invalid allocation\n"},
+ {"CheckPtrArithmeticUnsafeAdd", "fatal error: checkptr: pointer arithmetic result points to invalid allocation\n"},
{"CheckPtrSize", "fatal error: checkptr: converted pointer straddles multiple allocations\n"},
{"CheckPtrSmall", "fatal error: checkptr: pointer arithmetic computed bad pointer value\n"},
{"CheckPtrSliceOK", ""},
diff --git a/src/runtime/cpuflags.go b/src/runtime/cpuflags.go
index bd1cb328d37b87..6452364b68ec32 100644
--- a/src/runtime/cpuflags.go
+++ b/src/runtime/cpuflags.go
@@ -13,6 +13,7 @@ import (
const (
offsetX86HasAVX = unsafe.Offsetof(cpu.X86.HasAVX)
offsetX86HasAVX2 = unsafe.Offsetof(cpu.X86.HasAVX2)
+ offsetX86HasAVX512 = unsafe.Offsetof(cpu.X86.HasAVX512) // F+CD+BW+DQ+VL
offsetX86HasERMS = unsafe.Offsetof(cpu.X86.HasERMS)
offsetX86HasRDTSCP = unsafe.Offsetof(cpu.X86.HasRDTSCP)
diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go
index fa30efccb1efe3..1f55717f0a1a60 100644
--- a/src/runtime/export_test.go
+++ b/src/runtime/export_test.go
@@ -554,6 +554,8 @@ type G = g
type Sudog = sudog
+type XRegPerG = xRegPerG
+
func Getg() *G {
return getg()
}
diff --git a/src/runtime/export_vdso_linux_test.go b/src/runtime/export_vdso_linux_test.go
new file mode 100644
index 00000000000000..cd339c6038f717
--- /dev/null
+++ b/src/runtime/export_vdso_linux_test.go
@@ -0,0 +1,29 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux && (386 || amd64 || arm || arm64 || loong64 || mips64 || mips64le || ppc64 || ppc64le || riscv64 || s390x)
+
+package runtime
+
+type VDSOSymbolKey vdsoSymbolKey
+
+func (v VDSOSymbolKey) Name() string {
+ return v.name
+}
+
+func (v VDSOSymbolKey) SymHash() uint32 {
+ return v.symHash
+}
+
+func (v VDSOSymbolKey) GNUHash() uint32 {
+ return v.gnuHash
+}
+
+func VDSOSymbolKeys() []VDSOSymbolKey {
+ keys := make([]VDSOSymbolKey, 0, len(vdsoSymbolKeys))
+ for _, k := range vdsoSymbolKeys {
+ keys = append(keys, VDSOSymbolKey(k))
+ }
+ return keys
+}
diff --git a/src/runtime/lockrank.go b/src/runtime/lockrank.go
index 44015ce862d077..9821e499989951 100644
--- a/src/runtime/lockrank.go
+++ b/src/runtime/lockrank.go
@@ -70,6 +70,7 @@ const (
lockRankHchanLeaf
// WB
lockRankWbufSpans
+ lockRankXRegAlloc
lockRankMheap
lockRankMheapSpecial
lockRankGlobalAlloc
@@ -143,6 +144,7 @@ var lockNames = []string{
lockRankStackLarge: "stackLarge",
lockRankHchanLeaf: "hchanLeaf",
lockRankWbufSpans: "wbufSpans",
+ lockRankXRegAlloc: "xRegAlloc",
lockRankMheap: "mheap",
lockRankMheapSpecial: "mheapSpecial",
lockRankGlobalAlloc: "globalAlloc",
@@ -228,9 +230,10 @@ var lockPartialOrder [][]lockRank = [][]lockRank{
lockRankStackLarge: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan},
lockRankHchanLeaf: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankHchanLeaf},
lockRankWbufSpans: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan},
+ lockRankXRegAlloc: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched},
lockRankMheap: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans},
lockRankMheapSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap},
- lockRankGlobalAlloc: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankMheapSpecial},
+ lockRankGlobalAlloc: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankXRegAlloc, lockRankMheap, lockRankMheapSpecial},
lockRankTrace: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap},
lockRankTraceStackTab: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankComputeMaxProcs, lockRankUpdateMaxProcsG, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankCleanupQueue, lockRankSweep, lockRankTestR, lockRankVgetrandom, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankSynctest, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankTrace},
lockRankPanic: {},
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index cb0d34004899ca..1776206573892f 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -821,6 +821,8 @@ func (h *mheap) init() {
}
h.pages.init(&h.lock, &memstats.gcMiscSys, false)
+
+ xRegInitAlloc()
}
// reclaim sweeps and reclaims at least npage pages into the heap.
diff --git a/src/runtime/mklockrank.go b/src/runtime/mklockrank.go
index 46a063fdce569c..9c503369a35841 100644
--- a/src/runtime/mklockrank.go
+++ b/src/runtime/mklockrank.go
@@ -193,6 +193,9 @@ defer,
# Below WB is the write barrier implementation.
< wbufSpans;
+# xRegState allocator
+sched < xRegAlloc;
+
# Span allocator
stackLarge,
stackpool,
@@ -205,7 +208,8 @@ stackLarge,
# an mspanSpecial lock, and they're part of the malloc implementation.
# Pinner bits might be freed by the span allocator.
mheap, mspanSpecial < mheapSpecial;
-mheap, mheapSpecial < globalAlloc;
+# Fixallocs
+mheap, mheapSpecial, xRegAlloc < globalAlloc;
# Execution tracer events (with a P)
hchan,
diff --git a/src/runtime/mkpreempt.go b/src/runtime/mkpreempt.go
index 6a9cf77a43fcf0..2bd2ef07fa8292 100644
--- a/src/runtime/mkpreempt.go
+++ b/src/runtime/mkpreempt.go
@@ -9,8 +9,10 @@
package main
import (
+ "bytes"
"flag"
"fmt"
+ "go/format"
"io"
"log"
"os"
@@ -73,16 +75,14 @@ var regNamesAMD64 = []string{
"X15",
}
-var out io.Writer
-
-var arches = map[string]func(){
+var arches = map[string]func(g *gen){
"386": gen386,
"amd64": genAMD64,
"arm": genARM,
"arm64": genARM64,
"loong64": genLoong64,
- "mips64x": func() { genMIPS(true) },
- "mipsx": func() { genMIPS(false) },
+ "mips64x": func(g *gen) { genMIPS(g, true) },
+ "mipsx": func(g *gen) { genMIPS(g, false) },
"ppc64x": genPPC64,
"riscv64": genRISCV64,
"s390x": genS390X,
@@ -93,53 +93,100 @@ var beLe = map[string]bool{"mips64x": true, "mipsx": true, "ppc64x": true}
func main() {
flag.Parse()
if flag.NArg() > 0 {
- out = os.Stdout
for _, arch := range flag.Args() {
- gen, ok := arches[arch]
+ genFn, ok := arches[arch]
if !ok {
log.Fatalf("unknown arch %s", arch)
}
- header(arch)
- gen()
+ g := gen{os.Stdout, arch}
+ g.asmHeader()
+ genFn(&g)
}
return
}
- for arch, gen := range arches {
+ for arch, genFn := range arches {
f, err := os.Create(fmt.Sprintf("preempt_%s.s", arch))
if err != nil {
log.Fatal(err)
}
- out = f
- header(arch)
- gen()
+ g := gen{f, arch}
+ g.asmHeader()
+ genFn(&g)
if err := f.Close(); err != nil {
log.Fatal(err)
}
}
}
-func header(arch string) {
- fmt.Fprintf(out, "// Code generated by mkpreempt.go; DO NOT EDIT.\n\n")
- if beLe[arch] {
- base := arch[:len(arch)-1]
- fmt.Fprintf(out, "//go:build %s || %sle\n\n", base, base)
+type gen struct {
+ w io.Writer
+ goarch string
+}
+
+func (g *gen) commonHeader() {
+ fmt.Fprintf(g.w, "// Code generated by mkpreempt.go; DO NOT EDIT.\n\n")
+ if beLe[g.goarch] {
+ base := g.goarch[:len(g.goarch)-1]
+ fmt.Fprintf(g.w, "//go:build %s || %sle\n\n", base, base)
}
- fmt.Fprintf(out, "#include \"go_asm.h\"\n")
- if arch == "amd64" {
- fmt.Fprintf(out, "#include \"asm_amd64.h\"\n")
+}
+
+func (g *gen) asmHeader() {
+ g.commonHeader()
+ fmt.Fprintf(g.w, "#include \"go_asm.h\"\n")
+ if g.goarch == "amd64" {
+ fmt.Fprintf(g.w, "#include \"go_tls.h\"\n")
+ fmt.Fprintf(g.w, "#include \"asm_amd64.h\"\n")
}
- fmt.Fprintf(out, "#include \"textflag.h\"\n\n")
- fmt.Fprintf(out, "TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0\n")
+ fmt.Fprintf(g.w, "#include \"textflag.h\"\n\n")
+ fmt.Fprintf(g.w, "TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0\n")
}
-func p(f string, args ...any) {
+func (g *gen) p(f string, args ...any) {
fmted := fmt.Sprintf(f, args...)
- fmt.Fprintf(out, "\t%s\n", strings.ReplaceAll(fmted, "\n", "\n\t"))
+ fmt.Fprintf(g.w, "\t%s\n", strings.ReplaceAll(fmted, "\n", "\n\t"))
}
-func label(l string) {
- fmt.Fprintf(out, "%s\n", l)
+func (g *gen) label(l string) {
+ fmt.Fprintf(g.w, "%s\n", l)
+}
+
+// writeXRegs writes an architecture xregs file.
+func writeXRegs(arch string, l *layout) {
+ var code bytes.Buffer
+ g := gen{&code, arch}
+ g.commonHeader()
+ fmt.Fprintf(g.w, `
+package runtime
+
+type xRegs struct {
+`)
+ pos := 0
+ for _, reg := range l.regs {
+ if reg.pos != pos {
+ log.Fatalf("padding not implemented")
+ }
+ typ := fmt.Sprintf("[%d]byte", reg.size)
+ switch {
+ case reg.size == 4 && reg.pos%4 == 0:
+ typ = "uint32"
+ case reg.size == 8 && reg.pos%8 == 0:
+ typ = "uint64"
+ }
+ fmt.Fprintf(g.w, "\t%s %s\n", reg.reg, typ)
+ pos += reg.size
+ }
+ fmt.Fprintf(g.w, "}\n")
+
+ path := fmt.Sprintf("preempt_%s.go", arch)
+ b, err := format.Source(code.Bytes())
+ if err != nil {
+ log.Fatalf("formatting %s: %s", path, err)
+ }
+ if err := os.WriteFile(path, b, 0666); err != nil {
+ log.Fatal(err)
+ }
}
type layout struct {
@@ -149,7 +196,7 @@ type layout struct {
}
type regPos struct {
- pos int
+ pos, size int
saveOp string
restoreOp string
@@ -162,42 +209,44 @@ type regPos struct {
}
func (l *layout) add(op, reg string, size int) {
- l.regs = append(l.regs, regPos{saveOp: op, restoreOp: op, reg: reg, pos: l.stack})
+ l.regs = append(l.regs, regPos{saveOp: op, restoreOp: op, reg: reg, pos: l.stack, size: size})
l.stack += size
}
func (l *layout) add2(sop, rop, reg string, size int) {
- l.regs = append(l.regs, regPos{saveOp: sop, restoreOp: rop, reg: reg, pos: l.stack})
+ l.regs = append(l.regs, regPos{saveOp: sop, restoreOp: rop, reg: reg, pos: l.stack, size: size})
l.stack += size
}
func (l *layout) addSpecial(save, restore string, size int) {
- l.regs = append(l.regs, regPos{save: save, restore: restore, pos: l.stack})
+ l.regs = append(l.regs, regPos{save: save, restore: restore, pos: l.stack, size: size})
l.stack += size
}
-func (l *layout) save() {
+func (l *layout) save(g *gen) {
for _, reg := range l.regs {
if reg.save != "" {
- p(reg.save, reg.pos)
+ g.p(reg.save, reg.pos)
} else {
- p("%s %s, %d(%s)", reg.saveOp, reg.reg, reg.pos, l.sp)
+ g.p("%s %s, %d(%s)", reg.saveOp, reg.reg, reg.pos, l.sp)
}
}
}
-func (l *layout) restore() {
+func (l *layout) restore(g *gen) {
for i := len(l.regs) - 1; i >= 0; i-- {
reg := l.regs[i]
if reg.restore != "" {
- p(reg.restore, reg.pos)
+ g.p(reg.restore, reg.pos)
} else {
- p("%s %d(%s), %s", reg.restoreOp, reg.pos, l.sp, reg.reg)
+ g.p("%s %d(%s), %s", reg.restoreOp, reg.pos, l.sp, reg.reg)
}
}
}
-func gen386() {
+func gen386(g *gen) {
+ p := g.p
+
p("PUSHFL")
// Save general purpose registers.
var l = layout{sp: "SP"}
@@ -218,22 +267,26 @@ func gen386() {
p("ADJSP $%d", lSSE.stack)
p("NOP SP")
- l.save()
+ l.save(g)
p("#ifndef %s", softfloat)
- lSSE.save()
+ lSSE.save(g)
p("#endif")
p("CALL ·asyncPreempt2(SB)")
p("#ifndef %s", softfloat)
- lSSE.restore()
+ lSSE.restore(g)
p("#endif")
- l.restore()
+ l.restore(g)
p("ADJSP $%d", -lSSE.stack)
p("POPFL")
p("RET")
}
-func genAMD64() {
+func genAMD64(g *gen) {
+ const xReg = "AX" // *xRegState
+
+ p, label := g.p, g.label
+
// Assign stack offsets.
var l = layout{sp: "SP"}
for _, reg := range regNamesAMD64 {
@@ -244,37 +297,121 @@ func genAMD64() {
l.add("MOVQ", reg, 8)
}
}
- lSSE := layout{stack: l.stack, sp: "SP"}
- for _, reg := range regNamesAMD64 {
- if strings.HasPrefix(reg, "X") {
- lSSE.add("MOVUPS", reg, 16)
+ // Create layouts for X, Y, and Z registers.
+ const (
+ numXRegs = 16
+ numZRegs = 16 // TODO: If we start using upper registers, change to 32
+ numKRegs = 8
+ )
+ lZRegs := layout{sp: xReg} // Non-GP registers
+ lXRegs, lYRegs := lZRegs, lZRegs
+ for i := range numZRegs {
+ lZRegs.add("VMOVDQU64", fmt.Sprintf("Z%d", i), 512/8)
+ if i < numXRegs {
+ // Use SSE-only instructions for X registers.
+ lXRegs.add("MOVUPS", fmt.Sprintf("X%d", i), 128/8)
+ lYRegs.add("VMOVDQU", fmt.Sprintf("Y%d", i), 256/8)
}
}
-
- // TODO: MXCSR register?
+ for i := range numKRegs {
+ lZRegs.add("KMOVQ", fmt.Sprintf("K%d", i), 8)
+ }
+ // The Z layout is the most general, so we line up the others with that one.
+ // We don't have to do this, but it results in a nice Go type. If we split
+ // this into multiple types, we probably should stop doing this.
+ for i := range lXRegs.regs {
+ lXRegs.regs[i].pos = lZRegs.regs[i].pos
+ lYRegs.regs[i].pos = lZRegs.regs[i].pos
+ }
+ writeXRegs(g.goarch, &lZRegs)
p("PUSHQ BP")
p("MOVQ SP, BP")
p("// Save flags before clobbering them")
p("PUSHFQ")
p("// obj doesn't understand ADD/SUB on SP, but does understand ADJSP")
- p("ADJSP $%d", lSSE.stack)
+ p("ADJSP $%d", l.stack)
p("// But vet doesn't know ADJSP, so suppress vet stack checking")
p("NOP SP")
- l.save()
+ p("// Save GPs")
+ l.save(g)
+
+ // In general, the limitations on asynchronous preemption mean we only
+ // preempt in ABIInternal code. However, there's at least one exception to
+ // this: when we're in an open-coded transition between an ABIInternal
+ // function and an ABI0 call. We could more carefully arrange unsafe points
+ // to avoid ever landing in ABI0, but it's easy to just make this code not
+ // sensitive to the ABI we're preempting. The CALL to asyncPreempt2 will
+ // ensure we're in ABIInternal register state.
+ p("// Save extended register state to p.xRegs.scratch")
+ p("// Don't make assumptions about ABI register state. See mkpreempt.go")
+ p("get_tls(CX)")
+ p("MOVQ g(CX), R14")
+ p("MOVQ g_m(R14), %s", xReg)
+ p("MOVQ m_p(%s), %s", xReg, xReg)
+ p("LEAQ (p_xRegs+xRegPerP_scratch)(%s), %s", xReg, xReg)
+
+ // Which registers do we need to save?
+ p("#ifdef GOEXPERIMENT_simd")
+ p("CMPB internal∕cpu·X86+const_offsetX86HasAVX512(SB), $1")
+ p("JE saveAVX512")
+ p("CMPB internal∕cpu·X86+const_offsetX86HasAVX2(SB), $1")
+ p("JE saveAVX2")
+ p("#endif")
+
+ // No features. Assume only SSE.
+ label("saveSSE:")
+ lXRegs.save(g)
+ p("JMP preempt")
+
+ label("saveAVX2:")
+ lYRegs.save(g)
+ p("JMP preempt")
- lSSE.save()
+ label("saveAVX512:")
+ lZRegs.save(g)
+ p("JMP preempt")
+
+ label("preempt:")
p("CALL ·asyncPreempt2(SB)")
- lSSE.restore()
- l.restore()
- p("ADJSP $%d", -lSSE.stack)
+
+ p("// Restore non-GPs from *p.xRegs.cache")
+ p("MOVQ g_m(R14), %s", xReg)
+ p("MOVQ m_p(%s), %s", xReg, xReg)
+ p("MOVQ (p_xRegs+xRegPerP_cache)(%s), %s", xReg, xReg)
+
+ p("#ifdef GOEXPERIMENT_simd")
+ p("CMPB internal∕cpu·X86+const_offsetX86HasAVX512(SB), $1")
+ p("JE restoreAVX512")
+ p("CMPB internal∕cpu·X86+const_offsetX86HasAVX2(SB), $1")
+ p("JE restoreAVX2")
+ p("#endif")
+
+ label("restoreSSE:")
+ lXRegs.restore(g)
+ p("JMP restoreGPs")
+
+ label("restoreAVX2:")
+ lYRegs.restore(g)
+ p("JMP restoreGPs")
+
+ label("restoreAVX512:")
+ lZRegs.restore(g)
+ p("JMP restoreGPs")
+
+ label("restoreGPs:")
+ p("// Restore GPs")
+ l.restore(g)
+ p("ADJSP $%d", -l.stack)
p("POPFQ")
p("POPQ BP")
p("RET")
}
-func genARM() {
+func genARM(g *gen) {
+ p := g.p
+
// Add integer registers R0-R12.
// R13 (SP), R14 (LR), R15 (PC) are special and not saved here.
var l = layout{sp: "R13", stack: 4} // add LR slot
@@ -303,22 +440,23 @@ func genARM() {
}
p("MOVW.W R14, -%d(R13)", lfp.stack) // allocate frame, save LR
- l.save()
+ l.save(g)
p("MOVB ·goarmsoftfp(SB), R0\nCMP $0, R0\nBNE nofp") // test goarmsoftfp, and skip FP registers if goarmsoftfp!=0.
- lfp.save()
- label("nofp:")
+ lfp.save(g)
+ g.label("nofp:")
p("CALL ·asyncPreempt2(SB)")
p("MOVB ·goarmsoftfp(SB), R0\nCMP $0, R0\nBNE nofp2") // test goarmsoftfp, and skip FP registers if goarmsoftfp!=0.
- lfp.restore()
- label("nofp2:")
- l.restore()
+ lfp.restore(g)
+ g.label("nofp2:")
+ l.restore(g)
p("MOVW %d(R13), R14", lfp.stack) // sigctxt.pushCall pushes LR on stack, restore it
p("MOVW.P %d(R13), R15", lfp.stack+4) // load PC, pop frame (including the space pushed by sigctxt.pushCall)
p("UNDEF") // shouldn't get here
}
-func genARM64() {
+func genARM64(g *gen) {
+ p := g.p
// Add integer registers R0-R26
// R27 (REGTMP), R28 (g), R29 (FP), R30 (LR), R31 (SP) are special
// and not saved here.
@@ -362,9 +500,9 @@ func genARM64() {
p("MOVD R30, (RSP)")
p("#endif")
- l.save()
+ l.save(g)
p("CALL ·asyncPreempt2(SB)")
- l.restore()
+ l.restore(g)
p("MOVD %d(RSP), R30", l.stack) // sigctxt.pushCall has pushed LR (at interrupt) on stack, restore it
p("MOVD -8(RSP), R29") // restore frame pointer
@@ -373,7 +511,9 @@ func genARM64() {
p("RET (R27)")
}
-func genMIPS(_64bit bool) {
+func genMIPS(g *gen, _64bit bool) {
+ p := g.p
+
mov := "MOVW"
movf := "MOVF"
add := "ADD"
@@ -428,15 +568,15 @@ func genMIPS(_64bit bool) {
p(mov+" R31, -%d(R29)", lfp.stack)
p(sub+" $%d, R29", lfp.stack)
- l.save()
+ l.save(g)
p("#ifndef %s", softfloat)
- lfp.save()
+ lfp.save(g)
p("#endif")
p("CALL ·asyncPreempt2(SB)")
p("#ifndef %s", softfloat)
- lfp.restore()
+ lfp.restore(g)
p("#endif")
- l.restore()
+ l.restore(g)
p(mov+" %d(R29), R31", lfp.stack) // sigctxt.pushCall has pushed LR (at interrupt) on stack, restore it
p(mov + " (R29), R23") // load PC to REGTMP
@@ -444,7 +584,9 @@ func genMIPS(_64bit bool) {
p("JMP (R23)")
}
-func genLoong64() {
+func genLoong64(g *gen) {
+ p := g.p
+
mov := "MOVV"
movf := "MOVD"
add := "ADDV"
@@ -478,9 +620,9 @@ func genLoong64() {
p(mov+" R1, -%d(R3)", l.stack)
p(sub+" $%d, R3", l.stack)
- l.save()
+ l.save(g)
p("CALL ·asyncPreempt2(SB)")
- l.restore()
+ l.restore(g)
p(mov+" %d(R3), R1", l.stack) // sigctxt.pushCall has pushed LR (at interrupt) on stack, restore it
p(mov + " (R3), R30") // load PC to REGTMP
@@ -488,7 +630,9 @@ func genLoong64() {
p("JMP (R30)")
}
-func genPPC64() {
+func genPPC64(g *gen) {
+ p := g.p
+
// Add integer registers R3-R29
// R0 (zero), R1 (SP), R30 (g) are special and not saved here.
// R2 (TOC pointer in PIC mode), R12 (function entry address in PIC mode) have been saved in sigctxt.pushCall.
@@ -528,9 +672,9 @@ func genPPC64() {
p("MOVD LR, R31")
p("MOVDU R31, -%d(R1)", l.stack) // allocate frame, save PC of interrupted instruction (in LR)
- l.save()
+ l.save(g)
p("CALL ·asyncPreempt2(SB)")
- l.restore()
+ l.restore(g)
p("MOVD %d(R1), R31", l.stack) // sigctxt.pushCall has pushed LR, R2, R12 (at interrupt) on stack, restore them
p("MOVD R31, LR")
@@ -543,7 +687,9 @@ func genPPC64() {
p("JMP (CTR)")
}
-func genRISCV64() {
+func genRISCV64(g *gen) {
+ p := g.p
+
// X0 (zero), X1 (LR), X2 (SP), X3 (GP), X4 (TP), X27 (g), X31 (TMP) are special.
var l = layout{sp: "X2", stack: 8}
@@ -564,16 +710,18 @@ func genRISCV64() {
p("MOV X1, -%d(X2)", l.stack)
p("SUB $%d, X2", l.stack)
- l.save()
+ l.save(g)
p("CALL ·asyncPreempt2(SB)")
- l.restore()
+ l.restore(g)
p("MOV %d(X2), X1", l.stack)
p("MOV (X2), X31")
p("ADD $%d, X2", l.stack+8)
p("JMP (X31)")
}
-func genS390X() {
+func genS390X(g *gen) {
+ p := g.p
+
// Add integer registers R0-R12
// R13 (g), R14 (LR), R15 (SP) are special, and not saved here.
// Saving R10 (REGTMP) is not necessary, but it is saved anyway.
@@ -594,9 +742,9 @@ func genS390X() {
p("ADD $-%d, R15", l.stack)
p("MOVW R10, 8(R15)") // save flags
- l.save()
+ l.save(g)
p("CALL ·asyncPreempt2(SB)")
- l.restore()
+ l.restore(g)
p("MOVD %d(R15), R14", l.stack) // sigctxt.pushCall has pushed LR (at interrupt) on stack, restore it
p("ADD $%d, R15", l.stack+8) // pop frame (including the space pushed by sigctxt.pushCall)
@@ -606,12 +754,14 @@ func genS390X() {
p("JMP (R10)")
}
-func genWasm() {
+func genWasm(g *gen) {
+ p := g.p
p("// No async preemption on wasm")
p("UNDEF")
}
-func notImplemented() {
+func notImplemented(g *gen) {
+ p := g.p
p("// Not implemented yet")
p("JMP ·abort(SB)")
}
diff --git a/src/runtime/panic.go b/src/runtime/panic.go
index 8f9ab4dd47345b..8c91c9435abd18 100644
--- a/src/runtime/panic.go
+++ b/src/runtime/panic.go
@@ -103,9 +103,8 @@ func panicCheck2(err string) {
// these (they always look like they're called from the runtime).
// Hence, for these, we just check for clearly bad runtime conditions.
//
-// The panic{Index,Slice} functions are implemented in assembly and tail call
-// to the goPanic{Index,Slice} functions below. This is done so we can use
-// a space-minimal register calling convention.
+// The goPanic{Index,Slice} functions are only used by wasm. All the other architectures
+// use panic{Bounds,Extend} in assembly, which then call to panicBounds{64,32,32X}.
// failures in the comparisons for s[x], 0 <= x < y (y == len(s))
//
@@ -205,28 +204,10 @@ func goPanicSliceConvert(x int, y int) {
panic(boundsError{x: int64(x), signed: true, y: y, code: abi.BoundsConvert})
}
-// Implemented in assembly, as they take arguments in registers.
-// Declared here to mark them as ABIInternal.
-func panicIndex(x int, y int)
-func panicIndexU(x uint, y int)
-func panicSliceAlen(x int, y int)
-func panicSliceAlenU(x uint, y int)
-func panicSliceAcap(x int, y int)
-func panicSliceAcapU(x uint, y int)
-func panicSliceB(x int, y int)
-func panicSliceBU(x uint, y int)
-func panicSlice3Alen(x int, y int)
-func panicSlice3AlenU(x uint, y int)
-func panicSlice3Acap(x int, y int)
-func panicSlice3AcapU(x uint, y int)
-func panicSlice3B(x int, y int)
-func panicSlice3BU(x uint, y int)
-func panicSlice3C(x int, y int)
-func panicSlice3CU(x uint, y int)
-func panicSliceConvert(x int, y int)
-
+// Implemented in assembly. Declared here to mark them as ABIInternal.
func panicBounds() // in asm_GOARCH.s files, called from generated code
func panicExtend() // in asm_GOARCH.s files, called from generated code (on 32-bit archs)
+
func panicBounds64(pc uintptr, regs *[16]int64) { // called from panicBounds on 64-bit archs
f := findfunc(pc)
v := pcdatavalue(f, abi.PCDATA_PanicBounds, pc-1)
diff --git a/src/runtime/pprof/pprof_test.go b/src/runtime/pprof/pprof_test.go
index 5f83f37b5078f2..99c5155806da63 100644
--- a/src/runtime/pprof/pprof_test.go
+++ b/src/runtime/pprof/pprof_test.go
@@ -635,10 +635,6 @@ func TestCPUProfileWithFork(t *testing.T) {
// Use smaller size for Android to avoid crash.
heap = 100 << 20
}
- if runtime.GOOS == "windows" && runtime.GOARCH == "arm" {
- // Use smaller heap for Windows/ARM to avoid crash.
- heap = 100 << 20
- }
if testing.Short() {
heap = 100 << 20
}
diff --git a/src/runtime/preempt.go b/src/runtime/preempt.go
index c41c3558359c0c..22727df74eead2 100644
--- a/src/runtime/preempt.go
+++ b/src/runtime/preempt.go
@@ -292,21 +292,52 @@ func canPreemptM(mp *m) bool {
// asyncPreempt saves all user registers and calls asyncPreempt2.
//
-// When stack scanning encounters an asyncPreempt frame, it scans that
+// It saves GP registers (anything that might contain a pointer) to the G stack.
+// Hence, when stack scanning encounters an asyncPreempt frame, it scans that
// frame and its parent frame conservatively.
//
+// On some platforms, it saves large additional scalar-only register state such
+// as vector registers to an "extended register state" on the P.
+//
// asyncPreempt is implemented in assembly.
func asyncPreempt()
+// asyncPreempt2 is the Go continuation of asyncPreempt.
+//
+// It must be deeply nosplit because there's untyped data on the stack from
+// asyncPreempt.
+//
+// It must not have any write barriers because we need to limit the amount of
+// stack it uses.
+//
//go:nosplit
+//go:nowritebarrierrec
func asyncPreempt2() {
+ // We can't grow the stack with untyped data from asyncPreempt, so switch to
+ // the system stack right away.
+ mcall(func(gp *g) {
+ gp.asyncSafePoint = true
+
+ // Move the extended register state from the P to the G. We do this now that
+ // we're on the system stack to avoid stack splits.
+ xRegSave(gp)
+
+ if gp.preemptStop {
+ preemptPark(gp)
+ } else {
+ gopreempt_m(gp)
+ }
+ // The above functions never return.
+ })
+
+ // Do not grow the stack below here!
+
gp := getg()
- gp.asyncSafePoint = true
- if gp.preemptStop {
- mcall(preemptPark)
- } else {
- mcall(gopreempt_m)
- }
+
+ // Put the extended register state back on the M so resumption can find it.
+ // We can't do this in asyncPreemptM because the park calls never return.
+ xRegRestore(gp)
+
gp.asyncSafePoint = false
}
@@ -319,19 +350,13 @@ func init() {
total := funcMaxSPDelta(f)
f = findfunc(abi.FuncPCABIInternal(asyncPreempt2))
total += funcMaxSPDelta(f)
+ f = findfunc(abi.FuncPCABIInternal(xRegRestore))
+ total += funcMaxSPDelta(f)
// Add some overhead for return PCs, etc.
asyncPreemptStack = uintptr(total) + 8*goarch.PtrSize
if asyncPreemptStack > stackNosplit {
- // We need more than the nosplit limit. This isn't
- // unsafe, but it may limit asynchronous preemption.
- //
- // This may be a problem if we start using more
- // registers. In that case, we should store registers
- // in a context object. If we pre-allocate one per P,
- // asyncPreempt can spill just a few registers to the
- // stack, then grab its context object and spill into
- // it. When it enters the runtime, it would allocate a
- // new context for the P.
+ // We need more than the nosplit limit. This isn't unsafe, but it may
+ // limit asynchronous preemption. Consider moving state into xRegState.
print("runtime: asyncPreemptStack=", asyncPreemptStack, "\n")
throw("async stack too large")
}
diff --git a/src/runtime/preempt_amd64.go b/src/runtime/preempt_amd64.go
new file mode 100644
index 00000000000000..88c0ddd34ade72
--- /dev/null
+++ b/src/runtime/preempt_amd64.go
@@ -0,0 +1,30 @@
+// Code generated by mkpreempt.go; DO NOT EDIT.
+
+package runtime
+
+type xRegs struct {
+ Z0 [64]byte
+ Z1 [64]byte
+ Z2 [64]byte
+ Z3 [64]byte
+ Z4 [64]byte
+ Z5 [64]byte
+ Z6 [64]byte
+ Z7 [64]byte
+ Z8 [64]byte
+ Z9 [64]byte
+ Z10 [64]byte
+ Z11 [64]byte
+ Z12 [64]byte
+ Z13 [64]byte
+ Z14 [64]byte
+ Z15 [64]byte
+ K0 uint64
+ K1 uint64
+ K2 uint64
+ K3 uint64
+ K4 uint64
+ K5 uint64
+ K6 uint64
+ K7 uint64
+}
diff --git a/src/runtime/preempt_amd64.s b/src/runtime/preempt_amd64.s
index 8e3ed0d7c59dce..c35de7f3b75726 100644
--- a/src/runtime/preempt_amd64.s
+++ b/src/runtime/preempt_amd64.s
@@ -1,6 +1,7 @@
// Code generated by mkpreempt.go; DO NOT EDIT.
#include "go_asm.h"
+#include "go_tls.h"
#include "asm_amd64.h"
#include "textflag.h"
@@ -10,9 +11,10 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
// Save flags before clobbering them
PUSHFQ
// obj doesn't understand ADD/SUB on SP, but does understand ADJSP
- ADJSP $368
+ ADJSP $112
// But vet doesn't know ADJSP, so suppress vet stack checking
NOP SP
+ // Save GPs
MOVQ AX, 0(SP)
MOVQ CX, 8(SP)
MOVQ DX, 16(SP)
@@ -27,39 +29,157 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
MOVQ R13, 88(SP)
MOVQ R14, 96(SP)
MOVQ R15, 104(SP)
- MOVUPS X0, 112(SP)
- MOVUPS X1, 128(SP)
- MOVUPS X2, 144(SP)
- MOVUPS X3, 160(SP)
- MOVUPS X4, 176(SP)
- MOVUPS X5, 192(SP)
- MOVUPS X6, 208(SP)
- MOVUPS X7, 224(SP)
- MOVUPS X8, 240(SP)
- MOVUPS X9, 256(SP)
- MOVUPS X10, 272(SP)
- MOVUPS X11, 288(SP)
- MOVUPS X12, 304(SP)
- MOVUPS X13, 320(SP)
- MOVUPS X14, 336(SP)
- MOVUPS X15, 352(SP)
+ // Save extended register state to p.xRegs.scratch
+ // Don't make assumptions about ABI register state. See mkpreempt.go
+ get_tls(CX)
+ MOVQ g(CX), R14
+ MOVQ g_m(R14), AX
+ MOVQ m_p(AX), AX
+ LEAQ (p_xRegs+xRegPerP_scratch)(AX), AX
+ #ifdef GOEXPERIMENT_simd
+ CMPB internal∕cpu·X86+const_offsetX86HasAVX512(SB), $1
+ JE saveAVX512
+ CMPB internal∕cpu·X86+const_offsetX86HasAVX2(SB), $1
+ JE saveAVX2
+ #endif
+saveSSE:
+ MOVUPS X0, 0(AX)
+ MOVUPS X1, 64(AX)
+ MOVUPS X2, 128(AX)
+ MOVUPS X3, 192(AX)
+ MOVUPS X4, 256(AX)
+ MOVUPS X5, 320(AX)
+ MOVUPS X6, 384(AX)
+ MOVUPS X7, 448(AX)
+ MOVUPS X8, 512(AX)
+ MOVUPS X9, 576(AX)
+ MOVUPS X10, 640(AX)
+ MOVUPS X11, 704(AX)
+ MOVUPS X12, 768(AX)
+ MOVUPS X13, 832(AX)
+ MOVUPS X14, 896(AX)
+ MOVUPS X15, 960(AX)
+ JMP preempt
+saveAVX2:
+ VMOVDQU Y0, 0(AX)
+ VMOVDQU Y1, 64(AX)
+ VMOVDQU Y2, 128(AX)
+ VMOVDQU Y3, 192(AX)
+ VMOVDQU Y4, 256(AX)
+ VMOVDQU Y5, 320(AX)
+ VMOVDQU Y6, 384(AX)
+ VMOVDQU Y7, 448(AX)
+ VMOVDQU Y8, 512(AX)
+ VMOVDQU Y9, 576(AX)
+ VMOVDQU Y10, 640(AX)
+ VMOVDQU Y11, 704(AX)
+ VMOVDQU Y12, 768(AX)
+ VMOVDQU Y13, 832(AX)
+ VMOVDQU Y14, 896(AX)
+ VMOVDQU Y15, 960(AX)
+ JMP preempt
+saveAVX512:
+ VMOVDQU64 Z0, 0(AX)
+ VMOVDQU64 Z1, 64(AX)
+ VMOVDQU64 Z2, 128(AX)
+ VMOVDQU64 Z3, 192(AX)
+ VMOVDQU64 Z4, 256(AX)
+ VMOVDQU64 Z5, 320(AX)
+ VMOVDQU64 Z6, 384(AX)
+ VMOVDQU64 Z7, 448(AX)
+ VMOVDQU64 Z8, 512(AX)
+ VMOVDQU64 Z9, 576(AX)
+ VMOVDQU64 Z10, 640(AX)
+ VMOVDQU64 Z11, 704(AX)
+ VMOVDQU64 Z12, 768(AX)
+ VMOVDQU64 Z13, 832(AX)
+ VMOVDQU64 Z14, 896(AX)
+ VMOVDQU64 Z15, 960(AX)
+ KMOVQ K0, 1024(AX)
+ KMOVQ K1, 1032(AX)
+ KMOVQ K2, 1040(AX)
+ KMOVQ K3, 1048(AX)
+ KMOVQ K4, 1056(AX)
+ KMOVQ K5, 1064(AX)
+ KMOVQ K6, 1072(AX)
+ KMOVQ K7, 1080(AX)
+ JMP preempt
+preempt:
CALL ·asyncPreempt2(SB)
- MOVUPS 352(SP), X15
- MOVUPS 336(SP), X14
- MOVUPS 320(SP), X13
- MOVUPS 304(SP), X12
- MOVUPS 288(SP), X11
- MOVUPS 272(SP), X10
- MOVUPS 256(SP), X9
- MOVUPS 240(SP), X8
- MOVUPS 224(SP), X7
- MOVUPS 208(SP), X6
- MOVUPS 192(SP), X5
- MOVUPS 176(SP), X4
- MOVUPS 160(SP), X3
- MOVUPS 144(SP), X2
- MOVUPS 128(SP), X1
- MOVUPS 112(SP), X0
+ // Restore non-GPs from *p.xRegs.cache
+ MOVQ g_m(R14), AX
+ MOVQ m_p(AX), AX
+ MOVQ (p_xRegs+xRegPerP_cache)(AX), AX
+ #ifdef GOEXPERIMENT_simd
+ CMPB internal∕cpu·X86+const_offsetX86HasAVX512(SB), $1
+ JE restoreAVX512
+ CMPB internal∕cpu·X86+const_offsetX86HasAVX2(SB), $1
+ JE restoreAVX2
+ #endif
+restoreSSE:
+ MOVUPS 960(AX), X15
+ MOVUPS 896(AX), X14
+ MOVUPS 832(AX), X13
+ MOVUPS 768(AX), X12
+ MOVUPS 704(AX), X11
+ MOVUPS 640(AX), X10
+ MOVUPS 576(AX), X9
+ MOVUPS 512(AX), X8
+ MOVUPS 448(AX), X7
+ MOVUPS 384(AX), X6
+ MOVUPS 320(AX), X5
+ MOVUPS 256(AX), X4
+ MOVUPS 192(AX), X3
+ MOVUPS 128(AX), X2
+ MOVUPS 64(AX), X1
+ MOVUPS 0(AX), X0
+ JMP restoreGPs
+restoreAVX2:
+ VMOVDQU 960(AX), Y15
+ VMOVDQU 896(AX), Y14
+ VMOVDQU 832(AX), Y13
+ VMOVDQU 768(AX), Y12
+ VMOVDQU 704(AX), Y11
+ VMOVDQU 640(AX), Y10
+ VMOVDQU 576(AX), Y9
+ VMOVDQU 512(AX), Y8
+ VMOVDQU 448(AX), Y7
+ VMOVDQU 384(AX), Y6
+ VMOVDQU 320(AX), Y5
+ VMOVDQU 256(AX), Y4
+ VMOVDQU 192(AX), Y3
+ VMOVDQU 128(AX), Y2
+ VMOVDQU 64(AX), Y1
+ VMOVDQU 0(AX), Y0
+ JMP restoreGPs
+restoreAVX512:
+ KMOVQ 1080(AX), K7
+ KMOVQ 1072(AX), K6
+ KMOVQ 1064(AX), K5
+ KMOVQ 1056(AX), K4
+ KMOVQ 1048(AX), K3
+ KMOVQ 1040(AX), K2
+ KMOVQ 1032(AX), K1
+ KMOVQ 1024(AX), K0
+ VMOVDQU64 960(AX), Z15
+ VMOVDQU64 896(AX), Z14
+ VMOVDQU64 832(AX), Z13
+ VMOVDQU64 768(AX), Z12
+ VMOVDQU64 704(AX), Z11
+ VMOVDQU64 640(AX), Z10
+ VMOVDQU64 576(AX), Z9
+ VMOVDQU64 512(AX), Z8
+ VMOVDQU64 448(AX), Z7
+ VMOVDQU64 384(AX), Z6
+ VMOVDQU64 320(AX), Z5
+ VMOVDQU64 256(AX), Z4
+ VMOVDQU64 192(AX), Z3
+ VMOVDQU64 128(AX), Z2
+ VMOVDQU64 64(AX), Z1
+ VMOVDQU64 0(AX), Z0
+ JMP restoreGPs
+restoreGPs:
+ // Restore GPs
MOVQ 104(SP), R15
MOVQ 96(SP), R14
MOVQ 88(SP), R13
@@ -74,7 +194,7 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
MOVQ 16(SP), DX
MOVQ 8(SP), CX
MOVQ 0(SP), AX
- ADJSP $-368
+ ADJSP $-112
POPFQ
POPQ BP
RET
diff --git a/src/runtime/preempt_noxreg.go b/src/runtime/preempt_noxreg.go
new file mode 100644
index 00000000000000..dfe46559b5b723
--- /dev/null
+++ b/src/runtime/preempt_noxreg.go
@@ -0,0 +1,27 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !amd64
+
+// This provides common support for architectures that DO NOT use extended
+// register state in asynchronous preemption.
+
+package runtime
+
+type xRegPerG struct{}
+
+type xRegPerP struct{}
+
+// xRegState is defined only so the build fails if we try to define a real
+// xRegState on a noxreg architecture.
+type xRegState struct{}
+
+func xRegInitAlloc() {}
+
+func xRegSave(gp *g) {}
+
+//go:nosplit
+func xRegRestore(gp *g) {}
+
+func (*xRegPerP) free() {}
diff --git a/src/runtime/preempt_xreg.go b/src/runtime/preempt_xreg.go
new file mode 100644
index 00000000000000..9e05455ddbb747
--- /dev/null
+++ b/src/runtime/preempt_xreg.go
@@ -0,0 +1,137 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build amd64
+
+// This provides common support for architectures that use extended register
+// state in asynchronous preemption.
+//
+// While asynchronous preemption stores general-purpose (GP) registers on the
+// preempted goroutine's own stack, extended register state can be used to save
+// non-GP state off the stack. In particular, this is meant for large vector
+// register files. Currently, we assume this contains only scalar data, though
+// we could change this constraint by conservatively scanning this memory.
+//
+// For an architecture to support extended register state, it must provide a Go
+// definition of an xRegState type for storing the state, and its asyncPreempt
+// implementation must write this register state to p.xRegs.scratch.
+
+package runtime
+
+import (
+ "internal/runtime/sys"
+ "unsafe"
+)
+
+// xRegState is long-lived extended register state. It is allocated off-heap and
+// manually managed.
+type xRegState struct {
+ _ sys.NotInHeap // Allocated from xRegAlloc
+ regs xRegs
+}
+
+// xRegPerG stores extended register state while a goroutine is asynchronously
+// preempted. This is nil otherwise, so we can reuse a (likely small) pool of
+// xRegState objects.
+type xRegPerG struct {
+ state *xRegState
+}
+
+type xRegPerP struct {
+ // scratch temporary per-P space where [asyncPreempt] saves the register
+ // state before entering Go. It's quickly copied to per-G state.
+ scratch xRegs
+
+ // cache is a 1-element allocation cache of extended register state used by
+ // asynchronous preemption. On entry to preemption, this is used as a simple
+ // allocation cache. On exit from preemption, the G's xRegState is always
+ // stored here where it can be restored, and later either freed or reused
+ // for another preemption. On exit, this serves the dual purpose of
+ // delay-freeing the allocated xRegState until after we've definitely
+ // restored it.
+ cache *xRegState
+}
+
+// xRegAlloc allocates xRegState objects.
+var xRegAlloc struct {
+ lock mutex
+ alloc fixalloc
+}
+
+func xRegInitAlloc() {
+ lockInit(&xRegAlloc.lock, lockRankXRegAlloc)
+ xRegAlloc.alloc.init(unsafe.Sizeof(xRegState{}), nil, nil, &memstats.other_sys)
+}
+
+// xRegSave saves the extended register state on this P to gp.
+//
+// This must run on the system stack because it assumes the P won't change.
+//
+//go:systemstack
+func xRegSave(gp *g) {
+ if gp.xRegs.state != nil {
+ // Double preempt?
+ throw("gp.xRegState.p != nil on async preempt")
+ }
+
+ // Get the place to save the register state.
+ var dest *xRegState
+ pp := gp.m.p.ptr()
+ if pp.xRegs.cache != nil {
+ // Use the cached allocation.
+ dest = pp.xRegs.cache
+ pp.xRegs.cache = nil
+ } else {
+ // Allocate a new save block.
+ lock(&xRegAlloc.lock)
+ dest = (*xRegState)(xRegAlloc.alloc.alloc())
+ unlock(&xRegAlloc.lock)
+ }
+
+ // Copy state saved in the scratchpad to dest.
+ //
+ // If we ever need to save less state (e.g., avoid saving vector registers
+ // that aren't in use), we could have multiple allocation pools for
+ // different size states and copy only the registers we need.
+ dest.regs = pp.xRegs.scratch
+
+ // Save on the G.
+ gp.xRegs.state = dest
+}
+
+// xRegRestore prepares the extended register state on gp to be restored.
+//
+// It moves the state to gp.m.p.xRegs.cache where [asyncPreempt] expects to find
+// it. This means nothing else may use the cache between this call and the
+// return to asyncPreempt. This is not quite symmetric with [xRegSave], which
+// uses gp.m.p.xRegs.scratch. By using cache instead, we save a block copy.
+//
+// This is called with asyncPreempt on the stack and thus must not grow the
+// stack.
+//
+//go:nosplit
+func xRegRestore(gp *g) {
+ if gp.xRegs.state == nil {
+ throw("gp.xRegState.p == nil on return from async preempt")
+ }
+ // If the P has a block cached on it, free that so we can replace it.
+ pp := gp.m.p.ptr()
+ if pp.xRegs.cache != nil {
+ // Don't grow the G stack.
+ systemstack(func() {
+ pp.xRegs.free()
+ })
+ }
+ pp.xRegs.cache = gp.xRegs.state
+ gp.xRegs.state = nil
+}
+
+func (xRegs *xRegPerP) free() {
+ if xRegs.cache != nil {
+ lock(&xRegAlloc.lock)
+ xRegAlloc.alloc.free(unsafe.Pointer(xRegs.cache))
+ xRegs.cache = nil
+ unlock(&xRegAlloc.lock)
+ }
+}
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index ec66384a75fa1b..25d39d9ba389ad 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -5838,6 +5838,7 @@ func (pp *p) destroy() {
pp.gcAssistTime = 0
gcCleanups.queued += pp.cleanupsQueued
pp.cleanupsQueued = 0
+ pp.xRegs.free()
pp.status = _Pdead
}
diff --git a/src/runtime/race/internal/amd64v1/race_darwin.syso b/src/runtime/race/internal/amd64v1/race_darwin.syso
index e92f4ce74533f7..d3a9c200a500c8 100644
Binary files a/src/runtime/race/internal/amd64v1/race_darwin.syso and b/src/runtime/race/internal/amd64v1/race_darwin.syso differ
diff --git a/src/runtime/race/race_darwin_amd64.go b/src/runtime/race/race_darwin_amd64.go
index 02d73f8d388d3c..3f4e587e6bbead 100644
--- a/src/runtime/race/race_darwin_amd64.go
+++ b/src/runtime/race/race_darwin_amd64.go
@@ -28,9 +28,6 @@ package race
//go:cgo_import_dynamic _dyld_get_shared_cache_uuid _dyld_get_shared_cache_uuid ""
//go:cgo_import_dynamic _dyld_image_count _dyld_image_count ""
//go:cgo_import_dynamic _exit _exit ""
-//go:cgo_import_dynamic _sanitizer_internal_memcpy _sanitizer_internal_memcpy ""
-//go:cgo_import_dynamic _sanitizer_internal_memmove _sanitizer_internal_memmove ""
-//go:cgo_import_dynamic _sanitizer_internal_memset _sanitizer_internal_memset ""
//go:cgo_import_dynamic abort abort ""
//go:cgo_import_dynamic arc4random_buf arc4random_buf ""
//go:cgo_import_dynamic close close ""
@@ -57,7 +54,6 @@ package race
//go:cgo_import_dynamic madvise madvise ""
//go:cgo_import_dynamic malloc_num_zones malloc_num_zones ""
//go:cgo_import_dynamic malloc_zones malloc_zones ""
-//go:cgo_import_dynamic memcpy memcpy ""
//go:cgo_import_dynamic memset_pattern16 memset_pattern16 ""
//go:cgo_import_dynamic mkdir mkdir ""
//go:cgo_import_dynamic mprotect mprotect ""
@@ -103,6 +99,3 @@ package race
//go:cgo_import_dynamic vm_region_recurse_64 vm_region_recurse_64 ""
//go:cgo_import_dynamic waitpid waitpid ""
//go:cgo_import_dynamic write write ""
-//go:cgo_import_dynamic memcpy memcpy ""
-//go:cgo_import_dynamic memmove memmove ""
-//go:cgo_import_dynamic memset memset ""
diff --git a/src/runtime/race/race_darwin_arm64.go b/src/runtime/race/race_darwin_arm64.go
index cb703a6dedd3de..66e327efcc4105 100644
--- a/src/runtime/race/race_darwin_arm64.go
+++ b/src/runtime/race/race_darwin_arm64.go
@@ -27,9 +27,6 @@ package race
//go:cgo_import_dynamic _dyld_get_shared_cache_uuid _dyld_get_shared_cache_uuid ""
//go:cgo_import_dynamic _dyld_image_count _dyld_image_count ""
//go:cgo_import_dynamic _exit _exit ""
-//go:cgo_import_dynamic _sanitizer_internal_memcpy _sanitizer_internal_memcpy ""
-//go:cgo_import_dynamic _sanitizer_internal_memmove _sanitizer_internal_memmove ""
-//go:cgo_import_dynamic _sanitizer_internal_memset _sanitizer_internal_memset ""
//go:cgo_import_dynamic abort abort ""
//go:cgo_import_dynamic arc4random_buf arc4random_buf ""
//go:cgo_import_dynamic bzero bzero ""
@@ -57,7 +54,6 @@ package race
//go:cgo_import_dynamic madvise madvise ""
//go:cgo_import_dynamic malloc_num_zones malloc_num_zones ""
//go:cgo_import_dynamic malloc_zones malloc_zones ""
-//go:cgo_import_dynamic memcpy memcpy ""
//go:cgo_import_dynamic memset_pattern16 memset_pattern16 ""
//go:cgo_import_dynamic mkdir mkdir ""
//go:cgo_import_dynamic mprotect mprotect ""
@@ -103,6 +99,3 @@ package race
//go:cgo_import_dynamic vm_region_recurse_64 vm_region_recurse_64 ""
//go:cgo_import_dynamic waitpid waitpid ""
//go:cgo_import_dynamic write write ""
-//go:cgo_import_dynamic memcpy memcpy ""
-//go:cgo_import_dynamic memmove memmove ""
-//go:cgo_import_dynamic memset memset ""
diff --git a/src/runtime/race/race_darwin_arm64.syso b/src/runtime/race/race_darwin_arm64.syso
index 8d8c120717fe71..706951f4716dc1 100644
Binary files a/src/runtime/race/race_darwin_arm64.syso and b/src/runtime/race/race_darwin_arm64.syso differ
diff --git a/src/runtime/rt0_windows_arm.s b/src/runtime/rt0_windows_arm.s
deleted file mode 100644
index c5787d0dee0034..00000000000000
--- a/src/runtime/rt0_windows_arm.s
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include "go_asm.h"
-#include "go_tls.h"
-#include "textflag.h"
-
-// This is the entry point for the program from the
-// kernel for an ordinary -buildmode=exe program.
-TEXT _rt0_arm_windows(SB),NOSPLIT|NOFRAME,$0
- B ·rt0_go(SB)
diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go
index 29e9b8a7b999ae..b5d2dcefaded99 100644
--- a/src/runtime/runtime2.go
+++ b/src/runtime/runtime2.go
@@ -492,6 +492,10 @@ type g struct {
coroarg *coro // argument during coroutine transfers
bubble *synctestBubble
+ // xRegs stores the extended register state if this G has been
+ // asynchronously preempted.
+ xRegs xRegPerG
+
// Per-G tracer state.
trace gTraceState
@@ -760,6 +764,11 @@ type p struct {
// gcStopTime is the nanotime timestamp that this P last entered _Pgcstop.
gcStopTime int64
+ // xRegs is the per-P extended register state used by asynchronous
+ // preemption. This is an empty struct on platforms that don't use extended
+ // register state.
+ xRegs xRegPerP
+
// Padding is no longer needed. False sharing is now not a worry because p is large enough
// that its size class is an integer multiple of the cache line size (for any of our architectures).
}
diff --git a/src/runtime/select.go b/src/runtime/select.go
index ae7754b17377dd..113dc8ad19e984 100644
--- a/src/runtime/select.go
+++ b/src/runtime/select.go
@@ -178,7 +178,7 @@ func selectgo(cas0 *scase, order0 *uint16, pc0 *uintptr, nsends, nrecvs int, blo
if cas.c.bubble != nil {
if getg().bubble != cas.c.bubble {
- panic(plainError("select on synctest channel from outside bubble"))
+ fatal("select on synctest channel from outside bubble")
}
} else {
allSynctest = false
diff --git a/src/runtime/signal_windows.go b/src/runtime/signal_windows.go
index 07778c8ebed7a8..f7628a0165b2ed 100644
--- a/src/runtime/signal_windows.go
+++ b/src/runtime/signal_windows.go
@@ -39,7 +39,7 @@ func enableWER() {
}
}
-// in sys_windows_386.s, sys_windows_amd64.s, sys_windows_arm.s, and sys_windows_arm64.s
+// in sys_windows_386.s, sys_windows_amd64.s, and sys_windows_arm64.s
func exceptiontramp()
func firstcontinuetramp()
func lastcontinuetramp()
@@ -64,10 +64,9 @@ func initExceptionHandler() {
//go:nosplit
func isAbort(r *context) bool {
pc := r.ip()
- if GOARCH == "386" || GOARCH == "amd64" || GOARCH == "arm" {
+ if GOARCH == "386" || GOARCH == "amd64" {
// In the case of an abort, the exception IP is one byte after
- // the INT3 (this differs from UNIX OSes). Note that on ARM,
- // this means that the exception IP is no longer aligned.
+ // the INT3 (this differs from UNIX OSes).
pc--
}
return isAbortPC(pc)
diff --git a/src/runtime/sizeof_test.go b/src/runtime/sizeof_test.go
index a5dc8aed3443bc..de859866a5adb2 100644
--- a/src/runtime/sizeof_test.go
+++ b/src/runtime/sizeof_test.go
@@ -15,13 +15,18 @@ import (
func TestSizeof(t *testing.T) {
const _64bit = unsafe.Sizeof(uintptr(0)) == 8
+ const xreg = unsafe.Sizeof(runtime.XRegPerG{}) // Varies per architecture
var tests = []struct {
val any // type as a value
_32bit uintptr // size on 32bit platforms
_64bit uintptr // size on 64bit platforms
}{
- {runtime.G{}, 280, 440}, // g, but exported for testing
- {runtime.Sudog{}, 56, 88}, // sudog, but exported for testing
+ {runtime.G{}, 280 + xreg, 440 + xreg}, // g, but exported for testing
+ {runtime.Sudog{}, 56, 88}, // sudog, but exported for testing
+ }
+
+ if xreg > runtime.PtrSize {
+ t.Errorf("unsafe.Sizeof(xRegPerG) = %d, want <= %d", xreg, runtime.PtrSize)
}
for _, tt := range tests {
diff --git a/src/runtime/syscall_windows.go b/src/runtime/syscall_windows.go
index b3c3d8c0d53864..e86ebf41c7d6ac 100644
--- a/src/runtime/syscall_windows.go
+++ b/src/runtime/syscall_windows.go
@@ -236,7 +236,7 @@ func callbackasm()
// and we want callback to arrive at
// correspondent call instruction instead of start of
// runtime.callbackasm.
-// On ARM, runtime.callbackasm is a series of mov and branch instructions.
+// On ARM64, runtime.callbackasm is a series of mov and branch instructions.
// R12 is loaded with the callback index. Each entry is two instructions,
// hence 8 bytes.
func callbackasmAddr(i int) uintptr {
@@ -246,8 +246,8 @@ func callbackasmAddr(i int) uintptr {
panic("unsupported architecture")
case "386", "amd64":
entrySize = 5
- case "arm", "arm64":
- // On ARM and ARM64, each entry is a MOV instruction
+ case "arm64":
+ // On ARM64, each entry is a MOV instruction
// followed by a branch instruction
entrySize = 8
}
diff --git a/src/runtime/testdata/testprog/checkptr.go b/src/runtime/testdata/testprog/checkptr.go
index 60e71e66d7f4f5..ff99fa8c7b702f 100644
--- a/src/runtime/testdata/testprog/checkptr.go
+++ b/src/runtime/testdata/testprog/checkptr.go
@@ -16,6 +16,7 @@ func init() {
register("CheckPtrAlignmentNilPtr", CheckPtrAlignmentNilPtr)
register("CheckPtrArithmetic", CheckPtrArithmetic)
register("CheckPtrArithmetic2", CheckPtrArithmetic2)
+ register("CheckPtrArithmeticUnsafeAdd", CheckPtrArithmeticUnsafeAdd)
register("CheckPtrSize", CheckPtrSize)
register("CheckPtrSmall", CheckPtrSmall)
register("CheckPtrSliceOK", CheckPtrSliceOK)
@@ -79,6 +80,11 @@ func CheckPtrArithmetic2() {
sink2 = unsafe.Pointer(uintptr(p) & ^one)
}
+func CheckPtrArithmeticUnsafeAdd() {
+ data := make([]byte, 128)
+ sink2 = (*byte)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(data)), len(data)))
+}
+
func CheckPtrSize() {
p := new(int64)
sink2 = p
diff --git a/src/runtime/time.go b/src/runtime/time.go
index 4880dce8cddc79..e9d1f0b6c9a10f 100644
--- a/src/runtime/time.go
+++ b/src/runtime/time.go
@@ -415,7 +415,7 @@ func newTimer(when, period int64, f func(arg any, seq uintptr, delay int64), arg
//go:linkname stopTimer time.stopTimer
func stopTimer(t *timeTimer) bool {
if t.isFake && getg().bubble == nil {
- panic("stop of synctest timer from outside bubble")
+ fatal("stop of synctest timer from outside bubble")
}
return t.stop()
}
@@ -430,7 +430,7 @@ func resetTimer(t *timeTimer, when, period int64) bool {
racerelease(unsafe.Pointer(&t.timer))
}
if t.isFake && getg().bubble == nil {
- panic("reset of synctest timer from outside bubble")
+ fatal("reset of synctest timer from outside bubble")
}
return t.reset(when, period)
}
diff --git a/src/runtime/time_windows_arm.s b/src/runtime/time_windows_arm.s
deleted file mode 100644
index ff5686d9c41139..00000000000000
--- a/src/runtime/time_windows_arm.s
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !faketime
-
-#include "go_asm.h"
-#include "textflag.h"
-#include "time_windows.h"
-
-TEXT time·now(SB),NOSPLIT,$0-20
- MOVW $_INTERRUPT_TIME, R3
-loop:
- MOVW time_hi1(R3), R1
- DMB MB_ISH
- MOVW time_lo(R3), R0
- DMB MB_ISH
- MOVW time_hi2(R3), R2
- CMP R1, R2
- BNE loop
-
- // wintime = R1:R0, multiply by 100
- MOVW $100, R2
- MULLU R0, R2, (R4, R3) // R4:R3 = R1:R0 * R2
- MULA R1, R2, R4, R4
-
- // wintime*100 = R4:R3
- MOVW R3, mono+12(FP)
- MOVW R4, mono+16(FP)
-
- MOVW $_SYSTEM_TIME, R3
-wall:
- MOVW time_hi1(R3), R1
- DMB MB_ISH
- MOVW time_lo(R3), R0
- DMB MB_ISH
- MOVW time_hi2(R3), R2
- CMP R1, R2
- BNE wall
-
- // w = R1:R0 in 100ns untis
- // convert to Unix epoch (but still 100ns units)
- #define delta 116444736000000000
- SUB.S $(delta & 0xFFFFFFFF), R0
- SBC $(delta >> 32), R1
-
- // Convert to nSec
- MOVW $100, R2
- MULLU R0, R2, (R4, R3) // R4:R3 = R1:R0 * R2
- MULA R1, R2, R4, R4
- // w = R2:R1 in nSec
- MOVW R3, R1 // R4:R3 -> R2:R1
- MOVW R4, R2
-
- // multiply nanoseconds by reciprocal of 10**9 (scaled by 2**61)
- // to get seconds (96 bit scaled result)
- MOVW $0x89705f41, R3 // 2**61 * 10**-9
- MULLU R1,R3,(R6,R5) // R7:R6:R5 = R2:R1 * R3
- MOVW $0,R7
- MULALU R2,R3,(R7,R6)
-
- // unscale by discarding low 32 bits, shifting the rest by 29
- MOVW R6>>29,R6 // R7:R6 = (R7:R6:R5 >> 61)
- ORR R7<<3,R6
- MOVW R7>>29,R7
-
- // subtract (10**9 * sec) from nsec to get nanosecond remainder
- MOVW $1000000000, R5 // 10**9
- MULLU R6,R5,(R9,R8) // R9:R8 = R7:R6 * R5
- MULA R7,R5,R9,R9
- SUB.S R8,R1 // R2:R1 -= R9:R8
- SBC R9,R2
-
- // because reciprocal was a truncated repeating fraction, quotient
- // may be slightly too small -- adjust to make remainder < 10**9
- CMP R5,R1 // if remainder > 10**9
- SUB.HS R5,R1 // remainder -= 10**9
- ADD.HS $1,R6 // sec += 1
-
- MOVW R6,sec_lo+0(FP)
- MOVW R7,sec_hi+4(FP)
- MOVW R1,nsec+8(FP)
- RET
-
diff --git a/src/runtime/trace/flightrecorder.go b/src/runtime/trace/flightrecorder.go
index 24163f32b21200..b0b75ceb60bc0f 100644
--- a/src/runtime/trace/flightrecorder.go
+++ b/src/runtime/trace/flightrecorder.go
@@ -158,7 +158,7 @@ type FlightRecorderConfig struct {
//
// The flight recorder will strive to promptly discard events older than the minimum age,
// but older events may appear in the window snapshot. The age setting will always be
- // overridden by MaxSize.
+ // overridden by MaxBytes.
//
// If this is 0, the minimum age is implementation defined, but can be assumed to be on the order
// of seconds.
diff --git a/src/runtime/vdso_linux.go b/src/runtime/vdso_linux.go
index 72b17ce4ac4efa..c068eede777918 100644
--- a/src/runtime/vdso_linux.go
+++ b/src/runtime/vdso_linux.go
@@ -285,7 +285,7 @@ func vdsoauxv(tag, val uintptr) {
}
}
-// vdsoMarker reports whether PC is on the VDSO page.
+// inVDSOPage reports whether PC is on the VDSO page.
//
//go:nosplit
func inVDSOPage(pc uintptr) bool {
diff --git a/src/runtime/vdso_linux_test.go b/src/runtime/vdso_linux_test.go
new file mode 100644
index 00000000000000..313dd6e7185a6a
--- /dev/null
+++ b/src/runtime/vdso_linux_test.go
@@ -0,0 +1,52 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux && (386 || amd64 || arm || arm64 || loong64 || mips64 || mips64le || ppc64 || ppc64le || riscv64 || s390x)
+
+package runtime_test
+
+import (
+ "runtime"
+ "testing"
+)
+
+// DT_GNU_HASH hash function.
+func gnuHash(s string) uint32 {
+ h := uint32(5381)
+ for _, r := range s {
+ h = (h << 5) + h + uint32(r)
+ }
+ return h
+}
+
+// DT_HASH hash function.
+func symHash(s string) uint32 {
+ var h, g uint32
+ for _, r := range s {
+ h = (h << 4) + uint32(r)
+ g = h & 0xf0000000
+ if g != 0 {
+ h ^= g >> 24
+ }
+ h &^= g
+ }
+ return h
+}
+
+func TestVDSOHash(t *testing.T) {
+ for _, sym := range runtime.VDSOSymbolKeys() {
+ name := sym.Name()
+ t.Run(name, func(t *testing.T) {
+ want := symHash(name)
+ if sym.SymHash() != want {
+ t.Errorf("SymHash got %#x want %#x", sym.SymHash(), want)
+ }
+
+ want = gnuHash(name)
+ if sym.GNUHash() != want {
+ t.Errorf("GNUHash got %#x want %#x", sym.GNUHash(), want)
+ }
+ })
+ }
+}
diff --git a/src/runtime/wincallback.go b/src/runtime/wincallback.go
index 14847db3fdbc78..7f0ac70bfd1ef3 100644
--- a/src/runtime/wincallback.go
+++ b/src/runtime/wincallback.go
@@ -47,34 +47,6 @@ TEXT runtime·callbackasm(SB),NOSPLIT|NOFRAME,$0
}
}
-func genasmArm() {
- var buf bytes.Buffer
-
- buf.WriteString(`// Code generated by wincallback.go using 'go generate'. DO NOT EDIT.
-
-// External code calls into callbackasm at an offset corresponding
-// to the callback index. Callbackasm is a table of MOV and B instructions.
-// The MOV instruction loads R12 with the callback index, and the
-// B instruction branches to callbackasm1.
-// callbackasm1 takes the callback index from R12 and
-// indexes into an array that stores information about each callback.
-// It then calls the Go implementation for that callback.
-#include "textflag.h"
-
-TEXT runtime·callbackasm(SB),NOSPLIT|NOFRAME,$0
-`)
- for i := 0; i < maxCallback; i++ {
- fmt.Fprintf(&buf, "\tMOVW\t$%d, R12\n", i)
- buf.WriteString("\tB\truntime·callbackasm1(SB)\n")
- }
-
- err := os.WriteFile("zcallback_windows_arm.s", buf.Bytes(), 0666)
- if err != nil {
- fmt.Fprintf(os.Stderr, "wincallback: %s\n", err)
- os.Exit(2)
- }
-}
-
func genasmArm64() {
var buf bytes.Buffer
@@ -121,7 +93,6 @@ const cb_max = %d // maximum number of windows callbacks allowed
func main() {
genasm386Amd64()
- genasmArm()
genasmArm64()
gengo()
}
diff --git a/src/runtime/zcallback_windows_arm.s b/src/runtime/zcallback_windows_arm.s
deleted file mode 100644
index f943d84cbfe5fd..00000000000000
--- a/src/runtime/zcallback_windows_arm.s
+++ /dev/null
@@ -1,4012 +0,0 @@
-// Code generated by wincallback.go using 'go generate'. DO NOT EDIT.
-
-// External code calls into callbackasm at an offset corresponding
-// to the callback index. Callbackasm is a table of MOV and B instructions.
-// The MOV instruction loads R12 with the callback index, and the
-// B instruction branches to callbackasm1.
-// callbackasm1 takes the callback index from R12 and
-// indexes into an array that stores information about each callback.
-// It then calls the Go implementation for that callback.
-#include "textflag.h"
-
-TEXT runtime·callbackasm(SB),NOSPLIT|NOFRAME,$0
- MOVW $0, R12
- B runtime·callbackasm1(SB)
- MOVW $1, R12
- B runtime·callbackasm1(SB)
- MOVW $2, R12
- B runtime·callbackasm1(SB)
- MOVW $3, R12
- B runtime·callbackasm1(SB)
- MOVW $4, R12
- B runtime·callbackasm1(SB)
- MOVW $5, R12
- B runtime·callbackasm1(SB)
- MOVW $6, R12
- B runtime·callbackasm1(SB)
- MOVW $7, R12
- B runtime·callbackasm1(SB)
- MOVW $8, R12
- B runtime·callbackasm1(SB)
- MOVW $9, R12
- B runtime·callbackasm1(SB)
- MOVW $10, R12
- B runtime·callbackasm1(SB)
- MOVW $11, R12
- B runtime·callbackasm1(SB)
- MOVW $12, R12
- B runtime·callbackasm1(SB)
- MOVW $13, R12
- B runtime·callbackasm1(SB)
- MOVW $14, R12
- B runtime·callbackasm1(SB)
- MOVW $15, R12
- B runtime·callbackasm1(SB)
- MOVW $16, R12
- B runtime·callbackasm1(SB)
- MOVW $17, R12
- B runtime·callbackasm1(SB)
- MOVW $18, R12
- B runtime·callbackasm1(SB)
- MOVW $19, R12
- B runtime·callbackasm1(SB)
- MOVW $20, R12
- B runtime·callbackasm1(SB)
- MOVW $21, R12
- B runtime·callbackasm1(SB)
- MOVW $22, R12
- B runtime·callbackasm1(SB)
- MOVW $23, R12
- B runtime·callbackasm1(SB)
- MOVW $24, R12
- B runtime·callbackasm1(SB)
- MOVW $25, R12
- B runtime·callbackasm1(SB)
- MOVW $26, R12
- B runtime·callbackasm1(SB)
- MOVW $27, R12
- B runtime·callbackasm1(SB)
- MOVW $28, R12
- B runtime·callbackasm1(SB)
- MOVW $29, R12
- B runtime·callbackasm1(SB)
- MOVW $30, R12
- B runtime·callbackasm1(SB)
- MOVW $31, R12
- B runtime·callbackasm1(SB)
- MOVW $32, R12
- B runtime·callbackasm1(SB)
- MOVW $33, R12
- B runtime·callbackasm1(SB)
- MOVW $34, R12
- B runtime·callbackasm1(SB)
- MOVW $35, R12
- B runtime·callbackasm1(SB)
- MOVW $36, R12
- B runtime·callbackasm1(SB)
- MOVW $37, R12
- B runtime·callbackasm1(SB)
- MOVW $38, R12
- B runtime·callbackasm1(SB)
- MOVW $39, R12
- B runtime·callbackasm1(SB)
- MOVW $40, R12
- B runtime·callbackasm1(SB)
- MOVW $41, R12
- B runtime·callbackasm1(SB)
- MOVW $42, R12
- B runtime·callbackasm1(SB)
- MOVW $43, R12
- B runtime·callbackasm1(SB)
- MOVW $44, R12
- B runtime·callbackasm1(SB)
- MOVW $45, R12
- B runtime·callbackasm1(SB)
- MOVW $46, R12
- B runtime·callbackasm1(SB)
- MOVW $47, R12
- B runtime·callbackasm1(SB)
- MOVW $48, R12
- B runtime·callbackasm1(SB)
- MOVW $49, R12
- B runtime·callbackasm1(SB)
- MOVW $50, R12
- B runtime·callbackasm1(SB)
- MOVW $51, R12
- B runtime·callbackasm1(SB)
- MOVW $52, R12
- B runtime·callbackasm1(SB)
- MOVW $53, R12
- B runtime·callbackasm1(SB)
- MOVW $54, R12
- B runtime·callbackasm1(SB)
- MOVW $55, R12
- B runtime·callbackasm1(SB)
- MOVW $56, R12
- B runtime·callbackasm1(SB)
- MOVW $57, R12
- B runtime·callbackasm1(SB)
- MOVW $58, R12
- B runtime·callbackasm1(SB)
- MOVW $59, R12
- B runtime·callbackasm1(SB)
- MOVW $60, R12
- B runtime·callbackasm1(SB)
- MOVW $61, R12
- B runtime·callbackasm1(SB)
- MOVW $62, R12
- B runtime·callbackasm1(SB)
- MOVW $63, R12
- B runtime·callbackasm1(SB)
- MOVW $64, R12
- B runtime·callbackasm1(SB)
- MOVW $65, R12
- B runtime·callbackasm1(SB)
- MOVW $66, R12
- B runtime·callbackasm1(SB)
- MOVW $67, R12
- B runtime·callbackasm1(SB)
- MOVW $68, R12
- B runtime·callbackasm1(SB)
- MOVW $69, R12
- B runtime·callbackasm1(SB)
- MOVW $70, R12
- B runtime·callbackasm1(SB)
- MOVW $71, R12
- B runtime·callbackasm1(SB)
- MOVW $72, R12
- B runtime·callbackasm1(SB)
- MOVW $73, R12
- B runtime·callbackasm1(SB)
- MOVW $74, R12
- B runtime·callbackasm1(SB)
- MOVW $75, R12
- B runtime·callbackasm1(SB)
- MOVW $76, R12
- B runtime·callbackasm1(SB)
- MOVW $77, R12
- B runtime·callbackasm1(SB)
- MOVW $78, R12
- B runtime·callbackasm1(SB)
- MOVW $79, R12
- B runtime·callbackasm1(SB)
- MOVW $80, R12
- B runtime·callbackasm1(SB)
- MOVW $81, R12
- B runtime·callbackasm1(SB)
- MOVW $82, R12
- B runtime·callbackasm1(SB)
- MOVW $83, R12
- B runtime·callbackasm1(SB)
- MOVW $84, R12
- B runtime·callbackasm1(SB)
- MOVW $85, R12
- B runtime·callbackasm1(SB)
- MOVW $86, R12
- B runtime·callbackasm1(SB)
- MOVW $87, R12
- B runtime·callbackasm1(SB)
- MOVW $88, R12
- B runtime·callbackasm1(SB)
- MOVW $89, R12
- B runtime·callbackasm1(SB)
- MOVW $90, R12
- B runtime·callbackasm1(SB)
- MOVW $91, R12
- B runtime·callbackasm1(SB)
- MOVW $92, R12
- B runtime·callbackasm1(SB)
- MOVW $93, R12
- B runtime·callbackasm1(SB)
- MOVW $94, R12
- B runtime·callbackasm1(SB)
- MOVW $95, R12
- B runtime·callbackasm1(SB)
- MOVW $96, R12
- B runtime·callbackasm1(SB)
- MOVW $97, R12
- B runtime·callbackasm1(SB)
- MOVW $98, R12
- B runtime·callbackasm1(SB)
- MOVW $99, R12
- B runtime·callbackasm1(SB)
- MOVW $100, R12
- B runtime·callbackasm1(SB)
- MOVW $101, R12
- B runtime·callbackasm1(SB)
- MOVW $102, R12
- B runtime·callbackasm1(SB)
- MOVW $103, R12
- B runtime·callbackasm1(SB)
- MOVW $104, R12
- B runtime·callbackasm1(SB)
- MOVW $105, R12
- B runtime·callbackasm1(SB)
- MOVW $106, R12
- B runtime·callbackasm1(SB)
- MOVW $107, R12
- B runtime·callbackasm1(SB)
- MOVW $108, R12
- B runtime·callbackasm1(SB)
- MOVW $109, R12
- B runtime·callbackasm1(SB)
- MOVW $110, R12
- B runtime·callbackasm1(SB)
- MOVW $111, R12
- B runtime·callbackasm1(SB)
- MOVW $112, R12
- B runtime·callbackasm1(SB)
- MOVW $113, R12
- B runtime·callbackasm1(SB)
- MOVW $114, R12
- B runtime·callbackasm1(SB)
- MOVW $115, R12
- B runtime·callbackasm1(SB)
- MOVW $116, R12
- B runtime·callbackasm1(SB)
- MOVW $117, R12
- B runtime·callbackasm1(SB)
- MOVW $118, R12
- B runtime·callbackasm1(SB)
- MOVW $119, R12
- B runtime·callbackasm1(SB)
- MOVW $120, R12
- B runtime·callbackasm1(SB)
- MOVW $121, R12
- B runtime·callbackasm1(SB)
- MOVW $122, R12
- B runtime·callbackasm1(SB)
- MOVW $123, R12
- B runtime·callbackasm1(SB)
- MOVW $124, R12
- B runtime·callbackasm1(SB)
- MOVW $125, R12
- B runtime·callbackasm1(SB)
- MOVW $126, R12
- B runtime·callbackasm1(SB)
- MOVW $127, R12
- B runtime·callbackasm1(SB)
- MOVW $128, R12
- B runtime·callbackasm1(SB)
- MOVW $129, R12
- B runtime·callbackasm1(SB)
- MOVW $130, R12
- B runtime·callbackasm1(SB)
- MOVW $131, R12
- B runtime·callbackasm1(SB)
- MOVW $132, R12
- B runtime·callbackasm1(SB)
- MOVW $133, R12
- B runtime·callbackasm1(SB)
- MOVW $134, R12
- B runtime·callbackasm1(SB)
- MOVW $135, R12
- B runtime·callbackasm1(SB)
- MOVW $136, R12
- B runtime·callbackasm1(SB)
- MOVW $137, R12
- B runtime·callbackasm1(SB)
- MOVW $138, R12
- B runtime·callbackasm1(SB)
- MOVW $139, R12
- B runtime·callbackasm1(SB)
- MOVW $140, R12
- B runtime·callbackasm1(SB)
- MOVW $141, R12
- B runtime·callbackasm1(SB)
- MOVW $142, R12
- B runtime·callbackasm1(SB)
- MOVW $143, R12
- B runtime·callbackasm1(SB)
- MOVW $144, R12
- B runtime·callbackasm1(SB)
- MOVW $145, R12
- B runtime·callbackasm1(SB)
- MOVW $146, R12
- B runtime·callbackasm1(SB)
- MOVW $147, R12
- B runtime·callbackasm1(SB)
- MOVW $148, R12
- B runtime·callbackasm1(SB)
- MOVW $149, R12
- B runtime·callbackasm1(SB)
- MOVW $150, R12
- B runtime·callbackasm1(SB)
- MOVW $151, R12
- B runtime·callbackasm1(SB)
- MOVW $152, R12
- B runtime·callbackasm1(SB)
- MOVW $153, R12
- B runtime·callbackasm1(SB)
- MOVW $154, R12
- B runtime·callbackasm1(SB)
- MOVW $155, R12
- B runtime·callbackasm1(SB)
- MOVW $156, R12
- B runtime·callbackasm1(SB)
- MOVW $157, R12
- B runtime·callbackasm1(SB)
- MOVW $158, R12
- B runtime·callbackasm1(SB)
- MOVW $159, R12
- B runtime·callbackasm1(SB)
- MOVW $160, R12
- B runtime·callbackasm1(SB)
- MOVW $161, R12
- B runtime·callbackasm1(SB)
- MOVW $162, R12
- B runtime·callbackasm1(SB)
- MOVW $163, R12
- B runtime·callbackasm1(SB)
- MOVW $164, R12
- B runtime·callbackasm1(SB)
- MOVW $165, R12
- B runtime·callbackasm1(SB)
- MOVW $166, R12
- B runtime·callbackasm1(SB)
- MOVW $167, R12
- B runtime·callbackasm1(SB)
- MOVW $168, R12
- B runtime·callbackasm1(SB)
- MOVW $169, R12
- B runtime·callbackasm1(SB)
- MOVW $170, R12
- B runtime·callbackasm1(SB)
- MOVW $171, R12
- B runtime·callbackasm1(SB)
- MOVW $172, R12
- B runtime·callbackasm1(SB)
- MOVW $173, R12
- B runtime·callbackasm1(SB)
- MOVW $174, R12
- B runtime·callbackasm1(SB)
- MOVW $175, R12
- B runtime·callbackasm1(SB)
- MOVW $176, R12
- B runtime·callbackasm1(SB)
- MOVW $177, R12
- B runtime·callbackasm1(SB)
- MOVW $178, R12
- B runtime·callbackasm1(SB)
- MOVW $179, R12
- B runtime·callbackasm1(SB)
- MOVW $180, R12
- B runtime·callbackasm1(SB)
- MOVW $181, R12
- B runtime·callbackasm1(SB)
- MOVW $182, R12
- B runtime·callbackasm1(SB)
- MOVW $183, R12
- B runtime·callbackasm1(SB)
- MOVW $184, R12
- B runtime·callbackasm1(SB)
- MOVW $185, R12
- B runtime·callbackasm1(SB)
- MOVW $186, R12
- B runtime·callbackasm1(SB)
- MOVW $187, R12
- B runtime·callbackasm1(SB)
- MOVW $188, R12
- B runtime·callbackasm1(SB)
- MOVW $189, R12
- B runtime·callbackasm1(SB)
- MOVW $190, R12
- B runtime·callbackasm1(SB)
- MOVW $191, R12
- B runtime·callbackasm1(SB)
- MOVW $192, R12
- B runtime·callbackasm1(SB)
- MOVW $193, R12
- B runtime·callbackasm1(SB)
- MOVW $194, R12
- B runtime·callbackasm1(SB)
- MOVW $195, R12
- B runtime·callbackasm1(SB)
- MOVW $196, R12
- B runtime·callbackasm1(SB)
- MOVW $197, R12
- B runtime·callbackasm1(SB)
- MOVW $198, R12
- B runtime·callbackasm1(SB)
- MOVW $199, R12
- B runtime·callbackasm1(SB)
- MOVW $200, R12
- B runtime·callbackasm1(SB)
- MOVW $201, R12
- B runtime·callbackasm1(SB)
- MOVW $202, R12
- B runtime·callbackasm1(SB)
- MOVW $203, R12
- B runtime·callbackasm1(SB)
- MOVW $204, R12
- B runtime·callbackasm1(SB)
- MOVW $205, R12
- B runtime·callbackasm1(SB)
- MOVW $206, R12
- B runtime·callbackasm1(SB)
- MOVW $207, R12
- B runtime·callbackasm1(SB)
- MOVW $208, R12
- B runtime·callbackasm1(SB)
- MOVW $209, R12
- B runtime·callbackasm1(SB)
- MOVW $210, R12
- B runtime·callbackasm1(SB)
- MOVW $211, R12
- B runtime·callbackasm1(SB)
- MOVW $212, R12
- B runtime·callbackasm1(SB)
- MOVW $213, R12
- B runtime·callbackasm1(SB)
- MOVW $214, R12
- B runtime·callbackasm1(SB)
- MOVW $215, R12
- B runtime·callbackasm1(SB)
- MOVW $216, R12
- B runtime·callbackasm1(SB)
- MOVW $217, R12
- B runtime·callbackasm1(SB)
- MOVW $218, R12
- B runtime·callbackasm1(SB)
- MOVW $219, R12
- B runtime·callbackasm1(SB)
- MOVW $220, R12
- B runtime·callbackasm1(SB)
- MOVW $221, R12
- B runtime·callbackasm1(SB)
- MOVW $222, R12
- B runtime·callbackasm1(SB)
- MOVW $223, R12
- B runtime·callbackasm1(SB)
- MOVW $224, R12
- B runtime·callbackasm1(SB)
- MOVW $225, R12
- B runtime·callbackasm1(SB)
- MOVW $226, R12
- B runtime·callbackasm1(SB)
- MOVW $227, R12
- B runtime·callbackasm1(SB)
- MOVW $228, R12
- B runtime·callbackasm1(SB)
- MOVW $229, R12
- B runtime·callbackasm1(SB)
- MOVW $230, R12
- B runtime·callbackasm1(SB)
- MOVW $231, R12
- B runtime·callbackasm1(SB)
- MOVW $232, R12
- B runtime·callbackasm1(SB)
- MOVW $233, R12
- B runtime·callbackasm1(SB)
- MOVW $234, R12
- B runtime·callbackasm1(SB)
- MOVW $235, R12
- B runtime·callbackasm1(SB)
- MOVW $236, R12
- B runtime·callbackasm1(SB)
- MOVW $237, R12
- B runtime·callbackasm1(SB)
- MOVW $238, R12
- B runtime·callbackasm1(SB)
- MOVW $239, R12
- B runtime·callbackasm1(SB)
- MOVW $240, R12
- B runtime·callbackasm1(SB)
- MOVW $241, R12
- B runtime·callbackasm1(SB)
- MOVW $242, R12
- B runtime·callbackasm1(SB)
- MOVW $243, R12
- B runtime·callbackasm1(SB)
- MOVW $244, R12
- B runtime·callbackasm1(SB)
- MOVW $245, R12
- B runtime·callbackasm1(SB)
- MOVW $246, R12
- B runtime·callbackasm1(SB)
- MOVW $247, R12
- B runtime·callbackasm1(SB)
- MOVW $248, R12
- B runtime·callbackasm1(SB)
- MOVW $249, R12
- B runtime·callbackasm1(SB)
- MOVW $250, R12
- B runtime·callbackasm1(SB)
- MOVW $251, R12
- B runtime·callbackasm1(SB)
- MOVW $252, R12
- B runtime·callbackasm1(SB)
- MOVW $253, R12
- B runtime·callbackasm1(SB)
- MOVW $254, R12
- B runtime·callbackasm1(SB)
- MOVW $255, R12
- B runtime·callbackasm1(SB)
- MOVW $256, R12
- B runtime·callbackasm1(SB)
- MOVW $257, R12
- B runtime·callbackasm1(SB)
- MOVW $258, R12
- B runtime·callbackasm1(SB)
- MOVW $259, R12
- B runtime·callbackasm1(SB)
- MOVW $260, R12
- B runtime·callbackasm1(SB)
- MOVW $261, R12
- B runtime·callbackasm1(SB)
- MOVW $262, R12
- B runtime·callbackasm1(SB)
- MOVW $263, R12
- B runtime·callbackasm1(SB)
- MOVW $264, R12
- B runtime·callbackasm1(SB)
- MOVW $265, R12
- B runtime·callbackasm1(SB)
- MOVW $266, R12
- B runtime·callbackasm1(SB)
- MOVW $267, R12
- B runtime·callbackasm1(SB)
- MOVW $268, R12
- B runtime·callbackasm1(SB)
- MOVW $269, R12
- B runtime·callbackasm1(SB)
- MOVW $270, R12
- B runtime·callbackasm1(SB)
- MOVW $271, R12
- B runtime·callbackasm1(SB)
- MOVW $272, R12
- B runtime·callbackasm1(SB)
- MOVW $273, R12
- B runtime·callbackasm1(SB)
- MOVW $274, R12
- B runtime·callbackasm1(SB)
- MOVW $275, R12
- B runtime·callbackasm1(SB)
- MOVW $276, R12
- B runtime·callbackasm1(SB)
- MOVW $277, R12
- B runtime·callbackasm1(SB)
- MOVW $278, R12
- B runtime·callbackasm1(SB)
- MOVW $279, R12
- B runtime·callbackasm1(SB)
- MOVW $280, R12
- B runtime·callbackasm1(SB)
- MOVW $281, R12
- B runtime·callbackasm1(SB)
- MOVW $282, R12
- B runtime·callbackasm1(SB)
- MOVW $283, R12
- B runtime·callbackasm1(SB)
- MOVW $284, R12
- B runtime·callbackasm1(SB)
- MOVW $285, R12
- B runtime·callbackasm1(SB)
- MOVW $286, R12
- B runtime·callbackasm1(SB)
- MOVW $287, R12
- B runtime·callbackasm1(SB)
- MOVW $288, R12
- B runtime·callbackasm1(SB)
- MOVW $289, R12
- B runtime·callbackasm1(SB)
- MOVW $290, R12
- B runtime·callbackasm1(SB)
- MOVW $291, R12
- B runtime·callbackasm1(SB)
- MOVW $292, R12
- B runtime·callbackasm1(SB)
- MOVW $293, R12
- B runtime·callbackasm1(SB)
- MOVW $294, R12
- B runtime·callbackasm1(SB)
- MOVW $295, R12
- B runtime·callbackasm1(SB)
- MOVW $296, R12
- B runtime·callbackasm1(SB)
- MOVW $297, R12
- B runtime·callbackasm1(SB)
- MOVW $298, R12
- B runtime·callbackasm1(SB)
- MOVW $299, R12
- B runtime·callbackasm1(SB)
- MOVW $300, R12
- B runtime·callbackasm1(SB)
- MOVW $301, R12
- B runtime·callbackasm1(SB)
- MOVW $302, R12
- B runtime·callbackasm1(SB)
- MOVW $303, R12
- B runtime·callbackasm1(SB)
- MOVW $304, R12
- B runtime·callbackasm1(SB)
- MOVW $305, R12
- B runtime·callbackasm1(SB)
- MOVW $306, R12
- B runtime·callbackasm1(SB)
- MOVW $307, R12
- B runtime·callbackasm1(SB)
- MOVW $308, R12
- B runtime·callbackasm1(SB)
- MOVW $309, R12
- B runtime·callbackasm1(SB)
- MOVW $310, R12
- B runtime·callbackasm1(SB)
- MOVW $311, R12
- B runtime·callbackasm1(SB)
- MOVW $312, R12
- B runtime·callbackasm1(SB)
- MOVW $313, R12
- B runtime·callbackasm1(SB)
- MOVW $314, R12
- B runtime·callbackasm1(SB)
- MOVW $315, R12
- B runtime·callbackasm1(SB)
- MOVW $316, R12
- B runtime·callbackasm1(SB)
- MOVW $317, R12
- B runtime·callbackasm1(SB)
- MOVW $318, R12
- B runtime·callbackasm1(SB)
- MOVW $319, R12
- B runtime·callbackasm1(SB)
- MOVW $320, R12
- B runtime·callbackasm1(SB)
- MOVW $321, R12
- B runtime·callbackasm1(SB)
- MOVW $322, R12
- B runtime·callbackasm1(SB)
- MOVW $323, R12
- B runtime·callbackasm1(SB)
- MOVW $324, R12
- B runtime·callbackasm1(SB)
- MOVW $325, R12
- B runtime·callbackasm1(SB)
- MOVW $326, R12
- B runtime·callbackasm1(SB)
- MOVW $327, R12
- B runtime·callbackasm1(SB)
- MOVW $328, R12
- B runtime·callbackasm1(SB)
- MOVW $329, R12
- B runtime·callbackasm1(SB)
- MOVW $330, R12
- B runtime·callbackasm1(SB)
- MOVW $331, R12
- B runtime·callbackasm1(SB)
- MOVW $332, R12
- B runtime·callbackasm1(SB)
- MOVW $333, R12
- B runtime·callbackasm1(SB)
- MOVW $334, R12
- B runtime·callbackasm1(SB)
- MOVW $335, R12
- B runtime·callbackasm1(SB)
- MOVW $336, R12
- B runtime·callbackasm1(SB)
- MOVW $337, R12
- B runtime·callbackasm1(SB)
- MOVW $338, R12
- B runtime·callbackasm1(SB)
- MOVW $339, R12
- B runtime·callbackasm1(SB)
- MOVW $340, R12
- B runtime·callbackasm1(SB)
- MOVW $341, R12
- B runtime·callbackasm1(SB)
- MOVW $342, R12
- B runtime·callbackasm1(SB)
- MOVW $343, R12
- B runtime·callbackasm1(SB)
- MOVW $344, R12
- B runtime·callbackasm1(SB)
- MOVW $345, R12
- B runtime·callbackasm1(SB)
- MOVW $346, R12
- B runtime·callbackasm1(SB)
- MOVW $347, R12
- B runtime·callbackasm1(SB)
- MOVW $348, R12
- B runtime·callbackasm1(SB)
- MOVW $349, R12
- B runtime·callbackasm1(SB)
- MOVW $350, R12
- B runtime·callbackasm1(SB)
- MOVW $351, R12
- B runtime·callbackasm1(SB)
- MOVW $352, R12
- B runtime·callbackasm1(SB)
- MOVW $353, R12
- B runtime·callbackasm1(SB)
- MOVW $354, R12
- B runtime·callbackasm1(SB)
- MOVW $355, R12
- B runtime·callbackasm1(SB)
- MOVW $356, R12
- B runtime·callbackasm1(SB)
- MOVW $357, R12
- B runtime·callbackasm1(SB)
- MOVW $358, R12
- B runtime·callbackasm1(SB)
- MOVW $359, R12
- B runtime·callbackasm1(SB)
- MOVW $360, R12
- B runtime·callbackasm1(SB)
- MOVW $361, R12
- B runtime·callbackasm1(SB)
- MOVW $362, R12
- B runtime·callbackasm1(SB)
- MOVW $363, R12
- B runtime·callbackasm1(SB)
- MOVW $364, R12
- B runtime·callbackasm1(SB)
- MOVW $365, R12
- B runtime·callbackasm1(SB)
- MOVW $366, R12
- B runtime·callbackasm1(SB)
- MOVW $367, R12
- B runtime·callbackasm1(SB)
- MOVW $368, R12
- B runtime·callbackasm1(SB)
- MOVW $369, R12
- B runtime·callbackasm1(SB)
- MOVW $370, R12
- B runtime·callbackasm1(SB)
- MOVW $371, R12
- B runtime·callbackasm1(SB)
- MOVW $372, R12
- B runtime·callbackasm1(SB)
- MOVW $373, R12
- B runtime·callbackasm1(SB)
- MOVW $374, R12
- B runtime·callbackasm1(SB)
- MOVW $375, R12
- B runtime·callbackasm1(SB)
- MOVW $376, R12
- B runtime·callbackasm1(SB)
- MOVW $377, R12
- B runtime·callbackasm1(SB)
- MOVW $378, R12
- B runtime·callbackasm1(SB)
- MOVW $379, R12
- B runtime·callbackasm1(SB)
- MOVW $380, R12
- B runtime·callbackasm1(SB)
- MOVW $381, R12
- B runtime·callbackasm1(SB)
- MOVW $382, R12
- B runtime·callbackasm1(SB)
- MOVW $383, R12
- B runtime·callbackasm1(SB)
- MOVW $384, R12
- B runtime·callbackasm1(SB)
- MOVW $385, R12
- B runtime·callbackasm1(SB)
- MOVW $386, R12
- B runtime·callbackasm1(SB)
- MOVW $387, R12
- B runtime·callbackasm1(SB)
- MOVW $388, R12
- B runtime·callbackasm1(SB)
- MOVW $389, R12
- B runtime·callbackasm1(SB)
- MOVW $390, R12
- B runtime·callbackasm1(SB)
- MOVW $391, R12
- B runtime·callbackasm1(SB)
- MOVW $392, R12
- B runtime·callbackasm1(SB)
- MOVW $393, R12
- B runtime·callbackasm1(SB)
- MOVW $394, R12
- B runtime·callbackasm1(SB)
- MOVW $395, R12
- B runtime·callbackasm1(SB)
- MOVW $396, R12
- B runtime·callbackasm1(SB)
- MOVW $397, R12
- B runtime·callbackasm1(SB)
- MOVW $398, R12
- B runtime·callbackasm1(SB)
- MOVW $399, R12
- B runtime·callbackasm1(SB)
- MOVW $400, R12
- B runtime·callbackasm1(SB)
- MOVW $401, R12
- B runtime·callbackasm1(SB)
- MOVW $402, R12
- B runtime·callbackasm1(SB)
- MOVW $403, R12
- B runtime·callbackasm1(SB)
- MOVW $404, R12
- B runtime·callbackasm1(SB)
- MOVW $405, R12
- B runtime·callbackasm1(SB)
- MOVW $406, R12
- B runtime·callbackasm1(SB)
- MOVW $407, R12
- B runtime·callbackasm1(SB)
- MOVW $408, R12
- B runtime·callbackasm1(SB)
- MOVW $409, R12
- B runtime·callbackasm1(SB)
- MOVW $410, R12
- B runtime·callbackasm1(SB)
- MOVW $411, R12
- B runtime·callbackasm1(SB)
- MOVW $412, R12
- B runtime·callbackasm1(SB)
- MOVW $413, R12
- B runtime·callbackasm1(SB)
- MOVW $414, R12
- B runtime·callbackasm1(SB)
- MOVW $415, R12
- B runtime·callbackasm1(SB)
- MOVW $416, R12
- B runtime·callbackasm1(SB)
- MOVW $417, R12
- B runtime·callbackasm1(SB)
- MOVW $418, R12
- B runtime·callbackasm1(SB)
- MOVW $419, R12
- B runtime·callbackasm1(SB)
- MOVW $420, R12
- B runtime·callbackasm1(SB)
- MOVW $421, R12
- B runtime·callbackasm1(SB)
- MOVW $422, R12
- B runtime·callbackasm1(SB)
- MOVW $423, R12
- B runtime·callbackasm1(SB)
- MOVW $424, R12
- B runtime·callbackasm1(SB)
- MOVW $425, R12
- B runtime·callbackasm1(SB)
- MOVW $426, R12
- B runtime·callbackasm1(SB)
- MOVW $427, R12
- B runtime·callbackasm1(SB)
- MOVW $428, R12
- B runtime·callbackasm1(SB)
- MOVW $429, R12
- B runtime·callbackasm1(SB)
- MOVW $430, R12
- B runtime·callbackasm1(SB)
- MOVW $431, R12
- B runtime·callbackasm1(SB)
- MOVW $432, R12
- B runtime·callbackasm1(SB)
- MOVW $433, R12
- B runtime·callbackasm1(SB)
- MOVW $434, R12
- B runtime·callbackasm1(SB)
- MOVW $435, R12
- B runtime·callbackasm1(SB)
- MOVW $436, R12
- B runtime·callbackasm1(SB)
- MOVW $437, R12
- B runtime·callbackasm1(SB)
- MOVW $438, R12
- B runtime·callbackasm1(SB)
- MOVW $439, R12
- B runtime·callbackasm1(SB)
- MOVW $440, R12
- B runtime·callbackasm1(SB)
- MOVW $441, R12
- B runtime·callbackasm1(SB)
- MOVW $442, R12
- B runtime·callbackasm1(SB)
- MOVW $443, R12
- B runtime·callbackasm1(SB)
- MOVW $444, R12
- B runtime·callbackasm1(SB)
- MOVW $445, R12
- B runtime·callbackasm1(SB)
- MOVW $446, R12
- B runtime·callbackasm1(SB)
- MOVW $447, R12
- B runtime·callbackasm1(SB)
- MOVW $448, R12
- B runtime·callbackasm1(SB)
- MOVW $449, R12
- B runtime·callbackasm1(SB)
- MOVW $450, R12
- B runtime·callbackasm1(SB)
- MOVW $451, R12
- B runtime·callbackasm1(SB)
- MOVW $452, R12
- B runtime·callbackasm1(SB)
- MOVW $453, R12
- B runtime·callbackasm1(SB)
- MOVW $454, R12
- B runtime·callbackasm1(SB)
- MOVW $455, R12
- B runtime·callbackasm1(SB)
- MOVW $456, R12
- B runtime·callbackasm1(SB)
- MOVW $457, R12
- B runtime·callbackasm1(SB)
- MOVW $458, R12
- B runtime·callbackasm1(SB)
- MOVW $459, R12
- B runtime·callbackasm1(SB)
- MOVW $460, R12
- B runtime·callbackasm1(SB)
- MOVW $461, R12
- B runtime·callbackasm1(SB)
- MOVW $462, R12
- B runtime·callbackasm1(SB)
- MOVW $463, R12
- B runtime·callbackasm1(SB)
- MOVW $464, R12
- B runtime·callbackasm1(SB)
- MOVW $465, R12
- B runtime·callbackasm1(SB)
- MOVW $466, R12
- B runtime·callbackasm1(SB)
- MOVW $467, R12
- B runtime·callbackasm1(SB)
- MOVW $468, R12
- B runtime·callbackasm1(SB)
- MOVW $469, R12
- B runtime·callbackasm1(SB)
- MOVW $470, R12
- B runtime·callbackasm1(SB)
- MOVW $471, R12
- B runtime·callbackasm1(SB)
- MOVW $472, R12
- B runtime·callbackasm1(SB)
- MOVW $473, R12
- B runtime·callbackasm1(SB)
- MOVW $474, R12
- B runtime·callbackasm1(SB)
- MOVW $475, R12
- B runtime·callbackasm1(SB)
- MOVW $476, R12
- B runtime·callbackasm1(SB)
- MOVW $477, R12
- B runtime·callbackasm1(SB)
- MOVW $478, R12
- B runtime·callbackasm1(SB)
- MOVW $479, R12
- B runtime·callbackasm1(SB)
- MOVW $480, R12
- B runtime·callbackasm1(SB)
- MOVW $481, R12
- B runtime·callbackasm1(SB)
- MOVW $482, R12
- B runtime·callbackasm1(SB)
- MOVW $483, R12
- B runtime·callbackasm1(SB)
- MOVW $484, R12
- B runtime·callbackasm1(SB)
- MOVW $485, R12
- B runtime·callbackasm1(SB)
- MOVW $486, R12
- B runtime·callbackasm1(SB)
- MOVW $487, R12
- B runtime·callbackasm1(SB)
- MOVW $488, R12
- B runtime·callbackasm1(SB)
- MOVW $489, R12
- B runtime·callbackasm1(SB)
- MOVW $490, R12
- B runtime·callbackasm1(SB)
- MOVW $491, R12
- B runtime·callbackasm1(SB)
- MOVW $492, R12
- B runtime·callbackasm1(SB)
- MOVW $493, R12
- B runtime·callbackasm1(SB)
- MOVW $494, R12
- B runtime·callbackasm1(SB)
- MOVW $495, R12
- B runtime·callbackasm1(SB)
- MOVW $496, R12
- B runtime·callbackasm1(SB)
- MOVW $497, R12
- B runtime·callbackasm1(SB)
- MOVW $498, R12
- B runtime·callbackasm1(SB)
- MOVW $499, R12
- B runtime·callbackasm1(SB)
- MOVW $500, R12
- B runtime·callbackasm1(SB)
- MOVW $501, R12
- B runtime·callbackasm1(SB)
- MOVW $502, R12
- B runtime·callbackasm1(SB)
- MOVW $503, R12
- B runtime·callbackasm1(SB)
- MOVW $504, R12
- B runtime·callbackasm1(SB)
- MOVW $505, R12
- B runtime·callbackasm1(SB)
- MOVW $506, R12
- B runtime·callbackasm1(SB)
- MOVW $507, R12
- B runtime·callbackasm1(SB)
- MOVW $508, R12
- B runtime·callbackasm1(SB)
- MOVW $509, R12
- B runtime·callbackasm1(SB)
- MOVW $510, R12
- B runtime·callbackasm1(SB)
- MOVW $511, R12
- B runtime·callbackasm1(SB)
- MOVW $512, R12
- B runtime·callbackasm1(SB)
- MOVW $513, R12
- B runtime·callbackasm1(SB)
- MOVW $514, R12
- B runtime·callbackasm1(SB)
- MOVW $515, R12
- B runtime·callbackasm1(SB)
- MOVW $516, R12
- B runtime·callbackasm1(SB)
- MOVW $517, R12
- B runtime·callbackasm1(SB)
- MOVW $518, R12
- B runtime·callbackasm1(SB)
- MOVW $519, R12
- B runtime·callbackasm1(SB)
- MOVW $520, R12
- B runtime·callbackasm1(SB)
- MOVW $521, R12
- B runtime·callbackasm1(SB)
- MOVW $522, R12
- B runtime·callbackasm1(SB)
- MOVW $523, R12
- B runtime·callbackasm1(SB)
- MOVW $524, R12
- B runtime·callbackasm1(SB)
- MOVW $525, R12
- B runtime·callbackasm1(SB)
- MOVW $526, R12
- B runtime·callbackasm1(SB)
- MOVW $527, R12
- B runtime·callbackasm1(SB)
- MOVW $528, R12
- B runtime·callbackasm1(SB)
- MOVW $529, R12
- B runtime·callbackasm1(SB)
- MOVW $530, R12
- B runtime·callbackasm1(SB)
- MOVW $531, R12
- B runtime·callbackasm1(SB)
- MOVW $532, R12
- B runtime·callbackasm1(SB)
- MOVW $533, R12
- B runtime·callbackasm1(SB)
- MOVW $534, R12
- B runtime·callbackasm1(SB)
- MOVW $535, R12
- B runtime·callbackasm1(SB)
- MOVW $536, R12
- B runtime·callbackasm1(SB)
- MOVW $537, R12
- B runtime·callbackasm1(SB)
- MOVW $538, R12
- B runtime·callbackasm1(SB)
- MOVW $539, R12
- B runtime·callbackasm1(SB)
- MOVW $540, R12
- B runtime·callbackasm1(SB)
- MOVW $541, R12
- B runtime·callbackasm1(SB)
- MOVW $542, R12
- B runtime·callbackasm1(SB)
- MOVW $543, R12
- B runtime·callbackasm1(SB)
- MOVW $544, R12
- B runtime·callbackasm1(SB)
- MOVW $545, R12
- B runtime·callbackasm1(SB)
- MOVW $546, R12
- B runtime·callbackasm1(SB)
- MOVW $547, R12
- B runtime·callbackasm1(SB)
- MOVW $548, R12
- B runtime·callbackasm1(SB)
- MOVW $549, R12
- B runtime·callbackasm1(SB)
- MOVW $550, R12
- B runtime·callbackasm1(SB)
- MOVW $551, R12
- B runtime·callbackasm1(SB)
- MOVW $552, R12
- B runtime·callbackasm1(SB)
- MOVW $553, R12
- B runtime·callbackasm1(SB)
- MOVW $554, R12
- B runtime·callbackasm1(SB)
- MOVW $555, R12
- B runtime·callbackasm1(SB)
- MOVW $556, R12
- B runtime·callbackasm1(SB)
- MOVW $557, R12
- B runtime·callbackasm1(SB)
- MOVW $558, R12
- B runtime·callbackasm1(SB)
- MOVW $559, R12
- B runtime·callbackasm1(SB)
- MOVW $560, R12
- B runtime·callbackasm1(SB)
- MOVW $561, R12
- B runtime·callbackasm1(SB)
- MOVW $562, R12
- B runtime·callbackasm1(SB)
- MOVW $563, R12
- B runtime·callbackasm1(SB)
- MOVW $564, R12
- B runtime·callbackasm1(SB)
- MOVW $565, R12
- B runtime·callbackasm1(SB)
- MOVW $566, R12
- B runtime·callbackasm1(SB)
- MOVW $567, R12
- B runtime·callbackasm1(SB)
- MOVW $568, R12
- B runtime·callbackasm1(SB)
- MOVW $569, R12
- B runtime·callbackasm1(SB)
- MOVW $570, R12
- B runtime·callbackasm1(SB)
- MOVW $571, R12
- B runtime·callbackasm1(SB)
- MOVW $572, R12
- B runtime·callbackasm1(SB)
- MOVW $573, R12
- B runtime·callbackasm1(SB)
- MOVW $574, R12
- B runtime·callbackasm1(SB)
- MOVW $575, R12
- B runtime·callbackasm1(SB)
- MOVW $576, R12
- B runtime·callbackasm1(SB)
- MOVW $577, R12
- B runtime·callbackasm1(SB)
- MOVW $578, R12
- B runtime·callbackasm1(SB)
- MOVW $579, R12
- B runtime·callbackasm1(SB)
- MOVW $580, R12
- B runtime·callbackasm1(SB)
- MOVW $581, R12
- B runtime·callbackasm1(SB)
- MOVW $582, R12
- B runtime·callbackasm1(SB)
- MOVW $583, R12
- B runtime·callbackasm1(SB)
- MOVW $584, R12
- B runtime·callbackasm1(SB)
- MOVW $585, R12
- B runtime·callbackasm1(SB)
- MOVW $586, R12
- B runtime·callbackasm1(SB)
- MOVW $587, R12
- B runtime·callbackasm1(SB)
- MOVW $588, R12
- B runtime·callbackasm1(SB)
- MOVW $589, R12
- B runtime·callbackasm1(SB)
- MOVW $590, R12
- B runtime·callbackasm1(SB)
- MOVW $591, R12
- B runtime·callbackasm1(SB)
- MOVW $592, R12
- B runtime·callbackasm1(SB)
- MOVW $593, R12
- B runtime·callbackasm1(SB)
- MOVW $594, R12
- B runtime·callbackasm1(SB)
- MOVW $595, R12
- B runtime·callbackasm1(SB)
- MOVW $596, R12
- B runtime·callbackasm1(SB)
- MOVW $597, R12
- B runtime·callbackasm1(SB)
- MOVW $598, R12
- B runtime·callbackasm1(SB)
- MOVW $599, R12
- B runtime·callbackasm1(SB)
- MOVW $600, R12
- B runtime·callbackasm1(SB)
- MOVW $601, R12
- B runtime·callbackasm1(SB)
- MOVW $602, R12
- B runtime·callbackasm1(SB)
- MOVW $603, R12
- B runtime·callbackasm1(SB)
- MOVW $604, R12
- B runtime·callbackasm1(SB)
- MOVW $605, R12
- B runtime·callbackasm1(SB)
- MOVW $606, R12
- B runtime·callbackasm1(SB)
- MOVW $607, R12
- B runtime·callbackasm1(SB)
- MOVW $608, R12
- B runtime·callbackasm1(SB)
- MOVW $609, R12
- B runtime·callbackasm1(SB)
- MOVW $610, R12
- B runtime·callbackasm1(SB)
- MOVW $611, R12
- B runtime·callbackasm1(SB)
- MOVW $612, R12
- B runtime·callbackasm1(SB)
- MOVW $613, R12
- B runtime·callbackasm1(SB)
- MOVW $614, R12
- B runtime·callbackasm1(SB)
- MOVW $615, R12
- B runtime·callbackasm1(SB)
- MOVW $616, R12
- B runtime·callbackasm1(SB)
- MOVW $617, R12
- B runtime·callbackasm1(SB)
- MOVW $618, R12
- B runtime·callbackasm1(SB)
- MOVW $619, R12
- B runtime·callbackasm1(SB)
- MOVW $620, R12
- B runtime·callbackasm1(SB)
- MOVW $621, R12
- B runtime·callbackasm1(SB)
- MOVW $622, R12
- B runtime·callbackasm1(SB)
- MOVW $623, R12
- B runtime·callbackasm1(SB)
- MOVW $624, R12
- B runtime·callbackasm1(SB)
- MOVW $625, R12
- B runtime·callbackasm1(SB)
- MOVW $626, R12
- B runtime·callbackasm1(SB)
- MOVW $627, R12
- B runtime·callbackasm1(SB)
- MOVW $628, R12
- B runtime·callbackasm1(SB)
- MOVW $629, R12
- B runtime·callbackasm1(SB)
- MOVW $630, R12
- B runtime·callbackasm1(SB)
- MOVW $631, R12
- B runtime·callbackasm1(SB)
- MOVW $632, R12
- B runtime·callbackasm1(SB)
- MOVW $633, R12
- B runtime·callbackasm1(SB)
- MOVW $634, R12
- B runtime·callbackasm1(SB)
- MOVW $635, R12
- B runtime·callbackasm1(SB)
- MOVW $636, R12
- B runtime·callbackasm1(SB)
- MOVW $637, R12
- B runtime·callbackasm1(SB)
- MOVW $638, R12
- B runtime·callbackasm1(SB)
- MOVW $639, R12
- B runtime·callbackasm1(SB)
- MOVW $640, R12
- B runtime·callbackasm1(SB)
- MOVW $641, R12
- B runtime·callbackasm1(SB)
- MOVW $642, R12
- B runtime·callbackasm1(SB)
- MOVW $643, R12
- B runtime·callbackasm1(SB)
- MOVW $644, R12
- B runtime·callbackasm1(SB)
- MOVW $645, R12
- B runtime·callbackasm1(SB)
- MOVW $646, R12
- B runtime·callbackasm1(SB)
- MOVW $647, R12
- B runtime·callbackasm1(SB)
- MOVW $648, R12
- B runtime·callbackasm1(SB)
- MOVW $649, R12
- B runtime·callbackasm1(SB)
- MOVW $650, R12
- B runtime·callbackasm1(SB)
- MOVW $651, R12
- B runtime·callbackasm1(SB)
- MOVW $652, R12
- B runtime·callbackasm1(SB)
- MOVW $653, R12
- B runtime·callbackasm1(SB)
- MOVW $654, R12
- B runtime·callbackasm1(SB)
- MOVW $655, R12
- B runtime·callbackasm1(SB)
- MOVW $656, R12
- B runtime·callbackasm1(SB)
- MOVW $657, R12
- B runtime·callbackasm1(SB)
- MOVW $658, R12
- B runtime·callbackasm1(SB)
- MOVW $659, R12
- B runtime·callbackasm1(SB)
- MOVW $660, R12
- B runtime·callbackasm1(SB)
- MOVW $661, R12
- B runtime·callbackasm1(SB)
- MOVW $662, R12
- B runtime·callbackasm1(SB)
- MOVW $663, R12
- B runtime·callbackasm1(SB)
- MOVW $664, R12
- B runtime·callbackasm1(SB)
- MOVW $665, R12
- B runtime·callbackasm1(SB)
- MOVW $666, R12
- B runtime·callbackasm1(SB)
- MOVW $667, R12
- B runtime·callbackasm1(SB)
- MOVW $668, R12
- B runtime·callbackasm1(SB)
- MOVW $669, R12
- B runtime·callbackasm1(SB)
- MOVW $670, R12
- B runtime·callbackasm1(SB)
- MOVW $671, R12
- B runtime·callbackasm1(SB)
- MOVW $672, R12
- B runtime·callbackasm1(SB)
- MOVW $673, R12
- B runtime·callbackasm1(SB)
- MOVW $674, R12
- B runtime·callbackasm1(SB)
- MOVW $675, R12
- B runtime·callbackasm1(SB)
- MOVW $676, R12
- B runtime·callbackasm1(SB)
- MOVW $677, R12
- B runtime·callbackasm1(SB)
- MOVW $678, R12
- B runtime·callbackasm1(SB)
- MOVW $679, R12
- B runtime·callbackasm1(SB)
- MOVW $680, R12
- B runtime·callbackasm1(SB)
- MOVW $681, R12
- B runtime·callbackasm1(SB)
- MOVW $682, R12
- B runtime·callbackasm1(SB)
- MOVW $683, R12
- B runtime·callbackasm1(SB)
- MOVW $684, R12
- B runtime·callbackasm1(SB)
- MOVW $685, R12
- B runtime·callbackasm1(SB)
- MOVW $686, R12
- B runtime·callbackasm1(SB)
- MOVW $687, R12
- B runtime·callbackasm1(SB)
- MOVW $688, R12
- B runtime·callbackasm1(SB)
- MOVW $689, R12
- B runtime·callbackasm1(SB)
- MOVW $690, R12
- B runtime·callbackasm1(SB)
- MOVW $691, R12
- B runtime·callbackasm1(SB)
- MOVW $692, R12
- B runtime·callbackasm1(SB)
- MOVW $693, R12
- B runtime·callbackasm1(SB)
- MOVW $694, R12
- B runtime·callbackasm1(SB)
- MOVW $695, R12
- B runtime·callbackasm1(SB)
- MOVW $696, R12
- B runtime·callbackasm1(SB)
- MOVW $697, R12
- B runtime·callbackasm1(SB)
- MOVW $698, R12
- B runtime·callbackasm1(SB)
- MOVW $699, R12
- B runtime·callbackasm1(SB)
- MOVW $700, R12
- B runtime·callbackasm1(SB)
- MOVW $701, R12
- B runtime·callbackasm1(SB)
- MOVW $702, R12
- B runtime·callbackasm1(SB)
- MOVW $703, R12
- B runtime·callbackasm1(SB)
- MOVW $704, R12
- B runtime·callbackasm1(SB)
- MOVW $705, R12
- B runtime·callbackasm1(SB)
- MOVW $706, R12
- B runtime·callbackasm1(SB)
- MOVW $707, R12
- B runtime·callbackasm1(SB)
- MOVW $708, R12
- B runtime·callbackasm1(SB)
- MOVW $709, R12
- B runtime·callbackasm1(SB)
- MOVW $710, R12
- B runtime·callbackasm1(SB)
- MOVW $711, R12
- B runtime·callbackasm1(SB)
- MOVW $712, R12
- B runtime·callbackasm1(SB)
- MOVW $713, R12
- B runtime·callbackasm1(SB)
- MOVW $714, R12
- B runtime·callbackasm1(SB)
- MOVW $715, R12
- B runtime·callbackasm1(SB)
- MOVW $716, R12
- B runtime·callbackasm1(SB)
- MOVW $717, R12
- B runtime·callbackasm1(SB)
- MOVW $718, R12
- B runtime·callbackasm1(SB)
- MOVW $719, R12
- B runtime·callbackasm1(SB)
- MOVW $720, R12
- B runtime·callbackasm1(SB)
- MOVW $721, R12
- B runtime·callbackasm1(SB)
- MOVW $722, R12
- B runtime·callbackasm1(SB)
- MOVW $723, R12
- B runtime·callbackasm1(SB)
- MOVW $724, R12
- B runtime·callbackasm1(SB)
- MOVW $725, R12
- B runtime·callbackasm1(SB)
- MOVW $726, R12
- B runtime·callbackasm1(SB)
- MOVW $727, R12
- B runtime·callbackasm1(SB)
- MOVW $728, R12
- B runtime·callbackasm1(SB)
- MOVW $729, R12
- B runtime·callbackasm1(SB)
- MOVW $730, R12
- B runtime·callbackasm1(SB)
- MOVW $731, R12
- B runtime·callbackasm1(SB)
- MOVW $732, R12
- B runtime·callbackasm1(SB)
- MOVW $733, R12
- B runtime·callbackasm1(SB)
- MOVW $734, R12
- B runtime·callbackasm1(SB)
- MOVW $735, R12
- B runtime·callbackasm1(SB)
- MOVW $736, R12
- B runtime·callbackasm1(SB)
- MOVW $737, R12
- B runtime·callbackasm1(SB)
- MOVW $738, R12
- B runtime·callbackasm1(SB)
- MOVW $739, R12
- B runtime·callbackasm1(SB)
- MOVW $740, R12
- B runtime·callbackasm1(SB)
- MOVW $741, R12
- B runtime·callbackasm1(SB)
- MOVW $742, R12
- B runtime·callbackasm1(SB)
- MOVW $743, R12
- B runtime·callbackasm1(SB)
- MOVW $744, R12
- B runtime·callbackasm1(SB)
- MOVW $745, R12
- B runtime·callbackasm1(SB)
- MOVW $746, R12
- B runtime·callbackasm1(SB)
- MOVW $747, R12
- B runtime·callbackasm1(SB)
- MOVW $748, R12
- B runtime·callbackasm1(SB)
- MOVW $749, R12
- B runtime·callbackasm1(SB)
- MOVW $750, R12
- B runtime·callbackasm1(SB)
- MOVW $751, R12
- B runtime·callbackasm1(SB)
- MOVW $752, R12
- B runtime·callbackasm1(SB)
- MOVW $753, R12
- B runtime·callbackasm1(SB)
- MOVW $754, R12
- B runtime·callbackasm1(SB)
- MOVW $755, R12
- B runtime·callbackasm1(SB)
- MOVW $756, R12
- B runtime·callbackasm1(SB)
- MOVW $757, R12
- B runtime·callbackasm1(SB)
- MOVW $758, R12
- B runtime·callbackasm1(SB)
- MOVW $759, R12
- B runtime·callbackasm1(SB)
- MOVW $760, R12
- B runtime·callbackasm1(SB)
- MOVW $761, R12
- B runtime·callbackasm1(SB)
- MOVW $762, R12
- B runtime·callbackasm1(SB)
- MOVW $763, R12
- B runtime·callbackasm1(SB)
- MOVW $764, R12
- B runtime·callbackasm1(SB)
- MOVW $765, R12
- B runtime·callbackasm1(SB)
- MOVW $766, R12
- B runtime·callbackasm1(SB)
- MOVW $767, R12
- B runtime·callbackasm1(SB)
- MOVW $768, R12
- B runtime·callbackasm1(SB)
- MOVW $769, R12
- B runtime·callbackasm1(SB)
- MOVW $770, R12
- B runtime·callbackasm1(SB)
- MOVW $771, R12
- B runtime·callbackasm1(SB)
- MOVW $772, R12
- B runtime·callbackasm1(SB)
- MOVW $773, R12
- B runtime·callbackasm1(SB)
- MOVW $774, R12
- B runtime·callbackasm1(SB)
- MOVW $775, R12
- B runtime·callbackasm1(SB)
- MOVW $776, R12
- B runtime·callbackasm1(SB)
- MOVW $777, R12
- B runtime·callbackasm1(SB)
- MOVW $778, R12
- B runtime·callbackasm1(SB)
- MOVW $779, R12
- B runtime·callbackasm1(SB)
- MOVW $780, R12
- B runtime·callbackasm1(SB)
- MOVW $781, R12
- B runtime·callbackasm1(SB)
- MOVW $782, R12
- B runtime·callbackasm1(SB)
- MOVW $783, R12
- B runtime·callbackasm1(SB)
- MOVW $784, R12
- B runtime·callbackasm1(SB)
- MOVW $785, R12
- B runtime·callbackasm1(SB)
- MOVW $786, R12
- B runtime·callbackasm1(SB)
- MOVW $787, R12
- B runtime·callbackasm1(SB)
- MOVW $788, R12
- B runtime·callbackasm1(SB)
- MOVW $789, R12
- B runtime·callbackasm1(SB)
- MOVW $790, R12
- B runtime·callbackasm1(SB)
- MOVW $791, R12
- B runtime·callbackasm1(SB)
- MOVW $792, R12
- B runtime·callbackasm1(SB)
- MOVW $793, R12
- B runtime·callbackasm1(SB)
- MOVW $794, R12
- B runtime·callbackasm1(SB)
- MOVW $795, R12
- B runtime·callbackasm1(SB)
- MOVW $796, R12
- B runtime·callbackasm1(SB)
- MOVW $797, R12
- B runtime·callbackasm1(SB)
- MOVW $798, R12
- B runtime·callbackasm1(SB)
- MOVW $799, R12
- B runtime·callbackasm1(SB)
- MOVW $800, R12
- B runtime·callbackasm1(SB)
- MOVW $801, R12
- B runtime·callbackasm1(SB)
- MOVW $802, R12
- B runtime·callbackasm1(SB)
- MOVW $803, R12
- B runtime·callbackasm1(SB)
- MOVW $804, R12
- B runtime·callbackasm1(SB)
- MOVW $805, R12
- B runtime·callbackasm1(SB)
- MOVW $806, R12
- B runtime·callbackasm1(SB)
- MOVW $807, R12
- B runtime·callbackasm1(SB)
- MOVW $808, R12
- B runtime·callbackasm1(SB)
- MOVW $809, R12
- B runtime·callbackasm1(SB)
- MOVW $810, R12
- B runtime·callbackasm1(SB)
- MOVW $811, R12
- B runtime·callbackasm1(SB)
- MOVW $812, R12
- B runtime·callbackasm1(SB)
- MOVW $813, R12
- B runtime·callbackasm1(SB)
- MOVW $814, R12
- B runtime·callbackasm1(SB)
- MOVW $815, R12
- B runtime·callbackasm1(SB)
- MOVW $816, R12
- B runtime·callbackasm1(SB)
- MOVW $817, R12
- B runtime·callbackasm1(SB)
- MOVW $818, R12
- B runtime·callbackasm1(SB)
- MOVW $819, R12
- B runtime·callbackasm1(SB)
- MOVW $820, R12
- B runtime·callbackasm1(SB)
- MOVW $821, R12
- B runtime·callbackasm1(SB)
- MOVW $822, R12
- B runtime·callbackasm1(SB)
- MOVW $823, R12
- B runtime·callbackasm1(SB)
- MOVW $824, R12
- B runtime·callbackasm1(SB)
- MOVW $825, R12
- B runtime·callbackasm1(SB)
- MOVW $826, R12
- B runtime·callbackasm1(SB)
- MOVW $827, R12
- B runtime·callbackasm1(SB)
- MOVW $828, R12
- B runtime·callbackasm1(SB)
- MOVW $829, R12
- B runtime·callbackasm1(SB)
- MOVW $830, R12
- B runtime·callbackasm1(SB)
- MOVW $831, R12
- B runtime·callbackasm1(SB)
- MOVW $832, R12
- B runtime·callbackasm1(SB)
- MOVW $833, R12
- B runtime·callbackasm1(SB)
- MOVW $834, R12
- B runtime·callbackasm1(SB)
- MOVW $835, R12
- B runtime·callbackasm1(SB)
- MOVW $836, R12
- B runtime·callbackasm1(SB)
- MOVW $837, R12
- B runtime·callbackasm1(SB)
- MOVW $838, R12
- B runtime·callbackasm1(SB)
- MOVW $839, R12
- B runtime·callbackasm1(SB)
- MOVW $840, R12
- B runtime·callbackasm1(SB)
- MOVW $841, R12
- B runtime·callbackasm1(SB)
- MOVW $842, R12
- B runtime·callbackasm1(SB)
- MOVW $843, R12
- B runtime·callbackasm1(SB)
- MOVW $844, R12
- B runtime·callbackasm1(SB)
- MOVW $845, R12
- B runtime·callbackasm1(SB)
- MOVW $846, R12
- B runtime·callbackasm1(SB)
- MOVW $847, R12
- B runtime·callbackasm1(SB)
- MOVW $848, R12
- B runtime·callbackasm1(SB)
- MOVW $849, R12
- B runtime·callbackasm1(SB)
- MOVW $850, R12
- B runtime·callbackasm1(SB)
- MOVW $851, R12
- B runtime·callbackasm1(SB)
- MOVW $852, R12
- B runtime·callbackasm1(SB)
- MOVW $853, R12
- B runtime·callbackasm1(SB)
- MOVW $854, R12
- B runtime·callbackasm1(SB)
- MOVW $855, R12
- B runtime·callbackasm1(SB)
- MOVW $856, R12
- B runtime·callbackasm1(SB)
- MOVW $857, R12
- B runtime·callbackasm1(SB)
- MOVW $858, R12
- B runtime·callbackasm1(SB)
- MOVW $859, R12
- B runtime·callbackasm1(SB)
- MOVW $860, R12
- B runtime·callbackasm1(SB)
- MOVW $861, R12
- B runtime·callbackasm1(SB)
- MOVW $862, R12
- B runtime·callbackasm1(SB)
- MOVW $863, R12
- B runtime·callbackasm1(SB)
- MOVW $864, R12
- B runtime·callbackasm1(SB)
- MOVW $865, R12
- B runtime·callbackasm1(SB)
- MOVW $866, R12
- B runtime·callbackasm1(SB)
- MOVW $867, R12
- B runtime·callbackasm1(SB)
- MOVW $868, R12
- B runtime·callbackasm1(SB)
- MOVW $869, R12
- B runtime·callbackasm1(SB)
- MOVW $870, R12
- B runtime·callbackasm1(SB)
- MOVW $871, R12
- B runtime·callbackasm1(SB)
- MOVW $872, R12
- B runtime·callbackasm1(SB)
- MOVW $873, R12
- B runtime·callbackasm1(SB)
- MOVW $874, R12
- B runtime·callbackasm1(SB)
- MOVW $875, R12
- B runtime·callbackasm1(SB)
- MOVW $876, R12
- B runtime·callbackasm1(SB)
- MOVW $877, R12
- B runtime·callbackasm1(SB)
- MOVW $878, R12
- B runtime·callbackasm1(SB)
- MOVW $879, R12
- B runtime·callbackasm1(SB)
- MOVW $880, R12
- B runtime·callbackasm1(SB)
- MOVW $881, R12
- B runtime·callbackasm1(SB)
- MOVW $882, R12
- B runtime·callbackasm1(SB)
- MOVW $883, R12
- B runtime·callbackasm1(SB)
- MOVW $884, R12
- B runtime·callbackasm1(SB)
- MOVW $885, R12
- B runtime·callbackasm1(SB)
- MOVW $886, R12
- B runtime·callbackasm1(SB)
- MOVW $887, R12
- B runtime·callbackasm1(SB)
- MOVW $888, R12
- B runtime·callbackasm1(SB)
- MOVW $889, R12
- B runtime·callbackasm1(SB)
- MOVW $890, R12
- B runtime·callbackasm1(SB)
- MOVW $891, R12
- B runtime·callbackasm1(SB)
- MOVW $892, R12
- B runtime·callbackasm1(SB)
- MOVW $893, R12
- B runtime·callbackasm1(SB)
- MOVW $894, R12
- B runtime·callbackasm1(SB)
- MOVW $895, R12
- B runtime·callbackasm1(SB)
- MOVW $896, R12
- B runtime·callbackasm1(SB)
- MOVW $897, R12
- B runtime·callbackasm1(SB)
- MOVW $898, R12
- B runtime·callbackasm1(SB)
- MOVW $899, R12
- B runtime·callbackasm1(SB)
- MOVW $900, R12
- B runtime·callbackasm1(SB)
- MOVW $901, R12
- B runtime·callbackasm1(SB)
- MOVW $902, R12
- B runtime·callbackasm1(SB)
- MOVW $903, R12
- B runtime·callbackasm1(SB)
- MOVW $904, R12
- B runtime·callbackasm1(SB)
- MOVW $905, R12
- B runtime·callbackasm1(SB)
- MOVW $906, R12
- B runtime·callbackasm1(SB)
- MOVW $907, R12
- B runtime·callbackasm1(SB)
- MOVW $908, R12
- B runtime·callbackasm1(SB)
- MOVW $909, R12
- B runtime·callbackasm1(SB)
- MOVW $910, R12
- B runtime·callbackasm1(SB)
- MOVW $911, R12
- B runtime·callbackasm1(SB)
- MOVW $912, R12
- B runtime·callbackasm1(SB)
- MOVW $913, R12
- B runtime·callbackasm1(SB)
- MOVW $914, R12
- B runtime·callbackasm1(SB)
- MOVW $915, R12
- B runtime·callbackasm1(SB)
- MOVW $916, R12
- B runtime·callbackasm1(SB)
- MOVW $917, R12
- B runtime·callbackasm1(SB)
- MOVW $918, R12
- B runtime·callbackasm1(SB)
- MOVW $919, R12
- B runtime·callbackasm1(SB)
- MOVW $920, R12
- B runtime·callbackasm1(SB)
- MOVW $921, R12
- B runtime·callbackasm1(SB)
- MOVW $922, R12
- B runtime·callbackasm1(SB)
- MOVW $923, R12
- B runtime·callbackasm1(SB)
- MOVW $924, R12
- B runtime·callbackasm1(SB)
- MOVW $925, R12
- B runtime·callbackasm1(SB)
- MOVW $926, R12
- B runtime·callbackasm1(SB)
- MOVW $927, R12
- B runtime·callbackasm1(SB)
- MOVW $928, R12
- B runtime·callbackasm1(SB)
- MOVW $929, R12
- B runtime·callbackasm1(SB)
- MOVW $930, R12
- B runtime·callbackasm1(SB)
- MOVW $931, R12
- B runtime·callbackasm1(SB)
- MOVW $932, R12
- B runtime·callbackasm1(SB)
- MOVW $933, R12
- B runtime·callbackasm1(SB)
- MOVW $934, R12
- B runtime·callbackasm1(SB)
- MOVW $935, R12
- B runtime·callbackasm1(SB)
- MOVW $936, R12
- B runtime·callbackasm1(SB)
- MOVW $937, R12
- B runtime·callbackasm1(SB)
- MOVW $938, R12
- B runtime·callbackasm1(SB)
- MOVW $939, R12
- B runtime·callbackasm1(SB)
- MOVW $940, R12
- B runtime·callbackasm1(SB)
- MOVW $941, R12
- B runtime·callbackasm1(SB)
- MOVW $942, R12
- B runtime·callbackasm1(SB)
- MOVW $943, R12
- B runtime·callbackasm1(SB)
- MOVW $944, R12
- B runtime·callbackasm1(SB)
- MOVW $945, R12
- B runtime·callbackasm1(SB)
- MOVW $946, R12
- B runtime·callbackasm1(SB)
- MOVW $947, R12
- B runtime·callbackasm1(SB)
- MOVW $948, R12
- B runtime·callbackasm1(SB)
- MOVW $949, R12
- B runtime·callbackasm1(SB)
- MOVW $950, R12
- B runtime·callbackasm1(SB)
- MOVW $951, R12
- B runtime·callbackasm1(SB)
- MOVW $952, R12
- B runtime·callbackasm1(SB)
- MOVW $953, R12
- B runtime·callbackasm1(SB)
- MOVW $954, R12
- B runtime·callbackasm1(SB)
- MOVW $955, R12
- B runtime·callbackasm1(SB)
- MOVW $956, R12
- B runtime·callbackasm1(SB)
- MOVW $957, R12
- B runtime·callbackasm1(SB)
- MOVW $958, R12
- B runtime·callbackasm1(SB)
- MOVW $959, R12
- B runtime·callbackasm1(SB)
- MOVW $960, R12
- B runtime·callbackasm1(SB)
- MOVW $961, R12
- B runtime·callbackasm1(SB)
- MOVW $962, R12
- B runtime·callbackasm1(SB)
- MOVW $963, R12
- B runtime·callbackasm1(SB)
- MOVW $964, R12
- B runtime·callbackasm1(SB)
- MOVW $965, R12
- B runtime·callbackasm1(SB)
- MOVW $966, R12
- B runtime·callbackasm1(SB)
- MOVW $967, R12
- B runtime·callbackasm1(SB)
- MOVW $968, R12
- B runtime·callbackasm1(SB)
- MOVW $969, R12
- B runtime·callbackasm1(SB)
- MOVW $970, R12
- B runtime·callbackasm1(SB)
- MOVW $971, R12
- B runtime·callbackasm1(SB)
- MOVW $972, R12
- B runtime·callbackasm1(SB)
- MOVW $973, R12
- B runtime·callbackasm1(SB)
- MOVW $974, R12
- B runtime·callbackasm1(SB)
- MOVW $975, R12
- B runtime·callbackasm1(SB)
- MOVW $976, R12
- B runtime·callbackasm1(SB)
- MOVW $977, R12
- B runtime·callbackasm1(SB)
- MOVW $978, R12
- B runtime·callbackasm1(SB)
- MOVW $979, R12
- B runtime·callbackasm1(SB)
- MOVW $980, R12
- B runtime·callbackasm1(SB)
- MOVW $981, R12
- B runtime·callbackasm1(SB)
- MOVW $982, R12
- B runtime·callbackasm1(SB)
- MOVW $983, R12
- B runtime·callbackasm1(SB)
- MOVW $984, R12
- B runtime·callbackasm1(SB)
- MOVW $985, R12
- B runtime·callbackasm1(SB)
- MOVW $986, R12
- B runtime·callbackasm1(SB)
- MOVW $987, R12
- B runtime·callbackasm1(SB)
- MOVW $988, R12
- B runtime·callbackasm1(SB)
- MOVW $989, R12
- B runtime·callbackasm1(SB)
- MOVW $990, R12
- B runtime·callbackasm1(SB)
- MOVW $991, R12
- B runtime·callbackasm1(SB)
- MOVW $992, R12
- B runtime·callbackasm1(SB)
- MOVW $993, R12
- B runtime·callbackasm1(SB)
- MOVW $994, R12
- B runtime·callbackasm1(SB)
- MOVW $995, R12
- B runtime·callbackasm1(SB)
- MOVW $996, R12
- B runtime·callbackasm1(SB)
- MOVW $997, R12
- B runtime·callbackasm1(SB)
- MOVW $998, R12
- B runtime·callbackasm1(SB)
- MOVW $999, R12
- B runtime·callbackasm1(SB)
- MOVW $1000, R12
- B runtime·callbackasm1(SB)
- MOVW $1001, R12
- B runtime·callbackasm1(SB)
- MOVW $1002, R12
- B runtime·callbackasm1(SB)
- MOVW $1003, R12
- B runtime·callbackasm1(SB)
- MOVW $1004, R12
- B runtime·callbackasm1(SB)
- MOVW $1005, R12
- B runtime·callbackasm1(SB)
- MOVW $1006, R12
- B runtime·callbackasm1(SB)
- MOVW $1007, R12
- B runtime·callbackasm1(SB)
- MOVW $1008, R12
- B runtime·callbackasm1(SB)
- MOVW $1009, R12
- B runtime·callbackasm1(SB)
- MOVW $1010, R12
- B runtime·callbackasm1(SB)
- MOVW $1011, R12
- B runtime·callbackasm1(SB)
- MOVW $1012, R12
- B runtime·callbackasm1(SB)
- MOVW $1013, R12
- B runtime·callbackasm1(SB)
- MOVW $1014, R12
- B runtime·callbackasm1(SB)
- MOVW $1015, R12
- B runtime·callbackasm1(SB)
- MOVW $1016, R12
- B runtime·callbackasm1(SB)
- MOVW $1017, R12
- B runtime·callbackasm1(SB)
- MOVW $1018, R12
- B runtime·callbackasm1(SB)
- MOVW $1019, R12
- B runtime·callbackasm1(SB)
- MOVW $1020, R12
- B runtime·callbackasm1(SB)
- MOVW $1021, R12
- B runtime·callbackasm1(SB)
- MOVW $1022, R12
- B runtime·callbackasm1(SB)
- MOVW $1023, R12
- B runtime·callbackasm1(SB)
- MOVW $1024, R12
- B runtime·callbackasm1(SB)
- MOVW $1025, R12
- B runtime·callbackasm1(SB)
- MOVW $1026, R12
- B runtime·callbackasm1(SB)
- MOVW $1027, R12
- B runtime·callbackasm1(SB)
- MOVW $1028, R12
- B runtime·callbackasm1(SB)
- MOVW $1029, R12
- B runtime·callbackasm1(SB)
- MOVW $1030, R12
- B runtime·callbackasm1(SB)
- MOVW $1031, R12
- B runtime·callbackasm1(SB)
- MOVW $1032, R12
- B runtime·callbackasm1(SB)
- MOVW $1033, R12
- B runtime·callbackasm1(SB)
- MOVW $1034, R12
- B runtime·callbackasm1(SB)
- MOVW $1035, R12
- B runtime·callbackasm1(SB)
- MOVW $1036, R12
- B runtime·callbackasm1(SB)
- MOVW $1037, R12
- B runtime·callbackasm1(SB)
- MOVW $1038, R12
- B runtime·callbackasm1(SB)
- MOVW $1039, R12
- B runtime·callbackasm1(SB)
- MOVW $1040, R12
- B runtime·callbackasm1(SB)
- MOVW $1041, R12
- B runtime·callbackasm1(SB)
- MOVW $1042, R12
- B runtime·callbackasm1(SB)
- MOVW $1043, R12
- B runtime·callbackasm1(SB)
- MOVW $1044, R12
- B runtime·callbackasm1(SB)
- MOVW $1045, R12
- B runtime·callbackasm1(SB)
- MOVW $1046, R12
- B runtime·callbackasm1(SB)
- MOVW $1047, R12
- B runtime·callbackasm1(SB)
- MOVW $1048, R12
- B runtime·callbackasm1(SB)
- MOVW $1049, R12
- B runtime·callbackasm1(SB)
- MOVW $1050, R12
- B runtime·callbackasm1(SB)
- MOVW $1051, R12
- B runtime·callbackasm1(SB)
- MOVW $1052, R12
- B runtime·callbackasm1(SB)
- MOVW $1053, R12
- B runtime·callbackasm1(SB)
- MOVW $1054, R12
- B runtime·callbackasm1(SB)
- MOVW $1055, R12
- B runtime·callbackasm1(SB)
- MOVW $1056, R12
- B runtime·callbackasm1(SB)
- MOVW $1057, R12
- B runtime·callbackasm1(SB)
- MOVW $1058, R12
- B runtime·callbackasm1(SB)
- MOVW $1059, R12
- B runtime·callbackasm1(SB)
- MOVW $1060, R12
- B runtime·callbackasm1(SB)
- MOVW $1061, R12
- B runtime·callbackasm1(SB)
- MOVW $1062, R12
- B runtime·callbackasm1(SB)
- MOVW $1063, R12
- B runtime·callbackasm1(SB)
- MOVW $1064, R12
- B runtime·callbackasm1(SB)
- MOVW $1065, R12
- B runtime·callbackasm1(SB)
- MOVW $1066, R12
- B runtime·callbackasm1(SB)
- MOVW $1067, R12
- B runtime·callbackasm1(SB)
- MOVW $1068, R12
- B runtime·callbackasm1(SB)
- MOVW $1069, R12
- B runtime·callbackasm1(SB)
- MOVW $1070, R12
- B runtime·callbackasm1(SB)
- MOVW $1071, R12
- B runtime·callbackasm1(SB)
- MOVW $1072, R12
- B runtime·callbackasm1(SB)
- MOVW $1073, R12
- B runtime·callbackasm1(SB)
- MOVW $1074, R12
- B runtime·callbackasm1(SB)
- MOVW $1075, R12
- B runtime·callbackasm1(SB)
- MOVW $1076, R12
- B runtime·callbackasm1(SB)
- MOVW $1077, R12
- B runtime·callbackasm1(SB)
- MOVW $1078, R12
- B runtime·callbackasm1(SB)
- MOVW $1079, R12
- B runtime·callbackasm1(SB)
- MOVW $1080, R12
- B runtime·callbackasm1(SB)
- MOVW $1081, R12
- B runtime·callbackasm1(SB)
- MOVW $1082, R12
- B runtime·callbackasm1(SB)
- MOVW $1083, R12
- B runtime·callbackasm1(SB)
- MOVW $1084, R12
- B runtime·callbackasm1(SB)
- MOVW $1085, R12
- B runtime·callbackasm1(SB)
- MOVW $1086, R12
- B runtime·callbackasm1(SB)
- MOVW $1087, R12
- B runtime·callbackasm1(SB)
- MOVW $1088, R12
- B runtime·callbackasm1(SB)
- MOVW $1089, R12
- B runtime·callbackasm1(SB)
- MOVW $1090, R12
- B runtime·callbackasm1(SB)
- MOVW $1091, R12
- B runtime·callbackasm1(SB)
- MOVW $1092, R12
- B runtime·callbackasm1(SB)
- MOVW $1093, R12
- B runtime·callbackasm1(SB)
- MOVW $1094, R12
- B runtime·callbackasm1(SB)
- MOVW $1095, R12
- B runtime·callbackasm1(SB)
- MOVW $1096, R12
- B runtime·callbackasm1(SB)
- MOVW $1097, R12
- B runtime·callbackasm1(SB)
- MOVW $1098, R12
- B runtime·callbackasm1(SB)
- MOVW $1099, R12
- B runtime·callbackasm1(SB)
- MOVW $1100, R12
- B runtime·callbackasm1(SB)
- MOVW $1101, R12
- B runtime·callbackasm1(SB)
- MOVW $1102, R12
- B runtime·callbackasm1(SB)
- MOVW $1103, R12
- B runtime·callbackasm1(SB)
- MOVW $1104, R12
- B runtime·callbackasm1(SB)
- MOVW $1105, R12
- B runtime·callbackasm1(SB)
- MOVW $1106, R12
- B runtime·callbackasm1(SB)
- MOVW $1107, R12
- B runtime·callbackasm1(SB)
- MOVW $1108, R12
- B runtime·callbackasm1(SB)
- MOVW $1109, R12
- B runtime·callbackasm1(SB)
- MOVW $1110, R12
- B runtime·callbackasm1(SB)
- MOVW $1111, R12
- B runtime·callbackasm1(SB)
- MOVW $1112, R12
- B runtime·callbackasm1(SB)
- MOVW $1113, R12
- B runtime·callbackasm1(SB)
- MOVW $1114, R12
- B runtime·callbackasm1(SB)
- MOVW $1115, R12
- B runtime·callbackasm1(SB)
- MOVW $1116, R12
- B runtime·callbackasm1(SB)
- MOVW $1117, R12
- B runtime·callbackasm1(SB)
- MOVW $1118, R12
- B runtime·callbackasm1(SB)
- MOVW $1119, R12
- B runtime·callbackasm1(SB)
- MOVW $1120, R12
- B runtime·callbackasm1(SB)
- MOVW $1121, R12
- B runtime·callbackasm1(SB)
- MOVW $1122, R12
- B runtime·callbackasm1(SB)
- MOVW $1123, R12
- B runtime·callbackasm1(SB)
- MOVW $1124, R12
- B runtime·callbackasm1(SB)
- MOVW $1125, R12
- B runtime·callbackasm1(SB)
- MOVW $1126, R12
- B runtime·callbackasm1(SB)
- MOVW $1127, R12
- B runtime·callbackasm1(SB)
- MOVW $1128, R12
- B runtime·callbackasm1(SB)
- MOVW $1129, R12
- B runtime·callbackasm1(SB)
- MOVW $1130, R12
- B runtime·callbackasm1(SB)
- MOVW $1131, R12
- B runtime·callbackasm1(SB)
- MOVW $1132, R12
- B runtime·callbackasm1(SB)
- MOVW $1133, R12
- B runtime·callbackasm1(SB)
- MOVW $1134, R12
- B runtime·callbackasm1(SB)
- MOVW $1135, R12
- B runtime·callbackasm1(SB)
- MOVW $1136, R12
- B runtime·callbackasm1(SB)
- MOVW $1137, R12
- B runtime·callbackasm1(SB)
- MOVW $1138, R12
- B runtime·callbackasm1(SB)
- MOVW $1139, R12
- B runtime·callbackasm1(SB)
- MOVW $1140, R12
- B runtime·callbackasm1(SB)
- MOVW $1141, R12
- B runtime·callbackasm1(SB)
- MOVW $1142, R12
- B runtime·callbackasm1(SB)
- MOVW $1143, R12
- B runtime·callbackasm1(SB)
- MOVW $1144, R12
- B runtime·callbackasm1(SB)
- MOVW $1145, R12
- B runtime·callbackasm1(SB)
- MOVW $1146, R12
- B runtime·callbackasm1(SB)
- MOVW $1147, R12
- B runtime·callbackasm1(SB)
- MOVW $1148, R12
- B runtime·callbackasm1(SB)
- MOVW $1149, R12
- B runtime·callbackasm1(SB)
- MOVW $1150, R12
- B runtime·callbackasm1(SB)
- MOVW $1151, R12
- B runtime·callbackasm1(SB)
- MOVW $1152, R12
- B runtime·callbackasm1(SB)
- MOVW $1153, R12
- B runtime·callbackasm1(SB)
- MOVW $1154, R12
- B runtime·callbackasm1(SB)
- MOVW $1155, R12
- B runtime·callbackasm1(SB)
- MOVW $1156, R12
- B runtime·callbackasm1(SB)
- MOVW $1157, R12
- B runtime·callbackasm1(SB)
- MOVW $1158, R12
- B runtime·callbackasm1(SB)
- MOVW $1159, R12
- B runtime·callbackasm1(SB)
- MOVW $1160, R12
- B runtime·callbackasm1(SB)
- MOVW $1161, R12
- B runtime·callbackasm1(SB)
- MOVW $1162, R12
- B runtime·callbackasm1(SB)
- MOVW $1163, R12
- B runtime·callbackasm1(SB)
- MOVW $1164, R12
- B runtime·callbackasm1(SB)
- MOVW $1165, R12
- B runtime·callbackasm1(SB)
- MOVW $1166, R12
- B runtime·callbackasm1(SB)
- MOVW $1167, R12
- B runtime·callbackasm1(SB)
- MOVW $1168, R12
- B runtime·callbackasm1(SB)
- MOVW $1169, R12
- B runtime·callbackasm1(SB)
- MOVW $1170, R12
- B runtime·callbackasm1(SB)
- MOVW $1171, R12
- B runtime·callbackasm1(SB)
- MOVW $1172, R12
- B runtime·callbackasm1(SB)
- MOVW $1173, R12
- B runtime·callbackasm1(SB)
- MOVW $1174, R12
- B runtime·callbackasm1(SB)
- MOVW $1175, R12
- B runtime·callbackasm1(SB)
- MOVW $1176, R12
- B runtime·callbackasm1(SB)
- MOVW $1177, R12
- B runtime·callbackasm1(SB)
- MOVW $1178, R12
- B runtime·callbackasm1(SB)
- MOVW $1179, R12
- B runtime·callbackasm1(SB)
- MOVW $1180, R12
- B runtime·callbackasm1(SB)
- MOVW $1181, R12
- B runtime·callbackasm1(SB)
- MOVW $1182, R12
- B runtime·callbackasm1(SB)
- MOVW $1183, R12
- B runtime·callbackasm1(SB)
- MOVW $1184, R12
- B runtime·callbackasm1(SB)
- MOVW $1185, R12
- B runtime·callbackasm1(SB)
- MOVW $1186, R12
- B runtime·callbackasm1(SB)
- MOVW $1187, R12
- B runtime·callbackasm1(SB)
- MOVW $1188, R12
- B runtime·callbackasm1(SB)
- MOVW $1189, R12
- B runtime·callbackasm1(SB)
- MOVW $1190, R12
- B runtime·callbackasm1(SB)
- MOVW $1191, R12
- B runtime·callbackasm1(SB)
- MOVW $1192, R12
- B runtime·callbackasm1(SB)
- MOVW $1193, R12
- B runtime·callbackasm1(SB)
- MOVW $1194, R12
- B runtime·callbackasm1(SB)
- MOVW $1195, R12
- B runtime·callbackasm1(SB)
- MOVW $1196, R12
- B runtime·callbackasm1(SB)
- MOVW $1197, R12
- B runtime·callbackasm1(SB)
- MOVW $1198, R12
- B runtime·callbackasm1(SB)
- MOVW $1199, R12
- B runtime·callbackasm1(SB)
- MOVW $1200, R12
- B runtime·callbackasm1(SB)
- MOVW $1201, R12
- B runtime·callbackasm1(SB)
- MOVW $1202, R12
- B runtime·callbackasm1(SB)
- MOVW $1203, R12
- B runtime·callbackasm1(SB)
- MOVW $1204, R12
- B runtime·callbackasm1(SB)
- MOVW $1205, R12
- B runtime·callbackasm1(SB)
- MOVW $1206, R12
- B runtime·callbackasm1(SB)
- MOVW $1207, R12
- B runtime·callbackasm1(SB)
- MOVW $1208, R12
- B runtime·callbackasm1(SB)
- MOVW $1209, R12
- B runtime·callbackasm1(SB)
- MOVW $1210, R12
- B runtime·callbackasm1(SB)
- MOVW $1211, R12
- B runtime·callbackasm1(SB)
- MOVW $1212, R12
- B runtime·callbackasm1(SB)
- MOVW $1213, R12
- B runtime·callbackasm1(SB)
- MOVW $1214, R12
- B runtime·callbackasm1(SB)
- MOVW $1215, R12
- B runtime·callbackasm1(SB)
- MOVW $1216, R12
- B runtime·callbackasm1(SB)
- MOVW $1217, R12
- B runtime·callbackasm1(SB)
- MOVW $1218, R12
- B runtime·callbackasm1(SB)
- MOVW $1219, R12
- B runtime·callbackasm1(SB)
- MOVW $1220, R12
- B runtime·callbackasm1(SB)
- MOVW $1221, R12
- B runtime·callbackasm1(SB)
- MOVW $1222, R12
- B runtime·callbackasm1(SB)
- MOVW $1223, R12
- B runtime·callbackasm1(SB)
- MOVW $1224, R12
- B runtime·callbackasm1(SB)
- MOVW $1225, R12
- B runtime·callbackasm1(SB)
- MOVW $1226, R12
- B runtime·callbackasm1(SB)
- MOVW $1227, R12
- B runtime·callbackasm1(SB)
- MOVW $1228, R12
- B runtime·callbackasm1(SB)
- MOVW $1229, R12
- B runtime·callbackasm1(SB)
- MOVW $1230, R12
- B runtime·callbackasm1(SB)
- MOVW $1231, R12
- B runtime·callbackasm1(SB)
- MOVW $1232, R12
- B runtime·callbackasm1(SB)
- MOVW $1233, R12
- B runtime·callbackasm1(SB)
- MOVW $1234, R12
- B runtime·callbackasm1(SB)
- MOVW $1235, R12
- B runtime·callbackasm1(SB)
- MOVW $1236, R12
- B runtime·callbackasm1(SB)
- MOVW $1237, R12
- B runtime·callbackasm1(SB)
- MOVW $1238, R12
- B runtime·callbackasm1(SB)
- MOVW $1239, R12
- B runtime·callbackasm1(SB)
- MOVW $1240, R12
- B runtime·callbackasm1(SB)
- MOVW $1241, R12
- B runtime·callbackasm1(SB)
- MOVW $1242, R12
- B runtime·callbackasm1(SB)
- MOVW $1243, R12
- B runtime·callbackasm1(SB)
- MOVW $1244, R12
- B runtime·callbackasm1(SB)
- MOVW $1245, R12
- B runtime·callbackasm1(SB)
- MOVW $1246, R12
- B runtime·callbackasm1(SB)
- MOVW $1247, R12
- B runtime·callbackasm1(SB)
- MOVW $1248, R12
- B runtime·callbackasm1(SB)
- MOVW $1249, R12
- B runtime·callbackasm1(SB)
- MOVW $1250, R12
- B runtime·callbackasm1(SB)
- MOVW $1251, R12
- B runtime·callbackasm1(SB)
- MOVW $1252, R12
- B runtime·callbackasm1(SB)
- MOVW $1253, R12
- B runtime·callbackasm1(SB)
- MOVW $1254, R12
- B runtime·callbackasm1(SB)
- MOVW $1255, R12
- B runtime·callbackasm1(SB)
- MOVW $1256, R12
- B runtime·callbackasm1(SB)
- MOVW $1257, R12
- B runtime·callbackasm1(SB)
- MOVW $1258, R12
- B runtime·callbackasm1(SB)
- MOVW $1259, R12
- B runtime·callbackasm1(SB)
- MOVW $1260, R12
- B runtime·callbackasm1(SB)
- MOVW $1261, R12
- B runtime·callbackasm1(SB)
- MOVW $1262, R12
- B runtime·callbackasm1(SB)
- MOVW $1263, R12
- B runtime·callbackasm1(SB)
- MOVW $1264, R12
- B runtime·callbackasm1(SB)
- MOVW $1265, R12
- B runtime·callbackasm1(SB)
- MOVW $1266, R12
- B runtime·callbackasm1(SB)
- MOVW $1267, R12
- B runtime·callbackasm1(SB)
- MOVW $1268, R12
- B runtime·callbackasm1(SB)
- MOVW $1269, R12
- B runtime·callbackasm1(SB)
- MOVW $1270, R12
- B runtime·callbackasm1(SB)
- MOVW $1271, R12
- B runtime·callbackasm1(SB)
- MOVW $1272, R12
- B runtime·callbackasm1(SB)
- MOVW $1273, R12
- B runtime·callbackasm1(SB)
- MOVW $1274, R12
- B runtime·callbackasm1(SB)
- MOVW $1275, R12
- B runtime·callbackasm1(SB)
- MOVW $1276, R12
- B runtime·callbackasm1(SB)
- MOVW $1277, R12
- B runtime·callbackasm1(SB)
- MOVW $1278, R12
- B runtime·callbackasm1(SB)
- MOVW $1279, R12
- B runtime·callbackasm1(SB)
- MOVW $1280, R12
- B runtime·callbackasm1(SB)
- MOVW $1281, R12
- B runtime·callbackasm1(SB)
- MOVW $1282, R12
- B runtime·callbackasm1(SB)
- MOVW $1283, R12
- B runtime·callbackasm1(SB)
- MOVW $1284, R12
- B runtime·callbackasm1(SB)
- MOVW $1285, R12
- B runtime·callbackasm1(SB)
- MOVW $1286, R12
- B runtime·callbackasm1(SB)
- MOVW $1287, R12
- B runtime·callbackasm1(SB)
- MOVW $1288, R12
- B runtime·callbackasm1(SB)
- MOVW $1289, R12
- B runtime·callbackasm1(SB)
- MOVW $1290, R12
- B runtime·callbackasm1(SB)
- MOVW $1291, R12
- B runtime·callbackasm1(SB)
- MOVW $1292, R12
- B runtime·callbackasm1(SB)
- MOVW $1293, R12
- B runtime·callbackasm1(SB)
- MOVW $1294, R12
- B runtime·callbackasm1(SB)
- MOVW $1295, R12
- B runtime·callbackasm1(SB)
- MOVW $1296, R12
- B runtime·callbackasm1(SB)
- MOVW $1297, R12
- B runtime·callbackasm1(SB)
- MOVW $1298, R12
- B runtime·callbackasm1(SB)
- MOVW $1299, R12
- B runtime·callbackasm1(SB)
- MOVW $1300, R12
- B runtime·callbackasm1(SB)
- MOVW $1301, R12
- B runtime·callbackasm1(SB)
- MOVW $1302, R12
- B runtime·callbackasm1(SB)
- MOVW $1303, R12
- B runtime·callbackasm1(SB)
- MOVW $1304, R12
- B runtime·callbackasm1(SB)
- MOVW $1305, R12
- B runtime·callbackasm1(SB)
- MOVW $1306, R12
- B runtime·callbackasm1(SB)
- MOVW $1307, R12
- B runtime·callbackasm1(SB)
- MOVW $1308, R12
- B runtime·callbackasm1(SB)
- MOVW $1309, R12
- B runtime·callbackasm1(SB)
- MOVW $1310, R12
- B runtime·callbackasm1(SB)
- MOVW $1311, R12
- B runtime·callbackasm1(SB)
- MOVW $1312, R12
- B runtime·callbackasm1(SB)
- MOVW $1313, R12
- B runtime·callbackasm1(SB)
- MOVW $1314, R12
- B runtime·callbackasm1(SB)
- MOVW $1315, R12
- B runtime·callbackasm1(SB)
- MOVW $1316, R12
- B runtime·callbackasm1(SB)
- MOVW $1317, R12
- B runtime·callbackasm1(SB)
- MOVW $1318, R12
- B runtime·callbackasm1(SB)
- MOVW $1319, R12
- B runtime·callbackasm1(SB)
- MOVW $1320, R12
- B runtime·callbackasm1(SB)
- MOVW $1321, R12
- B runtime·callbackasm1(SB)
- MOVW $1322, R12
- B runtime·callbackasm1(SB)
- MOVW $1323, R12
- B runtime·callbackasm1(SB)
- MOVW $1324, R12
- B runtime·callbackasm1(SB)
- MOVW $1325, R12
- B runtime·callbackasm1(SB)
- MOVW $1326, R12
- B runtime·callbackasm1(SB)
- MOVW $1327, R12
- B runtime·callbackasm1(SB)
- MOVW $1328, R12
- B runtime·callbackasm1(SB)
- MOVW $1329, R12
- B runtime·callbackasm1(SB)
- MOVW $1330, R12
- B runtime·callbackasm1(SB)
- MOVW $1331, R12
- B runtime·callbackasm1(SB)
- MOVW $1332, R12
- B runtime·callbackasm1(SB)
- MOVW $1333, R12
- B runtime·callbackasm1(SB)
- MOVW $1334, R12
- B runtime·callbackasm1(SB)
- MOVW $1335, R12
- B runtime·callbackasm1(SB)
- MOVW $1336, R12
- B runtime·callbackasm1(SB)
- MOVW $1337, R12
- B runtime·callbackasm1(SB)
- MOVW $1338, R12
- B runtime·callbackasm1(SB)
- MOVW $1339, R12
- B runtime·callbackasm1(SB)
- MOVW $1340, R12
- B runtime·callbackasm1(SB)
- MOVW $1341, R12
- B runtime·callbackasm1(SB)
- MOVW $1342, R12
- B runtime·callbackasm1(SB)
- MOVW $1343, R12
- B runtime·callbackasm1(SB)
- MOVW $1344, R12
- B runtime·callbackasm1(SB)
- MOVW $1345, R12
- B runtime·callbackasm1(SB)
- MOVW $1346, R12
- B runtime·callbackasm1(SB)
- MOVW $1347, R12
- B runtime·callbackasm1(SB)
- MOVW $1348, R12
- B runtime·callbackasm1(SB)
- MOVW $1349, R12
- B runtime·callbackasm1(SB)
- MOVW $1350, R12
- B runtime·callbackasm1(SB)
- MOVW $1351, R12
- B runtime·callbackasm1(SB)
- MOVW $1352, R12
- B runtime·callbackasm1(SB)
- MOVW $1353, R12
- B runtime·callbackasm1(SB)
- MOVW $1354, R12
- B runtime·callbackasm1(SB)
- MOVW $1355, R12
- B runtime·callbackasm1(SB)
- MOVW $1356, R12
- B runtime·callbackasm1(SB)
- MOVW $1357, R12
- B runtime·callbackasm1(SB)
- MOVW $1358, R12
- B runtime·callbackasm1(SB)
- MOVW $1359, R12
- B runtime·callbackasm1(SB)
- MOVW $1360, R12
- B runtime·callbackasm1(SB)
- MOVW $1361, R12
- B runtime·callbackasm1(SB)
- MOVW $1362, R12
- B runtime·callbackasm1(SB)
- MOVW $1363, R12
- B runtime·callbackasm1(SB)
- MOVW $1364, R12
- B runtime·callbackasm1(SB)
- MOVW $1365, R12
- B runtime·callbackasm1(SB)
- MOVW $1366, R12
- B runtime·callbackasm1(SB)
- MOVW $1367, R12
- B runtime·callbackasm1(SB)
- MOVW $1368, R12
- B runtime·callbackasm1(SB)
- MOVW $1369, R12
- B runtime·callbackasm1(SB)
- MOVW $1370, R12
- B runtime·callbackasm1(SB)
- MOVW $1371, R12
- B runtime·callbackasm1(SB)
- MOVW $1372, R12
- B runtime·callbackasm1(SB)
- MOVW $1373, R12
- B runtime·callbackasm1(SB)
- MOVW $1374, R12
- B runtime·callbackasm1(SB)
- MOVW $1375, R12
- B runtime·callbackasm1(SB)
- MOVW $1376, R12
- B runtime·callbackasm1(SB)
- MOVW $1377, R12
- B runtime·callbackasm1(SB)
- MOVW $1378, R12
- B runtime·callbackasm1(SB)
- MOVW $1379, R12
- B runtime·callbackasm1(SB)
- MOVW $1380, R12
- B runtime·callbackasm1(SB)
- MOVW $1381, R12
- B runtime·callbackasm1(SB)
- MOVW $1382, R12
- B runtime·callbackasm1(SB)
- MOVW $1383, R12
- B runtime·callbackasm1(SB)
- MOVW $1384, R12
- B runtime·callbackasm1(SB)
- MOVW $1385, R12
- B runtime·callbackasm1(SB)
- MOVW $1386, R12
- B runtime·callbackasm1(SB)
- MOVW $1387, R12
- B runtime·callbackasm1(SB)
- MOVW $1388, R12
- B runtime·callbackasm1(SB)
- MOVW $1389, R12
- B runtime·callbackasm1(SB)
- MOVW $1390, R12
- B runtime·callbackasm1(SB)
- MOVW $1391, R12
- B runtime·callbackasm1(SB)
- MOVW $1392, R12
- B runtime·callbackasm1(SB)
- MOVW $1393, R12
- B runtime·callbackasm1(SB)
- MOVW $1394, R12
- B runtime·callbackasm1(SB)
- MOVW $1395, R12
- B runtime·callbackasm1(SB)
- MOVW $1396, R12
- B runtime·callbackasm1(SB)
- MOVW $1397, R12
- B runtime·callbackasm1(SB)
- MOVW $1398, R12
- B runtime·callbackasm1(SB)
- MOVW $1399, R12
- B runtime·callbackasm1(SB)
- MOVW $1400, R12
- B runtime·callbackasm1(SB)
- MOVW $1401, R12
- B runtime·callbackasm1(SB)
- MOVW $1402, R12
- B runtime·callbackasm1(SB)
- MOVW $1403, R12
- B runtime·callbackasm1(SB)
- MOVW $1404, R12
- B runtime·callbackasm1(SB)
- MOVW $1405, R12
- B runtime·callbackasm1(SB)
- MOVW $1406, R12
- B runtime·callbackasm1(SB)
- MOVW $1407, R12
- B runtime·callbackasm1(SB)
- MOVW $1408, R12
- B runtime·callbackasm1(SB)
- MOVW $1409, R12
- B runtime·callbackasm1(SB)
- MOVW $1410, R12
- B runtime·callbackasm1(SB)
- MOVW $1411, R12
- B runtime·callbackasm1(SB)
- MOVW $1412, R12
- B runtime·callbackasm1(SB)
- MOVW $1413, R12
- B runtime·callbackasm1(SB)
- MOVW $1414, R12
- B runtime·callbackasm1(SB)
- MOVW $1415, R12
- B runtime·callbackasm1(SB)
- MOVW $1416, R12
- B runtime·callbackasm1(SB)
- MOVW $1417, R12
- B runtime·callbackasm1(SB)
- MOVW $1418, R12
- B runtime·callbackasm1(SB)
- MOVW $1419, R12
- B runtime·callbackasm1(SB)
- MOVW $1420, R12
- B runtime·callbackasm1(SB)
- MOVW $1421, R12
- B runtime·callbackasm1(SB)
- MOVW $1422, R12
- B runtime·callbackasm1(SB)
- MOVW $1423, R12
- B runtime·callbackasm1(SB)
- MOVW $1424, R12
- B runtime·callbackasm1(SB)
- MOVW $1425, R12
- B runtime·callbackasm1(SB)
- MOVW $1426, R12
- B runtime·callbackasm1(SB)
- MOVW $1427, R12
- B runtime·callbackasm1(SB)
- MOVW $1428, R12
- B runtime·callbackasm1(SB)
- MOVW $1429, R12
- B runtime·callbackasm1(SB)
- MOVW $1430, R12
- B runtime·callbackasm1(SB)
- MOVW $1431, R12
- B runtime·callbackasm1(SB)
- MOVW $1432, R12
- B runtime·callbackasm1(SB)
- MOVW $1433, R12
- B runtime·callbackasm1(SB)
- MOVW $1434, R12
- B runtime·callbackasm1(SB)
- MOVW $1435, R12
- B runtime·callbackasm1(SB)
- MOVW $1436, R12
- B runtime·callbackasm1(SB)
- MOVW $1437, R12
- B runtime·callbackasm1(SB)
- MOVW $1438, R12
- B runtime·callbackasm1(SB)
- MOVW $1439, R12
- B runtime·callbackasm1(SB)
- MOVW $1440, R12
- B runtime·callbackasm1(SB)
- MOVW $1441, R12
- B runtime·callbackasm1(SB)
- MOVW $1442, R12
- B runtime·callbackasm1(SB)
- MOVW $1443, R12
- B runtime·callbackasm1(SB)
- MOVW $1444, R12
- B runtime·callbackasm1(SB)
- MOVW $1445, R12
- B runtime·callbackasm1(SB)
- MOVW $1446, R12
- B runtime·callbackasm1(SB)
- MOVW $1447, R12
- B runtime·callbackasm1(SB)
- MOVW $1448, R12
- B runtime·callbackasm1(SB)
- MOVW $1449, R12
- B runtime·callbackasm1(SB)
- MOVW $1450, R12
- B runtime·callbackasm1(SB)
- MOVW $1451, R12
- B runtime·callbackasm1(SB)
- MOVW $1452, R12
- B runtime·callbackasm1(SB)
- MOVW $1453, R12
- B runtime·callbackasm1(SB)
- MOVW $1454, R12
- B runtime·callbackasm1(SB)
- MOVW $1455, R12
- B runtime·callbackasm1(SB)
- MOVW $1456, R12
- B runtime·callbackasm1(SB)
- MOVW $1457, R12
- B runtime·callbackasm1(SB)
- MOVW $1458, R12
- B runtime·callbackasm1(SB)
- MOVW $1459, R12
- B runtime·callbackasm1(SB)
- MOVW $1460, R12
- B runtime·callbackasm1(SB)
- MOVW $1461, R12
- B runtime·callbackasm1(SB)
- MOVW $1462, R12
- B runtime·callbackasm1(SB)
- MOVW $1463, R12
- B runtime·callbackasm1(SB)
- MOVW $1464, R12
- B runtime·callbackasm1(SB)
- MOVW $1465, R12
- B runtime·callbackasm1(SB)
- MOVW $1466, R12
- B runtime·callbackasm1(SB)
- MOVW $1467, R12
- B runtime·callbackasm1(SB)
- MOVW $1468, R12
- B runtime·callbackasm1(SB)
- MOVW $1469, R12
- B runtime·callbackasm1(SB)
- MOVW $1470, R12
- B runtime·callbackasm1(SB)
- MOVW $1471, R12
- B runtime·callbackasm1(SB)
- MOVW $1472, R12
- B runtime·callbackasm1(SB)
- MOVW $1473, R12
- B runtime·callbackasm1(SB)
- MOVW $1474, R12
- B runtime·callbackasm1(SB)
- MOVW $1475, R12
- B runtime·callbackasm1(SB)
- MOVW $1476, R12
- B runtime·callbackasm1(SB)
- MOVW $1477, R12
- B runtime·callbackasm1(SB)
- MOVW $1478, R12
- B runtime·callbackasm1(SB)
- MOVW $1479, R12
- B runtime·callbackasm1(SB)
- MOVW $1480, R12
- B runtime·callbackasm1(SB)
- MOVW $1481, R12
- B runtime·callbackasm1(SB)
- MOVW $1482, R12
- B runtime·callbackasm1(SB)
- MOVW $1483, R12
- B runtime·callbackasm1(SB)
- MOVW $1484, R12
- B runtime·callbackasm1(SB)
- MOVW $1485, R12
- B runtime·callbackasm1(SB)
- MOVW $1486, R12
- B runtime·callbackasm1(SB)
- MOVW $1487, R12
- B runtime·callbackasm1(SB)
- MOVW $1488, R12
- B runtime·callbackasm1(SB)
- MOVW $1489, R12
- B runtime·callbackasm1(SB)
- MOVW $1490, R12
- B runtime·callbackasm1(SB)
- MOVW $1491, R12
- B runtime·callbackasm1(SB)
- MOVW $1492, R12
- B runtime·callbackasm1(SB)
- MOVW $1493, R12
- B runtime·callbackasm1(SB)
- MOVW $1494, R12
- B runtime·callbackasm1(SB)
- MOVW $1495, R12
- B runtime·callbackasm1(SB)
- MOVW $1496, R12
- B runtime·callbackasm1(SB)
- MOVW $1497, R12
- B runtime·callbackasm1(SB)
- MOVW $1498, R12
- B runtime·callbackasm1(SB)
- MOVW $1499, R12
- B runtime·callbackasm1(SB)
- MOVW $1500, R12
- B runtime·callbackasm1(SB)
- MOVW $1501, R12
- B runtime·callbackasm1(SB)
- MOVW $1502, R12
- B runtime·callbackasm1(SB)
- MOVW $1503, R12
- B runtime·callbackasm1(SB)
- MOVW $1504, R12
- B runtime·callbackasm1(SB)
- MOVW $1505, R12
- B runtime·callbackasm1(SB)
- MOVW $1506, R12
- B runtime·callbackasm1(SB)
- MOVW $1507, R12
- B runtime·callbackasm1(SB)
- MOVW $1508, R12
- B runtime·callbackasm1(SB)
- MOVW $1509, R12
- B runtime·callbackasm1(SB)
- MOVW $1510, R12
- B runtime·callbackasm1(SB)
- MOVW $1511, R12
- B runtime·callbackasm1(SB)
- MOVW $1512, R12
- B runtime·callbackasm1(SB)
- MOVW $1513, R12
- B runtime·callbackasm1(SB)
- MOVW $1514, R12
- B runtime·callbackasm1(SB)
- MOVW $1515, R12
- B runtime·callbackasm1(SB)
- MOVW $1516, R12
- B runtime·callbackasm1(SB)
- MOVW $1517, R12
- B runtime·callbackasm1(SB)
- MOVW $1518, R12
- B runtime·callbackasm1(SB)
- MOVW $1519, R12
- B runtime·callbackasm1(SB)
- MOVW $1520, R12
- B runtime·callbackasm1(SB)
- MOVW $1521, R12
- B runtime·callbackasm1(SB)
- MOVW $1522, R12
- B runtime·callbackasm1(SB)
- MOVW $1523, R12
- B runtime·callbackasm1(SB)
- MOVW $1524, R12
- B runtime·callbackasm1(SB)
- MOVW $1525, R12
- B runtime·callbackasm1(SB)
- MOVW $1526, R12
- B runtime·callbackasm1(SB)
- MOVW $1527, R12
- B runtime·callbackasm1(SB)
- MOVW $1528, R12
- B runtime·callbackasm1(SB)
- MOVW $1529, R12
- B runtime·callbackasm1(SB)
- MOVW $1530, R12
- B runtime·callbackasm1(SB)
- MOVW $1531, R12
- B runtime·callbackasm1(SB)
- MOVW $1532, R12
- B runtime·callbackasm1(SB)
- MOVW $1533, R12
- B runtime·callbackasm1(SB)
- MOVW $1534, R12
- B runtime·callbackasm1(SB)
- MOVW $1535, R12
- B runtime·callbackasm1(SB)
- MOVW $1536, R12
- B runtime·callbackasm1(SB)
- MOVW $1537, R12
- B runtime·callbackasm1(SB)
- MOVW $1538, R12
- B runtime·callbackasm1(SB)
- MOVW $1539, R12
- B runtime·callbackasm1(SB)
- MOVW $1540, R12
- B runtime·callbackasm1(SB)
- MOVW $1541, R12
- B runtime·callbackasm1(SB)
- MOVW $1542, R12
- B runtime·callbackasm1(SB)
- MOVW $1543, R12
- B runtime·callbackasm1(SB)
- MOVW $1544, R12
- B runtime·callbackasm1(SB)
- MOVW $1545, R12
- B runtime·callbackasm1(SB)
- MOVW $1546, R12
- B runtime·callbackasm1(SB)
- MOVW $1547, R12
- B runtime·callbackasm1(SB)
- MOVW $1548, R12
- B runtime·callbackasm1(SB)
- MOVW $1549, R12
- B runtime·callbackasm1(SB)
- MOVW $1550, R12
- B runtime·callbackasm1(SB)
- MOVW $1551, R12
- B runtime·callbackasm1(SB)
- MOVW $1552, R12
- B runtime·callbackasm1(SB)
- MOVW $1553, R12
- B runtime·callbackasm1(SB)
- MOVW $1554, R12
- B runtime·callbackasm1(SB)
- MOVW $1555, R12
- B runtime·callbackasm1(SB)
- MOVW $1556, R12
- B runtime·callbackasm1(SB)
- MOVW $1557, R12
- B runtime·callbackasm1(SB)
- MOVW $1558, R12
- B runtime·callbackasm1(SB)
- MOVW $1559, R12
- B runtime·callbackasm1(SB)
- MOVW $1560, R12
- B runtime·callbackasm1(SB)
- MOVW $1561, R12
- B runtime·callbackasm1(SB)
- MOVW $1562, R12
- B runtime·callbackasm1(SB)
- MOVW $1563, R12
- B runtime·callbackasm1(SB)
- MOVW $1564, R12
- B runtime·callbackasm1(SB)
- MOVW $1565, R12
- B runtime·callbackasm1(SB)
- MOVW $1566, R12
- B runtime·callbackasm1(SB)
- MOVW $1567, R12
- B runtime·callbackasm1(SB)
- MOVW $1568, R12
- B runtime·callbackasm1(SB)
- MOVW $1569, R12
- B runtime·callbackasm1(SB)
- MOVW $1570, R12
- B runtime·callbackasm1(SB)
- MOVW $1571, R12
- B runtime·callbackasm1(SB)
- MOVW $1572, R12
- B runtime·callbackasm1(SB)
- MOVW $1573, R12
- B runtime·callbackasm1(SB)
- MOVW $1574, R12
- B runtime·callbackasm1(SB)
- MOVW $1575, R12
- B runtime·callbackasm1(SB)
- MOVW $1576, R12
- B runtime·callbackasm1(SB)
- MOVW $1577, R12
- B runtime·callbackasm1(SB)
- MOVW $1578, R12
- B runtime·callbackasm1(SB)
- MOVW $1579, R12
- B runtime·callbackasm1(SB)
- MOVW $1580, R12
- B runtime·callbackasm1(SB)
- MOVW $1581, R12
- B runtime·callbackasm1(SB)
- MOVW $1582, R12
- B runtime·callbackasm1(SB)
- MOVW $1583, R12
- B runtime·callbackasm1(SB)
- MOVW $1584, R12
- B runtime·callbackasm1(SB)
- MOVW $1585, R12
- B runtime·callbackasm1(SB)
- MOVW $1586, R12
- B runtime·callbackasm1(SB)
- MOVW $1587, R12
- B runtime·callbackasm1(SB)
- MOVW $1588, R12
- B runtime·callbackasm1(SB)
- MOVW $1589, R12
- B runtime·callbackasm1(SB)
- MOVW $1590, R12
- B runtime·callbackasm1(SB)
- MOVW $1591, R12
- B runtime·callbackasm1(SB)
- MOVW $1592, R12
- B runtime·callbackasm1(SB)
- MOVW $1593, R12
- B runtime·callbackasm1(SB)
- MOVW $1594, R12
- B runtime·callbackasm1(SB)
- MOVW $1595, R12
- B runtime·callbackasm1(SB)
- MOVW $1596, R12
- B runtime·callbackasm1(SB)
- MOVW $1597, R12
- B runtime·callbackasm1(SB)
- MOVW $1598, R12
- B runtime·callbackasm1(SB)
- MOVW $1599, R12
- B runtime·callbackasm1(SB)
- MOVW $1600, R12
- B runtime·callbackasm1(SB)
- MOVW $1601, R12
- B runtime·callbackasm1(SB)
- MOVW $1602, R12
- B runtime·callbackasm1(SB)
- MOVW $1603, R12
- B runtime·callbackasm1(SB)
- MOVW $1604, R12
- B runtime·callbackasm1(SB)
- MOVW $1605, R12
- B runtime·callbackasm1(SB)
- MOVW $1606, R12
- B runtime·callbackasm1(SB)
- MOVW $1607, R12
- B runtime·callbackasm1(SB)
- MOVW $1608, R12
- B runtime·callbackasm1(SB)
- MOVW $1609, R12
- B runtime·callbackasm1(SB)
- MOVW $1610, R12
- B runtime·callbackasm1(SB)
- MOVW $1611, R12
- B runtime·callbackasm1(SB)
- MOVW $1612, R12
- B runtime·callbackasm1(SB)
- MOVW $1613, R12
- B runtime·callbackasm1(SB)
- MOVW $1614, R12
- B runtime·callbackasm1(SB)
- MOVW $1615, R12
- B runtime·callbackasm1(SB)
- MOVW $1616, R12
- B runtime·callbackasm1(SB)
- MOVW $1617, R12
- B runtime·callbackasm1(SB)
- MOVW $1618, R12
- B runtime·callbackasm1(SB)
- MOVW $1619, R12
- B runtime·callbackasm1(SB)
- MOVW $1620, R12
- B runtime·callbackasm1(SB)
- MOVW $1621, R12
- B runtime·callbackasm1(SB)
- MOVW $1622, R12
- B runtime·callbackasm1(SB)
- MOVW $1623, R12
- B runtime·callbackasm1(SB)
- MOVW $1624, R12
- B runtime·callbackasm1(SB)
- MOVW $1625, R12
- B runtime·callbackasm1(SB)
- MOVW $1626, R12
- B runtime·callbackasm1(SB)
- MOVW $1627, R12
- B runtime·callbackasm1(SB)
- MOVW $1628, R12
- B runtime·callbackasm1(SB)
- MOVW $1629, R12
- B runtime·callbackasm1(SB)
- MOVW $1630, R12
- B runtime·callbackasm1(SB)
- MOVW $1631, R12
- B runtime·callbackasm1(SB)
- MOVW $1632, R12
- B runtime·callbackasm1(SB)
- MOVW $1633, R12
- B runtime·callbackasm1(SB)
- MOVW $1634, R12
- B runtime·callbackasm1(SB)
- MOVW $1635, R12
- B runtime·callbackasm1(SB)
- MOVW $1636, R12
- B runtime·callbackasm1(SB)
- MOVW $1637, R12
- B runtime·callbackasm1(SB)
- MOVW $1638, R12
- B runtime·callbackasm1(SB)
- MOVW $1639, R12
- B runtime·callbackasm1(SB)
- MOVW $1640, R12
- B runtime·callbackasm1(SB)
- MOVW $1641, R12
- B runtime·callbackasm1(SB)
- MOVW $1642, R12
- B runtime·callbackasm1(SB)
- MOVW $1643, R12
- B runtime·callbackasm1(SB)
- MOVW $1644, R12
- B runtime·callbackasm1(SB)
- MOVW $1645, R12
- B runtime·callbackasm1(SB)
- MOVW $1646, R12
- B runtime·callbackasm1(SB)
- MOVW $1647, R12
- B runtime·callbackasm1(SB)
- MOVW $1648, R12
- B runtime·callbackasm1(SB)
- MOVW $1649, R12
- B runtime·callbackasm1(SB)
- MOVW $1650, R12
- B runtime·callbackasm1(SB)
- MOVW $1651, R12
- B runtime·callbackasm1(SB)
- MOVW $1652, R12
- B runtime·callbackasm1(SB)
- MOVW $1653, R12
- B runtime·callbackasm1(SB)
- MOVW $1654, R12
- B runtime·callbackasm1(SB)
- MOVW $1655, R12
- B runtime·callbackasm1(SB)
- MOVW $1656, R12
- B runtime·callbackasm1(SB)
- MOVW $1657, R12
- B runtime·callbackasm1(SB)
- MOVW $1658, R12
- B runtime·callbackasm1(SB)
- MOVW $1659, R12
- B runtime·callbackasm1(SB)
- MOVW $1660, R12
- B runtime·callbackasm1(SB)
- MOVW $1661, R12
- B runtime·callbackasm1(SB)
- MOVW $1662, R12
- B runtime·callbackasm1(SB)
- MOVW $1663, R12
- B runtime·callbackasm1(SB)
- MOVW $1664, R12
- B runtime·callbackasm1(SB)
- MOVW $1665, R12
- B runtime·callbackasm1(SB)
- MOVW $1666, R12
- B runtime·callbackasm1(SB)
- MOVW $1667, R12
- B runtime·callbackasm1(SB)
- MOVW $1668, R12
- B runtime·callbackasm1(SB)
- MOVW $1669, R12
- B runtime·callbackasm1(SB)
- MOVW $1670, R12
- B runtime·callbackasm1(SB)
- MOVW $1671, R12
- B runtime·callbackasm1(SB)
- MOVW $1672, R12
- B runtime·callbackasm1(SB)
- MOVW $1673, R12
- B runtime·callbackasm1(SB)
- MOVW $1674, R12
- B runtime·callbackasm1(SB)
- MOVW $1675, R12
- B runtime·callbackasm1(SB)
- MOVW $1676, R12
- B runtime·callbackasm1(SB)
- MOVW $1677, R12
- B runtime·callbackasm1(SB)
- MOVW $1678, R12
- B runtime·callbackasm1(SB)
- MOVW $1679, R12
- B runtime·callbackasm1(SB)
- MOVW $1680, R12
- B runtime·callbackasm1(SB)
- MOVW $1681, R12
- B runtime·callbackasm1(SB)
- MOVW $1682, R12
- B runtime·callbackasm1(SB)
- MOVW $1683, R12
- B runtime·callbackasm1(SB)
- MOVW $1684, R12
- B runtime·callbackasm1(SB)
- MOVW $1685, R12
- B runtime·callbackasm1(SB)
- MOVW $1686, R12
- B runtime·callbackasm1(SB)
- MOVW $1687, R12
- B runtime·callbackasm1(SB)
- MOVW $1688, R12
- B runtime·callbackasm1(SB)
- MOVW $1689, R12
- B runtime·callbackasm1(SB)
- MOVW $1690, R12
- B runtime·callbackasm1(SB)
- MOVW $1691, R12
- B runtime·callbackasm1(SB)
- MOVW $1692, R12
- B runtime·callbackasm1(SB)
- MOVW $1693, R12
- B runtime·callbackasm1(SB)
- MOVW $1694, R12
- B runtime·callbackasm1(SB)
- MOVW $1695, R12
- B runtime·callbackasm1(SB)
- MOVW $1696, R12
- B runtime·callbackasm1(SB)
- MOVW $1697, R12
- B runtime·callbackasm1(SB)
- MOVW $1698, R12
- B runtime·callbackasm1(SB)
- MOVW $1699, R12
- B runtime·callbackasm1(SB)
- MOVW $1700, R12
- B runtime·callbackasm1(SB)
- MOVW $1701, R12
- B runtime·callbackasm1(SB)
- MOVW $1702, R12
- B runtime·callbackasm1(SB)
- MOVW $1703, R12
- B runtime·callbackasm1(SB)
- MOVW $1704, R12
- B runtime·callbackasm1(SB)
- MOVW $1705, R12
- B runtime·callbackasm1(SB)
- MOVW $1706, R12
- B runtime·callbackasm1(SB)
- MOVW $1707, R12
- B runtime·callbackasm1(SB)
- MOVW $1708, R12
- B runtime·callbackasm1(SB)
- MOVW $1709, R12
- B runtime·callbackasm1(SB)
- MOVW $1710, R12
- B runtime·callbackasm1(SB)
- MOVW $1711, R12
- B runtime·callbackasm1(SB)
- MOVW $1712, R12
- B runtime·callbackasm1(SB)
- MOVW $1713, R12
- B runtime·callbackasm1(SB)
- MOVW $1714, R12
- B runtime·callbackasm1(SB)
- MOVW $1715, R12
- B runtime·callbackasm1(SB)
- MOVW $1716, R12
- B runtime·callbackasm1(SB)
- MOVW $1717, R12
- B runtime·callbackasm1(SB)
- MOVW $1718, R12
- B runtime·callbackasm1(SB)
- MOVW $1719, R12
- B runtime·callbackasm1(SB)
- MOVW $1720, R12
- B runtime·callbackasm1(SB)
- MOVW $1721, R12
- B runtime·callbackasm1(SB)
- MOVW $1722, R12
- B runtime·callbackasm1(SB)
- MOVW $1723, R12
- B runtime·callbackasm1(SB)
- MOVW $1724, R12
- B runtime·callbackasm1(SB)
- MOVW $1725, R12
- B runtime·callbackasm1(SB)
- MOVW $1726, R12
- B runtime·callbackasm1(SB)
- MOVW $1727, R12
- B runtime·callbackasm1(SB)
- MOVW $1728, R12
- B runtime·callbackasm1(SB)
- MOVW $1729, R12
- B runtime·callbackasm1(SB)
- MOVW $1730, R12
- B runtime·callbackasm1(SB)
- MOVW $1731, R12
- B runtime·callbackasm1(SB)
- MOVW $1732, R12
- B runtime·callbackasm1(SB)
- MOVW $1733, R12
- B runtime·callbackasm1(SB)
- MOVW $1734, R12
- B runtime·callbackasm1(SB)
- MOVW $1735, R12
- B runtime·callbackasm1(SB)
- MOVW $1736, R12
- B runtime·callbackasm1(SB)
- MOVW $1737, R12
- B runtime·callbackasm1(SB)
- MOVW $1738, R12
- B runtime·callbackasm1(SB)
- MOVW $1739, R12
- B runtime·callbackasm1(SB)
- MOVW $1740, R12
- B runtime·callbackasm1(SB)
- MOVW $1741, R12
- B runtime·callbackasm1(SB)
- MOVW $1742, R12
- B runtime·callbackasm1(SB)
- MOVW $1743, R12
- B runtime·callbackasm1(SB)
- MOVW $1744, R12
- B runtime·callbackasm1(SB)
- MOVW $1745, R12
- B runtime·callbackasm1(SB)
- MOVW $1746, R12
- B runtime·callbackasm1(SB)
- MOVW $1747, R12
- B runtime·callbackasm1(SB)
- MOVW $1748, R12
- B runtime·callbackasm1(SB)
- MOVW $1749, R12
- B runtime·callbackasm1(SB)
- MOVW $1750, R12
- B runtime·callbackasm1(SB)
- MOVW $1751, R12
- B runtime·callbackasm1(SB)
- MOVW $1752, R12
- B runtime·callbackasm1(SB)
- MOVW $1753, R12
- B runtime·callbackasm1(SB)
- MOVW $1754, R12
- B runtime·callbackasm1(SB)
- MOVW $1755, R12
- B runtime·callbackasm1(SB)
- MOVW $1756, R12
- B runtime·callbackasm1(SB)
- MOVW $1757, R12
- B runtime·callbackasm1(SB)
- MOVW $1758, R12
- B runtime·callbackasm1(SB)
- MOVW $1759, R12
- B runtime·callbackasm1(SB)
- MOVW $1760, R12
- B runtime·callbackasm1(SB)
- MOVW $1761, R12
- B runtime·callbackasm1(SB)
- MOVW $1762, R12
- B runtime·callbackasm1(SB)
- MOVW $1763, R12
- B runtime·callbackasm1(SB)
- MOVW $1764, R12
- B runtime·callbackasm1(SB)
- MOVW $1765, R12
- B runtime·callbackasm1(SB)
- MOVW $1766, R12
- B runtime·callbackasm1(SB)
- MOVW $1767, R12
- B runtime·callbackasm1(SB)
- MOVW $1768, R12
- B runtime·callbackasm1(SB)
- MOVW $1769, R12
- B runtime·callbackasm1(SB)
- MOVW $1770, R12
- B runtime·callbackasm1(SB)
- MOVW $1771, R12
- B runtime·callbackasm1(SB)
- MOVW $1772, R12
- B runtime·callbackasm1(SB)
- MOVW $1773, R12
- B runtime·callbackasm1(SB)
- MOVW $1774, R12
- B runtime·callbackasm1(SB)
- MOVW $1775, R12
- B runtime·callbackasm1(SB)
- MOVW $1776, R12
- B runtime·callbackasm1(SB)
- MOVW $1777, R12
- B runtime·callbackasm1(SB)
- MOVW $1778, R12
- B runtime·callbackasm1(SB)
- MOVW $1779, R12
- B runtime·callbackasm1(SB)
- MOVW $1780, R12
- B runtime·callbackasm1(SB)
- MOVW $1781, R12
- B runtime·callbackasm1(SB)
- MOVW $1782, R12
- B runtime·callbackasm1(SB)
- MOVW $1783, R12
- B runtime·callbackasm1(SB)
- MOVW $1784, R12
- B runtime·callbackasm1(SB)
- MOVW $1785, R12
- B runtime·callbackasm1(SB)
- MOVW $1786, R12
- B runtime·callbackasm1(SB)
- MOVW $1787, R12
- B runtime·callbackasm1(SB)
- MOVW $1788, R12
- B runtime·callbackasm1(SB)
- MOVW $1789, R12
- B runtime·callbackasm1(SB)
- MOVW $1790, R12
- B runtime·callbackasm1(SB)
- MOVW $1791, R12
- B runtime·callbackasm1(SB)
- MOVW $1792, R12
- B runtime·callbackasm1(SB)
- MOVW $1793, R12
- B runtime·callbackasm1(SB)
- MOVW $1794, R12
- B runtime·callbackasm1(SB)
- MOVW $1795, R12
- B runtime·callbackasm1(SB)
- MOVW $1796, R12
- B runtime·callbackasm1(SB)
- MOVW $1797, R12
- B runtime·callbackasm1(SB)
- MOVW $1798, R12
- B runtime·callbackasm1(SB)
- MOVW $1799, R12
- B runtime·callbackasm1(SB)
- MOVW $1800, R12
- B runtime·callbackasm1(SB)
- MOVW $1801, R12
- B runtime·callbackasm1(SB)
- MOVW $1802, R12
- B runtime·callbackasm1(SB)
- MOVW $1803, R12
- B runtime·callbackasm1(SB)
- MOVW $1804, R12
- B runtime·callbackasm1(SB)
- MOVW $1805, R12
- B runtime·callbackasm1(SB)
- MOVW $1806, R12
- B runtime·callbackasm1(SB)
- MOVW $1807, R12
- B runtime·callbackasm1(SB)
- MOVW $1808, R12
- B runtime·callbackasm1(SB)
- MOVW $1809, R12
- B runtime·callbackasm1(SB)
- MOVW $1810, R12
- B runtime·callbackasm1(SB)
- MOVW $1811, R12
- B runtime·callbackasm1(SB)
- MOVW $1812, R12
- B runtime·callbackasm1(SB)
- MOVW $1813, R12
- B runtime·callbackasm1(SB)
- MOVW $1814, R12
- B runtime·callbackasm1(SB)
- MOVW $1815, R12
- B runtime·callbackasm1(SB)
- MOVW $1816, R12
- B runtime·callbackasm1(SB)
- MOVW $1817, R12
- B runtime·callbackasm1(SB)
- MOVW $1818, R12
- B runtime·callbackasm1(SB)
- MOVW $1819, R12
- B runtime·callbackasm1(SB)
- MOVW $1820, R12
- B runtime·callbackasm1(SB)
- MOVW $1821, R12
- B runtime·callbackasm1(SB)
- MOVW $1822, R12
- B runtime·callbackasm1(SB)
- MOVW $1823, R12
- B runtime·callbackasm1(SB)
- MOVW $1824, R12
- B runtime·callbackasm1(SB)
- MOVW $1825, R12
- B runtime·callbackasm1(SB)
- MOVW $1826, R12
- B runtime·callbackasm1(SB)
- MOVW $1827, R12
- B runtime·callbackasm1(SB)
- MOVW $1828, R12
- B runtime·callbackasm1(SB)
- MOVW $1829, R12
- B runtime·callbackasm1(SB)
- MOVW $1830, R12
- B runtime·callbackasm1(SB)
- MOVW $1831, R12
- B runtime·callbackasm1(SB)
- MOVW $1832, R12
- B runtime·callbackasm1(SB)
- MOVW $1833, R12
- B runtime·callbackasm1(SB)
- MOVW $1834, R12
- B runtime·callbackasm1(SB)
- MOVW $1835, R12
- B runtime·callbackasm1(SB)
- MOVW $1836, R12
- B runtime·callbackasm1(SB)
- MOVW $1837, R12
- B runtime·callbackasm1(SB)
- MOVW $1838, R12
- B runtime·callbackasm1(SB)
- MOVW $1839, R12
- B runtime·callbackasm1(SB)
- MOVW $1840, R12
- B runtime·callbackasm1(SB)
- MOVW $1841, R12
- B runtime·callbackasm1(SB)
- MOVW $1842, R12
- B runtime·callbackasm1(SB)
- MOVW $1843, R12
- B runtime·callbackasm1(SB)
- MOVW $1844, R12
- B runtime·callbackasm1(SB)
- MOVW $1845, R12
- B runtime·callbackasm1(SB)
- MOVW $1846, R12
- B runtime·callbackasm1(SB)
- MOVW $1847, R12
- B runtime·callbackasm1(SB)
- MOVW $1848, R12
- B runtime·callbackasm1(SB)
- MOVW $1849, R12
- B runtime·callbackasm1(SB)
- MOVW $1850, R12
- B runtime·callbackasm1(SB)
- MOVW $1851, R12
- B runtime·callbackasm1(SB)
- MOVW $1852, R12
- B runtime·callbackasm1(SB)
- MOVW $1853, R12
- B runtime·callbackasm1(SB)
- MOVW $1854, R12
- B runtime·callbackasm1(SB)
- MOVW $1855, R12
- B runtime·callbackasm1(SB)
- MOVW $1856, R12
- B runtime·callbackasm1(SB)
- MOVW $1857, R12
- B runtime·callbackasm1(SB)
- MOVW $1858, R12
- B runtime·callbackasm1(SB)
- MOVW $1859, R12
- B runtime·callbackasm1(SB)
- MOVW $1860, R12
- B runtime·callbackasm1(SB)
- MOVW $1861, R12
- B runtime·callbackasm1(SB)
- MOVW $1862, R12
- B runtime·callbackasm1(SB)
- MOVW $1863, R12
- B runtime·callbackasm1(SB)
- MOVW $1864, R12
- B runtime·callbackasm1(SB)
- MOVW $1865, R12
- B runtime·callbackasm1(SB)
- MOVW $1866, R12
- B runtime·callbackasm1(SB)
- MOVW $1867, R12
- B runtime·callbackasm1(SB)
- MOVW $1868, R12
- B runtime·callbackasm1(SB)
- MOVW $1869, R12
- B runtime·callbackasm1(SB)
- MOVW $1870, R12
- B runtime·callbackasm1(SB)
- MOVW $1871, R12
- B runtime·callbackasm1(SB)
- MOVW $1872, R12
- B runtime·callbackasm1(SB)
- MOVW $1873, R12
- B runtime·callbackasm1(SB)
- MOVW $1874, R12
- B runtime·callbackasm1(SB)
- MOVW $1875, R12
- B runtime·callbackasm1(SB)
- MOVW $1876, R12
- B runtime·callbackasm1(SB)
- MOVW $1877, R12
- B runtime·callbackasm1(SB)
- MOVW $1878, R12
- B runtime·callbackasm1(SB)
- MOVW $1879, R12
- B runtime·callbackasm1(SB)
- MOVW $1880, R12
- B runtime·callbackasm1(SB)
- MOVW $1881, R12
- B runtime·callbackasm1(SB)
- MOVW $1882, R12
- B runtime·callbackasm1(SB)
- MOVW $1883, R12
- B runtime·callbackasm1(SB)
- MOVW $1884, R12
- B runtime·callbackasm1(SB)
- MOVW $1885, R12
- B runtime·callbackasm1(SB)
- MOVW $1886, R12
- B runtime·callbackasm1(SB)
- MOVW $1887, R12
- B runtime·callbackasm1(SB)
- MOVW $1888, R12
- B runtime·callbackasm1(SB)
- MOVW $1889, R12
- B runtime·callbackasm1(SB)
- MOVW $1890, R12
- B runtime·callbackasm1(SB)
- MOVW $1891, R12
- B runtime·callbackasm1(SB)
- MOVW $1892, R12
- B runtime·callbackasm1(SB)
- MOVW $1893, R12
- B runtime·callbackasm1(SB)
- MOVW $1894, R12
- B runtime·callbackasm1(SB)
- MOVW $1895, R12
- B runtime·callbackasm1(SB)
- MOVW $1896, R12
- B runtime·callbackasm1(SB)
- MOVW $1897, R12
- B runtime·callbackasm1(SB)
- MOVW $1898, R12
- B runtime·callbackasm1(SB)
- MOVW $1899, R12
- B runtime·callbackasm1(SB)
- MOVW $1900, R12
- B runtime·callbackasm1(SB)
- MOVW $1901, R12
- B runtime·callbackasm1(SB)
- MOVW $1902, R12
- B runtime·callbackasm1(SB)
- MOVW $1903, R12
- B runtime·callbackasm1(SB)
- MOVW $1904, R12
- B runtime·callbackasm1(SB)
- MOVW $1905, R12
- B runtime·callbackasm1(SB)
- MOVW $1906, R12
- B runtime·callbackasm1(SB)
- MOVW $1907, R12
- B runtime·callbackasm1(SB)
- MOVW $1908, R12
- B runtime·callbackasm1(SB)
- MOVW $1909, R12
- B runtime·callbackasm1(SB)
- MOVW $1910, R12
- B runtime·callbackasm1(SB)
- MOVW $1911, R12
- B runtime·callbackasm1(SB)
- MOVW $1912, R12
- B runtime·callbackasm1(SB)
- MOVW $1913, R12
- B runtime·callbackasm1(SB)
- MOVW $1914, R12
- B runtime·callbackasm1(SB)
- MOVW $1915, R12
- B runtime·callbackasm1(SB)
- MOVW $1916, R12
- B runtime·callbackasm1(SB)
- MOVW $1917, R12
- B runtime·callbackasm1(SB)
- MOVW $1918, R12
- B runtime·callbackasm1(SB)
- MOVW $1919, R12
- B runtime·callbackasm1(SB)
- MOVW $1920, R12
- B runtime·callbackasm1(SB)
- MOVW $1921, R12
- B runtime·callbackasm1(SB)
- MOVW $1922, R12
- B runtime·callbackasm1(SB)
- MOVW $1923, R12
- B runtime·callbackasm1(SB)
- MOVW $1924, R12
- B runtime·callbackasm1(SB)
- MOVW $1925, R12
- B runtime·callbackasm1(SB)
- MOVW $1926, R12
- B runtime·callbackasm1(SB)
- MOVW $1927, R12
- B runtime·callbackasm1(SB)
- MOVW $1928, R12
- B runtime·callbackasm1(SB)
- MOVW $1929, R12
- B runtime·callbackasm1(SB)
- MOVW $1930, R12
- B runtime·callbackasm1(SB)
- MOVW $1931, R12
- B runtime·callbackasm1(SB)
- MOVW $1932, R12
- B runtime·callbackasm1(SB)
- MOVW $1933, R12
- B runtime·callbackasm1(SB)
- MOVW $1934, R12
- B runtime·callbackasm1(SB)
- MOVW $1935, R12
- B runtime·callbackasm1(SB)
- MOVW $1936, R12
- B runtime·callbackasm1(SB)
- MOVW $1937, R12
- B runtime·callbackasm1(SB)
- MOVW $1938, R12
- B runtime·callbackasm1(SB)
- MOVW $1939, R12
- B runtime·callbackasm1(SB)
- MOVW $1940, R12
- B runtime·callbackasm1(SB)
- MOVW $1941, R12
- B runtime·callbackasm1(SB)
- MOVW $1942, R12
- B runtime·callbackasm1(SB)
- MOVW $1943, R12
- B runtime·callbackasm1(SB)
- MOVW $1944, R12
- B runtime·callbackasm1(SB)
- MOVW $1945, R12
- B runtime·callbackasm1(SB)
- MOVW $1946, R12
- B runtime·callbackasm1(SB)
- MOVW $1947, R12
- B runtime·callbackasm1(SB)
- MOVW $1948, R12
- B runtime·callbackasm1(SB)
- MOVW $1949, R12
- B runtime·callbackasm1(SB)
- MOVW $1950, R12
- B runtime·callbackasm1(SB)
- MOVW $1951, R12
- B runtime·callbackasm1(SB)
- MOVW $1952, R12
- B runtime·callbackasm1(SB)
- MOVW $1953, R12
- B runtime·callbackasm1(SB)
- MOVW $1954, R12
- B runtime·callbackasm1(SB)
- MOVW $1955, R12
- B runtime·callbackasm1(SB)
- MOVW $1956, R12
- B runtime·callbackasm1(SB)
- MOVW $1957, R12
- B runtime·callbackasm1(SB)
- MOVW $1958, R12
- B runtime·callbackasm1(SB)
- MOVW $1959, R12
- B runtime·callbackasm1(SB)
- MOVW $1960, R12
- B runtime·callbackasm1(SB)
- MOVW $1961, R12
- B runtime·callbackasm1(SB)
- MOVW $1962, R12
- B runtime·callbackasm1(SB)
- MOVW $1963, R12
- B runtime·callbackasm1(SB)
- MOVW $1964, R12
- B runtime·callbackasm1(SB)
- MOVW $1965, R12
- B runtime·callbackasm1(SB)
- MOVW $1966, R12
- B runtime·callbackasm1(SB)
- MOVW $1967, R12
- B runtime·callbackasm1(SB)
- MOVW $1968, R12
- B runtime·callbackasm1(SB)
- MOVW $1969, R12
- B runtime·callbackasm1(SB)
- MOVW $1970, R12
- B runtime·callbackasm1(SB)
- MOVW $1971, R12
- B runtime·callbackasm1(SB)
- MOVW $1972, R12
- B runtime·callbackasm1(SB)
- MOVW $1973, R12
- B runtime·callbackasm1(SB)
- MOVW $1974, R12
- B runtime·callbackasm1(SB)
- MOVW $1975, R12
- B runtime·callbackasm1(SB)
- MOVW $1976, R12
- B runtime·callbackasm1(SB)
- MOVW $1977, R12
- B runtime·callbackasm1(SB)
- MOVW $1978, R12
- B runtime·callbackasm1(SB)
- MOVW $1979, R12
- B runtime·callbackasm1(SB)
- MOVW $1980, R12
- B runtime·callbackasm1(SB)
- MOVW $1981, R12
- B runtime·callbackasm1(SB)
- MOVW $1982, R12
- B runtime·callbackasm1(SB)
- MOVW $1983, R12
- B runtime·callbackasm1(SB)
- MOVW $1984, R12
- B runtime·callbackasm1(SB)
- MOVW $1985, R12
- B runtime·callbackasm1(SB)
- MOVW $1986, R12
- B runtime·callbackasm1(SB)
- MOVW $1987, R12
- B runtime·callbackasm1(SB)
- MOVW $1988, R12
- B runtime·callbackasm1(SB)
- MOVW $1989, R12
- B runtime·callbackasm1(SB)
- MOVW $1990, R12
- B runtime·callbackasm1(SB)
- MOVW $1991, R12
- B runtime·callbackasm1(SB)
- MOVW $1992, R12
- B runtime·callbackasm1(SB)
- MOVW $1993, R12
- B runtime·callbackasm1(SB)
- MOVW $1994, R12
- B runtime·callbackasm1(SB)
- MOVW $1995, R12
- B runtime·callbackasm1(SB)
- MOVW $1996, R12
- B runtime·callbackasm1(SB)
- MOVW $1997, R12
- B runtime·callbackasm1(SB)
- MOVW $1998, R12
- B runtime·callbackasm1(SB)
- MOVW $1999, R12
- B runtime·callbackasm1(SB)
diff --git a/src/syscall/dirent_test.go b/src/syscall/dirent_test.go
index cfa5478feb1bc8..173ccc3ed28ef3 100644
--- a/src/syscall/dirent_test.go
+++ b/src/syscall/dirent_test.go
@@ -140,7 +140,7 @@ func TestDirentRepeat(t *testing.T) {
// Check results
slices.Sort(files)
slices.Sort(files2)
- if strings.Join(files, "|") != strings.Join(files2, "|") {
+ if !slices.Equal(files, files2) {
t.Errorf("bad file list: want\n%q\ngot\n%q", files, files2)
}
}
diff --git a/src/syscall/getdirentries_test.go b/src/syscall/getdirentries_test.go
index 5d401d8dd6fa2c..b5361ddaef7024 100644
--- a/src/syscall/getdirentries_test.go
+++ b/src/syscall/getdirentries_test.go
@@ -11,7 +11,6 @@ import (
"os"
"path/filepath"
"slices"
- "strings"
"syscall"
"testing"
"unsafe"
@@ -78,7 +77,7 @@ func testGetdirentries(t *testing.T, count int) {
names = append(names, ".", "..") // Getdirentries returns these also
slices.Sort(names)
slices.Sort(names2)
- if strings.Join(names, ":") != strings.Join(names2, ":") {
+ if !slices.Equal(names, names2) {
t.Errorf("names don't match\n names: %q\nnames2: %q", names, names2)
}
}
diff --git a/src/syscall/syscall_unix.go b/src/syscall/syscall_unix.go
index ecd5952975a73d..7de2272b591b95 100644
--- a/src/syscall/syscall_unix.go
+++ b/src/syscall/syscall_unix.go
@@ -410,17 +410,25 @@ func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error)
}
func sendmsgNInet4(fd int, p, oob []byte, to *SockaddrInet4, flags int) (n int, err error) {
- ptr, salen, err := to.sockaddr()
- if err != nil {
- return 0, err
+ var ptr unsafe.Pointer
+ var salen _Socklen
+ if to != nil {
+ ptr, salen, err = to.sockaddr()
+ if err != nil {
+ return 0, err
+ }
}
return sendmsgN(fd, p, oob, ptr, salen, flags)
}
func sendmsgNInet6(fd int, p, oob []byte, to *SockaddrInet6, flags int) (n int, err error) {
- ptr, salen, err := to.sockaddr()
- if err != nil {
- return 0, err
+ var ptr unsafe.Pointer
+ var salen _Socklen
+ if to != nil {
+ ptr, salen, err = to.sockaddr()
+ if err != nil {
+ return 0, err
+ }
}
return sendmsgN(fd, p, oob, ptr, salen, flags)
}
diff --git a/src/syscall/syscall_windows.go b/src/syscall/syscall_windows.go
index 01c039cf287047..c1416b3731056b 100644
--- a/src/syscall/syscall_windows.go
+++ b/src/syscall/syscall_windows.go
@@ -14,7 +14,6 @@ import (
"internal/msan"
"internal/oserror"
"internal/race"
- "runtime"
"sync"
"unsafe"
)
@@ -525,18 +524,8 @@ func setFilePointerEx(handle Handle, distToMove int64, newFilePointer *int64, wh
if unsafe.Sizeof(uintptr(0)) == 8 {
_, _, e1 = Syscall6(procSetFilePointerEx.Addr(), 4, uintptr(handle), uintptr(distToMove), uintptr(unsafe.Pointer(newFilePointer)), uintptr(whence), 0, 0)
} else {
- // Different 32-bit systems disgaree about whether distToMove starts 8-byte aligned.
- switch runtime.GOARCH {
- default:
- panic("unsupported 32-bit architecture")
- case "386":
- // distToMove is a LARGE_INTEGER, which is 64 bits.
- _, _, e1 = Syscall6(procSetFilePointerEx.Addr(), 5, uintptr(handle), uintptr(distToMove), uintptr(distToMove>>32), uintptr(unsafe.Pointer(newFilePointer)), uintptr(whence), 0)
- case "arm":
- // distToMove must be 8-byte aligned per ARM calling convention
- // https://docs.microsoft.com/en-us/cpp/build/overview-of-arm-abi-conventions#stage-c-assignment-of-arguments-to-registers-and-stack
- _, _, e1 = Syscall6(procSetFilePointerEx.Addr(), 6, uintptr(handle), 0, uintptr(distToMove), uintptr(distToMove>>32), uintptr(unsafe.Pointer(newFilePointer)), uintptr(whence))
- }
+ // distToMove is a LARGE_INTEGER, which is 64 bits.
+ _, _, e1 = Syscall6(procSetFilePointerEx.Addr(), 5, uintptr(handle), uintptr(distToMove), uintptr(distToMove>>32), uintptr(unsafe.Pointer(newFilePointer)), uintptr(whence), 0)
}
if e1 != 0 {
return errnoErr(e1)
diff --git a/src/syscall/types_windows_arm.go b/src/syscall/types_windows_arm.go
deleted file mode 100644
index e72e9f5ced2bd0..00000000000000
--- a/src/syscall/types_windows_arm.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package syscall
-
-type WSAData struct {
- Version uint16
- HighVersion uint16
- Description [WSADESCRIPTION_LEN + 1]byte
- SystemStatus [WSASYS_STATUS_LEN + 1]byte
- MaxSockets uint16
- MaxUdpDg uint16
- VendorInfo *byte
-}
-
-type Servent struct {
- Name *byte
- Aliases **byte
- Port uint16
- Proto *byte
-}
diff --git a/src/text/template/funcs.go b/src/text/template/funcs.go
index 4d733135fe5a85..c28c3ea2002d21 100644
--- a/src/text/template/funcs.go
+++ b/src/text/template/funcs.go
@@ -62,26 +62,13 @@ func builtins() FuncMap {
}
}
-var builtinFuncsOnce struct {
- sync.Once
- v map[string]reflect.Value
-}
-
-// builtinFuncsOnce lazily computes & caches the builtinFuncs map.
-// TODO: revert this back to a global map once golang.org/issue/2559 is fixed.
-func builtinFuncs() map[string]reflect.Value {
- builtinFuncsOnce.Do(func() {
- builtinFuncsOnce.v = createValueFuncs(builtins())
- })
- return builtinFuncsOnce.v
-}
-
-// createValueFuncs turns a FuncMap into a map[string]reflect.Value
-func createValueFuncs(funcMap FuncMap) map[string]reflect.Value {
- m := make(map[string]reflect.Value)
+// builtinFuncs lazily computes & caches the map of builtin functions.
+var builtinFuncs = sync.OnceValue(func() map[string]reflect.Value {
+ funcMap := builtins()
+ m := make(map[string]reflect.Value, len(funcMap))
addValueFuncs(m, funcMap)
return m
-}
+})
// addValueFuncs adds to values the functions in funcs, converting them to reflect.Values.
func addValueFuncs(out map[string]reflect.Value, in FuncMap) {
diff --git a/src/time/tick_test.go b/src/time/tick_test.go
index 416bef59ee91ee..d89d2dbdeadf2f 100644
--- a/src/time/tick_test.go
+++ b/src/time/tick_test.go
@@ -151,7 +151,7 @@ func TestTickerResetLtZeroDuration(t *testing.T) {
}
func TestLongAdjustTimers(t *testing.T) {
- if runtime.GOOS == "android" || runtime.GOOS == "ios" {
+ if runtime.GOOS == "android" || runtime.GOOS == "ios" || runtime.GOOS == "plan9" {
t.Skipf("skipping on %s - too slow", runtime.GOOS)
}
t.Parallel()
diff --git a/src/vendor/modules.txt b/src/vendor/modules.txt
index 8507f01b12f518..645cec45e2f7eb 100644
--- a/src/vendor/modules.txt
+++ b/src/vendor/modules.txt
@@ -1,4 +1,4 @@
-# golang.org/x/crypto v0.39.0
+# golang.org/x/crypto v0.41.0
## explicit; go 1.23.0
golang.org/x/crypto/chacha20
golang.org/x/crypto/chacha20poly1305
@@ -6,7 +6,7 @@ golang.org/x/crypto/cryptobyte
golang.org/x/crypto/cryptobyte/asn1
golang.org/x/crypto/internal/alias
golang.org/x/crypto/internal/poly1305
-# golang.org/x/net v0.41.0
+# golang.org/x/net v0.43.0
## explicit; go 1.23.0
golang.org/x/net/dns/dnsmessage
golang.org/x/net/http/httpguts
@@ -15,10 +15,10 @@ golang.org/x/net/http2/hpack
golang.org/x/net/idna
golang.org/x/net/lif
golang.org/x/net/nettest
-# golang.org/x/sys v0.33.0
+# golang.org/x/sys v0.35.0
## explicit; go 1.23.0
golang.org/x/sys/cpu
-# golang.org/x/text v0.26.0
+# golang.org/x/text v0.28.0
## explicit; go 1.23.0
golang.org/x/text/secure/bidirule
golang.org/x/text/transform
diff --git a/test/codegen/arithmetic.go b/test/codegen/arithmetic.go
index 9f400065bdb696..67adb50fa59bbf 100644
--- a/test/codegen/arithmetic.go
+++ b/test/codegen/arithmetic.go
@@ -257,7 +257,7 @@ func Mul_96(n int) int {
// 386:`SHLL\t[$]5`,`LEAL\t\(.*\)\(.*\*2\),`,-`IMULL`
// arm64:`LSL\t[$]5`,`ADD\sR[0-9]+<<1,\sR[0-9]+`,-`MUL`
// arm:`SLL\t[$]5`,`ADD\sR[0-9]+<<1,\sR[0-9]+`,-`MUL`
- // loong64:"ADDVU","SLLV\t[$]5",-"MULV"
+ // loong64:"SLLV\t[$]5","ALSLV\t[$]1,"
// s390x:`SLD\t[$]5`,`SLD\t[$]6`,-`MULLD`
return n * 96
}
@@ -314,6 +314,18 @@ func MergeMuls5(a, n int) int {
return a*n - 19*n // (a-19)n
}
+// Multiplications with folded negation
+
+func FoldNegMul(a int) int {
+ // loong64:"SUBVU","ALSLV\t[$]2","ALSLV\t[$]1"
+ return (-a) * 11
+}
+
+func Fold2NegMul(a, b int) int {
+ // loong64:"MULV",-"SUBVU\tR[0-9], R0,"
+ return (-a) * (-b)
+}
+
// -------------- //
// Division //
// -------------- //
diff --git a/test/codegen/fuse.go b/test/codegen/fuse.go
index 79dd337dee2234..8d6ea3c5c74664 100644
--- a/test/codegen/fuse.go
+++ b/test/codegen/fuse.go
@@ -195,3 +195,24 @@ func ui4d(c <-chan uint8) {
for x := <-c; x < 126 || x >= 128; x = <-c {
}
}
+
+// ------------------------------------ //
+// regressions //
+// ------------------------------------ //
+
+func gte4(x uint64) bool {
+ return x >= 4
+}
+
+func lt20(x uint64) bool {
+ return x < 20
+}
+
+func issue74915(c <-chan uint64) {
+ // Check that the optimization is not blocked by function inlining.
+
+ // amd64:"CMPQ\t.+, [$]16","ADDQ\t[$]-4,"
+ // s390x:"CLGIJ\t[$]4, R[0-9]+, [$]16","ADD\t[$]-4,"
+ for x := <-c; gte4(x) && lt20(x); x = <-c {
+ }
+}
diff --git a/test/codegen/issue74788.go b/test/codegen/issue74788.go
index d04a89b42edb6e..e102f638273cbc 100644
--- a/test/codegen/issue74788.go
+++ b/test/codegen/issue74788.go
@@ -7,11 +7,11 @@
package codegen
func fa(a [2]int) (r [2]int) {
- // amd64:1`MOVUPS[^,]+, X0$`,1`MOVUPS\sX0,[^\n]+$`
+ // amd64:1`MOVUPS[^,]+, X[0-9]+$`,1`MOVUPS\sX[0-9]+,[^\n]+$`
return a
}
func fb(a [4]int) (r [4]int) {
- // amd64:2`MOVUPS[^,]+, X0$`,2`MOVUPS\sX0,[^\n]+$`
+ // amd64:2`MOVUPS[^,]+, X[0-9]+$`,2`MOVUPS\sX[0-9]+,[^\n]+$`
return a
}
diff --git a/test/codegen/math.go b/test/codegen/math.go
index 87d9cd7b2715ba..5b3e7272542387 100644
--- a/test/codegen/math.go
+++ b/test/codegen/math.go
@@ -154,12 +154,73 @@ func fnma(x, y, z float64) float64 {
return math.FMA(x, -y, -z)
}
+func isPosInf(x float64) bool {
+ // riscv64:"FCLASSD"
+ return math.IsInf(x, 1)
+}
+
+func isPosInfEq(x float64) bool {
+ // riscv64:"FCLASSD"
+ return x == math.Inf(1)
+}
+
+func isPosInfCmp(x float64) bool {
+ // riscv64:"FCLASSD"
+ return x > math.MaxFloat64
+}
+
+func isNotPosInf(x float64) bool {
+ // riscv64:"FCLASSD"
+ return !math.IsInf(x, 1)
+}
+
+func isNotPosInfEq(x float64) bool {
+ // riscv64:"FCLASSD"
+ return x != math.Inf(1)
+}
+
+func isNotPosInfCmp(x float64) bool {
+ // riscv64:"FCLASSD"
+ return x <= math.MaxFloat64
+}
+
+func isNegInf(x float64) bool {
+ // riscv64:"FCLASSD"
+ return math.IsInf(x, -1)
+}
+
+func isNegInfEq(x float64) bool {
+ // riscv64:"FCLASSD"
+ return x == math.Inf(-1)
+}
+
+func isNegInfCmp(x float64) bool {
+ // riscv64:"FCLASSD"
+ return x < -math.MaxFloat64
+}
+
+func isNotNegInf(x float64) bool {
+ // riscv64:"FCLASSD"
+ return !math.IsInf(x, -1)
+}
+
+func isNotNegInfEq(x float64) bool {
+ // riscv64:"FCLASSD"
+ return x != math.Inf(-1)
+}
+
+func isNotNegInfCmp(x float64) bool {
+ // riscv64:"FCLASSD"
+ return x >= -math.MaxFloat64
+}
+
func fromFloat64(f64 float64) uint64 {
// amd64:"MOVQ\tX.*, [^X].*"
// arm64:"FMOVD\tF.*, R.*"
// loong64:"MOVV\tF.*, R.*"
// ppc64x:"MFVSRD"
// mips64/hardfloat:"MOVV\tF.*, R.*"
+ // riscv64:"FMVXD"
return math.Float64bits(f64+1) + 1
}
@@ -168,6 +229,7 @@ func fromFloat32(f32 float32) uint32 {
// arm64:"FMOVS\tF.*, R.*"
// loong64:"MOVW\tF.*, R.*"
// mips64/hardfloat:"MOVW\tF.*, R.*"
+ // riscv64:"FMVXW"
return math.Float32bits(f32+1) + 1
}
@@ -177,6 +239,7 @@ func toFloat64(u64 uint64) float64 {
// loong64:"MOVV\tR.*, F.*"
// ppc64x:"MTVSRD"
// mips64/hardfloat:"MOVV\tR.*, F.*"
+ // riscv64:"FMVDX"
return math.Float64frombits(u64+1) + 1
}
@@ -185,6 +248,7 @@ func toFloat32(u32 uint32) float32 {
// arm64:"FMOVS\tR.*, F.*"
// loong64:"MOVW\tR.*, F.*"
// mips64/hardfloat:"MOVW\tR.*, F.*"
+ // riscv64:"FMVWX"
return math.Float32frombits(u32+1) + 1
}
diff --git a/test/codegen/multiply.go b/test/codegen/multiply.go
index bb22d1a2b19f36..dc2910dab7b6fb 100644
--- a/test/codegen/multiply.go
+++ b/test/codegen/multiply.go
@@ -24,7 +24,7 @@ func m2(x int64) int64 {
func m3(x int64) int64 {
// amd64: "LEAQ\t.*[*]2"
// arm64: "ADD\tR[0-9]+<<1,"
- // loong64: "ADDVU","ADDVU"
+ // loong64: "ALSLV\t[$]1,"
return x * 3
}
func m4(x int64) int64 {
@@ -36,19 +36,19 @@ func m4(x int64) int64 {
func m5(x int64) int64 {
// amd64: "LEAQ\t.*[*]4"
// arm64: "ADD\tR[0-9]+<<2,"
- // loong64: "SLLV\t[$]2,","ADDVU"
+ // loong64: "ALSLV\t[$]2,"
return x * 5
}
func m6(x int64) int64 {
// amd64: "LEAQ\t.*[*]1", "LEAQ\t.*[*]2"
// arm64: "ADD\tR[0-9]+,", "ADD\tR[0-9]+<<1,"
- // loong64: "ADDVU","ADDVU","ADDVU"
+ // loong64: "ADDVU", "ADDVU", "ADDVU"
return x * 6
}
func m7(x int64) int64 {
// amd64: "LEAQ\t.*[*]2"
// arm64: "LSL\t[$]3,", "SUB\tR[0-9]+,"
- // loong64: "SLLV\t[$]3,","SUBVU"
+ // loong64: "ALSLV\t[$]1,", "ALSLV\t[$]1,"
return x * 7
}
func m8(x int64) int64 {
@@ -60,43 +60,43 @@ func m8(x int64) int64 {
func m9(x int64) int64 {
// amd64: "LEAQ\t.*[*]8"
// arm64: "ADD\tR[0-9]+<<3,"
- // loong64: "SLLV\t[$]3,","ADDVU"
+ // loong64: "ALSLV\t[$]3,"
return x * 9
}
func m10(x int64) int64 {
// amd64: "LEAQ\t.*[*]1", "LEAQ\t.*[*]4"
// arm64: "ADD\tR[0-9]+,", "ADD\tR[0-9]+<<2,"
- // loong64: "ADDVU","SLLV\t[$]3,","ADDVU"
+ // loong64: "ADDVU", "ALSLV\t[$]2,"
return x * 10
}
func m11(x int64) int64 {
// amd64: "LEAQ\t.*[*]4", "LEAQ\t.*[*]2"
// arm64: "MOVD\t[$]11,", "MUL"
- // loong64: "MOVV\t[$]11,", "MULV"
+ // loong64: "ALSLV\t[$]2,", "ALSLV\t[$]1,"
return x * 11
}
func m12(x int64) int64 {
// amd64: "LEAQ\t.*[*]2", "SHLQ\t[$]2,"
// arm64: "LSL\t[$]2,", "ADD\tR[0-9]+<<1,"
- // loong64: "ADDVU","ADDVU","SLLV\t[$]2,"
+ // loong64: "SLLV", "ALSLV\t[$]1,"
return x * 12
}
func m13(x int64) int64 {
// amd64: "LEAQ\t.*[*]2", "LEAQ\t.*[*]4"
// arm64: "MOVD\t[$]13,", "MUL"
- // loong64: "MOVV\t[$]13,","MULV"
+ // loong64: "ALSLV\t[$]1,", "ALSLV\t[$]2,"
return x * 13
}
func m14(x int64) int64 {
// amd64: "IMUL3Q\t[$]14,"
// arm64: "LSL\t[$]4,", "SUB\tR[0-9]+<<1,"
- // loong64: "ADDVU","SLLV\t[$]4,","SUBVU"
+ // loong64: "ADDVU", "ALSLV\t[$]1", "ALSLV\t[$]2"
return x * 14
}
func m15(x int64) int64 {
// amd64: "LEAQ\t.*[*]2", "LEAQ\t.*[*]4"
// arm64: "LSL\t[$]4,", "SUB\tR[0-9]+,"
- // loong64: "SLLV\t[$]4,","SUBVU"
+ // loong64: "ALSLV\t[$]1,", "ALSLV\t[$]2,"
return x * 15
}
func m16(x int64) int64 {
@@ -108,79 +108,79 @@ func m16(x int64) int64 {
func m17(x int64) int64 {
// amd64: "LEAQ\t.*[*]1", "LEAQ\t.*[*]8"
// arm64: "ADD\tR[0-9]+<<4,"
- // loong64: "SLLV\t[$]4,","ADDVU"
+ // loong64: "ALSLV\t[$]"
return x * 17
}
func m18(x int64) int64 {
// amd64: "LEAQ\t.*[*]1", "LEAQ\t.*[*]8"
// arm64: "ADD\tR[0-9]+,", "ADD\tR[0-9]+<<3,"
- // loong64: "ADDVU","SLLV\t[$]4,","ADDVU"
+ // loong64: "ADDVU", "ALSLV\t[$]3,"
return x * 18
}
func m19(x int64) int64 {
// amd64: "LEAQ\t.*[*]8", "LEAQ\t.*[*]2"
// arm64: "MOVD\t[$]19,", "MUL"
- // loong64: "MOVV\t[$]19,","MULV"
+ // loong64: "ALSLV\t[$]3,", "ALSLV\t[$]1,"
return x * 19
}
func m20(x int64) int64 {
// amd64: "LEAQ\t.*[*]4", "SHLQ\t[$]2,"
// arm64: "LSL\t[$]2,", "ADD\tR[0-9]+<<2,"
- // loong64: "SLLV\t[$]2,","SLLV\t[$]4,","ADDVU"
+ // loong64: "SLLV\t[$]2,", "ALSLV\t[$]2,"
return x * 20
}
func m21(x int64) int64 {
// amd64: "LEAQ\t.*[*]4", "LEAQ\t.*[*]4"
// arm64: "MOVD\t[$]21,", "MUL"
- // loong64: "MOVV\t[$]21,","MULV"
+ // loong64: "ALSLV\t[$]2,", "ALSLV\t[$]2,"
return x * 21
}
func m22(x int64) int64 {
// amd64: "IMUL3Q\t[$]22,"
// arm64: "MOVD\t[$]22,", "MUL"
- // loong64: "MOVV\t[$]22,","MULV"
+ // loong64: "ADDVU", "ALSLV\t[$]2,", "ALSLV\t[$]2,"
return x * 22
}
func m23(x int64) int64 {
// amd64: "IMUL3Q\t[$]23,"
// arm64: "MOVD\t[$]23,", "MUL"
- // loong64: "MOVV\t[$]23,","MULV"
+ // loong64: "ALSLV\t[$]1,", "SUBVU", "ALSLV\t[$]3,"
return x * 23
}
func m24(x int64) int64 {
// amd64: "LEAQ\t.*[*]2", "SHLQ\t[$]3,"
// arm64: "LSL\t[$]3,", "ADD\tR[0-9]+<<1,"
- // loong64: "ADDVU","ADDVU","SLLV\t[$]3,"
+ // loong64: "SLLV\t[$]3", "ALSLV\t[$]1,"
return x * 24
}
func m25(x int64) int64 {
// amd64: "LEAQ\t.*[*]4", "LEAQ\t.*[*]4"
// arm64: "MOVD\t[$]25,", "MUL"
- // loong64: "MOVV\t[$]25,","MULV"
+ // loong64: "ALSLV\t[$]2,", "ALSLV\t[$]2,"
return x * 25
}
func m26(x int64) int64 {
// amd64: "IMUL3Q\t[$]26,"
// arm64: "MOVD\t[$]26,", "MUL"
- // loong64: "MOVV\t[$]26,","MULV"
+ // loong64: "ADDVU", "ALSLV\t[$]1,", "ALSLV\t[$]3,"
return x * 26
}
func m27(x int64) int64 {
// amd64: "LEAQ\t.*[*]2", "LEAQ\t.*[*]8"
// arm64: "MOVD\t[$]27,", "MUL"
- // loong64: "MOVV\t[$]27,","MULV"
+ // loong64: "ALSLV\t[$]1,", "ALSLV\t[$]3,"
return x * 27
}
func m28(x int64) int64 {
// amd64: "IMUL3Q\t[$]28,"
// arm64: "LSL\t[$]5, "SUB\tR[0-9]+<<2,"
- // loong64: "SLLV\t[$]5,","SLLV\t[$]2,","SUBVU"
+ // loong64: "ALSLV\t[$]1,","SLLV\t[$]2,","ALSLV\t[$]3,"
return x * 28
}
func m29(x int64) int64 {
// amd64: "IMUL3Q\t[$]29,"
// arm64: "MOVD\t[$]29,", "MUL"
- // loong64: "MOVV\t[$]29,","MULV"
+ // loong64: "ALSLV\t[$]1,","SLLV\t[$]5,","SUBVU"
return x * 29
}
func m30(x int64) int64 {
@@ -204,49 +204,49 @@ func m32(x int64) int64 {
func m33(x int64) int64 {
// amd64: "SHLQ\t[$]2,", "LEAQ\t.*[*]8"
// arm64: "ADD\tR[0-9]+<<5,"
- // loong64: "SLLV\t[$]5,","ADDVU"
+ // loong64: "ADDVU", "ALSLV\t[$]4,"
return x * 33
}
func m34(x int64) int64 {
// amd64: "SHLQ\t[$]5,", "LEAQ\t.*[*]2"
// arm64: "ADD\tR[0-9]+,", "ADD\tR[0-9]+<<4,"
- // loong64: "ADDVU","SLLV\t[$]5,","ADDVU"
+ // loong64: "ADDVU", "ALSLV\t[$]4,"
return x * 34
}
func m35(x int64) int64 {
// amd64: "IMUL3Q\t[$]35,"
// arm64: "MOVD\t[$]35,", "MUL"
- // loong64: "MOVV\t[$]35,","MULV"
+ // loong64: "ALSLV\t[$]4,", "ALSLV\t[$]1,"
return x * 35
}
func m36(x int64) int64 {
// amd64: "LEAQ\t.*[*]8", "SHLQ\t[$]2,"
// arm64: "LSL\t[$]2,", "ADD\tR[0-9]+<<3,"
- // loong64: "SLLV\t[$]2,","SLLV\t[$]5,","ADDVU"
+ // loong64: "SLLV\t[$]2,", "ALSLV\t[$]3,"
return x * 36
}
func m37(x int64) int64 {
// amd64: "LEAQ\t.*[*]8", "LEAQ\t.*[*]4"
// arm64: "MOVD\t[$]37,", "MUL"
- // loong64: "MOVV\t[$]37,","MULV"
+ // loong64: "ALSLV\t[$]3,", "ALSLV\t[$]2,"
return x * 37
}
func m38(x int64) int64 {
// amd64: "IMUL3Q\t[$]38,"
// arm64: "MOVD\t[$]38,", "MUL"
- // loong64: "MOVV\t[$]38,","MULV"
+ // loong64: "ALSLV\t[$]3,", "ALSLV\t[$]2,"
return x * 38
}
func m39(x int64) int64 {
// amd64: "IMUL3Q\t[$]39,"
// arm64: "MOVD\t[$]39,", "MUL"
- // loong64: "MOVV\t[$]39,", "MULV"
+ // loong64: "ALSLV\t[$]2,", "SUBVU", "ALSLV\t[$]3,"
return x * 39
}
func m40(x int64) int64 {
// amd64: "LEAQ\t.*[*]4", "SHLQ\t[$]3,"
// arm64: "LSL\t[$]3,", "ADD\tR[0-9]+<<2,"
- // loong64: "SLLV\t[$]3,","SLLV\t[$]5,","ADDVU"
+ // loong64: "SLLV\t[$]3,", "ALSLV\t[$]2,"
return x * 40
}
@@ -265,7 +265,7 @@ func mn2(x int64) int64 {
func mn3(x int64) int64 {
// amd64: "NEGQ", "LEAQ\t.*[*]2"
// arm64: "SUB\tR[0-9]+<<2,"
- // loong64: "SLLV\t[$]2,","SUBVU"
+ // loong64: "SUBVU", "ALSLV\t[$]1,"
return x * -3
}
func mn4(x int64) int64 {
@@ -277,19 +277,19 @@ func mn4(x int64) int64 {
func mn5(x int64) int64 {
// amd64: "NEGQ", "LEAQ\t.*[*]4"
// arm64: "NEG\tR[0-9]+,", "ADD\tR[0-9]+<<2,"
- // loong64: "SUBVU\tR[0-9], R0,","SLLV\t[$]2,","SUBVU"
+ // loong64: "SUBVU", "ALSLV\t[$]2,"
return x * -5
}
func mn6(x int64) int64 {
// amd64: "IMUL3Q\t[$]-6,"
// arm64: "ADD\tR[0-9]+,", "SUB\tR[0-9]+<<2,"
- // loong64: "ADDVU","SLLV\t[$]3,","SUBVU"
+ // loong64: "ADDVU", "SUBVU", "ALSLV\t[$]3,"
return x * -6
}
func mn7(x int64) int64 {
// amd64: "NEGQ", "LEAQ\t.*[*]8"
// arm64: "SUB\tR[0-9]+<<3,"
- // loong64: "SLLV\t[$]3","SUBVU"
+ // loong64: "SUBVU", "ALSLV\t[$]3,"
return x * -7
}
func mn8(x int64) int64 {
@@ -301,43 +301,43 @@ func mn8(x int64) int64 {
func mn9(x int64) int64 {
// amd64: "NEGQ", "LEAQ\t.*[*]8"
// arm64: "NEG\tR[0-9]+,", "ADD\tR[0-9]+<<3,"
- // loong64: "SUBVU\tR[0-9], R0,","SLLV\t[$]3","SUBVU"
+ // loong64: "SUBVU", "ALSLV\t[$]3,"
return x * -9
}
func mn10(x int64) int64 {
// amd64: "IMUL3Q\t[$]-10,"
// arm64: "MOVD\t[$]-10,", "MUL"
- // loong64: "MOVV\t[$]-10","MULV"
+ // loong64: "ADDVU", "ALSLV\t[$]3", "SUBVU"
return x * -10
}
func mn11(x int64) int64 {
// amd64: "IMUL3Q\t[$]-11,"
// arm64: "MOVD\t[$]-11,", "MUL"
- // loong64: "MOVV\t[$]-11","MULV"
+ // loong64: "ALSLV\t[$]2,", "SUBVU", "ALSLV\t[$]4,"
return x * -11
}
func mn12(x int64) int64 {
// amd64: "IMUL3Q\t[$]-12,"
// arm64: "LSL\t[$]2,", "SUB\tR[0-9]+<<2,"
- // loong64: "SLLV\t[$]2,","SLLV\t[$]4,","SUBVU"
+ // loong64: "SUBVU", "SLLV\t[$]2,", "ALSLV\t[$]4,"
return x * -12
}
func mn13(x int64) int64 {
// amd64: "IMUL3Q\t[$]-13,"
// arm64: "MOVD\t[$]-13,", "MUL"
- // loong64: "MOVV\t[$]-13","MULV"
+ // loong64: "ALSLV\t[$]4,", "SLLV\t[$]2, ", "SUBVU"
return x * -13
}
func mn14(x int64) int64 {
// amd64: "IMUL3Q\t[$]-14,"
// arm64: "ADD\tR[0-9]+,", "SUB\tR[0-9]+<<3,"
- // loong64: "ADDVU","SLLV\t[$]4,","SUBVU"
+ // loong64: "ADDVU", "SUBVU", "ALSLV\t[$]4,"
return x * -14
}
func mn15(x int64) int64 {
// amd64: "SHLQ\t[$]4,", "SUBQ"
// arm64: "SUB\tR[0-9]+<<4,"
- // loong64: "SLLV\t[$]4,","SUBVU"
+ // loong64: "SUBVU", "ALSLV\t[$]4,"
return x * -15
}
func mn16(x int64) int64 {
@@ -349,24 +349,24 @@ func mn16(x int64) int64 {
func mn17(x int64) int64 {
// amd64: "IMUL3Q\t[$]-17,"
// arm64: "NEG\tR[0-9]+,", "ADD\tR[0-9]+<<4,"
- // loong64: "SUBVU\tR[0-9], R0,","SLLV\t[$]4,","SUBVU"
+ // loong64: "SUBVU", "ALSLV\t[$]4,"
return x * -17
}
func mn18(x int64) int64 {
// amd64: "IMUL3Q\t[$]-18,"
// arm64: "MOVD\t[$]-18,", "MUL"
- // loong64: "MOVV\t[$]-18","MULV"
+ // loong64: "ADDVU", "ALSLV\t[$]4,", "SUBVU"
return x * -18
}
func mn19(x int64) int64 {
// amd64: "IMUL3Q\t[$]-19,"
// arm64: "MOVD\t[$]-19,", "MUL"
- // loong64: "MOVV\t[$]-19","MULV"
+ // loong64: "ALSLV\t[$]1,", "ALSLV\t[$]4,", "SUBVU"
return x * -19
}
func mn20(x int64) int64 {
// amd64: "IMUL3Q\t[$]-20,"
// arm64: "MOVD\t[$]-20,", "MUL"
- // loong64: "MOVV\t[$]-20","MULV"
+ // loong64: "SLLV\t[$]2,", "ALSLV\t[$]4,", "SUBVU"
return x * -20
}
diff --git a/test/fixedbugs/issue68054.go b/test/fixedbugs/issue68054.go
index 5409fc90818003..e9f95efa14991e 100644
--- a/test/fixedbugs/issue68054.go
+++ b/test/fixedbugs/issue68054.go
@@ -1,4 +1,4 @@
-// compile -goexperiment aliastypeparams
+// compile
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
diff --git a/test/fixedbugs/issue68526.dir/a/a.go b/test/fixedbugs/issue68526.dir/a/a.go
index 83462c7fb99c86..6249eb59df032b 100644
--- a/test/fixedbugs/issue68526.dir/a/a.go
+++ b/test/fixedbugs/issue68526.dir/a/a.go
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build goexperiment.aliastypeparams
-
package a
type A[T any] = struct{ F T }
diff --git a/test/fixedbugs/issue68526.dir/main.go b/test/fixedbugs/issue68526.dir/main.go
index 966efd71900234..8b72ea37b69ebd 100644
--- a/test/fixedbugs/issue68526.dir/main.go
+++ b/test/fixedbugs/issue68526.dir/main.go
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build goexperiment.aliastypeparams
-
package main
import (
diff --git a/test/fixedbugs/issue68526.go b/test/fixedbugs/issue68526.go
index 3067aa76222941..aca6354b7c6948 100644
--- a/test/fixedbugs/issue68526.go
+++ b/test/fixedbugs/issue68526.go
@@ -1,4 +1,4 @@
-// runindir -goexperiment aliastypeparams -gomodversion "1.23"
+// runindir -gomodversion "1.23"
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
diff --git a/test/fixedbugs/issue68580.go b/test/fixedbugs/issue68580.go
index b60a7447aaa77b..5c25d15b4362d2 100644
--- a/test/fixedbugs/issue68580.go
+++ b/test/fixedbugs/issue68580.go
@@ -1,4 +1,4 @@
-// compile -goexperiment aliastypeparams
+// compile
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
diff --git a/test/prove.go b/test/prove.go
index ef7690bbde6a37..70a27865cfd7c3 100644
--- a/test/prove.go
+++ b/test/prove.go
@@ -2330,6 +2330,18 @@ func issue74473(s []uint) {
}
}
+func setCapMaxBasedOnElementSize(x []uint64) int {
+ c := uintptr(cap(x))
+ max := ^uintptr(0) >> 3
+ if c > max { // ERROR "Disproved Less"
+ return 42
+ }
+ if c <= max { // ERROR "Proved Leq"
+ return 1337
+ }
+ return 0
+}
+
//go:noinline
func useInt(a int) {
}