From 9ae8f71c9431d287893443fa2b7fbdb72a9b56a2 Mon Sep 17 00:00:00 2001 From: Jeremy Faller Date: Mon, 3 Aug 2020 13:19:46 -0400 Subject: [PATCH 001/281] [dev.link] cmd/link: stop renumbering files for pclntab generation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Creates two new symbols: runtime.cutab, and runtime.filetab, and strips the filenames out of runtime.pclntab_old. All stats are for cmd/compile. Time: Pclntab_GC 48.2ms ± 3% 45.5ms ± 9% -5.47% (p=0.004 n=9+9) Alloc/op: Pclntab_GC 30.0MB ± 0% 29.5MB ± 0% -1.88% (p=0.000 n=10+10) Allocs/op: Pclntab_GC 90.4k ± 0% 73.1k ± 0% -19.11% (p=0.000 n=10+10) live-B: Pclntab_GC 29.1M ± 0% 29.2M ± 0% +0.10% (p=0.000 n=10+10) binary sizes: NEW: 18565600 OLD: 18532768 The size differences in the binary are caused by the increased size of the Func objects, and (less likely) some extra alignment padding needed as a result. This is probably the maximum increase in size we'll see from the pclntab reworking. 
Change-Id: Idd95a9b159fea46f7701cfe6506813b88257fbea Reviewed-on: https://go-review.googlesource.com/c/go/+/246497 Run-TryBot: Jeremy Faller TryBot-Result: Gobot Gobot Reviewed-by: Than McIntosh Reviewed-by: Austin Clements --- src/cmd/link/internal/ld/data.go | 4 + src/cmd/link/internal/ld/link.go | 1 - src/cmd/link/internal/ld/pcln.go | 303 ++++++++++++++++------------- src/cmd/link/internal/ld/symtab.go | 12 +- src/debug/gosym/pclntab.go | 52 +++-- src/runtime/runtime2.go | 7 +- src/runtime/symtab.go | 19 +- 7 files changed, 231 insertions(+), 167 deletions(-) diff --git a/src/cmd/link/internal/ld/data.go b/src/cmd/link/internal/ld/data.go index dc7096ea8c..a551d46403 100644 --- a/src/cmd/link/internal/ld/data.go +++ b/src/cmd/link/internal/ld/data.go @@ -1923,6 +1923,8 @@ func (state *dodataState) allocateDataSections(ctxt *Link) { ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.pclntab", 0), sect) ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.pcheader", 0), sect) ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.funcnametab", 0), sect) + ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.cutab", 0), sect) + ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.filetab", 0), sect) ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.pclntab_old", 0), sect) ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.epclntab", 0), sect) if ctxt.HeadType == objabi.Haix { @@ -2507,6 +2509,8 @@ func (ctxt *Link) address() []*sym.Segment { ctxt.xdefine("runtime.pclntab", sym.SRODATA, int64(pclntab.Vaddr)) ctxt.defineInternal("runtime.pcheader", sym.SRODATA) ctxt.defineInternal("runtime.funcnametab", sym.SRODATA) + ctxt.defineInternal("runtime.cutab", sym.SRODATA) + ctxt.defineInternal("runtime.filetab", sym.SRODATA) ctxt.defineInternal("runtime.pclntab_old", sym.SRODATA) ctxt.xdefine("runtime.epclntab", sym.SRODATA, int64(pclntab.Vaddr+pclntab.Length)) ctxt.xdefine("runtime.noptrdata", sym.SNOPTRDATA, int64(noptr.Vaddr)) diff --git a/src/cmd/link/internal/ld/link.go b/src/cmd/link/internal/ld/link.go 
index a2c8552e94..f26d051a49 100644 --- a/src/cmd/link/internal/ld/link.go +++ b/src/cmd/link/internal/ld/link.go @@ -71,7 +71,6 @@ type Link struct { LibraryByPkg map[string]*sym.Library Shlibs []Shlib Textp []loader.Sym - NumFilesyms int Moduledata loader.Sym PackageFile map[string]string diff --git a/src/cmd/link/internal/ld/pcln.go b/src/cmd/link/internal/ld/pcln.go index 30e0bdc839..c7535f6a61 100644 --- a/src/cmd/link/internal/ld/pcln.go +++ b/src/cmd/link/internal/ld/pcln.go @@ -6,16 +6,11 @@ package ld import ( "cmd/internal/goobj" - "cmd/internal/obj" "cmd/internal/objabi" - "cmd/internal/src" "cmd/internal/sys" "cmd/link/internal/loader" "cmd/link/internal/sym" - "encoding/binary" "fmt" - "log" - "math" "os" "path/filepath" "strings" @@ -23,18 +18,13 @@ import ( // oldPclnState holds state information used during pclntab generation. Here // 'ldr' is just a pointer to the context's loader, 'deferReturnSym' is the -// index for the symbol "runtime.deferreturn", 'nameToOffset' is a helper -// function for capturing function names, 'numberedFiles' records the file -// number assigned to a given file symbol, 'filepaths' is a slice of expanded -// paths (indexed by file number). +// index for the symbol "runtime.deferreturn", // // NB: This is deprecated, and will be eliminated when pclntab_old is // eliminated. type oldPclnState struct { ldr *loader.Loader deferReturnSym loader.Sym - numberedFiles map[string]int64 - filepaths []string } // pclntab holds the state needed for pclntab generation. @@ -42,9 +32,6 @@ type pclntab struct { // The first and last functions found. firstFunc, lastFunc loader.Sym - // The offset to the filetab. - filetabOffset int32 - // Running total size of pclntab. size int64 @@ -54,6 +41,8 @@ type pclntab struct { pcheader loader.Sym funcnametab loader.Sym findfunctab loader.Sym + cutab loader.Sym + filetab loader.Sym // The number of functions + number of TEXT sections - 1. 
This is such an // unexpected value because platforms that have more than one TEXT section @@ -64,6 +53,9 @@ type pclntab struct { // On most platforms this is the number of reachable functions. nfunc int32 + // The number of filenames in runtime.filetab. + nfiles uint32 + // maps the function symbol to offset in runtime.funcnametab // This doesn't need to reside in the state once pclntab_old's been // deleted -- it can live in generateFuncnametab. @@ -89,11 +81,6 @@ func makeOldPclnState(ctxt *Link) *oldPclnState { state := &oldPclnState{ ldr: ldr, deferReturnSym: drs, - numberedFiles: make(map[string]int64), - // NB: initial entry in filepaths below is to reserve the zero value, - // so that when we do a map lookup in numberedFiles fails, it will not - // return a value slot in filepaths. - filepaths: []string{""}, } return state @@ -153,78 +140,6 @@ func ftabaddstring(ftab *loader.SymbolBuilder, s string) int32 { return int32(start) } -// numberfile assigns a file number to the file if it hasn't been assigned already. -// This funciton looks at a CU's file at index [i], and if it's a new filename, -// stores that filename in the global file table, and adds it to the map lookup -// for renumbering pcfile. 
-func (state *oldPclnState) numberfile(cu *sym.CompilationUnit, i goobj.CUFileIndex) int64 { - file := cu.FileTable[i] - if val, ok := state.numberedFiles[file]; ok { - return val - } - path := file - if strings.HasPrefix(path, src.FileSymPrefix) { - path = file[len(src.FileSymPrefix):] - } - val := int64(len(state.filepaths)) - state.numberedFiles[file] = val - state.filepaths = append(state.filepaths, expandGoroot(path)) - return val -} - -func (state *oldPclnState) fileVal(cu *sym.CompilationUnit, i int32) int64 { - file := cu.FileTable[i] - if val, ok := state.numberedFiles[file]; ok { - return val - } - panic("should have been numbered first") -} - -func (state *oldPclnState) renumberfiles(ctxt *Link, cu *sym.CompilationUnit, fi loader.FuncInfo, d *sym.Pcdata) { - // Give files numbers. - nf := fi.NumFile() - for i := uint32(0); i < nf; i++ { - state.numberfile(cu, fi.File(int(i))) - } - - buf := make([]byte, binary.MaxVarintLen32) - newval := int32(-1) - var out sym.Pcdata - it := obj.NewPCIter(uint32(ctxt.Arch.MinLC)) - for it.Init(d.P); !it.Done; it.Next() { - // value delta - oldval := it.Value - - var val int32 - if oldval == -1 { - val = -1 - } else { - if oldval < 0 || oldval >= int32(len(cu.FileTable)) { - log.Fatalf("bad pcdata %d", oldval) - } - val = int32(state.fileVal(cu, oldval)) - } - - dv := val - newval - newval = val - - // value - n := binary.PutVarint(buf, int64(dv)) - out.P = append(out.P, buf[:n]...) - - // pc delta - pc := (it.NextPC - it.PC) / it.PCScale - n = binary.PutUvarint(buf, uint64(pc)) - out.P = append(out.P, buf[:n]...) 
- } - - // terminating value delta - // we want to write varint-encoded 0, which is just 0 - out.P = append(out.P, 0) - - *d = out -} - // onlycsymbol looks at a symbol's name to report whether this is a // symbol that is referenced by C code func onlycsymbol(sname string) bool { @@ -308,12 +223,7 @@ func (state *oldPclnState) genInlTreeSym(cu *sym.CompilationUnit, fi loader.Func ninl := fi.NumInlTree() for i := 0; i < int(ninl); i++ { call := fi.InlTree(i) - // Usually, call.File is already numbered since the file - // shows up in the Pcfile table. However, two inlined calls - // might overlap exactly so that only the innermost file - // appears in the Pcfile table. In that case, this assigns - // the outer file a number. - val := state.numberfile(cu, call.File) + val := call.File nameoff, ok := newState.funcNameOffset[call.Func] if !ok { panic("couldn't find function name offset") @@ -359,11 +269,14 @@ func (state *pclntab) generatePCHeader(ctxt *Link) { header.SetUint8(ctxt.Arch, 6, uint8(ctxt.Arch.MinLC)) header.SetUint8(ctxt.Arch, 7, uint8(ctxt.Arch.PtrSize)) off := header.SetUint(ctxt.Arch, 8, uint64(state.nfunc)) + off = header.SetUint(ctxt.Arch, off, uint64(state.nfiles)) off = writeSymOffset(off, state.funcnametab) + off = writeSymOffset(off, state.cutab) + off = writeSymOffset(off, state.filetab) off = writeSymOffset(off, state.pclntab) } - size := int64(8 + 3*ctxt.Arch.PtrSize) + size := int64(8 + 6*ctxt.Arch.PtrSize) state.pcheader = state.addGeneratedSym(ctxt, "runtime.pcheader", size, writeHeader) } @@ -417,6 +330,139 @@ func (state *pclntab) generateFuncnametab(ctxt *Link, container loader.Bitmap) { state.funcnametab = state.addGeneratedSym(ctxt, "runtime.funcnametab", size, writeFuncNameTab) } +// walkFilenames walks the filenames in the all reachable functions. 
+func walkFilenames(ctxt *Link, container loader.Bitmap, f func(*sym.CompilationUnit, goobj.CUFileIndex)) { + ldr := ctxt.loader + + // Loop through all functions, finding the filenames we need. + for _, ls := range ctxt.Textp { + s := loader.Sym(ls) + if !emitPcln(ctxt, s, container) { + continue + } + + fi := ldr.FuncInfo(s) + if !fi.Valid() { + continue + } + fi.Preload() + + cu := ldr.SymUnit(s) + for i, nf := 0, int(fi.NumFile()); i < nf; i++ { + f(cu, fi.File(i)) + } + for i, ninl := 0, int(fi.NumInlTree()); i < ninl; i++ { + call := fi.InlTree(i) + f(cu, call.File) + } + } +} + +// generateFilenameTabs creates LUTs needed for filename lookup. Returns a slice +// of the index at which each CU begins in runtime.cutab. +// +// Function objects keep track of the files they reference to print the stack. +// This function creates a per-CU list of filenames if CU[M] references +// files[1-N], the following is generated: +// +// runtime.cutab: +// CU[M] +// offsetToFilename[0] +// offsetToFilename[1] +// .. +// +// runtime.filetab +// filename[0] +// filename[1] +// +// Looking up a filename then becomes: +// 0) Given a func, and filename index [K] +// 1) Get Func.CUIndex: M := func.cuOffset +// 2) Find filename offset: fileOffset := runtime.cutab[M+K] +// 3) Get the filename: getcstring(runtime.filetab[fileOffset]) +func (state *pclntab) generateFilenameTabs(ctxt *Link, compUnits []*sym.CompilationUnit, container loader.Bitmap) []uint32 { + // On a per-CU basis, keep track of all the filenames we need. + // + // Note, that we store the filenames in a separate section in the object + // files, and deduplicate based on the actual value. It would be better to + // store the filenames as symbols, using content addressable symbols (and + // then not loading extra filenames), and just use the hash value of the + // symbol name to do this cataloging. + // + // TOOD: Store filenames as symbols. 
(Note this would be easiest if you + // also move strings to ALWAYS using the larger content addressable hash + // function, and use that hash value for uniqueness testing.) + cuEntries := make([]goobj.CUFileIndex, len(compUnits)) + fileOffsets := make(map[string]uint32) + + // Walk the filenames. + // We store the total filename string length we need to load, and the max + // file index we've seen per CU so we can calculate how large the + // CU->global table needs to be. + var fileSize int64 + walkFilenames(ctxt, container, func(cu *sym.CompilationUnit, i goobj.CUFileIndex) { + // Note we use the raw filename for lookup, but use the expanded filename + // when we save the size. + filename := cu.FileTable[i] + if _, ok := fileOffsets[filename]; !ok { + fileOffsets[filename] = uint32(fileSize) + fileSize += int64(len(expandFile(filename)) + 1) // NULL terminate + } + + // Find the maximum file index we've seen. + if cuEntries[cu.PclnIndex] < i+1 { + cuEntries[cu.PclnIndex] = i + 1 // Store max + 1 + } + }) + + // Calculate the size of the runtime.cutab variable. + var totalEntries uint32 + cuOffsets := make([]uint32, len(cuEntries)) + for i, entries := range cuEntries { + // Note, cutab is a slice of uint32, so an offset to a cu's entry is just the + // running total of all cu indices we've needed to store so far, not the + // number of bytes we've stored so far. + cuOffsets[i] = totalEntries + totalEntries += uint32(entries) + } + + // Write cutab. + writeCutab := func(ctxt *Link, s loader.Sym) { + sb := ctxt.loader.MakeSymbolUpdater(s) + + var off int64 + for i, max := range cuEntries { + // Write the per CU LUT. + cu := compUnits[i] + for j := goobj.CUFileIndex(0); j < max; j++ { + fileOffset, ok := fileOffsets[cu.FileTable[j]] + if !ok { + // We're looping through all possible file indices. It's possible a file's + // been deadcode eliminated, and although it's a valid file in the CU, it's + // not needed in this binary. 
When that happens, use an invalid offset. + fileOffset = ^uint32(0) + } + off = sb.SetUint32(ctxt.Arch, off, fileOffset) + } + } + } + state.cutab = state.addGeneratedSym(ctxt, "runtime.cutab", int64(totalEntries*4), writeCutab) + + // Write filetab. + writeFiletab := func(ctxt *Link, s loader.Sym) { + sb := ctxt.loader.MakeSymbolUpdater(s) + + // Write the strings. + for filename, loc := range fileOffsets { + sb.AddStringAt(int64(loc), expandFile(filename)) + } + } + state.nfiles = uint32(len(fileOffsets)) + state.filetab = state.addGeneratedSym(ctxt, "runtime.filetab", fileSize, writeFiletab) + + return cuOffsets +} + // pclntab initializes the pclntab symbol with // runtime function and file name information. @@ -425,7 +471,7 @@ func (ctxt *Link) pclntab(container loader.Bitmap) *pclntab { // Go 1.2's symtab layout is documented in golang.org/s/go12symtab, but the // layout and data has changed since that time. // - // As of July 2020, here's the layout of pclntab: + // As of August 2020, here's the layout of pclntab: // // .gopclntab/__gopclntab [elf/macho section] // runtime.pclntab @@ -438,17 +484,23 @@ func (ctxt *Link) pclntab(container loader.Bitmap) *pclntab { // offset to runtime.pclntab_old from beginning of runtime.pcheader // // runtime.funcnametab - // []list of null terminated function names + // []list of null terminated function names + // + // runtime.cutab + // for i=0..#CUs + // for j=0..#max used file index in CU[i] + // uint32 offset into runtime.filetab for the filename[j] + // + // runtime.filetab + // []null terminated filename strings // // runtime.pclntab_old // function table, alternating PC and offset to func struct [each entry thearch.ptrsize bytes] // end PC [thearch.ptrsize bytes] - // offset to file table [4 bytes] // func structures, pcdata tables. 
- // filetable oldState := makeOldPclnState(ctxt) - state, _ := makePclntab(ctxt, container) + state, compUnits := makePclntab(ctxt, container) ldr := ctxt.loader state.carrier = ldr.LookupOrCreateSym("runtime.pclntab", 0) @@ -461,6 +513,7 @@ func (ctxt *Link) pclntab(container loader.Bitmap) *pclntab { state.pclntab = ldr.LookupOrCreateSym("runtime.pclntab_old", 0) state.generatePCHeader(ctxt) state.generateFuncnametab(ctxt, container) + cuOffsets := state.generateFilenameTabs(ctxt, compUnits, container) funcdataBytes := int64(0) ldr.SetCarrierSym(state.pclntab, state.carrier) @@ -583,7 +636,7 @@ func (ctxt *Link) pclntab(container loader.Bitmap) *pclntab { // fixed size of struct, checked below off := funcstart - end := funcstart + int32(ctxt.Arch.PtrSize) + 3*4 + 5*4 + int32(len(pcdata))*4 + int32(len(funcdata))*int32(ctxt.Arch.PtrSize) + end := funcstart + int32(ctxt.Arch.PtrSize) + 3*4 + 6*4 + int32(len(pcdata))*4 + int32(len(funcdata))*int32(ctxt.Arch.PtrSize) if len(funcdata) > 0 && (end&int32(ctxt.Arch.PtrSize-1) != 0) { end += 4 } @@ -616,17 +669,6 @@ func (ctxt *Link) pclntab(container loader.Bitmap) *pclntab { pcsp = sym.Pcdata{P: fi.Pcsp()} pcfile = sym.Pcdata{P: fi.Pcfile()} pcline = sym.Pcdata{P: fi.Pcline()} - oldState.renumberfiles(ctxt, cu, fi, &pcfile) - if false { - // Sanity check the new numbering - it := obj.NewPCIter(uint32(ctxt.Arch.MinLC)) - for it.Init(pcfile.P); !it.Done; it.Next() { - if it.Value < 1 || it.Value > int32(len(oldState.numberedFiles)) { - ctxt.Errorf(s, "bad file number in pcfile: %d not in range [1, %d]\n", it.Value, len(oldState.numberedFiles)) - errorexit() - } - } - } } if fi.Valid() && fi.NumInlTree() > 0 { @@ -641,15 +683,12 @@ func (ctxt *Link) pclntab(container loader.Bitmap) *pclntab { off = writepctab(off, pcline.P) off = int32(ftab.SetUint32(ctxt.Arch, int64(off), uint32(len(pcdata)))) - // Store the compilation unit index. - cuIdx := ^uint16(0) + // Store the offset to compilation unit's file table. 
+ cuIdx := ^uint32(0) if cu := ldr.SymUnit(s); cu != nil { - if cu.PclnIndex > math.MaxUint16 { - panic("cu limit reached.") - } - cuIdx = uint16(cu.PclnIndex) + cuIdx = cuOffsets[cu.PclnIndex] } - off = int32(ftab.SetUint16(ctxt.Arch, int64(off), cuIdx)) + off = int32(ftab.SetUint32(ctxt.Arch, int64(off), cuIdx)) // funcID uint8 var funcID objabi.FuncID @@ -658,6 +697,8 @@ func (ctxt *Link) pclntab(container loader.Bitmap) *pclntab { } off = int32(ftab.SetUint8(ctxt.Arch, int64(off), uint8(funcID))) + off += 2 // pad + // nfuncdata must be the final entry. off = int32(ftab.SetUint8(ctxt.Arch, int64(off), uint8(len(funcdata)))) for i := range pcdata { @@ -694,26 +735,8 @@ func (ctxt *Link) pclntab(container loader.Bitmap) *pclntab { // Final entry of table is just end pc. setAddr(ftab, ctxt.Arch, int64(nfunc)*2*int64(ctxt.Arch.PtrSize), state.lastFunc, ldr.SymSize(state.lastFunc)) - // Start file table. - dSize := len(ftab.Data()) - start := int32(dSize) - start += int32(-dSize) & (int32(ctxt.Arch.PtrSize) - 1) - state.filetabOffset = start - ftab.SetUint32(ctxt.Arch, int64(nfunc)*2*int64(ctxt.Arch.PtrSize)+int64(ctxt.Arch.PtrSize), uint32(start)) - - nf := len(oldState.numberedFiles) - ftab.Grow(int64(start) + int64((nf+1)*4)) - ftab.SetUint32(ctxt.Arch, int64(start), uint32(nf+1)) - for i := nf; i > 0; i-- { - path := oldState.filepaths[i] - val := int64(i) - ftab.SetUint32(ctxt.Arch, int64(start)+val*4, uint32(ftabaddstring(ftab, path))) - } - ftab.SetSize(int64(len(ftab.Data()))) - ctxt.NumFilesyms = len(oldState.numberedFiles) - if ctxt.Debugvlog != 0 { ctxt.Logf("pclntab=%d bytes, funcdata total %d bytes\n", ftab.Size(), funcdataBytes) } diff --git a/src/cmd/link/internal/ld/symtab.go b/src/cmd/link/internal/ld/symtab.go index bc880955b8..d05b98f04a 100644 --- a/src/cmd/link/internal/ld/symtab.go +++ b/src/cmd/link/internal/ld/symtab.go @@ -619,6 +619,14 @@ func (ctxt *Link) symtab(pcln *pclntab) []sym.SymKind { moduledata.AddAddr(ctxt.Arch, pcln.funcnametab) 
moduledata.AddUint(ctxt.Arch, uint64(ldr.SymSize(pcln.funcnametab))) moduledata.AddUint(ctxt.Arch, uint64(ldr.SymSize(pcln.funcnametab))) + // The cutab slice + moduledata.AddAddr(ctxt.Arch, pcln.cutab) + moduledata.AddUint(ctxt.Arch, uint64(ldr.SymSize(pcln.cutab))) + moduledata.AddUint(ctxt.Arch, uint64(ldr.SymSize(pcln.cutab))) + // The filetab slice + moduledata.AddAddr(ctxt.Arch, pcln.filetab) + moduledata.AddUint(ctxt.Arch, uint64(ldr.SymSize(pcln.filetab))) + moduledata.AddUint(ctxt.Arch, uint64(ldr.SymSize(pcln.filetab))) // The pclntab slice moduledata.AddAddr(ctxt.Arch, pcln.pclntab) moduledata.AddUint(ctxt.Arch, uint64(ldr.SymSize(pcln.pclntab))) @@ -627,10 +635,6 @@ func (ctxt *Link) symtab(pcln *pclntab) []sym.SymKind { moduledata.AddAddr(ctxt.Arch, pcln.pclntab) moduledata.AddUint(ctxt.Arch, uint64(pcln.nfunc+1)) moduledata.AddUint(ctxt.Arch, uint64(pcln.nfunc+1)) - // The filetab slice - moduledata.AddAddrPlus(ctxt.Arch, pcln.pclntab, int64(pcln.filetabOffset)) - moduledata.AddUint(ctxt.Arch, uint64(ctxt.NumFilesyms)+1) - moduledata.AddUint(ctxt.Arch, uint64(ctxt.NumFilesyms)+1) // findfunctab moduledata.AddAddr(ctxt.Arch, pcln.findfunctab) // minpc, maxpc diff --git a/src/debug/gosym/pclntab.go b/src/debug/gosym/pclntab.go index e5c50520fc..e383ea460a 100644 --- a/src/debug/gosym/pclntab.go +++ b/src/debug/gosym/pclntab.go @@ -53,6 +53,7 @@ type LineTable struct { quantum uint32 ptrsize uint32 funcnametab []byte + cutab []byte funcdata []byte functab []byte nfunctab uint32 @@ -223,17 +224,18 @@ func (t *LineTable) parsePclnTab() { switch possibleVersion { case ver116: t.nfunctab = uint32(t.uintptr(t.Data[8:])) - offset := t.uintptr(t.Data[8+t.ptrsize:]) + t.nfiletab = uint32(t.uintptr(t.Data[8+t.ptrsize:])) + offset := t.uintptr(t.Data[8+2*t.ptrsize:]) t.funcnametab = t.Data[offset:] - offset = t.uintptr(t.Data[8+2*t.ptrsize:]) + offset = t.uintptr(t.Data[8+3*t.ptrsize:]) + t.cutab = t.Data[offset:] + offset = t.uintptr(t.Data[8+4*t.ptrsize:]) + 
t.filetab = t.Data[offset:] + offset = t.uintptr(t.Data[8+5*t.ptrsize:]) t.funcdata = t.Data[offset:] t.functab = t.Data[offset:] functabsize := t.nfunctab*2*t.ptrsize + t.ptrsize - fileoff := t.binary.Uint32(t.functab[functabsize:]) - t.filetab = t.functab[fileoff:] t.functab = t.functab[:functabsize] - t.nfiletab = t.binary.Uint32(t.filetab) - t.filetab = t.filetab[:t.nfiletab*4] case ver12: t.nfunctab = uint32(t.uintptr(t.Data[8:])) t.funcdata = t.Data @@ -330,17 +332,22 @@ func (t *LineTable) funcName(off uint32) string { return s } -// string returns a Go string found at off. -func (t *LineTable) string(off uint32) string { +// stringFrom returns a Go string found at off from a position. +func (t *LineTable) stringFrom(arr []byte, off uint32) string { if s, ok := t.strings[off]; ok { return s } - i := bytes.IndexByte(t.funcdata[off:], 0) - s := string(t.funcdata[off : off+uint32(i)]) + i := bytes.IndexByte(arr[off:], 0) + s := string(arr[off : off+uint32(i)]) t.strings[off] = s return s } +// string returns a Go string found at off. +func (t *LineTable) string(off uint32) string { + return t.stringFrom(t.funcdata, off) +} + // step advances to the next pc, value pair in the encoded table. func (t *LineTable) step(p *[]byte, pc *uint64, val *int32, first bool) bool { uvdelta := t.readvarint(p) @@ -453,7 +460,15 @@ func (t *LineTable) go12PCToFile(pc uint64) (file string) { if fno <= 0 { return "" } - return t.string(t.binary.Uint32(t.filetab[4*fno:])) + if t.version == ver12 { + return t.string(t.binary.Uint32(t.filetab[4*fno:])) + } + // Go ≥ 1.16 + cuoff := t.binary.Uint32(f[t.ptrsize+7*4:]) + if fnoff := t.binary.Uint32(t.cutab[(cuoff+uint32(fno))*4:]); fnoff != ^uint32(0) { + return t.stringFrom(t.filetab, fnoff) + } + return "" } // go12LineToPC maps a (file, line) pair to a program counter for the Go 1.2 pcln table. 
@@ -496,9 +511,18 @@ func (t *LineTable) initFileMap() { } m := make(map[string]uint32) - for i := uint32(1); i < t.nfiletab; i++ { - s := t.string(t.binary.Uint32(t.filetab[4*i:])) - m[s] = i + if t.version == ver12 { + for i := uint32(1); i < t.nfiletab; i++ { + s := t.string(t.binary.Uint32(t.filetab[4*i:])) + m[s] = i + } + } else { + var pos uint32 + for i := uint32(1); i < t.nfiletab; i++ { + s := t.stringFrom(t.filetab, pos) + pos += uint32(len(s) + 1) + m[s] = i + } } t.fileMap = m } diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go index 0bddcaa789..5a79c7e6ec 100644 --- a/src/runtime/runtime2.go +++ b/src/runtime/runtime2.go @@ -804,9 +804,10 @@ type _func struct { pcfile int32 pcln int32 npcdata int32 - cuIndex uint16 // TODO(jfaller): 16 bits is never enough, make this larger. - funcID funcID // set for certain special runtime functions - nfuncdata uint8 // must be last + cuOffset uint32 // runtime.cutab offset of this function's CU + funcID funcID // set for certain special runtime functions + _ [2]byte // pad + nfuncdata uint8 // must be last } // Pseudo-Func that is returned for PCs that occur in inlined code. diff --git a/src/runtime/symtab.go b/src/runtime/symtab.go index ddb5ea82b4..fbd9315522 100644 --- a/src/runtime/symtab.go +++ b/src/runtime/symtab.go @@ -334,14 +334,17 @@ const ( funcID_wrapper // any autogenerated code (hash/eq algorithms, method wrappers, etc.) ) -// PCHeader holds data used by the pclntab lookups. +// pcHeader holds data used by the pclntab lookups. type pcHeader struct { magic uint32 // 0xFFFFFFFA pad1, pad2 uint8 // 0,0 minLC uint8 // min instruction size ptrSize uint8 // size of a ptr in bytes nfunc int // number of functions in the module + nfiles uint // number of entries in the file tab. 
funcnameOffset uintptr // offset to the funcnametab variable from pcHeader + cuOffset uintptr // offset to the cutab variable from pcHeader + filetabOffset uintptr // offset to the filetab variable from pcHeader pclnOffset uintptr // offset to the pclntab variable from pcHeader } @@ -353,9 +356,10 @@ type pcHeader struct { type moduledata struct { pcHeader *pcHeader funcnametab []byte + cutab []uint32 + filetab []byte pclntable []byte ftab []functab - filetab []uint32 findfunctab uintptr minpc, maxpc uintptr @@ -851,7 +855,12 @@ func funcfile(f funcInfo, fileno int32) string { if !f.valid() { return "?" } - return gostringnocopy(&datap.pclntable[datap.filetab[fileno]]) + // Make sure the cu index and file offset are valid + if fileoff := datap.cutab[f.cuOffset+uint32(fileno)]; fileoff != ^uint32(0) { + return gostringnocopy(&datap.filetab[fileoff]) + } + // pcln section is corrupt. + return "?" } func funcline1(f funcInfo, targetpc uintptr, strict bool) (file string, line int32) { @@ -865,7 +874,7 @@ func funcline1(f funcInfo, targetpc uintptr, strict bool) (file string, line int // print("looking for ", hex(targetpc), " in ", funcname(f), " got file=", fileno, " line=", lineno, "\n") return "?", 0 } - file = gostringnocopy(&datap.pclntable[datap.filetab[fileno]]) + file = funcfile(f, fileno) return } @@ -1005,7 +1014,7 @@ type inlinedCall struct { parent int16 // index of parent in the inltree, or < 0 funcID funcID // type of the called function _ byte - file int32 // fileno index into filetab + file int32 // perCU file index for inlined call. 
See cmd/link:pcln.go line int32 // line number of the call site func_ int32 // offset into pclntab for name of called function parentPc int32 // position of an instruction whose source position is the call site (offset from entry) From b249703e3c53cd7f1e5f808fb2f03714fec44b43 Mon Sep 17 00:00:00 2001 From: Jeremy Faller Date: Wed, 12 Aug 2020 12:54:03 -0400 Subject: [PATCH 002/281] [dev.link] cmd/compile, cmd/asm: add length to hashed symbols While working on deduplicating pcdata, I found that the following hashed symbols would result in the same: [] == [0,0,0,0....] This makes using content addressable symbols untenable for pcdata. Adding the length to the hash keeps the dream alive. No difference in binary size (darwin, cmd/compile), spurious improvements in DWARF phase memory. Change-Id: I21101f7754a3d870922b0dea39c947cc8509432f Reviewed-on: https://go-review.googlesource.com/c/go/+/247903 Run-TryBot: Jeremy Faller TryBot-Result: Gobot Gobot Reviewed-by: Than McIntosh Reviewed-by: Austin Clements Reviewed-by: Cherry Zhang --- src/cmd/internal/obj/objfile.go | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/src/cmd/internal/obj/objfile.go b/src/cmd/internal/obj/objfile.go index 7bc4f4992e..8234697d72 100644 --- a/src/cmd/internal/obj/objfile.go +++ b/src/cmd/internal/obj/objfile.go @@ -372,10 +372,22 @@ func contentHash64(s *LSym) goobj.Hash64Type { // hashed symbols. func (w *writer) contentHash(s *LSym) goobj.HashType { h := sha1.New() + var tmp [14]byte + + // Include the size of the symbol in the hash. + // This preserves the length of symbols, preventing the following two symbols + // from hashing the same: + // + // [2]int{1,2} ≠ [10]int{1,2,0,0,0...} + // + // In this case, if the smaller symbol is alive, the larger is not kept unless + // needed. + binary.LittleEndian.PutUint64(tmp[:8], uint64(s.Size)) + h.Write(tmp[:8]) + // The compiler trims trailing zeros _sometimes_. We just do // it always. 
h.Write(bytes.TrimRight(s.P, "\x00")) - var tmp [14]byte for i := range s.R { r := &s.R[i] binary.LittleEndian.PutUint32(tmp[:4], uint32(r.Off)) From 954db9fe51154e5d4663c0c1a62c82a99eef1ed4 Mon Sep 17 00:00:00 2001 From: Jeremy Faller Date: Wed, 12 Aug 2020 16:37:42 -0400 Subject: [PATCH 003/281] [dev.link] debug/gosym: fix file mappings CL 246497 introduced bugs in gosym that the long tests caught. These two bugs were: 1) In 1.16, 0 is now a valid file number from pcfile tables. 2) Also, in 1.16, when we scan all functions looking for a pc/file pair, the values returned from pcfile are no longer the direct offset into the file table. Rather, the values from pcfile are the offset into the cu->file look-up table. This CL fixes those two issues. Change-Id: I0cd280bdcaeda89faaf9fac41809abdb87734499 Reviewed-on: https://go-review.googlesource.com/c/go/+/248317 Reviewed-by: Cherry Zhang Reviewed-by: Austin Clements Reviewed-by: Than McIntosh --- src/debug/gosym/pclntab.go | 41 ++++++++++++++++++++++++++------------ 1 file changed, 28 insertions(+), 13 deletions(-) diff --git a/src/debug/gosym/pclntab.go b/src/debug/gosym/pclntab.go index e383ea460a..21edddda20 100644 --- a/src/debug/gosym/pclntab.go +++ b/src/debug/gosym/pclntab.go @@ -59,9 +59,12 @@ type LineTable struct { nfunctab uint32 filetab []byte nfiletab uint32 - fileMap map[string]uint32 funcNames map[uint32]string // cache the function names strings map[uint32]string // interned substrings of Data, keyed by offset + // fileMap varies depending on the version of the object file. + // For ver12, it maps the name to the index in the file table. + // For ver116, it maps the name to the offset in filetab. + fileMap map[string]uint32 } // NOTE(rsc): This is wrong for GOARCH=arm, which uses a quantum of 4, @@ -388,7 +391,7 @@ func (t *LineTable) pcvalue(off uint32, entry, targetpc uint64) int32 { // to file number. Since most functions come from a single file, these // are usually short and quick to scan. 
If a file match is found, then the // code goes to the expense of looking for a simultaneous line number match. -func (t *LineTable) findFileLine(entry uint64, filetab, linetab uint32, filenum, line int32) uint64 { +func (t *LineTable) findFileLine(entry uint64, filetab, linetab uint32, filenum, line int32, cutab []byte) uint64 { if filetab == 0 || linetab == 0 { return 0 } @@ -401,8 +404,12 @@ func (t *LineTable) findFileLine(entry uint64, filetab, linetab uint32, filenum, linePC := entry fileStartPC := filePC for t.step(&fp, &filePC, &fileVal, filePC == entry) { - if fileVal == filenum && fileStartPC < filePC { - // fileVal is in effect starting at fileStartPC up to + fileIndex := fileVal + if t.version == ver116 { + fileIndex = int32(t.binary.Uint32(cutab[fileVal*4:])) + } + if fileIndex == filenum && fileStartPC < filePC { + // fileIndex is in effect starting at fileStartPC up to // but not including filePC, and it's the file we want. // Run the PC table looking for a matching line number // or until we reach filePC. @@ -457,13 +464,16 @@ func (t *LineTable) go12PCToFile(pc uint64) (file string) { entry := t.uintptr(f) filetab := t.binary.Uint32(f[t.ptrsize+4*4:]) fno := t.pcvalue(filetab, entry, pc) - if fno <= 0 { - return "" - } if t.version == ver12 { + if fno <= 0 { + return "" + } return t.string(t.binary.Uint32(t.filetab[4*fno:])) } // Go ≥ 1.16 + if fno < 0 { // 0 is valid for ≥ 1.16 + return "" + } cuoff := t.binary.Uint32(f[t.ptrsize+7*4:]) if fnoff := t.binary.Uint32(t.cutab[(cuoff+uint32(fno))*4:]); fnoff != ^uint32(0) { return t.stringFrom(t.filetab, fnoff) @@ -471,7 +481,7 @@ func (t *LineTable) go12PCToFile(pc uint64) (file string) { return "" } -// go12LineToPC maps a (file, line) pair to a program counter for the Go 1.2 pcln table. +// go12LineToPC maps a (file, line) pair to a program counter for the Go 1.2/1.16 pcln table. 
func (t *LineTable) go12LineToPC(file string, line int) (pc uint64) { defer func() { if recover() != nil { @@ -480,20 +490,25 @@ func (t *LineTable) go12LineToPC(file string, line int) (pc uint64) { }() t.initFileMap() - filenum := t.fileMap[file] - if filenum == 0 { + filenum, ok := t.fileMap[file] + if !ok { return 0 } // Scan all functions. // If this turns out to be a bottleneck, we could build a map[int32][]int32 // mapping file number to a list of functions with code from that file. + var cutab []byte for i := uint32(0); i < t.nfunctab; i++ { f := t.funcdata[t.uintptr(t.functab[2*t.ptrsize*i+t.ptrsize:]):] entry := t.uintptr(f) filetab := t.binary.Uint32(f[t.ptrsize+4*4:]) linetab := t.binary.Uint32(f[t.ptrsize+5*4:]) - pc := t.findFileLine(entry, filetab, linetab, int32(filenum), int32(line)) + if t.version == ver116 { + cuoff := t.binary.Uint32(f[t.ptrsize+7*4:]) * 4 + cutab = t.cutab[cuoff:] + } + pc := t.findFileLine(entry, filetab, linetab, int32(filenum), int32(line), cutab) if pc != 0 { return pc } @@ -518,10 +533,10 @@ func (t *LineTable) initFileMap() { } } else { var pos uint32 - for i := uint32(1); i < t.nfiletab; i++ { + for i := uint32(0); i < t.nfiletab; i++ { s := t.stringFrom(t.filetab, pos) + m[s] = pos pos += uint32(len(s) + 1) - m[s] = i } } t.fileMap = m From 5387cdcb24a07f5d0d49d5105ced2b69e6aafde9 Mon Sep 17 00:00:00 2001 From: Jeremy Faller Date: Fri, 7 Aug 2020 11:31:20 -0400 Subject: [PATCH 004/281] [dev.link] cmd/link, cmd/compile: create content addressable pcdata syms Switch pcdata over to content addressable symbols. This is the last step before removing these from pclntab_old. No meaningful benchmarks changes come from this work. 
Change-Id: I3f74f3d6026a278babe437c8010e22992c92bd89 Reviewed-on: https://go-review.googlesource.com/c/go/+/247399 Reviewed-by: Austin Clements Reviewed-by: Than McIntosh --- src/cmd/internal/goobj/funcinfo.go | 82 ++++++++++++++------------ src/cmd/internal/goobj/objfile.go | 12 ++-- src/cmd/internal/obj/link.go | 15 ++--- src/cmd/internal/obj/objfile.go | 69 +++++++++++++++++----- src/cmd/internal/obj/pcln.go | 55 ++++++++++------- src/cmd/internal/objfile/goobj.go | 16 +++-- src/cmd/link/internal/ld/dwarf.go | 2 +- src/cmd/link/internal/ld/lib.go | 2 +- src/cmd/link/internal/ld/pcln.go | 13 ++-- src/cmd/link/internal/loader/loader.go | 46 +++++++-------- 10 files changed, 181 insertions(+), 131 deletions(-) diff --git a/src/cmd/internal/goobj/funcinfo.go b/src/cmd/internal/goobj/funcinfo.go index e0e6068b4b..2cca8f6c4e 100644 --- a/src/cmd/internal/goobj/funcinfo.go +++ b/src/cmd/internal/goobj/funcinfo.go @@ -23,12 +23,11 @@ type FuncInfo struct { Locals uint32 FuncID objabi.FuncID - Pcsp uint32 - Pcfile uint32 - Pcline uint32 - Pcinline uint32 - Pcdata []uint32 - PcdataEnd uint32 + Pcsp SymRef + Pcfile SymRef + Pcline SymRef + Pcinline SymRef + Pcdata []SymRef Funcdataoff []uint32 File []CUFileIndex @@ -41,20 +40,24 @@ func (a *FuncInfo) Write(w *bytes.Buffer) { binary.LittleEndian.PutUint32(b[:], x) w.Write(b[:]) } + writeSymRef := func(s SymRef) { + writeUint32(s.PkgIdx) + writeUint32(s.SymIdx) + } writeUint32(a.Args) writeUint32(a.Locals) writeUint32(uint32(a.FuncID)) - writeUint32(a.Pcsp) - writeUint32(a.Pcfile) - writeUint32(a.Pcline) - writeUint32(a.Pcinline) + writeSymRef(a.Pcsp) + writeSymRef(a.Pcfile) + writeSymRef(a.Pcline) + writeSymRef(a.Pcinline) writeUint32(uint32(len(a.Pcdata))) - for _, x := range a.Pcdata { - writeUint32(x) + for _, sym := range a.Pcdata { + writeSymRef(sym) } - writeUint32(a.PcdataEnd) + writeUint32(uint32(len(a.Funcdataoff))) for _, x := range a.Funcdataoff { writeUint32(x) @@ -75,21 +78,23 @@ func (a *FuncInfo) Read(b 
[]byte) { b = b[4:] return x } + readSymIdx := func() SymRef { + return SymRef{readUint32(), readUint32()} + } a.Args = readUint32() a.Locals = readUint32() a.FuncID = objabi.FuncID(readUint32()) - a.Pcsp = readUint32() - a.Pcfile = readUint32() - a.Pcline = readUint32() - a.Pcinline = readUint32() - pcdatalen := readUint32() - a.Pcdata = make([]uint32, pcdatalen) + a.Pcsp = readSymIdx() + a.Pcfile = readSymIdx() + a.Pcline = readSymIdx() + a.Pcinline = readSymIdx() + a.Pcdata = make([]SymRef, readUint32()) for i := range a.Pcdata { - a.Pcdata[i] = readUint32() + a.Pcdata[i] = readSymIdx() } - a.PcdataEnd = readUint32() + funcdataofflen := readUint32() a.Funcdataoff = make([]uint32, funcdataofflen) for i := range a.Funcdataoff { @@ -127,11 +132,13 @@ type FuncInfoLengths struct { func (*FuncInfo) ReadFuncInfoLengths(b []byte) FuncInfoLengths { var result FuncInfoLengths - const numpcdataOff = 28 + // Offset to the number of pcdata values. This value is determined by counting + // the number of bytes until we write pcdata to the file. + const numpcdataOff = 44 result.NumPcdata = binary.LittleEndian.Uint32(b[numpcdataOff:]) result.PcdataOff = numpcdataOff + 4 - numfuncdataoffOff := result.PcdataOff + 4*(result.NumPcdata+1) + numfuncdataoffOff := result.PcdataOff + 8*result.NumPcdata result.NumFuncdataoff = binary.LittleEndian.Uint32(b[numfuncdataoffOff:]) result.FuncdataoffOff = numfuncdataoffOff + 4 @@ -154,29 +161,28 @@ func (*FuncInfo) ReadLocals(b []byte) uint32 { return binary.LittleEndian.Uint32 func (*FuncInfo) ReadFuncID(b []byte) uint32 { return binary.LittleEndian.Uint32(b[8:]) } -// return start and end offsets. -func (*FuncInfo) ReadPcsp(b []byte) (uint32, uint32) { - return binary.LittleEndian.Uint32(b[12:]), binary.LittleEndian.Uint32(b[16:]) +func (*FuncInfo) ReadPcsp(b []byte) SymRef { + return SymRef{binary.LittleEndian.Uint32(b[12:]), binary.LittleEndian.Uint32(b[16:])} } -// return start and end offsets. 
-func (*FuncInfo) ReadPcfile(b []byte) (uint32, uint32) { - return binary.LittleEndian.Uint32(b[16:]), binary.LittleEndian.Uint32(b[20:]) +func (*FuncInfo) ReadPcfile(b []byte) SymRef { + return SymRef{binary.LittleEndian.Uint32(b[20:]), binary.LittleEndian.Uint32(b[24:])} } -// return start and end offsets. -func (*FuncInfo) ReadPcline(b []byte) (uint32, uint32) { - return binary.LittleEndian.Uint32(b[20:]), binary.LittleEndian.Uint32(b[24:]) +func (*FuncInfo) ReadPcline(b []byte) SymRef { + return SymRef{binary.LittleEndian.Uint32(b[28:]), binary.LittleEndian.Uint32(b[32:])} } -// return start and end offsets. -func (*FuncInfo) ReadPcinline(b []byte, pcdataoffset uint32) (uint32, uint32) { - return binary.LittleEndian.Uint32(b[24:]), binary.LittleEndian.Uint32(b[pcdataoffset:]) +func (*FuncInfo) ReadPcinline(b []byte) SymRef { + return SymRef{binary.LittleEndian.Uint32(b[36:]), binary.LittleEndian.Uint32(b[40:])} } -// return start and end offsets. -func (*FuncInfo) ReadPcdata(b []byte, pcdataoffset uint32, k uint32) (uint32, uint32) { - return binary.LittleEndian.Uint32(b[pcdataoffset+4*k:]), binary.LittleEndian.Uint32(b[pcdataoffset+4+4*k:]) +func (*FuncInfo) ReadPcdata(b []byte) []SymRef { + syms := make([]SymRef, binary.LittleEndian.Uint32(b[44:])) + for i := range syms { + syms[i] = SymRef{binary.LittleEndian.Uint32(b[48+i*8:]), binary.LittleEndian.Uint32(b[52+i*8:])} + } + return syms } func (*FuncInfo) ReadFuncdataoff(b []byte, funcdataofffoff uint32, k uint32) int64 { diff --git a/src/cmd/internal/goobj/objfile.go b/src/cmd/internal/goobj/objfile.go index 5d4a253024..9a64f96cd6 100644 --- a/src/cmd/internal/goobj/objfile.go +++ b/src/cmd/internal/goobj/objfile.go @@ -421,8 +421,11 @@ const ( AuxDwarfLoc AuxDwarfRanges AuxDwarfLines - - // TODO: more. Pcdata? 
+ AuxPcsp + AuxPcfile + AuxPcline + AuxPcinline + AuxPcdata ) func (a *Aux) Type() uint8 { return a[0] } @@ -827,11 +830,6 @@ func (r *Reader) Data(i uint32) []byte { return r.BytesAt(base+off, int(end-off)) } -// AuxDataBase returns the base offset of the aux data block. -func (r *Reader) PcdataBase() uint32 { - return r.h.Offsets[BlkPcdata] -} - // NRefName returns the number of referenced symbol names. func (r *Reader) NRefName() int { return int(r.h.Offsets[BlkRefName+1]-r.h.Offsets[BlkRefName]) / RefNameSize diff --git a/src/cmd/internal/obj/link.go b/src/cmd/internal/obj/link.go index dc47e51be9..11fab63065 100644 --- a/src/cmd/internal/obj/link.go +++ b/src/cmd/internal/obj/link.go @@ -624,11 +624,12 @@ func (s *LSym) CanBeAnSSASym() { } type Pcln struct { - Pcsp Pcdata - Pcfile Pcdata - Pcline Pcdata - Pcinline Pcdata - Pcdata []Pcdata + // Aux symbols for pcln + Pcsp *LSym + Pcfile *LSym + Pcline *LSym + Pcinline *LSym + Pcdata []*LSym Funcdata []*LSym Funcdataoff []int64 UsedFiles map[goobj.CUFileIndex]struct{} // file indices used while generating pcfile @@ -650,10 +651,6 @@ type Auto struct { Gotype *LSym } -type Pcdata struct { - P []byte -} - // Link holds the context for writing object code from a compiler // to be linker input or for reading that input into the linker. type Link struct { diff --git a/src/cmd/internal/obj/objfile.go b/src/cmd/internal/obj/objfile.go index 8234697d72..a2bbdff24e 100644 --- a/src/cmd/internal/obj/objfile.go +++ b/src/cmd/internal/obj/objfile.go @@ -185,7 +185,11 @@ func WriteObjFile(ctxt *Link, b *bio.Writer) { // Pcdata h.Offsets[goobj.BlkPcdata] = w.Offset() for _, s := range ctxt.Text { // iteration order must match genFuncInfoSyms - if s.Func != nil { + // Because of the phase order, it's possible that we try to write an invalid + // object file, and the Pcln variables haven't been filled in. As such, we + // need to check that Pcsp exists, and assume the other pcln variables exist + // as well. 
Tests like test/fixedbugs/issue22200.go demonstrate this issue. + if s.Func != nil && s.Func.Pcln.Pcsp != nil { pc := &s.Func.Pcln w.Bytes(pc.Pcsp.P) w.Bytes(pc.Pcfile.P) @@ -478,6 +482,22 @@ func (w *writer) Aux(s *LSym) { if s.Func.dwarfDebugLinesSym != nil && s.Func.dwarfDebugLinesSym.Size != 0 { w.aux1(goobj.AuxDwarfLines, s.Func.dwarfDebugLinesSym) } + if s.Func.Pcln.Pcsp != nil && s.Func.Pcln.Pcsp.Size != 0 { + w.aux1(goobj.AuxPcsp, s.Func.Pcln.Pcsp) + } + if s.Func.Pcln.Pcfile != nil && s.Func.Pcln.Pcfile.Size != 0 { + w.aux1(goobj.AuxPcfile, s.Func.Pcln.Pcfile) + } + if s.Func.Pcln.Pcline != nil && s.Func.Pcln.Pcline.Size != 0 { + w.aux1(goobj.AuxPcline, s.Func.Pcln.Pcline) + } + if s.Func.Pcln.Pcinline != nil && s.Func.Pcln.Pcinline.Size != 0 { + w.aux1(goobj.AuxPcinline, s.Func.Pcln.Pcinline) + } + for _, pcSym := range s.Func.Pcln.Pcdata { + w.aux1(goobj.AuxPcdata, pcSym) + } + } } @@ -559,6 +579,19 @@ func nAuxSym(s *LSym) int { if s.Func.dwarfDebugLinesSym != nil && s.Func.dwarfDebugLinesSym.Size != 0 { n++ } + if s.Func.Pcln.Pcsp != nil && s.Func.Pcln.Pcsp.Size != 0 { + n++ + } + if s.Func.Pcln.Pcfile != nil && s.Func.Pcln.Pcfile.Size != 0 { + n++ + } + if s.Func.Pcln.Pcline != nil && s.Func.Pcln.Pcline.Size != 0 { + n++ + } + if s.Func.Pcln.Pcinline != nil && s.Func.Pcln.Pcinline.Size != 0 { + n++ + } + n += len(s.Func.Pcln.Pcdata) } return n } @@ -566,7 +599,17 @@ func nAuxSym(s *LSym) int { // generate symbols for FuncInfo. 
func genFuncInfoSyms(ctxt *Link) { infosyms := make([]*LSym, 0, len(ctxt.Text)) - var pcdataoff uint32 + hashedsyms := make([]*LSym, 0, 4*len(ctxt.Text)) + preparePcSym := func(s *LSym) *LSym { + if s == nil { + return s + } + s.PkgIdx = goobj.PkgIdxHashed + s.SymIdx = int32(len(hashedsyms) + len(ctxt.hasheddefs)) + s.Set(AttrIndexed, true) + hashedsyms = append(hashedsyms, s) + return s + } var b bytes.Buffer symidx := int32(len(ctxt.defs)) for _, s := range ctxt.Text { @@ -579,20 +622,14 @@ func genFuncInfoSyms(ctxt *Link) { FuncID: objabi.FuncID(s.Func.FuncID), } pc := &s.Func.Pcln - o.Pcsp = pcdataoff - pcdataoff += uint32(len(pc.Pcsp.P)) - o.Pcfile = pcdataoff - pcdataoff += uint32(len(pc.Pcfile.P)) - o.Pcline = pcdataoff - pcdataoff += uint32(len(pc.Pcline.P)) - o.Pcinline = pcdataoff - pcdataoff += uint32(len(pc.Pcinline.P)) - o.Pcdata = make([]uint32, len(pc.Pcdata)) - for i, pcd := range pc.Pcdata { - o.Pcdata[i] = pcdataoff - pcdataoff += uint32(len(pcd.P)) + o.Pcsp = makeSymRef(preparePcSym(pc.Pcsp)) + o.Pcfile = makeSymRef(preparePcSym(pc.Pcfile)) + o.Pcline = makeSymRef(preparePcSym(pc.Pcline)) + o.Pcinline = makeSymRef(preparePcSym(pc.Pcinline)) + o.Pcdata = make([]goobj.SymRef, len(pc.Pcdata)) + for i, pcSym := range pc.Pcdata { + o.Pcdata[i] = makeSymRef(preparePcSym(pcSym)) } - o.PcdataEnd = pcdataoff o.Funcdataoff = make([]uint32, len(pc.Funcdataoff)) for i, x := range pc.Funcdataoff { o.Funcdataoff[i] = uint32(x) @@ -642,9 +679,9 @@ func genFuncInfoSyms(ctxt *Link) { } } ctxt.defs = append(ctxt.defs, infosyms...) + ctxt.hasheddefs = append(ctxt.hasheddefs, hashedsyms...) } -// debugDumpAux is a dumper for selected aux symbols. func writeAuxSymDebug(ctxt *Link, par *LSym, aux *LSym) { // Most aux symbols (ex: funcdata) are not interesting-- // pick out just the DWARF ones for now. 
diff --git a/src/cmd/internal/obj/pcln.go b/src/cmd/internal/obj/pcln.go index 1f7ccf47ef..7750637796 100644 --- a/src/cmd/internal/obj/pcln.go +++ b/src/cmd/internal/obj/pcln.go @@ -6,6 +6,7 @@ package obj import ( "cmd/internal/goobj" + "cmd/internal/objabi" "encoding/binary" "log" ) @@ -14,16 +15,19 @@ import ( // returned by valfunc parameterized by arg. The invocation of valfunc to update the // current value is, for each p, // -// val = valfunc(func, val, p, 0, arg); -// record val as value at p->pc; -// val = valfunc(func, val, p, 1, arg); +// sym = valfunc(func, p, 0, arg); +// record sym.P as value at p->pc; +// sym = valfunc(func, p, 1, arg); // // where func is the function, val is the current value, p is the instruction being // considered, and arg can be used to further parameterize valfunc. -func funcpctab(ctxt *Link, dst *Pcdata, func_ *LSym, desc string, valfunc func(*Link, *LSym, int32, *Prog, int32, interface{}) int32, arg interface{}) { +func funcpctab(ctxt *Link, func_ *LSym, desc string, valfunc func(*Link, *LSym, int32, *Prog, int32, interface{}) int32, arg interface{}) *LSym { dbg := desc == ctxt.Debugpcln - - dst.P = dst.P[:0] + dst := []byte{} + sym := &LSym{ + Type: objabi.SRODATA, + Attribute: AttrContentAddressable, + } if dbg { ctxt.Logf("funcpctab %s [valfunc=%s]\n", func_.Name, desc) @@ -32,7 +36,8 @@ func funcpctab(ctxt *Link, dst *Pcdata, func_ *LSym, desc string, valfunc func(* val := int32(-1) oldval := val if func_.Func.Text == nil { - return + // Return the emtpy symbol we've built so far. + return sym } pc := func_.Func.Text.Pc @@ -88,13 +93,13 @@ func funcpctab(ctxt *Link, dst *Pcdata, func_ *LSym, desc string, valfunc func(* if started { pcdelta := (p.Pc - pc) / int64(ctxt.Arch.MinLC) n := binary.PutUvarint(buf, uint64(pcdelta)) - dst.P = append(dst.P, buf[:n]...) + dst = append(dst, buf[:n]...) pc = p.Pc } delta := val - oldval n := binary.PutVarint(buf, int64(delta)) - dst.P = append(dst.P, buf[:n]...) 
+ dst = append(dst, buf[:n]...) oldval = val started = true val = valfunc(ctxt, func_, val, p, 1, arg) @@ -109,18 +114,22 @@ func funcpctab(ctxt *Link, dst *Pcdata, func_ *LSym, desc string, valfunc func(* ctxt.Diag("negative pc offset: %v", v) } n := binary.PutUvarint(buf, uint64(v)) - dst.P = append(dst.P, buf[:n]...) + dst = append(dst, buf[:n]...) // add terminating varint-encoded 0, which is just 0 - dst.P = append(dst.P, 0) + dst = append(dst, 0) } if dbg { - ctxt.Logf("wrote %d bytes to %p\n", len(dst.P), dst) - for _, p := range dst.P { + ctxt.Logf("wrote %d bytes to %p\n", len(dst), dst) + for _, p := range dst { ctxt.Logf(" %02x", p) } ctxt.Logf("\n") } + + sym.Size = int64(len(dst)) + sym.P = dst + return sym } // pctofileline computes either the file number (arg == 0) @@ -268,18 +277,17 @@ func linkpcln(ctxt *Link, cursym *LSym) { } } - pcln.Pcdata = make([]Pcdata, npcdata) - pcln.Pcdata = pcln.Pcdata[:npcdata] + pcln.Pcdata = make([]*LSym, npcdata) pcln.Funcdata = make([]*LSym, nfuncdata) pcln.Funcdataoff = make([]int64, nfuncdata) pcln.Funcdataoff = pcln.Funcdataoff[:nfuncdata] - funcpctab(ctxt, &pcln.Pcsp, cursym, "pctospadj", pctospadj, nil) - funcpctab(ctxt, &pcln.Pcfile, cursym, "pctofile", pctofileline, pcln) - funcpctab(ctxt, &pcln.Pcline, cursym, "pctoline", pctofileline, nil) + pcln.Pcsp = funcpctab(ctxt, cursym, "pctospadj", pctospadj, nil) + pcln.Pcfile = funcpctab(ctxt, cursym, "pctofile", pctofileline, pcln) + pcln.Pcline = funcpctab(ctxt, cursym, "pctoline", pctofileline, nil) pcinlineState := new(pcinlineState) - funcpctab(ctxt, &pcln.Pcinline, cursym, "pctoinline", pcinlineState.pctoinline, nil) + pcln.Pcinline = funcpctab(ctxt, cursym, "pctoinline", pcinlineState.pctoinline, nil) for _, inlMark := range cursym.Func.InlMarks { pcinlineState.setParentPC(ctxt, int(inlMark.id), int32(inlMark.p.Pc)) } @@ -309,9 +317,14 @@ func linkpcln(ctxt *Link, cursym *LSym) { // pcdata. 
for i := 0; i < npcdata; i++ { if (havepc[i/32]>>uint(i%32))&1 == 0 { - continue + // use an empty symbol. + pcln.Pcdata[i] = &LSym{ + Type: objabi.SRODATA, + Attribute: AttrContentAddressable, + } + } else { + pcln.Pcdata[i] = funcpctab(ctxt, cursym, "pctopcdata", pctopcdata, interface{}(uint32(i))) } - funcpctab(ctxt, &pcln.Pcdata[i], cursym, "pctopcdata", pctopcdata, interface{}(uint32(i))) } // funcdata diff --git a/src/cmd/internal/objfile/goobj.go b/src/cmd/internal/objfile/goobj.go index e838f58aed..8eecebb1df 100644 --- a/src/cmd/internal/objfile/goobj.go +++ b/src/cmd/internal/objfile/goobj.go @@ -236,7 +236,15 @@ func (f *goobjFile) PCToLine(pc uint64) (string, int, *gosym.Func) { if arch == nil { return "", 0, nil } - pcdataBase := r.PcdataBase() + getSymData := func(s goobj.SymRef) []byte { + if s.PkgIdx != goobj.PkgIdxHashed { + // We don't need the data for non-hashed symbols, yet. + panic("not supported") + } + i := uint32(s.SymIdx + uint32(r.NSym()+r.NHashed64def())) + return r.BytesAt(r.DataOff(i), r.DataSize(i)) + } + ndef := uint32(r.NSym() + r.NHashed64def() + r.NHasheddef() + r.NNonpkgdef()) for i := uint32(0); i < ndef; i++ { osym := r.Sym(i) @@ -262,11 +270,9 @@ func (f *goobjFile) PCToLine(pc uint64) (string, int, *gosym.Func) { b := r.BytesAt(r.DataOff(isym), r.DataSize(isym)) var info *goobj.FuncInfo lengths := info.ReadFuncInfoLengths(b) - off, end := info.ReadPcline(b) - pcline := r.BytesAt(pcdataBase+off, int(end-off)) + pcline := getSymData(info.ReadPcline(b)) line := int(pcValue(pcline, pc-addr, arch)) - off, end = info.ReadPcfile(b) - pcfile := r.BytesAt(pcdataBase+off, int(end-off)) + pcfile := getSymData(info.ReadPcfile(b)) fileID := pcValue(pcfile, pc-addr, arch) globalFileID := info.ReadFile(b, lengths.FileOff, uint32(fileID)) fileName := r.File(int(globalFileID)) diff --git a/src/cmd/link/internal/ld/dwarf.go b/src/cmd/link/internal/ld/dwarf.go index d1f2ac583d..2b95ad5a67 100644 --- a/src/cmd/link/internal/ld/dwarf.go +++ 
b/src/cmd/link/internal/ld/dwarf.go @@ -1421,7 +1421,7 @@ func (d *dwctxt) writeframes(fs loader.Sym) dwarfSecInfo { deltaBuf = dwarf.AppendUleb128(deltaBuf, uint64(thearch.Dwarfreglr)) } - for pcsp.Init(fpcsp); !pcsp.Done; pcsp.Next() { + for pcsp.Init(d.linkctxt.loader.Data(fpcsp)); !pcsp.Done; pcsp.Next() { nextpc := pcsp.NextPC // pciterinit goes up to the end of the function, diff --git a/src/cmd/link/internal/ld/lib.go b/src/cmd/link/internal/ld/lib.go index 09c7bbfb53..caa4566190 100644 --- a/src/cmd/link/internal/ld/lib.go +++ b/src/cmd/link/internal/ld/lib.go @@ -2252,7 +2252,7 @@ func (sc *stkChk) check(up *chain, depth int) int { var ch1 chain pcsp := obj.NewPCIter(uint32(ctxt.Arch.MinLC)) ri := 0 - for pcsp.Init(info.Pcsp()); !pcsp.Done; pcsp.Next() { + for pcsp.Init(ldr.Data(info.Pcsp())); !pcsp.Done; pcsp.Next() { // pcsp.value is in effect for [pcsp.pc, pcsp.nextpc). // Check stack size in effect for this span. diff --git a/src/cmd/link/internal/ld/pcln.go b/src/cmd/link/internal/ld/pcln.go index c7535f6a61..e9fd5937e7 100644 --- a/src/cmd/link/internal/ld/pcln.go +++ b/src/cmd/link/internal/ld/pcln.go @@ -592,9 +592,8 @@ func (ctxt *Link) pclntab(container loader.Bitmap) *pclntab { fi := ldr.FuncInfo(s) if fi.Valid() { fi.Preload() - npc := fi.NumPcdata() - for i := uint32(0); i < npc; i++ { - pcdata = append(pcdata, sym.Pcdata{P: fi.Pcdata(int(i))}) + for _, dataSym := range fi.Pcdata() { + pcdata = append(pcdata, sym.Pcdata{P: ldr.Data(dataSym)}) } nfd := fi.NumFuncdataoff() for i := uint32(0); i < nfd; i++ { @@ -666,15 +665,15 @@ func (ctxt *Link) pclntab(container loader.Bitmap) *pclntab { cu := ldr.SymUnit(s) if fi.Valid() { - pcsp = sym.Pcdata{P: fi.Pcsp()} - pcfile = sym.Pcdata{P: fi.Pcfile()} - pcline = sym.Pcdata{P: fi.Pcline()} + pcsp = sym.Pcdata{P: ldr.Data(fi.Pcsp())} + pcfile = sym.Pcdata{P: ldr.Data(fi.Pcfile())} + pcline = sym.Pcdata{P: ldr.Data(fi.Pcline())} } if fi.Valid() && fi.NumInlTree() > 0 { its := oldState.genInlTreeSym(cu, 
fi, ctxt.Arch, state) funcdata[objabi.FUNCDATA_InlTree] = its - pcdata[objabi.PCDATA_InlTreeIndex] = sym.Pcdata{P: fi.Pcinline()} + pcdata[objabi.PCDATA_InlTreeIndex] = sym.Pcdata{P: ldr.Data(fi.Pcinline())} } // pcdata diff --git a/src/cmd/link/internal/loader/loader.go b/src/cmd/link/internal/loader/loader.go index 8fd10b0848..f149e3c831 100644 --- a/src/cmd/link/internal/loader/loader.go +++ b/src/cmd/link/internal/loader/loader.go @@ -1878,19 +1878,24 @@ func (fi *FuncInfo) FuncID() objabi.FuncID { return objabi.FuncID((*goobj.FuncInfo)(nil).ReadFuncID(fi.data)) } -func (fi *FuncInfo) Pcsp() []byte { - pcsp, end := (*goobj.FuncInfo)(nil).ReadPcsp(fi.data) - return fi.r.BytesAt(fi.r.PcdataBase()+pcsp, int(end-pcsp)) +func (fi *FuncInfo) Pcsp() Sym { + sym := (*goobj.FuncInfo)(nil).ReadPcsp(fi.data) + return fi.l.resolve(fi.r, sym) } -func (fi *FuncInfo) Pcfile() []byte { - pcf, end := (*goobj.FuncInfo)(nil).ReadPcfile(fi.data) - return fi.r.BytesAt(fi.r.PcdataBase()+pcf, int(end-pcf)) +func (fi *FuncInfo) Pcfile() Sym { + sym := (*goobj.FuncInfo)(nil).ReadPcfile(fi.data) + return fi.l.resolve(fi.r, sym) } -func (fi *FuncInfo) Pcline() []byte { - pcln, end := (*goobj.FuncInfo)(nil).ReadPcline(fi.data) - return fi.r.BytesAt(fi.r.PcdataBase()+pcln, int(end-pcln)) +func (fi *FuncInfo) Pcline() Sym { + sym := (*goobj.FuncInfo)(nil).ReadPcline(fi.data) + return fi.l.resolve(fi.r, sym) +} + +func (fi *FuncInfo) Pcinline() Sym { + sym := (*goobj.FuncInfo)(nil).ReadPcinline(fi.data) + return fi.l.resolve(fi.r, sym) } // Preload has to be called prior to invoking the various methods @@ -1899,27 +1904,16 @@ func (fi *FuncInfo) Preload() { fi.lengths = (*goobj.FuncInfo)(nil).ReadFuncInfoLengths(fi.data) } -func (fi *FuncInfo) Pcinline() []byte { +func (fi *FuncInfo) Pcdata() []Sym { if !fi.lengths.Initialized { panic("need to call Preload first") } - pcinl, end := (*goobj.FuncInfo)(nil).ReadPcinline(fi.data, fi.lengths.PcdataOff) - return 
fi.r.BytesAt(fi.r.PcdataBase()+pcinl, int(end-pcinl)) -} - -func (fi *FuncInfo) NumPcdata() uint32 { - if !fi.lengths.Initialized { - panic("need to call Preload first") + syms := (*goobj.FuncInfo)(nil).ReadPcdata(fi.data) + ret := make([]Sym, len(syms)) + for i := range ret { + ret[i] = fi.l.resolve(fi.r, syms[i]) } - return fi.lengths.NumPcdata -} - -func (fi *FuncInfo) Pcdata(k int) []byte { - if !fi.lengths.Initialized { - panic("need to call Preload first") - } - pcdat, end := (*goobj.FuncInfo)(nil).ReadPcdata(fi.data, fi.lengths.PcdataOff, uint32(k)) - return fi.r.BytesAt(fi.r.PcdataBase()+pcdat, int(end-pcdat)) + return ret } func (fi *FuncInfo) NumFuncdataoff() uint32 { From 26407b22129e2e54db269c1a92826521addd8d56 Mon Sep 17 00:00:00 2001 From: Jeremy Faller Date: Wed, 12 Aug 2020 19:26:53 -0400 Subject: [PATCH 005/281] [dev.link] cmd/{compile,link}: remove pcdata tables from pclntab_old MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Move the pctables out of pclntab_old. Creates a new generator symbol, runtime.pctab, which holds all the deduplicated pctables. Also, tightens up some of the types in runtime. Darwin, cmd/compile statistics: alloc/op Pclntab_GC 26.4MB ± 0% 13.8MB ± 0% allocs/op Pclntab_GC 89.9k ± 0% 86.4k ± 0% liveB Pclntab_GC 25.5M ± 0% 24.2M ± 0% No significant change in binary size. 
Change-Id: I1560fd4421f8a210f8d4b508fbc54e1780e338f9 Reviewed-on: https://go-review.googlesource.com/c/go/+/248332 Run-TryBot: Jeremy Faller TryBot-Result: Gobot Gobot Reviewed-by: Cherry Zhang --- src/cmd/link/internal/ld/data.go | 2 + src/cmd/link/internal/ld/pcln.go | 143 ++++++++++++------ src/cmd/link/internal/ld/symtab.go | 4 + src/cmd/link/internal/loader/symbolbuilder.go | 9 ++ src/cmd/link/internal/sym/symbol.go | 4 - src/debug/gosym/pclntab.go | 10 +- src/runtime/runtime2.go | 8 +- src/runtime/symtab.go | 28 ++-- 8 files changed, 136 insertions(+), 72 deletions(-) diff --git a/src/cmd/link/internal/ld/data.go b/src/cmd/link/internal/ld/data.go index a551d46403..2aecbfbeb5 100644 --- a/src/cmd/link/internal/ld/data.go +++ b/src/cmd/link/internal/ld/data.go @@ -1925,6 +1925,7 @@ func (state *dodataState) allocateDataSections(ctxt *Link) { ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.funcnametab", 0), sect) ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.cutab", 0), sect) ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.filetab", 0), sect) + ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.pctab", 0), sect) ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.pclntab_old", 0), sect) ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.epclntab", 0), sect) if ctxt.HeadType == objabi.Haix { @@ -2511,6 +2512,7 @@ func (ctxt *Link) address() []*sym.Segment { ctxt.defineInternal("runtime.funcnametab", sym.SRODATA) ctxt.defineInternal("runtime.cutab", sym.SRODATA) ctxt.defineInternal("runtime.filetab", sym.SRODATA) + ctxt.defineInternal("runtime.pctab", sym.SRODATA) ctxt.defineInternal("runtime.pclntab_old", sym.SRODATA) ctxt.xdefine("runtime.epclntab", sym.SRODATA, int64(pclntab.Vaddr+pclntab.Length)) ctxt.xdefine("runtime.noptrdata", sym.SNOPTRDATA, int64(noptr.Vaddr)) diff --git a/src/cmd/link/internal/ld/pcln.go b/src/cmd/link/internal/ld/pcln.go index e9fd5937e7..576f1c3780 100644 --- a/src/cmd/link/internal/ld/pcln.go +++ b/src/cmd/link/internal/ld/pcln.go @@ -43,6 +43,7 @@ type 
pclntab struct { findfunctab loader.Sym cutab loader.Sym filetab loader.Sym + pctab loader.Sym // The number of functions + number of TEXT sections - 1. This is such an // unexpected value because platforms that have more than one TEXT section @@ -273,10 +274,11 @@ func (state *pclntab) generatePCHeader(ctxt *Link) { off = writeSymOffset(off, state.funcnametab) off = writeSymOffset(off, state.cutab) off = writeSymOffset(off, state.filetab) + off = writeSymOffset(off, state.pctab) off = writeSymOffset(off, state.pclntab) } - size := int64(8 + 6*ctxt.Arch.PtrSize) + size := int64(8 + 7*ctxt.Arch.PtrSize) state.pcheader = state.addGeneratedSym(ctxt, "runtime.pcheader", size, writeHeader) } @@ -463,6 +465,68 @@ func (state *pclntab) generateFilenameTabs(ctxt *Link, compUnits []*sym.Compilat return cuOffsets } +// generatePctab creates the runtime.pctab variable, holding all the +// deduplicated pcdata. +func (state *pclntab) generatePctab(ctxt *Link, container loader.Bitmap) { + ldr := ctxt.loader + + // Pctab offsets of 0 are considered invalid in the runtime. We respect + // that by just padding a single byte at the beginning of runtime.pctab, + // that way no real offsets can be zero. + size := int64(1) + + // Walk the functions, finding offset to store each pcdata. + seen := make(map[loader.Sym]struct{}) + saveOffset := func(pcSym loader.Sym) { + if _, ok := seen[pcSym]; !ok { + datSize := ldr.SymSize(pcSym) + if datSize != 0 { + ldr.SetSymValue(pcSym, size) + } else { + // Invalid PC data, record as zero. 
+ ldr.SetSymValue(pcSym, 0) + } + size += datSize + seen[pcSym] = struct{}{} + } + } + for _, s := range ctxt.Textp { + if !emitPcln(ctxt, s, container) { + continue + } + fi := ldr.FuncInfo(s) + if !fi.Valid() { + continue + } + fi.Preload() + + pcSyms := []loader.Sym{fi.Pcsp(), fi.Pcfile(), fi.Pcline()} + for _, pcSym := range pcSyms { + saveOffset(pcSym) + } + for _, pcSym := range fi.Pcdata() { + saveOffset(pcSym) + } + if fi.NumInlTree() > 0 { + saveOffset(fi.Pcinline()) + } + } + + // TODO: There is no reason we need a generator for this variable, and it + // could be moved to a carrier symbol. However, carrier symbols containing + // carrier symbols don't work yet (as of Aug 2020). Once this is fixed, + // runtime.pctab could just be a carrier sym. + writePctab := func(ctxt *Link, s loader.Sym) { + ldr := ctxt.loader + sb := ldr.MakeSymbolUpdater(s) + for sym := range seen { + sb.SetBytesAt(ldr.SymValue(sym), ldr.Data(sym)) + } + } + + state.pctab = state.addGeneratedSym(ctxt, "runtime.pctab", size, writePctab) +} + // pclntab initializes the pclntab symbol with // runtime function and file name information. @@ -494,6 +558,9 @@ func (ctxt *Link) pclntab(container loader.Bitmap) *pclntab { // runtime.filetab // []null terminated filename strings // + // runtime.pctab + // []byte of deduplicated pc data. 
+ // // runtime.pclntab_old // function table, alternating PC and offset to func struct [each entry thearch.ptrsize bytes] // end PC [thearch.ptrsize bytes] @@ -514,6 +581,7 @@ func (ctxt *Link) pclntab(container loader.Bitmap) *pclntab { state.generatePCHeader(ctxt) state.generateFuncnametab(ctxt, container) cuOffsets := state.generateFilenameTabs(ctxt, compUnits, container) + state.generatePctab(ctxt, container) funcdataBytes := int64(0) ldr.SetCarrierSym(state.pclntab, state.carrier) @@ -525,21 +593,6 @@ func (ctxt *Link) pclntab(container loader.Bitmap) *pclntab { ftab.Grow(int64(state.nfunc)*2*int64(ctxt.Arch.PtrSize) + int64(ctxt.Arch.PtrSize) + 4) - szHint := len(ctxt.Textp) * 2 - pctaboff := make(map[string]uint32, szHint) - writepctab := func(off int32, p []byte) int32 { - start, ok := pctaboff[string(p)] - if !ok { - if len(p) > 0 { - start = uint32(len(ftab.Data())) - ftab.AddBytes(p) - } - pctaboff[string(p)] = start - } - newoff := int32(ftab.SetUint32(ctxt.Arch, int64(off), start)) - return newoff - } - setAddr := (*loader.SymbolBuilder).SetAddrPlus if ctxt.IsExe() && ctxt.IsInternal() { // Internal linking static executable. 
At this point the function @@ -555,10 +608,6 @@ func (ctxt *Link) pclntab(container loader.Bitmap) *pclntab { } } - pcsp := sym.Pcdata{} - pcfile := sym.Pcdata{} - pcline := sym.Pcdata{} - pcdata := []sym.Pcdata{} funcdata := []loader.Sym{} funcdataoff := []int64{} @@ -583,18 +632,13 @@ func (ctxt *Link) pclntab(container loader.Bitmap) *pclntab { } prevFunc = s - pcsp.P = pcsp.P[:0] - pcline.P = pcline.P[:0] - pcfile.P = pcfile.P[:0] - pcdata = pcdata[:0] + var numPCData int32 funcdataoff = funcdataoff[:0] funcdata = funcdata[:0] fi := ldr.FuncInfo(s) if fi.Valid() { fi.Preload() - for _, dataSym := range fi.Pcdata() { - pcdata = append(pcdata, sym.Pcdata{P: ldr.Data(dataSym)}) - } + numPCData = int32(len(fi.Pcdata())) nfd := fi.NumFuncdataoff() for i := uint32(0); i < nfd; i++ { funcdataoff = append(funcdataoff, fi.Funcdataoff(int(i))) @@ -602,15 +646,12 @@ func (ctxt *Link) pclntab(container loader.Bitmap) *pclntab { funcdata = fi.Funcdata(funcdata) } + writeInlPCData := false if fi.Valid() && fi.NumInlTree() > 0 { - - if len(pcdata) <= objabi.PCDATA_InlTreeIndex { - // Create inlining pcdata table. - newpcdata := make([]sym.Pcdata, objabi.PCDATA_InlTreeIndex+1) - copy(newpcdata, pcdata) - pcdata = newpcdata + writeInlPCData = true + if numPCData <= objabi.PCDATA_InlTreeIndex { + numPCData = objabi.PCDATA_InlTreeIndex + 1 } - if len(funcdataoff) <= objabi.FUNCDATA_InlTree { // Create inline tree funcdata. 
newfuncdata := make([]loader.Sym, objabi.FUNCDATA_InlTree+1) @@ -635,7 +676,7 @@ func (ctxt *Link) pclntab(container loader.Bitmap) *pclntab { // fixed size of struct, checked below off := funcstart - end := funcstart + int32(ctxt.Arch.PtrSize) + 3*4 + 6*4 + int32(len(pcdata))*4 + int32(len(funcdata))*int32(ctxt.Arch.PtrSize) + end := funcstart + int32(ctxt.Arch.PtrSize) + 3*4 + 6*4 + numPCData*4 + int32(len(funcdata))*int32(ctxt.Arch.PtrSize) if len(funcdata) > 0 && (end&int32(ctxt.Arch.PtrSize-1) != 0) { end += 4 } @@ -664,23 +705,21 @@ func (ctxt *Link) pclntab(container loader.Bitmap) *pclntab { off = int32(ftab.SetUint32(ctxt.Arch, int64(off), deferreturn)) cu := ldr.SymUnit(s) - if fi.Valid() { - pcsp = sym.Pcdata{P: ldr.Data(fi.Pcsp())} - pcfile = sym.Pcdata{P: ldr.Data(fi.Pcfile())} - pcline = sym.Pcdata{P: ldr.Data(fi.Pcline())} - } if fi.Valid() && fi.NumInlTree() > 0 { its := oldState.genInlTreeSym(cu, fi, ctxt.Arch, state) funcdata[objabi.FUNCDATA_InlTree] = its - pcdata[objabi.PCDATA_InlTreeIndex] = sym.Pcdata{P: ldr.Data(fi.Pcinline())} } // pcdata - off = writepctab(off, pcsp.P) - off = writepctab(off, pcfile.P) - off = writepctab(off, pcline.P) - off = int32(ftab.SetUint32(ctxt.Arch, int64(off), uint32(len(pcdata)))) + if fi.Valid() { + off = int32(ftab.SetUint32(ctxt.Arch, int64(off), uint32(ldr.SymValue(fi.Pcsp())))) + off = int32(ftab.SetUint32(ctxt.Arch, int64(off), uint32(ldr.SymValue(fi.Pcfile())))) + off = int32(ftab.SetUint32(ctxt.Arch, int64(off), uint32(ldr.SymValue(fi.Pcline())))) + } else { + off += 12 + } + off = int32(ftab.SetUint32(ctxt.Arch, int64(off), uint32(numPCData))) // Store the offset to compilation unit's file table. cuIdx := ^uint32(0) @@ -700,9 +739,17 @@ func (ctxt *Link) pclntab(container loader.Bitmap) *pclntab { // nfuncdata must be the final entry. off = int32(ftab.SetUint8(ctxt.Arch, int64(off), uint8(len(funcdata)))) - for i := range pcdata { - off = writepctab(off, pcdata[i].P) + + // Output the pcdata. 
+ if fi.Valid() { + for i, pcSym := range fi.Pcdata() { + ftab.SetUint32(ctxt.Arch, int64(off+int32(i*4)), uint32(ldr.SymValue(pcSym))) + } + if writeInlPCData { + ftab.SetUint32(ctxt.Arch, int64(off+objabi.PCDATA_InlTreeIndex*4), uint32(ldr.SymValue(fi.Pcinline()))) + } } + off += numPCData * 4 // funcdata, must be pointer-aligned and we're only int32-aligned. // Missing funcdata will be 0 (nil pointer). @@ -724,7 +771,7 @@ func (ctxt *Link) pclntab(container loader.Bitmap) *pclntab { } if off != end { - ctxt.Errorf(s, "bad math in functab: funcstart=%d off=%d but end=%d (npcdata=%d nfuncdata=%d ptrsize=%d)", funcstart, off, end, len(pcdata), len(funcdata), ctxt.Arch.PtrSize) + ctxt.Errorf(s, "bad math in functab: funcstart=%d off=%d but end=%d (npcdata=%d nfuncdata=%d ptrsize=%d)", funcstart, off, end, numPCData, len(funcdata), ctxt.Arch.PtrSize) errorexit() } diff --git a/src/cmd/link/internal/ld/symtab.go b/src/cmd/link/internal/ld/symtab.go index d05b98f04a..520aaa44c2 100644 --- a/src/cmd/link/internal/ld/symtab.go +++ b/src/cmd/link/internal/ld/symtab.go @@ -627,6 +627,10 @@ func (ctxt *Link) symtab(pcln *pclntab) []sym.SymKind { moduledata.AddAddr(ctxt.Arch, pcln.filetab) moduledata.AddUint(ctxt.Arch, uint64(ldr.SymSize(pcln.filetab))) moduledata.AddUint(ctxt.Arch, uint64(ldr.SymSize(pcln.filetab))) + // The pctab slice + moduledata.AddAddr(ctxt.Arch, pcln.pctab) + moduledata.AddUint(ctxt.Arch, uint64(ldr.SymSize(pcln.pctab))) + moduledata.AddUint(ctxt.Arch, uint64(ldr.SymSize(pcln.pctab))) // The pclntab slice moduledata.AddAddr(ctxt.Arch, pcln.pclntab) moduledata.AddUint(ctxt.Arch, uint64(ldr.SymSize(pcln.pclntab))) diff --git a/src/cmd/link/internal/loader/symbolbuilder.go b/src/cmd/link/internal/loader/symbolbuilder.go index e14d89a927..c0c723d7f0 100644 --- a/src/cmd/link/internal/loader/symbolbuilder.go +++ b/src/cmd/link/internal/loader/symbolbuilder.go @@ -336,6 +336,15 @@ func (sb *SymbolBuilder) Addstring(str string) int64 { return r } +func (sb 
*SymbolBuilder) SetBytesAt(off int64, b []byte) int64 { + datLen := int64(len(b)) + if off+datLen > int64(len(sb.data)) { + panic("attempt to write past end of buffer") + } + copy(sb.data[off:off+datLen], b) + return off + datLen +} + func (sb *SymbolBuilder) addSymRef(tgt Sym, add int64, typ objabi.RelocType, rsize int) int64 { if sb.kind == 0 { sb.kind = sym.SDATA diff --git a/src/cmd/link/internal/sym/symbol.go b/src/cmd/link/internal/sym/symbol.go index 1a4165ebf7..70cf36a87e 100644 --- a/src/cmd/link/internal/sym/symbol.go +++ b/src/cmd/link/internal/sym/symbol.go @@ -33,7 +33,3 @@ func VersionToABI(v int) (obj.ABI, bool) { } return ^obj.ABI(0), false } - -type Pcdata struct { - P []byte -} diff --git a/src/debug/gosym/pclntab.go b/src/debug/gosym/pclntab.go index 21edddda20..a72f9847d7 100644 --- a/src/debug/gosym/pclntab.go +++ b/src/debug/gosym/pclntab.go @@ -58,6 +58,7 @@ type LineTable struct { functab []byte nfunctab uint32 filetab []byte + pctab []byte // points to the pctables. 
nfiletab uint32 funcNames map[uint32]string // cache the function names strings map[uint32]string // interned substrings of Data, keyed by offset @@ -235,6 +236,8 @@ func (t *LineTable) parsePclnTab() { offset = t.uintptr(t.Data[8+4*t.ptrsize:]) t.filetab = t.Data[offset:] offset = t.uintptr(t.Data[8+5*t.ptrsize:]) + t.pctab = t.Data[offset:] + offset = t.uintptr(t.Data[8+6*t.ptrsize:]) t.funcdata = t.Data[offset:] t.functab = t.Data[offset:] functabsize := t.nfunctab*2*t.ptrsize + t.ptrsize @@ -244,6 +247,7 @@ func (t *LineTable) parsePclnTab() { t.funcdata = t.Data t.funcnametab = t.Data t.functab = t.Data[8+t.ptrsize:] + t.pctab = t.Data functabsize := t.nfunctab*2*t.ptrsize + t.ptrsize fileoff := t.binary.Uint32(t.functab[functabsize:]) t.functab = t.functab[:functabsize] @@ -373,7 +377,7 @@ func (t *LineTable) step(p *[]byte, pc *uint64, val *int32, first bool) bool { // off is the offset to the beginning of the pc-value table, // and entry is the start PC for the corresponding function. func (t *LineTable) pcvalue(off uint32, entry, targetpc uint64) int32 { - p := t.funcdata[off:] + p := t.pctab[off:] val := int32(-1) pc := entry @@ -396,8 +400,8 @@ func (t *LineTable) findFileLine(entry uint64, filetab, linetab uint32, filenum, return 0 } - fp := t.funcdata[filetab:] - fl := t.funcdata[linetab:] + fp := t.pctab[filetab:] + fl := t.pctab[linetab:] fileVal := int32(-1) filePC := entry lineVal := int32(-1) diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go index 5a79c7e6ec..755c409078 100644 --- a/src/runtime/runtime2.go +++ b/src/runtime/runtime2.go @@ -800,10 +800,10 @@ type _func struct { args int32 // in/out args size deferreturn uint32 // offset of start of a deferreturn call instruction from entry, if any. 
- pcsp int32 - pcfile int32 - pcln int32 - npcdata int32 + pcsp uint32 + pcfile uint32 + pcln uint32 + npcdata uint32 cuOffset uint32 // runtime.cutab offset of this function's CU funcID funcID // set for certain special runtime functions _ [2]byte // pad diff --git a/src/runtime/symtab.go b/src/runtime/symtab.go index fbd9315522..0610f75179 100644 --- a/src/runtime/symtab.go +++ b/src/runtime/symtab.go @@ -345,6 +345,7 @@ type pcHeader struct { funcnameOffset uintptr // offset to the funcnametab variable from pcHeader cuOffset uintptr // offset to the cutab variable from pcHeader filetabOffset uintptr // offset to the filetab variable from pcHeader + pctabOffset uintptr // offset to the pctab varible from pcHeader pclnOffset uintptr // offset to the pclntab variable from pcHeader } @@ -358,6 +359,7 @@ type moduledata struct { funcnametab []byte cutab []uint32 filetab []byte + pctab []byte pclntable []byte ftab []functab findfunctab uintptr @@ -721,7 +723,7 @@ type pcvalueCache struct { type pcvalueCacheEnt struct { // targetpc and off together are the key of this cache entry. targetpc uintptr - off int32 + off uint32 // val is the value of this cached pcvalue entry. val int32 } @@ -736,7 +738,7 @@ func pcvalueCacheKey(targetpc uintptr) uintptr { // Returns the PCData value, and the PC where this value starts. // TODO: the start PC is returned only when cache is nil. 
-func pcvalue(f funcInfo, off int32, targetpc uintptr, cache *pcvalueCache, strict bool) (int32, uintptr) { +func pcvalue(f funcInfo, off uint32, targetpc uintptr, cache *pcvalueCache, strict bool) (int32, uintptr) { if off == 0 { return -1, 0 } @@ -770,7 +772,7 @@ func pcvalue(f funcInfo, off int32, targetpc uintptr, cache *pcvalueCache, stric return -1, 0 } datap := f.datap - p := datap.pclntable[off:] + p := datap.pctab[off:] pc := f.entry prevpc := pc val := int32(-1) @@ -812,7 +814,7 @@ func pcvalue(f funcInfo, off int32, targetpc uintptr, cache *pcvalueCache, stric print("runtime: invalid pc-encoded table f=", funcname(f), " pc=", hex(pc), " targetpc=", hex(targetpc), " tab=", p, "\n") - p = datap.pclntable[off:] + p = datap.pctab[off:] pc = f.entry val = -1 for { @@ -893,7 +895,7 @@ func funcspdelta(f funcInfo, targetpc uintptr, cache *pcvalueCache) int32 { // funcMaxSPDelta returns the maximum spdelta at any point in f. func funcMaxSPDelta(f funcInfo) int32 { datap := f.datap - p := datap.pclntable[f.pcsp:] + p := datap.pctab[f.pcsp:] pc := f.entry val := int32(-1) max := int32(0) @@ -909,20 +911,20 @@ func funcMaxSPDelta(f funcInfo) int32 { } } -func pcdatastart(f funcInfo, table int32) int32 { - return *(*int32)(add(unsafe.Pointer(&f.nfuncdata), unsafe.Sizeof(f.nfuncdata)+uintptr(table)*4)) +func pcdatastart(f funcInfo, table uint32) uint32 { + return *(*uint32)(add(unsafe.Pointer(&f.nfuncdata), unsafe.Sizeof(f.nfuncdata)+uintptr(table)*4)) } -func pcdatavalue(f funcInfo, table int32, targetpc uintptr, cache *pcvalueCache) int32 { - if table < 0 || table >= f.npcdata { +func pcdatavalue(f funcInfo, table uint32, targetpc uintptr, cache *pcvalueCache) int32 { + if table >= f.npcdata { return -1 } r, _ := pcvalue(f, pcdatastart(f, table), targetpc, cache, true) return r } -func pcdatavalue1(f funcInfo, table int32, targetpc uintptr, cache *pcvalueCache, strict bool) int32 { - if table < 0 || table >= f.npcdata { +func pcdatavalue1(f funcInfo, table uint32, 
targetpc uintptr, cache *pcvalueCache, strict bool) int32 { + if table >= f.npcdata { return -1 } r, _ := pcvalue(f, pcdatastart(f, table), targetpc, cache, strict) @@ -931,8 +933,8 @@ func pcdatavalue1(f funcInfo, table int32, targetpc uintptr, cache *pcvalueCache // Like pcdatavalue, but also return the start PC of this PCData value. // It doesn't take a cache. -func pcdatavalue2(f funcInfo, table int32, targetpc uintptr) (int32, uintptr) { - if table < 0 || table >= f.npcdata { +func pcdatavalue2(f funcInfo, table uint32, targetpc uintptr) (int32, uintptr) { + if table >= f.npcdata { return -1, 0 } return pcvalue(f, pcdatastart(f, table), targetpc, nil, true) From ac5c406ef0ab20e2a11f57470271266ef4265221 Mon Sep 17 00:00:00 2001 From: Jeremy Faller Date: Thu, 13 Aug 2020 12:21:18 -0400 Subject: [PATCH 006/281] [dev.link] cmd/link: clean up some pclntab state Clean up some pclntab state, specifically: 1) Remove the oldPclnState type. 2) Move a structure out of pclnState, that was holding some memory. 3) Stop passing container around everywhere and calling emitPcln. Use a slice of function symbols instead. Change-Id: I74e916564cd769a706750d024e55ee0d811a79da Reviewed-on: https://go-review.googlesource.com/c/go/+/248379 Run-TryBot: Jeremy Faller TryBot-Result: Gobot Gobot Reviewed-by: Austin Clements Reviewed-by: Cherry Zhang --- src/cmd/link/internal/ld/pcln.go | 135 +++++++++++-------------------- 1 file changed, 47 insertions(+), 88 deletions(-) diff --git a/src/cmd/link/internal/ld/pcln.go b/src/cmd/link/internal/ld/pcln.go index 576f1c3780..33476ec292 100644 --- a/src/cmd/link/internal/ld/pcln.go +++ b/src/cmd/link/internal/ld/pcln.go @@ -16,17 +16,6 @@ import ( "strings" ) -// oldPclnState holds state information used during pclntab generation. 
Here -// 'ldr' is just a pointer to the context's loader, 'deferReturnSym' is the -// index for the symbol "runtime.deferreturn", -// -// NB: This is deprecated, and will be eliminated when pclntab_old is -// eliminated. -type oldPclnState struct { - ldr *loader.Loader - deferReturnSym loader.Sym -} - // pclntab holds the state needed for pclntab generation. type pclntab struct { // The first and last functions found. @@ -56,12 +45,6 @@ type pclntab struct { // The number of filenames in runtime.filetab. nfiles uint32 - - // maps the function symbol to offset in runtime.funcnametab - // This doesn't need to reside in the state once pclntab_old's been - // deleted -- it can live in generateFuncnametab. - // TODO(jfaller): Delete me! - funcNameOffset map[loader.Sym]int32 } // addGeneratedSym adds a generator symbol to pclntab, returning the new Sym. @@ -76,35 +59,26 @@ func (state *pclntab) addGeneratedSym(ctxt *Link, name string, size int64, f gen return s } -func makeOldPclnState(ctxt *Link) *oldPclnState { - ldr := ctxt.loader - drs := ldr.Lookup("runtime.deferreturn", sym.SymVerABIInternal) - state := &oldPclnState{ - ldr: ldr, - deferReturnSym: drs, - } - - return state -} - // makePclntab makes a pclntab object, and assembles all the compilation units -// we'll need to write pclntab. -func makePclntab(ctxt *Link, container loader.Bitmap) (*pclntab, []*sym.CompilationUnit) { +// we'll need to write pclntab. Returns the pclntab structure, a slice of the +// CompilationUnits we need, and a slice of the function symbols we need to +// generate pclntab. +func makePclntab(ctxt *Link, container loader.Bitmap) (*pclntab, []*sym.CompilationUnit, []loader.Sym) { ldr := ctxt.loader - state := &pclntab{ - funcNameOffset: make(map[loader.Sym]int32), - } + state := &pclntab{} // Gather some basic stats and info. 
seenCUs := make(map[*sym.CompilationUnit]struct{}) prevSect := ldr.SymSect(ctxt.Textp[0]) compUnits := []*sym.CompilationUnit{} + funcs := []loader.Sym{} for _, s := range ctxt.Textp { if !emitPcln(ctxt, s, container) { continue } + funcs = append(funcs, s) state.nfunc++ if state.firstFunc == 0 { state.firstFunc = s @@ -130,15 +104,7 @@ func makePclntab(ctxt *Link, container loader.Bitmap) (*pclntab, []*sym.Compilat compUnits = append(compUnits, cu) } } - return state, compUnits -} - -func ftabaddstring(ftab *loader.SymbolBuilder, s string) int32 { - start := len(ftab.Data()) - ftab.Grow(int64(start + len(s) + 1)) // make room for s plus trailing NUL - ftd := ftab.Data() - copy(ftd[start:], s) - return int32(start) + return state, compUnits, funcs } // onlycsymbol looks at a symbol's name to report whether this is a @@ -163,11 +129,13 @@ func emitPcln(ctxt *Link, s loader.Sym, container loader.Bitmap) bool { return !container.Has(s) } -func (state *oldPclnState) computeDeferReturn(target *Target, s loader.Sym) uint32 { +func computeDeferReturn(ctxt *Link, deferReturnSym, s loader.Sym) uint32 { + ldr := ctxt.loader + target := ctxt.Target deferreturn := uint32(0) lastWasmAddr := uint32(0) - relocs := state.ldr.Relocs(s) + relocs := ldr.Relocs(s) for ri := 0; ri < relocs.Count(); ri++ { r := relocs.At(ri) if target.IsWasm() && r.Type() == objabi.R_ADDR { @@ -178,7 +146,7 @@ func (state *oldPclnState) computeDeferReturn(target *Target, s loader.Sym) uint // set the resumption point to PC_B. 
lastWasmAddr = uint32(r.Add()) } - if r.Type().IsDirectCall() && (r.Sym() == state.deferReturnSym || state.ldr.IsDeferReturnTramp(r.Sym())) { + if r.Type().IsDirectCall() && (r.Sym() == deferReturnSym || ldr.IsDeferReturnTramp(r.Sym())) { if target.IsWasm() { deferreturn = lastWasmAddr - 1 } else { @@ -211,8 +179,8 @@ func (state *oldPclnState) computeDeferReturn(target *Target, s loader.Sym) uint // genInlTreeSym generates the InlTree sym for a function with the // specified FuncInfo. -func (state *oldPclnState) genInlTreeSym(cu *sym.CompilationUnit, fi loader.FuncInfo, arch *sys.Arch, newState *pclntab) loader.Sym { - ldr := state.ldr +func genInlTreeSym(ctxt *Link, cu *sym.CompilationUnit, fi loader.FuncInfo, arch *sys.Arch, nameOffsets map[loader.Sym]uint32) loader.Sym { + ldr := ctxt.loader its := ldr.CreateExtSym("", 0) inlTreeSym := ldr.MakeSymbolUpdater(its) // Note: the generated symbol is given a type of sym.SGOFUNC, as a @@ -225,7 +193,7 @@ func (state *oldPclnState) genInlTreeSym(cu *sym.CompilationUnit, fi loader.Func for i := 0; i < int(ninl); i++ { call := fi.InlTree(i) val := call.File - nameoff, ok := newState.funcNameOffset[call.Func] + nameoff, ok := nameOffsets[call.Func] if !ok { panic("couldn't find function name offset") } @@ -282,16 +250,12 @@ func (state *pclntab) generatePCHeader(ctxt *Link) { state.pcheader = state.addGeneratedSym(ctxt, "runtime.pcheader", size, writeHeader) } -// walkFuncs iterates over the Textp, calling a function for each unique +// walkFuncs iterates over the funcs, calling a function for each unique // function and inlined function. 
-func (state *pclntab) walkFuncs(ctxt *Link, container loader.Bitmap, f func(loader.Sym)) { +func walkFuncs(ctxt *Link, funcs []loader.Sym, f func(loader.Sym)) { ldr := ctxt.loader seen := make(map[loader.Sym]struct{}) - for _, ls := range ctxt.Textp { - s := loader.Sym(ls) - if !emitPcln(ctxt, s, container) { - continue - } + for _, s := range funcs { if _, ok := seen[s]; !ok { f(s) seen[s] = struct{}{} @@ -312,37 +276,37 @@ func (state *pclntab) walkFuncs(ctxt *Link, container loader.Bitmap, f func(load } } -// generateFuncnametab creates the function name table. -func (state *pclntab) generateFuncnametab(ctxt *Link, container loader.Bitmap) { +// generateFuncnametab creates the function name table. Returns a map of +// func symbol to the name offset in runtime.funcnamtab. +func (state *pclntab) generateFuncnametab(ctxt *Link, funcs []loader.Sym) map[loader.Sym]uint32 { + nameOffsets := make(map[loader.Sym]uint32, state.nfunc) + // Write the null terminated strings. writeFuncNameTab := func(ctxt *Link, s loader.Sym) { symtab := ctxt.loader.MakeSymbolUpdater(s) - for s, off := range state.funcNameOffset { + for s, off := range nameOffsets { symtab.AddStringAt(int64(off), ctxt.loader.SymName(s)) } } // Loop through the CUs, and calculate the size needed. var size int64 - state.walkFuncs(ctxt, container, func(s loader.Sym) { - state.funcNameOffset[s] = int32(size) + walkFuncs(ctxt, funcs, func(s loader.Sym) { + nameOffsets[s] = uint32(size) size += int64(ctxt.loader.SymNameLen(s)) + 1 // NULL terminate }) state.funcnametab = state.addGeneratedSym(ctxt, "runtime.funcnametab", size, writeFuncNameTab) + return nameOffsets } -// walkFilenames walks the filenames in the all reachable functions. -func walkFilenames(ctxt *Link, container loader.Bitmap, f func(*sym.CompilationUnit, goobj.CUFileIndex)) { +// walkFilenames walks funcs, calling a function for each filename used in each +// function's line table. 
+func walkFilenames(ctxt *Link, funcs []loader.Sym, f func(*sym.CompilationUnit, goobj.CUFileIndex)) { ldr := ctxt.loader // Loop through all functions, finding the filenames we need. - for _, ls := range ctxt.Textp { - s := loader.Sym(ls) - if !emitPcln(ctxt, s, container) { - continue - } - + for _, s := range funcs { fi := ldr.FuncInfo(s) if !fi.Valid() { continue @@ -382,7 +346,7 @@ func walkFilenames(ctxt *Link, container loader.Bitmap, f func(*sym.CompilationU // 1) Get Func.CUIndex: M := func.cuOffset // 2) Find filename offset: fileOffset := runtime.cutab[M+K] // 3) Get the filename: getcstring(runtime.filetab[fileOffset]) -func (state *pclntab) generateFilenameTabs(ctxt *Link, compUnits []*sym.CompilationUnit, container loader.Bitmap) []uint32 { +func (state *pclntab) generateFilenameTabs(ctxt *Link, compUnits []*sym.CompilationUnit, funcs []loader.Sym) []uint32 { // On a per-CU basis, keep track of all the filenames we need. // // Note, that we store the filenames in a separate section in the object @@ -402,7 +366,7 @@ func (state *pclntab) generateFilenameTabs(ctxt *Link, compUnits []*sym.Compilat // file index we've seen per CU so we can calculate how large the // CU->global table needs to be. var fileSize int64 - walkFilenames(ctxt, container, func(cu *sym.CompilationUnit, i goobj.CUFileIndex) { + walkFilenames(ctxt, funcs, func(cu *sym.CompilationUnit, i goobj.CUFileIndex) { // Note we use the raw filename for lookup, but use the expanded filename // when we save the size. filename := cu.FileTable[i] @@ -467,7 +431,7 @@ func (state *pclntab) generateFilenameTabs(ctxt *Link, compUnits []*sym.Compilat // generatePctab creates the runtime.pctab variable, holding all the // deduplicated pcdata. -func (state *pclntab) generatePctab(ctxt *Link, container loader.Bitmap) { +func (state *pclntab) generatePctab(ctxt *Link, funcs []loader.Sym) { ldr := ctxt.loader // Pctab offsets of 0 are considered invalid in the runtime. 
We respect @@ -490,10 +454,7 @@ func (state *pclntab) generatePctab(ctxt *Link, container loader.Bitmap) { seen[pcSym] = struct{}{} } } - for _, s := range ctxt.Textp { - if !emitPcln(ctxt, s, container) { - continue - } + for _, s := range funcs { fi := ldr.FuncInfo(s) if !fi.Valid() { continue @@ -566,8 +527,7 @@ func (ctxt *Link) pclntab(container loader.Bitmap) *pclntab { // end PC [thearch.ptrsize bytes] // func structures, pcdata tables. - oldState := makeOldPclnState(ctxt) - state, compUnits := makePclntab(ctxt, container) + state, compUnits, funcs := makePclntab(ctxt, container) ldr := ctxt.loader state.carrier = ldr.LookupOrCreateSym("runtime.pclntab", 0) @@ -579,9 +539,12 @@ func (ctxt *Link) pclntab(container loader.Bitmap) *pclntab { // rational form. state.pclntab = ldr.LookupOrCreateSym("runtime.pclntab_old", 0) state.generatePCHeader(ctxt) - state.generateFuncnametab(ctxt, container) - cuOffsets := state.generateFilenameTabs(ctxt, compUnits, container) - state.generatePctab(ctxt, container) + nameOffsets := state.generateFuncnametab(ctxt, funcs) + cuOffsets := state.generateFilenameTabs(ctxt, compUnits, funcs) + state.generatePctab(ctxt, funcs) + + // Used to when computing defer return. 
+ deferReturnSym := ldr.Lookup("runtime.deferreturn", sym.SymVerABIInternal) funcdataBytes := int64(0) ldr.SetCarrierSym(state.pclntab, state.carrier) @@ -613,11 +576,7 @@ func (ctxt *Link) pclntab(container loader.Bitmap) *pclntab { var nfunc int32 prevFunc := ctxt.Textp[0] - for _, s := range ctxt.Textp { - if !emitPcln(ctxt, s, container) { - continue - } - + for _, s := range funcs { thisSect := ldr.SymSect(s) prevSect := ldr.SymSect(prevFunc) if thisSect != prevSect { @@ -686,7 +645,7 @@ func (ctxt *Link) pclntab(container loader.Bitmap) *pclntab { off = int32(setAddr(ftab, ctxt.Arch, int64(off), s, 0)) // name int32 - nameoff, ok := state.funcNameOffset[s] + nameoff, ok := nameOffsets[s] if !ok { panic("couldn't find function name offset") } @@ -701,13 +660,13 @@ func (ctxt *Link) pclntab(container loader.Bitmap) *pclntab { off = int32(ftab.SetUint32(ctxt.Arch, int64(off), args)) // deferreturn - deferreturn := oldState.computeDeferReturn(&ctxt.Target, s) + deferreturn := computeDeferReturn(ctxt, deferReturnSym, s) off = int32(ftab.SetUint32(ctxt.Arch, int64(off), deferreturn)) cu := ldr.SymUnit(s) if fi.Valid() && fi.NumInlTree() > 0 { - its := oldState.genInlTreeSym(cu, fi, ctxt.Arch, state) + its := genInlTreeSym(ctxt, cu, fi, ctxt.Arch, nameOffsets) funcdata[objabi.FUNCDATA_InlTree] = its } From 5402d40d5b041399392b29e4543f5fc4506197bd Mon Sep 17 00:00:00 2001 From: Jeremy Faller Date: Tue, 18 Aug 2020 16:35:26 -0400 Subject: [PATCH 007/281] [dev.link] cmd/link: fix memory growth on dev.link MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit CL 247399 caused memory growth in the linker. Fix this by adjusting how we preallocate the number of symbols we'll need. 
cmd/compile (Darwin), alloc/op: Loadlib_GC 33.5MB ± 0% 27.3MB ± 0% Change-Id: I34997329ea4412716114df97fc9dad6ad0c171ee Reviewed-on: https://go-review.googlesource.com/c/go/+/249024 Run-TryBot: Jeremy Faller Reviewed-by: Cherry Zhang Reviewed-by: Austin Clements TryBot-Result: Gobot Gobot --- src/cmd/link/internal/ld/lib.go | 2 +- src/cmd/link/internal/loader/loader.go | 36 +++++++++++++++++--------- 2 files changed, 25 insertions(+), 13 deletions(-) diff --git a/src/cmd/link/internal/ld/lib.go b/src/cmd/link/internal/ld/lib.go index caa4566190..a01bdefa37 100644 --- a/src/cmd/link/internal/ld/lib.go +++ b/src/cmd/link/internal/ld/lib.go @@ -543,7 +543,7 @@ func (ctxt *Link) loadlib() { } // Add non-package symbols and references of externally defined symbols. - ctxt.loader.LoadNonpkgSyms(ctxt.Arch) + ctxt.loader.LoadSyms(ctxt.Arch) // Load symbols from shared libraries, after all Go object symbols are loaded. for _, lib := range ctxt.Library { diff --git a/src/cmd/link/internal/loader/loader.go b/src/cmd/link/internal/loader/loader.go index f149e3c831..ea9cd1bd2e 100644 --- a/src/cmd/link/internal/loader/loader.go +++ b/src/cmd/link/internal/loader/loader.go @@ -328,7 +328,7 @@ func NewLoader(flags uint32, elfsetstring elfsetstringFunc, reporter *ErrorRepor ldr := &Loader{ start: make(map[*oReader]Sym), objs: []objIdx{{}, {extReader, 0}}, // reserve index 0 for nil symbol, 1 for external symbols - objSyms: make([]objSym, 1, 100000), // reserve index 0 for nil symbol + objSyms: make([]objSym, 1, 1), // This will get overwritten later. extReader: extReader, symsByName: [2]map[string]Sym{make(map[string]Sym, 80000), make(map[string]Sym, 50000)}, // preallocate ~2MB for ABI0 and ~1MB for ABI1 symbols objByPkg: make(map[string]*oReader), @@ -2016,8 +2016,9 @@ func (l *Loader) FuncInfo(i Sym) FuncInfo { return FuncInfo{} } -// Preload a package: add autolibs, add defined package symbols to the symbol table. 
-// Does not add non-package symbols yet, which will be done in LoadNonpkgSyms. +// Preload a package: adds autolib. +// Does not add defined package or non-packaged symbols to the symbol table. +// These are done in LoadSyms. // Does not read symbol data. // Returns the fingerprint of the object. func (l *Loader) Preload(localSymVersion int, f *bio.Reader, lib *sym.Library, unit *sym.CompilationUnit, length int64) goobj.FingerprintType { @@ -2060,8 +2061,6 @@ func (l *Loader) Preload(localSymVersion int, f *bio.Reader, lib *sym.Library, u } l.addObj(lib.Pkg, or) - st := loadState{l: l} - st.preloadSyms(or, pkgDef) // The caller expects us consuming all the data f.MustSeek(length, os.SEEK_CUR) @@ -2144,17 +2143,30 @@ func (st *loadState) preloadSyms(r *oReader, kind int) { } } -// Add hashed (content-addressable) symbols, non-package symbols, and +// Add syms, hashed (content-addressable) symbols, non-package symbols, and // references to external symbols (which are always named). -func (l *Loader) LoadNonpkgSyms(arch *sys.Arch) { +func (l *Loader) LoadSyms(arch *sys.Arch) { + // Allocate space for symbols, making a guess as to how much space we need. + // This function was determined empirically by looking at the cmd/compile on + // Darwin, and picking factors for hashed and hashed64 syms. + var symSize, hashedSize, hashed64Size int + for _, o := range l.objs[goObjStart:] { + symSize += o.r.ndef + o.r.nhasheddef/2 + o.r.nhashed64def/2 + o.r.NNonpkgdef() + hashedSize += o.r.nhasheddef / 2 + hashed64Size += o.r.nhashed64def / 2 + } + // Index 0 is invalid for symbols. + l.objSyms = make([]objSym, 1, symSize) + l.npkgsyms = l.NSym() - // Preallocate some space (a few hundreds KB) for some symbols. - // As of Go 1.15, linking cmd/compile has ~8000 hashed64 symbols and - // ~13000 hashed symbols. 
st := loadState{ l: l, - hashed64Syms: make(map[uint64]symAndSize, 10000), - hashedSyms: make(map[goobj.HashType]symAndSize, 15000), + hashed64Syms: make(map[uint64]symAndSize, hashed64Size), + hashedSyms: make(map[goobj.HashType]symAndSize, hashedSize), + } + + for _, o := range l.objs[goObjStart:] { + st.preloadSyms(o.r, pkgDef) } for _, o := range l.objs[goObjStart:] { st.preloadSyms(o.r, hashed64Def) From c9551f9c198aac4ffbc7470755a0f66f071c30b8 Mon Sep 17 00:00:00 2001 From: Constantin Konstantinidis Date: Fri, 24 Apr 2020 07:17:33 +0200 Subject: [PATCH 008/281] cmd/compile: enforce strongly typed rules for ARM (1) Remove type casting in: L731 - L764, L772, L780 - L781, L1014 - L1054, L1057 - L1068, L1195, L1199 Toolstack-check successful. Change-Id: I80f90716477f269a227be28b14bf913b78ef375d Reviewed-on: https://go-review.googlesource.com/c/go/+/228824 Run-TryBot: Giovanni Bajo TryBot-Result: Go Bot Reviewed-by: Keith Randall Trust: Giovanni Bajo --- src/cmd/compile/internal/ssa/gen/ARM.rules | 184 +++--- src/cmd/compile/internal/ssa/rewriteARM.go | 722 ++++++++++----------- 2 files changed, 453 insertions(+), 453 deletions(-) diff --git a/src/cmd/compile/internal/ssa/gen/ARM.rules b/src/cmd/compile/internal/ssa/gen/ARM.rules index d2e159709f..4cc8bd52e3 100644 --- a/src/cmd/compile/internal/ssa/gen/ARM.rules +++ b/src/cmd/compile/internal/ssa/gen/ARM.rules @@ -728,40 +728,40 @@ (BICconst [c] _) && int32(c)==-1 => (MOVWconst [0]) // generic constant folding -(ADDconst [c] x) && !isARMImmRot(uint32(c)) && isARMImmRot(uint32(-c)) -> (SUBconst [int64(int32(-c))] x) -(SUBconst [c] x) && !isARMImmRot(uint32(c)) && isARMImmRot(uint32(-c)) -> (ADDconst [int64(int32(-c))] x) -(ANDconst [c] x) && !isARMImmRot(uint32(c)) && isARMImmRot(^uint32(c)) -> (BICconst [int64(int32(^uint32(c)))] x) -(BICconst [c] x) && !isARMImmRot(uint32(c)) && isARMImmRot(^uint32(c)) -> (ANDconst [int64(int32(^uint32(c)))] x) -(ADDconst [c] x) && objabi.GOARM==7 && !isARMImmRot(uint32(c)) && 
uint32(c)>0xffff && uint32(-c)<=0xffff -> (SUBconst [int64(int32(-c))] x) -(SUBconst [c] x) && objabi.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff -> (ANDconst [int64(int32(-c))] x) -(ANDconst [c] x) && objabi.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff -> (BICconst [int64(int32(^uint32(c)))] x) -(BICconst [c] x) && objabi.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff -> (ANDconst [int64(int32(^uint32(c)))] x) -(ADDconst [c] (MOVWconst [d])) -> (MOVWconst [int64(int32(c+d))]) -(ADDconst [c] (ADDconst [d] x)) -> (ADDconst [int64(int32(c+d))] x) -(ADDconst [c] (SUBconst [d] x)) -> (ADDconst [int64(int32(c-d))] x) -(ADDconst [c] (RSBconst [d] x)) -> (RSBconst [int64(int32(c+d))] x) -(ADCconst [c] (ADDconst [d] x) flags) -> (ADCconst [int64(int32(c+d))] x flags) -(ADCconst [c] (SUBconst [d] x) flags) -> (ADCconst [int64(int32(c-d))] x flags) -(SUBconst [c] (MOVWconst [d])) -> (MOVWconst [int64(int32(d-c))]) -(SUBconst [c] (SUBconst [d] x)) -> (ADDconst [int64(int32(-c-d))] x) -(SUBconst [c] (ADDconst [d] x)) -> (ADDconst [int64(int32(-c+d))] x) -(SUBconst [c] (RSBconst [d] x)) -> (RSBconst [int64(int32(-c+d))] x) -(SBCconst [c] (ADDconst [d] x) flags) -> (SBCconst [int64(int32(c-d))] x flags) -(SBCconst [c] (SUBconst [d] x) flags) -> (SBCconst [int64(int32(c+d))] x flags) -(RSBconst [c] (MOVWconst [d])) -> (MOVWconst [int64(int32(c-d))]) -(RSBconst [c] (RSBconst [d] x)) -> (ADDconst [int64(int32(c-d))] x) -(RSBconst [c] (ADDconst [d] x)) -> (RSBconst [int64(int32(c-d))] x) -(RSBconst [c] (SUBconst [d] x)) -> (RSBconst [int64(int32(c+d))] x) -(RSCconst [c] (ADDconst [d] x) flags) -> (RSCconst [int64(int32(c-d))] x flags) -(RSCconst [c] (SUBconst [d] x) flags) -> (RSCconst [int64(int32(c+d))] x flags) -(SLLconst [c] (MOVWconst [d])) -> (MOVWconst [int64(int32(uint32(d)< (MOVWconst [int64(int32(uint32(d)>>uint64(c)))]) -(SRAconst [c] (MOVWconst [d])) -> (MOVWconst 
[int64(int32(d)>>uint64(c))]) -(MUL (MOVWconst [c]) (MOVWconst [d])) -> (MOVWconst [int64(int32(c*d))]) -(MULA (MOVWconst [c]) (MOVWconst [d]) a) -> (ADDconst [int64(int32(c*d))] a) -(MULS (MOVWconst [c]) (MOVWconst [d]) a) -> (SUBconst [int64(int32(c*d))] a) -(Select0 (CALLudiv (MOVWconst [c]) (MOVWconst [d]))) -> (MOVWconst [int64(int32(uint32(c)/uint32(d)))]) -(Select1 (CALLudiv (MOVWconst [c]) (MOVWconst [d]))) -> (MOVWconst [int64(int32(uint32(c)%uint32(d)))]) +(ADDconst [c] x) && !isARMImmRot(uint32(c)) && isARMImmRot(uint32(-c)) => (SUBconst [-c] x) +(SUBconst [c] x) && !isARMImmRot(uint32(c)) && isARMImmRot(uint32(-c)) => (ADDconst [-c] x) +(ANDconst [c] x) && !isARMImmRot(uint32(c)) && isARMImmRot(^uint32(c)) => (BICconst [int32(^uint32(c))] x) +(BICconst [c] x) && !isARMImmRot(uint32(c)) && isARMImmRot(^uint32(c)) => (ANDconst [int32(^uint32(c))] x) +(ADDconst [c] x) && objabi.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff => (SUBconst [-c] x) +(SUBconst [c] x) && objabi.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff => (ADDconst [-c] x) +(ANDconst [c] x) && objabi.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff => (BICconst [int32(^uint32(c))] x) +(BICconst [c] x) && objabi.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff => (ANDconst [int32(^uint32(c))] x) +(ADDconst [c] (MOVWconst [d])) => (MOVWconst [c+d]) +(ADDconst [c] (ADDconst [d] x)) => (ADDconst [c+d] x) +(ADDconst [c] (SUBconst [d] x)) => (ADDconst [c-d] x) +(ADDconst [c] (RSBconst [d] x)) => (RSBconst [c+d] x) +(ADCconst [c] (ADDconst [d] x) flags) => (ADCconst [c+d] x flags) +(ADCconst [c] (SUBconst [d] x) flags) => (ADCconst [c-d] x flags) +(SUBconst [c] (MOVWconst [d])) => (MOVWconst [d-c]) +(SUBconst [c] (SUBconst [d] x)) => (ADDconst [-c-d] x) +(SUBconst [c] (ADDconst [d] x)) => (ADDconst [-c+d] x) +(SUBconst [c] (RSBconst [d] x)) => (RSBconst [-c+d] x) +(SBCconst [c] 
(ADDconst [d] x) flags) => (SBCconst [c-d] x flags) +(SBCconst [c] (SUBconst [d] x) flags) => (SBCconst [c+d] x flags) +(RSBconst [c] (MOVWconst [d])) => (MOVWconst [c-d]) +(RSBconst [c] (RSBconst [d] x)) => (ADDconst [c-d] x) +(RSBconst [c] (ADDconst [d] x)) => (RSBconst [c-d] x) +(RSBconst [c] (SUBconst [d] x)) => (RSBconst [c+d] x) +(RSCconst [c] (ADDconst [d] x) flags) => (RSCconst [c-d] x flags) +(RSCconst [c] (SUBconst [d] x) flags) => (RSCconst [c+d] x flags) +(SLLconst [c] (MOVWconst [d])) => (MOVWconst [d< (MOVWconst [int32(uint32(d)>>uint64(c))]) +(SRAconst [c] (MOVWconst [d])) => (MOVWconst [d>>uint64(c)]) +(MUL (MOVWconst [c]) (MOVWconst [d])) => (MOVWconst [c*d]) +(MULA (MOVWconst [c]) (MOVWconst [d]) a) => (ADDconst [c*d] a) +(MULS (MOVWconst [c]) (MOVWconst [d]) a) => (SUBconst [c*d] a) +(Select0 (CALLudiv (MOVWconst [c]) (MOVWconst [d]))) => (MOVWconst [int32(uint32(c)/uint32(d))]) +(Select1 (CALLudiv (MOVWconst [c]) (MOVWconst [d]))) => (MOVWconst [int32(uint32(c)%uint32(d))]) (ANDconst [c] (MOVWconst [d])) => (MOVWconst [c&d]) (ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x) (ORconst [c] (MOVWconst [d])) => (MOVWconst [c|d]) @@ -769,7 +769,7 @@ (XORconst [c] (MOVWconst [d])) => (MOVWconst [c^d]) (XORconst [c] (XORconst [d] x)) => (XORconst [c^d] x) (BICconst [c] (MOVWconst [d])) => (MOVWconst [d&^c]) -(BICconst [c] (BICconst [d] x)) -> (BICconst [int64(int32(c|d))] x) +(BICconst [c] (BICconst [d] x)) => (BICconst [c|d] x) (MVN (MOVWconst [c])) => (MOVWconst [^c]) (MOVBreg (MOVWconst [c])) -> (MOVWconst [int64(int8(c))]) (MOVBUreg (MOVWconst [c])) -> (MOVWconst [int64(uint8(c))]) @@ -777,8 +777,8 @@ (MOVHUreg (MOVWconst [c])) -> (MOVWconst [int64(uint16(c))]) (MOVWreg (MOVWconst [c])) => (MOVWconst [c]) // BFX: Width = c >> 8, LSB = c & 0xff, result = d << (32 - Width - LSB) >> (32 - Width) -(BFX [c] (MOVWconst [d])) -> (MOVWconst [int64(int32(d)<<(32-uint32(c&0xff)-uint32(c>>8))>>(32-uint32(c>>8)))]) -(BFXU [c] (MOVWconst [d])) -> (MOVWconst 
[int64(int32(uint32(d)<<(32-uint32(c&0xff)-uint32(c>>8))>>(32-uint32(c>>8))))]) +(BFX [c] (MOVWconst [d])) => (MOVWconst [d<<(32-uint32(c&0xff)-uint32(c>>8))>>(32-uint32(c>>8))]) +(BFXU [c] (MOVWconst [d])) => (MOVWconst [int32(uint32(d)<<(32-uint32(c&0xff)-uint32(c>>8))>>(32-uint32(c>>8)))]) // absorb shifts into ops (ADD x (SLLconst [c] y)) => (ADDshiftLL x y [c]) @@ -1011,61 +1011,61 @@ (CMNshiftRAreg (MOVWconst [c]) x y) => (CMNconst [c] (SRA x y)) // constant folding in *shift ops -(ADDshiftLL x (MOVWconst [c]) [d]) -> (ADDconst x [int64(int32(uint32(c)< (ADDconst x [int64(int32(uint32(c)>>uint64(d)))]) -(ADDshiftRA x (MOVWconst [c]) [d]) -> (ADDconst x [int64(int32(c)>>uint64(d))]) -(ADCshiftLL x (MOVWconst [c]) [d] flags) -> (ADCconst x [int64(int32(uint32(c)< (ADCconst x [int64(int32(uint32(c)>>uint64(d)))] flags) -(ADCshiftRA x (MOVWconst [c]) [d] flags) -> (ADCconst x [int64(int32(c)>>uint64(d))] flags) -(ADDSshiftLL x (MOVWconst [c]) [d]) -> (ADDSconst x [int64(int32(uint32(c)< (ADDSconst x [int64(int32(uint32(c)>>uint64(d)))]) -(ADDSshiftRA x (MOVWconst [c]) [d]) -> (ADDSconst x [int64(int32(c)>>uint64(d))]) -(SUBshiftLL x (MOVWconst [c]) [d]) -> (SUBconst x [int64(int32(uint32(c)< (SUBconst x [int64(int32(uint32(c)>>uint64(d)))]) -(SUBshiftRA x (MOVWconst [c]) [d]) -> (SUBconst x [int64(int32(c)>>uint64(d))]) -(SBCshiftLL x (MOVWconst [c]) [d] flags) -> (SBCconst x [int64(int32(uint32(c)< (SBCconst x [int64(int32(uint32(c)>>uint64(d)))] flags) -(SBCshiftRA x (MOVWconst [c]) [d] flags) -> (SBCconst x [int64(int32(c)>>uint64(d))] flags) -(SUBSshiftLL x (MOVWconst [c]) [d]) -> (SUBSconst x [int64(int32(uint32(c)< (SUBSconst x [int64(int32(uint32(c)>>uint64(d)))]) -(SUBSshiftRA x (MOVWconst [c]) [d]) -> (SUBSconst x [int64(int32(c)>>uint64(d))]) -(RSBshiftLL x (MOVWconst [c]) [d]) -> (RSBconst x [int64(int32(uint32(c)< (RSBconst x [int64(int32(uint32(c)>>uint64(d)))]) -(RSBshiftRA x (MOVWconst [c]) [d]) -> (RSBconst x [int64(int32(c)>>uint64(d))]) 
-(RSCshiftLL x (MOVWconst [c]) [d] flags) -> (RSCconst x [int64(int32(uint32(c)< (RSCconst x [int64(int32(uint32(c)>>uint64(d)))] flags) -(RSCshiftRA x (MOVWconst [c]) [d] flags) -> (RSCconst x [int64(int32(c)>>uint64(d))] flags) -(RSBSshiftLL x (MOVWconst [c]) [d]) -> (RSBSconst x [int64(int32(uint32(c)< (RSBSconst x [int64(int32(uint32(c)>>uint64(d)))]) -(RSBSshiftRA x (MOVWconst [c]) [d]) -> (RSBSconst x [int64(int32(c)>>uint64(d))]) -(ANDshiftLL x (MOVWconst [c]) [d]) -> (ANDconst x [int64(int32(uint32(c)< (ANDconst x [int64(int32(uint32(c)>>uint64(d)))]) -(ANDshiftRA x (MOVWconst [c]) [d]) -> (ANDconst x [int64(int32(c)>>uint64(d))]) -(ORshiftLL x (MOVWconst [c]) [d]) -> (ORconst x [int64(int32(uint32(c)< (ORconst x [int64(int32(uint32(c)>>uint64(d)))]) -(ORshiftRA x (MOVWconst [c]) [d]) -> (ORconst x [int64(int32(c)>>uint64(d))]) -(XORshiftLL x (MOVWconst [c]) [d]) -> (XORconst x [int64(int32(uint32(c)< (XORconst x [int64(int32(uint32(c)>>uint64(d)))]) -(XORshiftRA x (MOVWconst [c]) [d]) -> (XORconst x [int64(int32(c)>>uint64(d))]) -(XORshiftRR x (MOVWconst [c]) [d]) -> (XORconst x [int64(int32(uint32(c)>>uint64(d)|uint32(c)< (BICconst x [int64(int32(uint32(c)< (BICconst x [int64(int32(uint32(c)>>uint64(d)))]) -(BICshiftRA x (MOVWconst [c]) [d]) -> (BICconst x [int64(int32(c)>>uint64(d))]) -(MVNshiftLL (MOVWconst [c]) [d]) -> (MOVWconst [^int64(uint32(c)< (ADDconst x [c< (ADDconst x [int32(uint32(c)>>uint64(d))]) +(ADDshiftRA x (MOVWconst [c]) [d]) => (ADDconst x [c>>uint64(d)]) +(ADCshiftLL x (MOVWconst [c]) [d] flags) => (ADCconst x [c< (ADCconst x [int32(uint32(c)>>uint64(d))] flags) +(ADCshiftRA x (MOVWconst [c]) [d] flags) => (ADCconst x [c>>uint64(d)] flags) +(ADDSshiftLL x (MOVWconst [c]) [d]) => (ADDSconst x [c< (ADDSconst x [int32(uint32(c)>>uint64(d))]) +(ADDSshiftRA x (MOVWconst [c]) [d]) => (ADDSconst x [c>>uint64(d)]) +(SUBshiftLL x (MOVWconst [c]) [d]) => (SUBconst x [c< (SUBconst x [int32(uint32(c)>>uint64(d))]) +(SUBshiftRA x (MOVWconst [c]) 
[d]) => (SUBconst x [c>>uint64(d)]) +(SBCshiftLL x (MOVWconst [c]) [d] flags) => (SBCconst x [c< (SBCconst x [int32(uint32(c)>>uint64(d))] flags) +(SBCshiftRA x (MOVWconst [c]) [d] flags) => (SBCconst x [c>>uint64(d)] flags) +(SUBSshiftLL x (MOVWconst [c]) [d]) => (SUBSconst x [c< (SUBSconst x [int32(uint32(c)>>uint64(d))]) +(SUBSshiftRA x (MOVWconst [c]) [d]) => (SUBSconst x [c>>uint64(d)]) +(RSBshiftLL x (MOVWconst [c]) [d]) => (RSBconst x [c< (RSBconst x [int32(uint32(c)>>uint64(d))]) +(RSBshiftRA x (MOVWconst [c]) [d]) => (RSBconst x [c>>uint64(d)]) +(RSCshiftLL x (MOVWconst [c]) [d] flags) => (RSCconst x [c< (RSCconst x [int32(uint32(c)>>uint64(d))] flags) +(RSCshiftRA x (MOVWconst [c]) [d] flags) => (RSCconst x [c>>uint64(d)] flags) +(RSBSshiftLL x (MOVWconst [c]) [d]) => (RSBSconst x [c< (RSBSconst x [int32(uint32(c)>>uint64(d))]) +(RSBSshiftRA x (MOVWconst [c]) [d]) => (RSBSconst x [c>>uint64(d)]) +(ANDshiftLL x (MOVWconst [c]) [d]) => (ANDconst x [c< (ANDconst x [int32(uint32(c)>>uint64(d))]) +(ANDshiftRA x (MOVWconst [c]) [d]) => (ANDconst x [c>>uint64(d)]) +(ORshiftLL x (MOVWconst [c]) [d]) => (ORconst x [c< (ORconst x [int32(uint32(c)>>uint64(d))]) +(ORshiftRA x (MOVWconst [c]) [d]) => (ORconst x [c>>uint64(d)]) +(XORshiftLL x (MOVWconst [c]) [d]) => (XORconst x [c< (XORconst x [int32(uint32(c)>>uint64(d))]) +(XORshiftRA x (MOVWconst [c]) [d]) => (XORconst x [c>>uint64(d)]) +(XORshiftRR x (MOVWconst [c]) [d]) => (XORconst x [int32(uint32(c)>>uint64(d)|uint32(c)< (BICconst x [c< (BICconst x [int32(uint32(c)>>uint64(d))]) +(BICshiftRA x (MOVWconst [c]) [d]) => (BICconst x [c>>uint64(d)]) +(MVNshiftLL (MOVWconst [c]) [d]) => (MOVWconst [^(c< (MOVWconst [^int64(uint32(c)>>uint64(d))]) (MVNshiftRA (MOVWconst [c]) [d]) -> (MOVWconst [^int64(int32(c)>>uint64(d))]) -(CMPshiftLL x (MOVWconst [c]) [d]) -> (CMPconst x [int64(int32(uint32(c)< (CMPconst x [int64(int32(uint32(c)>>uint64(d)))]) -(CMPshiftRA x (MOVWconst [c]) [d]) -> (CMPconst x 
[int64(int32(c)>>uint64(d))]) -(TSTshiftLL x (MOVWconst [c]) [d]) -> (TSTconst x [int64(int32(uint32(c)< (TSTconst x [int64(int32(uint32(c)>>uint64(d)))]) -(TSTshiftRA x (MOVWconst [c]) [d]) -> (TSTconst x [int64(int32(c)>>uint64(d))]) -(TEQshiftLL x (MOVWconst [c]) [d]) -> (TEQconst x [int64(int32(uint32(c)< (TEQconst x [int64(int32(uint32(c)>>uint64(d)))]) -(TEQshiftRA x (MOVWconst [c]) [d]) -> (TEQconst x [int64(int32(c)>>uint64(d))]) -(CMNshiftLL x (MOVWconst [c]) [d]) -> (CMNconst x [int64(int32(uint32(c)< (CMNconst x [int64(int32(uint32(c)>>uint64(d)))]) -(CMNshiftRA x (MOVWconst [c]) [d]) -> (CMNconst x [int64(int32(c)>>uint64(d))]) +(CMPshiftLL x (MOVWconst [c]) [d]) => (CMPconst x [c< (CMPconst x [int32(uint32(c)>>uint64(d))]) +(CMPshiftRA x (MOVWconst [c]) [d]) => (CMPconst x [c>>uint64(d)]) +(TSTshiftLL x (MOVWconst [c]) [d]) => (TSTconst x [c< (TSTconst x [int32(uint32(c)>>uint64(d))]) +(TSTshiftRA x (MOVWconst [c]) [d]) => (TSTconst x [c>>uint64(d)]) +(TEQshiftLL x (MOVWconst [c]) [d]) => (TEQconst x [c< (TEQconst x [int32(uint32(c)>>uint64(d))]) +(TEQshiftRA x (MOVWconst [c]) [d]) => (TEQconst x [c>>uint64(d)]) +(CMNshiftLL x (MOVWconst [c]) [d]) => (CMNconst x [c< (CMNconst x [int32(uint32(c)>>uint64(d))]) +(CMNshiftRA x (MOVWconst [c]) [d]) => (CMNconst x [c>>uint64(d)]) (ADDshiftLLreg x y (MOVWconst [c])) => (ADDshiftLL x y [c]) (ADDshiftRLreg x y (MOVWconst [c])) => (ADDshiftRL x y [c]) @@ -1192,11 +1192,11 @@ (MOVWloadshiftLL ptr (MOVWconst [c]) [d] mem) -> (MOVWload [int64(uint32(c)< (MOVWload [int64(uint32(c)>>uint64(d))] ptr mem) -(MOVWloadshiftRA ptr (MOVWconst [c]) [d] mem) -> (MOVWload [int64(int32(c)>>uint64(d))] ptr mem) +(MOVWloadshiftRA ptr (MOVWconst [c]) [d] mem) => (MOVWload [c>>uint64(d)] ptr mem) (MOVWstoreshiftLL ptr (MOVWconst [c]) [d] val mem) -> (MOVWstore [int64(uint32(c)< (MOVWstore [int64(uint32(c)>>uint64(d))] ptr val mem) -(MOVWstoreshiftRA ptr (MOVWconst [c]) [d] val mem) -> (MOVWstore [int64(int32(c)>>uint64(d))] ptr val 
mem) +(MOVWstoreshiftRA ptr (MOVWconst [c]) [d] val mem) => (MOVWstore [c>>uint64(d)] ptr val mem) // generic simplifications (ADD x (RSBconst [0] y)) => (SUB x y) diff --git a/src/cmd/compile/internal/ssa/rewriteARM.go b/src/cmd/compile/internal/ssa/rewriteARM.go index 352667f90f..f25b23dc46 100644 --- a/src/cmd/compile/internal/ssa/rewriteARM.go +++ b/src/cmd/compile/internal/ssa/rewriteARM.go @@ -1026,32 +1026,32 @@ func rewriteValueARM_OpARMADCconst(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ADCconst [c] (ADDconst [d] x) flags) - // result: (ADCconst [int64(int32(c+d))] x flags) + // result: (ADCconst [c+d] x flags) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpARMADDconst { break } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] flags := v_1 v.reset(OpARMADCconst) - v.AuxInt = int64(int32(c + d)) + v.AuxInt = int32ToAuxInt(c + d) v.AddArg2(x, flags) return true } // match: (ADCconst [c] (SUBconst [d] x) flags) - // result: (ADCconst [int64(int32(c-d))] x flags) + // result: (ADCconst [c-d] x flags) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpARMSUBconst { break } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] flags := v_1 v.reset(OpARMADCconst) - v.AuxInt = int64(int32(c - d)) + v.AuxInt = int32ToAuxInt(c - d) v.AddArg2(x, flags) return true } @@ -1081,17 +1081,17 @@ func rewriteValueARM_OpARMADCshiftLL(v *Value) bool { return true } // match: (ADCshiftLL x (MOVWconst [c]) [d] flags) - // result: (ADCconst x [int64(int32(uint32(c)<>uint64(d))] flags) + // result: (ADCconst x [c>>uint64(d)] flags) for { - d := v.AuxInt + d := auxIntToInt32(v.AuxInt) x := v_0 if v_1.Op != OpARMMOVWconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) flags := v_2 v.reset(OpARMADCconst) - v.AuxInt = int64(int32(c) >> uint64(d)) + v.AuxInt = int32ToAuxInt(c >> uint64(d)) v.AddArg2(x, flags) return true } @@ -1241,17 +1241,17 @@ func 
rewriteValueARM_OpARMADCshiftRL(v *Value) bool { return true } // match: (ADCshiftRL x (MOVWconst [c]) [d] flags) - // result: (ADCconst x [int64(int32(uint32(c)>>uint64(d)))] flags) + // result: (ADCconst x [int32(uint32(c)>>uint64(d))] flags) for { - d := v.AuxInt + d := auxIntToInt32(v.AuxInt) x := v_0 if v_1.Op != OpARMMOVWconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) flags := v_2 v.reset(OpARMADCconst) - v.AuxInt = int64(int32(uint32(c) >> uint64(d))) + v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d))) v.AddArg2(x, flags) return true } @@ -1705,16 +1705,16 @@ func rewriteValueARM_OpARMADDSshiftLL(v *Value) bool { return true } // match: (ADDSshiftLL x (MOVWconst [c]) [d]) - // result: (ADDSconst x [int64(int32(uint32(c)<>uint64(d))]) + // result: (ADDSconst x [c>>uint64(d)]) for { - d := v.AuxInt + d := auxIntToInt32(v.AuxInt) x := v_0 if v_1.Op != OpARMMOVWconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) v.reset(OpARMADDSconst) - v.AuxInt = int64(int32(c) >> uint64(d)) + v.AuxInt = int32ToAuxInt(c >> uint64(d)) v.AddArg(x) return true } @@ -1853,16 +1853,16 @@ func rewriteValueARM_OpARMADDSshiftRL(v *Value) bool { return true } // match: (ADDSshiftRL x (MOVWconst [c]) [d]) - // result: (ADDSconst x [int64(int32(uint32(c)>>uint64(d)))]) + // result: (ADDSconst x [int32(uint32(c)>>uint64(d))]) for { - d := v.AuxInt + d := auxIntToInt32(v.AuxInt) x := v_0 if v_1.Op != OpARMMOVWconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) v.reset(OpARMADDSconst) - v.AuxInt = int64(int32(uint32(c) >> uint64(d))) + v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d))) v.AddArg(x) return true } @@ -1935,83 +1935,83 @@ func rewriteValueARM_OpARMADDconst(v *Value) bool { } // match: (ADDconst [c] x) // cond: !isARMImmRot(uint32(c)) && isARMImmRot(uint32(-c)) - // result: (SUBconst [int64(int32(-c))] x) + // result: (SUBconst [-c] x) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) x := v_0 if 
!(!isARMImmRot(uint32(c)) && isARMImmRot(uint32(-c))) { break } v.reset(OpARMSUBconst) - v.AuxInt = int64(int32(-c)) + v.AuxInt = int32ToAuxInt(-c) v.AddArg(x) return true } // match: (ADDconst [c] x) // cond: objabi.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff - // result: (SUBconst [int64(int32(-c))] x) + // result: (SUBconst [-c] x) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) x := v_0 if !(objabi.GOARM == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && uint32(-c) <= 0xffff) { break } v.reset(OpARMSUBconst) - v.AuxInt = int64(int32(-c)) + v.AuxInt = int32ToAuxInt(-c) v.AddArg(x) return true } // match: (ADDconst [c] (MOVWconst [d])) - // result: (MOVWconst [int64(int32(c+d))]) + // result: (MOVWconst [c+d]) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpARMMOVWconst { break } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) v.reset(OpARMMOVWconst) - v.AuxInt = int64(int32(c + d)) + v.AuxInt = int32ToAuxInt(c + d) return true } // match: (ADDconst [c] (ADDconst [d] x)) - // result: (ADDconst [int64(int32(c+d))] x) + // result: (ADDconst [c+d] x) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpARMADDconst { break } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] v.reset(OpARMADDconst) - v.AuxInt = int64(int32(c + d)) + v.AuxInt = int32ToAuxInt(c + d) v.AddArg(x) return true } // match: (ADDconst [c] (SUBconst [d] x)) - // result: (ADDconst [int64(int32(c-d))] x) + // result: (ADDconst [c-d] x) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpARMSUBconst { break } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] v.reset(OpARMADDconst) - v.AuxInt = int64(int32(c - d)) + v.AuxInt = int32ToAuxInt(c - d) v.AddArg(x) return true } // match: (ADDconst [c] (RSBconst [d] x)) - // result: (RSBconst [int64(int32(c+d))] x) + // result: (RSBconst [c+d] x) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != 
OpARMRSBconst { break } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] v.reset(OpARMRSBconst) - v.AuxInt = int64(int32(c + d)) + v.AuxInt = int32ToAuxInt(c + d) v.AddArg(x) return true } @@ -2040,16 +2040,16 @@ func rewriteValueARM_OpARMADDshiftLL(v *Value) bool { return true } // match: (ADDshiftLL x (MOVWconst [c]) [d]) - // result: (ADDconst x [int64(int32(uint32(c)<>uint64(d))]) + // result: (ADDconst x [c>>uint64(d)]) for { - d := v.AuxInt + d := auxIntToInt32(v.AuxInt) x := v_0 if v_1.Op != OpARMMOVWconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) v.reset(OpARMADDconst) - v.AuxInt = int64(int32(c) >> uint64(d)) + v.AuxInt = int32ToAuxInt(c >> uint64(d)) v.AddArg(x) return true } @@ -2237,16 +2237,16 @@ func rewriteValueARM_OpARMADDshiftRL(v *Value) bool { return true } // match: (ADDshiftRL x (MOVWconst [c]) [d]) - // result: (ADDconst x [int64(int32(uint32(c)>>uint64(d)))]) + // result: (ADDconst x [int32(uint32(c)>>uint64(d))]) for { - d := v.AuxInt + d := auxIntToInt32(v.AuxInt) x := v_0 if v_1.Op != OpARMMOVWconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) v.reset(OpARMADDconst) - v.AuxInt = int64(int32(uint32(c) >> uint64(d))) + v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d))) v.AddArg(x) return true } @@ -2527,29 +2527,29 @@ func rewriteValueARM_OpARMANDconst(v *Value) bool { } // match: (ANDconst [c] x) // cond: !isARMImmRot(uint32(c)) && isARMImmRot(^uint32(c)) - // result: (BICconst [int64(int32(^uint32(c)))] x) + // result: (BICconst [int32(^uint32(c))] x) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) x := v_0 if !(!isARMImmRot(uint32(c)) && isARMImmRot(^uint32(c))) { break } v.reset(OpARMBICconst) - v.AuxInt = int64(int32(^uint32(c))) + v.AuxInt = int32ToAuxInt(int32(^uint32(c))) v.AddArg(x) return true } // match: (ANDconst [c] x) // cond: objabi.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff - // result: (BICconst [int64(int32(^uint32(c)))] x) + 
// result: (BICconst [int32(^uint32(c))] x) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) x := v_0 if !(objabi.GOARM == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && ^uint32(c) <= 0xffff) { break } v.reset(OpARMBICconst) - v.AuxInt = int64(int32(^uint32(c))) + v.AuxInt = int32ToAuxInt(int32(^uint32(c))) v.AddArg(x) return true } @@ -2603,16 +2603,16 @@ func rewriteValueARM_OpARMANDshiftLL(v *Value) bool { return true } // match: (ANDshiftLL x (MOVWconst [c]) [d]) - // result: (ANDconst x [int64(int32(uint32(c)<>uint64(d))]) + // result: (ANDconst x [c>>uint64(d)]) for { - d := v.AuxInt + d := auxIntToInt32(v.AuxInt) x := v_0 if v_1.Op != OpARMMOVWconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) v.reset(OpARMANDconst) - v.AuxInt = int64(int32(c) >> uint64(d)) + v.AuxInt = int32ToAuxInt(c >> uint64(d)) v.AddArg(x) return true } @@ -2785,16 +2785,16 @@ func rewriteValueARM_OpARMANDshiftRL(v *Value) bool { return true } // match: (ANDshiftRL x (MOVWconst [c]) [d]) - // result: (ANDconst x [int64(int32(uint32(c)>>uint64(d)))]) + // result: (ANDconst x [int32(uint32(c)>>uint64(d))]) for { - d := v.AuxInt + d := auxIntToInt32(v.AuxInt) x := v_0 if v_1.Op != OpARMMOVWconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) v.reset(OpARMANDconst) - v.AuxInt = int64(int32(uint32(c) >> uint64(d))) + v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d))) v.AddArg(x) return true } @@ -2857,15 +2857,15 @@ func rewriteValueARM_OpARMANDshiftRLreg(v *Value) bool { func rewriteValueARM_OpARMBFX(v *Value) bool { v_0 := v.Args[0] // match: (BFX [c] (MOVWconst [d])) - // result: (MOVWconst [int64(int32(d)<<(32-uint32(c&0xff)-uint32(c>>8))>>(32-uint32(c>>8)))]) + // result: (MOVWconst [d<<(32-uint32(c&0xff)-uint32(c>>8))>>(32-uint32(c>>8))]) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpARMMOVWconst { break } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) v.reset(OpARMMOVWconst) - v.AuxInt = int64(int32(d) << (32 - 
uint32(c&0xff) - uint32(c>>8)) >> (32 - uint32(c>>8))) + v.AuxInt = int32ToAuxInt(d << (32 - uint32(c&0xff) - uint32(c>>8)) >> (32 - uint32(c>>8))) return true } return false @@ -2873,15 +2873,15 @@ func rewriteValueARM_OpARMBFX(v *Value) bool { func rewriteValueARM_OpARMBFXU(v *Value) bool { v_0 := v.Args[0] // match: (BFXU [c] (MOVWconst [d])) - // result: (MOVWconst [int64(int32(uint32(d)<<(32-uint32(c&0xff)-uint32(c>>8))>>(32-uint32(c>>8))))]) + // result: (MOVWconst [int32(uint32(d)<<(32-uint32(c&0xff)-uint32(c>>8))>>(32-uint32(c>>8)))]) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpARMMOVWconst { break } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) v.reset(OpARMMOVWconst) - v.AuxInt = int64(int32(uint32(d) << (32 - uint32(c&0xff) - uint32(c>>8)) >> (32 - uint32(c>>8)))) + v.AuxInt = int32ToAuxInt(int32(uint32(d) << (32 - uint32(c&0xff) - uint32(c>>8)) >> (32 - uint32(c>>8)))) return true } return false @@ -3022,29 +3022,29 @@ func rewriteValueARM_OpARMBICconst(v *Value) bool { } // match: (BICconst [c] x) // cond: !isARMImmRot(uint32(c)) && isARMImmRot(^uint32(c)) - // result: (ANDconst [int64(int32(^uint32(c)))] x) + // result: (ANDconst [int32(^uint32(c))] x) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) x := v_0 if !(!isARMImmRot(uint32(c)) && isARMImmRot(^uint32(c))) { break } v.reset(OpARMANDconst) - v.AuxInt = int64(int32(^uint32(c))) + v.AuxInt = int32ToAuxInt(int32(^uint32(c))) v.AddArg(x) return true } // match: (BICconst [c] x) // cond: objabi.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff - // result: (ANDconst [int64(int32(^uint32(c)))] x) + // result: (ANDconst [int32(^uint32(c))] x) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) x := v_0 if !(objabi.GOARM == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && ^uint32(c) <= 0xffff) { break } v.reset(OpARMANDconst) - v.AuxInt = int64(int32(^uint32(c))) + v.AuxInt = int32ToAuxInt(int32(^uint32(c))) v.AddArg(x) return true } 
@@ -3061,16 +3061,16 @@ func rewriteValueARM_OpARMBICconst(v *Value) bool { return true } // match: (BICconst [c] (BICconst [d] x)) - // result: (BICconst [int64(int32(c|d))] x) + // result: (BICconst [c|d] x) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpARMBICconst { break } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] v.reset(OpARMBICconst) - v.AuxInt = int64(int32(c | d)) + v.AuxInt = int32ToAuxInt(c | d) v.AddArg(x) return true } @@ -3080,16 +3080,16 @@ func rewriteValueARM_OpARMBICshiftLL(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (BICshiftLL x (MOVWconst [c]) [d]) - // result: (BICconst x [int64(int32(uint32(c)<>uint64(d))]) + // result: (BICconst x [c>>uint64(d)]) for { - d := v.AuxInt + d := auxIntToInt32(v.AuxInt) x := v_0 if v_1.Op != OpARMMOVWconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) v.reset(OpARMBICconst) - v.AuxInt = int64(int32(c) >> uint64(d)) + v.AuxInt = int32ToAuxInt(c >> uint64(d)) v.AddArg(x) return true } @@ -3192,16 +3192,16 @@ func rewriteValueARM_OpARMBICshiftRL(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (BICshiftRL x (MOVWconst [c]) [d]) - // result: (BICconst x [int64(int32(uint32(c)>>uint64(d)))]) + // result: (BICconst x [int32(uint32(c)>>uint64(d))]) for { - d := v.AuxInt + d := auxIntToInt32(v.AuxInt) x := v_0 if v_1.Op != OpARMMOVWconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) v.reset(OpARMBICconst) - v.AuxInt = int64(int32(uint32(c) >> uint64(d))) + v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d))) v.AddArg(x) return true } @@ -3417,16 +3417,16 @@ func rewriteValueARM_OpARMCMNshiftLL(v *Value) bool { return true } // match: (CMNshiftLL x (MOVWconst [c]) [d]) - // result: (CMNconst x [int64(int32(uint32(c)<>uint64(d))]) + // result: (CMNconst x [c>>uint64(d)]) for { - d := v.AuxInt + d := auxIntToInt32(v.AuxInt) x := v_0 if v_1.Op != OpARMMOVWconst { break } - c := v_1.AuxInt + c := 
auxIntToInt32(v_1.AuxInt) v.reset(OpARMCMNconst) - v.AuxInt = int64(int32(c) >> uint64(d)) + v.AuxInt = int32ToAuxInt(c >> uint64(d)) v.AddArg(x) return true } @@ -3565,16 +3565,16 @@ func rewriteValueARM_OpARMCMNshiftRL(v *Value) bool { return true } // match: (CMNshiftRL x (MOVWconst [c]) [d]) - // result: (CMNconst x [int64(int32(uint32(c)>>uint64(d)))]) + // result: (CMNconst x [int32(uint32(c)>>uint64(d))]) for { - d := v.AuxInt + d := auxIntToInt32(v.AuxInt) x := v_0 if v_1.Op != OpARMMOVWconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) v.reset(OpARMCMNconst) - v.AuxInt = int64(int32(uint32(c) >> uint64(d))) + v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d))) v.AddArg(x) return true } @@ -4080,16 +4080,16 @@ func rewriteValueARM_OpARMCMPshiftLL(v *Value) bool { return true } // match: (CMPshiftLL x (MOVWconst [c]) [d]) - // result: (CMPconst x [int64(int32(uint32(c)<>uint64(d))]) + // result: (CMPconst x [c>>uint64(d)]) for { - d := v.AuxInt + d := auxIntToInt32(v.AuxInt) x := v_0 if v_1.Op != OpARMMOVWconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) v.reset(OpARMCMPconst) - v.AuxInt = int64(int32(c) >> uint64(d)) + v.AuxInt = int32ToAuxInt(c >> uint64(d)) v.AddArg(x) return true } @@ -4236,16 +4236,16 @@ func rewriteValueARM_OpARMCMPshiftRL(v *Value) bool { return true } // match: (CMPshiftRL x (MOVWconst [c]) [d]) - // result: (CMPconst x [int64(int32(uint32(c)>>uint64(d)))]) + // result: (CMPconst x [int32(uint32(c)>>uint64(d))]) for { - d := v.AuxInt + d := auxIntToInt32(v.AuxInt) x := v_0 if v_1.Op != OpARMMOVWconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) v.reset(OpARMCMPconst) - v.AuxInt = int64(int32(uint32(c) >> uint64(d))) + v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d))) v.AddArg(x) return true } @@ -6479,17 +6479,17 @@ func rewriteValueARM_OpARMMOVWloadshiftRA(v *Value) bool { return true } // match: (MOVWloadshiftRA ptr (MOVWconst [c]) [d] mem) - // result: (MOVWload 
[int64(int32(c)>>uint64(d))] ptr mem) + // result: (MOVWload [c>>uint64(d)] ptr mem) for { - d := v.AuxInt + d := auxIntToInt32(v.AuxInt) ptr := v_0 if v_1.Op != OpARMMOVWconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) mem := v_2 v.reset(OpARMMOVWload) - v.AuxInt = int64(int32(c) >> uint64(d)) + v.AuxInt = int32ToAuxInt(c >> uint64(d)) v.AddArg2(ptr, mem) return true } @@ -6883,18 +6883,18 @@ func rewriteValueARM_OpARMMOVWstoreshiftRA(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVWstoreshiftRA ptr (MOVWconst [c]) [d] val mem) - // result: (MOVWstore [int64(int32(c)>>uint64(d))] ptr val mem) + // result: (MOVWstore [c>>uint64(d)] ptr val mem) for { - d := v.AuxInt + d := auxIntToInt32(v.AuxInt) ptr := v_0 if v_1.Op != OpARMMOVWconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) val := v_2 mem := v_3 v.reset(OpARMMOVWstore) - v.AuxInt = int64(int32(c) >> uint64(d)) + v.AuxInt = int32ToAuxInt(c >> uint64(d)) v.AddArg3(ptr, val, mem) return true } @@ -7126,19 +7126,19 @@ func rewriteValueARM_OpARMMUL(v *Value) bool { break } // match: (MUL (MOVWconst [c]) (MOVWconst [d])) - // result: (MOVWconst [int64(int32(c*d))]) + // result: (MOVWconst [c*d]) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpARMMOVWconst { continue } - c := v_0.AuxInt + c := auxIntToInt32(v_0.AuxInt) if v_1.Op != OpARMMOVWconst { continue } - d := v_1.AuxInt + d := auxIntToInt32(v_1.AuxInt) v.reset(OpARMMOVWconst) - v.AuxInt = int64(int32(c * d)) + v.AuxInt = int32ToAuxInt(c * d) return true } break @@ -7533,19 +7533,19 @@ func rewriteValueARM_OpARMMULA(v *Value) bool { return true } // match: (MULA (MOVWconst [c]) (MOVWconst [d]) a) - // result: (ADDconst [int64(int32(c*d))] a) + // result: (ADDconst [c*d] a) for { if v_0.Op != OpARMMOVWconst { break } - c := v_0.AuxInt + c := auxIntToInt32(v_0.AuxInt) if v_1.Op != OpARMMOVWconst { break } - d := v_1.AuxInt + d := auxIntToInt32(v_1.AuxInt) a := v_2 
v.reset(OpARMADDconst) - v.AuxInt = int64(int32(c * d)) + v.AuxInt = int32ToAuxInt(c * d) v.AddArg(a) return true } @@ -7987,19 +7987,19 @@ func rewriteValueARM_OpARMMULS(v *Value) bool { return true } // match: (MULS (MOVWconst [c]) (MOVWconst [d]) a) - // result: (SUBconst [int64(int32(c*d))] a) + // result: (SUBconst [c*d] a) for { if v_0.Op != OpARMMOVWconst { break } - c := v_0.AuxInt + c := auxIntToInt32(v_0.AuxInt) if v_1.Op != OpARMMOVWconst { break } - d := v_1.AuxInt + d := auxIntToInt32(v_1.AuxInt) a := v_2 v.reset(OpARMSUBconst) - v.AuxInt = int64(int32(c * d)) + v.AuxInt = int32ToAuxInt(c * d) v.AddArg(a) return true } @@ -8098,15 +8098,15 @@ func rewriteValueARM_OpARMMVN(v *Value) bool { func rewriteValueARM_OpARMMVNshiftLL(v *Value) bool { v_0 := v.Args[0] // match: (MVNshiftLL (MOVWconst [c]) [d]) - // result: (MOVWconst [^int64(uint32(c)<>uint64(d))]) + // result: (ORconst x [c>>uint64(d)]) for { - d := v.AuxInt + d := auxIntToInt32(v.AuxInt) x := v_0 if v_1.Op != OpARMMOVWconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) v.reset(OpARMORconst) - v.AuxInt = int64(int32(c) >> uint64(d)) + v.AuxInt = int32ToAuxInt(c >> uint64(d)) v.AddArg(x) return true } @@ -8739,16 +8739,16 @@ func rewriteValueARM_OpARMORshiftRL(v *Value) bool { return true } // match: (ORshiftRL x (MOVWconst [c]) [d]) - // result: (ORconst x [int64(int32(uint32(c)>>uint64(d)))]) + // result: (ORconst x [int32(uint32(c)>>uint64(d))]) for { - d := v.AuxInt + d := auxIntToInt32(v.AuxInt) x := v_0 if v_1.Op != OpARMMOVWconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) v.reset(OpARMORconst) - v.AuxInt = int64(int32(uint32(c) >> uint64(d))) + v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d))) v.AddArg(x) return true } @@ -9067,16 +9067,16 @@ func rewriteValueARM_OpARMRSBSshiftLL(v *Value) bool { return true } // match: (RSBSshiftLL x (MOVWconst [c]) [d]) - // result: (RSBSconst x [int64(int32(uint32(c)<>uint64(d))]) + // result: (RSBSconst x 
[c>>uint64(d)]) for { - d := v.AuxInt + d := auxIntToInt32(v.AuxInt) x := v_0 if v_1.Op != OpARMMOVWconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) v.reset(OpARMRSBSconst) - v.AuxInt = int64(int32(c) >> uint64(d)) + v.AuxInt = int32ToAuxInt(c >> uint64(d)) v.AddArg(x) return true } @@ -9215,16 +9215,16 @@ func rewriteValueARM_OpARMRSBSshiftRL(v *Value) bool { return true } // match: (RSBSshiftRL x (MOVWconst [c]) [d]) - // result: (RSBSconst x [int64(int32(uint32(c)>>uint64(d)))]) + // result: (RSBSconst x [int32(uint32(c)>>uint64(d))]) for { - d := v.AuxInt + d := auxIntToInt32(v.AuxInt) x := v_0 if v_1.Op != OpARMMOVWconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) v.reset(OpARMRSBSconst) - v.AuxInt = int64(int32(uint32(c) >> uint64(d))) + v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d))) v.AddArg(x) return true } @@ -9270,56 +9270,56 @@ func rewriteValueARM_OpARMRSBSshiftRLreg(v *Value) bool { func rewriteValueARM_OpARMRSBconst(v *Value) bool { v_0 := v.Args[0] // match: (RSBconst [c] (MOVWconst [d])) - // result: (MOVWconst [int64(int32(c-d))]) + // result: (MOVWconst [c-d]) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpARMMOVWconst { break } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) v.reset(OpARMMOVWconst) - v.AuxInt = int64(int32(c - d)) + v.AuxInt = int32ToAuxInt(c - d) return true } // match: (RSBconst [c] (RSBconst [d] x)) - // result: (ADDconst [int64(int32(c-d))] x) + // result: (ADDconst [c-d] x) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpARMRSBconst { break } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] v.reset(OpARMADDconst) - v.AuxInt = int64(int32(c - d)) + v.AuxInt = int32ToAuxInt(c - d) v.AddArg(x) return true } // match: (RSBconst [c] (ADDconst [d] x)) - // result: (RSBconst [int64(int32(c-d))] x) + // result: (RSBconst [c-d] x) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpARMADDconst { break } - d := 
v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] v.reset(OpARMRSBconst) - v.AuxInt = int64(int32(c - d)) + v.AuxInt = int32ToAuxInt(c - d) v.AddArg(x) return true } // match: (RSBconst [c] (SUBconst [d] x)) - // result: (RSBconst [int64(int32(c+d))] x) + // result: (RSBconst [c+d] x) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpARMSUBconst { break } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] v.reset(OpARMRSBconst) - v.AuxInt = int64(int32(c + d)) + v.AuxInt = int32ToAuxInt(c + d) v.AddArg(x) return true } @@ -9347,16 +9347,16 @@ func rewriteValueARM_OpARMRSBshiftLL(v *Value) bool { return true } // match: (RSBshiftLL x (MOVWconst [c]) [d]) - // result: (RSBconst x [int64(int32(uint32(c)<>uint64(d))]) + // result: (RSBconst x [c>>uint64(d)]) for { - d := v.AuxInt + d := auxIntToInt32(v.AuxInt) x := v_0 if v_1.Op != OpARMMOVWconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) v.reset(OpARMRSBconst) - v.AuxInt = int64(int32(c) >> uint64(d)) + v.AuxInt = int32ToAuxInt(c >> uint64(d)) v.AddArg(x) return true } @@ -9529,16 +9529,16 @@ func rewriteValueARM_OpARMRSBshiftRL(v *Value) bool { return true } // match: (RSBshiftRL x (MOVWconst [c]) [d]) - // result: (RSBconst x [int64(int32(uint32(c)>>uint64(d)))]) + // result: (RSBconst x [int32(uint32(c)>>uint64(d))]) for { - d := v.AuxInt + d := auxIntToInt32(v.AuxInt) x := v_0 if v_1.Op != OpARMMOVWconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) v.reset(OpARMRSBconst) - v.AuxInt = int64(int32(uint32(c) >> uint64(d))) + v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d))) v.AddArg(x) return true } @@ -9602,32 +9602,32 @@ func rewriteValueARM_OpARMRSCconst(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (RSCconst [c] (ADDconst [d] x) flags) - // result: (RSCconst [int64(int32(c-d))] x flags) + // result: (RSCconst [c-d] x flags) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpARMADDconst { break } - 
d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] flags := v_1 v.reset(OpARMRSCconst) - v.AuxInt = int64(int32(c - d)) + v.AuxInt = int32ToAuxInt(c - d) v.AddArg2(x, flags) return true } // match: (RSCconst [c] (SUBconst [d] x) flags) - // result: (RSCconst [int64(int32(c+d))] x flags) + // result: (RSCconst [c+d] x flags) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpARMSUBconst { break } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] flags := v_1 v.reset(OpARMRSCconst) - v.AuxInt = int64(int32(c + d)) + v.AuxInt = int32ToAuxInt(c + d) v.AddArg2(x, flags) return true } @@ -9657,17 +9657,17 @@ func rewriteValueARM_OpARMRSCshiftLL(v *Value) bool { return true } // match: (RSCshiftLL x (MOVWconst [c]) [d] flags) - // result: (RSCconst x [int64(int32(uint32(c)<>uint64(d))] flags) + // result: (RSCconst x [c>>uint64(d)] flags) for { - d := v.AuxInt + d := auxIntToInt32(v.AuxInt) x := v_0 if v_1.Op != OpARMMOVWconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) flags := v_2 v.reset(OpARMRSCconst) - v.AuxInt = int64(int32(c) >> uint64(d)) + v.AuxInt = int32ToAuxInt(c >> uint64(d)) v.AddArg2(x, flags) return true } @@ -9817,17 +9817,17 @@ func rewriteValueARM_OpARMRSCshiftRL(v *Value) bool { return true } // match: (RSCshiftRL x (MOVWconst [c]) [d] flags) - // result: (RSCconst x [int64(int32(uint32(c)>>uint64(d)))] flags) + // result: (RSCconst x [int32(uint32(c)>>uint64(d))] flags) for { - d := v.AuxInt + d := auxIntToInt32(v.AuxInt) x := v_0 if v_1.Op != OpARMMOVWconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) flags := v_2 v.reset(OpARMRSCconst) - v.AuxInt = int64(int32(uint32(c) >> uint64(d))) + v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d))) v.AddArg2(x, flags) return true } @@ -10085,32 +10085,32 @@ func rewriteValueARM_OpARMSBCconst(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (SBCconst [c] (ADDconst [d] x) flags) - // result: (SBCconst 
[int64(int32(c-d))] x flags) + // result: (SBCconst [c-d] x flags) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpARMADDconst { break } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] flags := v_1 v.reset(OpARMSBCconst) - v.AuxInt = int64(int32(c - d)) + v.AuxInt = int32ToAuxInt(c - d) v.AddArg2(x, flags) return true } // match: (SBCconst [c] (SUBconst [d] x) flags) - // result: (SBCconst [int64(int32(c+d))] x flags) + // result: (SBCconst [c+d] x flags) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpARMSUBconst { break } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] flags := v_1 v.reset(OpARMSBCconst) - v.AuxInt = int64(int32(c + d)) + v.AuxInt = int32ToAuxInt(c + d) v.AddArg2(x, flags) return true } @@ -10140,17 +10140,17 @@ func rewriteValueARM_OpARMSBCshiftLL(v *Value) bool { return true } // match: (SBCshiftLL x (MOVWconst [c]) [d] flags) - // result: (SBCconst x [int64(int32(uint32(c)<>uint64(d))] flags) + // result: (SBCconst x [c>>uint64(d)] flags) for { - d := v.AuxInt + d := auxIntToInt32(v.AuxInt) x := v_0 if v_1.Op != OpARMMOVWconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) flags := v_2 v.reset(OpARMSBCconst) - v.AuxInt = int64(int32(c) >> uint64(d)) + v.AuxInt = int32ToAuxInt(c >> uint64(d)) v.AddArg2(x, flags) return true } @@ -10300,17 +10300,17 @@ func rewriteValueARM_OpARMSBCshiftRL(v *Value) bool { return true } // match: (SBCshiftRL x (MOVWconst [c]) [d] flags) - // result: (SBCconst x [int64(int32(uint32(c)>>uint64(d)))] flags) + // result: (SBCconst x [int32(uint32(c)>>uint64(d))] flags) for { - d := v.AuxInt + d := auxIntToInt32(v.AuxInt) x := v_0 if v_1.Op != OpARMMOVWconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) flags := v_2 v.reset(OpARMSBCconst) - v.AuxInt = int64(int32(uint32(c) >> uint64(d))) + v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d))) v.AddArg2(x, flags) return true } @@ -10377,15 +10377,15 @@ func 
rewriteValueARM_OpARMSLL(v *Value) bool { func rewriteValueARM_OpARMSLLconst(v *Value) bool { v_0 := v.Args[0] // match: (SLLconst [c] (MOVWconst [d])) - // result: (MOVWconst [int64(int32(uint32(d)<>uint64(c))]) + // result: (MOVWconst [d>>uint64(c)]) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpARMMOVWconst { break } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) v.reset(OpARMMOVWconst) - v.AuxInt = int64(int32(d) >> uint64(c)) + v.AuxInt = int32ToAuxInt(d >> uint64(c)) return true } // match: (SRAconst (SLLconst x [c]) [d]) @@ -10503,15 +10503,15 @@ func rewriteValueARM_OpARMSRL(v *Value) bool { func rewriteValueARM_OpARMSRLconst(v *Value) bool { v_0 := v.Args[0] // match: (SRLconst [c] (MOVWconst [d])) - // result: (MOVWconst [int64(int32(uint32(d)>>uint64(c)))]) + // result: (MOVWconst [int32(uint32(d)>>uint64(c))]) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpARMMOVWconst { break } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) v.reset(OpARMMOVWconst) - v.AuxInt = int64(int32(uint32(d) >> uint64(c))) + v.AuxInt = int32ToAuxInt(int32(uint32(d) >> uint64(c))) return true } // match: (SRLconst (SLLconst x [c]) [d]) @@ -11035,16 +11035,16 @@ func rewriteValueARM_OpARMSUBSshiftLL(v *Value) bool { return true } // match: (SUBSshiftLL x (MOVWconst [c]) [d]) - // result: (SUBSconst x [int64(int32(uint32(c)<>uint64(d))]) + // result: (SUBSconst x [c>>uint64(d)]) for { - d := v.AuxInt + d := auxIntToInt32(v.AuxInt) x := v_0 if v_1.Op != OpARMMOVWconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) v.reset(OpARMSUBSconst) - v.AuxInt = int64(int32(c) >> uint64(d)) + v.AuxInt = int32ToAuxInt(c >> uint64(d)) v.AddArg(x) return true } @@ -11183,16 +11183,16 @@ func rewriteValueARM_OpARMSUBSshiftRL(v *Value) bool { return true } // match: (SUBSshiftRL x (MOVWconst [c]) [d]) - // result: (SUBSconst x [int64(int32(uint32(c)>>uint64(d)))]) + // result: (SUBSconst x [int32(uint32(c)>>uint64(d))]) for { - d := 
v.AuxInt + d := auxIntToInt32(v.AuxInt) x := v_0 if v_1.Op != OpARMMOVWconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) v.reset(OpARMSUBSconst) - v.AuxInt = int64(int32(uint32(c) >> uint64(d))) + v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d))) v.AddArg(x) return true } @@ -11265,83 +11265,83 @@ func rewriteValueARM_OpARMSUBconst(v *Value) bool { } // match: (SUBconst [c] x) // cond: !isARMImmRot(uint32(c)) && isARMImmRot(uint32(-c)) - // result: (ADDconst [int64(int32(-c))] x) + // result: (ADDconst [-c] x) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) x := v_0 if !(!isARMImmRot(uint32(c)) && isARMImmRot(uint32(-c))) { break } v.reset(OpARMADDconst) - v.AuxInt = int64(int32(-c)) + v.AuxInt = int32ToAuxInt(-c) v.AddArg(x) return true } // match: (SUBconst [c] x) // cond: objabi.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff - // result: (ANDconst [int64(int32(-c))] x) + // result: (ADDconst [-c] x) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) x := v_0 if !(objabi.GOARM == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && uint32(-c) <= 0xffff) { break } - v.reset(OpARMANDconst) - v.AuxInt = int64(int32(-c)) + v.reset(OpARMADDconst) + v.AuxInt = int32ToAuxInt(-c) v.AddArg(x) return true } // match: (SUBconst [c] (MOVWconst [d])) - // result: (MOVWconst [int64(int32(d-c))]) + // result: (MOVWconst [d-c]) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpARMMOVWconst { break } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) v.reset(OpARMMOVWconst) - v.AuxInt = int64(int32(d - c)) + v.AuxInt = int32ToAuxInt(d - c) return true } // match: (SUBconst [c] (SUBconst [d] x)) - // result: (ADDconst [int64(int32(-c-d))] x) + // result: (ADDconst [-c-d] x) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpARMSUBconst { break } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] v.reset(OpARMADDconst) - v.AuxInt = int64(int32(-c - d)) + v.AuxInt = 
int32ToAuxInt(-c - d) v.AddArg(x) return true } // match: (SUBconst [c] (ADDconst [d] x)) - // result: (ADDconst [int64(int32(-c+d))] x) + // result: (ADDconst [-c+d] x) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpARMADDconst { break } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] v.reset(OpARMADDconst) - v.AuxInt = int64(int32(-c + d)) + v.AuxInt = int32ToAuxInt(-c + d) v.AddArg(x) return true } // match: (SUBconst [c] (RSBconst [d] x)) - // result: (RSBconst [int64(int32(-c+d))] x) + // result: (RSBconst [-c+d] x) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpARMRSBconst { break } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] v.reset(OpARMRSBconst) - v.AuxInt = int64(int32(-c + d)) + v.AuxInt = int32ToAuxInt(-c + d) v.AddArg(x) return true } @@ -11369,16 +11369,16 @@ func rewriteValueARM_OpARMSUBshiftLL(v *Value) bool { return true } // match: (SUBshiftLL x (MOVWconst [c]) [d]) - // result: (SUBconst x [int64(int32(uint32(c)<>uint64(d))]) + // result: (SUBconst x [c>>uint64(d)]) for { - d := v.AuxInt + d := auxIntToInt32(v.AuxInt) x := v_0 if v_1.Op != OpARMMOVWconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) v.reset(OpARMSUBconst) - v.AuxInt = int64(int32(c) >> uint64(d)) + v.AuxInt = int32ToAuxInt(c >> uint64(d)) v.AddArg(x) return true } @@ -11551,16 +11551,16 @@ func rewriteValueARM_OpARMSUBshiftRL(v *Value) bool { return true } // match: (SUBshiftRL x (MOVWconst [c]) [d]) - // result: (SUBconst x [int64(int32(uint32(c)>>uint64(d)))]) + // result: (SUBconst x [int32(uint32(c)>>uint64(d))]) for { - d := v.AuxInt + d := auxIntToInt32(v.AuxInt) x := v_0 if v_1.Op != OpARMMOVWconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) v.reset(OpARMSUBconst) - v.AuxInt = int64(int32(uint32(c) >> uint64(d))) + v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d))) v.AddArg(x) return true } @@ -11778,16 +11778,16 @@ func 
rewriteValueARM_OpARMTEQshiftLL(v *Value) bool { return true } // match: (TEQshiftLL x (MOVWconst [c]) [d]) - // result: (TEQconst x [int64(int32(uint32(c)<>uint64(d))]) + // result: (TEQconst x [c>>uint64(d)]) for { - d := v.AuxInt + d := auxIntToInt32(v.AuxInt) x := v_0 if v_1.Op != OpARMMOVWconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) v.reset(OpARMTEQconst) - v.AuxInt = int64(int32(c) >> uint64(d)) + v.AuxInt = int32ToAuxInt(c >> uint64(d)) v.AddArg(x) return true } @@ -11926,16 +11926,16 @@ func rewriteValueARM_OpARMTEQshiftRL(v *Value) bool { return true } // match: (TEQshiftRL x (MOVWconst [c]) [d]) - // result: (TEQconst x [int64(int32(uint32(c)>>uint64(d)))]) + // result: (TEQconst x [int32(uint32(c)>>uint64(d))]) for { - d := v.AuxInt + d := auxIntToInt32(v.AuxInt) x := v_0 if v_1.Op != OpARMMOVWconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) v.reset(OpARMTEQconst) - v.AuxInt = int64(int32(uint32(c) >> uint64(d))) + v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d))) v.AddArg(x) return true } @@ -12136,16 +12136,16 @@ func rewriteValueARM_OpARMTSTshiftLL(v *Value) bool { return true } // match: (TSTshiftLL x (MOVWconst [c]) [d]) - // result: (TSTconst x [int64(int32(uint32(c)<>uint64(d))]) + // result: (TSTconst x [c>>uint64(d)]) for { - d := v.AuxInt + d := auxIntToInt32(v.AuxInt) x := v_0 if v_1.Op != OpARMMOVWconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) v.reset(OpARMTSTconst) - v.AuxInt = int64(int32(c) >> uint64(d)) + v.AuxInt = int32ToAuxInt(c >> uint64(d)) v.AddArg(x) return true } @@ -12284,16 +12284,16 @@ func rewriteValueARM_OpARMTSTshiftRL(v *Value) bool { return true } // match: (TSTshiftRL x (MOVWconst [c]) [d]) - // result: (TSTconst x [int64(int32(uint32(c)>>uint64(d)))]) + // result: (TSTconst x [int32(uint32(c)>>uint64(d))]) for { - d := v.AuxInt + d := auxIntToInt32(v.AuxInt) x := v_0 if v_1.Op != OpARMMOVWconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) 
v.reset(OpARMTSTconst) - v.AuxInt = int64(int32(uint32(c) >> uint64(d))) + v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d))) v.AddArg(x) return true } @@ -12547,16 +12547,16 @@ func rewriteValueARM_OpARMXORshiftLL(v *Value) bool { return true } // match: (XORshiftLL x (MOVWconst [c]) [d]) - // result: (XORconst x [int64(int32(uint32(c)<>uint64(d))]) + // result: (XORconst x [c>>uint64(d)]) for { - d := v.AuxInt + d := auxIntToInt32(v.AuxInt) x := v_0 if v_1.Op != OpARMMOVWconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) v.reset(OpARMXORconst) - v.AuxInt = int64(int32(c) >> uint64(d)) + v.AuxInt = int32ToAuxInt(c >> uint64(d)) v.AddArg(x) return true } @@ -12778,16 +12778,16 @@ func rewriteValueARM_OpARMXORshiftRL(v *Value) bool { return true } // match: (XORshiftRL x (MOVWconst [c]) [d]) - // result: (XORconst x [int64(int32(uint32(c)>>uint64(d)))]) + // result: (XORconst x [int32(uint32(c)>>uint64(d))]) for { - d := v.AuxInt + d := auxIntToInt32(v.AuxInt) x := v_0 if v_1.Op != OpARMMOVWconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) v.reset(OpARMXORconst) - v.AuxInt = int64(int32(uint32(c) >> uint64(d))) + v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d))) v.AddArg(x) return true } @@ -12885,16 +12885,16 @@ func rewriteValueARM_OpARMXORshiftRR(v *Value) bool { return true } // match: (XORshiftRR x (MOVWconst [c]) [d]) - // result: (XORconst x [int64(int32(uint32(c)>>uint64(d)|uint32(c)<>uint64(d)|uint32(c)<>uint64(d) | uint32(c)<>uint64(d) | uint32(c)< Date: Sun, 20 Sep 2020 11:57:20 +0200 Subject: [PATCH 009/281] cmd/compile: enforce strongly typed rules for ARM (mem) L274-L281, L293-L307, L312, L317, L319, L335, L341 Toolstash-check successful Change-Id: I69e8e9f964c1f35615e4e19401c3f661e1e64a3a Reviewed-on: https://go-review.googlesource.com/c/go/+/256100 Reviewed-by: Keith Randall Trust: Giovanni Bajo --- src/cmd/compile/internal/ssa/gen/ARM.rules | 40 +++--- src/cmd/compile/internal/ssa/rewriteARM.go | 152 
++++++++++----------- 2 files changed, 96 insertions(+), 96 deletions(-) diff --git a/src/cmd/compile/internal/ssa/gen/ARM.rules b/src/cmd/compile/internal/ssa/gen/ARM.rules index 4cc8bd52e3..e5aae3b601 100644 --- a/src/cmd/compile/internal/ssa/gen/ARM.rules +++ b/src/cmd/compile/internal/ssa/gen/ARM.rules @@ -260,23 +260,23 @@ (Load ptr mem) && is64BitFloat(t) => (MOVDload ptr mem) // stores -(Store {t} ptr val mem) && t.(*types.Type).Size() == 1 -> (MOVBstore ptr val mem) -(Store {t} ptr val mem) && t.(*types.Type).Size() == 2 -> (MOVHstore ptr val mem) -(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type) -> (MOVWstore ptr val mem) -(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && is32BitFloat(val.Type) -> (MOVFstore ptr val mem) -(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) -> (MOVDstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 4 && !is32BitFloat(val.Type) => (MOVWstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 4 && is32BitFloat(val.Type) => (MOVFstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 8 && is64BitFloat(val.Type) => (MOVDstore ptr val mem) // zero instructions (Zero [0] _ mem) => mem (Zero [1] ptr mem) => (MOVBstore ptr (MOVWconst [0]) mem) -(Zero [2] {t} ptr mem) && t.(*types.Type).Alignment()%2 == 0 -> +(Zero [2] {t} ptr mem) && t.Alignment()%2 == 0 => (MOVHstore ptr (MOVWconst [0]) mem) -(Zero [2] ptr mem) -> +(Zero [2] ptr mem) => (MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr (MOVWconst [0]) mem)) -(Zero [4] {t} ptr mem) && t.(*types.Type).Alignment()%4 == 0 -> +(Zero [4] {t} ptr mem) && t.Alignment()%4 == 0 => (MOVWstore ptr (MOVWconst [0]) mem) -(Zero [4] {t} ptr mem) && t.(*types.Type).Alignment()%2 == 0 -> +(Zero [4] {t} ptr mem) && t.Alignment()%2 == 0 => (MOVHstore [2] ptr (MOVWconst [0]) 
(MOVHstore [0] ptr (MOVWconst [0]) mem)) (Zero [4] ptr mem) => @@ -294,29 +294,29 @@ // 4 and 128 are magic constants, see runtime/mkduff.go (Zero [s] {t} ptr mem) && s%4 == 0 && s > 4 && s <= 512 - && t.(*types.Type).Alignment()%4 == 0 && !config.noDuffDevice -> + && t.Alignment()%4 == 0 && !config.noDuffDevice => (DUFFZERO [4 * (128 - s/4)] ptr (MOVWconst [0]) mem) // Large zeroing uses a loop (Zero [s] {t} ptr mem) - && (s > 512 || config.noDuffDevice) || t.(*types.Type).Alignment()%4 != 0 -> - (LoweredZero [t.(*types.Type).Alignment()] + && (s > 512 || config.noDuffDevice) || t.Alignment()%4 != 0 => + (LoweredZero [t.Alignment()] ptr - (ADDconst ptr [s-moveSize(t.(*types.Type).Alignment(), config)]) + (ADDconst ptr [int32(s-moveSize(t.Alignment(), config))]) (MOVWconst [0]) mem) // moves (Move [0] _ _ mem) => mem (Move [1] dst src mem) => (MOVBstore dst (MOVBUload src mem) mem) -(Move [2] {t} dst src mem) && t.(*types.Type).Alignment()%2 == 0 -> +(Move [2] {t} dst src mem) && t.Alignment()%2 == 0 => (MOVHstore dst (MOVHUload src mem) mem) (Move [2] dst src mem) => (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem)) -(Move [4] {t} dst src mem) && t.(*types.Type).Alignment()%4 == 0 -> +(Move [4] {t} dst src mem) && t.Alignment()%4 == 0 => (MOVWstore dst (MOVWload src mem) mem) -(Move [4] {t} dst src mem) && t.(*types.Type).Alignment()%2 == 0 -> +(Move [4] {t} dst src mem) && t.Alignment()%2 == 0 => (MOVHstore [2] dst (MOVHUload [2] src mem) (MOVHstore dst (MOVHUload src mem) mem)) (Move [4] dst src mem) => @@ -334,16 +334,16 @@ // 8 and 128 are magic constants, see runtime/mkduff.go (Move [s] {t} dst src mem) && s%4 == 0 && s > 4 && s <= 512 - && t.(*types.Type).Alignment()%4 == 0 && !config.noDuffDevice && logLargeCopy(v, s) -> + && t.Alignment()%4 == 0 && !config.noDuffDevice && logLargeCopy(v, s) => (DUFFCOPY [8 * (128 - s/4)] dst src mem) // Large move uses a loop (Move [s] {t} dst src mem) - && ((s > 512 || 
config.noDuffDevice) || t.(*types.Type).Alignment()%4 != 0) && logLargeCopy(v, s) -> - (LoweredMove [t.(*types.Type).Alignment()] + && ((s > 512 || config.noDuffDevice) || t.Alignment()%4 != 0) && logLargeCopy(v, s) => + (LoweredMove [t.Alignment()] dst src - (ADDconst src [s-moveSize(t.(*types.Type).Alignment(), config)]) + (ADDconst src [int32(s-moveSize(t.Alignment(), config))]) mem) // calls diff --git a/src/cmd/compile/internal/ssa/rewriteARM.go b/src/cmd/compile/internal/ssa/rewriteARM.go index f25b23dc46..dd1c2ad68e 100644 --- a/src/cmd/compile/internal/ssa/rewriteARM.go +++ b/src/cmd/compile/internal/ssa/rewriteARM.go @@ -14333,17 +14333,17 @@ func rewriteValueARM_OpMove(v *Value) bool { return true } // match: (Move [2] {t} dst src mem) - // cond: t.(*types.Type).Alignment()%2 == 0 + // cond: t.Alignment()%2 == 0 // result: (MOVHstore dst (MOVHUload src mem) mem) for { - if v.AuxInt != 2 { + if auxIntToInt64(v.AuxInt) != 2 { break } - t := v.Aux + t := auxToType(v.Aux) dst := v_0 src := v_1 mem := v_2 - if !(t.(*types.Type).Alignment()%2 == 0) { + if !(t.Alignment()%2 == 0) { break } v.reset(OpARMMOVHstore) @@ -14374,17 +14374,17 @@ func rewriteValueARM_OpMove(v *Value) bool { return true } // match: (Move [4] {t} dst src mem) - // cond: t.(*types.Type).Alignment()%4 == 0 + // cond: t.Alignment()%4 == 0 // result: (MOVWstore dst (MOVWload src mem) mem) for { - if v.AuxInt != 4 { + if auxIntToInt64(v.AuxInt) != 4 { break } - t := v.Aux + t := auxToType(v.Aux) dst := v_0 src := v_1 mem := v_2 - if !(t.(*types.Type).Alignment()%4 == 0) { + if !(t.Alignment()%4 == 0) { break } v.reset(OpARMMOVWstore) @@ -14394,23 +14394,23 @@ func rewriteValueARM_OpMove(v *Value) bool { return true } // match: (Move [4] {t} dst src mem) - // cond: t.(*types.Type).Alignment()%2 == 0 + // cond: t.Alignment()%2 == 0 // result: (MOVHstore [2] dst (MOVHUload [2] src mem) (MOVHstore dst (MOVHUload src mem) mem)) for { - if v.AuxInt != 4 { + if auxIntToInt64(v.AuxInt) != 4 { break } 
- t := v.Aux + t := auxToType(v.Aux) dst := v_0 src := v_1 mem := v_2 - if !(t.(*types.Type).Alignment()%2 == 0) { + if !(t.Alignment()%2 == 0) { break } v.reset(OpARMMOVHstore) - v.AuxInt = 2 + v.AuxInt = int32ToAuxInt(2) v0 := b.NewValue0(v.Pos, OpARMMOVHUload, typ.UInt16) - v0.AuxInt = 2 + v0.AuxInt = int32ToAuxInt(2) v0.AddArg2(src, mem) v1 := b.NewValue0(v.Pos, OpARMMOVHstore, types.TypeMem) v2 := b.NewValue0(v.Pos, OpARMMOVHUload, typ.UInt16) @@ -14480,38 +14480,38 @@ func rewriteValueARM_OpMove(v *Value) bool { return true } // match: (Move [s] {t} dst src mem) - // cond: s%4 == 0 && s > 4 && s <= 512 && t.(*types.Type).Alignment()%4 == 0 && !config.noDuffDevice && logLargeCopy(v, s) + // cond: s%4 == 0 && s > 4 && s <= 512 && t.Alignment()%4 == 0 && !config.noDuffDevice && logLargeCopy(v, s) // result: (DUFFCOPY [8 * (128 - s/4)] dst src mem) for { - s := v.AuxInt - t := v.Aux + s := auxIntToInt64(v.AuxInt) + t := auxToType(v.Aux) dst := v_0 src := v_1 mem := v_2 - if !(s%4 == 0 && s > 4 && s <= 512 && t.(*types.Type).Alignment()%4 == 0 && !config.noDuffDevice && logLargeCopy(v, s)) { + if !(s%4 == 0 && s > 4 && s <= 512 && t.Alignment()%4 == 0 && !config.noDuffDevice && logLargeCopy(v, s)) { break } v.reset(OpARMDUFFCOPY) - v.AuxInt = 8 * (128 - s/4) + v.AuxInt = int64ToAuxInt(8 * (128 - s/4)) v.AddArg3(dst, src, mem) return true } // match: (Move [s] {t} dst src mem) - // cond: ((s > 512 || config.noDuffDevice) || t.(*types.Type).Alignment()%4 != 0) && logLargeCopy(v, s) - // result: (LoweredMove [t.(*types.Type).Alignment()] dst src (ADDconst src [s-moveSize(t.(*types.Type).Alignment(), config)]) mem) + // cond: ((s > 512 || config.noDuffDevice) || t.Alignment()%4 != 0) && logLargeCopy(v, s) + // result: (LoweredMove [t.Alignment()] dst src (ADDconst src [int32(s-moveSize(t.Alignment(), config))]) mem) for { - s := v.AuxInt - t := v.Aux + s := auxIntToInt64(v.AuxInt) + t := auxToType(v.Aux) dst := v_0 src := v_1 mem := v_2 - if !(((s > 512 || 
config.noDuffDevice) || t.(*types.Type).Alignment()%4 != 0) && logLargeCopy(v, s)) { + if !(((s > 512 || config.noDuffDevice) || t.Alignment()%4 != 0) && logLargeCopy(v, s)) { break } v.reset(OpARMLoweredMove) - v.AuxInt = t.(*types.Type).Alignment() + v.AuxInt = int64ToAuxInt(t.Alignment()) v0 := b.NewValue0(v.Pos, OpARMADDconst, src.Type) - v0.AuxInt = s - moveSize(t.(*types.Type).Alignment(), config) + v0.AuxInt = int32ToAuxInt(int32(s - moveSize(t.Alignment(), config))) v0.AddArg(src) v.AddArg4(dst, src, v0, mem) return true @@ -15678,14 +15678,14 @@ func rewriteValueARM_OpStore(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (Store {t} ptr val mem) - // cond: t.(*types.Type).Size() == 1 + // cond: t.Size() == 1 // result: (MOVBstore ptr val mem) for { - t := v.Aux + t := auxToType(v.Aux) ptr := v_0 val := v_1 mem := v_2 - if !(t.(*types.Type).Size() == 1) { + if !(t.Size() == 1) { break } v.reset(OpARMMOVBstore) @@ -15693,14 +15693,14 @@ func rewriteValueARM_OpStore(v *Value) bool { return true } // match: (Store {t} ptr val mem) - // cond: t.(*types.Type).Size() == 2 + // cond: t.Size() == 2 // result: (MOVHstore ptr val mem) for { - t := v.Aux + t := auxToType(v.Aux) ptr := v_0 val := v_1 mem := v_2 - if !(t.(*types.Type).Size() == 2) { + if !(t.Size() == 2) { break } v.reset(OpARMMOVHstore) @@ -15708,14 +15708,14 @@ func rewriteValueARM_OpStore(v *Value) bool { return true } // match: (Store {t} ptr val mem) - // cond: t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type) + // cond: t.Size() == 4 && !is32BitFloat(val.Type) // result: (MOVWstore ptr val mem) for { - t := v.Aux + t := auxToType(v.Aux) ptr := v_0 val := v_1 mem := v_2 - if !(t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type)) { + if !(t.Size() == 4 && !is32BitFloat(val.Type)) { break } v.reset(OpARMMOVWstore) @@ -15723,14 +15723,14 @@ func rewriteValueARM_OpStore(v *Value) bool { return true } // match: (Store {t} ptr val mem) - // cond: t.(*types.Type).Size() == 4 && 
is32BitFloat(val.Type) + // cond: t.Size() == 4 && is32BitFloat(val.Type) // result: (MOVFstore ptr val mem) for { - t := v.Aux + t := auxToType(v.Aux) ptr := v_0 val := v_1 mem := v_2 - if !(t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)) { + if !(t.Size() == 4 && is32BitFloat(val.Type)) { break } v.reset(OpARMMOVFstore) @@ -15738,14 +15738,14 @@ func rewriteValueARM_OpStore(v *Value) bool { return true } // match: (Store {t} ptr val mem) - // cond: t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) + // cond: t.Size() == 8 && is64BitFloat(val.Type) // result: (MOVDstore ptr val mem) for { - t := v.Aux + t := auxToType(v.Aux) ptr := v_0 val := v_1 mem := v_2 - if !(t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)) { + if !(t.Size() == 8 && is64BitFloat(val.Type)) { break } v.reset(OpARMMOVDstore) @@ -15785,80 +15785,80 @@ func rewriteValueARM_OpZero(v *Value) bool { return true } // match: (Zero [2] {t} ptr mem) - // cond: t.(*types.Type).Alignment()%2 == 0 + // cond: t.Alignment()%2 == 0 // result: (MOVHstore ptr (MOVWconst [0]) mem) for { - if v.AuxInt != 2 { + if auxIntToInt64(v.AuxInt) != 2 { break } - t := v.Aux + t := auxToType(v.Aux) ptr := v_0 mem := v_1 - if !(t.(*types.Type).Alignment()%2 == 0) { + if !(t.Alignment()%2 == 0) { break } v.reset(OpARMMOVHstore) v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) - v0.AuxInt = 0 + v0.AuxInt = int32ToAuxInt(0) v.AddArg3(ptr, v0, mem) return true } // match: (Zero [2] ptr mem) // result: (MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr (MOVWconst [0]) mem)) for { - if v.AuxInt != 2 { + if auxIntToInt64(v.AuxInt) != 2 { break } ptr := v_0 mem := v_1 v.reset(OpARMMOVBstore) - v.AuxInt = 1 + v.AuxInt = int32ToAuxInt(1) v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) - v0.AuxInt = 0 + v0.AuxInt = int32ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem) - v1.AuxInt = 0 + v1.AuxInt = int32ToAuxInt(0) v1.AddArg3(ptr, v0, mem) v.AddArg3(ptr, v0, v1) return true } // match: 
(Zero [4] {t} ptr mem) - // cond: t.(*types.Type).Alignment()%4 == 0 + // cond: t.Alignment()%4 == 0 // result: (MOVWstore ptr (MOVWconst [0]) mem) for { - if v.AuxInt != 4 { + if auxIntToInt64(v.AuxInt) != 4 { break } - t := v.Aux + t := auxToType(v.Aux) ptr := v_0 mem := v_1 - if !(t.(*types.Type).Alignment()%4 == 0) { + if !(t.Alignment()%4 == 0) { break } v.reset(OpARMMOVWstore) v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) - v0.AuxInt = 0 + v0.AuxInt = int32ToAuxInt(0) v.AddArg3(ptr, v0, mem) return true } // match: (Zero [4] {t} ptr mem) - // cond: t.(*types.Type).Alignment()%2 == 0 + // cond: t.Alignment()%2 == 0 // result: (MOVHstore [2] ptr (MOVWconst [0]) (MOVHstore [0] ptr (MOVWconst [0]) mem)) for { - if v.AuxInt != 4 { + if auxIntToInt64(v.AuxInt) != 4 { break } - t := v.Aux + t := auxToType(v.Aux) ptr := v_0 mem := v_1 - if !(t.(*types.Type).Alignment()%2 == 0) { + if !(t.Alignment()%2 == 0) { break } v.reset(OpARMMOVHstore) - v.AuxInt = 2 + v.AuxInt = int32ToAuxInt(2) v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) - v0.AuxInt = 0 + v0.AuxInt = int32ToAuxInt(0) v1 := b.NewValue0(v.Pos, OpARMMOVHstore, types.TypeMem) - v1.AuxInt = 0 + v1.AuxInt = int32ToAuxInt(0) v1.AddArg3(ptr, v0, mem) v.AddArg3(ptr, v0, v1) return true @@ -15909,41 +15909,41 @@ func rewriteValueARM_OpZero(v *Value) bool { return true } // match: (Zero [s] {t} ptr mem) - // cond: s%4 == 0 && s > 4 && s <= 512 && t.(*types.Type).Alignment()%4 == 0 && !config.noDuffDevice + // cond: s%4 == 0 && s > 4 && s <= 512 && t.Alignment()%4 == 0 && !config.noDuffDevice // result: (DUFFZERO [4 * (128 - s/4)] ptr (MOVWconst [0]) mem) for { - s := v.AuxInt - t := v.Aux + s := auxIntToInt64(v.AuxInt) + t := auxToType(v.Aux) ptr := v_0 mem := v_1 - if !(s%4 == 0 && s > 4 && s <= 512 && t.(*types.Type).Alignment()%4 == 0 && !config.noDuffDevice) { + if !(s%4 == 0 && s > 4 && s <= 512 && t.Alignment()%4 == 0 && !config.noDuffDevice) { break } v.reset(OpARMDUFFZERO) - v.AuxInt = 4 * (128 - 
s/4) + v.AuxInt = int64ToAuxInt(4 * (128 - s/4)) v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) - v0.AuxInt = 0 + v0.AuxInt = int32ToAuxInt(0) v.AddArg3(ptr, v0, mem) return true } // match: (Zero [s] {t} ptr mem) - // cond: (s > 512 || config.noDuffDevice) || t.(*types.Type).Alignment()%4 != 0 - // result: (LoweredZero [t.(*types.Type).Alignment()] ptr (ADDconst ptr [s-moveSize(t.(*types.Type).Alignment(), config)]) (MOVWconst [0]) mem) + // cond: (s > 512 || config.noDuffDevice) || t.Alignment()%4 != 0 + // result: (LoweredZero [t.Alignment()] ptr (ADDconst ptr [int32(s-moveSize(t.Alignment(), config))]) (MOVWconst [0]) mem) for { - s := v.AuxInt - t := v.Aux + s := auxIntToInt64(v.AuxInt) + t := auxToType(v.Aux) ptr := v_0 mem := v_1 - if !((s > 512 || config.noDuffDevice) || t.(*types.Type).Alignment()%4 != 0) { + if !((s > 512 || config.noDuffDevice) || t.Alignment()%4 != 0) { break } v.reset(OpARMLoweredZero) - v.AuxInt = t.(*types.Type).Alignment() + v.AuxInt = int64ToAuxInt(t.Alignment()) v0 := b.NewValue0(v.Pos, OpARMADDconst, ptr.Type) - v0.AuxInt = s - moveSize(t.(*types.Type).Alignment(), config) + v0.AuxInt = int32ToAuxInt(int32(s - moveSize(t.Alignment(), config))) v0.AddArg(ptr) v1 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32) - v1.AuxInt = 0 + v1.AuxInt = int32ToAuxInt(0) v.AddArg4(ptr, v0, v1, mem) return true } From 1f41f04d2c121ba229072bd954f8346a0fc6d3e4 Mon Sep 17 00:00:00 2001 From: Constantin Konstantinidis Date: Sat, 4 Jul 2020 11:01:29 +0200 Subject: [PATCH 010/281] cmd/compile: enforce strongly typed rules for ARM (8) add type casting to int32: L148-L156, L774-L778 Toolstash-check successful Change-Id: Ib6544c1d7853c2811def5b18786e1fc5c18086ca Reviewed-on: https://go-review.googlesource.com/c/go/+/256097 Reviewed-by: Keith Randall Trust: Giovanni Bajo --- src/cmd/compile/internal/ssa/gen/ARM.rules | 26 +++---- src/cmd/compile/internal/ssa/rewriteARM.go | 86 +++++++++++----------- 2 files changed, 56 insertions(+), 56 deletions(-) 
diff --git a/src/cmd/compile/internal/ssa/gen/ARM.rules b/src/cmd/compile/internal/ssa/gen/ARM.rules index e5aae3b601..2be347d98b 100644 --- a/src/cmd/compile/internal/ssa/gen/ARM.rules +++ b/src/cmd/compile/internal/ssa/gen/ARM.rules @@ -145,15 +145,15 @@ // constant shifts // generic opt rewrites all constant shifts to shift by Const64 -(Lsh32x64 x (Const64 [c])) && uint64(c) < 32 -> (SLLconst x [c]) -(Rsh32x64 x (Const64 [c])) && uint64(c) < 32 -> (SRAconst x [c]) -(Rsh32Ux64 x (Const64 [c])) && uint64(c) < 32 -> (SRLconst x [c]) -(Lsh16x64 x (Const64 [c])) && uint64(c) < 16 -> (SLLconst x [c]) -(Rsh16x64 x (Const64 [c])) && uint64(c) < 16 -> (SRAconst (SLLconst x [16]) [c+16]) -(Rsh16Ux64 x (Const64 [c])) && uint64(c) < 16 -> (SRLconst (SLLconst x [16]) [c+16]) -(Lsh8x64 x (Const64 [c])) && uint64(c) < 8 -> (SLLconst x [c]) -(Rsh8x64 x (Const64 [c])) && uint64(c) < 8 -> (SRAconst (SLLconst x [24]) [c+24]) -(Rsh8Ux64 x (Const64 [c])) && uint64(c) < 8 -> (SRLconst (SLLconst x [24]) [c+24]) +(Lsh32x64 x (Const64 [c])) && uint64(c) < 32 => (SLLconst x [int32(c)]) +(Rsh32x64 x (Const64 [c])) && uint64(c) < 32 => (SRAconst x [int32(c)]) +(Rsh32Ux64 x (Const64 [c])) && uint64(c) < 32 => (SRLconst x [int32(c)]) +(Lsh16x64 x (Const64 [c])) && uint64(c) < 16 => (SLLconst x [int32(c)]) +(Rsh16x64 x (Const64 [c])) && uint64(c) < 16 => (SRAconst (SLLconst x [16]) [int32(c+16)]) +(Rsh16Ux64 x (Const64 [c])) && uint64(c) < 16 => (SRLconst (SLLconst x [16]) [int32(c+16)]) +(Lsh8x64 x (Const64 [c])) && uint64(c) < 8 => (SLLconst x [int32(c)]) +(Rsh8x64 x (Const64 [c])) && uint64(c) < 8 => (SRAconst (SLLconst x [24]) [int32(c+24)]) +(Rsh8Ux64 x (Const64 [c])) && uint64(c) < 8 => (SRLconst (SLLconst x [24]) [int32(c+24)]) // large constant shifts (Lsh32x64 _ (Const64 [c])) && uint64(c) >= 32 => (Const32 [0]) @@ -771,10 +771,10 @@ (BICconst [c] (MOVWconst [d])) => (MOVWconst [d&^c]) (BICconst [c] (BICconst [d] x)) => (BICconst [c|d] x) (MVN (MOVWconst [c])) => (MOVWconst [^c]) 
-(MOVBreg (MOVWconst [c])) -> (MOVWconst [int64(int8(c))]) -(MOVBUreg (MOVWconst [c])) -> (MOVWconst [int64(uint8(c))]) -(MOVHreg (MOVWconst [c])) -> (MOVWconst [int64(int16(c))]) -(MOVHUreg (MOVWconst [c])) -> (MOVWconst [int64(uint16(c))]) +(MOVBreg (MOVWconst [c])) => (MOVWconst [int32(int8(c))]) +(MOVBUreg (MOVWconst [c])) => (MOVWconst [int32(uint8(c))]) +(MOVHreg (MOVWconst [c])) => (MOVWconst [int32(int16(c))]) +(MOVHUreg (MOVWconst [c])) => (MOVWconst [int32(uint16(c))]) (MOVWreg (MOVWconst [c])) => (MOVWconst [c]) // BFX: Width = c >> 8, LSB = c & 0xff, result = d << (32 - Width - LSB) >> (32 - Width) (BFX [c] (MOVWconst [d])) => (MOVWconst [d<<(32-uint32(c&0xff)-uint32(c>>8))>>(32-uint32(c>>8))]) diff --git a/src/cmd/compile/internal/ssa/rewriteARM.go b/src/cmd/compile/internal/ssa/rewriteARM.go index dd1c2ad68e..594d7427c4 100644 --- a/src/cmd/compile/internal/ssa/rewriteARM.go +++ b/src/cmd/compile/internal/ssa/rewriteARM.go @@ -4730,14 +4730,14 @@ func rewriteValueARM_OpARMMOVBUreg(v *Value) bool { return true } // match: (MOVBUreg (MOVWconst [c])) - // result: (MOVWconst [int64(uint8(c))]) + // result: (MOVWconst [int32(uint8(c))]) for { if v_0.Op != OpARMMOVWconst { break } - c := v_0.AuxInt + c := auxIntToInt32(v_0.AuxInt) v.reset(OpARMMOVWconst) - v.AuxInt = int64(uint8(c)) + v.AuxInt = int32ToAuxInt(int32(uint8(c))) return true } return false @@ -4939,14 +4939,14 @@ func rewriteValueARM_OpARMMOVBreg(v *Value) bool { return true } // match: (MOVBreg (MOVWconst [c])) - // result: (MOVWconst [int64(int8(c))]) + // result: (MOVWconst [int32(int8(c))]) for { if v_0.Op != OpARMMOVWconst { break } - c := v_0.AuxInt + c := auxIntToInt32(v_0.AuxInt) v.reset(OpARMMOVWconst) - v.AuxInt = int64(int8(c)) + v.AuxInt = int32ToAuxInt(int32(int8(c))) return true } return false @@ -5665,14 +5665,14 @@ func rewriteValueARM_OpARMMOVHUreg(v *Value) bool { return true } // match: (MOVHUreg (MOVWconst [c])) - // result: (MOVWconst [int64(uint16(c))]) + // result: 
(MOVWconst [int32(uint16(c))]) for { if v_0.Op != OpARMMOVWconst { break } - c := v_0.AuxInt + c := auxIntToInt32(v_0.AuxInt) v.reset(OpARMMOVWconst) - v.AuxInt = int64(uint16(c)) + v.AuxInt = int32ToAuxInt(int32(uint16(c))) return true } return false @@ -5918,14 +5918,14 @@ func rewriteValueARM_OpARMMOVHreg(v *Value) bool { return true } // match: (MOVHreg (MOVWconst [c])) - // result: (MOVWconst [int64(int16(c))]) + // result: (MOVWconst [int32(int16(c))]) for { if v_0.Op != OpARMMOVWconst { break } - c := v_0.AuxInt + c := auxIntToInt32(v_0.AuxInt) v.reset(OpARMMOVWconst) - v.AuxInt = int64(int16(c)) + v.AuxInt = int32ToAuxInt(int32(int16(c))) return true } return false @@ -13930,18 +13930,18 @@ func rewriteValueARM_OpLsh16x64(v *Value) bool { v_0 := v.Args[0] // match: (Lsh16x64 x (Const64 [c])) // cond: uint64(c) < 16 - // result: (SLLconst x [c]) + // result: (SLLconst x [int32(c)]) for { x := v_0 if v_1.Op != OpConst64 { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(uint64(c) < 16) { break } v.reset(OpARMSLLconst) - v.AuxInt = c + v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg(x) return true } @@ -14027,18 +14027,18 @@ func rewriteValueARM_OpLsh32x64(v *Value) bool { v_0 := v.Args[0] // match: (Lsh32x64 x (Const64 [c])) // cond: uint64(c) < 32 - // result: (SLLconst x [c]) + // result: (SLLconst x [int32(c)]) for { x := v_0 if v_1.Op != OpConst64 { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(uint64(c) < 32) { break } v.reset(OpARMSLLconst) - v.AuxInt = c + v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg(x) return true } @@ -14124,18 +14124,18 @@ func rewriteValueARM_OpLsh8x64(v *Value) bool { v_0 := v.Args[0] // match: (Lsh8x64 x (Const64 [c])) // cond: uint64(c) < 8 - // result: (SLLconst x [c]) + // result: (SLLconst x [int32(c)]) for { x := v_0 if v_1.Op != OpConst64 { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(uint64(c) < 8) { break } v.reset(OpARMSLLconst) - v.AuxInt = c + v.AuxInt = 
int32ToAuxInt(int32(c)) v.AddArg(x) return true } @@ -14951,20 +14951,20 @@ func rewriteValueARM_OpRsh16Ux64(v *Value) bool { typ := &b.Func.Config.Types // match: (Rsh16Ux64 x (Const64 [c])) // cond: uint64(c) < 16 - // result: (SRLconst (SLLconst x [16]) [c+16]) + // result: (SRLconst (SLLconst x [16]) [int32(c+16)]) for { x := v_0 if v_1.Op != OpConst64 { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(uint64(c) < 16) { break } v.reset(OpARMSRLconst) - v.AuxInt = c + 16 + v.AuxInt = int32ToAuxInt(int32(c + 16)) v0 := b.NewValue0(v.Pos, OpARMSLLconst, typ.UInt32) - v0.AuxInt = 16 + v0.AuxInt = int32ToAuxInt(16) v0.AddArg(x) v.AddArg(v0) return true @@ -15054,20 +15054,20 @@ func rewriteValueARM_OpRsh16x64(v *Value) bool { typ := &b.Func.Config.Types // match: (Rsh16x64 x (Const64 [c])) // cond: uint64(c) < 16 - // result: (SRAconst (SLLconst x [16]) [c+16]) + // result: (SRAconst (SLLconst x [16]) [int32(c+16)]) for { x := v_0 if v_1.Op != OpConst64 { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(uint64(c) < 16) { break } v.reset(OpARMSRAconst) - v.AuxInt = c + 16 + v.AuxInt = int32ToAuxInt(int32(c + 16)) v0 := b.NewValue0(v.Pos, OpARMSLLconst, typ.UInt32) - v0.AuxInt = 16 + v0.AuxInt = int32ToAuxInt(16) v0.AddArg(x) v.AddArg(v0) return true @@ -15161,18 +15161,18 @@ func rewriteValueARM_OpRsh32Ux64(v *Value) bool { v_0 := v.Args[0] // match: (Rsh32Ux64 x (Const64 [c])) // cond: uint64(c) < 32 - // result: (SRLconst x [c]) + // result: (SRLconst x [int32(c)]) for { x := v_0 if v_1.Op != OpConst64 { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(uint64(c) < 32) { break } v.reset(OpARMSRLconst) - v.AuxInt = c + v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg(x) return true } @@ -15252,18 +15252,18 @@ func rewriteValueARM_OpRsh32x64(v *Value) bool { v_0 := v.Args[0] // match: (Rsh32x64 x (Const64 [c])) // cond: uint64(c) < 32 - // result: (SRAconst x [c]) + // result: (SRAconst x [int32(c)]) for { x := v_0 if v_1.Op != 
OpConst64 { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(uint64(c) < 32) { break } v.reset(OpARMSRAconst) - v.AuxInt = c + v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg(x) return true } @@ -15358,20 +15358,20 @@ func rewriteValueARM_OpRsh8Ux64(v *Value) bool { typ := &b.Func.Config.Types // match: (Rsh8Ux64 x (Const64 [c])) // cond: uint64(c) < 8 - // result: (SRLconst (SLLconst x [24]) [c+24]) + // result: (SRLconst (SLLconst x [24]) [int32(c+24)]) for { x := v_0 if v_1.Op != OpConst64 { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(uint64(c) < 8) { break } v.reset(OpARMSRLconst) - v.AuxInt = c + 24 + v.AuxInt = int32ToAuxInt(int32(c + 24)) v0 := b.NewValue0(v.Pos, OpARMSLLconst, typ.UInt32) - v0.AuxInt = 24 + v0.AuxInt = int32ToAuxInt(24) v0.AddArg(x) v.AddArg(v0) return true @@ -15461,20 +15461,20 @@ func rewriteValueARM_OpRsh8x64(v *Value) bool { typ := &b.Func.Config.Types // match: (Rsh8x64 x (Const64 [c])) // cond: uint64(c) < 8 - // result: (SRAconst (SLLconst x [24]) [c+24]) + // result: (SRAconst (SLLconst x [24]) [int32(c+24)]) for { x := v_0 if v_1.Op != OpConst64 { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(uint64(c) < 8) { break } v.reset(OpARMSRAconst) - v.AuxInt = c + 24 + v.AuxInt = int32ToAuxInt(int32(c + 24)) v0 := b.NewValue0(v.Pos, OpARMSLLconst, typ.UInt32) - v0.AuxInt = 24 + v0.AuxInt = int32ToAuxInt(24) v0.AddArg(x) v.AddArg(v0) return true From 11cdbab9d4f3e4f0ce690d595933c72df54fad33 Mon Sep 17 00:00:00 2001 From: Michael Munday Date: Wed, 23 Sep 2020 03:58:52 -0700 Subject: [PATCH 011/281] bytes, internal/bytealg: fix incorrect IndexString usage The IndexString implementation in the bytealg package requires that the string passed into it be in the range '2 <= len(s) <= MaxLen' where MaxLen may be any value (including 0). CL 156998 added calls to bytealg.IndexString where MaxLen was not first checked. This led to an illegal instruction on s390x with the vector facility disabled. 
This CL guards the calls to bytealg.IndexString with a MaxLen check. If the check fails then the code now falls back to the pre CL 156998 implementation (a loop over the runes in the string). Since the MaxLen check is now in place the generic implementation is no longer called so I have returned it to its original unimplemented state. In future we may want to drop MaxLen to prevent this kind of confusion. Fixes #41552. Change-Id: Ibeb3f08720444a05c08d719ed97f6cef2423bbe9 Reviewed-on: https://go-review.googlesource.com/c/go/+/256717 Run-TryBot: Michael Munday TryBot-Result: Go Bot Trust: Michael Munday Reviewed-by: Keith Randall --- src/bytes/bytes.go | 50 +++++++++++++++++---------- src/internal/bytealg/index_generic.go | 38 ++------------------ 2 files changed, 34 insertions(+), 54 deletions(-) diff --git a/src/bytes/bytes.go b/src/bytes/bytes.go index aa07b9fbc1..ce52649f13 100644 --- a/src/bytes/bytes.go +++ b/src/bytes/bytes.go @@ -227,19 +227,26 @@ func IndexAny(s []byte, chars string) int { continue } r, width = utf8.DecodeRune(s[i:]) - if r == utf8.RuneError { - for _, r = range chars { - if r == utf8.RuneError { + if r != utf8.RuneError { + // r is 2 to 4 bytes + if len(chars) == width { + if chars == string(r) { return i } + continue + } + // Use bytealg.IndexString for performance if available. + if bytealg.MaxLen >= width { + if bytealg.IndexString(chars, string(r)) >= 0 { + return i + } + continue } - continue } - // r is 2 to 4 bytes. Using strings.Index is more reasonable, but as the bytes - // package should not import the strings package, use bytealg.IndexString - // instead. And this does not seem to lose much performance. 
- if chars == string(r) || bytealg.IndexString(chars, string(r)) >= 0 { - return i + for _, ch := range chars { + if r == ch { + return i + } } } return -1 @@ -304,19 +311,26 @@ func LastIndexAny(s []byte, chars string) int { } r, size := utf8.DecodeLastRune(s[:i]) i -= size - if r == utf8.RuneError { - for _, r = range chars { - if r == utf8.RuneError { + if r != utf8.RuneError { + // r is 2 to 4 bytes + if len(chars) == size { + if chars == string(r) { return i } + continue + } + // Use bytealg.IndexString for performance if available. + if bytealg.MaxLen >= size { + if bytealg.IndexString(chars, string(r)) >= 0 { + return i + } + continue } - continue } - // r is 2 to 4 bytes. Using strings.Index is more reasonable, but as the bytes - // package should not import the strings package, use bytealg.IndexString - // instead. And this does not seem to lose much performance. - if chars == string(r) || bytealg.IndexString(chars, string(r)) >= 0 { - return i + for _, ch := range chars { + if r == ch { + return i + } } } return -1 diff --git a/src/internal/bytealg/index_generic.go b/src/internal/bytealg/index_generic.go index 83345f1013..98e859f925 100644 --- a/src/internal/bytealg/index_generic.go +++ b/src/internal/bytealg/index_generic.go @@ -16,42 +16,8 @@ func Index(a, b []byte) int { // IndexString returns the index of the first instance of b in a, or -1 if b is not present in a. // Requires 2 <= len(b) <= MaxLen. -func IndexString(s, substr string) int { - // This is a partial copy of strings.Index, here because bytes.IndexAny and bytes.LastIndexAny - // call bytealg.IndexString. Some platforms have an optimized assembly version of this function. - // This implementation is used for those that do not. Although the pure Go implementation here - // works for the case of len(b) > MaxLen, we do not require that its assembly implementation also - // supports the case of len(b) > MaxLen. 
And we do not guarantee that this function supports the - // case of len(b) > MaxLen. - n := len(substr) - c0 := substr[0] - c1 := substr[1] - i := 0 - t := len(s) - n + 1 - fails := 0 - for i < t { - if s[i] != c0 { - o := IndexByteString(s[i:t], c0) - if o < 0 { - return -1 - } - i += o - } - if s[i+1] == c1 && s[i:i+n] == substr { - return i - } - i++ - fails++ - if fails >= 4+i>>4 && i < t { - // See comment in src/bytes/bytes.go. - j := IndexRabinKarp(s[i:], substr) - if j < 0 { - return -1 - } - return i + j - } - } - return -1 +func IndexString(a, b string) int { + panic("unimplemented") } // Cutover reports the number of failures of IndexByte we should tolerate From b6632f770f8c490554612a975840ecd05ebe8a32 Mon Sep 17 00:00:00 2001 From: Alberto Donizetti Date: Wed, 23 Sep 2020 14:41:33 +0200 Subject: [PATCH 012/281] cmd/compile: switch to typed for amd64 flag const rules Passes gotip build -toolexec 'toolstash -cmp' -a std Change-Id: I5a322c9a3922107aa3bfcddfae732dcd6e15ac3f Reviewed-on: https://go-review.googlesource.com/c/go/+/256738 Trust: Alberto Donizetti Run-TryBot: Alberto Donizetti TryBot-Result: Go Bot Reviewed-by: Keith Randall --- src/cmd/compile/internal/ssa/gen/AMD64.rules | 188 ++--- src/cmd/compile/internal/ssa/rewriteAMD64.go | 684 +++++++++---------- 2 files changed, 436 insertions(+), 436 deletions(-) diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules index 67c69674f7..fe91c34fe8 100644 --- a/src/cmd/compile/internal/ssa/gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules @@ -1286,116 +1286,116 @@ // TODO: DIVxU also. // Absorb flag constants into SBB ops. 
-(SBBQcarrymask (FlagEQ)) -> (MOVQconst [0]) -(SBBQcarrymask (FlagLT_ULT)) -> (MOVQconst [-1]) -(SBBQcarrymask (FlagLT_UGT)) -> (MOVQconst [0]) -(SBBQcarrymask (FlagGT_ULT)) -> (MOVQconst [-1]) -(SBBQcarrymask (FlagGT_UGT)) -> (MOVQconst [0]) -(SBBLcarrymask (FlagEQ)) -> (MOVLconst [0]) -(SBBLcarrymask (FlagLT_ULT)) -> (MOVLconst [-1]) -(SBBLcarrymask (FlagLT_UGT)) -> (MOVLconst [0]) -(SBBLcarrymask (FlagGT_ULT)) -> (MOVLconst [-1]) -(SBBLcarrymask (FlagGT_UGT)) -> (MOVLconst [0]) +(SBBQcarrymask (FlagEQ)) => (MOVQconst [0]) +(SBBQcarrymask (FlagLT_ULT)) => (MOVQconst [-1]) +(SBBQcarrymask (FlagLT_UGT)) => (MOVQconst [0]) +(SBBQcarrymask (FlagGT_ULT)) => (MOVQconst [-1]) +(SBBQcarrymask (FlagGT_UGT)) => (MOVQconst [0]) +(SBBLcarrymask (FlagEQ)) => (MOVLconst [0]) +(SBBLcarrymask (FlagLT_ULT)) => (MOVLconst [-1]) +(SBBLcarrymask (FlagLT_UGT)) => (MOVLconst [0]) +(SBBLcarrymask (FlagGT_ULT)) => (MOVLconst [-1]) +(SBBLcarrymask (FlagGT_UGT)) => (MOVLconst [0]) // Absorb flag constants into branches. 
-((EQ|LE|GE|ULE|UGE) (FlagEQ) yes no) -> (First yes no) -((NE|LT|GT|ULT|UGT) (FlagEQ) yes no) -> (First no yes) -((NE|LT|LE|ULT|ULE) (FlagLT_ULT) yes no) -> (First yes no) -((EQ|GT|GE|UGT|UGE) (FlagLT_ULT) yes no) -> (First no yes) -((NE|LT|LE|UGT|UGE) (FlagLT_UGT) yes no) -> (First yes no) -((EQ|GT|GE|ULT|ULE) (FlagLT_UGT) yes no) -> (First no yes) -((NE|GT|GE|ULT|ULE) (FlagGT_ULT) yes no) -> (First yes no) -((EQ|LT|LE|UGT|UGE) (FlagGT_ULT) yes no) -> (First no yes) -((NE|GT|GE|UGT|UGE) (FlagGT_UGT) yes no) -> (First yes no) -((EQ|LT|LE|ULT|ULE) (FlagGT_UGT) yes no) -> (First no yes) +((EQ|LE|GE|ULE|UGE) (FlagEQ) yes no) => (First yes no) +((NE|LT|GT|ULT|UGT) (FlagEQ) yes no) => (First no yes) +((NE|LT|LE|ULT|ULE) (FlagLT_ULT) yes no) => (First yes no) +((EQ|GT|GE|UGT|UGE) (FlagLT_ULT) yes no) => (First no yes) +((NE|LT|LE|UGT|UGE) (FlagLT_UGT) yes no) => (First yes no) +((EQ|GT|GE|ULT|ULE) (FlagLT_UGT) yes no) => (First no yes) +((NE|GT|GE|ULT|ULE) (FlagGT_ULT) yes no) => (First yes no) +((EQ|LT|LE|UGT|UGE) (FlagGT_ULT) yes no) => (First no yes) +((NE|GT|GE|UGT|UGE) (FlagGT_UGT) yes no) => (First yes no) +((EQ|LT|LE|ULT|ULE) (FlagGT_UGT) yes no) => (First no yes) // Absorb flag constants into SETxx ops. 
-((SETEQ|SETLE|SETGE|SETBE|SETAE) (FlagEQ)) -> (MOVLconst [1]) -((SETNE|SETL|SETG|SETB|SETA) (FlagEQ)) -> (MOVLconst [0]) -((SETNE|SETL|SETLE|SETB|SETBE) (FlagLT_ULT)) -> (MOVLconst [1]) -((SETEQ|SETG|SETGE|SETA|SETAE) (FlagLT_ULT)) -> (MOVLconst [0]) -((SETNE|SETL|SETLE|SETA|SETAE) (FlagLT_UGT)) -> (MOVLconst [1]) -((SETEQ|SETG|SETGE|SETB|SETBE) (FlagLT_UGT)) -> (MOVLconst [0]) -((SETNE|SETG|SETGE|SETB|SETBE) (FlagGT_ULT)) -> (MOVLconst [1]) -((SETEQ|SETL|SETLE|SETA|SETAE) (FlagGT_ULT)) -> (MOVLconst [0]) -((SETNE|SETG|SETGE|SETA|SETAE) (FlagGT_UGT)) -> (MOVLconst [1]) -((SETEQ|SETL|SETLE|SETB|SETBE) (FlagGT_UGT)) -> (MOVLconst [0]) +((SETEQ|SETLE|SETGE|SETBE|SETAE) (FlagEQ)) => (MOVLconst [1]) +((SETNE|SETL|SETG|SETB|SETA) (FlagEQ)) => (MOVLconst [0]) +((SETNE|SETL|SETLE|SETB|SETBE) (FlagLT_ULT)) => (MOVLconst [1]) +((SETEQ|SETG|SETGE|SETA|SETAE) (FlagLT_ULT)) => (MOVLconst [0]) +((SETNE|SETL|SETLE|SETA|SETAE) (FlagLT_UGT)) => (MOVLconst [1]) +((SETEQ|SETG|SETGE|SETB|SETBE) (FlagLT_UGT)) => (MOVLconst [0]) +((SETNE|SETG|SETGE|SETB|SETBE) (FlagGT_ULT)) => (MOVLconst [1]) +((SETEQ|SETL|SETLE|SETA|SETAE) (FlagGT_ULT)) => (MOVLconst [0]) +((SETNE|SETG|SETGE|SETA|SETAE) (FlagGT_UGT)) => (MOVLconst [1]) +((SETEQ|SETL|SETLE|SETB|SETBE) (FlagGT_UGT)) => (MOVLconst [0]) -(SETEQstore [off] {sym} ptr (FlagEQ) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) -(SETEQstore [off] {sym} ptr (FlagLT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) -(SETEQstore [off] {sym} ptr (FlagLT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) -(SETEQstore [off] {sym} ptr (FlagGT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) -(SETEQstore [off] {sym} ptr (FlagGT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETEQstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETEQstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETEQstore [off] {sym} ptr 
(FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETEQstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETEQstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) -(SETNEstore [off] {sym} ptr (FlagEQ) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) -(SETNEstore [off] {sym} ptr (FlagLT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) -(SETNEstore [off] {sym} ptr (FlagLT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) -(SETNEstore [off] {sym} ptr (FlagGT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) -(SETNEstore [off] {sym} ptr (FlagGT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETNEstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETNEstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETNEstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETNEstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETNEstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) -(SETLstore [off] {sym} ptr (FlagEQ) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) -(SETLstore [off] {sym} ptr (FlagLT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) -(SETLstore [off] {sym} ptr (FlagLT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) -(SETLstore [off] {sym} ptr (FlagGT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) -(SETLstore [off] {sym} ptr (FlagGT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETLstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETLstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETLstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [1]) 
mem) +(SETLstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETLstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) -(SETLEstore [off] {sym} ptr (FlagEQ) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) -(SETLEstore [off] {sym} ptr (FlagLT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) -(SETLEstore [off] {sym} ptr (FlagLT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) -(SETLEstore [off] {sym} ptr (FlagGT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) -(SETLEstore [off] {sym} ptr (FlagGT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETLEstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETLEstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETLEstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETLEstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETLEstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) -(SETGstore [off] {sym} ptr (FlagEQ) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) -(SETGstore [off] {sym} ptr (FlagLT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) -(SETGstore [off] {sym} ptr (FlagLT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) -(SETGstore [off] {sym} ptr (FlagGT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) -(SETGstore [off] {sym} ptr (FlagGT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETGstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETGstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETGstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETGstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore 
[off] {sym} ptr (MOVLconst [1]) mem) +(SETGstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) -(SETGEstore [off] {sym} ptr (FlagEQ) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) -(SETGEstore [off] {sym} ptr (FlagLT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) -(SETGEstore [off] {sym} ptr (FlagLT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) -(SETGEstore [off] {sym} ptr (FlagGT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) -(SETGEstore [off] {sym} ptr (FlagGT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETGEstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETGEstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETGEstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETGEstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETGEstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) -(SETBstore [off] {sym} ptr (FlagEQ) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) -(SETBstore [off] {sym} ptr (FlagLT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) -(SETBstore [off] {sym} ptr (FlagLT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) -(SETBstore [off] {sym} ptr (FlagGT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) -(SETBstore [off] {sym} ptr (FlagGT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETBstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETBstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETBstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETBstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETBstore [off] {sym} ptr 
(FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) -(SETBEstore [off] {sym} ptr (FlagEQ) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) -(SETBEstore [off] {sym} ptr (FlagLT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) -(SETBEstore [off] {sym} ptr (FlagLT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) -(SETBEstore [off] {sym} ptr (FlagGT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) -(SETBEstore [off] {sym} ptr (FlagGT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETBEstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETBEstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETBEstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETBEstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETBEstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) -(SETAstore [off] {sym} ptr (FlagEQ) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) -(SETAstore [off] {sym} ptr (FlagLT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) -(SETAstore [off] {sym} ptr (FlagLT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) -(SETAstore [off] {sym} ptr (FlagGT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) -(SETAstore [off] {sym} ptr (FlagGT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETAstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETAstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETAstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETAstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETAstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [1]) 
mem) -(SETAEstore [off] {sym} ptr (FlagEQ) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) -(SETAEstore [off] {sym} ptr (FlagLT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) -(SETAEstore [off] {sym} ptr (FlagLT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) -(SETAEstore [off] {sym} ptr (FlagGT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) -(SETAEstore [off] {sym} ptr (FlagGT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETAEstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETAEstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETAEstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETAEstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETAEstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) // Remove redundant *const ops -(ADDQconst [0] x) -> x -(ADDLconst [c] x) && int32(c)==0 -> x -(SUBQconst [0] x) -> x -(SUBLconst [c] x) && int32(c) == 0 -> x -(ANDQconst [0] _) -> (MOVQconst [0]) -(ANDLconst [c] _) && int32(c)==0 -> (MOVLconst [0]) -(ANDQconst [-1] x) -> x -(ANDLconst [c] x) && int32(c)==-1 -> x -(ORQconst [0] x) -> x -(ORLconst [c] x) && int32(c)==0 -> x -(ORQconst [-1] _) -> (MOVQconst [-1]) -(ORLconst [c] _) && int32(c)==-1 -> (MOVLconst [-1]) -(XORQconst [0] x) -> x -(XORLconst [c] x) && int32(c)==0 -> x +(ADDQconst [0] x) => x +(ADDLconst [c] x) && c==0 => x +(SUBQconst [0] x) => x +(SUBLconst [c] x) && c==0 => x +(ANDQconst [0] _) => (MOVQconst [0]) +(ANDLconst [c] _) && c==0 => (MOVLconst [0]) +(ANDQconst [-1] x) => x +(ANDLconst [c] x) && c==-1 => x +(ORQconst [0] x) => x +(ORLconst [c] x) && c==0 => x +(ORQconst [-1] _) => (MOVQconst [-1]) +(ORLconst [c] _) && c==-1 => (MOVLconst [-1]) +(XORQconst [0] x) => x +(XORLconst [c] x) && c==0 => x // TODO: since we got rid of the W/B 
versions, we might miss // things like (ANDLconst [0x100] x) which were formerly // (ANDBconst [0] x). Probably doesn't happen very often. diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index b50c8c3496..37a2a9a9ff 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1620,12 +1620,12 @@ func rewriteValueAMD64_OpAMD64ADDLconst(v *Value) bool { return true } // match: (ADDLconst [c] x) - // cond: int32(c)==0 + // cond: c==0 // result: x for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) x := v_0 - if !(int32(c) == 0) { + if !(c == 0) { break } v.copyOf(x) @@ -2236,7 +2236,7 @@ func rewriteValueAMD64_OpAMD64ADDQconst(v *Value) bool { // match: (ADDQconst [0] x) // result: x for { - if v.AuxInt != 0 { + if auxIntToInt32(v.AuxInt) != 0 { break } x := v_0 @@ -2832,24 +2832,24 @@ func rewriteValueAMD64_OpAMD64ANDLconst(v *Value) bool { return true } // match: (ANDLconst [c] _) - // cond: int32(c)==0 + // cond: c==0 // result: (MOVLconst [0]) for { - c := v.AuxInt - if !(int32(c) == 0) { + c := auxIntToInt32(v.AuxInt) + if !(c == 0) { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) return true } // match: (ANDLconst [c] x) - // cond: int32(c)==-1 + // cond: c==-1 // result: x for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) x := v_0 - if !(int32(c) == -1) { + if !(c == -1) { break } v.copyOf(x) @@ -3217,17 +3217,17 @@ func rewriteValueAMD64_OpAMD64ANDQconst(v *Value) bool { // match: (ANDQconst [0] _) // result: (MOVQconst [0]) for { - if v.AuxInt != 0 { + if auxIntToInt32(v.AuxInt) != 0 { break } v.reset(OpAMD64MOVQconst) - v.AuxInt = 0 + v.AuxInt = int64ToAuxInt(0) return true } // match: (ANDQconst [-1] x) // result: x for { - if v.AuxInt != -1 { + if auxIntToInt32(v.AuxInt) != -1 { break } x := v_0 @@ -18197,27 +18197,27 @@ func rewriteValueAMD64_OpAMD64ORLconst(v *Value) bool { return true } // match: (ORLconst [c] 
x) - // cond: int32(c)==0 + // cond: c==0 // result: x for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) x := v_0 - if !(int32(c) == 0) { + if !(c == 0) { break } v.copyOf(x) return true } // match: (ORLconst [c] _) - // cond: int32(c)==-1 + // cond: c==-1 // result: (MOVLconst [-1]) for { - c := v.AuxInt - if !(int32(c) == -1) { + c := auxIntToInt32(v.AuxInt) + if !(c == -1) { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = -1 + v.AuxInt = int32ToAuxInt(-1) return true } // match: (ORLconst [c] (MOVLconst [d])) @@ -19830,7 +19830,7 @@ func rewriteValueAMD64_OpAMD64ORQconst(v *Value) bool { // match: (ORQconst [0] x) // result: x for { - if v.AuxInt != 0 { + if auxIntToInt32(v.AuxInt) != 0 { break } x := v_0 @@ -19840,11 +19840,11 @@ func rewriteValueAMD64_OpAMD64ORQconst(v *Value) bool { // match: (ORQconst [-1] _) // result: (MOVQconst [-1]) for { - if v.AuxInt != -1 { + if auxIntToInt32(v.AuxInt) != -1 { break } v.reset(OpAMD64MOVQconst) - v.AuxInt = -1 + v.AuxInt = int64ToAuxInt(-1) return true } // match: (ORQconst [c] (MOVQconst [d])) @@ -21152,7 +21152,7 @@ func rewriteValueAMD64_OpAMD64SBBLcarrymask(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) return true } // match: (SBBLcarrymask (FlagLT_ULT)) @@ -21162,7 +21162,7 @@ func rewriteValueAMD64_OpAMD64SBBLcarrymask(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = -1 + v.AuxInt = int32ToAuxInt(-1) return true } // match: (SBBLcarrymask (FlagLT_UGT)) @@ -21172,7 +21172,7 @@ func rewriteValueAMD64_OpAMD64SBBLcarrymask(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) return true } // match: (SBBLcarrymask (FlagGT_ULT)) @@ -21182,7 +21182,7 @@ func rewriteValueAMD64_OpAMD64SBBLcarrymask(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = -1 + v.AuxInt = int32ToAuxInt(-1) return true } // match: (SBBLcarrymask (FlagGT_UGT)) @@ -21192,7 +21192,7 @@ func 
rewriteValueAMD64_OpAMD64SBBLcarrymask(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) return true } return false @@ -21242,7 +21242,7 @@ func rewriteValueAMD64_OpAMD64SBBQcarrymask(v *Value) bool { break } v.reset(OpAMD64MOVQconst) - v.AuxInt = 0 + v.AuxInt = int64ToAuxInt(0) return true } // match: (SBBQcarrymask (FlagLT_ULT)) @@ -21252,7 +21252,7 @@ func rewriteValueAMD64_OpAMD64SBBQcarrymask(v *Value) bool { break } v.reset(OpAMD64MOVQconst) - v.AuxInt = -1 + v.AuxInt = int64ToAuxInt(-1) return true } // match: (SBBQcarrymask (FlagLT_UGT)) @@ -21262,7 +21262,7 @@ func rewriteValueAMD64_OpAMD64SBBQcarrymask(v *Value) bool { break } v.reset(OpAMD64MOVQconst) - v.AuxInt = 0 + v.AuxInt = int64ToAuxInt(0) return true } // match: (SBBQcarrymask (FlagGT_ULT)) @@ -21272,7 +21272,7 @@ func rewriteValueAMD64_OpAMD64SBBQcarrymask(v *Value) bool { break } v.reset(OpAMD64MOVQconst) - v.AuxInt = -1 + v.AuxInt = int64ToAuxInt(-1) return true } // match: (SBBQcarrymask (FlagGT_UGT)) @@ -21282,7 +21282,7 @@ func rewriteValueAMD64_OpAMD64SBBQcarrymask(v *Value) bool { break } v.reset(OpAMD64MOVQconst) - v.AuxInt = 0 + v.AuxInt = int64ToAuxInt(0) return true } return false @@ -21325,7 +21325,7 @@ func rewriteValueAMD64_OpAMD64SETA(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) return true } // match: (SETA (FlagLT_ULT)) @@ -21335,7 +21335,7 @@ func rewriteValueAMD64_OpAMD64SETA(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) return true } // match: (SETA (FlagLT_UGT)) @@ -21345,7 +21345,7 @@ func rewriteValueAMD64_OpAMD64SETA(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 1 + v.AuxInt = int32ToAuxInt(1) return true } // match: (SETA (FlagGT_ULT)) @@ -21355,7 +21355,7 @@ func rewriteValueAMD64_OpAMD64SETA(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) return true } // 
match: (SETA (FlagGT_UGT)) @@ -21365,7 +21365,7 @@ func rewriteValueAMD64_OpAMD64SETA(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 1 + v.AuxInt = int32ToAuxInt(1) return true } return false @@ -21446,7 +21446,7 @@ func rewriteValueAMD64_OpAMD64SETAE(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 1 + v.AuxInt = int32ToAuxInt(1) return true } // match: (SETAE (FlagLT_ULT)) @@ -21456,7 +21456,7 @@ func rewriteValueAMD64_OpAMD64SETAE(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) return true } // match: (SETAE (FlagLT_UGT)) @@ -21466,7 +21466,7 @@ func rewriteValueAMD64_OpAMD64SETAE(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 1 + v.AuxInt = int32ToAuxInt(1) return true } // match: (SETAE (FlagGT_ULT)) @@ -21476,7 +21476,7 @@ func rewriteValueAMD64_OpAMD64SETAE(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) return true } // match: (SETAE (FlagGT_UGT)) @@ -21486,7 +21486,7 @@ func rewriteValueAMD64_OpAMD64SETAE(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 1 + v.AuxInt = int32ToAuxInt(1) return true } return false @@ -21562,90 +21562,90 @@ func rewriteValueAMD64_OpAMD64SETAEstore(v *Value) bool { // match: (SETAEstore [off] {sym} ptr (FlagEQ) mem) // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64FlagEQ { break } mem := v_2 v.reset(OpAMD64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) - v0.AuxInt = 1 + v0.AuxInt = int32ToAuxInt(1) v.AddArg3(ptr, v0, mem) return true } // match: (SETAEstore [off] {sym} ptr (FlagLT_ULT) mem) // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := 
auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64FlagLT_ULT { break } mem := v_2 v.reset(OpAMD64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) - v0.AuxInt = 0 + v0.AuxInt = int32ToAuxInt(0) v.AddArg3(ptr, v0, mem) return true } // match: (SETAEstore [off] {sym} ptr (FlagLT_UGT) mem) // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64FlagLT_UGT { break } mem := v_2 v.reset(OpAMD64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) - v0.AuxInt = 1 + v0.AuxInt = int32ToAuxInt(1) v.AddArg3(ptr, v0, mem) return true } // match: (SETAEstore [off] {sym} ptr (FlagGT_ULT) mem) // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64FlagGT_ULT { break } mem := v_2 v.reset(OpAMD64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) - v0.AuxInt = 0 + v0.AuxInt = int32ToAuxInt(0) v.AddArg3(ptr, v0, mem) return true } // match: (SETAEstore [off] {sym} ptr (FlagGT_UGT) mem) // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64FlagGT_UGT { break } mem := v_2 v.reset(OpAMD64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) - v0.AuxInt = 1 + v0.AuxInt = int32ToAuxInt(1) v.AddArg3(ptr, v0, mem) return true } @@ -21722,90 +21722,90 @@ func rewriteValueAMD64_OpAMD64SETAstore(v *Value) bool { // 
match: (SETAstore [off] {sym} ptr (FlagEQ) mem) // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64FlagEQ { break } mem := v_2 v.reset(OpAMD64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) - v0.AuxInt = 0 + v0.AuxInt = int32ToAuxInt(0) v.AddArg3(ptr, v0, mem) return true } // match: (SETAstore [off] {sym} ptr (FlagLT_ULT) mem) // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64FlagLT_ULT { break } mem := v_2 v.reset(OpAMD64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) - v0.AuxInt = 0 + v0.AuxInt = int32ToAuxInt(0) v.AddArg3(ptr, v0, mem) return true } // match: (SETAstore [off] {sym} ptr (FlagLT_UGT) mem) // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64FlagLT_UGT { break } mem := v_2 v.reset(OpAMD64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) - v0.AuxInt = 1 + v0.AuxInt = int32ToAuxInt(1) v.AddArg3(ptr, v0, mem) return true } // match: (SETAstore [off] {sym} ptr (FlagGT_ULT) mem) // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64FlagGT_ULT { break } mem := v_2 v.reset(OpAMD64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, 
typ.UInt8) - v0.AuxInt = 0 + v0.AuxInt = int32ToAuxInt(0) v.AddArg3(ptr, v0, mem) return true } // match: (SETAstore [off] {sym} ptr (FlagGT_UGT) mem) // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64FlagGT_UGT { break } mem := v_2 v.reset(OpAMD64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) - v0.AuxInt = 1 + v0.AuxInt = int32ToAuxInt(1) v.AddArg3(ptr, v0, mem) return true } @@ -21911,7 +21911,7 @@ func rewriteValueAMD64_OpAMD64SETB(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) return true } // match: (SETB (FlagLT_ULT)) @@ -21921,7 +21921,7 @@ func rewriteValueAMD64_OpAMD64SETB(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 1 + v.AuxInt = int32ToAuxInt(1) return true } // match: (SETB (FlagLT_UGT)) @@ -21931,7 +21931,7 @@ func rewriteValueAMD64_OpAMD64SETB(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) return true } // match: (SETB (FlagGT_ULT)) @@ -21941,7 +21941,7 @@ func rewriteValueAMD64_OpAMD64SETB(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 1 + v.AuxInt = int32ToAuxInt(1) return true } // match: (SETB (FlagGT_UGT)) @@ -21951,7 +21951,7 @@ func rewriteValueAMD64_OpAMD64SETB(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) return true } return false @@ -21976,7 +21976,7 @@ func rewriteValueAMD64_OpAMD64SETBE(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 1 + v.AuxInt = int32ToAuxInt(1) return true } // match: (SETBE (FlagLT_ULT)) @@ -21986,7 +21986,7 @@ func rewriteValueAMD64_OpAMD64SETBE(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 1 + v.AuxInt = int32ToAuxInt(1) return true } // match: (SETBE 
(FlagLT_UGT)) @@ -21996,7 +21996,7 @@ func rewriteValueAMD64_OpAMD64SETBE(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) return true } // match: (SETBE (FlagGT_ULT)) @@ -22006,7 +22006,7 @@ func rewriteValueAMD64_OpAMD64SETBE(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 1 + v.AuxInt = int32ToAuxInt(1) return true } // match: (SETBE (FlagGT_UGT)) @@ -22016,7 +22016,7 @@ func rewriteValueAMD64_OpAMD64SETBE(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) return true } return false @@ -22092,90 +22092,90 @@ func rewriteValueAMD64_OpAMD64SETBEstore(v *Value) bool { // match: (SETBEstore [off] {sym} ptr (FlagEQ) mem) // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64FlagEQ { break } mem := v_2 v.reset(OpAMD64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) - v0.AuxInt = 1 + v0.AuxInt = int32ToAuxInt(1) v.AddArg3(ptr, v0, mem) return true } // match: (SETBEstore [off] {sym} ptr (FlagLT_ULT) mem) // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64FlagLT_ULT { break } mem := v_2 v.reset(OpAMD64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) - v0.AuxInt = 1 + v0.AuxInt = int32ToAuxInt(1) v.AddArg3(ptr, v0, mem) return true } // match: (SETBEstore [off] {sym} ptr (FlagLT_UGT) mem) // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64FlagLT_UGT { break } mem := 
v_2 v.reset(OpAMD64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) - v0.AuxInt = 0 + v0.AuxInt = int32ToAuxInt(0) v.AddArg3(ptr, v0, mem) return true } // match: (SETBEstore [off] {sym} ptr (FlagGT_ULT) mem) // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64FlagGT_ULT { break } mem := v_2 v.reset(OpAMD64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) - v0.AuxInt = 1 + v0.AuxInt = int32ToAuxInt(1) v.AddArg3(ptr, v0, mem) return true } // match: (SETBEstore [off] {sym} ptr (FlagGT_UGT) mem) // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64FlagGT_UGT { break } mem := v_2 v.reset(OpAMD64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) - v0.AuxInt = 0 + v0.AuxInt = int32ToAuxInt(0) v.AddArg3(ptr, v0, mem) return true } @@ -22252,90 +22252,90 @@ func rewriteValueAMD64_OpAMD64SETBstore(v *Value) bool { // match: (SETBstore [off] {sym} ptr (FlagEQ) mem) // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64FlagEQ { break } mem := v_2 v.reset(OpAMD64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) - v0.AuxInt = 0 + v0.AuxInt = int32ToAuxInt(0) v.AddArg3(ptr, v0, mem) return true } // match: (SETBstore [off] {sym} ptr (FlagLT_ULT) mem) // result: (MOVBstore [off] {sym} 
ptr (MOVLconst [1]) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64FlagLT_ULT { break } mem := v_2 v.reset(OpAMD64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) - v0.AuxInt = 1 + v0.AuxInt = int32ToAuxInt(1) v.AddArg3(ptr, v0, mem) return true } // match: (SETBstore [off] {sym} ptr (FlagLT_UGT) mem) // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64FlagLT_UGT { break } mem := v_2 v.reset(OpAMD64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) - v0.AuxInt = 0 + v0.AuxInt = int32ToAuxInt(0) v.AddArg3(ptr, v0, mem) return true } // match: (SETBstore [off] {sym} ptr (FlagGT_ULT) mem) // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64FlagGT_ULT { break } mem := v_2 v.reset(OpAMD64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) - v0.AuxInt = 1 + v0.AuxInt = int32ToAuxInt(1) v.AddArg3(ptr, v0, mem) return true } // match: (SETBstore [off] {sym} ptr (FlagGT_UGT) mem) // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64FlagGT_UGT { break } mem := v_2 v.reset(OpAMD64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) - v0.AuxInt = 0 + v0.AuxInt = int32ToAuxInt(0) v.AddArg3(ptr, v0, 
mem) return true } @@ -22706,7 +22706,7 @@ func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 1 + v.AuxInt = int32ToAuxInt(1) return true } // match: (SETEQ (FlagLT_ULT)) @@ -22716,7 +22716,7 @@ func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) return true } // match: (SETEQ (FlagLT_UGT)) @@ -22726,7 +22726,7 @@ func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) return true } // match: (SETEQ (FlagGT_ULT)) @@ -22736,7 +22736,7 @@ func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) return true } // match: (SETEQ (FlagGT_UGT)) @@ -22746,7 +22746,7 @@ func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) return true } return false @@ -23244,90 +23244,90 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { // match: (SETEQstore [off] {sym} ptr (FlagEQ) mem) // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64FlagEQ { break } mem := v_2 v.reset(OpAMD64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) - v0.AuxInt = 1 + v0.AuxInt = int32ToAuxInt(1) v.AddArg3(ptr, v0, mem) return true } // match: (SETEQstore [off] {sym} ptr (FlagLT_ULT) mem) // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64FlagLT_ULT { break } mem := v_2 v.reset(OpAMD64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 
:= b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) - v0.AuxInt = 0 + v0.AuxInt = int32ToAuxInt(0) v.AddArg3(ptr, v0, mem) return true } // match: (SETEQstore [off] {sym} ptr (FlagLT_UGT) mem) // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64FlagLT_UGT { break } mem := v_2 v.reset(OpAMD64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) - v0.AuxInt = 0 + v0.AuxInt = int32ToAuxInt(0) v.AddArg3(ptr, v0, mem) return true } // match: (SETEQstore [off] {sym} ptr (FlagGT_ULT) mem) // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64FlagGT_ULT { break } mem := v_2 v.reset(OpAMD64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) - v0.AuxInt = 0 + v0.AuxInt = int32ToAuxInt(0) v.AddArg3(ptr, v0, mem) return true } // match: (SETEQstore [off] {sym} ptr (FlagGT_UGT) mem) // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64FlagGT_UGT { break } mem := v_2 v.reset(OpAMD64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) - v0.AuxInt = 0 + v0.AuxInt = int32ToAuxInt(0) v.AddArg3(ptr, v0, mem) return true } @@ -23353,7 +23353,7 @@ func rewriteValueAMD64_OpAMD64SETG(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) return true } // match: (SETG (FlagLT_ULT)) @@ -23363,7 +23363,7 @@ func rewriteValueAMD64_OpAMD64SETG(v *Value) bool { break } 
v.reset(OpAMD64MOVLconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) return true } // match: (SETG (FlagLT_UGT)) @@ -23373,7 +23373,7 @@ func rewriteValueAMD64_OpAMD64SETG(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) return true } // match: (SETG (FlagGT_ULT)) @@ -23383,7 +23383,7 @@ func rewriteValueAMD64_OpAMD64SETG(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 1 + v.AuxInt = int32ToAuxInt(1) return true } // match: (SETG (FlagGT_UGT)) @@ -23393,7 +23393,7 @@ func rewriteValueAMD64_OpAMD64SETG(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 1 + v.AuxInt = int32ToAuxInt(1) return true } return false @@ -23418,7 +23418,7 @@ func rewriteValueAMD64_OpAMD64SETGE(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 1 + v.AuxInt = int32ToAuxInt(1) return true } // match: (SETGE (FlagLT_ULT)) @@ -23428,7 +23428,7 @@ func rewriteValueAMD64_OpAMD64SETGE(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) return true } // match: (SETGE (FlagLT_UGT)) @@ -23438,7 +23438,7 @@ func rewriteValueAMD64_OpAMD64SETGE(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) return true } // match: (SETGE (FlagGT_ULT)) @@ -23448,7 +23448,7 @@ func rewriteValueAMD64_OpAMD64SETGE(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 1 + v.AuxInt = int32ToAuxInt(1) return true } // match: (SETGE (FlagGT_UGT)) @@ -23458,7 +23458,7 @@ func rewriteValueAMD64_OpAMD64SETGE(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 1 + v.AuxInt = int32ToAuxInt(1) return true } return false @@ -23534,90 +23534,90 @@ func rewriteValueAMD64_OpAMD64SETGEstore(v *Value) bool { // match: (SETGEstore [off] {sym} ptr (FlagEQ) mem) // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != 
OpAMD64FlagEQ { break } mem := v_2 v.reset(OpAMD64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) - v0.AuxInt = 1 + v0.AuxInt = int32ToAuxInt(1) v.AddArg3(ptr, v0, mem) return true } // match: (SETGEstore [off] {sym} ptr (FlagLT_ULT) mem) // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64FlagLT_ULT { break } mem := v_2 v.reset(OpAMD64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) - v0.AuxInt = 0 + v0.AuxInt = int32ToAuxInt(0) v.AddArg3(ptr, v0, mem) return true } // match: (SETGEstore [off] {sym} ptr (FlagLT_UGT) mem) // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64FlagLT_UGT { break } mem := v_2 v.reset(OpAMD64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) - v0.AuxInt = 0 + v0.AuxInt = int32ToAuxInt(0) v.AddArg3(ptr, v0, mem) return true } // match: (SETGEstore [off] {sym} ptr (FlagGT_ULT) mem) // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64FlagGT_ULT { break } mem := v_2 v.reset(OpAMD64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) - v0.AuxInt = 1 + v0.AuxInt = int32ToAuxInt(1) v.AddArg3(ptr, v0, mem) return true } // match: (SETGEstore [off] {sym} ptr (FlagGT_UGT) mem) // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) for { - off := 
v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64FlagGT_UGT { break } mem := v_2 v.reset(OpAMD64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) - v0.AuxInt = 1 + v0.AuxInt = int32ToAuxInt(1) v.AddArg3(ptr, v0, mem) return true } @@ -23694,90 +23694,90 @@ func rewriteValueAMD64_OpAMD64SETGstore(v *Value) bool { // match: (SETGstore [off] {sym} ptr (FlagEQ) mem) // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64FlagEQ { break } mem := v_2 v.reset(OpAMD64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) - v0.AuxInt = 0 + v0.AuxInt = int32ToAuxInt(0) v.AddArg3(ptr, v0, mem) return true } // match: (SETGstore [off] {sym} ptr (FlagLT_ULT) mem) // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64FlagLT_ULT { break } mem := v_2 v.reset(OpAMD64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) - v0.AuxInt = 0 + v0.AuxInt = int32ToAuxInt(0) v.AddArg3(ptr, v0, mem) return true } // match: (SETGstore [off] {sym} ptr (FlagLT_UGT) mem) // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64FlagLT_UGT { break } mem := v_2 v.reset(OpAMD64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) - v0.AuxInt = 0 + v0.AuxInt = 
int32ToAuxInt(0) v.AddArg3(ptr, v0, mem) return true } // match: (SETGstore [off] {sym} ptr (FlagGT_ULT) mem) // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64FlagGT_ULT { break } mem := v_2 v.reset(OpAMD64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) - v0.AuxInt = 1 + v0.AuxInt = int32ToAuxInt(1) v.AddArg3(ptr, v0, mem) return true } // match: (SETGstore [off] {sym} ptr (FlagGT_UGT) mem) // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64FlagGT_UGT { break } mem := v_2 v.reset(OpAMD64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) - v0.AuxInt = 1 + v0.AuxInt = int32ToAuxInt(1) v.AddArg3(ptr, v0, mem) return true } @@ -23803,7 +23803,7 @@ func rewriteValueAMD64_OpAMD64SETL(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) return true } // match: (SETL (FlagLT_ULT)) @@ -23813,7 +23813,7 @@ func rewriteValueAMD64_OpAMD64SETL(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 1 + v.AuxInt = int32ToAuxInt(1) return true } // match: (SETL (FlagLT_UGT)) @@ -23823,7 +23823,7 @@ func rewriteValueAMD64_OpAMD64SETL(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 1 + v.AuxInt = int32ToAuxInt(1) return true } // match: (SETL (FlagGT_ULT)) @@ -23833,7 +23833,7 @@ func rewriteValueAMD64_OpAMD64SETL(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) return true } // match: (SETL (FlagGT_UGT)) @@ -23843,7 +23843,7 @@ func rewriteValueAMD64_OpAMD64SETL(v *Value) bool { break } 
v.reset(OpAMD64MOVLconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) return true } return false @@ -23868,7 +23868,7 @@ func rewriteValueAMD64_OpAMD64SETLE(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 1 + v.AuxInt = int32ToAuxInt(1) return true } // match: (SETLE (FlagLT_ULT)) @@ -23878,7 +23878,7 @@ func rewriteValueAMD64_OpAMD64SETLE(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 1 + v.AuxInt = int32ToAuxInt(1) return true } // match: (SETLE (FlagLT_UGT)) @@ -23888,7 +23888,7 @@ func rewriteValueAMD64_OpAMD64SETLE(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 1 + v.AuxInt = int32ToAuxInt(1) return true } // match: (SETLE (FlagGT_ULT)) @@ -23898,7 +23898,7 @@ func rewriteValueAMD64_OpAMD64SETLE(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) return true } // match: (SETLE (FlagGT_UGT)) @@ -23908,7 +23908,7 @@ func rewriteValueAMD64_OpAMD64SETLE(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) return true } return false @@ -23984,90 +23984,90 @@ func rewriteValueAMD64_OpAMD64SETLEstore(v *Value) bool { // match: (SETLEstore [off] {sym} ptr (FlagEQ) mem) // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64FlagEQ { break } mem := v_2 v.reset(OpAMD64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) - v0.AuxInt = 1 + v0.AuxInt = int32ToAuxInt(1) v.AddArg3(ptr, v0, mem) return true } // match: (SETLEstore [off] {sym} ptr (FlagLT_ULT) mem) // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64FlagLT_ULT { break } mem := v_2 v.reset(OpAMD64MOVBstore) - v.AuxInt 
= off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) - v0.AuxInt = 1 + v0.AuxInt = int32ToAuxInt(1) v.AddArg3(ptr, v0, mem) return true } // match: (SETLEstore [off] {sym} ptr (FlagLT_UGT) mem) // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64FlagLT_UGT { break } mem := v_2 v.reset(OpAMD64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) - v0.AuxInt = 1 + v0.AuxInt = int32ToAuxInt(1) v.AddArg3(ptr, v0, mem) return true } // match: (SETLEstore [off] {sym} ptr (FlagGT_ULT) mem) // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64FlagGT_ULT { break } mem := v_2 v.reset(OpAMD64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) - v0.AuxInt = 0 + v0.AuxInt = int32ToAuxInt(0) v.AddArg3(ptr, v0, mem) return true } // match: (SETLEstore [off] {sym} ptr (FlagGT_UGT) mem) // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64FlagGT_UGT { break } mem := v_2 v.reset(OpAMD64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) - v0.AuxInt = 0 + v0.AuxInt = int32ToAuxInt(0) v.AddArg3(ptr, v0, mem) return true } @@ -24144,90 +24144,90 @@ func rewriteValueAMD64_OpAMD64SETLstore(v *Value) bool { // match: (SETLstore [off] {sym} ptr (FlagEQ) mem) // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) for { - 
off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64FlagEQ { break } mem := v_2 v.reset(OpAMD64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) - v0.AuxInt = 0 + v0.AuxInt = int32ToAuxInt(0) v.AddArg3(ptr, v0, mem) return true } // match: (SETLstore [off] {sym} ptr (FlagLT_ULT) mem) // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64FlagLT_ULT { break } mem := v_2 v.reset(OpAMD64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) - v0.AuxInt = 1 + v0.AuxInt = int32ToAuxInt(1) v.AddArg3(ptr, v0, mem) return true } // match: (SETLstore [off] {sym} ptr (FlagLT_UGT) mem) // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64FlagLT_UGT { break } mem := v_2 v.reset(OpAMD64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) - v0.AuxInt = 1 + v0.AuxInt = int32ToAuxInt(1) v.AddArg3(ptr, v0, mem) return true } // match: (SETLstore [off] {sym} ptr (FlagGT_ULT) mem) // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64FlagGT_ULT { break } mem := v_2 v.reset(OpAMD64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) - v0.AuxInt = 0 + v0.AuxInt = int32ToAuxInt(0) v.AddArg3(ptr, v0, mem) return true } // match: (SETLstore 
[off] {sym} ptr (FlagGT_UGT) mem) // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64FlagGT_UGT { break } mem := v_2 v.reset(OpAMD64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) - v0.AuxInt = 0 + v0.AuxInt = int32ToAuxInt(0) v.AddArg3(ptr, v0, mem) return true } @@ -24622,7 +24622,7 @@ func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) return true } // match: (SETNE (FlagLT_ULT)) @@ -24632,7 +24632,7 @@ func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 1 + v.AuxInt = int32ToAuxInt(1) return true } // match: (SETNE (FlagLT_UGT)) @@ -24642,7 +24642,7 @@ func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 1 + v.AuxInt = int32ToAuxInt(1) return true } // match: (SETNE (FlagGT_ULT)) @@ -24652,7 +24652,7 @@ func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 1 + v.AuxInt = int32ToAuxInt(1) return true } // match: (SETNE (FlagGT_UGT)) @@ -24662,7 +24662,7 @@ func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 1 + v.AuxInt = int32ToAuxInt(1) return true } return false @@ -25160,90 +25160,90 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { // match: (SETNEstore [off] {sym} ptr (FlagEQ) mem) // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64FlagEQ { break } mem := v_2 v.reset(OpAMD64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, 
typ.UInt8) - v0.AuxInt = 0 + v0.AuxInt = int32ToAuxInt(0) v.AddArg3(ptr, v0, mem) return true } // match: (SETNEstore [off] {sym} ptr (FlagLT_ULT) mem) // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64FlagLT_ULT { break } mem := v_2 v.reset(OpAMD64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) - v0.AuxInt = 1 + v0.AuxInt = int32ToAuxInt(1) v.AddArg3(ptr, v0, mem) return true } // match: (SETNEstore [off] {sym} ptr (FlagLT_UGT) mem) // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64FlagLT_UGT { break } mem := v_2 v.reset(OpAMD64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) - v0.AuxInt = 1 + v0.AuxInt = int32ToAuxInt(1) v.AddArg3(ptr, v0, mem) return true } // match: (SETNEstore [off] {sym} ptr (FlagGT_ULT) mem) // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64FlagGT_ULT { break } mem := v_2 v.reset(OpAMD64MOVBstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) - v0.AuxInt = 1 + v0.AuxInt = int32ToAuxInt(1) v.AddArg3(ptr, v0, mem) return true } // match: (SETNEstore [off] {sym} ptr (FlagGT_UGT) mem) // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64FlagGT_UGT { break } mem := v_2 v.reset(OpAMD64MOVBstore) - v.AuxInt = off - 
v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8) - v0.AuxInt = 1 + v0.AuxInt = int32ToAuxInt(1) v.AddArg3(ptr, v0, mem) return true } @@ -26409,12 +26409,12 @@ func rewriteValueAMD64_OpAMD64SUBL(v *Value) bool { func rewriteValueAMD64_OpAMD64SUBLconst(v *Value) bool { v_0 := v.Args[0] // match: (SUBLconst [c] x) - // cond: int32(c) == 0 + // cond: c==0 // result: x for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) x := v_0 - if !(int32(c) == 0) { + if !(c == 0) { break } v.copyOf(x) @@ -26657,7 +26657,7 @@ func rewriteValueAMD64_OpAMD64SUBQconst(v *Value) bool { // match: (SUBQconst [0] x) // result: x for { - if v.AuxInt != 0 { + if auxIntToInt32(v.AuxInt) != 0 { break } x := v_0 @@ -27859,12 +27859,12 @@ func rewriteValueAMD64_OpAMD64XORLconst(v *Value) bool { return true } // match: (XORLconst [c] x) - // cond: int32(c)==0 + // cond: c==0 // result: x for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) x := v_0 - if !(int32(c) == 0) { + if !(c == 0) { break } v.copyOf(x) @@ -28231,7 +28231,7 @@ func rewriteValueAMD64_OpAMD64XORQconst(v *Value) bool { // match: (XORQconst [0] x) // result: x for { - if v.AuxInt != 0 { + if auxIntToInt32(v.AuxInt) != 0 { break } x := v_0 From d54a9a9c42e751a020308cae296add426b56d0f0 Mon Sep 17 00:00:00 2001 From: SparrowLii Date: Tue, 25 Aug 2020 16:33:50 +0800 Subject: [PATCH 013/281] math/big: replace division with multiplication by reciprocal word MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Division is much slower than multiplication. And the method of using multiplication by multiplying reciprocal and replacing division with it can increase the speed of divWVW algorithm by three times,and at the same time increase the speed of nats division. 
The benchmark test on arm64 is as follows: name old time/op new time/op delta DivWVW/1-4 13.1ns ± 4% 13.3ns ± 4% ~ (p=0.444 n=5+5) DivWVW/2-4 48.6ns ± 1% 51.2ns ± 2% +5.39% (p=0.008 n=5+5) DivWVW/3-4 82.0ns ± 1% 69.7ns ± 1% -15.03% (p=0.008 n=5+5) DivWVW/4-4 116ns ± 1% 71ns ± 2% -38.88% (p=0.008 n=5+5) DivWVW/5-4 152ns ± 1% 84ns ± 4% -44.70% (p=0.008 n=5+5) DivWVW/10-4 319ns ± 1% 155ns ± 4% -51.50% (p=0.008 n=5+5) DivWVW/100-4 3.44µs ± 3% 1.30µs ± 8% -62.30% (p=0.008 n=5+5) DivWVW/1000-4 33.8µs ± 0% 10.9µs ± 1% -67.74% (p=0.008 n=5+5) DivWVW/10000-4 343µs ± 4% 111µs ± 5% -67.63% (p=0.008 n=5+5) DivWVW/100000-4 3.35ms ± 1% 1.25ms ± 3% -62.79% (p=0.008 n=5+5) QuoRem-4 3.08µs ± 2% 2.21µs ± 4% -28.40% (p=0.008 n=5+5) ModSqrt225_Tonelli-4 444µs ± 2% 457µs ± 3% ~ (p=0.095 n=5+5) ModSqrt225_3Mod4-4 136µs ± 1% 138µs ± 3% ~ (p=0.151 n=5+5) ModSqrt231_Tonelli-4 473µs ± 3% 483µs ± 4% ~ (p=0.548 n=5+5) ModSqrt231_5Mod8-4 164µs ± 9% 169µs ±12% ~ (p=0.421 n=5+5) Sqrt-4 36.8µs ± 1% 28.6µs ± 0% -22.17% (p=0.016 n=5+4) Div/20/10-4 50.0ns ± 3% 51.3ns ± 6% ~ (p=0.238 n=5+5) Div/40/20-4 49.8ns ± 2% 51.3ns ± 6% ~ (p=0.222 n=5+5) Div/100/50-4 85.8ns ± 4% 86.5ns ± 5% ~ (p=0.246 n=5+5) Div/200/100-4 335ns ± 3% 296ns ± 2% -11.60% (p=0.008 n=5+5) Div/400/200-4 442ns ± 2% 359ns ± 5% -18.81% (p=0.008 n=5+5) Div/1000/500-4 858ns ± 3% 643ns ± 6% -25.06% (p=0.008 n=5+5) Div/2000/1000-4 1.70µs ± 3% 1.28µs ± 4% -24.80% (p=0.008 n=5+5) Div/20000/10000-4 45.0µs ± 5% 41.8µs ± 4% -7.17% (p=0.016 n=5+5) Div/200000/100000-4 1.51ms ± 7% 1.43ms ± 3% -5.42% (p=0.016 n=5+5) Div/2000000/1000000-4 57.6ms ± 4% 57.5ms ± 3% ~ (p=1.000 n=5+5) Div/20000000/10000000-4 2.08s ± 3% 2.04s ± 1% ~ (p=0.095 n=5+5) name old speed new speed delta DivWVW/1-4 4.87GB/s ± 4% 4.80GB/s ± 4% ~ (p=0.310 n=5+5) DivWVW/2-4 2.63GB/s ± 1% 2.50GB/s ± 2% -5.07% (p=0.008 n=5+5) DivWVW/3-4 2.34GB/s ± 1% 2.76GB/s ± 1% +17.70% (p=0.008 n=5+5) DivWVW/4-4 2.21GB/s ± 1% 3.61GB/s ± 2% +63.42% (p=0.008 n=5+5) DivWVW/5-4 2.10GB/s ± 2% 3.81GB/s ± 
4% +80.89% (p=0.008 n=5+5) DivWVW/10-4 2.01GB/s ± 0% 4.13GB/s ± 4% +105.91% (p=0.008 n=5+5) DivWVW/100-4 1.86GB/s ± 2% 4.95GB/s ± 7% +165.63% (p=0.008 n=5+5) DivWVW/1000-4 1.89GB/s ± 0% 5.86GB/s ± 1% +209.96% (p=0.008 n=5+5) DivWVW/10000-4 1.87GB/s ± 4% 5.76GB/s ± 5% +208.96% (p=0.008 n=5+5) DivWVW/100000-4 1.91GB/s ± 1% 5.14GB/s ± 3% +168.85% (p=0.008 n=5+5) Change-Id: I049f1196562b20800e6ef8a6493fd147f93ad830 Reviewed-on: https://go-review.googlesource.com/c/go/+/250417 Trust: Giovanni Bajo Trust: Keith Randall Run-TryBot: Giovanni Bajo TryBot-Result: Go Bot Reviewed-by: Keith Randall --- src/cmd/compile/internal/gc/ssa.go | 5 -- src/math/big/arith.go | 93 ++++++++++++++++++++++++++---- src/math/big/arith_386.s | 27 --------- src/math/big/arith_amd64.s | 26 --------- src/math/big/arith_arm.s | 11 ---- src/math/big/arith_arm64.s | 9 +-- src/math/big/arith_decl.go | 2 - src/math/big/arith_decl_pure.go | 8 --- src/math/big/arith_mips64x.s | 5 -- src/math/big/arith_mipsx.s | 5 -- src/math/big/arith_ppc64x.s | 40 ------------- src/math/big/arith_riscv64.s | 5 -- src/math/big/arith_s390x.s | 33 ----------- src/math/big/arith_test.go | 54 ++++++++++++++++- src/math/big/arith_wasm.s | 5 -- src/math/big/nat.go | 3 +- 16 files changed, 138 insertions(+), 193 deletions(-) diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index d0b3e8df94..815ff7f99f 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -4022,11 +4022,6 @@ func init() { return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1]) }, sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64LE, sys.ArchPPC64, sys.ArchS390X) - add("math/big", "divWW", - func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - return s.newValue3(ssa.OpDiv128u, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1], args[2]) - }, - sys.ArchAMD64) } // findIntrinsic returns a function which builds 
the SSA equivalent of the diff --git a/src/math/big/arith.go b/src/math/big/arith.go index b0885f261f..750ce8aa39 100644 --- a/src/math/big/arith.go +++ b/src/math/big/arith.go @@ -60,12 +60,6 @@ func nlz(x Word) uint { return uint(bits.LeadingZeros(uint(x))) } -// q = (u1<<_W + u0 - r)/v -func divWW_g(u1, u0, v Word) (q, r Word) { - qq, rr := bits.Div(uint(u1), uint(u0), uint(v)) - return Word(qq), Word(rr) -} - // The resulting carry c is either 0 or 1. func addVV_g(z, x, y []Word) (c Word) { // The comment near the top of this file discusses this for loop condition. @@ -207,10 +201,87 @@ func addMulVVW_g(z, x []Word, y Word) (c Word) { return } -func divWVW_g(z []Word, xn Word, x []Word, y Word) (r Word) { - r = xn - for i := len(z) - 1; i >= 0; i-- { - z[i], r = divWW_g(r, x[i], y) +// q = ( x1 << _W + x0 - r)/y. m = floor(( _B^2 - 1 ) / d - _B). Requiring x1>(_W-s) + x0 <<= s + y <<= s } - return + d := uint(y) + // We know that + // m = ⎣(B^2-1)/d⎦-B + // ⎣(B^2-1)/d⎦ = m+B + // (B^2-1)/d = m+B+delta1 0 <= delta1 <= (d-1)/d + // B^2/d = m+B+delta2 0 <= delta2 <= 1 + // The quotient we're trying to compute is + // quotient = ⎣(x1*B+x0)/d⎦ + // = ⎣(x1*B*(B^2/d)+x0*(B^2/d))/B^2⎦ + // = ⎣(x1*B*(m+B+delta2)+x0*(m+B+delta2))/B^2⎦ + // = ⎣(x1*m+x1*B+x0)/B + x0*m/B^2 + delta2*(x1*B+x0)/B^2⎦ + // The latter two terms of this three-term sum are between 0 and 1. + // So we can compute just the first term, and we will be low by at most 2. + t1, t0 := bits.Mul(uint(m), uint(x1)) + _, c := bits.Add(t0, uint(x0), 0) + t1, _ = bits.Add(t1, uint(x1), c) + // The quotient is either t1, t1+1, or t1+2. + // We'll try t1 and adjust if needed. + qq := t1 + // compute remainder r=x-d*q. + dq1, dq0 := bits.Mul(d, qq) + r0, b := bits.Sub(uint(x0), dq0, 0) + r1, _ := bits.Sub(uint(x1), dq1, b) + // The remainder we just computed is bounded above by B+d: + // r = x1*B + x0 - d*q. 
+ // = x1*B + x0 - d*⎣(x1*m+x1*B+x0)/B⎦ + // = x1*B + x0 - d*((x1*m+x1*B+x0)/B-alpha) 0 <= alpha < 1 + // = x1*B + x0 - x1*d/B*m - x1*d - x0*d/B + d*alpha + // = x1*B + x0 - x1*d/B*⎣(B^2-1)/d-B⎦ - x1*d - x0*d/B + d*alpha + // = x1*B + x0 - x1*d/B*⎣(B^2-1)/d-B⎦ - x1*d - x0*d/B + d*alpha + // = x1*B + x0 - x1*d/B*((B^2-1)/d-B-beta) - x1*d - x0*d/B + d*alpha 0 <= beta < 1 + // = x1*B + x0 - x1*B + x1/B + x1*d + x1*d/B*beta - x1*d - x0*d/B + d*alpha + // = x0 + x1/B + x1*d/B*beta - x0*d/B + d*alpha + // = x0*(1-d/B) + x1*(1+d*beta)/B + d*alpha + // < B*(1-d/B) + d*B/B + d because x00), x1= d { + qq++ + r0 -= d + } + return Word(qq), Word(r0 >> s) +} + +func divWVW(z []Word, xn Word, x []Word, y Word) (r Word) { + r = xn + if len(x) == 1 { + qq, rr := bits.Div(uint(r), uint(x[0]), uint(y)) + z[0] = Word(qq) + return Word(rr) + } + rec := reciprocalWord(y) + for i := len(z) - 1; i >= 0; i-- { + z[i], r = divWW(r, x[i], y, rec) + } + return r +} + +// reciprocalWord return the reciprocal of the divisor. rec = floor(( _B^2 - 1 ) / u - _B). u = d1 << nlz(d1). 
+func reciprocalWord(d1 Word) Word { + u := uint(d1 << nlz(d1)) + x1 := ^u + x0 := uint(_M) + rec, _ := bits.Div(x1, x0, u) // (_B^2-1)/U-_B = (_B*(_M-C)+_M)/U + return Word(rec) } diff --git a/src/math/big/arith_386.s b/src/math/big/arith_386.s index f61da2aba7..d0ea949fe6 100644 --- a/src/math/big/arith_386.s +++ b/src/math/big/arith_386.s @@ -18,16 +18,6 @@ TEXT ·mulWW(SB),NOSPLIT,$0 RET -// func divWW(x1, x0, y Word) (q, r Word) -TEXT ·divWW(SB),NOSPLIT,$0 - MOVL x1+0(FP), DX - MOVL x0+4(FP), AX - DIVL y+8(FP) - MOVL AX, q+12(FP) - MOVL DX, r+16(FP) - RET - - // func addVV(z, x, y []Word) (c Word) TEXT ·addVV(SB),NOSPLIT,$0 MOVL z+0(FP), DI @@ -251,21 +241,4 @@ E6: CMPL BX, $0 // i < 0 RET -// func divWVW(z* Word, xn Word, x []Word, y Word) (r Word) -TEXT ·divWVW(SB),NOSPLIT,$0 - MOVL z+0(FP), DI - MOVL xn+12(FP), DX // r = xn - MOVL x+16(FP), SI - MOVL y+28(FP), CX - MOVL z_len+4(FP), BX // i = z - JMP E7 -L7: MOVL (SI)(BX*4), AX - DIVL CX - MOVL AX, (DI)(BX*4) - -E7: SUBL $1, BX // i-- - JGE L7 // i >= 0 - - MOVL DX, r+32(FP) - RET diff --git a/src/math/big/arith_amd64.s b/src/math/big/arith_amd64.s index b75639f540..61043ca2d9 100644 --- a/src/math/big/arith_amd64.s +++ b/src/math/big/arith_amd64.s @@ -18,14 +18,6 @@ TEXT ·mulWW(SB),NOSPLIT,$0 RET -// func divWW(x1, x0, y Word) (q, r Word) -TEXT ·divWW(SB),NOSPLIT,$0 - MOVQ x1+0(FP), DX - MOVQ x0+8(FP), AX - DIVQ y+16(FP) - MOVQ AX, q+24(FP) - MOVQ DX, r+32(FP) - RET // The carry bit is saved with SBBQ Rx, Rx: if the carry was set, Rx is -1, otherwise it is 0. // It is restored with ADDQ Rx, Rx: if Rx was -1 the carry is set, otherwise it is cleared. 
@@ -531,21 +523,3 @@ adx_short: -// func divWVW(z []Word, xn Word, x []Word, y Word) (r Word) -TEXT ·divWVW(SB),NOSPLIT,$0 - MOVQ z+0(FP), R10 - MOVQ xn+24(FP), DX // r = xn - MOVQ x+32(FP), R8 - MOVQ y+56(FP), R9 - MOVQ z_len+8(FP), BX // i = z - JMP E7 - -L7: MOVQ (R8)(BX*8), AX - DIVQ R9 - MOVQ AX, (R10)(BX*8) - -E7: SUBQ $1, BX // i-- - JGE L7 // i >= 0 - - MOVQ DX, r+64(FP) - RET diff --git a/src/math/big/arith_arm.s b/src/math/big/arith_arm.s index 33aa36f709..cbf7445e7a 100644 --- a/src/math/big/arith_arm.s +++ b/src/math/big/arith_arm.s @@ -272,17 +272,6 @@ E9: RET -// func divWVW(z* Word, xn Word, x []Word, y Word) (r Word) -TEXT ·divWVW(SB),NOSPLIT,$0 - // ARM has no multiword division, so use portable code. - B ·divWVW_g(SB) - - -// func divWW(x1, x0, y Word) (q, r Word) -TEXT ·divWW(SB),NOSPLIT,$0 - // ARM has no multiword division, so use portable code. - B ·divWW_g(SB) - // func mulWW(x, y Word) (z1, z0 Word) TEXT ·mulWW(SB),NOSPLIT,$0 diff --git a/src/math/big/arith_arm64.s b/src/math/big/arith_arm64.s index da6e408e19..22357d088e 100644 --- a/src/math/big/arith_arm64.s +++ b/src/math/big/arith_arm64.s @@ -23,11 +23,6 @@ TEXT ·mulWW(SB),NOSPLIT,$0 RET -// func divWW(x1, x0, y Word) (q, r Word) -TEXT ·divWW(SB),NOSPLIT,$0 - B ·divWW_g(SB) // ARM64 has no multiword division - - // func addVV(z, x, y []Word) (c Word) TEXT ·addVV(SB),NOSPLIT,$0 MOVD z_len+8(FP), R0 @@ -585,6 +580,4 @@ done: MOVD R4, c+56(FP) RET -// func divWVW(z []Word, xn Word, x []Word, y Word) (r Word) -TEXT ·divWVW(SB),NOSPLIT,$0 - B ·divWVW_g(SB) + diff --git a/src/math/big/arith_decl.go b/src/math/big/arith_decl.go index 41e592334c..d519bdc87b 100644 --- a/src/math/big/arith_decl.go +++ b/src/math/big/arith_decl.go @@ -8,7 +8,6 @@ package big // implemented in arith_$GOARCH.s func mulWW(x, y Word) (z1, z0 Word) -func divWW(x1, x0, y Word) (q, r Word) func addVV(z, x, y []Word) (c Word) func subVV(z, x, y []Word) (c Word) func addVW(z, x []Word, y Word) (c Word) @@ -17,4 +16,3 @@ 
func shlVU(z, x []Word, s uint) (c Word) func shrVU(z, x []Word, s uint) (c Word) func mulAddVWW(z, x []Word, y, r Word) (c Word) func addMulVVW(z, x []Word, y Word) (c Word) -func divWVW(z []Word, xn Word, x []Word, y Word) (r Word) diff --git a/src/math/big/arith_decl_pure.go b/src/math/big/arith_decl_pure.go index 305f7ee03b..5faa3bd281 100644 --- a/src/math/big/arith_decl_pure.go +++ b/src/math/big/arith_decl_pure.go @@ -10,10 +10,6 @@ func mulWW(x, y Word) (z1, z0 Word) { return mulWW_g(x, y) } -func divWW(x1, x0, y Word) (q, r Word) { - return divWW_g(x1, x0, y) -} - func addVV(z, x, y []Word) (c Word) { return addVV_g(z, x, y) } @@ -55,7 +51,3 @@ func mulAddVWW(z, x []Word, y, r Word) (c Word) { func addMulVVW(z, x []Word, y Word) (c Word) { return addMulVVW_g(z, x, y) } - -func divWVW(z []Word, xn Word, x []Word, y Word) (r Word) { - return divWVW_g(z, xn, x, y) -} diff --git a/src/math/big/arith_mips64x.s b/src/math/big/arith_mips64x.s index 983510ee3d..804b9fe06e 100644 --- a/src/math/big/arith_mips64x.s +++ b/src/math/big/arith_mips64x.s @@ -12,9 +12,6 @@ TEXT ·mulWW(SB),NOSPLIT,$0 JMP ·mulWW_g(SB) -TEXT ·divWW(SB),NOSPLIT,$0 - JMP ·divWW_g(SB) - TEXT ·addVV(SB),NOSPLIT,$0 JMP ·addVV_g(SB) @@ -39,5 +36,3 @@ TEXT ·mulAddVWW(SB),NOSPLIT,$0 TEXT ·addMulVVW(SB),NOSPLIT,$0 JMP ·addMulVVW_g(SB) -TEXT ·divWVW(SB),NOSPLIT,$0 - JMP ·divWVW_g(SB) diff --git a/src/math/big/arith_mipsx.s b/src/math/big/arith_mipsx.s index 54cafbd9c0..efdecb80f3 100644 --- a/src/math/big/arith_mipsx.s +++ b/src/math/big/arith_mipsx.s @@ -12,9 +12,6 @@ TEXT ·mulWW(SB),NOSPLIT,$0 JMP ·mulWW_g(SB) -TEXT ·divWW(SB),NOSPLIT,$0 - JMP ·divWW_g(SB) - TEXT ·addVV(SB),NOSPLIT,$0 JMP ·addVV_g(SB) @@ -39,5 +36,3 @@ TEXT ·mulAddVWW(SB),NOSPLIT,$0 TEXT ·addMulVVW(SB),NOSPLIT,$0 JMP ·addMulVVW_g(SB) -TEXT ·divWVW(SB),NOSPLIT,$0 - JMP ·divWVW_g(SB) diff --git a/src/math/big/arith_ppc64x.s b/src/math/big/arith_ppc64x.s index 409e10ab48..b299ccc2fb 100644 --- a/src/math/big/arith_ppc64x.s +++ 
b/src/math/big/arith_ppc64x.s @@ -478,44 +478,4 @@ done: MOVD R4, c+56(FP) RET -// func divWW(x1, x0, y Word) (q, r Word) -TEXT ·divWW(SB), NOSPLIT, $0 - MOVD x1+0(FP), R4 - MOVD x0+8(FP), R5 - MOVD y+16(FP), R6 - CMPU R4, R6 - BGE divbigger - - // from the programmer's note in ch. 3 of the ISA manual, p.74 - DIVDEU R6, R4, R3 - DIVDU R6, R5, R7 - MULLD R6, R3, R8 - MULLD R6, R7, R20 - SUB R20, R5, R10 - ADD R7, R3, R3 - SUB R8, R10, R4 - CMPU R4, R10 - BLT adjust - CMPU R4, R6 - BLT end - -adjust: - MOVD $1, R21 - ADD R21, R3, R3 - SUB R6, R4, R4 - -end: - MOVD R3, q+24(FP) - MOVD R4, r+32(FP) - - RET - -divbigger: - MOVD $-1, R7 - MOVD R7, q+24(FP) - MOVD R7, r+32(FP) - RET - -TEXT ·divWVW(SB), NOSPLIT, $0 - BR ·divWVW_g(SB) diff --git a/src/math/big/arith_riscv64.s b/src/math/big/arith_riscv64.s index 59065c3f7b..a2f7666c7b 100644 --- a/src/math/big/arith_riscv64.s +++ b/src/math/big/arith_riscv64.s @@ -19,9 +19,6 @@ TEXT ·mulWW(SB),NOSPLIT,$0 MOV X8, z0+24(FP) RET -// func divWW(x1, x0, y Word) (q, r Word) -TEXT ·divWW(SB),NOSPLIT,$0 - JMP ·divWW_g(SB) // riscv64 has no multiword division TEXT ·addVV(SB),NOSPLIT,$0 JMP ·addVV_g(SB) @@ -47,5 +44,3 @@ TEXT ·mulAddVWW(SB),NOSPLIT,$0 TEXT ·addMulVVW(SB),NOSPLIT,$0 JMP ·addMulVVW_g(SB) -TEXT ·divWVW(SB),NOSPLIT,$0 - JMP ·divWVW_g(SB) diff --git a/src/math/big/arith_s390x.s b/src/math/big/arith_s390x.s index 4891768111..242aca7434 100644 --- a/src/math/big/arith_s390x.s +++ b/src/math/big/arith_s390x.s @@ -17,15 +17,6 @@ TEXT ·mulWW(SB), NOSPLIT, $0 MOVD R11, z0+24(FP) RET -// func divWW(x1, x0, y Word) (q, r Word) -TEXT ·divWW(SB), NOSPLIT, $0 - MOVD x1+0(FP), R10 - MOVD x0+8(FP), R11 - MOVD y+16(FP), R5 - WORD $0xb98700a5 // dlgr r10,r5 - MOVD R11, q+24(FP) - MOVD R10, r+32(FP) - RET // DI = R3, CX = R4, SI = r10, r8 = r8, r9=r9, r10 = r2 , r11 = r5, r12 = r6, r13 = r7, r14 = r1 (R0 set to 0) + use R11 // func addVV(z, x, y []Word) (c Word) @@ -990,27 +981,3 @@ E6: MOVD R4, c+56(FP) RET -// func divWVW(z []Word, xn 
Word, x []Word, y Word) (r Word) -// CX = R4, r8 = r8, r9=r9, r10 = r2 , r11 = r5, AX = r11, DX = R6, r12=r12, BX = R1(*8) , (R0 set to 0) + use R11 + use R7 for i -TEXT ·divWVW(SB), NOSPLIT, $0 - MOVD z+0(FP), R2 - MOVD xn+24(FP), R10 // r = xn - MOVD x+32(FP), R8 - MOVD y+56(FP), R9 - MOVD z_len+8(FP), R7 // i = z - SLD $3, R7, R1 // i*8 - MOVD $0, R0 // make sure it's zero - BR E7 - -L7: - MOVD (R8)(R1*1), R11 - WORD $0xB98700A9 // DLGR R10,R9 - MOVD R11, (R2)(R1*1) - -E7: - SUB $1, R7 // i-- - SUB $8, R1 - BGE L7 // i >= 0 - - MOVD R10, r+64(FP) - RET diff --git a/src/math/big/arith_test.go b/src/math/big/arith_test.go index fc205934c5..808d178459 100644 --- a/src/math/big/arith_test.go +++ b/src/math/big/arith_test.go @@ -7,6 +7,7 @@ package big import ( "fmt" "internal/testenv" + "math/bits" "math/rand" "strings" "testing" @@ -493,7 +494,6 @@ func TestFunVWW(t *testing.T) { if a.y != 0 && a.r < a.y { arg := argWVW{a.x, a.c, a.z, a.y, a.r} - testFunWVW(t, "divWVW_g", divWVW_g, arg) testFunWVW(t, "divWVW", divWVW, arg) } } @@ -536,6 +536,42 @@ func TestMulAddWWW(t *testing.T) { } } +var divWWTests = []struct { + x1, x0, y Word + q, r Word +}{ + {_M >> 1, 0, _M, _M >> 1, _M >> 1}, + {_M - (1 << (_W - 2)), _M, 3 << (_W - 2), _M, _M - (1 << (_W - 2))}, +} + +const testsNumber = 1 << 16 + +func TestDivWW(t *testing.T) { + i := 0 + for i, test := range divWWTests { + rec := reciprocalWord(test.y) + q, r := divWW(test.x1, test.x0, test.y, rec) + if q != test.q || r != test.r { + t.Errorf("#%d got (%x, %x) want (%x, %x)", i, q, r, test.q, test.r) + } + } + //random tests + for ; i < testsNumber; i++ { + x1 := rndW() + x0 := rndW() + y := rndW() + if x1 >= y { + continue + } + rec := reciprocalWord(y) + qGot, rGot := divWW(x1, x0, y, rec) + qWant, rWant := bits.Div(uint(x1), uint(x0), uint(y)) + if uint(qGot) != qWant || uint(rGot) != rWant { + t.Errorf("#%d got (%x, %x) want (%x, %x)", i, qGot, rGot, qWant, rWant) + } + } +} + func BenchmarkMulAddVWW(b *testing.B) { 
for _, n := range benchSizes { if isRaceBuilder && n > 1e3 { @@ -570,3 +606,19 @@ func BenchmarkAddMulVVW(b *testing.B) { }) } } +func BenchmarkDivWVW(b *testing.B) { + for _, n := range benchSizes { + if isRaceBuilder && n > 1e3 { + continue + } + x := rndV(n) + y := rndW() + z := make([]Word, n) + b.Run(fmt.Sprint(n), func(b *testing.B) { + b.SetBytes(int64(n * _W)) + for i := 0; i < b.N; i++ { + divWVW(z, 0, x, y) + } + }) + } +} diff --git a/src/math/big/arith_wasm.s b/src/math/big/arith_wasm.s index 382597c694..add1064469 100644 --- a/src/math/big/arith_wasm.s +++ b/src/math/big/arith_wasm.s @@ -9,9 +9,6 @@ TEXT ·mulWW(SB),NOSPLIT,$0 JMP ·mulWW_g(SB) -TEXT ·divWW(SB),NOSPLIT,$0 - JMP ·divWW_g(SB) - TEXT ·addVV(SB),NOSPLIT,$0 JMP ·addVV_g(SB) @@ -36,5 +33,3 @@ TEXT ·mulAddVWW(SB),NOSPLIT,$0 TEXT ·addMulVVW(SB),NOSPLIT,$0 JMP ·addMulVVW_g(SB) -TEXT ·divWVW(SB),NOSPLIT,$0 - JMP ·divWVW_g(SB) diff --git a/src/math/big/nat.go b/src/math/big/nat.go index 6a3989bf9d..c2f3787848 100644 --- a/src/math/big/nat.go +++ b/src/math/big/nat.go @@ -751,6 +751,7 @@ func (q nat) divBasic(u, v nat) { // D2. vn1 := v[n-1] + rec := reciprocalWord(vn1) for j := m; j >= 0; j-- { // D3. qhat := Word(_M) @@ -760,7 +761,7 @@ func (q nat) divBasic(u, v nat) { } if ujn != vn1 { var rhat Word - qhat, rhat = divWW(ujn, u[j+n-1], vn1) + qhat, rhat = divWW(ujn, u[j+n-1], vn1, rec) // x1 | x2 = q̂v_{n-2} vn2 := v[n-2] From aacbd7c3aab5c3bf5d5f6cbfaa63db9d9fc2e3d5 Mon Sep 17 00:00:00 2001 From: Constantin Konstantinidis Date: Sat, 29 Aug 2020 11:10:50 +0200 Subject: [PATCH 014/281] cmd/compile: enforce strongly typed rules for ARM (GOARM) Toolstash-check successful for remaining rules using GOARM value. 
Change-Id: I254f80d17839ef4957c1b7afbdb4db363a3b9367 Reviewed-on: https://go-review.googlesource.com/c/go/+/240997 Run-TryBot: Giovanni Bajo TryBot-Result: Go Bot Reviewed-by: Keith Randall Trust: Giovanni Bajo --- src/cmd/compile/internal/ssa/gen/ARM.rules | 50 +++++++++---------- src/cmd/compile/internal/ssa/rewriteARM.go | 58 +++++++++++----------- 2 files changed, 54 insertions(+), 54 deletions(-) diff --git a/src/cmd/compile/internal/ssa/gen/ARM.rules b/src/cmd/compile/internal/ssa/gen/ARM.rules index 2be347d98b..840b93bb53 100644 --- a/src/cmd/compile/internal/ssa/gen/ARM.rules +++ b/src/cmd/compile/internal/ssa/gen/ARM.rules @@ -65,17 +65,17 @@ // count trailing zero for ARMv5 and ARMv6 // 32 - CLZ(x&-x - 1) -(Ctz32 x) && objabi.GOARM<=6 -> +(Ctz32 x) && objabi.GOARM<=6 => (RSBconst [32] (CLZ (SUBconst (AND x (RSBconst [0] x)) [1]))) -(Ctz16 x) && objabi.GOARM<=6 -> +(Ctz16 x) && objabi.GOARM<=6 => (RSBconst [32] (CLZ (SUBconst (AND (ORconst [0x10000] x) (RSBconst [0] (ORconst [0x10000] x))) [1]))) -(Ctz8 x) && objabi.GOARM<=6 -> +(Ctz8 x) && objabi.GOARM<=6 => (RSBconst [32] (CLZ (SUBconst (AND (ORconst [0x100] x) (RSBconst [0] (ORconst [0x100] x))) [1]))) // count trailing zero for ARMv7 -(Ctz32 x) && objabi.GOARM==7 -> (CLZ (RBIT x)) -(Ctz16 x) && objabi.GOARM==7 -> (CLZ (RBIT (ORconst [0x10000] x))) -(Ctz8 x) && objabi.GOARM==7 -> (CLZ (RBIT (ORconst [0x100] x))) +(Ctz32 x) && objabi.GOARM==7 => (CLZ (RBIT x)) +(Ctz16 x) && objabi.GOARM==7 => (CLZ (RBIT (ORconst [0x10000] x))) +(Ctz8 x) && objabi.GOARM==7 => (CLZ (RBIT (ORconst [0x100] x))) // bit length (BitLen32 x) => (RSBconst [32] (CLZ x)) @@ -89,13 +89,13 @@ // t5 = x right rotate 8 bits -- (d, a, b, c ) // result = t4 ^ t5 -- (d, c, b, a ) // using shifted ops this can be done in 4 instructions. 
-(Bswap32 x) && objabi.GOARM==5 -> +(Bswap32 x) && objabi.GOARM==5 => (XOR (SRLconst (BICconst (XOR x (SRRconst [16] x)) [0xff0000]) [8]) (SRRconst x [8])) // byte swap for ARMv6 and above -(Bswap32 x) && objabi.GOARM>=6 -> (REV x) +(Bswap32 x) && objabi.GOARM>=6 => (REV x) // boolean ops -- booleans are represented with 0=false, 1=true (AndB ...) => (AND ...) @@ -1139,7 +1139,7 @@ // UBFX instruction is supported by ARMv6T2, ARMv7 and above versions, REV16 is supported by // ARMv6 and above versions. So for ARMv6, we need to match SLLconst, SRLconst and ORshiftLL. ((ADDshiftLL|ORshiftLL|XORshiftLL) [8] (BFXU [int32(armBFAuxInt(8, 8))] x) x) => (REV16 x) -((ADDshiftLL|ORshiftLL|XORshiftLL) [8] (SRLconst [24] (SLLconst [16] x)) x) && objabi.GOARM>=6 -> (REV16 x) +((ADDshiftLL|ORshiftLL|XORshiftLL) [8] (SRLconst [24] (SLLconst [16] x)) x) && objabi.GOARM>=6 => (REV16 x) // use indexed loads and stores (MOVWload [0] {sym} (ADD ptr idx) mem) && sym == nil => (MOVWloadidx ptr idx mem) @@ -1209,25 +1209,25 @@ (BIC x x) => (MOVWconst [0]) (ADD (MUL x y) a) => (MULA x y a) -(SUB a (MUL x y)) && objabi.GOARM == 7 -> (MULS x y a) -(RSB (MUL x y) a) && objabi.GOARM == 7 -> (MULS x y a) +(SUB a (MUL x y)) && objabi.GOARM == 7 => (MULS x y a) +(RSB (MUL x y) a) && objabi.GOARM == 7 => (MULS x y a) -(NEGF (MULF x y)) && objabi.GOARM >= 6 -> (NMULF x y) -(NEGD (MULD x y)) && objabi.GOARM >= 6 -> (NMULD x y) -(MULF (NEGF x) y) && objabi.GOARM >= 6 -> (NMULF x y) -(MULD (NEGD x) y) && objabi.GOARM >= 6 -> (NMULD x y) +(NEGF (MULF x y)) && objabi.GOARM >= 6 => (NMULF x y) +(NEGD (MULD x y)) && objabi.GOARM >= 6 => (NMULD x y) +(MULF (NEGF x) y) && objabi.GOARM >= 6 => (NMULF x y) +(MULD (NEGD x) y) && objabi.GOARM >= 6 => (NMULD x y) (NMULF (NEGF x) y) => (MULF x y) (NMULD (NEGD x) y) => (MULD x y) // the result will overwrite the addend, since they are in the same register -(ADDF a (MULF x y)) && a.Uses == 1 && objabi.GOARM >= 6 -> (MULAF a x y) -(ADDF a (NMULF x y)) && a.Uses == 1 
&& objabi.GOARM >= 6 -> (MULSF a x y) -(ADDD a (MULD x y)) && a.Uses == 1 && objabi.GOARM >= 6 -> (MULAD a x y) -(ADDD a (NMULD x y)) && a.Uses == 1 && objabi.GOARM >= 6 -> (MULSD a x y) -(SUBF a (MULF x y)) && a.Uses == 1 && objabi.GOARM >= 6 -> (MULSF a x y) -(SUBF a (NMULF x y)) && a.Uses == 1 && objabi.GOARM >= 6 -> (MULAF a x y) -(SUBD a (MULD x y)) && a.Uses == 1 && objabi.GOARM >= 6 -> (MULSD a x y) -(SUBD a (NMULD x y)) && a.Uses == 1 && objabi.GOARM >= 6 -> (MULAD a x y) +(ADDF a (MULF x y)) && a.Uses == 1 && objabi.GOARM >= 6 => (MULAF a x y) +(ADDF a (NMULF x y)) && a.Uses == 1 && objabi.GOARM >= 6 => (MULSF a x y) +(ADDD a (MULD x y)) && a.Uses == 1 && objabi.GOARM >= 6 => (MULAD a x y) +(ADDD a (NMULD x y)) && a.Uses == 1 && objabi.GOARM >= 6 => (MULSD a x y) +(SUBF a (MULF x y)) && a.Uses == 1 && objabi.GOARM >= 6 => (MULSF a x y) +(SUBF a (NMULF x y)) && a.Uses == 1 && objabi.GOARM >= 6 => (MULAF a x y) +(SUBD a (MULD x y)) && a.Uses == 1 && objabi.GOARM >= 6 => (MULSD a x y) +(SUBD a (NMULD x y)) && a.Uses == 1 && objabi.GOARM >= 6 => (MULAD a x y) (AND x (MVN y)) => (BIC x y) @@ -1259,8 +1259,8 @@ (CMPD x (MOVDconst [0])) => (CMPD0 x) // bit extraction -(SRAconst (SLLconst x [c]) [d]) && objabi.GOARM==7 && uint64(d)>=uint64(c) && uint64(d)<=31 -> (BFX [(d-c)|(32-d)<<8] x) -(SRLconst (SLLconst x [c]) [d]) && objabi.GOARM==7 && uint64(d)>=uint64(c) && uint64(d)<=31 -> (BFXU [(d-c)|(32-d)<<8] x) +(SRAconst (SLLconst x [c]) [d]) && objabi.GOARM==7 && uint64(d)>=uint64(c) && uint64(d)<=31 => (BFX [(d-c)|(32-d)<<8] x) +(SRLconst (SLLconst x [c]) [d]) && objabi.GOARM==7 && uint64(d)>=uint64(c) && uint64(d)<=31 => (BFXU [(d-c)|(32-d)<<8] x) // comparison simplification (CMP x (RSBconst [0] y)) => (CMN x y) diff --git a/src/cmd/compile/internal/ssa/rewriteARM.go b/src/cmd/compile/internal/ssa/rewriteARM.go index 594d7427c4..b790d28cac 100644 --- a/src/cmd/compile/internal/ssa/rewriteARM.go +++ b/src/cmd/compile/internal/ssa/rewriteARM.go @@ -2087,11 
+2087,11 @@ func rewriteValueARM_OpARMADDshiftLL(v *Value) bool { // cond: objabi.GOARM>=6 // result: (REV16 x) for { - if v.Type != typ.UInt16 || v.AuxInt != 8 || v_0.Op != OpARMSRLconst || v_0.Type != typ.UInt16 || v_0.AuxInt != 24 { + if v.Type != typ.UInt16 || auxIntToInt32(v.AuxInt) != 8 || v_0.Op != OpARMSRLconst || v_0.Type != typ.UInt16 || auxIntToInt32(v_0.AuxInt) != 24 { break } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpARMSLLconst || v_0_0.AuxInt != 16 { + if v_0_0.Op != OpARMSLLconst || auxIntToInt32(v_0_0.AuxInt) != 16 { break } x := v_0_0.Args[0] @@ -8555,11 +8555,11 @@ func rewriteValueARM_OpARMORshiftLL(v *Value) bool { // cond: objabi.GOARM>=6 // result: (REV16 x) for { - if v.Type != typ.UInt16 || v.AuxInt != 8 || v_0.Op != OpARMSRLconst || v_0.Type != typ.UInt16 || v_0.AuxInt != 24 { + if v.Type != typ.UInt16 || auxIntToInt32(v.AuxInt) != 8 || v_0.Op != OpARMSRLconst || v_0.Type != typ.UInt16 || auxIntToInt32(v_0.AuxInt) != 24 { break } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpARMSLLconst || v_0_0.AuxInt != 16 { + if v_0_0.Op != OpARMSLLconst || auxIntToInt32(v_0_0.AuxInt) != 16 { break } x := v_0_0.Args[0] @@ -10466,17 +10466,17 @@ func rewriteValueARM_OpARMSRAconst(v *Value) bool { // cond: objabi.GOARM==7 && uint64(d)>=uint64(c) && uint64(d)<=31 // result: (BFX [(d-c)|(32-d)<<8] x) for { - d := v.AuxInt + d := auxIntToInt32(v.AuxInt) if v_0.Op != OpARMSLLconst { break } - c := v_0.AuxInt + c := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] if !(objabi.GOARM == 7 && uint64(d) >= uint64(c) && uint64(d) <= 31) { break } v.reset(OpARMBFX) - v.AuxInt = (d - c) | (32-d)<<8 + v.AuxInt = int32ToAuxInt((d - c) | (32-d)<<8) v.AddArg(x) return true } @@ -10518,17 +10518,17 @@ func rewriteValueARM_OpARMSRLconst(v *Value) bool { // cond: objabi.GOARM==7 && uint64(d)>=uint64(c) && uint64(d)<=31 // result: (BFXU [(d-c)|(32-d)<<8] x) for { - d := v.AuxInt + d := auxIntToInt32(v.AuxInt) if v_0.Op != OpARMSLLconst { break } - c := v_0.AuxInt + c := 
auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] if !(objabi.GOARM == 7 && uint64(d) >= uint64(c) && uint64(d) <= 31) { break } v.reset(OpARMBFXU) - v.AuxInt = (d - c) | (32-d)<<8 + v.AuxInt = int32ToAuxInt((d - c) | (32-d)<<8) v.AddArg(x) return true } @@ -12594,11 +12594,11 @@ func rewriteValueARM_OpARMXORshiftLL(v *Value) bool { // cond: objabi.GOARM>=6 // result: (REV16 x) for { - if v.Type != typ.UInt16 || v.AuxInt != 8 || v_0.Op != OpARMSRLconst || v_0.Type != typ.UInt16 || v_0.AuxInt != 24 { + if v.Type != typ.UInt16 || auxIntToInt32(v.AuxInt) != 8 || v_0.Op != OpARMSRLconst || v_0.Type != typ.UInt16 || auxIntToInt32(v_0.AuxInt) != 24 { break } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpARMSLLconst || v_0_0.AuxInt != 16 { + if v_0_0.Op != OpARMSLLconst || auxIntToInt32(v_0_0.AuxInt) != 16 { break } x := v_0_0.Args[0] @@ -12951,18 +12951,18 @@ func rewriteValueARM_OpBswap32(v *Value) bool { v.reset(OpARMXOR) v.Type = t v0 := b.NewValue0(v.Pos, OpARMSRLconst, t) - v0.AuxInt = 8 + v0.AuxInt = int32ToAuxInt(8) v1 := b.NewValue0(v.Pos, OpARMBICconst, t) - v1.AuxInt = 0xff0000 + v1.AuxInt = int32ToAuxInt(0xff0000) v2 := b.NewValue0(v.Pos, OpARMXOR, t) v3 := b.NewValue0(v.Pos, OpARMSRRconst, t) - v3.AuxInt = 16 + v3.AuxInt = int32ToAuxInt(16) v3.AddArg(x) v2.AddArg2(x, v3) v1.AddArg(v2) v0.AddArg(v1) v4 := b.NewValue0(v.Pos, OpARMSRRconst, t) - v4.AuxInt = 8 + v4.AuxInt = int32ToAuxInt(8) v4.AddArg(x) v.AddArg2(v0, v4) return true @@ -13004,16 +13004,16 @@ func rewriteValueARM_OpCtz16(v *Value) bool { break } v.reset(OpARMRSBconst) - v.AuxInt = 32 + v.AuxInt = int32ToAuxInt(32) v0 := b.NewValue0(v.Pos, OpARMCLZ, t) v1 := b.NewValue0(v.Pos, OpARMSUBconst, typ.UInt32) - v1.AuxInt = 1 + v1.AuxInt = int32ToAuxInt(1) v2 := b.NewValue0(v.Pos, OpARMAND, typ.UInt32) v3 := b.NewValue0(v.Pos, OpARMORconst, typ.UInt32) - v3.AuxInt = 0x10000 + v3.AuxInt = int32ToAuxInt(0x10000) v3.AddArg(x) v4 := b.NewValue0(v.Pos, OpARMRSBconst, typ.UInt32) - v4.AuxInt = 0 + v4.AuxInt = 
int32ToAuxInt(0) v4.AddArg(v3) v2.AddArg2(v3, v4) v1.AddArg(v2) @@ -13034,7 +13034,7 @@ func rewriteValueARM_OpCtz16(v *Value) bool { v.Type = t v0 := b.NewValue0(v.Pos, OpARMRBIT, typ.UInt32) v1 := b.NewValue0(v.Pos, OpARMORconst, typ.UInt32) - v1.AuxInt = 0x10000 + v1.AuxInt = int32ToAuxInt(0x10000) v1.AddArg(x) v0.AddArg(v1) v.AddArg(v0) @@ -13055,13 +13055,13 @@ func rewriteValueARM_OpCtz32(v *Value) bool { break } v.reset(OpARMRSBconst) - v.AuxInt = 32 + v.AuxInt = int32ToAuxInt(32) v0 := b.NewValue0(v.Pos, OpARMCLZ, t) v1 := b.NewValue0(v.Pos, OpARMSUBconst, t) - v1.AuxInt = 1 + v1.AuxInt = int32ToAuxInt(1) v2 := b.NewValue0(v.Pos, OpARMAND, t) v3 := b.NewValue0(v.Pos, OpARMRSBconst, t) - v3.AuxInt = 0 + v3.AuxInt = int32ToAuxInt(0) v3.AddArg(x) v2.AddArg2(x, v3) v1.AddArg(v2) @@ -13101,16 +13101,16 @@ func rewriteValueARM_OpCtz8(v *Value) bool { break } v.reset(OpARMRSBconst) - v.AuxInt = 32 + v.AuxInt = int32ToAuxInt(32) v0 := b.NewValue0(v.Pos, OpARMCLZ, t) v1 := b.NewValue0(v.Pos, OpARMSUBconst, typ.UInt32) - v1.AuxInt = 1 + v1.AuxInt = int32ToAuxInt(1) v2 := b.NewValue0(v.Pos, OpARMAND, typ.UInt32) v3 := b.NewValue0(v.Pos, OpARMORconst, typ.UInt32) - v3.AuxInt = 0x100 + v3.AuxInt = int32ToAuxInt(0x100) v3.AddArg(x) v4 := b.NewValue0(v.Pos, OpARMRSBconst, typ.UInt32) - v4.AuxInt = 0 + v4.AuxInt = int32ToAuxInt(0) v4.AddArg(v3) v2.AddArg2(v3, v4) v1.AddArg(v2) @@ -13131,7 +13131,7 @@ func rewriteValueARM_OpCtz8(v *Value) bool { v.Type = t v0 := b.NewValue0(v.Pos, OpARMRBIT, typ.UInt32) v1 := b.NewValue0(v.Pos, OpARMORconst, typ.UInt32) - v1.AuxInt = 0x100 + v1.AuxInt = int32ToAuxInt(0x100) v1.AddArg(x) v0.AddArg(v1) v.AddArg(v0) From 9e073b504fbb936f54e6be50a41903319a993ce9 Mon Sep 17 00:00:00 2001 From: witchard Date: Tue, 22 Sep 2020 18:02:52 +0000 Subject: [PATCH 015/281] doc/go1.16: add -insecure deprecation to release notes Updates #37519. 
Change-Id: Iddf88a24334d4740f9c40caa2354127298692eeb GitHub-Last-Rev: deda4c858b5c5582fa63ae7f1eee5f57292670c4 GitHub-Pull-Request: golang/go#41545 Reviewed-on: https://go-review.googlesource.com/c/go/+/256419 Reviewed-by: Jay Conrod Trust: Jay Conrod Trust: Bryan C. Mills --- doc/go1.16.html | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/doc/go1.16.html b/doc/go1.16.html index 09717dac85..3164acbb6d 100644 --- a/doc/go1.16.html +++ b/doc/go1.16.html @@ -85,6 +85,16 @@ Do not send CLs removing the interior tags from such phrases. that is still considered to be a passing test.

+

+ The go get -insecure flag is + deprecated and will be removed in a future version. The GOINSECURE + environment variable should be used instead, since it provides control + over which modules may be retrieved using an insecure scheme. Unlike the + -insecure flag, GOINSECURE does not disable module + sum validation using the checksum database. The GOPRIVATE or + GONOSUMDB environment variables may be used instead. +

+

The all pattern

From 8e8bfb697fbc948494d67428c4953605cc89b6f4 Mon Sep 17 00:00:00 2001 From: Ainar Garipov Date: Wed, 23 Sep 2020 21:15:01 +0300 Subject: [PATCH 016/281] crypto/tls: replace errClosed with net.ErrClosed CL 250357 exported net.ErrClosed to allow more reliable detection of closed network connection errors. Use that error in crypto/tls as well. The error message is changed from "tls: use of closed connection" to "use of closed network connection", so the code that detected such errors by looking for that text in the error message will need to be updated to use errors.Is(err, net.ErrClosed) instead. Fixes #41066 Change-Id: Ic05c0ed6a4f57af2a0302d53b00851a59200be2e Reviewed-on: https://go-review.googlesource.com/c/go/+/256897 Reviewed-by: Katie Hockman Trust: Katie Hockman Trust: Ian Lance Taylor Run-TryBot: Katie Hockman TryBot-Result: Go Bot --- src/crypto/tls/conn.go | 5 ++--- src/crypto/tls/tls_test.go | 4 ++-- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/src/crypto/tls/conn.go b/src/crypto/tls/conn.go index edcfecf81d..5dff76c988 100644 --- a/src/crypto/tls/conn.go +++ b/src/crypto/tls/conn.go @@ -1070,7 +1070,6 @@ func (c *Conn) readHandshake() (interface{}, error) { } var ( - errClosed = errors.New("tls: use of closed connection") errShutdown = errors.New("tls: protocol is shutdown") ) @@ -1080,7 +1079,7 @@ func (c *Conn) Write(b []byte) (int, error) { for { x := atomic.LoadInt32(&c.activeCall) if x&1 != 0 { - return 0, errClosed + return 0, net.ErrClosed } if atomic.CompareAndSwapInt32(&c.activeCall, x, x+2) { break @@ -1285,7 +1284,7 @@ func (c *Conn) Close() error { for { x = atomic.LoadInt32(&c.activeCall) if x&1 != 0 { - return errClosed + return net.ErrClosed } if atomic.CompareAndSwapInt32(&c.activeCall, x, x|1) { break diff --git a/src/crypto/tls/tls_test.go b/src/crypto/tls/tls_test.go index 198423414b..334bfc411a 100644 --- a/src/crypto/tls/tls_test.go +++ b/src/crypto/tls/tls_test.go @@ -569,8 +569,8 @@ func 
TestConnCloseBreakingWrite(t *testing.T) { } <-closeReturned - if err := tconn.Close(); err != errClosed { - t.Errorf("Close error = %v; want errClosed", err) + if err := tconn.Close(); err != net.ErrClosed { + t.Errorf("Close error = %v; want net.ErrClosed", err) } } From 83e8bf2e7d9a7037fcafa90d3ee2730f8dd1ad90 Mon Sep 17 00:00:00 2001 From: Alberto Donizetti Date: Thu, 24 Sep 2020 14:36:34 +0200 Subject: [PATCH 017/281] cmd/compile: more amd64 typed aux rules Passes gotip build -toolexec 'toolstash -cmp' -a std Change-Id: Id9da1240ca810fe07f23c56b36900b6e35a10a6e Reviewed-on: https://go-review.googlesource.com/c/go/+/257037 Trust: Alberto Donizetti Run-TryBot: Alberto Donizetti TryBot-Result: Go Bot Reviewed-by: Keith Randall --- src/cmd/compile/internal/ssa/gen/AMD64.rules | 160 +-- src/cmd/compile/internal/ssa/op.go | 7 + src/cmd/compile/internal/ssa/rewriteAMD64.go | 1268 +++++++++--------- 3 files changed, 721 insertions(+), 714 deletions(-) diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules index fe91c34fe8..47ae9272d0 100644 --- a/src/cmd/compile/internal/ssa/gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules @@ -1470,33 +1470,33 @@ (MULQconst [c] (NEGQ x)) && c != -(1<<31) -> (MULQconst [-c] x) // checking AND against 0. 
-(CMPQconst a:(ANDQ x y) [0]) && a.Uses == 1 -> (TESTQ x y) -(CMPLconst a:(ANDL x y) [0]) && a.Uses == 1 -> (TESTL x y) -(CMPWconst a:(ANDL x y) [0]) && a.Uses == 1 -> (TESTW x y) -(CMPBconst a:(ANDL x y) [0]) && a.Uses == 1 -> (TESTB x y) -(CMPQconst a:(ANDQconst [c] x) [0]) && a.Uses == 1 -> (TESTQconst [c] x) -(CMPLconst a:(ANDLconst [c] x) [0]) && a.Uses == 1 -> (TESTLconst [c] x) -(CMPWconst a:(ANDLconst [c] x) [0]) && a.Uses == 1 -> (TESTWconst [int64(int16(c))] x) -(CMPBconst a:(ANDLconst [c] x) [0]) && a.Uses == 1 -> (TESTBconst [int64(int8(c))] x) +(CMPQconst a:(ANDQ x y) [0]) && a.Uses == 1 => (TESTQ x y) +(CMPLconst a:(ANDL x y) [0]) && a.Uses == 1 => (TESTL x y) +(CMPWconst a:(ANDL x y) [0]) && a.Uses == 1 => (TESTW x y) +(CMPBconst a:(ANDL x y) [0]) && a.Uses == 1 => (TESTB x y) +(CMPQconst a:(ANDQconst [c] x) [0]) && a.Uses == 1 => (TESTQconst [c] x) +(CMPLconst a:(ANDLconst [c] x) [0]) && a.Uses == 1 => (TESTLconst [c] x) +(CMPWconst a:(ANDLconst [c] x) [0]) && a.Uses == 1 => (TESTWconst [int16(c)] x) +(CMPBconst a:(ANDLconst [c] x) [0]) && a.Uses == 1 => (TESTBconst [int8(c)] x) // Convert TESTx to TESTxconst if possible. 
-(TESTQ (MOVQconst [c]) x) && is32Bit(c) -> (TESTQconst [c] x) -(TESTL (MOVLconst [c]) x) -> (TESTLconst [c] x) -(TESTW (MOVLconst [c]) x) -> (TESTWconst [c] x) -(TESTB (MOVLconst [c]) x) -> (TESTBconst [c] x) +(TESTQ (MOVQconst [c]) x) && is32Bit(c) => (TESTQconst [int32(c)] x) +(TESTL (MOVLconst [c]) x) => (TESTLconst [c] x) +(TESTW (MOVLconst [c]) x) => (TESTWconst [int16(c)] x) +(TESTB (MOVLconst [c]) x) => (TESTBconst [int8(c)] x) // TEST %reg,%reg is shorter than CMP -(CMPQconst x [0]) -> (TESTQ x x) -(CMPLconst x [0]) -> (TESTL x x) -(CMPWconst x [0]) -> (TESTW x x) -(CMPBconst x [0]) -> (TESTB x x) -(TESTQconst [-1] x) && x.Op != OpAMD64MOVQconst -> (TESTQ x x) -(TESTLconst [-1] x) && x.Op != OpAMD64MOVLconst -> (TESTL x x) -(TESTWconst [-1] x) && x.Op != OpAMD64MOVLconst -> (TESTW x x) -(TESTBconst [-1] x) && x.Op != OpAMD64MOVLconst -> (TESTB x x) +(CMPQconst x [0]) => (TESTQ x x) +(CMPLconst x [0]) => (TESTL x x) +(CMPWconst x [0]) => (TESTW x x) +(CMPBconst x [0]) => (TESTB x x) +(TESTQconst [-1] x) && x.Op != OpAMD64MOVQconst => (TESTQ x x) +(TESTLconst [-1] x) && x.Op != OpAMD64MOVLconst => (TESTL x x) +(TESTWconst [-1] x) && x.Op != OpAMD64MOVLconst => (TESTW x x) +(TESTBconst [-1] x) && x.Op != OpAMD64MOVLconst => (TESTB x x) // Convert LEAQ1 back to ADDQ if we can -(LEAQ1 [0] x y) && v.Aux == nil -> (ADDQ x y) +(LEAQ1 [0] x y) && v.Aux == nil => (ADDQ x y) // Combining byte loads into larger (unaligned) loads. // There are many ways these combinations could occur. 
This is @@ -1512,7 +1512,7 @@ && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh) - -> @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem) + => @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem) (OR(L|Q) x0:(MOVBload [i] {s} p0 mem) sh:(SHL(L|Q)const [8] x1:(MOVBload [i] {s} p1 mem))) @@ -1522,7 +1522,7 @@ && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh) - -> @mergePoint(b,x0,x1) (MOVWload [i] {s} p0 mem) + => @mergePoint(b,x0,x1) (MOVWload [i] {s} p0 mem) (OR(L|Q) x0:(MOVWload [i0] {s} p mem) sh:(SHL(L|Q)const [16] x1:(MOVWload [i1] {s} p mem))) @@ -1532,7 +1532,7 @@ && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh) - -> @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem) + => @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem) (OR(L|Q) x0:(MOVWload [i] {s} p0 mem) sh:(SHL(L|Q)const [16] x1:(MOVWload [i] {s} p1 mem))) @@ -1542,7 +1542,7 @@ && sequentialAddresses(p0, p1, 2) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh) - -> @mergePoint(b,x0,x1) (MOVLload [i] {s} p0 mem) + => @mergePoint(b,x0,x1) (MOVLload [i] {s} p0 mem) (ORQ x0:(MOVLload [i0] {s} p mem) sh:(SHLQconst [32] x1:(MOVLload [i1] {s} p mem))) @@ -1552,7 +1552,7 @@ && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh) - -> @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem) + => @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem) (ORQ x0:(MOVLload [i] {s} p0 mem) sh:(SHLQconst [32] x1:(MOVLload [i] {s} p1 mem))) @@ -1562,7 +1562,7 @@ && sequentialAddresses(p0, p1, 4) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh) - -> @mergePoint(b,x0,x1) (MOVQload [i] {s} p0 mem) + => @mergePoint(b,x0,x1) (MOVQload [i] {s} p0 mem) (OR(L|Q) s1:(SHL(L|Q)const [j1] x1:(MOVBload [i1] {s} p mem)) @@ -1579,7 +1579,7 @@ && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or) - -> @mergePoint(b,x0,x1,y) (OR(L|Q) (SHL(L|Q)const [j0] (MOVWload [i0] {s} p mem)) y) + => @mergePoint(b,x0,x1,y) (OR(L|Q) (SHL(L|Q)const [j0] (MOVWload [i0] 
{s} p mem)) y) (OR(L|Q) s1:(SHL(L|Q)const [j1] x1:(MOVBload [i] {s} p1 mem)) @@ -1596,7 +1596,7 @@ && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or) - -> @mergePoint(b,x0,x1,y) (OR(L|Q) (SHL(L|Q)const [j0] (MOVWload [i] {s} p0 mem)) y) + => @mergePoint(b,x0,x1,y) (OR(L|Q) (SHL(L|Q)const [j0] (MOVWload [i] {s} p0 mem)) y) (ORQ s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)) @@ -1613,7 +1613,7 @@ && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or) - -> @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j0] (MOVLload [i0] {s} p mem)) y) + => @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j0] (MOVLload [i0] {s} p mem)) y) (ORQ s1:(SHLQconst [j1] x1:(MOVWload [i] {s} p1 mem)) @@ -1630,7 +1630,7 @@ && sequentialAddresses(p0, p1, 2) && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or) - -> @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j0] (MOVLload [i] {s} p0 mem)) y) + => @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j0] (MOVLload [i] {s} p0 mem)) y) // Big-endian loads @@ -1643,7 +1643,7 @@ && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh) - -> @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWload [i0] {s} p mem)) + => @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWload [i0] {s} p mem)) (OR(L|Q) x1:(MOVBload [i] {s} p1 mem) @@ -1654,7 +1654,7 @@ && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh) - -> @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWload [i] {s} p0 mem)) + => @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWload [i] {s} p0 mem)) (OR(L|Q) r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)) @@ -1667,7 +1667,7 @@ && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, r0, r1, sh) - -> @mergePoint(b,x0,x1) (BSWAPL (MOVLload [i0] {s} p mem)) + => @mergePoint(b,x0,x1) (BSWAPL (MOVLload [i0] {s} p mem)) (OR(L|Q) r1:(ROLWconst [8] x1:(MOVWload [i] {s} p1 mem)) @@ -1680,7 +1680,7 @@ && sequentialAddresses(p0, p1, 2) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, r0, r1, sh) - 
-> @mergePoint(b,x0,x1) (BSWAPL (MOVLload [i] {s} p0 mem)) + => @mergePoint(b,x0,x1) (BSWAPL (MOVLload [i] {s} p0 mem)) (ORQ r1:(BSWAPL x1:(MOVLload [i1] {s} p mem)) @@ -1693,7 +1693,7 @@ && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, r0, r1, sh) - -> @mergePoint(b,x0,x1) (BSWAPQ (MOVQload [i0] {s} p mem)) + => @mergePoint(b,x0,x1) (BSWAPQ (MOVQload [i0] {s} p mem)) (ORQ r1:(BSWAPL x1:(MOVLload [i] {s} p1 mem)) @@ -1706,7 +1706,7 @@ && sequentialAddresses(p0, p1, 4) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, r0, r1, sh) - -> @mergePoint(b,x0,x1) (BSWAPQ (MOVQload [i] {s} p0 mem)) + => @mergePoint(b,x0,x1) (BSWAPQ (MOVQload [i] {s} p0 mem)) (OR(L|Q) s0:(SHL(L|Q)const [j0] x0:(MOVBload [i0] {s} p mem)) @@ -1723,7 +1723,7 @@ && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or) - -> @mergePoint(b,x0,x1,y) (OR(L|Q) (SHL(L|Q)const [j1] (ROLWconst [8] (MOVWload [i0] {s} p mem))) y) + => @mergePoint(b,x0,x1,y) (OR(L|Q) (SHL(L|Q)const [j1] (ROLWconst [8] (MOVWload [i0] {s} p mem))) y) (OR(L|Q) s0:(SHL(L|Q)const [j0] x0:(MOVBload [i] {s} p0 mem)) @@ -1740,7 +1740,7 @@ && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or) - -> @mergePoint(b,x0,x1,y) (OR(L|Q) (SHL(L|Q)const [j1] (ROLWconst [8] (MOVWload [i] {s} p0 mem))) y) + => @mergePoint(b,x0,x1,y) (OR(L|Q) (SHL(L|Q)const [j1] (ROLWconst [8] (MOVWload [i] {s} p0 mem))) y) (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) @@ -1759,7 +1759,7 @@ && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, r0, r1, s0, s1, or) - -> @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j1] (BSWAPL (MOVLload [i0] {s} p mem))) y) + => @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j1] (BSWAPL (MOVLload [i0] {s} p mem))) y) (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i] {s} p0 mem))) @@ -1778,20 +1778,20 @@ && sequentialAddresses(p0, p1, 2) && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, r0, r1, s0, s1, or) - -> 
@mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j1] (BSWAPL (MOVLload [i] {s} p0 mem))) y) + => @mergePoint(b,x0,x1,y) (ORQ (SHLQconst [j1] (BSWAPL (MOVLload [i] {s} p0 mem))) y) // Combine 2 byte stores + shift into rolw 8 + word store (MOVBstore [i] {s} p w x0:(MOVBstore [i-1] {s} p (SHRWconst [8] w) mem)) && x0.Uses == 1 && clobber(x0) - -> (MOVWstore [i-1] {s} p (ROLWconst [8] w) mem) + => (MOVWstore [i-1] {s} p (ROLWconst [8] w) mem) (MOVBstore [i] {s} p1 w x0:(MOVBstore [i] {s} p0 (SHRWconst [8] w) mem)) && x0.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x0) - -> (MOVWstore [i] {s} p0 (ROLWconst [8] w) mem) + => (MOVWstore [i] {s} p0 (ROLWconst [8] w) mem) // Combine stores + shifts into bswap and larger (unaligned) stores (MOVBstore [i] {s} p w @@ -1802,7 +1802,7 @@ && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2) - -> (MOVLstore [i-3] {s} p (BSWAPL w) mem) + => (MOVLstore [i-3] {s} p (BSWAPL w) mem) (MOVBstore [i] {s} p3 w x2:(MOVBstore [i] {s} p2 (SHRLconst [8] w) x1:(MOVBstore [i] {s} p1 (SHRLconst [16] w) @@ -1814,7 +1814,7 @@ && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && clobber(x0, x1, x2) - -> (MOVLstore [i] {s} p0 (BSWAPL w) mem) + => (MOVLstore [i] {s} p0 (BSWAPL w) mem) (MOVBstore [i] {s} p w x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w) @@ -1832,7 +1832,7 @@ && x5.Uses == 1 && x6.Uses == 1 && clobber(x0, x1, x2, x3, x4, x5, x6) - -> (MOVQstore [i-7] {s} p (BSWAPQ w) mem) + => (MOVQstore [i-7] {s} p (BSWAPQ w) mem) (MOVBstore [i] {s} p7 w x6:(MOVBstore [i] {s} p6 (SHRQconst [8] w) x5:(MOVBstore [i] {s} p5 (SHRQconst [16] w) @@ -1856,114 +1856,114 @@ && sequentialAddresses(p5, p6, 1) && sequentialAddresses(p6, p7, 1) && clobber(x0, x1, x2, x3, x4, x5, x6) - -> (MOVQstore [i] {s} p0 (BSWAPQ w) mem) + => (MOVQstore [i] {s} p0 (BSWAPQ w) mem) // Combine constant stores into larger (unaligned) stores. 
(MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem)) && x.Uses == 1 - && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() + && a.Off() + 1 == c.Off() && clobber(x) - -> (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem) + => (MOVWstoreconst [makeValAndOff64(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p mem) (MOVBstoreconst [a] {s} p x:(MOVBstoreconst [c] {s} p mem)) && x.Uses == 1 - && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() + && a.Off() + 1 == c.Off() && clobber(x) - -> (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem) + => (MOVWstoreconst [makeValAndOff64(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p mem) (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem)) && x.Uses == 1 - && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() + && a.Off() + 2 == c.Off() && clobber(x) - -> (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem) + => (MOVLstoreconst [makeValAndOff64(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p mem) (MOVWstoreconst [a] {s} p x:(MOVWstoreconst [c] {s} p mem)) && x.Uses == 1 - && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() + && a.Off() + 2 == c.Off() && clobber(x) - -> (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem) + => (MOVLstoreconst [makeValAndOff64(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p mem) (MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem)) && x.Uses == 1 - && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() + && a.Off() + 4 == c.Off() && clobber(x) - -> (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem) + => (MOVQstore [a.Off32()] {s} p (MOVQconst [a.Val()&0xffffffff | c.Val()<<32]) mem) (MOVLstoreconst [a] {s} p x:(MOVLstoreconst [c] {s} p mem)) && x.Uses == 1 - && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() + && a.Off() 
+ 4 == c.Off() && clobber(x) - -> (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem) + => (MOVQstore [a.Off32()] {s} p (MOVQconst [a.Val()&0xffffffff | c.Val()<<32]) mem) (MOVQstoreconst [c] {s} p x:(MOVQstoreconst [c2] {s} p mem)) && config.useSSE && x.Uses == 1 - && ValAndOff(c2).Off() + 8 == ValAndOff(c).Off() - && ValAndOff(c).Val() == 0 - && ValAndOff(c2).Val() == 0 + && c2.Off() + 8 == c.Off() + && c.Val() == 0 + && c2.Val() == 0 && clobber(x) - -> (MOVOstore [ValAndOff(c2).Off()] {s} p (MOVOconst [0]) mem) + => (MOVOstore [c2.Off32()] {s} p (MOVOconst [0]) mem) // Combine stores into larger (unaligned) stores. Little endian. (MOVBstore [i] {s} p (SHR(W|L|Q)const [8] w) x:(MOVBstore [i-1] {s} p w mem)) && x.Uses == 1 && clobber(x) - -> (MOVWstore [i-1] {s} p w mem) + => (MOVWstore [i-1] {s} p w mem) (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHR(W|L|Q)const [8] w) mem)) && x.Uses == 1 && clobber(x) - -> (MOVWstore [i] {s} p w mem) + => (MOVWstore [i] {s} p w mem) (MOVBstore [i] {s} p (SHR(L|Q)const [j] w) x:(MOVBstore [i-1] {s} p w0:(SHR(L|Q)const [j-8] w) mem)) && x.Uses == 1 && clobber(x) - -> (MOVWstore [i-1] {s} p w0 mem) + => (MOVWstore [i-1] {s} p w0 mem) (MOVBstore [i] {s} p1 (SHR(W|L|Q)const [8] w) x:(MOVBstore [i] {s} p0 w mem)) && x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x) - -> (MOVWstore [i] {s} p0 w mem) + => (MOVWstore [i] {s} p0 w mem) (MOVBstore [i] {s} p0 w x:(MOVBstore [i] {s} p1 (SHR(W|L|Q)const [8] w) mem)) && x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x) - -> (MOVWstore [i] {s} p0 w mem) + => (MOVWstore [i] {s} p0 w mem) (MOVBstore [i] {s} p1 (SHR(L|Q)const [j] w) x:(MOVBstore [i] {s} p0 w0:(SHR(L|Q)const [j-8] w) mem)) && x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x) - -> (MOVWstore [i] {s} p0 w0 mem) + => (MOVWstore [i] {s} p0 w0 mem) (MOVWstore [i] {s} p (SHR(L|Q)const [16] w) x:(MOVWstore [i-2] {s} p w mem)) && x.Uses == 1 && 
clobber(x) - -> (MOVLstore [i-2] {s} p w mem) + => (MOVLstore [i-2] {s} p w mem) (MOVWstore [i] {s} p (SHR(L|Q)const [j] w) x:(MOVWstore [i-2] {s} p w0:(SHR(L|Q)const [j-16] w) mem)) && x.Uses == 1 && clobber(x) - -> (MOVLstore [i-2] {s} p w0 mem) + => (MOVLstore [i-2] {s} p w0 mem) (MOVWstore [i] {s} p1 (SHR(L|Q)const [16] w) x:(MOVWstore [i] {s} p0 w mem)) && x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x) - -> (MOVLstore [i] {s} p0 w mem) + => (MOVLstore [i] {s} p0 w mem) (MOVWstore [i] {s} p1 (SHR(L|Q)const [j] w) x:(MOVWstore [i] {s} p0 w0:(SHR(L|Q)const [j-16] w) mem)) && x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x) - -> (MOVLstore [i] {s} p0 w0 mem) + => (MOVLstore [i] {s} p0 w0 mem) (MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem)) && x.Uses == 1 && clobber(x) - -> (MOVQstore [i-4] {s} p w mem) + => (MOVQstore [i-4] {s} p w mem) (MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem)) && x.Uses == 1 && clobber(x) - -> (MOVQstore [i-4] {s} p w0 mem) + => (MOVQstore [i-4] {s} p w0 mem) (MOVLstore [i] {s} p1 (SHRQconst [32] w) x:(MOVLstore [i] {s} p0 w mem)) && x.Uses == 1 && sequentialAddresses(p0, p1, 4) && clobber(x) - -> (MOVQstore [i] {s} p0 w mem) + => (MOVQstore [i] {s} p0 w mem) (MOVLstore [i] {s} p1 (SHRQconst [j] w) x:(MOVLstore [i] {s} p0 w0:(SHRQconst [j-32] w) mem)) && x.Uses == 1 && sequentialAddresses(p0, p1, 4) && clobber(x) - -> (MOVQstore [i] {s} p0 w0 mem) + => (MOVQstore [i] {s} p0 w0 mem) (MOVBstore [i] {s} p x1:(MOVBload [j] {s2} p2 mem) @@ -1973,7 +1973,7 @@ && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2) - -> (MOVWstore [i-1] {s} p (MOVWload [j-1] {s2} p2 mem) mem) + => (MOVWstore [i-1] {s} p (MOVWload [j-1] {s2} p2 mem) mem) (MOVWstore [i] {s} p x1:(MOVWload [j] {s2} p2 mem) @@ -1983,7 +1983,7 @@ && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2) - -> (MOVLstore [i-2] {s} p (MOVLload [j-2] {s2} p2 mem) mem) + => (MOVLstore [i-2] 
{s} p (MOVLload [j-2] {s2} p2 mem) mem) (MOVLstore [i] {s} p x1:(MOVLload [j] {s2} p2 mem) @@ -1993,7 +1993,7 @@ && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2) - -> (MOVQstore [i-4] {s} p (MOVQload [j-4] {s2} p2 mem) mem) + => (MOVQstore [i-4] {s} p (MOVQload [j-4] {s2} p2 mem) mem) (MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) -> (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem) diff --git a/src/cmd/compile/internal/ssa/op.go b/src/cmd/compile/internal/ssa/op.go index b22b095401..9b45dd53c7 100644 --- a/src/cmd/compile/internal/ssa/op.go +++ b/src/cmd/compile/internal/ssa/op.go @@ -297,6 +297,13 @@ func makeValAndOff32(val, off int32) ValAndOff { return ValAndOff(int64(val)<<32 + int64(uint32(off))) } +func makeValAndOff64(val, off int64) ValAndOff { + if !validValAndOff(val, off) { + panic("invalid makeValAndOff64") + } + return ValAndOff(val<<32 + int64(uint32(off))) +} + func (x ValAndOff) canAdd(off int64) bool { newoff := x.Off() + off return newoff == int64(int32(newoff)) diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 37a2a9a9ff..e57d0f3aac 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -6904,7 +6904,7 @@ func rewriteValueAMD64_OpAMD64CMPBconst(v *Value) bool { // cond: a.Uses == 1 // result: (TESTB x y) for { - if v.AuxInt != 0 { + if auxIntToInt8(v.AuxInt) != 0 { break } a := v_0 @@ -6922,29 +6922,29 @@ func rewriteValueAMD64_OpAMD64CMPBconst(v *Value) bool { } // match: (CMPBconst a:(ANDLconst [c] x) [0]) // cond: a.Uses == 1 - // result: (TESTBconst [int64(int8(c))] x) + // result: (TESTBconst [int8(c)] x) for { - if v.AuxInt != 0 { + if auxIntToInt8(v.AuxInt) != 0 { break } a := v_0 if a.Op != OpAMD64ANDLconst { break } - c := a.AuxInt + c := auxIntToInt32(a.AuxInt) x := a.Args[0] if !(a.Uses == 1) { break } v.reset(OpAMD64TESTBconst) - v.AuxInt = 
int64(int8(c)) + v.AuxInt = int8ToAuxInt(int8(c)) v.AddArg(x) return true } // match: (CMPBconst x [0]) // result: (TESTB x x) for { - if v.AuxInt != 0 { + if auxIntToInt8(v.AuxInt) != 0 { break } x := v_0 @@ -7305,7 +7305,7 @@ func rewriteValueAMD64_OpAMD64CMPLconst(v *Value) bool { // cond: a.Uses == 1 // result: (TESTL x y) for { - if v.AuxInt != 0 { + if auxIntToInt32(v.AuxInt) != 0 { break } a := v_0 @@ -7325,27 +7325,27 @@ func rewriteValueAMD64_OpAMD64CMPLconst(v *Value) bool { // cond: a.Uses == 1 // result: (TESTLconst [c] x) for { - if v.AuxInt != 0 { + if auxIntToInt32(v.AuxInt) != 0 { break } a := v_0 if a.Op != OpAMD64ANDLconst { break } - c := a.AuxInt + c := auxIntToInt32(a.AuxInt) x := a.Args[0] if !(a.Uses == 1) { break } v.reset(OpAMD64TESTLconst) - v.AuxInt = c + v.AuxInt = int32ToAuxInt(c) v.AddArg(x) return true } // match: (CMPLconst x [0]) // result: (TESTL x x) for { - if v.AuxInt != 0 { + if auxIntToInt32(v.AuxInt) != 0 { break } x := v_0 @@ -7886,7 +7886,7 @@ func rewriteValueAMD64_OpAMD64CMPQconst(v *Value) bool { // cond: a.Uses == 1 // result: (TESTQ x y) for { - if v.AuxInt != 0 { + if auxIntToInt32(v.AuxInt) != 0 { break } a := v_0 @@ -7906,27 +7906,27 @@ func rewriteValueAMD64_OpAMD64CMPQconst(v *Value) bool { // cond: a.Uses == 1 // result: (TESTQconst [c] x) for { - if v.AuxInt != 0 { + if auxIntToInt32(v.AuxInt) != 0 { break } a := v_0 if a.Op != OpAMD64ANDQconst { break } - c := a.AuxInt + c := auxIntToInt32(a.AuxInt) x := a.Args[0] if !(a.Uses == 1) { break } v.reset(OpAMD64TESTQconst) - v.AuxInt = c + v.AuxInt = int32ToAuxInt(c) v.AddArg(x) return true } // match: (CMPQconst x [0]) // result: (TESTQ x x) for { - if v.AuxInt != 0 { + if auxIntToInt32(v.AuxInt) != 0 { break } x := v_0 @@ -8272,7 +8272,7 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value) bool { // cond: a.Uses == 1 // result: (TESTW x y) for { - if v.AuxInt != 0 { + if auxIntToInt16(v.AuxInt) != 0 { break } a := v_0 @@ -8290,29 +8290,29 @@ func 
rewriteValueAMD64_OpAMD64CMPWconst(v *Value) bool { } // match: (CMPWconst a:(ANDLconst [c] x) [0]) // cond: a.Uses == 1 - // result: (TESTWconst [int64(int16(c))] x) + // result: (TESTWconst [int16(c)] x) for { - if v.AuxInt != 0 { + if auxIntToInt16(v.AuxInt) != 0 { break } a := v_0 if a.Op != OpAMD64ANDLconst { break } - c := a.AuxInt + c := auxIntToInt32(a.AuxInt) x := a.Args[0] if !(a.Uses == 1) { break } v.reset(OpAMD64TESTWconst) - v.AuxInt = int64(int16(c)) + v.AuxInt = int16ToAuxInt(int16(c)) v.AddArg(x) return true } // match: (CMPWconst x [0]) // result: (TESTW x x) for { - if v.AuxInt != 0 { + if auxIntToInt16(v.AuxInt) != 0 { break } x := v_0 @@ -9409,7 +9409,7 @@ func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool { // cond: v.Aux == nil // result: (ADDQ x y) for { - if v.AuxInt != 0 { + if auxIntToInt32(v.AuxInt) != 0 { break } x := v_0 @@ -10663,12 +10663,12 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { // cond: x0.Uses == 1 && clobber(x0) // result: (MOVWstore [i-1] {s} p (ROLWconst [8] w) mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 w := v_1 x0 := v_2 - if x0.Op != OpAMD64MOVBstore || x0.AuxInt != i-1 || x0.Aux != s { + if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i-1 || auxToSym(x0.Aux) != s { break } mem := x0.Args[2] @@ -10676,14 +10676,14 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { break } x0_1 := x0.Args[1] - if x0_1.Op != OpAMD64SHRWconst || x0_1.AuxInt != 8 || w != x0_1.Args[0] || !(x0.Uses == 1 && clobber(x0)) { + if x0_1.Op != OpAMD64SHRWconst || auxIntToInt8(x0_1.AuxInt) != 8 || w != x0_1.Args[0] || !(x0.Uses == 1 && clobber(x0)) { break } v.reset(OpAMD64MOVWstore) - v.AuxInt = i - 1 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 1) + v.Aux = symToAux(s) v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, w.Type) - v0.AuxInt = 8 + v0.AuxInt = int8ToAuxInt(8) v0.AddArg(w) v.AddArg3(p, v0, mem) return true @@ -10692,25 +10692,25 @@ func 
rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { // cond: x0.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x0) // result: (MOVWstore [i] {s} p0 (ROLWconst [8] w) mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p1 := v_0 w := v_1 x0 := v_2 - if x0.Op != OpAMD64MOVBstore || x0.AuxInt != i || x0.Aux != s { + if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s { break } mem := x0.Args[2] p0 := x0.Args[0] x0_1 := x0.Args[1] - if x0_1.Op != OpAMD64SHRWconst || x0_1.AuxInt != 8 || w != x0_1.Args[0] || !(x0.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x0)) { + if x0_1.Op != OpAMD64SHRWconst || auxIntToInt8(x0_1.AuxInt) != 8 || w != x0_1.Args[0] || !(x0.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x0)) { break } v.reset(OpAMD64MOVWstore) - v.AuxInt = i - v.Aux = s + v.AuxInt = int32ToAuxInt(i) + v.Aux = symToAux(s) v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, w.Type) - v0.AuxInt = 8 + v0.AuxInt = int8ToAuxInt(8) v0.AddArg(w) v.AddArg3(p0, v0, mem) return true @@ -10719,12 +10719,12 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2) // result: (MOVLstore [i-3] {s} p (BSWAPL w) mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 w := v_1 x2 := v_2 - if x2.Op != OpAMD64MOVBstore || x2.AuxInt != i-1 || x2.Aux != s { + if x2.Op != OpAMD64MOVBstore || auxIntToInt32(x2.AuxInt) != i-1 || auxToSym(x2.Aux) != s { break } _ = x2.Args[2] @@ -10732,11 +10732,11 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { break } x2_1 := x2.Args[1] - if x2_1.Op != OpAMD64SHRLconst || x2_1.AuxInt != 8 || w != x2_1.Args[0] { + if x2_1.Op != OpAMD64SHRLconst || auxIntToInt8(x2_1.AuxInt) != 8 || w != x2_1.Args[0] { break } x1 := x2.Args[2] - if x1.Op != OpAMD64MOVBstore || x1.AuxInt != i-2 || x1.Aux != s { + if x1.Op != OpAMD64MOVBstore || 
auxIntToInt32(x1.AuxInt) != i-2 || auxToSym(x1.Aux) != s { break } _ = x1.Args[2] @@ -10744,11 +10744,11 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { break } x1_1 := x1.Args[1] - if x1_1.Op != OpAMD64SHRLconst || x1_1.AuxInt != 16 || w != x1_1.Args[0] { + if x1_1.Op != OpAMD64SHRLconst || auxIntToInt8(x1_1.AuxInt) != 16 || w != x1_1.Args[0] { break } x0 := x1.Args[2] - if x0.Op != OpAMD64MOVBstore || x0.AuxInt != i-3 || x0.Aux != s { + if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i-3 || auxToSym(x0.Aux) != s { break } mem := x0.Args[2] @@ -10756,12 +10756,12 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { break } x0_1 := x0.Args[1] - if x0_1.Op != OpAMD64SHRLconst || x0_1.AuxInt != 24 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)) { + if x0_1.Op != OpAMD64SHRLconst || auxIntToInt8(x0_1.AuxInt) != 24 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)) { break } v.reset(OpAMD64MOVLstore) - v.AuxInt = i - 3 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 3) + v.Aux = symToAux(s) v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, w.Type) v0.AddArg(w) v.AddArg3(p, v0, mem) @@ -10771,43 +10771,43 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && clobber(x0, x1, x2) // result: (MOVLstore [i] {s} p0 (BSWAPL w) mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p3 := v_0 w := v_1 x2 := v_2 - if x2.Op != OpAMD64MOVBstore || x2.AuxInt != i || x2.Aux != s { + if x2.Op != OpAMD64MOVBstore || auxIntToInt32(x2.AuxInt) != i || auxToSym(x2.Aux) != s { break } _ = x2.Args[2] p2 := x2.Args[0] x2_1 := x2.Args[1] - if x2_1.Op != OpAMD64SHRLconst || x2_1.AuxInt != 8 || w != x2_1.Args[0] { + if x2_1.Op != OpAMD64SHRLconst || auxIntToInt8(x2_1.AuxInt) != 8 || w != 
x2_1.Args[0] { break } x1 := x2.Args[2] - if x1.Op != OpAMD64MOVBstore || x1.AuxInt != i || x1.Aux != s { + if x1.Op != OpAMD64MOVBstore || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s { break } _ = x1.Args[2] p1 := x1.Args[0] x1_1 := x1.Args[1] - if x1_1.Op != OpAMD64SHRLconst || x1_1.AuxInt != 16 || w != x1_1.Args[0] { + if x1_1.Op != OpAMD64SHRLconst || auxIntToInt8(x1_1.AuxInt) != 16 || w != x1_1.Args[0] { break } x0 := x1.Args[2] - if x0.Op != OpAMD64MOVBstore || x0.AuxInt != i || x0.Aux != s { + if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s { break } mem := x0.Args[2] p0 := x0.Args[0] x0_1 := x0.Args[1] - if x0_1.Op != OpAMD64SHRLconst || x0_1.AuxInt != 24 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && clobber(x0, x1, x2)) { + if x0_1.Op != OpAMD64SHRLconst || auxIntToInt8(x0_1.AuxInt) != 24 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && clobber(x0, x1, x2)) { break } v.reset(OpAMD64MOVLstore) - v.AuxInt = i - v.Aux = s + v.AuxInt = int32ToAuxInt(i) + v.Aux = symToAux(s) v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, w.Type) v0.AddArg(w) v.AddArg3(p0, v0, mem) @@ -10817,12 +10817,12 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0, x1, x2, x3, x4, x5, x6) // result: (MOVQstore [i-7] {s} p (BSWAPQ w) mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 w := v_1 x6 := v_2 - if x6.Op != OpAMD64MOVBstore || x6.AuxInt != i-1 || x6.Aux != s { + if x6.Op != OpAMD64MOVBstore || auxIntToInt32(x6.AuxInt) != i-1 || auxToSym(x6.Aux) != s { break } _ = x6.Args[2] @@ -10830,11 +10830,11 
@@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { break } x6_1 := x6.Args[1] - if x6_1.Op != OpAMD64SHRQconst || x6_1.AuxInt != 8 || w != x6_1.Args[0] { + if x6_1.Op != OpAMD64SHRQconst || auxIntToInt8(x6_1.AuxInt) != 8 || w != x6_1.Args[0] { break } x5 := x6.Args[2] - if x5.Op != OpAMD64MOVBstore || x5.AuxInt != i-2 || x5.Aux != s { + if x5.Op != OpAMD64MOVBstore || auxIntToInt32(x5.AuxInt) != i-2 || auxToSym(x5.Aux) != s { break } _ = x5.Args[2] @@ -10842,11 +10842,11 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { break } x5_1 := x5.Args[1] - if x5_1.Op != OpAMD64SHRQconst || x5_1.AuxInt != 16 || w != x5_1.Args[0] { + if x5_1.Op != OpAMD64SHRQconst || auxIntToInt8(x5_1.AuxInt) != 16 || w != x5_1.Args[0] { break } x4 := x5.Args[2] - if x4.Op != OpAMD64MOVBstore || x4.AuxInt != i-3 || x4.Aux != s { + if x4.Op != OpAMD64MOVBstore || auxIntToInt32(x4.AuxInt) != i-3 || auxToSym(x4.Aux) != s { break } _ = x4.Args[2] @@ -10854,11 +10854,11 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { break } x4_1 := x4.Args[1] - if x4_1.Op != OpAMD64SHRQconst || x4_1.AuxInt != 24 || w != x4_1.Args[0] { + if x4_1.Op != OpAMD64SHRQconst || auxIntToInt8(x4_1.AuxInt) != 24 || w != x4_1.Args[0] { break } x3 := x4.Args[2] - if x3.Op != OpAMD64MOVBstore || x3.AuxInt != i-4 || x3.Aux != s { + if x3.Op != OpAMD64MOVBstore || auxIntToInt32(x3.AuxInt) != i-4 || auxToSym(x3.Aux) != s { break } _ = x3.Args[2] @@ -10866,11 +10866,11 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { break } x3_1 := x3.Args[1] - if x3_1.Op != OpAMD64SHRQconst || x3_1.AuxInt != 32 || w != x3_1.Args[0] { + if x3_1.Op != OpAMD64SHRQconst || auxIntToInt8(x3_1.AuxInt) != 32 || w != x3_1.Args[0] { break } x2 := x3.Args[2] - if x2.Op != OpAMD64MOVBstore || x2.AuxInt != i-5 || x2.Aux != s { + if x2.Op != OpAMD64MOVBstore || auxIntToInt32(x2.AuxInt) != i-5 || auxToSym(x2.Aux) != s { break } _ = x2.Args[2] @@ -10878,11 +10878,11 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) 
bool { break } x2_1 := x2.Args[1] - if x2_1.Op != OpAMD64SHRQconst || x2_1.AuxInt != 40 || w != x2_1.Args[0] { + if x2_1.Op != OpAMD64SHRQconst || auxIntToInt8(x2_1.AuxInt) != 40 || w != x2_1.Args[0] { break } x1 := x2.Args[2] - if x1.Op != OpAMD64MOVBstore || x1.AuxInt != i-6 || x1.Aux != s { + if x1.Op != OpAMD64MOVBstore || auxIntToInt32(x1.AuxInt) != i-6 || auxToSym(x1.Aux) != s { break } _ = x1.Args[2] @@ -10890,11 +10890,11 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { break } x1_1 := x1.Args[1] - if x1_1.Op != OpAMD64SHRQconst || x1_1.AuxInt != 48 || w != x1_1.Args[0] { + if x1_1.Op != OpAMD64SHRQconst || auxIntToInt8(x1_1.AuxInt) != 48 || w != x1_1.Args[0] { break } x0 := x1.Args[2] - if x0.Op != OpAMD64MOVBstore || x0.AuxInt != i-7 || x0.Aux != s { + if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i-7 || auxToSym(x0.Aux) != s { break } mem := x0.Args[2] @@ -10902,12 +10902,12 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { break } x0_1 := x0.Args[1] - if x0_1.Op != OpAMD64SHRQconst || x0_1.AuxInt != 56 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0, x1, x2, x3, x4, x5, x6)) { + if x0_1.Op != OpAMD64SHRQconst || auxIntToInt8(x0_1.AuxInt) != 56 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0, x1, x2, x3, x4, x5, x6)) { break } v.reset(OpAMD64MOVQstore) - v.AuxInt = i - 7 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 7) + v.Aux = symToAux(s) v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, w.Type) v0.AddArg(w) v.AddArg3(p, v0, mem) @@ -10917,83 +10917,83 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && 
sequentialAddresses(p3, p4, 1) && sequentialAddresses(p4, p5, 1) && sequentialAddresses(p5, p6, 1) && sequentialAddresses(p6, p7, 1) && clobber(x0, x1, x2, x3, x4, x5, x6) // result: (MOVQstore [i] {s} p0 (BSWAPQ w) mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p7 := v_0 w := v_1 x6 := v_2 - if x6.Op != OpAMD64MOVBstore || x6.AuxInt != i || x6.Aux != s { + if x6.Op != OpAMD64MOVBstore || auxIntToInt32(x6.AuxInt) != i || auxToSym(x6.Aux) != s { break } _ = x6.Args[2] p6 := x6.Args[0] x6_1 := x6.Args[1] - if x6_1.Op != OpAMD64SHRQconst || x6_1.AuxInt != 8 || w != x6_1.Args[0] { + if x6_1.Op != OpAMD64SHRQconst || auxIntToInt8(x6_1.AuxInt) != 8 || w != x6_1.Args[0] { break } x5 := x6.Args[2] - if x5.Op != OpAMD64MOVBstore || x5.AuxInt != i || x5.Aux != s { + if x5.Op != OpAMD64MOVBstore || auxIntToInt32(x5.AuxInt) != i || auxToSym(x5.Aux) != s { break } _ = x5.Args[2] p5 := x5.Args[0] x5_1 := x5.Args[1] - if x5_1.Op != OpAMD64SHRQconst || x5_1.AuxInt != 16 || w != x5_1.Args[0] { + if x5_1.Op != OpAMD64SHRQconst || auxIntToInt8(x5_1.AuxInt) != 16 || w != x5_1.Args[0] { break } x4 := x5.Args[2] - if x4.Op != OpAMD64MOVBstore || x4.AuxInt != i || x4.Aux != s { + if x4.Op != OpAMD64MOVBstore || auxIntToInt32(x4.AuxInt) != i || auxToSym(x4.Aux) != s { break } _ = x4.Args[2] p4 := x4.Args[0] x4_1 := x4.Args[1] - if x4_1.Op != OpAMD64SHRQconst || x4_1.AuxInt != 24 || w != x4_1.Args[0] { + if x4_1.Op != OpAMD64SHRQconst || auxIntToInt8(x4_1.AuxInt) != 24 || w != x4_1.Args[0] { break } x3 := x4.Args[2] - if x3.Op != OpAMD64MOVBstore || x3.AuxInt != i || x3.Aux != s { + if x3.Op != OpAMD64MOVBstore || auxIntToInt32(x3.AuxInt) != i || auxToSym(x3.Aux) != s { break } _ = x3.Args[2] p3 := x3.Args[0] x3_1 := x3.Args[1] - if x3_1.Op != OpAMD64SHRQconst || x3_1.AuxInt != 32 || w != x3_1.Args[0] { + if x3_1.Op != OpAMD64SHRQconst || auxIntToInt8(x3_1.AuxInt) != 32 || w != x3_1.Args[0] { break } x2 := x3.Args[2] - if x2.Op != 
OpAMD64MOVBstore || x2.AuxInt != i || x2.Aux != s { + if x2.Op != OpAMD64MOVBstore || auxIntToInt32(x2.AuxInt) != i || auxToSym(x2.Aux) != s { break } _ = x2.Args[2] p2 := x2.Args[0] x2_1 := x2.Args[1] - if x2_1.Op != OpAMD64SHRQconst || x2_1.AuxInt != 40 || w != x2_1.Args[0] { + if x2_1.Op != OpAMD64SHRQconst || auxIntToInt8(x2_1.AuxInt) != 40 || w != x2_1.Args[0] { break } x1 := x2.Args[2] - if x1.Op != OpAMD64MOVBstore || x1.AuxInt != i || x1.Aux != s { + if x1.Op != OpAMD64MOVBstore || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s { break } _ = x1.Args[2] p1 := x1.Args[0] x1_1 := x1.Args[1] - if x1_1.Op != OpAMD64SHRQconst || x1_1.AuxInt != 48 || w != x1_1.Args[0] { + if x1_1.Op != OpAMD64SHRQconst || auxIntToInt8(x1_1.AuxInt) != 48 || w != x1_1.Args[0] { break } x0 := x1.Args[2] - if x0.Op != OpAMD64MOVBstore || x0.AuxInt != i || x0.Aux != s { + if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s { break } mem := x0.Args[2] p0 := x0.Args[0] x0_1 := x0.Args[1] - if x0_1.Op != OpAMD64SHRQconst || x0_1.AuxInt != 56 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && sequentialAddresses(p3, p4, 1) && sequentialAddresses(p4, p5, 1) && sequentialAddresses(p5, p6, 1) && sequentialAddresses(p6, p7, 1) && clobber(x0, x1, x2, x3, x4, x5, x6)) { + if x0_1.Op != OpAMD64SHRQconst || auxIntToInt8(x0_1.AuxInt) != 56 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && sequentialAddresses(p3, p4, 1) && sequentialAddresses(p4, p5, 1) && sequentialAddresses(p5, p6, 1) && sequentialAddresses(p6, p7, 1) && clobber(x0, x1, x2, x3, x4, x5, x6)) { break } 
v.reset(OpAMD64MOVQstore) - v.AuxInt = i - v.Aux = s + v.AuxInt = int32ToAuxInt(i) + v.Aux = symToAux(s) v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, w.Type) v0.AddArg(w) v.AddArg3(p0, v0, mem) @@ -11003,15 +11003,15 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: (MOVWstore [i-1] {s} p w mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 - if v_1.Op != OpAMD64SHRWconst || v_1.AuxInt != 8 { + if v_1.Op != OpAMD64SHRWconst || auxIntToInt8(v_1.AuxInt) != 8 { break } w := v_1.Args[0] x := v_2 - if x.Op != OpAMD64MOVBstore || x.AuxInt != i-1 || x.Aux != s { + if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s { break } mem := x.Args[2] @@ -11019,8 +11019,8 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { break } v.reset(OpAMD64MOVWstore) - v.AuxInt = i - 1 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 1) + v.Aux = symToAux(s) v.AddArg3(p, w, mem) return true } @@ -11028,15 +11028,15 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: (MOVWstore [i-1] {s} p w mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 - if v_1.Op != OpAMD64SHRLconst || v_1.AuxInt != 8 { + if v_1.Op != OpAMD64SHRLconst || auxIntToInt8(v_1.AuxInt) != 8 { break } w := v_1.Args[0] x := v_2 - if x.Op != OpAMD64MOVBstore || x.AuxInt != i-1 || x.Aux != s { + if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s { break } mem := x.Args[2] @@ -11044,8 +11044,8 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { break } v.reset(OpAMD64MOVWstore) - v.AuxInt = i - 1 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 1) + v.Aux = symToAux(s) v.AddArg3(p, w, mem) return true } @@ -11053,15 +11053,15 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: (MOVWstore [i-1] {s} p w 
mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 - if v_1.Op != OpAMD64SHRQconst || v_1.AuxInt != 8 { + if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 8 { break } w := v_1.Args[0] x := v_2 - if x.Op != OpAMD64MOVBstore || x.AuxInt != i-1 || x.Aux != s { + if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s { break } mem := x.Args[2] @@ -11069,8 +11069,8 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { break } v.reset(OpAMD64MOVWstore) - v.AuxInt = i - 1 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 1) + v.Aux = symToAux(s) v.AddArg3(p, w, mem) return true } @@ -11078,12 +11078,12 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: (MOVWstore [i] {s} p w mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 w := v_1 x := v_2 - if x.Op != OpAMD64MOVBstore || x.AuxInt != i+1 || x.Aux != s { + if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i+1 || auxToSym(x.Aux) != s { break } mem := x.Args[2] @@ -11091,12 +11091,12 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { break } x_1 := x.Args[1] - if x_1.Op != OpAMD64SHRWconst || x_1.AuxInt != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) { + if x_1.Op != OpAMD64SHRWconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) { break } v.reset(OpAMD64MOVWstore) - v.AuxInt = i - v.Aux = s + v.AuxInt = int32ToAuxInt(i) + v.Aux = symToAux(s) v.AddArg3(p, w, mem) return true } @@ -11104,12 +11104,12 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: (MOVWstore [i] {s} p w mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 w := v_1 x := v_2 - if x.Op != OpAMD64MOVBstore || x.AuxInt != i+1 || x.Aux != s { + if x.Op != OpAMD64MOVBstore || 
auxIntToInt32(x.AuxInt) != i+1 || auxToSym(x.Aux) != s { break } mem := x.Args[2] @@ -11117,12 +11117,12 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { break } x_1 := x.Args[1] - if x_1.Op != OpAMD64SHRLconst || x_1.AuxInt != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) { + if x_1.Op != OpAMD64SHRLconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) { break } v.reset(OpAMD64MOVWstore) - v.AuxInt = i - v.Aux = s + v.AuxInt = int32ToAuxInt(i) + v.Aux = symToAux(s) v.AddArg3(p, w, mem) return true } @@ -11130,12 +11130,12 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: (MOVWstore [i] {s} p w mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 w := v_1 x := v_2 - if x.Op != OpAMD64MOVBstore || x.AuxInt != i+1 || x.Aux != s { + if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i+1 || auxToSym(x.Aux) != s { break } mem := x.Args[2] @@ -11143,12 +11143,12 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { break } x_1 := x.Args[1] - if x_1.Op != OpAMD64SHRQconst || x_1.AuxInt != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) { + if x_1.Op != OpAMD64SHRQconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) { break } v.reset(OpAMD64MOVWstore) - v.AuxInt = i - v.Aux = s + v.AuxInt = int32ToAuxInt(i) + v.Aux = symToAux(s) v.AddArg3(p, w, mem) return true } @@ -11156,16 +11156,16 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: (MOVWstore [i-1] {s} p w0 mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 if v_1.Op != OpAMD64SHRLconst { break } - j := v_1.AuxInt + j := auxIntToInt8(v_1.AuxInt) w := v_1.Args[0] x := v_2 - if x.Op != OpAMD64MOVBstore || x.AuxInt != i-1 || x.Aux != s { + if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 
|| auxToSym(x.Aux) != s { break } mem := x.Args[2] @@ -11173,12 +11173,12 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { break } w0 := x.Args[1] - if w0.Op != OpAMD64SHRLconst || w0.AuxInt != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { + if w0.Op != OpAMD64SHRLconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { break } v.reset(OpAMD64MOVWstore) - v.AuxInt = i - 1 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 1) + v.Aux = symToAux(s) v.AddArg3(p, w0, mem) return true } @@ -11186,16 +11186,16 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: (MOVWstore [i-1] {s} p w0 mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 if v_1.Op != OpAMD64SHRQconst { break } - j := v_1.AuxInt + j := auxIntToInt8(v_1.AuxInt) w := v_1.Args[0] x := v_2 - if x.Op != OpAMD64MOVBstore || x.AuxInt != i-1 || x.Aux != s { + if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s { break } mem := x.Args[2] @@ -11203,12 +11203,12 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { break } w0 := x.Args[1] - if w0.Op != OpAMD64SHRQconst || w0.AuxInt != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { + if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { break } v.reset(OpAMD64MOVWstore) - v.AuxInt = i - 1 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 1) + v.Aux = symToAux(s) v.AddArg3(p, w0, mem) return true } @@ -11216,15 +11216,15 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x) // result: (MOVWstore [i] {s} p0 w mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p1 := v_0 - if v_1.Op != OpAMD64SHRWconst || v_1.AuxInt != 8 { + if v_1.Op != OpAMD64SHRWconst || auxIntToInt8(v_1.AuxInt) != 8 { break } w 
:= v_1.Args[0] x := v_2 - if x.Op != OpAMD64MOVBstore || x.AuxInt != i || x.Aux != s { + if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s { break } mem := x.Args[2] @@ -11233,8 +11233,8 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { break } v.reset(OpAMD64MOVWstore) - v.AuxInt = i - v.Aux = s + v.AuxInt = int32ToAuxInt(i) + v.Aux = symToAux(s) v.AddArg3(p0, w, mem) return true } @@ -11242,15 +11242,15 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x) // result: (MOVWstore [i] {s} p0 w mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p1 := v_0 - if v_1.Op != OpAMD64SHRLconst || v_1.AuxInt != 8 { + if v_1.Op != OpAMD64SHRLconst || auxIntToInt8(v_1.AuxInt) != 8 { break } w := v_1.Args[0] x := v_2 - if x.Op != OpAMD64MOVBstore || x.AuxInt != i || x.Aux != s { + if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s { break } mem := x.Args[2] @@ -11259,8 +11259,8 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { break } v.reset(OpAMD64MOVWstore) - v.AuxInt = i - v.Aux = s + v.AuxInt = int32ToAuxInt(i) + v.Aux = symToAux(s) v.AddArg3(p0, w, mem) return true } @@ -11268,15 +11268,15 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x) // result: (MOVWstore [i] {s} p0 w mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p1 := v_0 - if v_1.Op != OpAMD64SHRQconst || v_1.AuxInt != 8 { + if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 8 { break } w := v_1.Args[0] x := v_2 - if x.Op != OpAMD64MOVBstore || x.AuxInt != i || x.Aux != s { + if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s { break } mem := x.Args[2] @@ -11285,8 +11285,8 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { break } 
v.reset(OpAMD64MOVWstore) - v.AuxInt = i - v.Aux = s + v.AuxInt = int32ToAuxInt(i) + v.Aux = symToAux(s) v.AddArg3(p0, w, mem) return true } @@ -11294,23 +11294,23 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x) // result: (MOVWstore [i] {s} p0 w mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p0 := v_0 w := v_1 x := v_2 - if x.Op != OpAMD64MOVBstore || x.AuxInt != i || x.Aux != s { + if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s { break } mem := x.Args[2] p1 := x.Args[0] x_1 := x.Args[1] - if x_1.Op != OpAMD64SHRWconst || x_1.AuxInt != 8 || w != x_1.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) { + if x_1.Op != OpAMD64SHRWconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) { break } v.reset(OpAMD64MOVWstore) - v.AuxInt = i - v.Aux = s + v.AuxInt = int32ToAuxInt(i) + v.Aux = symToAux(s) v.AddArg3(p0, w, mem) return true } @@ -11318,23 +11318,23 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x) // result: (MOVWstore [i] {s} p0 w mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p0 := v_0 w := v_1 x := v_2 - if x.Op != OpAMD64MOVBstore || x.AuxInt != i || x.Aux != s { + if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s { break } mem := x.Args[2] p1 := x.Args[0] x_1 := x.Args[1] - if x_1.Op != OpAMD64SHRLconst || x_1.AuxInt != 8 || w != x_1.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) { + if x_1.Op != OpAMD64SHRLconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) { break } v.reset(OpAMD64MOVWstore) - v.AuxInt = i - v.Aux = s + v.AuxInt = 
int32ToAuxInt(i) + v.Aux = symToAux(s) v.AddArg3(p0, w, mem) return true } @@ -11342,23 +11342,23 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x) // result: (MOVWstore [i] {s} p0 w mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p0 := v_0 w := v_1 x := v_2 - if x.Op != OpAMD64MOVBstore || x.AuxInt != i || x.Aux != s { + if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s { break } mem := x.Args[2] p1 := x.Args[0] x_1 := x.Args[1] - if x_1.Op != OpAMD64SHRQconst || x_1.AuxInt != 8 || w != x_1.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) { + if x_1.Op != OpAMD64SHRQconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) { break } v.reset(OpAMD64MOVWstore) - v.AuxInt = i - v.Aux = s + v.AuxInt = int32ToAuxInt(i) + v.Aux = symToAux(s) v.AddArg3(p0, w, mem) return true } @@ -11366,27 +11366,27 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x) // result: (MOVWstore [i] {s} p0 w0 mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p1 := v_0 if v_1.Op != OpAMD64SHRLconst { break } - j := v_1.AuxInt + j := auxIntToInt8(v_1.AuxInt) w := v_1.Args[0] x := v_2 - if x.Op != OpAMD64MOVBstore || x.AuxInt != i || x.Aux != s { + if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s { break } mem := x.Args[2] p0 := x.Args[0] w0 := x.Args[1] - if w0.Op != OpAMD64SHRLconst || w0.AuxInt != j-8 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) { + if w0.Op != OpAMD64SHRLconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) { break } v.reset(OpAMD64MOVWstore) - v.AuxInt = i - v.Aux = 
s + v.AuxInt = int32ToAuxInt(i) + v.Aux = symToAux(s) v.AddArg3(p0, w0, mem) return true } @@ -11394,27 +11394,27 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x) // result: (MOVWstore [i] {s} p0 w0 mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p1 := v_0 if v_1.Op != OpAMD64SHRQconst { break } - j := v_1.AuxInt + j := auxIntToInt8(v_1.AuxInt) w := v_1.Args[0] x := v_2 - if x.Op != OpAMD64MOVBstore || x.AuxInt != i || x.Aux != s { + if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s { break } mem := x.Args[2] p0 := x.Args[0] w0 := x.Args[1] - if w0.Op != OpAMD64SHRQconst || w0.AuxInt != j-8 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) { + if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) { break } v.reset(OpAMD64MOVWstore) - v.AuxInt = i - v.Aux = s + v.AuxInt = int32ToAuxInt(i) + v.Aux = symToAux(s) v.AddArg3(p0, w0, mem) return true } @@ -11422,19 +11422,19 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { // cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2) // result: (MOVWstore [i-1] {s} p (MOVWload [j-1] {s2} p2 mem) mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 x1 := v_1 if x1.Op != OpAMD64MOVBload { break } - j := x1.AuxInt - s2 := x1.Aux + j := auxIntToInt32(x1.AuxInt) + s2 := auxToSym(x1.Aux) mem := x1.Args[1] p2 := x1.Args[0] mem2 := v_2 - if mem2.Op != OpAMD64MOVBstore || mem2.AuxInt != i-1 || mem2.Aux != s { + if mem2.Op != OpAMD64MOVBstore || auxIntToInt32(mem2.AuxInt) != i-1 || auxToSym(mem2.Aux) != s { break } _ = mem2.Args[2] @@ -11442,7 +11442,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { break } x2 := mem2.Args[1] - if x2.Op != OpAMD64MOVBload || 
x2.AuxInt != j-1 || x2.Aux != s2 { + if x2.Op != OpAMD64MOVBload || auxIntToInt32(x2.AuxInt) != j-1 || auxToSym(x2.Aux) != s2 { break } _ = x2.Args[1] @@ -11450,11 +11450,11 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { break } v.reset(OpAMD64MOVWstore) - v.AuxInt = i - 1 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 1) + v.Aux = symToAux(s) v0 := b.NewValue0(x2.Pos, OpAMD64MOVWload, typ.UInt16) - v0.AuxInt = j - 1 - v0.Aux = s2 + v0.AuxInt = int32ToAuxInt(j - 1) + v0.Aux = symToAux(s2) v0.AddArg2(p2, mem) v.AddArg3(p, v0, mem) return true @@ -11553,52 +11553,52 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool { return true } // match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem)) - // cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x) - // result: (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem) + // cond: x.Uses == 1 && a.Off() + 1 == c.Off() && clobber(x) + // result: (MOVWstoreconst [makeValAndOff64(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p mem) for { - c := v.AuxInt - s := v.Aux + c := auxIntToValAndOff(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 x := v_1 if x.Op != OpAMD64MOVBstoreconst { break } - a := x.AuxInt - if x.Aux != s { + a := auxIntToValAndOff(x.AuxInt) + if auxToSym(x.Aux) != s { break } mem := x.Args[1] - if p != x.Args[0] || !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) { + if p != x.Args[0] || !(x.Uses == 1 && a.Off()+1 == c.Off() && clobber(x)) { break } v.reset(OpAMD64MOVWstoreconst) - v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off()) - v.Aux = s + v.AuxInt = valAndOffToAuxInt(makeValAndOff64(a.Val()&0xff|c.Val()<<8, a.Off())) + v.Aux = symToAux(s) v.AddArg2(p, mem) return true } // match: (MOVBstoreconst [a] {s} p x:(MOVBstoreconst [c] {s} p mem)) - // cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x) - // result: 
(MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem) + // cond: x.Uses == 1 && a.Off() + 1 == c.Off() && clobber(x) + // result: (MOVWstoreconst [makeValAndOff64(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p mem) for { - a := v.AuxInt - s := v.Aux + a := auxIntToValAndOff(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 x := v_1 if x.Op != OpAMD64MOVBstoreconst { break } - c := x.AuxInt - if x.Aux != s { + c := auxIntToValAndOff(x.AuxInt) + if auxToSym(x.Aux) != s { break } mem := x.Args[1] - if p != x.Args[0] || !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) { + if p != x.Args[0] || !(x.Uses == 1 && a.Off()+1 == c.Off() && clobber(x)) { break } v.reset(OpAMD64MOVWstoreconst) - v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off()) - v.Aux = s + v.AuxInt = valAndOffToAuxInt(makeValAndOff64(a.Val()&0xff|c.Val()<<8, a.Off())) + v.Aux = symToAux(s) v.AddArg2(p, mem) return true } @@ -12271,15 +12271,15 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: (MOVQstore [i-4] {s} p w mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 - if v_1.Op != OpAMD64SHRQconst || v_1.AuxInt != 32 { + if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 32 { break } w := v_1.Args[0] x := v_2 - if x.Op != OpAMD64MOVLstore || x.AuxInt != i-4 || x.Aux != s { + if x.Op != OpAMD64MOVLstore || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s { break } mem := x.Args[2] @@ -12287,8 +12287,8 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { break } v.reset(OpAMD64MOVQstore) - v.AuxInt = i - 4 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 4) + v.Aux = symToAux(s) v.AddArg3(p, w, mem) return true } @@ -12296,16 +12296,16 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: (MOVQstore [i-4] {s} p w0 mem) for { 
- i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 if v_1.Op != OpAMD64SHRQconst { break } - j := v_1.AuxInt + j := auxIntToInt8(v_1.AuxInt) w := v_1.Args[0] x := v_2 - if x.Op != OpAMD64MOVLstore || x.AuxInt != i-4 || x.Aux != s { + if x.Op != OpAMD64MOVLstore || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s { break } mem := x.Args[2] @@ -12313,12 +12313,12 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { break } w0 := x.Args[1] - if w0.Op != OpAMD64SHRQconst || w0.AuxInt != j-32 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { + if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-32 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { break } v.reset(OpAMD64MOVQstore) - v.AuxInt = i - 4 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 4) + v.Aux = symToAux(s) v.AddArg3(p, w0, mem) return true } @@ -12326,15 +12326,15 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 4) && clobber(x) // result: (MOVQstore [i] {s} p0 w mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p1 := v_0 - if v_1.Op != OpAMD64SHRQconst || v_1.AuxInt != 32 { + if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 32 { break } w := v_1.Args[0] x := v_2 - if x.Op != OpAMD64MOVLstore || x.AuxInt != i || x.Aux != s { + if x.Op != OpAMD64MOVLstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s { break } mem := x.Args[2] @@ -12343,8 +12343,8 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { break } v.reset(OpAMD64MOVQstore) - v.AuxInt = i - v.Aux = s + v.AuxInt = int32ToAuxInt(i) + v.Aux = symToAux(s) v.AddArg3(p0, w, mem) return true } @@ -12352,27 +12352,27 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 4) && clobber(x) // result: (MOVQstore [i] {s} p0 w0 mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := 
auxToSym(v.Aux) p1 := v_0 if v_1.Op != OpAMD64SHRQconst { break } - j := v_1.AuxInt + j := auxIntToInt8(v_1.AuxInt) w := v_1.Args[0] x := v_2 - if x.Op != OpAMD64MOVLstore || x.AuxInt != i || x.Aux != s { + if x.Op != OpAMD64MOVLstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s { break } mem := x.Args[2] p0 := x.Args[0] w0 := x.Args[1] - if w0.Op != OpAMD64SHRQconst || w0.AuxInt != j-32 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 4) && clobber(x)) { + if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-32 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 4) && clobber(x)) { break } v.reset(OpAMD64MOVQstore) - v.AuxInt = i - v.Aux = s + v.AuxInt = int32ToAuxInt(i) + v.Aux = symToAux(s) v.AddArg3(p0, w0, mem) return true } @@ -12380,19 +12380,19 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { // cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2) // result: (MOVQstore [i-4] {s} p (MOVQload [j-4] {s2} p2 mem) mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 x1 := v_1 if x1.Op != OpAMD64MOVLload { break } - j := x1.AuxInt - s2 := x1.Aux + j := auxIntToInt32(x1.AuxInt) + s2 := auxToSym(x1.Aux) mem := x1.Args[1] p2 := x1.Args[0] mem2 := v_2 - if mem2.Op != OpAMD64MOVLstore || mem2.AuxInt != i-4 || mem2.Aux != s { + if mem2.Op != OpAMD64MOVLstore || auxIntToInt32(mem2.AuxInt) != i-4 || auxToSym(mem2.Aux) != s { break } _ = mem2.Args[2] @@ -12400,7 +12400,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { break } x2 := mem2.Args[1] - if x2.Op != OpAMD64MOVLload || x2.AuxInt != j-4 || x2.Aux != s2 { + if x2.Op != OpAMD64MOVLload || auxIntToInt32(x2.AuxInt) != j-4 || auxToSym(x2.Aux) != s2 { break } _ = x2.Args[1] @@ -12408,11 +12408,11 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { break } v.reset(OpAMD64MOVQstore) - v.AuxInt = i - 4 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 4) + v.Aux = symToAux(s) 
v0 := b.NewValue0(x2.Pos, OpAMD64MOVQload, typ.UInt64) - v0.AuxInt = j - 4 - v0.Aux = s2 + v0.AuxInt = int32ToAuxInt(j - 4) + v0.Aux = symToAux(s2) v0.AddArg2(p2, mem) v.AddArg3(p, v0, mem) return true @@ -13051,56 +13051,56 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool { return true } // match: (MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem)) - // cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x) - // result: (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem) + // cond: x.Uses == 1 && a.Off() + 4 == c.Off() && clobber(x) + // result: (MOVQstore [a.Off32()] {s} p (MOVQconst [a.Val()&0xffffffff | c.Val()<<32]) mem) for { - c := v.AuxInt - s := v.Aux + c := auxIntToValAndOff(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 x := v_1 if x.Op != OpAMD64MOVLstoreconst { break } - a := x.AuxInt - if x.Aux != s { + a := auxIntToValAndOff(x.AuxInt) + if auxToSym(x.Aux) != s { break } mem := x.Args[1] - if p != x.Args[0] || !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) { + if p != x.Args[0] || !(x.Uses == 1 && a.Off()+4 == c.Off() && clobber(x)) { break } v.reset(OpAMD64MOVQstore) - v.AuxInt = ValAndOff(a).Off() - v.Aux = s + v.AuxInt = int32ToAuxInt(a.Off32()) + v.Aux = symToAux(s) v0 := b.NewValue0(x.Pos, OpAMD64MOVQconst, typ.UInt64) - v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 + v0.AuxInt = int64ToAuxInt(a.Val()&0xffffffff | c.Val()<<32) v.AddArg3(p, v0, mem) return true } // match: (MOVLstoreconst [a] {s} p x:(MOVLstoreconst [c] {s} p mem)) - // cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x) - // result: (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem) + // cond: x.Uses == 1 && a.Off() + 4 == c.Off() && clobber(x) + // result: (MOVQstore [a.Off32()] {s} p (MOVQconst [a.Val()&0xffffffff | c.Val()<<32]) mem) for { - 
a := v.AuxInt - s := v.Aux + a := auxIntToValAndOff(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 x := v_1 if x.Op != OpAMD64MOVLstoreconst { break } - c := x.AuxInt - if x.Aux != s { + c := auxIntToValAndOff(x.AuxInt) + if auxToSym(x.Aux) != s { break } mem := x.Args[1] - if p != x.Args[0] || !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) { + if p != x.Args[0] || !(x.Uses == 1 && a.Off()+4 == c.Off() && clobber(x)) { break } v.reset(OpAMD64MOVQstore) - v.AuxInt = ValAndOff(a).Off() - v.Aux = s + v.AuxInt = int32ToAuxInt(a.Off32()) + v.Aux = symToAux(s) v0 := b.NewValue0(x.Pos, OpAMD64MOVQconst, typ.UInt64) - v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 + v0.AuxInt = int64ToAuxInt(a.Val()&0xffffffff | c.Val()<<32) v.AddArg3(p, v0, mem) return true } @@ -14232,29 +14232,29 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool { return true } // match: (MOVQstoreconst [c] {s} p x:(MOVQstoreconst [c2] {s} p mem)) - // cond: config.useSSE && x.Uses == 1 && ValAndOff(c2).Off() + 8 == ValAndOff(c).Off() && ValAndOff(c).Val() == 0 && ValAndOff(c2).Val() == 0 && clobber(x) - // result: (MOVOstore [ValAndOff(c2).Off()] {s} p (MOVOconst [0]) mem) + // cond: config.useSSE && x.Uses == 1 && c2.Off() + 8 == c.Off() && c.Val() == 0 && c2.Val() == 0 && clobber(x) + // result: (MOVOstore [c2.Off32()] {s} p (MOVOconst [0]) mem) for { - c := v.AuxInt - s := v.Aux + c := auxIntToValAndOff(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 x := v_1 if x.Op != OpAMD64MOVQstoreconst { break } - c2 := x.AuxInt - if x.Aux != s { + c2 := auxIntToValAndOff(x.AuxInt) + if auxToSym(x.Aux) != s { break } mem := x.Args[1] - if p != x.Args[0] || !(config.useSSE && x.Uses == 1 && ValAndOff(c2).Off()+8 == ValAndOff(c).Off() && ValAndOff(c).Val() == 0 && ValAndOff(c2).Val() == 0 && clobber(x)) { + if p != x.Args[0] || !(config.useSSE && x.Uses == 1 && c2.Off()+8 == c.Off() && c.Val() == 0 && c2.Val() == 0 && clobber(x)) { break } 
v.reset(OpAMD64MOVOstore) - v.AuxInt = ValAndOff(c2).Off() - v.Aux = s + v.AuxInt = int32ToAuxInt(c2.Off32()) + v.Aux = symToAux(s) v0 := b.NewValue0(x.Pos, OpAMD64MOVOconst, types.TypeInt128) - v0.AuxInt = 0 + v0.AuxInt = int128ToAuxInt(0) v.AddArg3(p, v0, mem) return true } @@ -15100,15 +15100,15 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: (MOVLstore [i-2] {s} p w mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 - if v_1.Op != OpAMD64SHRLconst || v_1.AuxInt != 16 { + if v_1.Op != OpAMD64SHRLconst || auxIntToInt8(v_1.AuxInt) != 16 { break } w := v_1.Args[0] x := v_2 - if x.Op != OpAMD64MOVWstore || x.AuxInt != i-2 || x.Aux != s { + if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s { break } mem := x.Args[2] @@ -15116,8 +15116,8 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { break } v.reset(OpAMD64MOVLstore) - v.AuxInt = i - 2 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 2) + v.Aux = symToAux(s) v.AddArg3(p, w, mem) return true } @@ -15125,15 +15125,15 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: (MOVLstore [i-2] {s} p w mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 - if v_1.Op != OpAMD64SHRQconst || v_1.AuxInt != 16 { + if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 16 { break } w := v_1.Args[0] x := v_2 - if x.Op != OpAMD64MOVWstore || x.AuxInt != i-2 || x.Aux != s { + if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s { break } mem := x.Args[2] @@ -15141,8 +15141,8 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { break } v.reset(OpAMD64MOVLstore) - v.AuxInt = i - 2 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 2) + v.Aux = symToAux(s) v.AddArg3(p, w, mem) return true } @@ -15150,16 +15150,16 @@ func 
rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: (MOVLstore [i-2] {s} p w0 mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 if v_1.Op != OpAMD64SHRLconst { break } - j := v_1.AuxInt + j := auxIntToInt8(v_1.AuxInt) w := v_1.Args[0] x := v_2 - if x.Op != OpAMD64MOVWstore || x.AuxInt != i-2 || x.Aux != s { + if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s { break } mem := x.Args[2] @@ -15167,12 +15167,12 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { break } w0 := x.Args[1] - if w0.Op != OpAMD64SHRLconst || w0.AuxInt != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { + if w0.Op != OpAMD64SHRLconst || auxIntToInt8(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { break } v.reset(OpAMD64MOVLstore) - v.AuxInt = i - 2 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 2) + v.Aux = symToAux(s) v.AddArg3(p, w0, mem) return true } @@ -15180,16 +15180,16 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { // cond: x.Uses == 1 && clobber(x) // result: (MOVLstore [i-2] {s} p w0 mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 if v_1.Op != OpAMD64SHRQconst { break } - j := v_1.AuxInt + j := auxIntToInt8(v_1.AuxInt) w := v_1.Args[0] x := v_2 - if x.Op != OpAMD64MOVWstore || x.AuxInt != i-2 || x.Aux != s { + if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s { break } mem := x.Args[2] @@ -15197,12 +15197,12 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { break } w0 := x.Args[1] - if w0.Op != OpAMD64SHRQconst || w0.AuxInt != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { + if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) { break } v.reset(OpAMD64MOVLstore) - v.AuxInt = i - 2 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 2) + 
v.Aux = symToAux(s) v.AddArg3(p, w0, mem) return true } @@ -15210,15 +15210,15 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x) // result: (MOVLstore [i] {s} p0 w mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p1 := v_0 - if v_1.Op != OpAMD64SHRLconst || v_1.AuxInt != 16 { + if v_1.Op != OpAMD64SHRLconst || auxIntToInt8(v_1.AuxInt) != 16 { break } w := v_1.Args[0] x := v_2 - if x.Op != OpAMD64MOVWstore || x.AuxInt != i || x.Aux != s { + if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s { break } mem := x.Args[2] @@ -15227,8 +15227,8 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { break } v.reset(OpAMD64MOVLstore) - v.AuxInt = i - v.Aux = s + v.AuxInt = int32ToAuxInt(i) + v.Aux = symToAux(s) v.AddArg3(p0, w, mem) return true } @@ -15236,15 +15236,15 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x) // result: (MOVLstore [i] {s} p0 w mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p1 := v_0 - if v_1.Op != OpAMD64SHRQconst || v_1.AuxInt != 16 { + if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 16 { break } w := v_1.Args[0] x := v_2 - if x.Op != OpAMD64MOVWstore || x.AuxInt != i || x.Aux != s { + if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s { break } mem := x.Args[2] @@ -15253,8 +15253,8 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { break } v.reset(OpAMD64MOVLstore) - v.AuxInt = i - v.Aux = s + v.AuxInt = int32ToAuxInt(i) + v.Aux = symToAux(s) v.AddArg3(p0, w, mem) return true } @@ -15262,27 +15262,27 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x) // result: (MOVLstore [i] {s} p0 w0 mem) for { - i := v.AuxInt - s := v.Aux + i := 
auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p1 := v_0 if v_1.Op != OpAMD64SHRLconst { break } - j := v_1.AuxInt + j := auxIntToInt8(v_1.AuxInt) w := v_1.Args[0] x := v_2 - if x.Op != OpAMD64MOVWstore || x.AuxInt != i || x.Aux != s { + if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s { break } mem := x.Args[2] p0 := x.Args[0] w0 := x.Args[1] - if w0.Op != OpAMD64SHRLconst || w0.AuxInt != j-16 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)) { + if w0.Op != OpAMD64SHRLconst || auxIntToInt8(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)) { break } v.reset(OpAMD64MOVLstore) - v.AuxInt = i - v.Aux = s + v.AuxInt = int32ToAuxInt(i) + v.Aux = symToAux(s) v.AddArg3(p0, w0, mem) return true } @@ -15290,27 +15290,27 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x) // result: (MOVLstore [i] {s} p0 w0 mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p1 := v_0 if v_1.Op != OpAMD64SHRQconst { break } - j := v_1.AuxInt + j := auxIntToInt8(v_1.AuxInt) w := v_1.Args[0] x := v_2 - if x.Op != OpAMD64MOVWstore || x.AuxInt != i || x.Aux != s { + if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s { break } mem := x.Args[2] p0 := x.Args[0] w0 := x.Args[1] - if w0.Op != OpAMD64SHRQconst || w0.AuxInt != j-16 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)) { + if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)) { break } v.reset(OpAMD64MOVLstore) - v.AuxInt = i - v.Aux = s + v.AuxInt = int32ToAuxInt(i) + v.Aux = symToAux(s) v.AddArg3(p0, w0, mem) return true } @@ -15318,19 +15318,19 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { // cond: x1.Uses == 1 && x2.Uses == 
1 && mem2.Uses == 1 && clobber(x1, x2, mem2) // result: (MOVLstore [i-2] {s} p (MOVLload [j-2] {s2} p2 mem) mem) for { - i := v.AuxInt - s := v.Aux + i := auxIntToInt32(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 x1 := v_1 if x1.Op != OpAMD64MOVWload { break } - j := x1.AuxInt - s2 := x1.Aux + j := auxIntToInt32(x1.AuxInt) + s2 := auxToSym(x1.Aux) mem := x1.Args[1] p2 := x1.Args[0] mem2 := v_2 - if mem2.Op != OpAMD64MOVWstore || mem2.AuxInt != i-2 || mem2.Aux != s { + if mem2.Op != OpAMD64MOVWstore || auxIntToInt32(mem2.AuxInt) != i-2 || auxToSym(mem2.Aux) != s { break } _ = mem2.Args[2] @@ -15338,7 +15338,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { break } x2 := mem2.Args[1] - if x2.Op != OpAMD64MOVWload || x2.AuxInt != j-2 || x2.Aux != s2 { + if x2.Op != OpAMD64MOVWload || auxIntToInt32(x2.AuxInt) != j-2 || auxToSym(x2.Aux) != s2 { break } _ = x2.Args[1] @@ -15346,11 +15346,11 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { break } v.reset(OpAMD64MOVLstore) - v.AuxInt = i - 2 - v.Aux = s + v.AuxInt = int32ToAuxInt(i - 2) + v.Aux = symToAux(s) v0 := b.NewValue0(x2.Pos, OpAMD64MOVLload, typ.UInt32) - v0.AuxInt = j - 2 - v0.Aux = s2 + v0.AuxInt = int32ToAuxInt(j - 2) + v0.Aux = symToAux(s2) v0.AddArg2(p2, mem) v.AddArg3(p, v0, mem) return true @@ -15449,52 +15449,52 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool { return true } // match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem)) - // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) - // result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem) + // cond: x.Uses == 1 && a.Off() + 2 == c.Off() && clobber(x) + // result: (MOVLstoreconst [makeValAndOff64(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p mem) for { - c := v.AuxInt - s := v.Aux + c := auxIntToValAndOff(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 x := v_1 if x.Op != OpAMD64MOVWstoreconst { break } - a := 
x.AuxInt - if x.Aux != s { + a := auxIntToValAndOff(x.AuxInt) + if auxToSym(x.Aux) != s { break } mem := x.Args[1] - if p != x.Args[0] || !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { + if p != x.Args[0] || !(x.Uses == 1 && a.Off()+2 == c.Off() && clobber(x)) { break } v.reset(OpAMD64MOVLstoreconst) - v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) - v.Aux = s + v.AuxInt = valAndOffToAuxInt(makeValAndOff64(a.Val()&0xffff|c.Val()<<16, a.Off())) + v.Aux = symToAux(s) v.AddArg2(p, mem) return true } // match: (MOVWstoreconst [a] {s} p x:(MOVWstoreconst [c] {s} p mem)) - // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) - // result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem) + // cond: x.Uses == 1 && a.Off() + 2 == c.Off() && clobber(x) + // result: (MOVLstoreconst [makeValAndOff64(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p mem) for { - a := v.AuxInt - s := v.Aux + a := auxIntToValAndOff(v.AuxInt) + s := auxToSym(v.Aux) p := v_0 x := v_1 if x.Op != OpAMD64MOVWstoreconst { break } - c := x.AuxInt - if x.Aux != s { + c := auxIntToValAndOff(x.AuxInt) + if auxToSym(x.Aux) != s { break } mem := x.Args[1] - if p != x.Args[0] || !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { + if p != x.Args[0] || !(x.Uses == 1 && a.Off()+2 == c.Off() && clobber(x)) { break } v.reset(OpAMD64MOVLstoreconst) - v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) - v.Aux = s + v.AuxInt = valAndOffToAuxInt(makeValAndOff64(a.Val()&0xffff|c.Val()<<16, a.Off())) + v.Aux = symToAux(s) v.AddArg2(p, mem) return true } @@ -17556,20 +17556,20 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { if x0.Op != OpAMD64MOVBload { continue } - i0 := x0.AuxInt - s := x0.Aux + i0 := auxIntToInt32(x0.AuxInt) + s := auxToSym(x0.Aux) mem := x0.Args[1] p := 
x0.Args[0] sh := v_1 - if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 { + if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 8 { continue } x1 := sh.Args[0] if x1.Op != OpAMD64MOVBload { continue } - i1 := x1.AuxInt - if x1.Aux != s { + i1 := auxIntToInt32(x1.AuxInt) + if auxToSym(x1.Aux) != s { continue } _ = x1.Args[1] @@ -17579,8 +17579,8 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { b = mergePoint(b, x0, x1) v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) v.copyOf(v0) - v0.AuxInt = i0 - v0.Aux = s + v0.AuxInt = int32ToAuxInt(i0) + v0.Aux = symToAux(s) v0.AddArg2(p, mem) return true } @@ -17595,16 +17595,16 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { if x0.Op != OpAMD64MOVBload { continue } - i := x0.AuxInt - s := x0.Aux + i := auxIntToInt32(x0.AuxInt) + s := auxToSym(x0.Aux) mem := x0.Args[1] p0 := x0.Args[0] sh := v_1 - if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 { + if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 8 { continue } x1 := sh.Args[0] - if x1.Op != OpAMD64MOVBload || x1.AuxInt != i || x1.Aux != s { + if x1.Op != OpAMD64MOVBload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s { continue } _ = x1.Args[1] @@ -17615,8 +17615,8 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { b = mergePoint(b, x0, x1) v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) v.copyOf(v0) - v0.AuxInt = i - v0.Aux = s + v0.AuxInt = int32ToAuxInt(i) + v0.Aux = symToAux(s) v0.AddArg2(p0, mem) return true } @@ -17631,20 +17631,20 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { if x0.Op != OpAMD64MOVWload { continue } - i0 := x0.AuxInt - s := x0.Aux + i0 := auxIntToInt32(x0.AuxInt) + s := auxToSym(x0.Aux) mem := x0.Args[1] p := x0.Args[0] sh := v_1 - if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 { + if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 16 { continue } x1 := sh.Args[0] if x1.Op != OpAMD64MOVWload { continue } - i1 := x1.AuxInt - if x1.Aux != s { + i1 := auxIntToInt32(x1.AuxInt) + if 
auxToSym(x1.Aux) != s { continue } _ = x1.Args[1] @@ -17654,8 +17654,8 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { b = mergePoint(b, x0, x1) v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32) v.copyOf(v0) - v0.AuxInt = i0 - v0.Aux = s + v0.AuxInt = int32ToAuxInt(i0) + v0.Aux = symToAux(s) v0.AddArg2(p, mem) return true } @@ -17670,16 +17670,16 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { if x0.Op != OpAMD64MOVWload { continue } - i := x0.AuxInt - s := x0.Aux + i := auxIntToInt32(x0.AuxInt) + s := auxToSym(x0.Aux) mem := x0.Args[1] p0 := x0.Args[0] sh := v_1 - if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 { + if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 16 { continue } x1 := sh.Args[0] - if x1.Op != OpAMD64MOVWload || x1.AuxInt != i || x1.Aux != s { + if x1.Op != OpAMD64MOVWload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s { continue } _ = x1.Args[1] @@ -17690,8 +17690,8 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { b = mergePoint(b, x0, x1) v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32) v.copyOf(v0) - v0.AuxInt = i - v0.Aux = s + v0.AuxInt = int32ToAuxInt(i) + v0.Aux = symToAux(s) v0.AddArg2(p0, mem) return true } @@ -17706,13 +17706,13 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { if s1.Op != OpAMD64SHLLconst { continue } - j1 := s1.AuxInt + j1 := auxIntToInt8(s1.AuxInt) x1 := s1.Args[0] if x1.Op != OpAMD64MOVBload { continue } - i1 := x1.AuxInt - s := x1.Aux + i1 := auxIntToInt32(x1.AuxInt) + s := auxToSym(x1.Aux) mem := x1.Args[1] p := x1.Args[0] or := v_1 @@ -17727,13 +17727,13 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { if s0.Op != OpAMD64SHLLconst { continue } - j0 := s0.AuxInt + j0 := auxIntToInt8(s0.AuxInt) x0 := s0.Args[0] if x0.Op != OpAMD64MOVBload { continue } - i0 := x0.AuxInt - if x0.Aux != s { + i0 := auxIntToInt32(x0.AuxInt) + if auxToSym(x0.Aux) != s { continue } _ = x0.Args[1] @@ -17748,10 +17748,10 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { v0 := 
b.NewValue0(x0.Pos, OpAMD64ORL, v.Type) v.copyOf(v0) v1 := b.NewValue0(x0.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j0 + v1.AuxInt = int8ToAuxInt(j0) v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s + v2.AuxInt = int32ToAuxInt(i0) + v2.Aux = symToAux(s) v2.AddArg2(p, mem) v1.AddArg(v2) v0.AddArg2(v1, y) @@ -17769,13 +17769,13 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { if s1.Op != OpAMD64SHLLconst { continue } - j1 := s1.AuxInt + j1 := auxIntToInt8(s1.AuxInt) x1 := s1.Args[0] if x1.Op != OpAMD64MOVBload { continue } - i := x1.AuxInt - s := x1.Aux + i := auxIntToInt32(x1.AuxInt) + s := auxToSym(x1.Aux) mem := x1.Args[1] p1 := x1.Args[0] or := v_1 @@ -17790,9 +17790,9 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { if s0.Op != OpAMD64SHLLconst { continue } - j0 := s0.AuxInt + j0 := auxIntToInt8(s0.AuxInt) x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBload || x0.AuxInt != i || x0.Aux != s { + if x0.Op != OpAMD64MOVBload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s { continue } _ = x0.Args[1] @@ -17808,10 +17808,10 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { v0 := b.NewValue0(x0.Pos, OpAMD64ORL, v.Type) v.copyOf(v0) v1 := b.NewValue0(x0.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j0 + v1.AuxInt = int8ToAuxInt(j0) v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) - v2.AuxInt = i - v2.Aux = s + v2.AuxInt = int32ToAuxInt(i) + v2.Aux = symToAux(s) v2.AddArg2(p0, mem) v1.AddArg(v2) v0.AddArg2(v1, y) @@ -17829,20 +17829,20 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { if x1.Op != OpAMD64MOVBload { continue } - i1 := x1.AuxInt - s := x1.Aux + i1 := auxIntToInt32(x1.AuxInt) + s := auxToSym(x1.Aux) mem := x1.Args[1] p := x1.Args[0] sh := v_1 - if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 { + if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 8 { continue } x0 := sh.Args[0] if x0.Op != OpAMD64MOVBload { continue } - i0 := x0.AuxInt - if x0.Aux != s { + i0 := auxIntToInt32(x0.AuxInt) 
+ if auxToSym(x0.Aux) != s { continue } _ = x0.Args[1] @@ -17852,10 +17852,10 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { b = mergePoint(b, x0, x1) v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, v.Type) v.copyOf(v0) - v0.AuxInt = 8 + v0.AuxInt = int8ToAuxInt(8) v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) - v1.AuxInt = i0 - v1.Aux = s + v1.AuxInt = int32ToAuxInt(i0) + v1.Aux = symToAux(s) v1.AddArg2(p, mem) v0.AddArg(v1) return true @@ -17871,16 +17871,16 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { if x1.Op != OpAMD64MOVBload { continue } - i := x1.AuxInt - s := x1.Aux + i := auxIntToInt32(x1.AuxInt) + s := auxToSym(x1.Aux) mem := x1.Args[1] p1 := x1.Args[0] sh := v_1 - if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 8 { + if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 8 { continue } x0 := sh.Args[0] - if x0.Op != OpAMD64MOVBload || x0.AuxInt != i || x0.Aux != s { + if x0.Op != OpAMD64MOVBload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s { continue } _ = x0.Args[1] @@ -17891,10 +17891,10 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { b = mergePoint(b, x0, x1) v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, v.Type) v.copyOf(v0) - v0.AuxInt = 8 + v0.AuxInt = int8ToAuxInt(8) v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) - v1.AuxInt = i - v1.Aux = s + v1.AuxInt = int32ToAuxInt(i) + v1.Aux = symToAux(s) v1.AddArg2(p0, mem) v0.AddArg(v1) return true @@ -17907,31 +17907,31 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { r1 := v_0 - if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { + if r1.Op != OpAMD64ROLWconst || auxIntToInt8(r1.AuxInt) != 8 { continue } x1 := r1.Args[0] if x1.Op != OpAMD64MOVWload { continue } - i1 := x1.AuxInt - s := x1.Aux + i1 := auxIntToInt32(x1.AuxInt) + s := auxToSym(x1.Aux) mem := x1.Args[1] p := x1.Args[0] sh := v_1 - if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 { + if sh.Op != OpAMD64SHLLconst || 
auxIntToInt8(sh.AuxInt) != 16 { continue } r0 := sh.Args[0] - if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { + if r0.Op != OpAMD64ROLWconst || auxIntToInt8(r0.AuxInt) != 8 { continue } x0 := r0.Args[0] if x0.Op != OpAMD64MOVWload { continue } - i0 := x0.AuxInt - if x0.Aux != s { + i0 := auxIntToInt32(x0.AuxInt) + if auxToSym(x0.Aux) != s { continue } _ = x0.Args[1] @@ -17942,8 +17942,8 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, v.Type) v.copyOf(v0) v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32) - v1.AuxInt = i0 - v1.Aux = s + v1.AuxInt = int32ToAuxInt(i0) + v1.Aux = symToAux(s) v1.AddArg2(p, mem) v0.AddArg(v1) return true @@ -17956,27 +17956,27 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { r1 := v_0 - if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { + if r1.Op != OpAMD64ROLWconst || auxIntToInt8(r1.AuxInt) != 8 { continue } x1 := r1.Args[0] if x1.Op != OpAMD64MOVWload { continue } - i := x1.AuxInt - s := x1.Aux + i := auxIntToInt32(x1.AuxInt) + s := auxToSym(x1.Aux) mem := x1.Args[1] p1 := x1.Args[0] sh := v_1 - if sh.Op != OpAMD64SHLLconst || sh.AuxInt != 16 { + if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 16 { continue } r0 := sh.Args[0] - if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { + if r0.Op != OpAMD64ROLWconst || auxIntToInt8(r0.AuxInt) != 8 { continue } x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWload || x0.AuxInt != i || x0.Aux != s { + if x0.Op != OpAMD64MOVWload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s { continue } _ = x0.Args[1] @@ -17988,8 +17988,8 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, v.Type) v.copyOf(v0) v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32) - v1.AuxInt = i - v1.Aux = s + v1.AuxInt = int32ToAuxInt(i) + v1.Aux = symToAux(s) v1.AddArg2(p0, mem) v0.AddArg(v1) return true @@ -18005,13 +18005,13 @@ func 
rewriteValueAMD64_OpAMD64ORL(v *Value) bool { if s0.Op != OpAMD64SHLLconst { continue } - j0 := s0.AuxInt + j0 := auxIntToInt8(s0.AuxInt) x0 := s0.Args[0] if x0.Op != OpAMD64MOVBload { continue } - i0 := x0.AuxInt - s := x0.Aux + i0 := auxIntToInt32(x0.AuxInt) + s := auxToSym(x0.Aux) mem := x0.Args[1] p := x0.Args[0] or := v_1 @@ -18026,13 +18026,13 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { if s1.Op != OpAMD64SHLLconst { continue } - j1 := s1.AuxInt + j1 := auxIntToInt8(s1.AuxInt) x1 := s1.Args[0] if x1.Op != OpAMD64MOVBload { continue } - i1 := x1.AuxInt - if x1.Aux != s { + i1 := auxIntToInt32(x1.AuxInt) + if auxToSym(x1.Aux) != s { continue } _ = x1.Args[1] @@ -18047,12 +18047,12 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { v0 := b.NewValue0(x1.Pos, OpAMD64ORL, v.Type) v.copyOf(v0) v1 := b.NewValue0(x1.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j1 + v1.AuxInt = int8ToAuxInt(j1) v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16) - v2.AuxInt = 8 + v2.AuxInt = int8ToAuxInt(8) v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) - v3.AuxInt = i0 - v3.Aux = s + v3.AuxInt = int32ToAuxInt(i0) + v3.Aux = symToAux(s) v3.AddArg2(p, mem) v2.AddArg(v3) v1.AddArg(v2) @@ -18071,13 +18071,13 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { if s0.Op != OpAMD64SHLLconst { continue } - j0 := s0.AuxInt + j0 := auxIntToInt8(s0.AuxInt) x0 := s0.Args[0] if x0.Op != OpAMD64MOVBload { continue } - i := x0.AuxInt - s := x0.Aux + i := auxIntToInt32(x0.AuxInt) + s := auxToSym(x0.Aux) mem := x0.Args[1] p0 := x0.Args[0] or := v_1 @@ -18092,9 +18092,9 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { if s1.Op != OpAMD64SHLLconst { continue } - j1 := s1.AuxInt + j1 := auxIntToInt8(s1.AuxInt) x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBload || x1.AuxInt != i || x1.Aux != s { + if x1.Op != OpAMD64MOVBload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s { continue } _ = x1.Args[1] @@ -18110,12 +18110,12 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) 
bool { v0 := b.NewValue0(x1.Pos, OpAMD64ORL, v.Type) v.copyOf(v0) v1 := b.NewValue0(x1.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j1 + v1.AuxInt = int8ToAuxInt(j1) v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16) - v2.AuxInt = 8 + v2.AuxInt = int8ToAuxInt(8) v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) - v3.AuxInt = i - v3.Aux = s + v3.AuxInt = int32ToAuxInt(i) + v3.Aux = symToAux(s) v3.AddArg2(p0, mem) v2.AddArg(v3) v1.AddArg(v2) @@ -18747,20 +18747,20 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { if x0.Op != OpAMD64MOVBload { continue } - i0 := x0.AuxInt - s := x0.Aux + i0 := auxIntToInt32(x0.AuxInt) + s := auxToSym(x0.Aux) mem := x0.Args[1] p := x0.Args[0] sh := v_1 - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 { + if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 8 { continue } x1 := sh.Args[0] if x1.Op != OpAMD64MOVBload { continue } - i1 := x1.AuxInt - if x1.Aux != s { + i1 := auxIntToInt32(x1.AuxInt) + if auxToSym(x1.Aux) != s { continue } _ = x1.Args[1] @@ -18770,8 +18770,8 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { b = mergePoint(b, x0, x1) v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) v.copyOf(v0) - v0.AuxInt = i0 - v0.Aux = s + v0.AuxInt = int32ToAuxInt(i0) + v0.Aux = symToAux(s) v0.AddArg2(p, mem) return true } @@ -18786,16 +18786,16 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { if x0.Op != OpAMD64MOVBload { continue } - i := x0.AuxInt - s := x0.Aux + i := auxIntToInt32(x0.AuxInt) + s := auxToSym(x0.Aux) mem := x0.Args[1] p0 := x0.Args[0] sh := v_1 - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 { + if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 8 { continue } x1 := sh.Args[0] - if x1.Op != OpAMD64MOVBload || x1.AuxInt != i || x1.Aux != s { + if x1.Op != OpAMD64MOVBload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s { continue } _ = x1.Args[1] @@ -18806,8 +18806,8 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { b = mergePoint(b, x0, x1) v0 := 
b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) v.copyOf(v0) - v0.AuxInt = i - v0.Aux = s + v0.AuxInt = int32ToAuxInt(i) + v0.Aux = symToAux(s) v0.AddArg2(p0, mem) return true } @@ -18822,20 +18822,20 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { if x0.Op != OpAMD64MOVWload { continue } - i0 := x0.AuxInt - s := x0.Aux + i0 := auxIntToInt32(x0.AuxInt) + s := auxToSym(x0.Aux) mem := x0.Args[1] p := x0.Args[0] sh := v_1 - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 { + if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 16 { continue } x1 := sh.Args[0] if x1.Op != OpAMD64MOVWload { continue } - i1 := x1.AuxInt - if x1.Aux != s { + i1 := auxIntToInt32(x1.AuxInt) + if auxToSym(x1.Aux) != s { continue } _ = x1.Args[1] @@ -18845,8 +18845,8 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { b = mergePoint(b, x0, x1) v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32) v.copyOf(v0) - v0.AuxInt = i0 - v0.Aux = s + v0.AuxInt = int32ToAuxInt(i0) + v0.Aux = symToAux(s) v0.AddArg2(p, mem) return true } @@ -18861,16 +18861,16 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { if x0.Op != OpAMD64MOVWload { continue } - i := x0.AuxInt - s := x0.Aux + i := auxIntToInt32(x0.AuxInt) + s := auxToSym(x0.Aux) mem := x0.Args[1] p0 := x0.Args[0] sh := v_1 - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 { + if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 16 { continue } x1 := sh.Args[0] - if x1.Op != OpAMD64MOVWload || x1.AuxInt != i || x1.Aux != s { + if x1.Op != OpAMD64MOVWload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s { continue } _ = x1.Args[1] @@ -18881,8 +18881,8 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { b = mergePoint(b, x0, x1) v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32) v.copyOf(v0) - v0.AuxInt = i - v0.Aux = s + v0.AuxInt = int32ToAuxInt(i) + v0.Aux = symToAux(s) v0.AddArg2(p0, mem) return true } @@ -18897,20 +18897,20 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { if x0.Op != OpAMD64MOVLload { 
continue } - i0 := x0.AuxInt - s := x0.Aux + i0 := auxIntToInt32(x0.AuxInt) + s := auxToSym(x0.Aux) mem := x0.Args[1] p := x0.Args[0] sh := v_1 - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 { + if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 32 { continue } x1 := sh.Args[0] if x1.Op != OpAMD64MOVLload { continue } - i1 := x1.AuxInt - if x1.Aux != s { + i1 := auxIntToInt32(x1.AuxInt) + if auxToSym(x1.Aux) != s { continue } _ = x1.Args[1] @@ -18920,8 +18920,8 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { b = mergePoint(b, x0, x1) v0 := b.NewValue0(x1.Pos, OpAMD64MOVQload, typ.UInt64) v.copyOf(v0) - v0.AuxInt = i0 - v0.Aux = s + v0.AuxInt = int32ToAuxInt(i0) + v0.Aux = symToAux(s) v0.AddArg2(p, mem) return true } @@ -18936,16 +18936,16 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { if x0.Op != OpAMD64MOVLload { continue } - i := x0.AuxInt - s := x0.Aux + i := auxIntToInt32(x0.AuxInt) + s := auxToSym(x0.Aux) mem := x0.Args[1] p0 := x0.Args[0] sh := v_1 - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 { + if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 32 { continue } x1 := sh.Args[0] - if x1.Op != OpAMD64MOVLload || x1.AuxInt != i || x1.Aux != s { + if x1.Op != OpAMD64MOVLload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s { continue } _ = x1.Args[1] @@ -18956,8 +18956,8 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { b = mergePoint(b, x0, x1) v0 := b.NewValue0(x1.Pos, OpAMD64MOVQload, typ.UInt64) v.copyOf(v0) - v0.AuxInt = i - v0.Aux = s + v0.AuxInt = int32ToAuxInt(i) + v0.Aux = symToAux(s) v0.AddArg2(p0, mem) return true } @@ -18972,13 +18972,13 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { if s1.Op != OpAMD64SHLQconst { continue } - j1 := s1.AuxInt + j1 := auxIntToInt8(s1.AuxInt) x1 := s1.Args[0] if x1.Op != OpAMD64MOVBload { continue } - i1 := x1.AuxInt - s := x1.Aux + i1 := auxIntToInt32(x1.AuxInt) + s := auxToSym(x1.Aux) mem := x1.Args[1] p := x1.Args[0] or := v_1 @@ -18993,13 +18993,13 @@ func 
rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { if s0.Op != OpAMD64SHLQconst { continue } - j0 := s0.AuxInt + j0 := auxIntToInt8(s0.AuxInt) x0 := s0.Args[0] if x0.Op != OpAMD64MOVBload { continue } - i0 := x0.AuxInt - if x0.Aux != s { + i0 := auxIntToInt32(x0.AuxInt) + if auxToSym(x0.Aux) != s { continue } _ = x0.Args[1] @@ -19014,10 +19014,10 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type) v.copyOf(v0) v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 + v1.AuxInt = int8ToAuxInt(j0) v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) - v2.AuxInt = i0 - v2.Aux = s + v2.AuxInt = int32ToAuxInt(i0) + v2.Aux = symToAux(s) v2.AddArg2(p, mem) v1.AddArg(v2) v0.AddArg2(v1, y) @@ -19035,13 +19035,13 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { if s1.Op != OpAMD64SHLQconst { continue } - j1 := s1.AuxInt + j1 := auxIntToInt8(s1.AuxInt) x1 := s1.Args[0] if x1.Op != OpAMD64MOVBload { continue } - i := x1.AuxInt - s := x1.Aux + i := auxIntToInt32(x1.AuxInt) + s := auxToSym(x1.Aux) mem := x1.Args[1] p1 := x1.Args[0] or := v_1 @@ -19056,9 +19056,9 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { if s0.Op != OpAMD64SHLQconst { continue } - j0 := s0.AuxInt + j0 := auxIntToInt8(s0.AuxInt) x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBload || x0.AuxInt != i || x0.Aux != s { + if x0.Op != OpAMD64MOVBload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s { continue } _ = x0.Args[1] @@ -19074,10 +19074,10 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type) v.copyOf(v0) v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 + v1.AuxInt = int8ToAuxInt(j0) v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) - v2.AuxInt = i - v2.Aux = s + v2.AuxInt = int32ToAuxInt(i) + v2.Aux = symToAux(s) v2.AddArg2(p0, mem) v1.AddArg(v2) v0.AddArg2(v1, y) @@ -19095,13 +19095,13 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { if s1.Op != 
OpAMD64SHLQconst { continue } - j1 := s1.AuxInt + j1 := auxIntToInt8(s1.AuxInt) x1 := s1.Args[0] if x1.Op != OpAMD64MOVWload { continue } - i1 := x1.AuxInt - s := x1.Aux + i1 := auxIntToInt32(x1.AuxInt) + s := auxToSym(x1.Aux) mem := x1.Args[1] p := x1.Args[0] or := v_1 @@ -19116,13 +19116,13 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { if s0.Op != OpAMD64SHLQconst { continue } - j0 := s0.AuxInt + j0 := auxIntToInt8(s0.AuxInt) x0 := s0.Args[0] if x0.Op != OpAMD64MOVWload { continue } - i0 := x0.AuxInt - if x0.Aux != s { + i0 := auxIntToInt32(x0.AuxInt) + if auxToSym(x0.Aux) != s { continue } _ = x0.Args[1] @@ -19137,10 +19137,10 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type) v.copyOf(v0) v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 + v1.AuxInt = int8ToAuxInt(j0) v2 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32) - v2.AuxInt = i0 - v2.Aux = s + v2.AuxInt = int32ToAuxInt(i0) + v2.Aux = symToAux(s) v2.AddArg2(p, mem) v1.AddArg(v2) v0.AddArg2(v1, y) @@ -19158,13 +19158,13 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { if s1.Op != OpAMD64SHLQconst { continue } - j1 := s1.AuxInt + j1 := auxIntToInt8(s1.AuxInt) x1 := s1.Args[0] if x1.Op != OpAMD64MOVWload { continue } - i := x1.AuxInt - s := x1.Aux + i := auxIntToInt32(x1.AuxInt) + s := auxToSym(x1.Aux) mem := x1.Args[1] p1 := x1.Args[0] or := v_1 @@ -19179,9 +19179,9 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { if s0.Op != OpAMD64SHLQconst { continue } - j0 := s0.AuxInt + j0 := auxIntToInt8(s0.AuxInt) x0 := s0.Args[0] - if x0.Op != OpAMD64MOVWload || x0.AuxInt != i || x0.Aux != s { + if x0.Op != OpAMD64MOVWload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s { continue } _ = x0.Args[1] @@ -19197,10 +19197,10 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type) v.copyOf(v0) v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 + v1.AuxInt = 
int8ToAuxInt(j0) v2 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32) - v2.AuxInt = i - v2.Aux = s + v2.AuxInt = int32ToAuxInt(i) + v2.Aux = symToAux(s) v2.AddArg2(p0, mem) v1.AddArg(v2) v0.AddArg2(v1, y) @@ -19218,20 +19218,20 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { if x1.Op != OpAMD64MOVBload { continue } - i1 := x1.AuxInt - s := x1.Aux + i1 := auxIntToInt32(x1.AuxInt) + s := auxToSym(x1.Aux) mem := x1.Args[1] p := x1.Args[0] sh := v_1 - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 { + if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 8 { continue } x0 := sh.Args[0] if x0.Op != OpAMD64MOVBload { continue } - i0 := x0.AuxInt - if x0.Aux != s { + i0 := auxIntToInt32(x0.AuxInt) + if auxToSym(x0.Aux) != s { continue } _ = x0.Args[1] @@ -19241,10 +19241,10 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { b = mergePoint(b, x0, x1) v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, v.Type) v.copyOf(v0) - v0.AuxInt = 8 + v0.AuxInt = int8ToAuxInt(8) v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) - v1.AuxInt = i0 - v1.Aux = s + v1.AuxInt = int32ToAuxInt(i0) + v1.Aux = symToAux(s) v1.AddArg2(p, mem) v0.AddArg(v1) return true @@ -19260,16 +19260,16 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { if x1.Op != OpAMD64MOVBload { continue } - i := x1.AuxInt - s := x1.Aux + i := auxIntToInt32(x1.AuxInt) + s := auxToSym(x1.Aux) mem := x1.Args[1] p1 := x1.Args[0] sh := v_1 - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 8 { + if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 8 { continue } x0 := sh.Args[0] - if x0.Op != OpAMD64MOVBload || x0.AuxInt != i || x0.Aux != s { + if x0.Op != OpAMD64MOVBload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s { continue } _ = x0.Args[1] @@ -19280,10 +19280,10 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { b = mergePoint(b, x0, x1) v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, v.Type) v.copyOf(v0) - v0.AuxInt = 8 + v0.AuxInt = int8ToAuxInt(8) v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, 
typ.UInt16) - v1.AuxInt = i - v1.Aux = s + v1.AuxInt = int32ToAuxInt(i) + v1.Aux = symToAux(s) v1.AddArg2(p0, mem) v0.AddArg(v1) return true @@ -19296,31 +19296,31 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { r1 := v_0 - if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { + if r1.Op != OpAMD64ROLWconst || auxIntToInt8(r1.AuxInt) != 8 { continue } x1 := r1.Args[0] if x1.Op != OpAMD64MOVWload { continue } - i1 := x1.AuxInt - s := x1.Aux + i1 := auxIntToInt32(x1.AuxInt) + s := auxToSym(x1.Aux) mem := x1.Args[1] p := x1.Args[0] sh := v_1 - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 { + if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 16 { continue } r0 := sh.Args[0] - if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { + if r0.Op != OpAMD64ROLWconst || auxIntToInt8(r0.AuxInt) != 8 { continue } x0 := r0.Args[0] if x0.Op != OpAMD64MOVWload { continue } - i0 := x0.AuxInt - if x0.Aux != s { + i0 := auxIntToInt32(x0.AuxInt) + if auxToSym(x0.Aux) != s { continue } _ = x0.Args[1] @@ -19331,8 +19331,8 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, v.Type) v.copyOf(v0) v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32) - v1.AuxInt = i0 - v1.Aux = s + v1.AuxInt = int32ToAuxInt(i0) + v1.Aux = symToAux(s) v1.AddArg2(p, mem) v0.AddArg(v1) return true @@ -19345,27 +19345,27 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { r1 := v_0 - if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { + if r1.Op != OpAMD64ROLWconst || auxIntToInt8(r1.AuxInt) != 8 { continue } x1 := r1.Args[0] if x1.Op != OpAMD64MOVWload { continue } - i := x1.AuxInt - s := x1.Aux + i := auxIntToInt32(x1.AuxInt) + s := auxToSym(x1.Aux) mem := x1.Args[1] p1 := x1.Args[0] sh := v_1 - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 16 { + if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 16 { continue } r0 := 
sh.Args[0] - if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { + if r0.Op != OpAMD64ROLWconst || auxIntToInt8(r0.AuxInt) != 8 { continue } x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWload || x0.AuxInt != i || x0.Aux != s { + if x0.Op != OpAMD64MOVWload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s { continue } _ = x0.Args[1] @@ -19377,8 +19377,8 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, v.Type) v.copyOf(v0) v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32) - v1.AuxInt = i - v1.Aux = s + v1.AuxInt = int32ToAuxInt(i) + v1.Aux = symToAux(s) v1.AddArg2(p0, mem) v0.AddArg(v1) return true @@ -19398,12 +19398,12 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { if x1.Op != OpAMD64MOVLload { continue } - i1 := x1.AuxInt - s := x1.Aux + i1 := auxIntToInt32(x1.AuxInt) + s := auxToSym(x1.Aux) mem := x1.Args[1] p := x1.Args[0] sh := v_1 - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 { + if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 32 { continue } r0 := sh.Args[0] @@ -19414,8 +19414,8 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { if x0.Op != OpAMD64MOVLload { continue } - i0 := x0.AuxInt - if x0.Aux != s { + i0 := auxIntToInt32(x0.AuxInt) + if auxToSym(x0.Aux) != s { continue } _ = x0.Args[1] @@ -19426,8 +19426,8 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, v.Type) v.copyOf(v0) v1 := b.NewValue0(x0.Pos, OpAMD64MOVQload, typ.UInt64) - v1.AuxInt = i0 - v1.Aux = s + v1.AuxInt = int32ToAuxInt(i0) + v1.Aux = symToAux(s) v1.AddArg2(p, mem) v0.AddArg(v1) return true @@ -19447,12 +19447,12 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { if x1.Op != OpAMD64MOVLload { continue } - i := x1.AuxInt - s := x1.Aux + i := auxIntToInt32(x1.AuxInt) + s := auxToSym(x1.Aux) mem := x1.Args[1] p1 := x1.Args[0] sh := v_1 - if sh.Op != OpAMD64SHLQconst || sh.AuxInt != 32 { + if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 32 { continue } r0 
:= sh.Args[0] @@ -19460,7 +19460,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { continue } x0 := r0.Args[0] - if x0.Op != OpAMD64MOVLload || x0.AuxInt != i || x0.Aux != s { + if x0.Op != OpAMD64MOVLload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s { continue } _ = x0.Args[1] @@ -19472,8 +19472,8 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, v.Type) v.copyOf(v0) v1 := b.NewValue0(x0.Pos, OpAMD64MOVQload, typ.UInt64) - v1.AuxInt = i - v1.Aux = s + v1.AuxInt = int32ToAuxInt(i) + v1.Aux = symToAux(s) v1.AddArg2(p0, mem) v0.AddArg(v1) return true @@ -19489,13 +19489,13 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { if s0.Op != OpAMD64SHLQconst { continue } - j0 := s0.AuxInt + j0 := auxIntToInt8(s0.AuxInt) x0 := s0.Args[0] if x0.Op != OpAMD64MOVBload { continue } - i0 := x0.AuxInt - s := x0.Aux + i0 := auxIntToInt32(x0.AuxInt) + s := auxToSym(x0.Aux) mem := x0.Args[1] p := x0.Args[0] or := v_1 @@ -19510,13 +19510,13 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { if s1.Op != OpAMD64SHLQconst { continue } - j1 := s1.AuxInt + j1 := auxIntToInt8(s1.AuxInt) x1 := s1.Args[0] if x1.Op != OpAMD64MOVBload { continue } - i1 := x1.AuxInt - if x1.Aux != s { + i1 := auxIntToInt32(x1.AuxInt) + if auxToSym(x1.Aux) != s { continue } _ = x1.Args[1] @@ -19531,12 +19531,12 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type) v.copyOf(v0) v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 + v1.AuxInt = int8ToAuxInt(j1) v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16) - v2.AuxInt = 8 + v2.AuxInt = int8ToAuxInt(8) v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) - v3.AuxInt = i0 - v3.Aux = s + v3.AuxInt = int32ToAuxInt(i0) + v3.Aux = symToAux(s) v3.AddArg2(p, mem) v2.AddArg(v3) v1.AddArg(v2) @@ -19555,13 +19555,13 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { if s0.Op != OpAMD64SHLQconst { continue } - j0 := s0.AuxInt + j0 
:= auxIntToInt8(s0.AuxInt) x0 := s0.Args[0] if x0.Op != OpAMD64MOVBload { continue } - i := x0.AuxInt - s := x0.Aux + i := auxIntToInt32(x0.AuxInt) + s := auxToSym(x0.Aux) mem := x0.Args[1] p0 := x0.Args[0] or := v_1 @@ -19576,9 +19576,9 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { if s1.Op != OpAMD64SHLQconst { continue } - j1 := s1.AuxInt + j1 := auxIntToInt8(s1.AuxInt) x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBload || x1.AuxInt != i || x1.Aux != s { + if x1.Op != OpAMD64MOVBload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s { continue } _ = x1.Args[1] @@ -19594,12 +19594,12 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type) v.copyOf(v0) v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 + v1.AuxInt = int8ToAuxInt(j1) v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16) - v2.AuxInt = 8 + v2.AuxInt = int8ToAuxInt(8) v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) - v3.AuxInt = i - v3.Aux = s + v3.AuxInt = int32ToAuxInt(i) + v3.Aux = symToAux(s) v3.AddArg2(p0, mem) v2.AddArg(v3) v1.AddArg(v2) @@ -19618,17 +19618,17 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { if s0.Op != OpAMD64SHLQconst { continue } - j0 := s0.AuxInt + j0 := auxIntToInt8(s0.AuxInt) r0 := s0.Args[0] - if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { + if r0.Op != OpAMD64ROLWconst || auxIntToInt8(r0.AuxInt) != 8 { continue } x0 := r0.Args[0] if x0.Op != OpAMD64MOVWload { continue } - i0 := x0.AuxInt - s := x0.Aux + i0 := auxIntToInt32(x0.AuxInt) + s := auxToSym(x0.Aux) mem := x0.Args[1] p := x0.Args[0] or := v_1 @@ -19643,17 +19643,17 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { if s1.Op != OpAMD64SHLQconst { continue } - j1 := s1.AuxInt + j1 := auxIntToInt8(s1.AuxInt) r1 := s1.Args[0] - if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { + if r1.Op != OpAMD64ROLWconst || auxIntToInt8(r1.AuxInt) != 8 { continue } x1 := r1.Args[0] if x1.Op != OpAMD64MOVWload { continue } - i1 := x1.AuxInt 
- if x1.Aux != s { + i1 := auxIntToInt32(x1.AuxInt) + if auxToSym(x1.Aux) != s { continue } _ = x1.Args[1] @@ -19668,11 +19668,11 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type) v.copyOf(v0) v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 + v1.AuxInt = int8ToAuxInt(j1) v2 := b.NewValue0(x1.Pos, OpAMD64BSWAPL, typ.UInt32) v3 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32) - v3.AuxInt = i0 - v3.Aux = s + v3.AuxInt = int32ToAuxInt(i0) + v3.Aux = symToAux(s) v3.AddArg2(p, mem) v2.AddArg(v3) v1.AddArg(v2) @@ -19691,17 +19691,17 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { if s0.Op != OpAMD64SHLQconst { continue } - j0 := s0.AuxInt + j0 := auxIntToInt8(s0.AuxInt) r0 := s0.Args[0] - if r0.Op != OpAMD64ROLWconst || r0.AuxInt != 8 { + if r0.Op != OpAMD64ROLWconst || auxIntToInt8(r0.AuxInt) != 8 { continue } x0 := r0.Args[0] if x0.Op != OpAMD64MOVWload { continue } - i := x0.AuxInt - s := x0.Aux + i := auxIntToInt32(x0.AuxInt) + s := auxToSym(x0.Aux) mem := x0.Args[1] p0 := x0.Args[0] or := v_1 @@ -19716,13 +19716,13 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { if s1.Op != OpAMD64SHLQconst { continue } - j1 := s1.AuxInt + j1 := auxIntToInt8(s1.AuxInt) r1 := s1.Args[0] - if r1.Op != OpAMD64ROLWconst || r1.AuxInt != 8 { + if r1.Op != OpAMD64ROLWconst || auxIntToInt8(r1.AuxInt) != 8 { continue } x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWload || x1.AuxInt != i || x1.Aux != s { + if x1.Op != OpAMD64MOVWload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s { continue } _ = x1.Args[1] @@ -19738,11 +19738,11 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type) v.copyOf(v0) v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 + v1.AuxInt = int8ToAuxInt(j1) v2 := b.NewValue0(x1.Pos, OpAMD64BSWAPL, typ.UInt32) v3 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32) - v3.AuxInt = i - v3.Aux = s + v3.AuxInt = 
int32ToAuxInt(i) + v3.Aux = symToAux(s) v3.AddArg2(p0, mem) v2.AddArg(v3) v1.AddArg(v2) @@ -27039,16 +27039,16 @@ func rewriteValueAMD64_OpAMD64TESTB(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (TESTB (MOVLconst [c]) x) - // result: (TESTBconst [c] x) + // result: (TESTBconst [int8(c)] x) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64MOVLconst { continue } - c := v_0.AuxInt + c := auxIntToInt32(v_0.AuxInt) x := v_1 v.reset(OpAMD64TESTBconst) - v.AuxInt = c + v.AuxInt = int8ToAuxInt(int8(c)) v.AddArg(x) return true } @@ -27089,7 +27089,7 @@ func rewriteValueAMD64_OpAMD64TESTBconst(v *Value) bool { // cond: x.Op != OpAMD64MOVLconst // result: (TESTB x x) for { - if v.AuxInt != -1 { + if auxIntToInt8(v.AuxInt) != -1 { break } x := v_0 @@ -27113,10 +27113,10 @@ func rewriteValueAMD64_OpAMD64TESTL(v *Value) bool { if v_0.Op != OpAMD64MOVLconst { continue } - c := v_0.AuxInt + c := auxIntToInt32(v_0.AuxInt) x := v_1 v.reset(OpAMD64TESTLconst) - v.AuxInt = c + v.AuxInt = int32ToAuxInt(c) v.AddArg(x) return true } @@ -27190,7 +27190,7 @@ func rewriteValueAMD64_OpAMD64TESTLconst(v *Value) bool { // cond: x.Op != OpAMD64MOVLconst // result: (TESTL x x) for { - if v.AuxInt != -1 { + if auxIntToInt32(v.AuxInt) != -1 { break } x := v_0 @@ -27209,19 +27209,19 @@ func rewriteValueAMD64_OpAMD64TESTQ(v *Value) bool { b := v.Block // match: (TESTQ (MOVQconst [c]) x) // cond: is32Bit(c) - // result: (TESTQconst [c] x) + // result: (TESTQconst [int32(c)] x) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64MOVQconst { continue } - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) x := v_1 if !(is32Bit(c)) { continue } v.reset(OpAMD64TESTQconst) - v.AuxInt = c + v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg(x) return true } @@ -27295,7 +27295,7 @@ func rewriteValueAMD64_OpAMD64TESTQconst(v *Value) bool { // cond: x.Op != OpAMD64MOVQconst // result: (TESTQ x x) for { - if v.AuxInt != -1 { + if 
auxIntToInt32(v.AuxInt) != -1 { break } x := v_0 @@ -27313,16 +27313,16 @@ func rewriteValueAMD64_OpAMD64TESTW(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (TESTW (MOVLconst [c]) x) - // result: (TESTWconst [c] x) + // result: (TESTWconst [int16(c)] x) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64MOVLconst { continue } - c := v_0.AuxInt + c := auxIntToInt32(v_0.AuxInt) x := v_1 v.reset(OpAMD64TESTWconst) - v.AuxInt = c + v.AuxInt = int16ToAuxInt(int16(c)) v.AddArg(x) return true } @@ -27363,7 +27363,7 @@ func rewriteValueAMD64_OpAMD64TESTWconst(v *Value) bool { // cond: x.Op != OpAMD64MOVLconst // result: (TESTW x x) for { - if v.AuxInt != -1 { + if auxIntToInt16(v.AuxInt) != -1 { break } x := v_0 From 0f55d37d440d83f206bfc00b4a2521c1a0bb258b Mon Sep 17 00:00:00 2001 From: Alberto Donizetti Date: Thu, 24 Sep 2020 12:00:43 +0200 Subject: [PATCH 018/281] cmd/compile: use typed rules for const folding on amd64 Passes gotip build -toolexec 'toolstash -cmp' -a std Change-Id: I78cfe2962786604bdd78e02a2c33de68512cfeb3 Reviewed-on: https://go-review.googlesource.com/c/go/+/256997 Reviewed-by: Keith Randall Trust: Alberto Donizetti --- src/cmd/compile/internal/ssa/gen/AMD64.rules | 96 ++++---- src/cmd/compile/internal/ssa/rewriteAMD64.go | 242 +++++++++---------- 2 files changed, 169 insertions(+), 169 deletions(-) diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules index 47ae9272d0..8d6fad4393 100644 --- a/src/cmd/compile/internal/ssa/gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules @@ -1404,70 +1404,70 @@ // Remove redundant ops // Not in generic rules, because they may appear after lowering e. g. 
Slicemask -(NEG(Q|L) (NEG(Q|L) x)) -> x -(NEG(Q|L) s:(SUB(Q|L) x y)) && s.Uses == 1 -> (SUB(Q|L) y x) +(NEG(Q|L) (NEG(Q|L) x)) => x +(NEG(Q|L) s:(SUB(Q|L) x y)) && s.Uses == 1 => (SUB(Q|L) y x) // Convert constant subtracts to constant adds -(SUBQconst [c] x) && c != -(1<<31) -> (ADDQconst [-c] x) -(SUBLconst [c] x) -> (ADDLconst [int64(int32(-c))] x) +(SUBQconst [c] x) && c != -(1<<31) => (ADDQconst [-c] x) +(SUBLconst [c] x) => (ADDLconst [-c] x) // generic constant folding // TODO: more of this -(ADDQconst [c] (MOVQconst [d])) -> (MOVQconst [c+d]) -(ADDLconst [c] (MOVLconst [d])) -> (MOVLconst [int64(int32(c+d))]) -(ADDQconst [c] (ADDQconst [d] x)) && is32Bit(c+d) -> (ADDQconst [c+d] x) -(ADDLconst [c] (ADDLconst [d] x)) -> (ADDLconst [int64(int32(c+d))] x) -(SUBQconst (MOVQconst [d]) [c]) -> (MOVQconst [d-c]) -(SUBQconst (SUBQconst x [d]) [c]) && is32Bit(-c-d) -> (ADDQconst [-c-d] x) -(SARQconst [c] (MOVQconst [d])) -> (MOVQconst [d>>uint64(c)]) -(SARLconst [c] (MOVQconst [d])) -> (MOVQconst [int64(int32(d))>>uint64(c)]) -(SARWconst [c] (MOVQconst [d])) -> (MOVQconst [int64(int16(d))>>uint64(c)]) -(SARBconst [c] (MOVQconst [d])) -> (MOVQconst [int64(int8(d))>>uint64(c)]) -(NEGQ (MOVQconst [c])) -> (MOVQconst [-c]) -(NEGL (MOVLconst [c])) -> (MOVLconst [int64(int32(-c))]) -(MULQconst [c] (MOVQconst [d])) -> (MOVQconst [c*d]) -(MULLconst [c] (MOVLconst [d])) -> (MOVLconst [int64(int32(c*d))]) -(ANDQconst [c] (MOVQconst [d])) -> (MOVQconst [c&d]) -(ANDLconst [c] (MOVLconst [d])) -> (MOVLconst [c&d]) -(ORQconst [c] (MOVQconst [d])) -> (MOVQconst [c|d]) -(ORLconst [c] (MOVLconst [d])) -> (MOVLconst [c|d]) -(XORQconst [c] (MOVQconst [d])) -> (MOVQconst [c^d]) -(XORLconst [c] (MOVLconst [d])) -> (MOVLconst [c^d]) -(NOTQ (MOVQconst [c])) -> (MOVQconst [^c]) -(NOTL (MOVLconst [c])) -> (MOVLconst [^c]) -(BTSQconst [c] (MOVQconst [d])) -> (MOVQconst [d|(1< (MOVLconst [d|(1< (MOVQconst [d&^(1< (MOVLconst [d&^(1< (MOVQconst [d^(1< (MOVLconst [d^(1< (MOVQconst [int64(c)+d]) 
+(ADDLconst [c] (MOVLconst [d])) => (MOVLconst [c+d]) +(ADDQconst [c] (ADDQconst [d] x)) && is32Bit(int64(c)+int64(d)) => (ADDQconst [c+d] x) +(ADDLconst [c] (ADDLconst [d] x)) => (ADDLconst [c+d] x) +(SUBQconst (MOVQconst [d]) [c]) => (MOVQconst [d-int64(c)]) +(SUBQconst (SUBQconst x [d]) [c]) && is32Bit(int64(-c)-int64(d)) => (ADDQconst [-c-d] x) +(SARQconst [c] (MOVQconst [d])) => (MOVQconst [d>>uint64(c)]) +(SARLconst [c] (MOVQconst [d])) => (MOVQconst [int64(int32(d))>>uint64(c)]) +(SARWconst [c] (MOVQconst [d])) => (MOVQconst [int64(int16(d))>>uint64(c)]) +(SARBconst [c] (MOVQconst [d])) => (MOVQconst [int64(int8(d))>>uint64(c)]) +(NEGQ (MOVQconst [c])) => (MOVQconst [-c]) +(NEGL (MOVLconst [c])) => (MOVLconst [-c]) +(MULQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)*d]) +(MULLconst [c] (MOVLconst [d])) => (MOVLconst [c*d]) +(ANDQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)&d]) +(ANDLconst [c] (MOVLconst [d])) => (MOVLconst [c&d]) +(ORQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)|d]) +(ORLconst [c] (MOVLconst [d])) => (MOVLconst [c|d]) +(XORQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)^d]) +(XORLconst [c] (MOVLconst [d])) => (MOVLconst [c^d]) +(NOTQ (MOVQconst [c])) => (MOVQconst [^c]) +(NOTL (MOVLconst [c])) => (MOVLconst [^c]) +(BTSQconst [c] (MOVQconst [d])) => (MOVQconst [d|(1< (MOVLconst [d|(1< (MOVQconst [d&^(1< (MOVLconst [d&^(1< (MOVQconst [d^(1< (MOVLconst [d^(1< (MOVQconst [c|d]) +(ORQ (MOVQconst [c]) (MOVQconst [d])) => (MOVQconst [c|d]) // generic simplifications // TODO: more of this -(ADDQ x (NEGQ y)) -> (SUBQ x y) -(ADDL x (NEGL y)) -> (SUBL x y) -(SUBQ x x) -> (MOVQconst [0]) -(SUBL x x) -> (MOVLconst [0]) -(ANDQ x x) -> x -(ANDL x x) -> x -(ORQ x x) -> x -(ORL x x) -> x -(XORQ x x) -> (MOVQconst [0]) -(XORL x x) -> (MOVLconst [0]) +(ADDQ x (NEGQ y)) => (SUBQ x y) +(ADDL x (NEGL y)) => (SUBL x y) +(SUBQ x x) => (MOVQconst [0]) +(SUBL x x) => (MOVLconst [0]) +(ANDQ x x) => x +(ANDL x x) => x +(ORQ x x) => x +(ORL x x) => 
x +(XORQ x x) => (MOVQconst [0]) +(XORL x x) => (MOVLconst [0]) -(SHLLconst [d] (MOVLconst [c])) -> (MOVLconst [int64(int32(c)) << uint64(d)]) -(SHLQconst [d] (MOVQconst [c])) -> (MOVQconst [c << uint64(d)]) -(SHLQconst [d] (MOVLconst [c])) -> (MOVQconst [int64(int32(c)) << uint64(d)]) +(SHLLconst [d] (MOVLconst [c])) => (MOVLconst [c << uint64(d)]) +(SHLQconst [d] (MOVQconst [c])) => (MOVQconst [c << uint64(d)]) +(SHLQconst [d] (MOVLconst [c])) => (MOVQconst [int64(c) << uint64(d)]) // Fold NEG into ADDconst/MULconst. Take care to keep c in 32 bit range. -(NEGQ (ADDQconst [c] (NEGQ x))) && c != -(1<<31) -> (ADDQconst [-c] x) -(MULQconst [c] (NEGQ x)) && c != -(1<<31) -> (MULQconst [-c] x) +(NEGQ (ADDQconst [c] (NEGQ x))) && c != -(1<<31) => (ADDQconst [-c] x) +(MULQconst [c] (NEGQ x)) && c != -(1<<31) => (MULQconst [-c] x) // checking AND against 0. (CMPQconst a:(ANDQ x y) [0]) && a.Uses == 1 => (TESTQ x y) diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index e57d0f3aac..3f58ad392b 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1632,28 +1632,28 @@ func rewriteValueAMD64_OpAMD64ADDLconst(v *Value) bool { return true } // match: (ADDLconst [c] (MOVLconst [d])) - // result: (MOVLconst [int64(int32(c+d))]) + // result: (MOVLconst [c+d]) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpAMD64MOVLconst { break } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) v.reset(OpAMD64MOVLconst) - v.AuxInt = int64(int32(c + d)) + v.AuxInt = int32ToAuxInt(c + d) return true } // match: (ADDLconst [c] (ADDLconst [d] x)) - // result: (ADDLconst [int64(int32(c+d))] x) + // result: (ADDLconst [c+d] x) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpAMD64ADDLconst { break } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] v.reset(OpAMD64ADDLconst) - v.AuxInt = int64(int32(c + d)) + v.AuxInt = 
int32ToAuxInt(c + d) v.AddArg(x) return true } @@ -2244,32 +2244,32 @@ func rewriteValueAMD64_OpAMD64ADDQconst(v *Value) bool { return true } // match: (ADDQconst [c] (MOVQconst [d])) - // result: (MOVQconst [c+d]) + // result: (MOVQconst [int64(c)+d]) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpAMD64MOVQconst { break } - d := v_0.AuxInt + d := auxIntToInt64(v_0.AuxInt) v.reset(OpAMD64MOVQconst) - v.AuxInt = c + d + v.AuxInt = int64ToAuxInt(int64(c) + d) return true } // match: (ADDQconst [c] (ADDQconst [d] x)) - // cond: is32Bit(c+d) + // cond: is32Bit(int64(c)+int64(d)) // result: (ADDQconst [c+d] x) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpAMD64ADDQconst { break } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] - if !(is32Bit(c + d)) { + if !(is32Bit(int64(c) + int64(d))) { break } v.reset(OpAMD64ADDQconst) - v.AuxInt = c + d + v.AuxInt = int32ToAuxInt(c + d) v.AddArg(x) return true } @@ -2858,13 +2858,13 @@ func rewriteValueAMD64_OpAMD64ANDLconst(v *Value) bool { // match: (ANDLconst [c] (MOVLconst [d])) // result: (MOVLconst [c&d]) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpAMD64MOVLconst { break } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) v.reset(OpAMD64MOVLconst) - v.AuxInt = c & d + v.AuxInt = int32ToAuxInt(c & d) return true } return false @@ -3235,15 +3235,15 @@ func rewriteValueAMD64_OpAMD64ANDQconst(v *Value) bool { return true } // match: (ANDQconst [c] (MOVQconst [d])) - // result: (MOVQconst [c&d]) + // result: (MOVQconst [int64(c)&d]) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpAMD64MOVQconst { break } - d := v_0.AuxInt + d := auxIntToInt64(v_0.AuxInt) v.reset(OpAMD64MOVQconst) - v.AuxInt = c & d + v.AuxInt = int64ToAuxInt(int64(c) & d) return true } return false @@ -3502,13 +3502,13 @@ func rewriteValueAMD64_OpAMD64BTCLconst(v *Value) bool { // match: (BTCLconst [c] (MOVLconst [d])) // result: (MOVLconst 
[d^(1<>uint64(c)]) for { - c := v.AuxInt + c := auxIntToInt8(v.AuxInt) if v_0.Op != OpAMD64MOVQconst { break } - d := v_0.AuxInt + d := auxIntToInt64(v_0.AuxInt) v.reset(OpAMD64MOVQconst) - v.AuxInt = int64(int8(d)) >> uint64(c) + v.AuxInt = int64ToAuxInt(int64(int8(d)) >> uint64(c)) return true } return false @@ -20853,13 +20853,13 @@ func rewriteValueAMD64_OpAMD64SARLconst(v *Value) bool { // match: (SARLconst [c] (MOVQconst [d])) // result: (MOVQconst [int64(int32(d))>>uint64(c)]) for { - c := v.AuxInt + c := auxIntToInt8(v.AuxInt) if v_0.Op != OpAMD64MOVQconst { break } - d := v_0.AuxInt + d := auxIntToInt64(v_0.AuxInt) v.reset(OpAMD64MOVQconst) - v.AuxInt = int64(int32(d)) >> uint64(c) + v.AuxInt = int64ToAuxInt(int64(int32(d)) >> uint64(c)) return true } return false @@ -21075,13 +21075,13 @@ func rewriteValueAMD64_OpAMD64SARQconst(v *Value) bool { // match: (SARQconst [c] (MOVQconst [d])) // result: (MOVQconst [d>>uint64(c)]) for { - c := v.AuxInt + c := auxIntToInt8(v.AuxInt) if v_0.Op != OpAMD64MOVQconst { break } - d := v_0.AuxInt + d := auxIntToInt64(v_0.AuxInt) v.reset(OpAMD64MOVQconst) - v.AuxInt = d >> uint64(c) + v.AuxInt = int64ToAuxInt(d >> uint64(c)) return true } return false @@ -21132,13 +21132,13 @@ func rewriteValueAMD64_OpAMD64SARWconst(v *Value) bool { // match: (SARWconst [c] (MOVQconst [d])) // result: (MOVQconst [int64(int16(d))>>uint64(c)]) for { - c := v.AuxInt + c := auxIntToInt8(v.AuxInt) if v_0.Op != OpAMD64MOVQconst { break } - d := v_0.AuxInt + d := auxIntToInt64(v_0.AuxInt) v.reset(OpAMD64MOVQconst) - v.AuxInt = int64(int16(d)) >> uint64(c) + v.AuxInt = int64ToAuxInt(int64(int16(d)) >> uint64(c)) return true } return false @@ -25470,15 +25470,15 @@ func rewriteValueAMD64_OpAMD64SHLLconst(v *Value) bool { return true } // match: (SHLLconst [d] (MOVLconst [c])) - // result: (MOVLconst [int64(int32(c)) << uint64(d)]) + // result: (MOVLconst [c << uint64(d)]) for { - d := v.AuxInt + d := auxIntToInt8(v.AuxInt) if v_0.Op != 
OpAMD64MOVLconst { break } - c := v_0.AuxInt + c := auxIntToInt32(v_0.AuxInt) v.reset(OpAMD64MOVLconst) - v.AuxInt = int64(int32(c)) << uint64(d) + v.AuxInt = int32ToAuxInt(c << uint64(d)) return true } return false @@ -25706,25 +25706,25 @@ func rewriteValueAMD64_OpAMD64SHLQconst(v *Value) bool { // match: (SHLQconst [d] (MOVQconst [c])) // result: (MOVQconst [c << uint64(d)]) for { - d := v.AuxInt + d := auxIntToInt8(v.AuxInt) if v_0.Op != OpAMD64MOVQconst { break } - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) v.reset(OpAMD64MOVQconst) - v.AuxInt = c << uint64(d) + v.AuxInt = int64ToAuxInt(c << uint64(d)) return true } // match: (SHLQconst [d] (MOVLconst [c])) - // result: (MOVQconst [int64(int32(c)) << uint64(d)]) + // result: (MOVQconst [int64(c) << uint64(d)]) for { - d := v.AuxInt + d := auxIntToInt8(v.AuxInt) if v_0.Op != OpAMD64MOVLconst { break } - c := v_0.AuxInt + c := auxIntToInt32(v_0.AuxInt) v.reset(OpAMD64MOVQconst) - v.AuxInt = int64(int32(c)) << uint64(d) + v.AuxInt = int64ToAuxInt(int64(c) << uint64(d)) return true } return false @@ -26379,7 +26379,7 @@ func rewriteValueAMD64_OpAMD64SUBL(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) return true } // match: (SUBL x l:(MOVLload [off] {sym} ptr mem)) @@ -26421,12 +26421,12 @@ func rewriteValueAMD64_OpAMD64SUBLconst(v *Value) bool { return true } // match: (SUBLconst [c] x) - // result: (ADDLconst [int64(int32(-c))] x) + // result: (ADDLconst [-c] x) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) x := v_0 v.reset(OpAMD64ADDLconst) - v.AuxInt = int64(int32(-c)) + v.AuxInt = int32ToAuxInt(-c) v.AddArg(x) return true } @@ -26603,7 +26603,7 @@ func rewriteValueAMD64_OpAMD64SUBQ(v *Value) bool { break } v.reset(OpAMD64MOVQconst) - v.AuxInt = 0 + v.AuxInt = int64ToAuxInt(0) return true } // match: (SUBQ x l:(MOVQload [off] {sym} ptr mem)) @@ -26668,43 +26668,43 @@ func rewriteValueAMD64_OpAMD64SUBQconst(v *Value) bool { // cond: c != -(1<<31) // 
result: (ADDQconst [-c] x) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) x := v_0 if !(c != -(1 << 31)) { break } v.reset(OpAMD64ADDQconst) - v.AuxInt = -c + v.AuxInt = int32ToAuxInt(-c) v.AddArg(x) return true } // match: (SUBQconst (MOVQconst [d]) [c]) - // result: (MOVQconst [d-c]) + // result: (MOVQconst [d-int64(c)]) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpAMD64MOVQconst { break } - d := v_0.AuxInt + d := auxIntToInt64(v_0.AuxInt) v.reset(OpAMD64MOVQconst) - v.AuxInt = d - c + v.AuxInt = int64ToAuxInt(d - int64(c)) return true } // match: (SUBQconst (SUBQconst x [d]) [c]) - // cond: is32Bit(-c-d) + // cond: is32Bit(int64(-c)-int64(d)) // result: (ADDQconst [-c-d] x) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpAMD64SUBQconst { break } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] - if !(is32Bit(-c - d)) { + if !(is32Bit(int64(-c) - int64(d))) { break } v.reset(OpAMD64ADDQconst) - v.AuxInt = -c - d + v.AuxInt = int32ToAuxInt(-c - d) v.AddArg(x) return true } @@ -27674,7 +27674,7 @@ func rewriteValueAMD64_OpAMD64XORL(v *Value) bool { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = 0 + v.AuxInt = int32ToAuxInt(0) return true } // match: (XORL x l:(MOVLload [off] {sym} ptr mem)) @@ -27873,13 +27873,13 @@ func rewriteValueAMD64_OpAMD64XORLconst(v *Value) bool { // match: (XORLconst [c] (MOVLconst [d])) // result: (MOVLconst [c^d]) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpAMD64MOVLconst { break } - d := v_0.AuxInt + d := auxIntToInt32(v_0.AuxInt) v.reset(OpAMD64MOVLconst) - v.AuxInt = c ^ d + v.AuxInt = int32ToAuxInt(c ^ d) return true } return false @@ -28150,7 +28150,7 @@ func rewriteValueAMD64_OpAMD64XORQ(v *Value) bool { break } v.reset(OpAMD64MOVQconst) - v.AuxInt = 0 + v.AuxInt = int64ToAuxInt(0) return true } // match: (XORQ x l:(MOVQload [off] {sym} ptr mem)) @@ -28239,15 +28239,15 @@ func rewriteValueAMD64_OpAMD64XORQconst(v *Value) bool { return true } 
// match: (XORQconst [c] (MOVQconst [d])) - // result: (MOVQconst [c^d]) + // result: (MOVQconst [int64(c)^d]) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpAMD64MOVQconst { break } - d := v_0.AuxInt + d := auxIntToInt64(v_0.AuxInt) v.reset(OpAMD64MOVQconst) - v.AuxInt = c ^ d + v.AuxInt = int64ToAuxInt(int64(c) ^ d) return true } return false From 25a33daa2b7e7bda773705215113450923ae4815 Mon Sep 17 00:00:00 2001 From: Sean Liao Date: Thu, 21 May 2020 17:52:33 +0200 Subject: [PATCH 019/281] encoding/json: allow semicolon in field key / struct tag MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Allow ';' as a valid character for json field keys and struct tags. Fixes #39189 Change-Id: I4b602a1b0674ff028db07623682f0d1e8e9fd6c9 Reviewed-on: https://go-review.googlesource.com/c/go/+/234818 Run-TryBot: Daniel Martí TryBot-Result: Go Bot Trust: Giovanni Bajo Trust: Daniel Martí Reviewed-by: Daniel Martí --- src/encoding/json/encode.go | 2 +- src/encoding/json/tagkey_test.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/encoding/json/encode.go b/src/encoding/json/encode.go index 578d551102..c2d191442c 100644 --- a/src/encoding/json/encode.go +++ b/src/encoding/json/encode.go @@ -946,7 +946,7 @@ func isValidTag(s string) bool { } for _, c := range s { switch { - case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): + case strings.ContainsRune("!#$%&()*+-./:;<=>?@[]^_{|}~ ", c): // Backslash and quote chars are reserved, but // otherwise any punctuation chars are allowed // in a tag name. 
diff --git a/src/encoding/json/tagkey_test.go b/src/encoding/json/tagkey_test.go index f77c49c764..bbb4e6a28d 100644 --- a/src/encoding/json/tagkey_test.go +++ b/src/encoding/json/tagkey_test.go @@ -41,7 +41,7 @@ type percentSlashTag struct { } type punctuationTag struct { - V string `json:"!#$%&()*+-./:<=>?@[]^_{|}~"` // https://golang.org/issue/3546 + V string `json:"!#$%&()*+-./:;<=>?@[]^_{|}~ "` // https://golang.org/issue/3546 } type dashTag struct { @@ -90,7 +90,7 @@ var structTagObjectKeyTests = []struct { {badFormatTag{"Orfevre"}, "Orfevre", "Y"}, {badCodeTag{"Reliable Man"}, "Reliable Man", "Z"}, {percentSlashTag{"brut"}, "brut", "text/html%"}, - {punctuationTag{"Union Rags"}, "Union Rags", "!#$%&()*+-./:<=>?@[]^_{|}~"}, + {punctuationTag{"Union Rags"}, "Union Rags", "!#$%&()*+-./:;<=>?@[]^_{|}~ "}, {spaceTag{"Perreddu"}, "Perreddu", "With space"}, {unicodeTag{"Loukanikos"}, "Loukanikos", "Ελλάδα"}, } From 428509402b03c608e625a4844ab0cce75e4bead2 Mon Sep 17 00:00:00 2001 From: lujjjh Date: Thu, 17 Sep 2020 14:39:13 +0000 Subject: [PATCH 020/281] encoding/json: detect cyclic maps and slices MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Now reports an error if cyclic maps and slices are to be encoded instead of an infinite recursion. This case wasn't handled in CL 187920. Fixes #40745. Change-Id: Ia34b014ecbb71fd2663bb065ba5355a307dbcc15 GitHub-Last-Rev: 6f874944f4065b5237babbb0fdce14c1c74a3c97 GitHub-Pull-Request: golang/go#40756 Reviewed-on: https://go-review.googlesource.com/c/go/+/248358 Reviewed-by: Daniel Martí Trust: Daniel Martí Trust: Bryan C. 
Mills Run-TryBot: Daniel Martí TryBot-Result: Go Bot --- src/encoding/json/encode.go | 27 +++++++++++++++++++++++++++ src/encoding/json/encode_test.go | 27 ++++++++++++++++++++++++++- 2 files changed, 53 insertions(+), 1 deletion(-) diff --git a/src/encoding/json/encode.go b/src/encoding/json/encode.go index c2d191442c..ea5eca51ef 100644 --- a/src/encoding/json/encode.go +++ b/src/encoding/json/encode.go @@ -779,6 +779,16 @@ func (me mapEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) { e.WriteString("null") return } + if e.ptrLevel++; e.ptrLevel > startDetectingCyclesAfter { + // We're a large number of nested ptrEncoder.encode calls deep; + // start checking if we've run into a pointer cycle. + ptr := v.Pointer() + if _, ok := e.ptrSeen[ptr]; ok { + e.error(&UnsupportedValueError{v, fmt.Sprintf("encountered a cycle via %s", v.Type())}) + } + e.ptrSeen[ptr] = struct{}{} + defer delete(e.ptrSeen, ptr) + } e.WriteByte('{') // Extract and sort the keys. @@ -801,6 +811,7 @@ func (me mapEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) { me.elemEnc(e, v.MapIndex(kv.v), opts) } e.WriteByte('}') + e.ptrLevel-- } func newMapEncoder(t reflect.Type) encoderFunc { @@ -857,7 +868,23 @@ func (se sliceEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) { e.WriteString("null") return } + if e.ptrLevel++; e.ptrLevel > startDetectingCyclesAfter { + // We're a large number of nested ptrEncoder.encode calls deep; + // start checking if we've run into a pointer cycle. + // Here we use a struct to memorize the pointer to the first element of the slice + // and its length. 
+ ptr := struct { + ptr uintptr + len int + }{v.Pointer(), v.Len()} + if _, ok := e.ptrSeen[ptr]; ok { + e.error(&UnsupportedValueError{v, fmt.Sprintf("encountered a cycle via %s", v.Type())}) + } + e.ptrSeen[ptr] = struct{}{} + defer delete(e.ptrSeen, ptr) + } se.arrayEnc(e, v, opts) + e.ptrLevel-- } func newSliceEncoder(t reflect.Type) encoderFunc { diff --git a/src/encoding/json/encode_test.go b/src/encoding/json/encode_test.go index 7290eca06f..42bb09d5cd 100644 --- a/src/encoding/json/encode_test.go +++ b/src/encoding/json/encode_test.go @@ -183,7 +183,15 @@ type PointerCycleIndirect struct { Ptrs []interface{} } -var pointerCycleIndirect = &PointerCycleIndirect{} +type RecursiveSlice []RecursiveSlice + +var ( + pointerCycleIndirect = &PointerCycleIndirect{} + mapCycle = make(map[string]interface{}) + sliceCycle = []interface{}{nil} + sliceNoCycle = []interface{}{nil, nil} + recursiveSliceCycle = []RecursiveSlice{nil} +) func init() { ptr := &SamePointerNoCycle{} @@ -192,6 +200,14 @@ func init() { pointerCycle.Ptr = pointerCycle pointerCycleIndirect.Ptrs = []interface{}{pointerCycleIndirect} + + mapCycle["x"] = mapCycle + sliceCycle[0] = sliceCycle + sliceNoCycle[1] = sliceNoCycle[:1] + for i := startDetectingCyclesAfter; i > 0; i-- { + sliceNoCycle = []interface{}{sliceNoCycle} + } + recursiveSliceCycle[0] = recursiveSliceCycle } func TestSamePointerNoCycle(t *testing.T) { @@ -200,12 +216,21 @@ func TestSamePointerNoCycle(t *testing.T) { } } +func TestSliceNoCycle(t *testing.T) { + if _, err := Marshal(sliceNoCycle); err != nil { + t.Fatalf("unexpected error: %v", err) + } +} + var unsupportedValues = []interface{}{ math.NaN(), math.Inf(-1), math.Inf(1), pointerCycle, pointerCycleIndirect, + mapCycle, + sliceCycle, + recursiveSliceCycle, } func TestUnsupportedValues(t *testing.T) { From 4cba6c703f68a7c1718e589feaeb2530d7812fbf Mon Sep 17 00:00:00 2001 From: Changkun Ou Date: Sat, 19 Sep 2020 23:32:12 +0200 Subject: [PATCH 021/281] testing: send t.signal only 
if there is no panic If a signal is sent to t.signal before the panic is triggered, a panicking test may end up with "warning: no tests to run" because the tRunner that invokes the test in t.Run calls runtime.Goexit on panic, which causes the panicking test not be recorded in runTests. Send the signal if and only if there is no panic. Fixes #41479 Change-Id: I812f1303bfe02c443a1902732e68d21620d6672e Reviewed-on: https://go-review.googlesource.com/c/go/+/256098 Run-TryBot: Emmanuel Odeke TryBot-Result: Go Bot Reviewed-by: Ian Lance Taylor Reviewed-by: Emmanuel Odeke Trust: Emmanuel Odeke Trust: Bryan C. Mills --- .../go/testdata/script/test_cleanup_failnow.txt | 14 ++++++++++++++ src/testing/testing.go | 10 ++++++++-- 2 files changed, 22 insertions(+), 2 deletions(-) diff --git a/src/cmd/go/testdata/script/test_cleanup_failnow.txt b/src/cmd/go/testdata/script/test_cleanup_failnow.txt index 5ad4185fc1..0737a93db2 100644 --- a/src/cmd/go/testdata/script/test_cleanup_failnow.txt +++ b/src/cmd/go/testdata/script/test_cleanup_failnow.txt @@ -1,11 +1,25 @@ # For issue 41355 [short] skip +# This test could fail if the testing package does not wait until +# a panicking test does the panic. Turn off multithreading, GC, and +# async preemption to increase the probability of such a failure. +env GOMAXPROCS=1 +env GOGC=off +env GODEBUG=asyncpreempt=off + +# If the test exits with 'no tests to run', it means the testing package +# implementation is incorrect and does not wait until a test panic. +# If the test exits with '(?s)panic: die.*panic: die', it means +# the testing package did an extra panic for a panicking test. + ! go test -v cleanup_failnow/panic_nocleanup_test.go +! stdout 'no tests to run' stdout '(?s)panic: die \[recovered\].*panic: die' ! stdout '(?s)panic: die \[recovered\].*panic: die.*panic: die' ! go test -v cleanup_failnow/panic_withcleanup_test.go +! stdout 'no tests to run' stdout '(?s)panic: die \[recovered\].*panic: die' ! 
stdout '(?s)panic: die \[recovered\].*panic: die.*panic: die' diff --git a/src/testing/testing.go b/src/testing/testing.go index d86354093a..a44c0a0749 100644 --- a/src/testing/testing.go +++ b/src/testing/testing.go @@ -1091,10 +1091,16 @@ func tRunner(t *T, fn func(t *T)) { // complete even if a cleanup function calls t.FailNow. See issue 41355. didPanic := false defer func() { - t.signal <- signal - if err != nil && !didPanic { + if didPanic { + return + } + if err != nil { panic(err) } + // Only report that the test is complete if it doesn't panic, + // as otherwise the test binary can exit before the panic is + // reported to the user. See issue 41479. + t.signal <- signal }() doPanic := func(err interface{}) { From 5824a4ce1a0e47f3093128371c7156b35fe9d806 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Daniel=20Mart=C3=AD?= Date: Fri, 12 Jun 2020 15:14:42 +0100 Subject: [PATCH 022/281] cmd/go: error when -c or -i are used with unknown flags MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Other test flags passed to the test binary, such as -run or -count, are equally pointless when -c or -i are used, since the test binary is never run. However, custom flags in that scenario are far more likely to be due to human error, such as: # note the "ldflags" typo, which silently did nothing go test -c -lflags=-w Instead, make this scenario error. It seems unlikely that anyone is using -c along with intended custom-defined test flags, and if they are, removing those extra flags that do nothing is probably a good idea anyway. We don't add this restriction for the flags defined in 'go help testflag', since they are far less likely to be typos or unintended mistakes. Another reason not to do that change is that other commands similarly silently ignore no-op flags, such as: # -d disables the build, so -ldflags is never used go get -d -ldflags=-w Fixes #39484. 
Change-Id: I6ba2f6866562fe8f8fceaf4cd862d874bf5cd978 Reviewed-on: https://go-review.googlesource.com/c/go/+/237697 Trust: Daniel Martí Run-TryBot: Daniel Martí TryBot-Result: Go Bot Reviewed-by: Bryan C. Mills --- src/cmd/go/internal/test/testflag.go | 16 ++++++++++++++++ src/cmd/go/testdata/script/test_flag.txt | 18 ++++++++++++++++++ 2 files changed, 34 insertions(+) diff --git a/src/cmd/go/internal/test/testflag.go b/src/cmd/go/internal/test/testflag.go index 4f0a8924f1..d2671ff5a7 100644 --- a/src/cmd/go/internal/test/testflag.go +++ b/src/cmd/go/internal/test/testflag.go @@ -212,6 +212,10 @@ func testFlags(args []string) (packageNames, passToTest []string) { } }) + // firstUnknownFlag helps us report an error when flags not known to 'go + // test' are used along with -i or -c. + firstUnknownFlag := "" + explicitArgs := make([]string, 0, len(args)) inPkgList := false afterFlagWithoutValue := false @@ -288,6 +292,10 @@ func testFlags(args []string) (packageNames, passToTest []string) { break } + if firstUnknownFlag == "" { + firstUnknownFlag = nd.RawArg + } + explicitArgs = append(explicitArgs, nd.RawArg) args = remainingArgs if !nd.HasValue { @@ -312,6 +320,14 @@ func testFlags(args []string) (packageNames, passToTest []string) { args = remainingArgs } + if firstUnknownFlag != "" && (testC || cfg.BuildI) { + buildFlag := "-c" + if !testC { + buildFlag = "-i" + } + fmt.Fprintf(os.Stderr, "flag %s is not a 'go test' flag (unknown flags cannot be used with %s)\n", firstUnknownFlag, buildFlag) + exitWithUsage() + } var injectedFlags []string if testJSON { diff --git a/src/cmd/go/testdata/script/test_flag.txt b/src/cmd/go/testdata/script/test_flag.txt index bbcad1c59c..ec88d38cbe 100644 --- a/src/cmd/go/testdata/script/test_flag.txt +++ b/src/cmd/go/testdata/script/test_flag.txt @@ -3,6 +3,22 @@ go test flag_test.go -v -args -v=7 # Two distinct -v flags go test -v flag_test.go -args -v=7 # Two distinct -v flags +# Using a custom flag mixed with regular 'go test' 
flags should be OK. +go test -count=1 -custom -args -v=7 + +# However, it should be an error to use custom flags when -i or -c are used, +# since we know for sure that no test binary will run at all. +! go test -i -custom +stderr '^flag -custom is not a ''go test'' flag \(unknown flags cannot be used with -i\)$' +! go test -c -custom +stderr '^flag -custom is not a ''go test'' flag \(unknown flags cannot be used with -c\)$' + +# The same should apply even if -c or -i come after a custom flag. +! go test -custom -c +stderr '^flag -custom is not a ''go test'' flag \(unknown flags cannot be used with -c\)$' + +-- go.mod -- +module m -- flag_test.go -- package flag_test @@ -14,6 +30,8 @@ import ( var v = flag.Int("v", 0, "v flag") +var custom = flag.Bool("custom", false, "") + // Run this as go test pkg -v=7 func TestVFlagIsSet(t *testing.T) { if *v != 7 { From 23cc16cdd2fbda37dd54de944462f57795da7bd2 Mon Sep 17 00:00:00 2001 From: Robert Griesemer Date: Thu, 24 Sep 2020 12:44:19 -0700 Subject: [PATCH 023/281] spec: better variable name for operator example Suggested by @yaxinlx. Fixes #41612. Change-Id: I98b9968a95d090ee3c67ff02678e1874e6d98c33 Reviewed-on: https://go-review.googlesource.com/c/go/+/257159 Trust: Robert Griesemer Reviewed-by: Rob Pike Reviewed-by: Ian Lance Taylor --- doc/go_spec.html | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/go_spec.html b/doc/go_spec.html index 154bdbfeaf..e9e9e42130 100644 --- a/doc/go_spec.html +++ b/doc/go_spec.html @@ -1,6 +1,6 @@ @@ -3646,7 +3646,7 @@ For instance, x / y * z is the same as (x / y) * z. x <= f() ^a >> b f() || g() -x == y+1 && <-chanPtr > 0 +x == y+1 && <-chanInt > 0 From f765dcbd5c8205a0d222257b4514b1194cad26f8 Mon Sep 17 00:00:00 2001 From: Than McIntosh Date: Thu, 24 Sep 2020 16:11:43 -0400 Subject: [PATCH 024/281] cmd/compile,cmd/asm: fix buglet in -S=2 output In CL 255718 the -S=2 assembly output was enhanced to dump symbol ABIs. 
This patch fixes a bug in that CL: when dumping the relocations on a symbol, we were dumping the symbol's ABI as opposed to the relocation target symbol's ABI. Change-Id: I134128687757f549fa37b998cff1290765889140 Reviewed-on: https://go-review.googlesource.com/c/go/+/257202 Trust: Than McIntosh Run-TryBot: Than McIntosh Reviewed-by: David Chase Reviewed-by: Cherry Zhang TryBot-Result: Go Bot --- src/cmd/internal/obj/objfile.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cmd/internal/obj/objfile.go b/src/cmd/internal/obj/objfile.go index aede5fe71c..e4b9620568 100644 --- a/src/cmd/internal/obj/objfile.go +++ b/src/cmd/internal/obj/objfile.go @@ -734,7 +734,7 @@ func (ctxt *Link) writeSymDebugNamed(s *LSym, name string) { if r.Sym != nil { name = r.Sym.Name if ctxt.Debugasm > 1 { - ver = fmt.Sprintf("<%d>", s.ABI()) + ver = fmt.Sprintf("<%d>", r.Sym.ABI()) } } else if r.Type == objabi.R_TLS_LE { name = "TLS" From ea106cc07ac73110a8a25fcc5aef07b283159db0 Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Thu, 24 Sep 2020 14:25:21 -0700 Subject: [PATCH 025/281] cmd/compile: prevent 387+float32+pie from clobbering registers The 387 port needs to load a floating-point control word from a global location to implement float32 arithmetic. When compiling with -pie, loading that control word clobbers an integer register. If that register had something important in it, boom. Fix by using LEAL to materialize the address of the global location first. LEAL with -pie works because the destination register is used as the scratch register. 387 support is about to go away (#40255), so this will need to be backported to have any effect. No test. I have one, but it requires building with -pie, which requires cgo. Our testing infrastructure doesn't make that easy. Not worth it for a port which is about to vanish. 
Fixes #41503 Change-Id: I140f9fc8fdce4e74a52c2c046e2bd30ae476d295 Reviewed-on: https://go-review.googlesource.com/c/go/+/257277 Run-TryBot: Keith Randall Reviewed-by: Cherry Zhang Reviewed-by: Matthew Dempsky TryBot-Result: Go Bot Trust: Keith Randall --- src/cmd/compile/internal/x86/387.go | 68 ++++++++++++++++++++--------- 1 file changed, 48 insertions(+), 20 deletions(-) diff --git a/src/cmd/compile/internal/x86/387.go b/src/cmd/compile/internal/x86/387.go index 796aa82f19..594adb2cd5 100644 --- a/src/cmd/compile/internal/x86/387.go +++ b/src/cmd/compile/internal/x86/387.go @@ -139,12 +139,18 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) { // Set precision if needed. 64 bits is the default. switch v.Op { case ssa.Op386ADDSS, ssa.Op386SUBSS, ssa.Op386MULSS, ssa.Op386DIVSS: - p := s.Prog(x86.AFSTCW) + // Save AX so we can use it as scratch space. + p := s.Prog(x86.AMOVL) + p.From.Type = obj.TYPE_REG + p.From.Reg = x86.REG_AX s.AddrScratch(&p.To) - p = s.Prog(x86.AFLDCW) - p.From.Type = obj.TYPE_MEM - p.From.Name = obj.NAME_EXTERN - p.From.Sym = gc.ControlWord32 + // Install a 32-bit version of the control word. + installControlWord(s, gc.ControlWord32, x86.REG_AX) + // Restore AX. + p = s.Prog(x86.AMOVL) + s.AddrScratch(&p.From) + p.To.Type = obj.TYPE_REG + p.To.Reg = x86.REG_AX } var op obj.As @@ -167,8 +173,7 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) { // Restore precision if needed. switch v.Op { case ssa.Op386ADDSS, ssa.Op386SUBSS, ssa.Op386MULSS, ssa.Op386DIVSS: - p := s.Prog(x86.AFLDCW) - s.AddrScratch(&p.From) + restoreControlWord(s) } case ssa.Op386UCOMISS, ssa.Op386UCOMISD: @@ -225,19 +230,11 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) { case ssa.Op386CVTTSD2SL, ssa.Op386CVTTSS2SL: push(s, v.Args[0]) - // Save control word. - p := s.Prog(x86.AFSTCW) - s.AddrScratch(&p.To) - p.To.Offset += 4 - // Load control word which truncates (rounds towards zero). 
- p = s.Prog(x86.AFLDCW) - p.From.Type = obj.TYPE_MEM - p.From.Name = obj.NAME_EXTERN - p.From.Sym = gc.ControlWord64trunc + installControlWord(s, gc.ControlWord64trunc, v.Reg()) // Now do the conversion. - p = s.Prog(x86.AFMOVLP) + p := s.Prog(x86.AFMOVLP) p.From.Type = obj.TYPE_REG p.From.Reg = x86.REG_F0 s.AddrScratch(&p.To) @@ -247,9 +244,7 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) { p.To.Reg = v.Reg() // Restore control word. - p = s.Prog(x86.AFLDCW) - s.AddrScratch(&p.From) - p.From.Offset += 4 + restoreControlWord(s) case ssa.Op386CVTSS2SD: // float32 -> float64 is a nop @@ -373,3 +368,36 @@ func ssaGenBlock387(s *gc.SSAGenState, b, next *ssa.Block) { ssaGenBlock(s, b, next) } + +// installControlWord saves the current floating-point control +// word and installs a new one loaded from cw. +// scratchReg must be an unused register. +// This call must be paired with restoreControlWord. +// Bytes 4-5 of the scratch space (s.AddrScratch) are used between +// this call and restoreControlWord. +func installControlWord(s *gc.SSAGenState, cw *obj.LSym, scratchReg int16) { + // Save current control word. + p := s.Prog(x86.AFSTCW) + s.AddrScratch(&p.To) + p.To.Offset += 4 + + // Materialize address of new control word. + // Note: this must be a seperate instruction to handle PIE correctly. + // See issue 41503. + p = s.Prog(x86.ALEAL) + p.From.Type = obj.TYPE_MEM + p.From.Name = obj.NAME_EXTERN + p.From.Sym = cw + p.To.Type = obj.TYPE_REG + p.To.Reg = scratchReg + + // Load replacement control word. 
+ p = s.Prog(x86.AFLDCW) + p.From.Type = obj.TYPE_MEM + p.From.Reg = scratchReg +} +func restoreControlWord(s *gc.SSAGenState) { + p := s.Prog(x86.AFLDCW) + s.AddrScratch(&p.From) + p.From.Offset += 4 +} From fa04d488bd54b8fdd78cc9bcc6d90de4bf5f8efb Mon Sep 17 00:00:00 2001 From: fanzha02 Date: Wed, 16 Sep 2020 14:05:18 +0800 Subject: [PATCH 026/281] cmd/asm: fix the issue of moving 128-bit integers to vector registers on arm64 The CL 249758 added `FMOVQ $vcon, Vd` instruction and assembler used 128-bit simd literal-loading to load `$vcon` from pool into 128-bit vector register `Vd`. Because Go does not have 128-bit integers for now, the assembler will report an error of `immediate out of range` when assembleing `FMOVQ $0x123456789abcdef0123456789abcdef, V0` instruction. This patch lets 128-bit integers take two 64-bit operands, for the high and low parts separately and adds `VMOVQ $hi, $lo, Vd` instruction to move `$hi<<64+$lo' into 128-bit register `Vd`. In addition, this patch renames `FMOVQ/FMOVD/FMOVS` ops to 'VMOVQ/VMOVD/VMOVS' and uses them to move 128-bit, 64-bit and 32-bit constants into vector registers, respectively Update the go doc. 
Fixes #40725 Change-Id: Ia3c83bb6463f104d2bee960905053a97299e0a3a Reviewed-on: https://go-review.googlesource.com/c/go/+/255900 Trust: fannie zhang Reviewed-by: Cherry Zhang --- src/cmd/asm/internal/arch/arm64.go | 18 ++++++----- src/cmd/asm/internal/asm/asm.go | 21 ++++++------- src/cmd/asm/internal/asm/testdata/arm64.s | 6 ++-- src/cmd/internal/obj/arm64/a.out.go | 4 ++- src/cmd/internal/obj/arm64/anames.go | 4 ++- src/cmd/internal/obj/arm64/asm7.go | 38 ++++++++++++----------- src/cmd/internal/obj/arm64/doc.go | 10 ++++++ 7 files changed, 60 insertions(+), 41 deletions(-) diff --git a/src/cmd/asm/internal/arch/arm64.go b/src/cmd/asm/internal/arch/arm64.go index 3817fcd5c2..e643889aef 100644 --- a/src/cmd/asm/internal/arch/arm64.go +++ b/src/cmd/asm/internal/arch/arm64.go @@ -82,6 +82,17 @@ func IsARM64STLXR(op obj.As) bool { return false } +// IsARM64TBL reports whether the op (as defined by an arm64.A* +// constant) is one of the TBL-like instructions and one of its +// inputs does not fit into prog.Reg, so require special handling. +func IsARM64TBL(op obj.As) bool { + switch op { + case arm64.AVTBL, arm64.AVMOVQ: + return true + } + return false +} + // ARM64Suffix handles the special suffix for the ARM64. // It returns a boolean to indicate success; failure means // cond was unrecognized. @@ -125,13 +136,6 @@ func arm64RegisterNumber(name string, n int16) (int16, bool) { return 0, false } -// IsARM64TBL reports whether the op (as defined by an arm64.A* -// constant) is one of the table lookup instructions that require special -// handling. -func IsARM64TBL(op obj.As) bool { - return op == arm64.AVTBL -} - // ARM64RegisterExtension parses an ARM64 register with extension or arrangement. 
func ARM64RegisterExtension(a *obj.Addr, ext string, reg, num int16, isAmount, isIndex bool) error { Rnum := (reg & 31) + int16(num<<5) diff --git a/src/cmd/asm/internal/asm/asm.go b/src/cmd/asm/internal/asm/asm.go index 42e217dc23..7878d74549 100644 --- a/src/cmd/asm/internal/asm/asm.go +++ b/src/cmd/asm/internal/asm/asm.go @@ -622,8 +622,9 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) { prog.SetFrom3(a[1]) prog.To = a[2] case sys.ARM64: - // ARM64 instructions with one input and two outputs. - if arch.IsARM64STLXR(op) { + switch { + case arch.IsARM64STLXR(op): + // ARM64 instructions with one input and two outputs. prog.From = a[0] prog.To = a[1] if a[2].Type != obj.TYPE_REG { @@ -631,20 +632,16 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) { return } prog.RegTo2 = a[2].Reg - break - } - if arch.IsARM64TBL(op) { + case arch.IsARM64TBL(op): + // one of its inputs does not fit into prog.Reg. prog.From = a[0] - if a[1].Type != obj.TYPE_REGLIST { - p.errorf("%s: expected list; found %s", op, obj.Dconv(prog, &a[1])) - } prog.SetFrom3(a[1]) prog.To = a[2] - break + default: + prog.From = a[0] + prog.Reg = p.getRegister(prog, op, &a[1]) + prog.To = a[2] } - prog.From = a[0] - prog.Reg = p.getRegister(prog, op, &a[1]) - prog.To = a[2] case sys.I386: prog.From = a[0] prog.SetFrom3(a[1]) diff --git a/src/cmd/asm/internal/asm/testdata/arm64.s b/src/cmd/asm/internal/asm/testdata/arm64.s index acfb16b096..e277c04b7c 100644 --- a/src/cmd/asm/internal/asm/testdata/arm64.s +++ b/src/cmd/asm/internal/asm/testdata/arm64.s @@ -218,8 +218,10 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8 FMOVD $(28.0), F4 // 0490671e // move a large constant to a Vd. 
- FMOVD $0x8040201008040201, V20 // FMOVD $-9205322385119247871, V20 - FMOVQ $0x8040201008040202, V29 // FMOVQ $-9205322385119247870, V29 + VMOVS $0x80402010, V11 // VMOVS $2151686160, V11 + VMOVD $0x8040201008040201, V20 // VMOVD $-9205322385119247871, V20 + VMOVQ $0x7040201008040201, $0x8040201008040201, V10 // VMOVQ $8088500183983456769, $-9205322385119247871, V10 + VMOVQ $0x8040201008040202, $0x7040201008040201, V20 // VMOVQ $-9205322385119247870, $8088500183983456769, V20 FMOVS (R2)(R6), F4 // FMOVS (R2)(R6*1), F4 // 446866bc FMOVS (R2)(R6<<2), F4 // 447866bc diff --git a/src/cmd/internal/obj/arm64/a.out.go b/src/cmd/internal/obj/arm64/a.out.go index b3c9e9a18e..1ca41c15ba 100644 --- a/src/cmd/internal/obj/arm64/a.out.go +++ b/src/cmd/internal/obj/arm64/a.out.go @@ -875,7 +875,9 @@ const ( AFLDPS AFMOVD AFMOVS - AFMOVQ + AVMOVQ + AVMOVD + AVMOVS AFMULD AFMULS AFNEGD diff --git a/src/cmd/internal/obj/arm64/anames.go b/src/cmd/internal/obj/arm64/anames.go index 48c066abfd..900cdba817 100644 --- a/src/cmd/internal/obj/arm64/anames.go +++ b/src/cmd/internal/obj/arm64/anames.go @@ -381,7 +381,9 @@ var Anames = []string{ "FLDPS", "FMOVD", "FMOVS", - "FMOVQ", + "VMOVQ", + "VMOVD", + "VMOVS", "FMULD", "FMULS", "FNEGD", diff --git a/src/cmd/internal/obj/arm64/asm7.go b/src/cmd/internal/obj/arm64/asm7.go index fc2033d689..ee4a33eef4 100644 --- a/src/cmd/internal/obj/arm64/asm7.go +++ b/src/cmd/internal/obj/arm64/asm7.go @@ -260,8 +260,9 @@ func MOVCONST(d int64, s int, rt int) uint32 { const ( // Optab.flag LFROM = 1 << 0 // p.From uses constant pool - LTO = 1 << 1 // p.To uses constant pool - NOTUSETMP = 1 << 2 // p expands to multiple instructions, but does NOT use REGTMP + LFROM3 = 1 << 1 // p.From3 uses constant pool + LTO = 1 << 2 // p.To uses constant pool + NOTUSETMP = 1 << 3 // p expands to multiple instructions, but does NOT use REGTMP ) var optab = []Optab{ @@ -397,10 +398,10 @@ var optab = []Optab{ /* load long effective stack address (load int32 offset and 
add) */ {AMOVD, C_LACON, C_NONE, C_NONE, C_RSP, 34, 8, REGSP, LFROM, 0}, - // Move a large constant to a Vn. - {AFMOVQ, C_VCON, C_NONE, C_NONE, C_VREG, 101, 4, 0, LFROM, 0}, - {AFMOVD, C_VCON, C_NONE, C_NONE, C_VREG, 101, 4, 0, LFROM, 0}, - {AFMOVS, C_LCON, C_NONE, C_NONE, C_VREG, 101, 4, 0, LFROM, 0}, + // Move a large constant to a vector register. + {AVMOVQ, C_VCON, C_NONE, C_VCON, C_VREG, 101, 4, 0, LFROM | LFROM3, 0}, + {AVMOVD, C_VCON, C_NONE, C_NONE, C_VREG, 101, 4, 0, LFROM, 0}, + {AVMOVS, C_LCON, C_NONE, C_NONE, C_VREG, 101, 4, 0, LFROM, 0}, /* jump operations */ {AB, C_NONE, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0}, @@ -950,13 +951,14 @@ func span7(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { c.ctxt.Diag("zero-width instruction\n%v", p) } } - switch o.flag & (LFROM | LTO) { - case LFROM: + if o.flag&LFROM != 0 { c.addpool(p, &p.From) - - case LTO: + } + if o.flag&LFROM3 != 0 { + c.addpool(p, p.GetFrom3()) + } + if o.flag<O != 0 { c.addpool(p, &p.To) - break } if p.As == AB || p.As == obj.ARET || p.As == AERET { /* TODO: other unconditional operations */ @@ -1174,8 +1176,8 @@ func (c *ctxt7) addpool(p *obj.Prog, a *obj.Addr) { sz := 4 if a.Type == obj.TYPE_CONST { - if lit != int64(int32(lit)) && uint64(lit) != uint64(uint32(lit)) { - // out of range -0x80000000 ~ 0xffffffff, must store 64-bit + if (lit != int64(int32(lit)) && uint64(lit) != uint64(uint32(lit))) || p.As == AVMOVQ || p.As == AVMOVD { + // out of range -0x80000000 ~ 0xffffffff or VMOVQ or VMOVD operand, must store 64-bit. 
t.As = ADWORD sz = 8 } // else store 32-bit @@ -2675,7 +2677,7 @@ func buildop(ctxt *obj.Link) { case AFCSELD: oprangeset(AFCSELS, t) - case AFMOVS, AFMOVD, AFMOVQ: + case AFMOVS, AFMOVD, AVMOVQ, AVMOVD, AVMOVS: break case AFCVTZSD: @@ -5142,7 +5144,7 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { o1 = q<<30 | 0xe<<24 | len<<13 o1 |= (uint32(rf&31) << 16) | uint32(offset&31)<<5 | uint32(rt&31) - case 101: // FOMVQ/FMOVD $vcon, Vd -> load from constant pool. + case 101: // VMOVQ $vcon1, $vcon2, Vd or VMOVD|VMOVS $vcon, Vd -> FMOVQ/FMOVD/FMOVS pool(PC), Vd: load from constant pool. o1 = c.omovlit(p.As, p, &p.From, int(p.To.Reg)) case 102: /* vushll, vushll2, vuxtl, vuxtl2 */ @@ -6672,15 +6674,15 @@ func (c *ctxt7) omovlit(as obj.As, p *obj.Prog, a *obj.Addr, dr int) uint32 { } else { fp, w := 0, 0 switch as { - case AFMOVS: + case AFMOVS, AVMOVS: fp = 1 w = 0 /* 32-bit SIMD/FP */ - case AFMOVD: + case AFMOVD, AVMOVD: fp = 1 w = 1 /* 64-bit SIMD/FP */ - case AFMOVQ: + case AVMOVQ: fp = 1 w = 2 /* 128-bit SIMD/FP */ diff --git a/src/cmd/internal/obj/arm64/doc.go b/src/cmd/internal/obj/arm64/doc.go index 7515217544..efd4577f56 100644 --- a/src/cmd/internal/obj/arm64/doc.go +++ b/src/cmd/internal/obj/arm64/doc.go @@ -86,6 +86,16 @@ In the following example, PCALIGN at the entry of the function Add will align it MOVD $1, R1 RET +7. Move large constants to vector registers. + +Go asm uses VMOVQ/VMOVD/VMOVS to move 128-bit, 64-bit and 32-bit constants into vector registers, respectively. +And for a 128-bit interger, it take two 64-bit operands, for the high and low parts separately. + + Examples: + VMOVS $0x11223344, V0 + VMOVD $0x1122334455667788, V1 + VMOVQ $0x1122334455667788, $8877665544332211, V2 // V2=0x11223344556677888877665544332211 + Special Cases. (1) umov is written as VMOV. 
From 2333c6299f340a5f76a73a4fec6db23ffa388e97 Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Thu, 24 Sep 2020 19:26:33 -0700 Subject: [PATCH 027/281] runtime: use old capacity to decide on append growth regime We grow the backing store on append by 2x for small sizes and 1.25x for large sizes. The threshold we use for picking the growth factor used to depend on the old length, not the old capacity. That's kind of unfortunate, because then doing append(s, 0, 0) and append(append(s, 0), 0) do different things. (If s has one more spot available, then the former expression chooses its growth based on len(s) and the latter on len(s)+1.) If we instead use the old capacity, we get more consistent behavior. (Both expressions use len(s)+1 == cap(s) to decide.) Fixes #41239 Change-Id: I40686471d256edd72ec92aef973a89b52e235d4b Reviewed-on: https://go-review.googlesource.com/c/go/+/257338 Trust: Keith Randall Trust: Josh Bleecher Snyder Run-TryBot: Keith Randall TryBot-Result: Go Bot Reviewed-by: Josh Bleecher Snyder --- src/runtime/slice.go | 2 +- test/fixedbugs/issue41239.go | 19 +++++++++++++++++++ 2 files changed, 20 insertions(+), 1 deletion(-) create mode 100644 test/fixedbugs/issue41239.go diff --git a/src/runtime/slice.go b/src/runtime/slice.go index 82a45c78a9..c0647d95a0 100644 --- a/src/runtime/slice.go +++ b/src/runtime/slice.go @@ -146,7 +146,7 @@ func growslice(et *_type, old slice, cap int) slice { if cap > doublecap { newcap = cap } else { - if old.len < 1024 { + if old.cap < 1024 { newcap = doublecap } else { // Check 0 < newcap to detect overflow diff --git a/test/fixedbugs/issue41239.go b/test/fixedbugs/issue41239.go new file mode 100644 index 0000000000..3e9ef5eb66 --- /dev/null +++ b/test/fixedbugs/issue41239.go @@ -0,0 +1,19 @@ +// run + +// Copyright 2020 The Go Authors. All rights reserved. Use of this +// source code is governed by a BSD-style license that can be found in +// the LICENSE file. 
+ +package main + +import "fmt" + +func main() { + const N = 1024 + var a [N]int + x := cap(append(a[:N-1:N], 9, 9)) + y := cap(append(a[:N:N], 9)) + if x != y { + panic(fmt.Sprintf("different capacity on append: %d vs %d", x, y)) + } +} From 2e0f8c379f91f77272d096929cf22391b64d0e34 Mon Sep 17 00:00:00 2001 From: Austin Clements Date: Thu, 24 Sep 2020 20:58:33 -0400 Subject: [PATCH 028/281] runtime: update go:notinheap documentation The rules for go:notinheap were recently tweaked to disallow stack allocation (CL 249917). This CL updates the documentation about go:notinheap in runtime/HACKING.md. Change-Id: Ibca5d9b9d02e1c22c6af1d303aa84c6303a86d92 Reviewed-on: https://go-review.googlesource.com/c/go/+/257357 Trust: Austin Clements Reviewed-by: Keith Randall --- src/runtime/HACKING.md | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/src/runtime/HACKING.md b/src/runtime/HACKING.md index 993edc67d8..fbf22eeb44 100644 --- a/src/runtime/HACKING.md +++ b/src/runtime/HACKING.md @@ -281,11 +281,12 @@ go:notinheap ------------ `go:notinheap` applies to type declarations. It indicates that a type -must never be allocated from the GC'd heap. Specifically, pointers to -this type must always fail the `runtime.inheap` check. The type may be -used for global variables, for stack variables, or for objects in -unmanaged memory (e.g., allocated with `sysAlloc`, `persistentalloc`, -`fixalloc`, or from a manually-managed span). Specifically: +must never be allocated from the GC'd heap or on the stack. +Specifically, pointers to this type must always fail the +`runtime.inheap` check. The type may be used for global variables, or +for objects in unmanaged memory (e.g., allocated with `sysAlloc`, +`persistentalloc`, `fixalloc`, or from a manually-managed span). +Specifically: 1. `new(T)`, `make([]T)`, `append([]T, ...)` and implicit heap allocation of T are disallowed. 
(Though implicit allocations are From 989ab8a7d67c4111d71bd3a8bb2acbe38e16ff5b Mon Sep 17 00:00:00 2001 From: Michael Pratt Date: Fri, 11 Sep 2020 12:14:06 -0400 Subject: [PATCH 029/281] runtime: drop nosplit from primary lockrank functions acquireLockRank and releaseLockRank are called from nosplit context, and thus must be nosplit. lockWithRank, unlockWithRank, and lockWithRankMayAcquire are called from splittable context, and thus don't strictly need to be nosplit. The stated reasoning for making these functions nosplit is to avoid re-entrant calls due to a stack split on function entry taking a lock. There are two potential issues at play here: 1. A stack split on function entry adds a new lock ordering edge before we (a) take lock l, or (b) release lock l. 2. A stack split in a child call (such as to lock2) introduces a new lock ordering edge _in the wrong order_ because e.g., in the case of lockWithRank, we've noted that l is taken, but the stack split in lock2 actually takes stack split locks _before_ l is actually locked. (1) is indeed avoided by marking these functions nosplit, but this is really just a bit of duct tape that generally has no effect overall. Any earlier call can have a stack split and introduce the same new edge. This includes lock/unlock which are not nosplit! I began this CL as a change to extend nosplit to lock and unlock to try to make this mitigation more effective, but I've realized that as long as there is a _single_ nosplit call between a lock and unlock, we can end up with the edge. There seems to be few enough cases without any calls that it does not seem worth the extra cognitive load to extend nosplit throughout all of the locking functions. (2) is a real issue which would cause incorrect ordering, but it is already handled by switching to the system stack before recording the lock ordering. Adding / removing nosplit has no effect on this issue.
Change-Id: I94fbd21b2bf928dbf1bf71aabb6788fc0a012829 Reviewed-on: https://go-review.googlesource.com/c/go/+/254367 Run-TryBot: Michael Pratt TryBot-Result: Go Bot Reviewed-by: Dan Scales Trust: Michael Pratt --- src/runtime/lockrank_off.go | 10 ++-------- src/runtime/lockrank_on.go | 26 +++++++++++++++++--------- 2 files changed, 19 insertions(+), 17 deletions(-) diff --git a/src/runtime/lockrank_off.go b/src/runtime/lockrank_off.go index c04b61edc7..40edf882ee 100644 --- a/src/runtime/lockrank_off.go +++ b/src/runtime/lockrank_off.go @@ -18,30 +18,24 @@ func getLockRank(l *mutex) lockRank { return 0 } -// The following functions may be called in nosplit context. -// Nosplit is not strictly required for lockWithRank, unlockWithRank -// and lockWithRankMayAcquire, but these nosplit annotations must -// be kept consistent with the equivalent functions in lockrank_on.go. - -//go:nosplit func lockWithRank(l *mutex, rank lockRank) { lock2(l) } +// This function may be called in nosplit context and thus must be nosplit. //go:nosplit func acquireLockRank(rank lockRank) { } -//go:nosplit func unlockWithRank(l *mutex) { unlock2(l) } +// This function may be called in nosplit context and thus must be nosplit. //go:nosplit func releaseLockRank(rank lockRank) { } -//go:nosplit func lockWithRankMayAcquire(l *mutex, rank lockRank) { } diff --git a/src/runtime/lockrank_on.go b/src/runtime/lockrank_on.go index 850f7cdd38..db7ff23a58 100644 --- a/src/runtime/lockrank_on.go +++ b/src/runtime/lockrank_on.go @@ -40,15 +40,19 @@ func getLockRank(l *mutex) lockRank { return l.rank } -// The following functions are the entry-points to record lock -// operations. -// All of these are nosplit and switch to the system stack immediately -// to avoid stack growths. Since a stack growth could itself have lock -// operations, this prevents re-entrant calls. - // lockWithRank is like lock(l), but allows the caller to specify a lock rank // when acquiring a non-static lock. 
-//go:nosplit +// +// Note that we need to be careful about stack splits: +// +// This function is not nosplit, thus it may split at function entry. This may +// introduce a new edge in the lock order, but it is no different from any +// other (nosplit) call before this call (including the call to lock() itself). +// +// However, we switch to the systemstack to record the lock held to ensure that +// we record an accurate lock ordering. e.g., without systemstack, a stack +// split on entry to lock2() would record stack split locks as taken after l, +// even though l is not actually locked yet. func lockWithRank(l *mutex, rank lockRank) { if l == &debuglock || l == &paniclk { // debuglock is only used for println/printlock(). Don't do lock @@ -99,6 +103,8 @@ func printHeldLocks(gp *g) { } // acquireLockRank acquires a rank which is not associated with a mutex lock +// +// This function may be called in nosplit context and thus must be nosplit. //go:nosplit func acquireLockRank(rank lockRank) { gp := getg() @@ -154,7 +160,7 @@ func checkRanks(gp *g, prevRank, rank lockRank) { } } -//go:nosplit +// See comment on lockWithRank regarding stack splitting. func unlockWithRank(l *mutex) { if l == &debuglock || l == &paniclk { // See comment at beginning of lockWithRank. @@ -181,6 +187,8 @@ func unlockWithRank(l *mutex) { } // releaseLockRank releases a rank which is not associated with a mutex lock +// +// This function may be called in nosplit context and thus must be nosplit. //go:nosplit func releaseLockRank(rank lockRank) { gp := getg() @@ -201,7 +209,7 @@ func releaseLockRank(rank lockRank) { }) } -//go:nosplit +// See comment on lockWithRank regarding stack splitting. 
func lockWithRankMayAcquire(l *mutex, rank lockRank) { gp := getg() if gp.m.locksHeldLen == 0 { From 74c3b508ec8bc5643ba0e6a45f2b399d3c7a34ba Mon Sep 17 00:00:00 2001 From: Cherry Zhang Date: Fri, 25 Sep 2020 13:24:08 -0400 Subject: [PATCH 030/281] vendor, cmd/vendor: update vendored x/sys and x/net Pick up GOOS=ios changes. This is done by cd $GOROOT/src go get -d golang.org/x/net@latest go mod tidy go mod vendor go get -d golang.org/x/sys@latest go mod tidy go mod vendor cd $GOROOT/src/cmd go get -d golang.org/x/sys@latest go mod tidy go mod vendor Updates #38485. Change-Id: Ic2b54febb1f851814c9d76c4b55a8837ac4779f8 Reviewed-on: https://go-review.googlesource.com/c/go/+/257618 Trust: Cherry Zhang Reviewed-by: Dmitri Shuralyov --- src/cmd/go.mod | 2 +- src/cmd/go.sum | 4 +-- .../vendor/golang.org/x/sys/unix/mkerrors.sh | 1 + .../x/sys/unix/sockcmsg_unix_other.go | 2 +- .../golang.org/x/sys/unix/syscall_bsd.go | 2 +- .../golang.org/x/sys/unix/syscall_illumos.go | 2 +- .../golang.org/x/sys/unix/syscall_linux.go | 25 +++++++++++++++++ .../golang.org/x/sys/unix/zerrors_linux.go | 1 + .../x/sys/unix/zerrors_linux_386.go | 2 ++ .../x/sys/unix/zerrors_linux_amd64.go | 2 ++ .../x/sys/unix/zerrors_linux_arm.go | 2 ++ .../x/sys/unix/zerrors_linux_arm64.go | 2 ++ .../x/sys/unix/zerrors_linux_mips.go | 2 ++ .../x/sys/unix/zerrors_linux_mips64.go | 2 ++ .../x/sys/unix/zerrors_linux_mips64le.go | 2 ++ .../x/sys/unix/zerrors_linux_mipsle.go | 2 ++ .../x/sys/unix/zerrors_linux_ppc64.go | 2 ++ .../x/sys/unix/zerrors_linux_ppc64le.go | 2 ++ .../x/sys/unix/zerrors_linux_riscv64.go | 2 ++ .../x/sys/unix/zerrors_linux_s390x.go | 2 ++ .../x/sys/unix/zerrors_linux_sparc64.go | 2 ++ .../x/sys/unix/zsyscall_illumos_amd64.go | 2 +- .../golang.org/x/sys/unix/ztypes_linux.go | 15 ++++++++++ src/cmd/vendor/modules.txt | 2 +- src/go.mod | 4 +-- src/go.sum | 8 +++--- .../golang.org/x/net/nettest/nettest.go | 6 ++-- src/vendor/golang.org/x/net/route/address.go | 2 +- 
.../golang.org/x/net/route/route_classic.go | 2 +- .../x/net/route/syscall_go1_11_darwin.go | 28 ------------------- src/vendor/golang.org/x/sys/cpu/cpu_arm64.go | 2 +- src/vendor/modules.txt | 4 +-- 32 files changed, 90 insertions(+), 50 deletions(-) delete mode 100644 src/vendor/golang.org/x/net/route/syscall_go1_11_darwin.go diff --git a/src/cmd/go.mod b/src/cmd/go.mod index c228b04b42..59d6152e2a 100644 --- a/src/cmd/go.mod +++ b/src/cmd/go.mod @@ -8,6 +8,6 @@ require ( golang.org/x/arch v0.0.0-20200826200359-b19915210f00 golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449 - golang.org/x/sys v0.0.0-20200918174421-af09f7315aff // indirect + golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d // indirect golang.org/x/tools v0.0.0-20200918232735-d647fc253266 ) diff --git a/src/cmd/go.sum b/src/cmd/go.sum index d41f36e147..1b6d680d62 100644 --- a/src/cmd/go.sum +++ b/src/cmd/go.sum @@ -26,8 +26,8 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200918174421-af09f7315aff h1:1CPUrky56AcgSpxz/KfgzQWzfG09u5YOL8MvPYBlrL8= -golang.org/x/sys v0.0.0-20200918174421-af09f7315aff/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d h1:L/IKR6COd7ubZrs2oTnTi73IhgqJ71c9s80WsQnh0Es= +golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools 
v0.0.0-20200918232735-d647fc253266 h1:k7tVuG0g1JwmD3Jh8oAl1vQ1C3jb4Hi/dUl1wWDBJpQ= diff --git a/src/cmd/vendor/golang.org/x/sys/unix/mkerrors.sh b/src/cmd/vendor/golang.org/x/sys/unix/mkerrors.sh index e7c51aa409..1bef7148d2 100644 --- a/src/cmd/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/src/cmd/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -517,6 +517,7 @@ ccflags="$@" $2 ~ /^CP_/ || $2 ~ /^CPUSTATES$/ || $2 ~ /^ALG_/ || + $2 ~ /^FI(CLONE|DEDUPERANGE)/ || $2 ~ /^FS_(POLICY_FLAGS|KEY_DESC|ENCRYPTION_MODE|[A-Z0-9_]+_KEY_SIZE)/ || $2 ~ /^FS_IOC_.*(ENCRYPTION|VERITY|[GS]ETFLAGS)/ || $2 ~ /^FS_VERITY_/ || diff --git a/src/cmd/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go b/src/cmd/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go index 7d08dae5ba..abdedcf1d5 100644 --- a/src/cmd/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go +++ b/src/cmd/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go @@ -20,7 +20,7 @@ func cmsgAlignOf(salen int) int { case "aix": // There is no alignment on AIX. salign = 1 - case "darwin", "illumos", "solaris": + case "darwin", "ios", "illumos", "solaris": // NOTE: It seems like 64-bit Darwin, Illumos and Solaris // kernels still require 32-bit aligned access to network // subsystem. diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_bsd.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_bsd.go index 60bbe10adf..9ebe92e4da 100644 --- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -272,7 +272,7 @@ func Accept(fd int) (nfd int, sa Sockaddr, err error) { if err != nil { return } - if runtime.GOOS == "darwin" && len == 0 { + if (runtime.GOOS == "darwin" || runtime.GOOS == "ios") && len == 0 { // Accepted socket has no address. 
// This is likely due to a bug in xnu kernels, // where instead of ECONNABORTED error socket diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_illumos.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_illumos.go index 16e40091ce..bbc4f3ea54 100644 --- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_illumos.go +++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_illumos.go @@ -56,7 +56,7 @@ func Pwritev(fd int, iovs [][]byte, off int64) (n int, err error) { return n, err } -//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) +//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) = libsocket.accept4 func Accept4(fd int, flags int) (nfd int, sa Sockaddr, err error) { var rsa RawSockaddrAny diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux.go index ec7e4c4d36..94dafa4e52 100644 --- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -112,6 +112,31 @@ func IoctlGetRTCWkAlrm(fd int) (*RTCWkAlrm, error) { return &value, err } +// IoctlFileClone performs an FICLONERANGE ioctl operation to clone the range of +// data conveyed in value to the file associated with the file descriptor +// destFd. See the ioctl_ficlonerange(2) man page for details. +func IoctlFileCloneRange(destFd int, value *FileCloneRange) error { + err := ioctl(destFd, FICLONERANGE, uintptr(unsafe.Pointer(value))) + runtime.KeepAlive(value) + return err +} + +// IoctlFileClone performs an FICLONE ioctl operation to clone the entire file +// associated with the file description srcFd to the file associated with the +// file descriptor destFd. See the ioctl_ficlone(2) man page for details. 
+func IoctlFileClone(destFd, srcFd int) error { + return ioctl(destFd, FICLONE, uintptr(srcFd)) +} + +// IoctlFileClone performs an FIDEDUPERANGE ioctl operation to share the range of +// data conveyed in value with the file associated with the file descriptor +// destFd. See the ioctl_fideduperange(2) man page for details. +func IoctlFileDedupeRange(destFd int, value *FileDedupeRange) error { + err := ioctl(destFd, FIDEDUPERANGE, uintptr(unsafe.Pointer(value))) + runtime.KeepAlive(value) + return err +} + //sys Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) func Link(oldpath string, newpath string) (err error) { diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux.go index 388050a0f6..79e032f4fb 100644 --- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -686,6 +686,7 @@ const ( FD_CLOEXEC = 0x1 FD_SETSIZE = 0x400 FF0 = 0x0 + FIDEDUPERANGE = 0xc0189436 FSCRYPT_KEY_DESCRIPTOR_SIZE = 0x8 FSCRYPT_KEY_DESC_PREFIX = "fscrypt:" FSCRYPT_KEY_DESC_PREFIX_SIZE = 0x8 diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 11b25f68c2..dd282c08b7 100644 --- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -71,6 +71,8 @@ const ( EXTPROC = 0x10000 FF1 = 0x8000 FFDLY = 0x8000 + FICLONE = 0x40049409 + FICLONERANGE = 0x4020940d FLUSHO = 0x1000 FP_XSTATE_MAGIC2 = 0x46505845 FS_IOC_ENABLE_VERITY = 0x40806685 diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index f92cff6ea0..82fc93c7bb 100644 --- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -71,6 +71,8 @@ const ( EXTPROC = 0x10000 
FF1 = 0x8000 FFDLY = 0x8000 + FICLONE = 0x40049409 + FICLONERANGE = 0x4020940d FLUSHO = 0x1000 FP_XSTATE_MAGIC2 = 0x46505845 FS_IOC_ENABLE_VERITY = 0x40806685 diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 12bcbf88d6..fe7094f276 100644 --- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -71,6 +71,8 @@ const ( EXTPROC = 0x10000 FF1 = 0x8000 FFDLY = 0x8000 + FICLONE = 0x40049409 + FICLONERANGE = 0x4020940d FLUSHO = 0x1000 FS_IOC_ENABLE_VERITY = 0x40806685 FS_IOC_GETFLAGS = 0x80046601 diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 84f71e99fe..3b6cc58803 100644 --- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -73,6 +73,8 @@ const ( EXTRA_MAGIC = 0x45585401 FF1 = 0x8000 FFDLY = 0x8000 + FICLONE = 0x40049409 + FICLONERANGE = 0x4020940d FLUSHO = 0x1000 FPSIMD_MAGIC = 0x46508001 FS_IOC_ENABLE_VERITY = 0x40806685 diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index eeadea943f..ce3d9ae156 100644 --- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -71,6 +71,8 @@ const ( EXTPROC = 0x10000 FF1 = 0x8000 FFDLY = 0x8000 + FICLONE = 0x80049409 + FICLONERANGE = 0x8020940d FLUSHO = 0x2000 FS_IOC_ENABLE_VERITY = 0x80806685 FS_IOC_GETFLAGS = 0x40046601 diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 0be6c4ccc0..7a85215ce5 100644 --- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -71,6 +71,8 @@ const ( 
EXTPROC = 0x10000 FF1 = 0x8000 FFDLY = 0x8000 + FICLONE = 0x80049409 + FICLONERANGE = 0x8020940d FLUSHO = 0x2000 FS_IOC_ENABLE_VERITY = 0x80806685 FS_IOC_GETFLAGS = 0x40086601 diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 0880b745c1..07d4cc1bd5 100644 --- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -71,6 +71,8 @@ const ( EXTPROC = 0x10000 FF1 = 0x8000 FFDLY = 0x8000 + FICLONE = 0x80049409 + FICLONERANGE = 0x8020940d FLUSHO = 0x2000 FS_IOC_ENABLE_VERITY = 0x80806685 FS_IOC_GETFLAGS = 0x40086601 diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index c8a66627aa..d4842ba1c2 100644 --- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -71,6 +71,8 @@ const ( EXTPROC = 0x10000 FF1 = 0x8000 FFDLY = 0x8000 + FICLONE = 0x80049409 + FICLONERANGE = 0x8020940d FLUSHO = 0x2000 FS_IOC_ENABLE_VERITY = 0x80806685 FS_IOC_GETFLAGS = 0x40046601 diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 97aae63f16..941e20dace 100644 --- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -71,6 +71,8 @@ const ( EXTPROC = 0x10000000 FF1 = 0x4000 FFDLY = 0x4000 + FICLONE = 0x80049409 + FICLONERANGE = 0x8020940d FLUSHO = 0x800000 FS_IOC_ENABLE_VERITY = 0x80806685 FS_IOC_GETFLAGS = 0x40086601 diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index b0c3b0664f..63d3bc5662 100644 --- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ 
b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -71,6 +71,8 @@ const ( EXTPROC = 0x10000000 FF1 = 0x4000 FFDLY = 0x4000 + FICLONE = 0x80049409 + FICLONERANGE = 0x8020940d FLUSHO = 0x800000 FS_IOC_ENABLE_VERITY = 0x80806685 FS_IOC_GETFLAGS = 0x40086601 diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 0c05181935..490bee1ab1 100644 --- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -71,6 +71,8 @@ const ( EXTPROC = 0x10000 FF1 = 0x8000 FFDLY = 0x8000 + FICLONE = 0x40049409 + FICLONERANGE = 0x4020940d FLUSHO = 0x1000 FS_IOC_ENABLE_VERITY = 0x40806685 FS_IOC_GETFLAGS = 0x80086601 diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 0b96bd462e..467b8218e8 100644 --- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -71,6 +71,8 @@ const ( EXTPROC = 0x10000 FF1 = 0x8000 FFDLY = 0x8000 + FICLONE = 0x40049409 + FICLONERANGE = 0x4020940d FLUSHO = 0x1000 FS_IOC_ENABLE_VERITY = 0x40806685 FS_IOC_GETFLAGS = 0x80086601 diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index bd5c305779..79fbafbcf6 100644 --- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -75,6 +75,8 @@ const ( EXTPROC = 0x10000 FF1 = 0x8000 FFDLY = 0x8000 + FICLONE = 0x80049409 + FICLONERANGE = 0x8020940d FLUSHO = 0x1000 FS_IOC_ENABLE_VERITY = 0x80806685 FS_IOC_GETFLAGS = 0x40086601 diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go index 8b329c4589..d3af083f4e 100644 --- 
a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go +++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go @@ -13,7 +13,7 @@ import ( //go:cgo_import_dynamic libc_preadv preadv "libc.so" //go:cgo_import_dynamic libc_writev writev "libc.so" //go:cgo_import_dynamic libc_pwritev pwritev "libc.so" -//go:cgo_import_dynamic libc_accept4 accept4 "libc.so" +//go:cgo_import_dynamic libc_accept4 accept4 "libsocket.so" //go:cgo_import_dynamic libc_pipe2 pipe2 "libc.so" //go:linkname procreadv libc_readv diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux.go index 68e4974a9a..a92a5019af 100644 --- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -76,6 +76,21 @@ type Fsid struct { Val [2]int32 } +type FileCloneRange struct { + Src_fd int64 + Src_offset uint64 + Src_length uint64 + Dest_offset uint64 +} + +type FileDedupeRange struct { + Src_offset uint64 + Src_length uint64 + Dest_count uint16 + Reserved1 uint16 + Reserved2 uint32 +} + type FscryptPolicy struct { Version uint8 Contents_encryption_mode uint8 diff --git a/src/cmd/vendor/modules.txt b/src/cmd/vendor/modules.txt index 6482962593..21326f7521 100644 --- a/src/cmd/vendor/modules.txt +++ b/src/cmd/vendor/modules.txt @@ -40,7 +40,7 @@ golang.org/x/mod/sumdb/dirhash golang.org/x/mod/sumdb/note golang.org/x/mod/sumdb/tlog golang.org/x/mod/zip -# golang.org/x/sys v0.0.0-20200918174421-af09f7315aff +# golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d ## explicit golang.org/x/sys/internal/unsafeheader golang.org/x/sys/unix diff --git a/src/go.mod b/src/go.mod index b7de1ff5a4..86e3c8c5b7 100644 --- a/src/go.mod +++ b/src/go.mod @@ -4,7 +4,7 @@ go 1.16 require ( golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a - golang.org/x/net v0.0.0-20200904194848-62affa334b73 - golang.org/x/sys v0.0.0-20200918174421-af09f7315aff // indirect + golang.org/x/net 
v0.0.0-20200925080053-05aa5d4ee321 + golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d // indirect golang.org/x/text v0.3.4-0.20200826142016-a8b467125457 // indirect ) diff --git a/src/go.sum b/src/go.sum index 305c856f64..86a8c4be2a 100644 --- a/src/go.sum +++ b/src/go.sum @@ -3,13 +3,13 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a h1:vclmkQCjlDX5OydZ9wv8rBCcS0QyQY66Mpf/7BZbInM= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20200904194848-62affa334b73 h1:MXfv8rhZWmFeqX3GNZRsd6vOLoaCHjYEX3qkRo3YBUA= -golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200925080053-05aa5d4ee321 h1:lleNcKRbcaC8MqgLwghIkzZ2JBQAb7QQ9MiwRt1BisA= +golang.org/x/net v0.0.0-20200925080053-05aa5d4ee321/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200918174421-af09f7315aff h1:1CPUrky56AcgSpxz/KfgzQWzfG09u5YOL8MvPYBlrL8= -golang.org/x/sys v0.0.0-20200918174421-af09f7315aff/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d h1:L/IKR6COd7ubZrs2oTnTi73IhgqJ71c9s80WsQnh0Es= +golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.4-0.20200826142016-a8b467125457 
h1:K2Vq+FTHFbV5auJiAahir8LO9HUqufuVbQYSNzZopos= golang.org/x/text v0.3.4-0.20200826142016-a8b467125457/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= diff --git a/src/vendor/golang.org/x/net/nettest/nettest.go b/src/vendor/golang.org/x/net/nettest/nettest.go index a402a46bb3..953562f769 100644 --- a/src/vendor/golang.org/x/net/nettest/nettest.go +++ b/src/vendor/golang.org/x/net/nettest/nettest.go @@ -97,7 +97,7 @@ func TestableNetwork(network string) bool { switch runtime.GOOS { case "android", "fuchsia", "hurd", "js", "nacl", "plan9", "windows": return false - case "darwin": + case "darwin", "ios": // iOS doesn't support it. if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" { return false @@ -118,7 +118,7 @@ func TestableNetwork(network string) bool { return false case "aix": return unixStrmDgramEnabled() - case "darwin": + case "darwin", "ios": // iOS does not support unix, unixgram. if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" { return false @@ -126,7 +126,7 @@ func TestableNetwork(network string) bool { } case "unixpacket": switch runtime.GOOS { - case "aix", "android", "fuchsia", "hurd", "darwin", "js", "nacl", "plan9", "windows": + case "aix", "android", "fuchsia", "hurd", "darwin", "ios", "js", "nacl", "plan9", "windows": return false case "netbsd": // It passes on amd64 at least. 386 fails diff --git a/src/vendor/golang.org/x/net/route/address.go b/src/vendor/golang.org/x/net/route/address.go index e3125a473b..3c2fe15a97 100644 --- a/src/vendor/golang.org/x/net/route/address.go +++ b/src/vendor/golang.org/x/net/route/address.go @@ -221,7 +221,7 @@ func parseKernelInetAddr(af int, b []byte) (int, Addr, error) { // to make the tuple to be conformed with // the routing message boundary l := int(b[0]) - if runtime.GOOS == "darwin" { + if runtime.GOOS == "darwin" || runtime.GOOS == "ios" { // On Darwin, an address in the kernel form is also // used as a message filler. 
if l == 0 || len(b) > roundup(l) { diff --git a/src/vendor/golang.org/x/net/route/route_classic.go b/src/vendor/golang.org/x/net/route/route_classic.go index a7d3864646..b83add668e 100644 --- a/src/vendor/golang.org/x/net/route/route_classic.go +++ b/src/vendor/golang.org/x/net/route/route_classic.go @@ -17,7 +17,7 @@ func (m *RouteMessage) marshal() ([]byte, error) { return nil, errUnsupportedMessage } l := w.bodyOff + addrsSpace(m.Addrs) - if runtime.GOOS == "darwin" { + if runtime.GOOS == "darwin" || runtime.GOOS == "ios" { // Fix stray pointer writes on macOS. // See golang.org/issue/22456. l += 1024 diff --git a/src/vendor/golang.org/x/net/route/syscall_go1_11_darwin.go b/src/vendor/golang.org/x/net/route/syscall_go1_11_darwin.go deleted file mode 100644 index 7228e443cd..0000000000 --- a/src/vendor/golang.org/x/net/route/syscall_go1_11_darwin.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !go1.12 - -package route - -import ( - "syscall" - "unsafe" -) - -var zero uintptr - -func sysctl(mib []int32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error { - var p unsafe.Pointer - if len(mib) > 0 { - p = unsafe.Pointer(&mib[0]) - } else { - p = unsafe.Pointer(&zero) - } - _, _, errno := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(p), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), newlen) - if errno != 0 { - return error(errno) - } - return nil -} diff --git a/src/vendor/golang.org/x/sys/cpu/cpu_arm64.go b/src/vendor/golang.org/x/sys/cpu/cpu_arm64.go index 2d90024387..951078f2e8 100644 --- a/src/vendor/golang.org/x/sys/cpu/cpu_arm64.go +++ b/src/vendor/golang.org/x/sys/cpu/cpu_arm64.go @@ -39,7 +39,7 @@ func initOptions() { func archInit() { switch runtime.GOOS { - case "android", "darwin", "netbsd": + case "android", "darwin", "ios", "netbsd": // Android and iOS don't seem to allow reading these registers. 
// // NetBSD: diff --git a/src/vendor/modules.txt b/src/vendor/modules.txt index 46f82829f0..d53b647310 100644 --- a/src/vendor/modules.txt +++ b/src/vendor/modules.txt @@ -8,7 +8,7 @@ golang.org/x/crypto/curve25519 golang.org/x/crypto/hkdf golang.org/x/crypto/internal/subtle golang.org/x/crypto/poly1305 -# golang.org/x/net v0.0.0-20200904194848-62affa334b73 +# golang.org/x/net v0.0.0-20200925080053-05aa5d4ee321 ## explicit golang.org/x/net/dns/dnsmessage golang.org/x/net/http/httpguts @@ -18,7 +18,7 @@ golang.org/x/net/idna golang.org/x/net/lif golang.org/x/net/nettest golang.org/x/net/route -# golang.org/x/sys v0.0.0-20200918174421-af09f7315aff +# golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d ## explicit golang.org/x/sys/cpu # golang.org/x/text v0.3.4-0.20200826142016-a8b467125457 From 8266570ba71fd6af9c07d8fac945b3710010dfc7 Mon Sep 17 00:00:00 2001 From: Carlos Alexandro Becker Date: Sun, 13 Sep 2020 14:40:51 +0000 Subject: [PATCH 031/281] encoding/json: added docs to UnsupportedValueError MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Added godoc to UnsupportedValueError. Change-Id: I5fc13bac0b6e14b3a6eba27c9d3331ff5c5269aa GitHub-Last-Rev: 516cd7a92903e1048caa4d560abf5d66339e5a8f GitHub-Pull-Request: golang/go#41364 Reviewed-on: https://go-review.googlesource.com/c/go/+/254540 Reviewed-by: Daniel Martí Trust: Daniel Martí Trust: Heschi Kreinick Run-TryBot: Daniel Martí TryBot-Result: Go Bot --- src/encoding/json/encode.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/encoding/json/encode.go b/src/encoding/json/encode.go index ea5eca51ef..483b9d8f2d 100644 --- a/src/encoding/json/encode.go +++ b/src/encoding/json/encode.go @@ -236,6 +236,8 @@ func (e *UnsupportedTypeError) Error() string { return "json: unsupported type: " + e.Type.String() } +// An UnsupportedValueError is returned by Marshal when attempting +// to encode an unsupported value. 
type UnsupportedValueError struct { Value reflect.Value Str string From dbb1c5bf743dd556c8a83cba1d064bf3acaf07bb Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Fri, 25 Sep 2020 18:44:58 +0200 Subject: [PATCH 032/281] syscall: remove mksysnum_darwin.pl script Direct syscalls using syscall numbers are no longer supported on darwin since Go 1.12, see https://golang.org/doc/go1.12#darwin. Also, /usr/include/sys/syscall.h is no longer available on recent macOS versions, so remove the generating script. Change-Id: I8e2579c3d0e94a61fc041d06280149ec6ccf13e2 Reviewed-on: https://go-review.googlesource.com/c/go/+/257638 Trust: Tobias Klauser Run-TryBot: Tobias Klauser TryBot-Result: Go Bot Reviewed-by: Ian Lance Taylor --- src/syscall/mkall.sh | 2 -- src/syscall/mksysnum_darwin.pl | 32 -------------------------------- 2 files changed, 34 deletions(-) delete mode 100755 src/syscall/mksysnum_darwin.pl diff --git a/src/syscall/mkall.sh b/src/syscall/mkall.sh index 826512a177..2c2fc453d3 100755 --- a/src/syscall/mkall.sh +++ b/src/syscall/mkall.sh @@ -124,14 +124,12 @@ aix_ppc64) darwin_amd64) mkerrors="$mkerrors -m64" mksyscall="./mksyscall.pl -darwin" - mksysnum="./mksysnum_darwin.pl /usr/include/sys/syscall.h" mktypes="GOARCH=$GOARCH go tool cgo -godefs" mkasm="go run mkasm_darwin.go" ;; darwin_arm64) mkerrors="$mkerrors -m64" mksyscall="./mksyscall.pl -darwin" - mksysnum="./mksysnum_darwin.pl /usr/include/sys/syscall.h" mktypes="GOARCH=$GOARCH go tool cgo -godefs" mkasm="go run mkasm_darwin.go" ;; diff --git a/src/syscall/mksysnum_darwin.pl b/src/syscall/mksysnum_darwin.pl deleted file mode 100755 index af21e855ae..0000000000 --- a/src/syscall/mksysnum_darwin.pl +++ /dev/null @@ -1,32 +0,0 @@ -#!/usr/bin/env perl -# Copyright 2009 The Go Authors. All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. 
-# -# Generate system call table for Darwin from sys/syscall.h - -use strict; - -my $command = "mksysnum_darwin.pl " . join(' ', @ARGV); - -print <){ - if(/^#define\s+SYS_(\w+)\s+([0-9]+)/){ - my $name = $1; - my $num = $2; - $name =~ y/a-z/A-Z/; - print " SYS_$name = $num;" - } -} - -print < Date: Fri, 25 Sep 2020 18:06:26 +0200 Subject: [PATCH 033/281] syscall: implement Getwd using getcwd from libSystem on darwin Directly wrap the getcwd implementation provided by libSystem.dylib on darwin and use it to implement Getwd like on the BSDs. This allows to drop the custom implementation using getAttrList and to merge the implementation of Getwd for darwin and the BSDs in syscall_bsd.go. Same as CL 257497 did for golang.org/x/sys/unix Change-Id: If30390c4c17cd463bb8fdcb5465f40d6fa11f391 Reviewed-on: https://go-review.googlesource.com/c/go/+/257637 Trust: Tobias Klauser Run-TryBot: Tobias Klauser TryBot-Result: Go Bot Reviewed-by: Ian Lance Taylor --- src/syscall/syscall_bsd.go | 15 +++++ src/syscall/syscall_darwin.go | 85 +--------------------------- src/syscall/syscall_getwd_bsd.go | 22 ------- src/syscall/types_darwin.go | 6 ++ src/syscall/zsyscall_darwin_amd64.go | 22 +++++++ src/syscall/zsyscall_darwin_amd64.s | 4 +- src/syscall/zsyscall_darwin_arm64.go | 22 +++++++ src/syscall/zsyscall_darwin_arm64.s | 4 +- src/syscall/ztypes_darwin_amd64.go | 4 ++ src/syscall/ztypes_darwin_arm64.go | 4 ++ 10 files changed, 79 insertions(+), 109 deletions(-) delete mode 100644 src/syscall/syscall_getwd_bsd.go diff --git a/src/syscall/syscall_bsd.go b/src/syscall/syscall_bsd.go index b52de7450f..1c7ec588bc 100644 --- a/src/syscall/syscall_bsd.go +++ b/src/syscall/syscall_bsd.go @@ -17,6 +17,21 @@ import ( "unsafe" ) +const ImplementsGetwd = true + +func Getwd() (string, error) { + var buf [pathMax]byte + _, err := getcwd(buf[:]) + if err != nil { + return "", err + } + n := clen(buf[:]) + if n < 1 { + return "", EINVAL + } + return string(buf[:n]), nil +} + /* * Wrapped */ diff 
--git a/src/syscall/syscall_darwin.go b/src/syscall/syscall_darwin.go index c84547c628..afdadbf894 100644 --- a/src/syscall/syscall_darwin.go +++ b/src/syscall/syscall_darwin.go @@ -12,28 +12,7 @@ package syscall -import ( - errorspkg "errors" - "unsafe" -) - -const ImplementsGetwd = true - -func Getwd() (string, error) { - buf := make([]byte, 2048) - attrs, err := getAttrList(".", attrList{CommonAttr: attrCmnFullpath}, buf, 0) - if err == nil && len(attrs) == 1 && len(attrs[0]) >= 2 { - wd := string(attrs[0]) - // Sanity check that it's an absolute path and ends - // in a null byte, which we then strip. - if wd[0] == '/' && wd[len(wd)-1] == 0 { - return wd[:len(wd)-1], nil - } - } - // If pkg/os/getwd.go gets ENOTSUP, it will fall back to the - // slow algorithm. - return "", ENOTSUP -} +import "unsafe" type SockaddrDatalink struct { Len uint8 @@ -94,7 +73,6 @@ const ( attrBitMapCount = 5 attrCmnModtime = 0x00000400 attrCmnAcctime = 0x00001000 - attrCmnFullpath = 0x08000000 ) type attrList struct { @@ -107,66 +85,6 @@ type attrList struct { Forkattr uint32 } -func getAttrList(path string, attrList attrList, attrBuf []byte, options uint) (attrs [][]byte, err error) { - if len(attrBuf) < 4 { - return nil, errorspkg.New("attrBuf too small") - } - attrList.bitmapCount = attrBitMapCount - - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return nil, err - } - - _, _, e1 := syscall6( - funcPC(libc_getattrlist_trampoline), - uintptr(unsafe.Pointer(_p0)), - uintptr(unsafe.Pointer(&attrList)), - uintptr(unsafe.Pointer(&attrBuf[0])), - uintptr(len(attrBuf)), - uintptr(options), - 0, - ) - if e1 != 0 { - return nil, e1 - } - size := *(*uint32)(unsafe.Pointer(&attrBuf[0])) - - // dat is the section of attrBuf that contains valid data, - // without the 4 byte length header. All attribute offsets - // are relative to dat. 
- dat := attrBuf - if int(size) < len(attrBuf) { - dat = dat[:size] - } - dat = dat[4:] // remove length prefix - - for i := uint32(0); int(i) < len(dat); { - header := dat[i:] - if len(header) < 8 { - return attrs, errorspkg.New("truncated attribute header") - } - datOff := *(*int32)(unsafe.Pointer(&header[0])) - attrLen := *(*uint32)(unsafe.Pointer(&header[4])) - if datOff < 0 || uint32(datOff)+attrLen > uint32(len(dat)) { - return attrs, errorspkg.New("truncated results; attrBuf too small") - } - end := uint32(datOff) + attrLen - attrs = append(attrs, dat[datOff:end]) - i = end - if r := i % 4; r != 0 { - i += (4 - r) - } - } - return -} - -func libc_getattrlist_trampoline() - -//go:linkname libc_getattrlist libc_getattrlist -//go:cgo_import_dynamic libc_getattrlist getattrlist "/usr/lib/libSystem.B.dylib" - //sysnb pipe(p *[2]int32) (err error) func Pipe(p []int) (err error) { @@ -341,6 +259,7 @@ func Kill(pid int, signum Signal) (err error) { return kill(pid, int(signum), 1) //sys fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (val int, err error) = SYS_fcntl //sys unlinkat(fd int, path string, flags int) (err error) //sys openat(fd int, path string, flags int, perm uint32) (fdret int, err error) +//sys getcwd(buf []byte) (n int, err error) func init() { execveDarwin = execve diff --git a/src/syscall/syscall_getwd_bsd.go b/src/syscall/syscall_getwd_bsd.go deleted file mode 100644 index b14367936e..0000000000 --- a/src/syscall/syscall_getwd_bsd.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build dragonfly freebsd netbsd openbsd - -package syscall - -const ImplementsGetwd = true - -func Getwd() (string, error) { - var buf [pathMax]byte - _, err := getcwd(buf[:]) - if err != nil { - return "", err - } - n := clen(buf[:]) - if n < 1 { - return "", EINVAL - } - return string(buf[:n]), nil -} diff --git a/src/syscall/types_darwin.go b/src/syscall/types_darwin.go index d8218d6aea..7b3a9d2335 100644 --- a/src/syscall/types_darwin.go +++ b/src/syscall/types_darwin.go @@ -123,6 +123,12 @@ type Fsid C.struct_fsid type Dirent C.struct_dirent +// File system limits + +const ( + pathMax = C.PATH_MAX +) + // Sockets type RawSockaddrInet4 C.struct_sockaddr_in diff --git a/src/syscall/zsyscall_darwin_amd64.go b/src/syscall/zsyscall_darwin_amd64.go index 83214de2fb..093739ebc7 100644 --- a/src/syscall/zsyscall_darwin_amd64.go +++ b/src/syscall/zsyscall_darwin_amd64.go @@ -1943,6 +1943,28 @@ func libc_openat_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall(funcPC(libc_getcwd_trampoline), uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_getcwd_trampoline() + +//go:linkname libc_getcwd libc_getcwd +//go:cgo_import_dynamic libc_getcwd getcwd "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := syscall(funcPC(libc_fstat64_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/src/syscall/zsyscall_darwin_amd64.s b/src/syscall/zsyscall_darwin_amd64.s index 23ddbe06c0..d99656d028 100644 --- a/src/syscall/zsyscall_darwin_amd64.s +++ b/src/syscall/zsyscall_darwin_amd64.s @@ -1,8 +1,6 @@ // go run mkasm_darwin.go amd64 // Code generated by the command 
above; DO NOT EDIT. #include "textflag.h" -TEXT ·libc_getattrlist_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getattrlist(SB) TEXT ·libc_getfsstat_trampoline(SB),NOSPLIT,$0-0 JMP libc_getfsstat(SB) TEXT ·libc_setattrlist_trampoline(SB),NOSPLIT,$0-0 @@ -235,6 +233,8 @@ TEXT ·libc_unlinkat_trampoline(SB),NOSPLIT,$0-0 JMP libc_unlinkat(SB) TEXT ·libc_openat_trampoline(SB),NOSPLIT,$0-0 JMP libc_openat(SB) +TEXT ·libc_getcwd_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getcwd(SB) TEXT ·libc_fstat64_trampoline(SB),NOSPLIT,$0-0 JMP libc_fstat64(SB) TEXT ·libc_fstatfs64_trampoline(SB),NOSPLIT,$0-0 diff --git a/src/syscall/zsyscall_darwin_arm64.go b/src/syscall/zsyscall_darwin_arm64.go index 0b77839869..0ff642eb25 100644 --- a/src/syscall/zsyscall_darwin_arm64.go +++ b/src/syscall/zsyscall_darwin_arm64.go @@ -1943,6 +1943,28 @@ func libc_openat_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall(funcPC(libc_getcwd_trampoline), uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_getcwd_trampoline() + +//go:linkname libc_getcwd libc_getcwd +//go:cgo_import_dynamic libc_getcwd getcwd "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := syscall(funcPC(libc_fstat_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/src/syscall/zsyscall_darwin_arm64.s b/src/syscall/zsyscall_darwin_arm64.s index 7b8b3764a8..214851604a 100644 --- a/src/syscall/zsyscall_darwin_arm64.s +++ b/src/syscall/zsyscall_darwin_arm64.s @@ -1,8 +1,6 @@ // go run mkasm_darwin.go arm64 // Code generated by the command above; DO NOT EDIT. 
#include "textflag.h" -TEXT ·libc_getattrlist_trampoline(SB),NOSPLIT,$0-0 - JMP libc_getattrlist(SB) TEXT ·libc_getfsstat_trampoline(SB),NOSPLIT,$0-0 JMP libc_getfsstat(SB) TEXT ·libc_setattrlist_trampoline(SB),NOSPLIT,$0-0 @@ -235,6 +233,8 @@ TEXT ·libc_unlinkat_trampoline(SB),NOSPLIT,$0-0 JMP libc_unlinkat(SB) TEXT ·libc_openat_trampoline(SB),NOSPLIT,$0-0 JMP libc_openat(SB) +TEXT ·libc_getcwd_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getcwd(SB) TEXT ·libc_fstat_trampoline(SB),NOSPLIT,$0-0 JMP libc_fstat(SB) TEXT ·libc_fstatfs_trampoline(SB),NOSPLIT,$0-0 diff --git a/src/syscall/ztypes_darwin_amd64.go b/src/syscall/ztypes_darwin_amd64.go index bbd5bec385..da56f0da22 100644 --- a/src/syscall/ztypes_darwin_amd64.go +++ b/src/syscall/ztypes_darwin_amd64.go @@ -151,6 +151,10 @@ type Dirent struct { Pad_cgo_0 [3]byte } +const ( + pathMax = 0x400 +) + type RawSockaddrInet4 struct { Len uint8 Family uint8 diff --git a/src/syscall/ztypes_darwin_arm64.go b/src/syscall/ztypes_darwin_arm64.go index e9c8549fa1..82685ff659 100644 --- a/src/syscall/ztypes_darwin_arm64.go +++ b/src/syscall/ztypes_darwin_arm64.go @@ -151,6 +151,10 @@ type Dirent struct { Pad_cgo_0 [3]byte } +const ( + pathMax = 0x400 +) + type RawSockaddrInet4 struct { Len uint8 Family uint8 From 66fbb80b7280ae39d91af77f43593388923fc10c Mon Sep 17 00:00:00 2001 From: Alberto Donizetti Date: Tue, 22 Sep 2020 15:31:43 +0200 Subject: [PATCH 034/281] cmd/compile: more amd64 typed rules Passes gotip build -toolexec 'toolstash -cmp' -a std Change-Id: I2621f9ab48199204cf6116941b19b6df4170d0e5 Reviewed-on: https://go-review.googlesource.com/c/go/+/256497 Trust: Alberto Donizetti Run-TryBot: Alberto Donizetti TryBot-Result: Go Bot Reviewed-by: Keith Randall --- src/cmd/compile/internal/ssa/gen/AMD64.rules | 89 ++++--- src/cmd/compile/internal/ssa/rewriteAMD64.go | 235 +++++++++---------- 2 files changed, 156 insertions(+), 168 deletions(-) diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules 
b/src/cmd/compile/internal/ssa/gen/AMD64.rules index 8d6fad4393..6dfe11dcfa 100644 --- a/src/cmd/compile/internal/ssa/gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules @@ -1224,59 +1224,58 @@ (LEAQ [off+int32(scale)*8] {sym} x) // Absorb InvertFlags into branches. -(LT (InvertFlags cmp) yes no) -> (GT cmp yes no) -(GT (InvertFlags cmp) yes no) -> (LT cmp yes no) -(LE (InvertFlags cmp) yes no) -> (GE cmp yes no) -(GE (InvertFlags cmp) yes no) -> (LE cmp yes no) -(ULT (InvertFlags cmp) yes no) -> (UGT cmp yes no) -(UGT (InvertFlags cmp) yes no) -> (ULT cmp yes no) -(ULE (InvertFlags cmp) yes no) -> (UGE cmp yes no) -(UGE (InvertFlags cmp) yes no) -> (ULE cmp yes no) -(EQ (InvertFlags cmp) yes no) -> (EQ cmp yes no) -(NE (InvertFlags cmp) yes no) -> (NE cmp yes no) +(LT (InvertFlags cmp) yes no) => (GT cmp yes no) +(GT (InvertFlags cmp) yes no) => (LT cmp yes no) +(LE (InvertFlags cmp) yes no) => (GE cmp yes no) +(GE (InvertFlags cmp) yes no) => (LE cmp yes no) +(ULT (InvertFlags cmp) yes no) => (UGT cmp yes no) +(UGT (InvertFlags cmp) yes no) => (ULT cmp yes no) +(ULE (InvertFlags cmp) yes no) => (UGE cmp yes no) +(UGE (InvertFlags cmp) yes no) => (ULE cmp yes no) +(EQ (InvertFlags cmp) yes no) => (EQ cmp yes no) +(NE (InvertFlags cmp) yes no) => (NE cmp yes no) // Constant comparisons. 
-(CMPQconst (MOVQconst [x]) [y]) && x==y -> (FlagEQ) -(CMPQconst (MOVQconst [x]) [y]) && x (FlagLT_ULT) -(CMPQconst (MOVQconst [x]) [y]) && xuint64(y) -> (FlagLT_UGT) -(CMPQconst (MOVQconst [x]) [y]) && x>y && uint64(x) (FlagGT_ULT) -(CMPQconst (MOVQconst [x]) [y]) && x>y && uint64(x)>uint64(y) -> (FlagGT_UGT) -(CMPLconst (MOVLconst [x]) [y]) && int32(x)==int32(y) -> (FlagEQ) -(CMPLconst (MOVLconst [x]) [y]) && int32(x) (FlagLT_ULT) -(CMPLconst (MOVLconst [x]) [y]) && int32(x)uint32(y) -> (FlagLT_UGT) -(CMPLconst (MOVLconst [x]) [y]) && int32(x)>int32(y) && uint32(x) (FlagGT_ULT) -(CMPLconst (MOVLconst [x]) [y]) && int32(x)>int32(y) && uint32(x)>uint32(y) -> (FlagGT_UGT) -(CMPWconst (MOVLconst [x]) [y]) && int16(x)==int16(y) -> (FlagEQ) -(CMPWconst (MOVLconst [x]) [y]) && int16(x) (FlagLT_ULT) -(CMPWconst (MOVLconst [x]) [y]) && int16(x)uint16(y) -> (FlagLT_UGT) -(CMPWconst (MOVLconst [x]) [y]) && int16(x)>int16(y) && uint16(x) (FlagGT_ULT) -(CMPWconst (MOVLconst [x]) [y]) && int16(x)>int16(y) && uint16(x)>uint16(y) -> (FlagGT_UGT) -(CMPBconst (MOVLconst [x]) [y]) && int8(x)==int8(y) -> (FlagEQ) -(CMPBconst (MOVLconst [x]) [y]) && int8(x) (FlagLT_ULT) -(CMPBconst (MOVLconst [x]) [y]) && int8(x)uint8(y) -> (FlagLT_UGT) -(CMPBconst (MOVLconst [x]) [y]) && int8(x)>int8(y) && uint8(x) (FlagGT_ULT) -(CMPBconst (MOVLconst [x]) [y]) && int8(x)>int8(y) && uint8(x)>uint8(y) -> (FlagGT_UGT) +(CMPQconst (MOVQconst [x]) [y]) && x==int64(y) => (FlagEQ) +(CMPQconst (MOVQconst [x]) [y]) && x (FlagLT_ULT) +(CMPQconst (MOVQconst [x]) [y]) && xuint64(int64(y)) => (FlagLT_UGT) +(CMPQconst (MOVQconst [x]) [y]) && x>int64(y) && uint64(x) (FlagGT_ULT) +(CMPQconst (MOVQconst [x]) [y]) && x>int64(y) && uint64(x)>uint64(int64(y)) => (FlagGT_UGT) +(CMPLconst (MOVLconst [x]) [y]) && x==y => (FlagEQ) +(CMPLconst (MOVLconst [x]) [y]) && x (FlagLT_ULT) +(CMPLconst (MOVLconst [x]) [y]) && xuint32(y) => (FlagLT_UGT) +(CMPLconst (MOVLconst [x]) [y]) && x>y && uint32(x) (FlagGT_ULT) +(CMPLconst 
(MOVLconst [x]) [y]) && x>y && uint32(x)>uint32(y) => (FlagGT_UGT) +(CMPWconst (MOVLconst [x]) [y]) && int16(x)==y => (FlagEQ) +(CMPWconst (MOVLconst [x]) [y]) && int16(x) (FlagLT_ULT) +(CMPWconst (MOVLconst [x]) [y]) && int16(x)uint16(y) => (FlagLT_UGT) +(CMPWconst (MOVLconst [x]) [y]) && int16(x)>y && uint16(x) (FlagGT_ULT) +(CMPWconst (MOVLconst [x]) [y]) && int16(x)>y && uint16(x)>uint16(y) => (FlagGT_UGT) +(CMPBconst (MOVLconst [x]) [y]) && int8(x)==y => (FlagEQ) +(CMPBconst (MOVLconst [x]) [y]) && int8(x) (FlagLT_ULT) +(CMPBconst (MOVLconst [x]) [y]) && int8(x)uint8(y) => (FlagLT_UGT) +(CMPBconst (MOVLconst [x]) [y]) && int8(x)>y && uint8(x) (FlagGT_ULT) +(CMPBconst (MOVLconst [x]) [y]) && int8(x)>y && uint8(x)>uint8(y) => (FlagGT_UGT) // CMPQconst requires a 32 bit const, but we can still constant-fold 64 bit consts. // In theory this applies to any of the simplifications above, // but CMPQ is the only one I've actually seen occur. -(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x==y -> (FlagEQ) -(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x (FlagLT_ULT) -(CMPQ (MOVQconst [x]) (MOVQconst [y])) && xuint64(y) -> (FlagLT_UGT) -(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x>y && uint64(x) (FlagGT_ULT) -(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x>y && uint64(x)>uint64(y) -> (FlagGT_UGT) +(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x==y => (FlagEQ) +(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x (FlagLT_ULT) +(CMPQ (MOVQconst [x]) (MOVQconst [y])) && xuint64(y) => (FlagLT_UGT) +(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x>y && uint64(x) (FlagGT_ULT) +(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x>y && uint64(x)>uint64(y) => (FlagGT_UGT) // Other known comparisons. 
-(CMPQconst (MOVBQZX _) [c]) && 0xFF < c -> (FlagLT_ULT) -(CMPQconst (MOVWQZX _) [c]) && 0xFFFF < c -> (FlagLT_ULT) -(CMPQconst (MOVLQZX _) [c]) && 0xFFFFFFFF < c -> (FlagLT_ULT) -(CMPLconst (SHRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 32 && (1< (FlagLT_ULT) -(CMPQconst (SHRQconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 64 && (1< (FlagLT_ULT) -(CMPQconst (ANDQconst _ [m]) [n]) && 0 <= m && m < n -> (FlagLT_ULT) -(CMPQconst (ANDLconst _ [m]) [n]) && 0 <= m && m < n -> (FlagLT_ULT) -(CMPLconst (ANDLconst _ [m]) [n]) && 0 <= int32(m) && int32(m) < int32(n) -> (FlagLT_ULT) -(CMPWconst (ANDLconst _ [m]) [n]) && 0 <= int16(m) && int16(m) < int16(n) -> (FlagLT_ULT) -(CMPBconst (ANDLconst _ [m]) [n]) && 0 <= int8(m) && int8(m) < int8(n) -> (FlagLT_ULT) +(CMPQconst (MOVBQZX _) [c]) && 0xFF < c => (FlagLT_ULT) +(CMPQconst (MOVWQZX _) [c]) && 0xFFFF < c => (FlagLT_ULT) +(CMPLconst (SHRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 32 && (1< (FlagLT_ULT) +(CMPQconst (SHRQconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 64 && (1< (FlagLT_ULT) +(CMPQconst (ANDQconst _ [m]) [n]) && 0 <= m && m < n => (FlagLT_ULT) +(CMPQconst (ANDLconst _ [m]) [n]) && 0 <= m && m < n => (FlagLT_ULT) +(CMPLconst (ANDLconst _ [m]) [n]) && 0 <= m && m < n => (FlagLT_ULT) +(CMPWconst (ANDLconst _ [m]) [n]) && 0 <= m && int16(m) < n => (FlagLT_ULT) +(CMPBconst (ANDLconst _ [m]) [n]) && 0 <= m && int8(m) < n => (FlagLT_ULT) // TESTQ c c sets flags like CMPQ c 0. 
(TEST(Q|L)const [c] (MOV(Q|L)const [c])) && c == 0 -> (FlagEQ) diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 3f58ad392b..a7b3635b5e 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -6811,90 +6811,90 @@ func rewriteValueAMD64_OpAMD64CMPBconst(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (CMPBconst (MOVLconst [x]) [y]) - // cond: int8(x)==int8(y) + // cond: int8(x)==y // result: (FlagEQ) for { - y := v.AuxInt + y := auxIntToInt8(v.AuxInt) if v_0.Op != OpAMD64MOVLconst { break } - x := v_0.AuxInt - if !(int8(x) == int8(y)) { + x := auxIntToInt32(v_0.AuxInt) + if !(int8(x) == y) { break } v.reset(OpAMD64FlagEQ) return true } // match: (CMPBconst (MOVLconst [x]) [y]) - // cond: int8(x)uint8(y) + // cond: int8(x)uint8(y) // result: (FlagLT_UGT) for { - y := v.AuxInt + y := auxIntToInt8(v.AuxInt) if v_0.Op != OpAMD64MOVLconst { break } - x := v_0.AuxInt - if !(int8(x) < int8(y) && uint8(x) > uint8(y)) { + x := auxIntToInt32(v_0.AuxInt) + if !(int8(x) < y && uint8(x) > uint8(y)) { break } v.reset(OpAMD64FlagLT_UGT) return true } // match: (CMPBconst (MOVLconst [x]) [y]) - // cond: int8(x)>int8(y) && uint8(x)y && uint8(x) int8(y) && uint8(x) < uint8(y)) { + x := auxIntToInt32(v_0.AuxInt) + if !(int8(x) > y && uint8(x) < uint8(y)) { break } v.reset(OpAMD64FlagGT_ULT) return true } // match: (CMPBconst (MOVLconst [x]) [y]) - // cond: int8(x)>int8(y) && uint8(x)>uint8(y) + // cond: int8(x)>y && uint8(x)>uint8(y) // result: (FlagGT_UGT) for { - y := v.AuxInt + y := auxIntToInt8(v.AuxInt) if v_0.Op != OpAMD64MOVLconst { break } - x := v_0.AuxInt - if !(int8(x) > int8(y) && uint8(x) > uint8(y)) { + x := auxIntToInt32(v_0.AuxInt) + if !(int8(x) > y && uint8(x) > uint8(y)) { break } v.reset(OpAMD64FlagGT_UGT) return true } // match: (CMPBconst (ANDLconst _ [m]) [n]) - // cond: 0 <= int8(m) && int8(m) < int8(n) + // cond: 0 <= m && int8(m) < n 
// result: (FlagLT_ULT) for { - n := v.AuxInt + n := auxIntToInt8(v.AuxInt) if v_0.Op != OpAMD64ANDLconst { break } - m := v_0.AuxInt - if !(0 <= int8(m) && int8(m) < int8(n)) { + m := auxIntToInt32(v_0.AuxInt) + if !(0 <= m && int8(m) < n) { break } v.reset(OpAMD64FlagLT_ULT) @@ -7197,75 +7197,75 @@ func rewriteValueAMD64_OpAMD64CMPLconst(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (CMPLconst (MOVLconst [x]) [y]) - // cond: int32(x)==int32(y) + // cond: x==y // result: (FlagEQ) for { - y := v.AuxInt + y := auxIntToInt32(v.AuxInt) if v_0.Op != OpAMD64MOVLconst { break } - x := v_0.AuxInt - if !(int32(x) == int32(y)) { + x := auxIntToInt32(v_0.AuxInt) + if !(x == y) { break } v.reset(OpAMD64FlagEQ) return true } // match: (CMPLconst (MOVLconst [x]) [y]) - // cond: int32(x)uint32(y) + // cond: xuint32(y) // result: (FlagLT_UGT) for { - y := v.AuxInt + y := auxIntToInt32(v.AuxInt) if v_0.Op != OpAMD64MOVLconst { break } - x := v_0.AuxInt - if !(int32(x) < int32(y) && uint32(x) > uint32(y)) { + x := auxIntToInt32(v_0.AuxInt) + if !(x < y && uint32(x) > uint32(y)) { break } v.reset(OpAMD64FlagLT_UGT) return true } // match: (CMPLconst (MOVLconst [x]) [y]) - // cond: int32(x)>int32(y) && uint32(x)y && uint32(x) int32(y) && uint32(x) < uint32(y)) { + x := auxIntToInt32(v_0.AuxInt) + if !(x > y && uint32(x) < uint32(y)) { break } v.reset(OpAMD64FlagGT_ULT) return true } // match: (CMPLconst (MOVLconst [x]) [y]) - // cond: int32(x)>int32(y) && uint32(x)>uint32(y) + // cond: x>y && uint32(x)>uint32(y) // result: (FlagGT_UGT) for { - y := v.AuxInt + y := auxIntToInt32(v.AuxInt) if v_0.Op != OpAMD64MOVLconst { break } - x := v_0.AuxInt - if !(int32(x) > int32(y) && uint32(x) > uint32(y)) { + x := auxIntToInt32(v_0.AuxInt) + if !(x > y && uint32(x) > uint32(y)) { break } v.reset(OpAMD64FlagGT_UGT) @@ -7275,11 +7275,11 @@ func rewriteValueAMD64_OpAMD64CMPLconst(v *Value) bool { // cond: 0 <= n && 0 < c && c <= 32 && (1< uint64(y)) { break } @@ -7615,11 +7615,11 @@ 
func rewriteValueAMD64_OpAMD64CMPQ(v *Value) bool { if v_0.Op != OpAMD64MOVQconst { break } - x := v_0.AuxInt + x := auxIntToInt64(v_0.AuxInt) if v_1.Op != OpAMD64MOVQconst { break } - y := v_1.AuxInt + y := auxIntToInt64(v_1.AuxInt) if !(x > y && uint64(x) < uint64(y)) { break } @@ -7633,11 +7633,11 @@ func rewriteValueAMD64_OpAMD64CMPQ(v *Value) bool { if v_0.Op != OpAMD64MOVQconst { break } - x := v_0.AuxInt + x := auxIntToInt64(v_0.AuxInt) if v_1.Op != OpAMD64MOVQconst { break } - y := v_1.AuxInt + y := auxIntToInt64(v_1.AuxInt) if !(x > y && uint64(x) > uint64(y)) { break } @@ -7730,75 +7730,75 @@ func rewriteValueAMD64_OpAMD64CMPQconst(v *Value) bool { return true } // match: (CMPQconst (MOVQconst [x]) [y]) - // cond: x==y + // cond: x==int64(y) // result: (FlagEQ) for { - y := v.AuxInt + y := auxIntToInt32(v.AuxInt) if v_0.Op != OpAMD64MOVQconst { break } - x := v_0.AuxInt - if !(x == y) { + x := auxIntToInt64(v_0.AuxInt) + if !(x == int64(y)) { break } v.reset(OpAMD64FlagEQ) return true } // match: (CMPQconst (MOVQconst [x]) [y]) - // cond: xuint64(y) + // cond: xuint64(int64(y)) // result: (FlagLT_UGT) for { - y := v.AuxInt + y := auxIntToInt32(v.AuxInt) if v_0.Op != OpAMD64MOVQconst { break } - x := v_0.AuxInt - if !(x < y && uint64(x) > uint64(y)) { + x := auxIntToInt64(v_0.AuxInt) + if !(x < int64(y) && uint64(x) > uint64(int64(y))) { break } v.reset(OpAMD64FlagLT_UGT) return true } // match: (CMPQconst (MOVQconst [x]) [y]) - // cond: x>y && uint64(x)int64(y) && uint64(x) y && uint64(x) < uint64(y)) { + x := auxIntToInt64(v_0.AuxInt) + if !(x > int64(y) && uint64(x) < uint64(int64(y))) { break } v.reset(OpAMD64FlagGT_ULT) return true } // match: (CMPQconst (MOVQconst [x]) [y]) - // cond: x>y && uint64(x)>uint64(y) + // cond: x>int64(y) && uint64(x)>uint64(int64(y)) // result: (FlagGT_UGT) for { - y := v.AuxInt + y := auxIntToInt32(v.AuxInt) if v_0.Op != OpAMD64MOVQconst { break } - x := v_0.AuxInt - if !(x > y && uint64(x) > uint64(y)) { + x := 
auxIntToInt64(v_0.AuxInt) + if !(x > int64(y) && uint64(x) > uint64(int64(y))) { break } v.reset(OpAMD64FlagGT_UGT) @@ -7808,7 +7808,7 @@ func rewriteValueAMD64_OpAMD64CMPQconst(v *Value) bool { // cond: 0xFF < c // result: (FlagLT_ULT) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpAMD64MOVBQZX || !(0xFF < c) { break } @@ -7819,33 +7819,22 @@ func rewriteValueAMD64_OpAMD64CMPQconst(v *Value) bool { // cond: 0xFFFF < c // result: (FlagLT_ULT) for { - c := v.AuxInt + c := auxIntToInt32(v.AuxInt) if v_0.Op != OpAMD64MOVWQZX || !(0xFFFF < c) { break } v.reset(OpAMD64FlagLT_ULT) return true } - // match: (CMPQconst (MOVLQZX _) [c]) - // cond: 0xFFFFFFFF < c - // result: (FlagLT_ULT) - for { - c := v.AuxInt - if v_0.Op != OpAMD64MOVLQZX || !(0xFFFFFFFF < c) { - break - } - v.reset(OpAMD64FlagLT_ULT) - return true - } // match: (CMPQconst (SHRQconst _ [c]) [n]) // cond: 0 <= n && 0 < c && c <= 64 && (1<uint16(y) + // cond: int16(x)uint16(y) // result: (FlagLT_UGT) for { - y := v.AuxInt + y := auxIntToInt16(v.AuxInt) if v_0.Op != OpAMD64MOVLconst { break } - x := v_0.AuxInt - if !(int16(x) < int16(y) && uint16(x) > uint16(y)) { + x := auxIntToInt32(v_0.AuxInt) + if !(int16(x) < y && uint16(x) > uint16(y)) { break } v.reset(OpAMD64FlagLT_UGT) return true } // match: (CMPWconst (MOVLconst [x]) [y]) - // cond: int16(x)>int16(y) && uint16(x)y && uint16(x) int16(y) && uint16(x) < uint16(y)) { + x := auxIntToInt32(v_0.AuxInt) + if !(int16(x) > y && uint16(x) < uint16(y)) { break } v.reset(OpAMD64FlagGT_ULT) return true } // match: (CMPWconst (MOVLconst [x]) [y]) - // cond: int16(x)>int16(y) && uint16(x)>uint16(y) + // cond: int16(x)>y && uint16(x)>uint16(y) // result: (FlagGT_UGT) for { - y := v.AuxInt + y := auxIntToInt16(v.AuxInt) if v_0.Op != OpAMD64MOVLconst { break } - x := v_0.AuxInt - if !(int16(x) > int16(y) && uint16(x) > uint16(y)) { + x := auxIntToInt32(v_0.AuxInt) + if !(int16(x) > y && uint16(x) > uint16(y)) { break } v.reset(OpAMD64FlagGT_UGT) 
return true } // match: (CMPWconst (ANDLconst _ [m]) [n]) - // cond: 0 <= int16(m) && int16(m) < int16(n) + // cond: 0 <= m && int16(m) < n // result: (FlagLT_ULT) for { - n := v.AuxInt + n := auxIntToInt16(v.AuxInt) if v_0.Op != OpAMD64ANDLconst { break } - m := v_0.AuxInt - if !(0 <= int16(m) && int16(m) < int16(n)) { + m := auxIntToInt32(v_0.AuxInt) + if !(0 <= m && int16(m) < n) { break } v.reset(OpAMD64FlagLT_ULT) From f5c3eda4c9519641265279bcb8a484f846750258 Mon Sep 17 00:00:00 2001 From: Constantin Konstantinidis Date: Thu, 24 Sep 2020 19:32:11 +0200 Subject: [PATCH 035/281] cmd/compile: enforce strongly typed rules for ARM (mergesym) Replace mergeSym by mergeSymTyped. L435-L459 toolstash-check successful. Change-Id: Icbefe5c3589ed4ecdbca3dff9b3a758bdba3b34b Reviewed-on: https://go-review.googlesource.com/c/go/+/257642 Reviewed-by: Keith Randall Trust: Alberto Donizetti --- src/cmd/compile/internal/ssa/gen/ARM.rules | 48 +++--- src/cmd/compile/internal/ssa/rewriteARM.go | 168 ++++++++++----------- 2 files changed, 108 insertions(+), 108 deletions(-) diff --git a/src/cmd/compile/internal/ssa/gen/ARM.rules b/src/cmd/compile/internal/ssa/gen/ARM.rules index 840b93bb53..9490805f46 100644 --- a/src/cmd/compile/internal/ssa/gen/ARM.rules +++ b/src/cmd/compile/internal/ssa/gen/ARM.rules @@ -432,31 +432,31 @@ (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) => (MOVDstore [off1+off2] {sym} ptr val mem) (MOVDstore [off1] {sym} (SUBconst [off2] ptr) val mem) => (MOVDstore [off1-off2] {sym} ptr val mem) -(MOVBload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) -> - (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) -(MOVBUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) -> - (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) -(MOVHload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) -> - (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) -(MOVHUload [off1] {sym1} (MOVWaddr 
[off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) -> - (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) -(MOVWload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) -> - (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) -(MOVFload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) -> - (MOVFload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) -(MOVDload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) -> - (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) +(MOVBload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) => + (MOVBload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem) +(MOVBUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) => + (MOVBUload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem) +(MOVHload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) => + (MOVHload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem) +(MOVHUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) => + (MOVHUload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem) +(MOVWload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) => + (MOVWload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem) +(MOVFload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) => + (MOVFload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem) +(MOVDload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) => + (MOVDload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem) -(MOVBstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) -> - (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) -(MOVHstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) -> - (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) -(MOVWstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) -> - (MOVWstore 
[off1+off2] {mergeSym(sym1,sym2)} ptr val mem) -(MOVFstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) -> - (MOVFstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) -(MOVDstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) -> - (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) +(MOVBstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) => + (MOVBstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem) +(MOVHstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) => + (MOVHstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem) +(MOVWstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) => + (MOVWstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem) +(MOVFstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) => + (MOVFstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem) +(MOVDstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) => + (MOVDstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem) // replace load from same location as preceding store with zero/sign extension (or copy in case of full width) (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBreg x) diff --git a/src/cmd/compile/internal/ssa/rewriteARM.go b/src/cmd/compile/internal/ssa/rewriteARM.go index b790d28cac..4e44165169 100644 --- a/src/cmd/compile/internal/ssa/rewriteARM.go +++ b/src/cmd/compile/internal/ssa/rewriteARM.go @@ -4563,23 +4563,23 @@ func rewriteValueARM_OpARMMOVBUload(v *Value) bool { } // match: (MOVBUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) // cond: canMergeSym(sym1,sym2) - // result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + // result: (MOVBUload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := 
auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpARMMOVWaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 if !(canMergeSym(sym1, sym2)) { break } v.reset(OpARMMOVBUload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(ptr, mem) return true } @@ -4781,23 +4781,23 @@ func rewriteValueARM_OpARMMOVBload(v *Value) bool { } // match: (MOVBload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) // cond: canMergeSym(sym1,sym2) - // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + // result: (MOVBload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpARMMOVWaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 if !(canMergeSym(sym1, sym2)) { break } v.reset(OpARMMOVBload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(ptr, mem) return true } @@ -4993,15 +4993,15 @@ func rewriteValueARM_OpARMMOVBstore(v *Value) bool { } // match: (MOVBstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) // cond: canMergeSym(sym1,sym2) - // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + // result: (MOVBstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpARMMOVWaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] val := v_1 mem := v_2 @@ -5009,8 +5009,8 @@ func rewriteValueARM_OpARMMOVBstore(v *Value) bool { break } 
v.reset(OpARMMOVBstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(ptr, val, mem) return true } @@ -5182,23 +5182,23 @@ func rewriteValueARM_OpARMMOVDload(v *Value) bool { } // match: (MOVDload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) // cond: canMergeSym(sym1,sym2) - // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + // result: (MOVDload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpARMMOVWaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 if !(canMergeSym(sym1, sym2)) { break } v.reset(OpARMMOVDload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(ptr, mem) return true } @@ -5266,15 +5266,15 @@ func rewriteValueARM_OpARMMOVDstore(v *Value) bool { } // match: (MOVDstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) // cond: canMergeSym(sym1,sym2) - // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + // result: (MOVDstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpARMMOVWaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] val := v_1 mem := v_2 @@ -5282,8 +5282,8 @@ func rewriteValueARM_OpARMMOVDstore(v *Value) bool { break } v.reset(OpARMMOVDstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(ptr, val, mem) return true } @@ -5328,23 +5328,23 @@ func rewriteValueARM_OpARMMOVFload(v *Value) 
bool { } // match: (MOVFload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) // cond: canMergeSym(sym1,sym2) - // result: (MOVFload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + // result: (MOVFload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpARMMOVWaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 if !(canMergeSym(sym1, sym2)) { break } v.reset(OpARMMOVFload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(ptr, mem) return true } @@ -5412,15 +5412,15 @@ func rewriteValueARM_OpARMMOVFstore(v *Value) bool { } // match: (MOVFstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) // cond: canMergeSym(sym1,sym2) - // result: (MOVFstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + // result: (MOVFstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpARMMOVWaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] val := v_1 mem := v_2 @@ -5428,8 +5428,8 @@ func rewriteValueARM_OpARMMOVFstore(v *Value) bool { break } v.reset(OpARMMOVFstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(ptr, val, mem) return true } @@ -5476,23 +5476,23 @@ func rewriteValueARM_OpARMMOVHUload(v *Value) bool { } // match: (MOVHUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) // cond: canMergeSym(sym1,sym2) - // result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + // result: (MOVHUload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem) for { - off1 := 
v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpARMMOVWaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 if !(canMergeSym(sym1, sym2)) { break } v.reset(OpARMMOVHUload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(ptr, mem) return true } @@ -5716,23 +5716,23 @@ func rewriteValueARM_OpARMMOVHload(v *Value) bool { } // match: (MOVHload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) // cond: canMergeSym(sym1,sym2) - // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + // result: (MOVHload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpARMMOVWaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 if !(canMergeSym(sym1, sym2)) { break } v.reset(OpARMMOVHload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(ptr, mem) return true } @@ -5972,15 +5972,15 @@ func rewriteValueARM_OpARMMOVHstore(v *Value) bool { } // match: (MOVHstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) // cond: canMergeSym(sym1,sym2) - // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + // result: (MOVHstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpARMMOVWaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] val := v_1 mem := v_2 @@ -5988,8 +5988,8 @@ func rewriteValueARM_OpARMMOVHstore(v 
*Value) bool { break } v.reset(OpARMMOVHstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(ptr, val, mem) return true } @@ -6129,23 +6129,23 @@ func rewriteValueARM_OpARMMOVWload(v *Value) bool { } // match: (MOVWload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) // cond: canMergeSym(sym1,sym2) - // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + // result: (MOVWload [off1+off2] {mergeSymTyped(sym1,sym2)} ptr mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpARMMOVWaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 if !(canMergeSym(sym1, sym2)) { break } v.reset(OpARMMOVWload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(ptr, mem) return true } @@ -6604,15 +6604,15 @@ func rewriteValueARM_OpARMMOVWstore(v *Value) bool { } // match: (MOVWstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) // cond: canMergeSym(sym1,sym2) - // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + // result: (MOVWstore [off1+off2] {mergeSymTyped(sym1,sym2)} ptr val mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpARMMOVWaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] val := v_1 mem := v_2 @@ -6620,8 +6620,8 @@ func rewriteValueARM_OpARMMOVWstore(v *Value) bool { break } v.reset(OpARMMOVWstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(ptr, val, mem) return true } From ad618689ef06e9dca86c0e2b9b38a2c1b9266f4a 
Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Sat, 26 Sep 2020 16:30:02 +0200 Subject: [PATCH 036/281] cmd/dist: detect gohostarch on netbsd/arm64 hosts On netbsd/arm64 `uname -m` reports `evbarm` which is mapped to gohostarch=arm. Fix this by checking for "aarch64" in `uname -p` output to fix self-hosted build on netbsd/arm64. Updates #30824 Change-Id: I0f0450ff35af0bec51aeec3b210143ba892216c6 Reviewed-on: https://go-review.googlesource.com/c/go/+/257643 Trust: Tobias Klauser Trust: Benny Siegert Run-TryBot: Tobias Klauser TryBot-Result: Go Bot Reviewed-by: Benny Siegert --- src/cmd/dist/main.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/cmd/dist/main.go b/src/cmd/dist/main.go index b8a8c5f2e6..224b6c0c3e 100644 --- a/src/cmd/dist/main.go +++ b/src/cmd/dist/main.go @@ -108,6 +108,9 @@ func main() { gohostarch = "arm64" case strings.Contains(out, "arm"): gohostarch = "arm" + if gohostos == "netbsd" && strings.Contains(run("", CheckExit, "uname", "-p"), "aarch64") { + gohostarch = "arm64" + } case strings.Contains(out, "ppc64le"): gohostarch = "ppc64le" case strings.Contains(out, "ppc64"): From 6f02578f9cff92e6c0fae4d86df01dcf99673c61 Mon Sep 17 00:00:00 2001 From: David Chase Date: Fri, 25 Sep 2020 13:30:51 -0400 Subject: [PATCH 037/281] cmd/compile: fix logopt log directory naming for windows Allow Windows absolute paths, also fixed URI decoding on Windows. Added a test, reorganized to make the test cleaner. Also put some doc comments on exported functions that did not have them. Fixes #41614. 
Change-Id: I2871be0e5183fbd53ffb309896d6fe56c15a7727 Reviewed-on: https://go-review.googlesource.com/c/go/+/257677 Trust: David Chase Run-TryBot: David Chase TryBot-Result: Go Bot Reviewed-by: Rebecca Stambler --- src/cmd/compile/internal/logopt/log_opts.go | 93 +++++++++++++------ .../compile/internal/logopt/logopt_test.go | 28 ++++++ 2 files changed, 91 insertions(+), 30 deletions(-) diff --git a/src/cmd/compile/internal/logopt/log_opts.go b/src/cmd/compile/internal/logopt/log_opts.go index 22a94b0f2d..37a049d640 100644 --- a/src/cmd/compile/internal/logopt/log_opts.go +++ b/src/cmd/compile/internal/logopt/log_opts.go @@ -19,6 +19,7 @@ import ( "strconv" "strings" "sync" + "unicode" ) // This implements (non)optimization logging for -json option to the Go compiler @@ -223,11 +224,11 @@ type Diagnostic struct { // A LoggedOpt is what the compiler produces and accumulates, // to be converted to JSON for human or IDE consumption. type LoggedOpt struct { - pos src.XPos // Source code position at which the event occurred. If it is inlined, outer and all inlined locations will appear in JSON. - pass string // For human/adhoc consumption; does not appear in JSON (yet) - fname string // For human/adhoc consumption; does not appear in JSON (yet) - what string // The (non) optimization; "nilcheck", "boundsCheck", "inline", "noInline" - target []interface{} // Optional target(s) or parameter(s) of "what" -- what was inlined, why it was not, size of copy, etc. 1st is most important/relevant. + pos src.XPos // Source code position at which the event occurred. If it is inlined, outer and all inlined locations will appear in JSON. + compilerPass string // Compiler pass. For human/adhoc consumption; does not appear in JSON (yet) + functionName string // Function name. 
For human/adhoc consumption; does not appear in JSON (yet) + what string // The (non) optimization; "nilcheck", "boundsCheck", "inline", "noInline" + target []interface{} // Optional target(s) or parameter(s) of "what" -- what was inlined, why it was not, size of copy, etc. 1st is most important/relevant. } type logFormat uint8 @@ -240,12 +241,13 @@ const ( var Format = None var dest string +// LogJsonOption parses and validates the version,directory value attached to the -json compiler flag. func LogJsonOption(flagValue string) { version, directory := parseLogFlag("json", flagValue) if version != 0 { log.Fatal("-json version must be 0") } - checkLogPath("json", directory) + dest = checkLogPath(directory) Format = Json0 } @@ -268,51 +270,80 @@ func parseLogFlag(flag, value string) (version int, directory string) { return } +// isWindowsDriveURI returns true if the file URI is of the format used by +// Windows URIs. The url.Parse package does not specially handle Windows paths +// (see golang/go#6027), so we check if the URI path has a drive prefix (e.g. "/C:"). +// (copied from tools/internal/span/uri.go) +// this is less comprehensive that the processing in filepath.IsAbs on Windows. +func isWindowsDriveURIPath(uri string) bool { + if len(uri) < 4 { + return false + } + return uri[0] == '/' && unicode.IsLetter(rune(uri[1])) && uri[2] == ':' +} + +func parseLogPath(destination string) (string, string) { + if filepath.IsAbs(destination) { + return filepath.Clean(destination), "" + } + if strings.HasPrefix(destination, "file://") { // IKWIAD, or Windows C:\foo\bar\baz + uri, err := url.Parse(destination) + if err != nil { + return "", fmt.Sprintf("optimizer logging destination looked like file:// URI but failed to parse: err=%v", err) + } + destination = uri.Host + uri.Path + if isWindowsDriveURIPath(destination) { + // strip leading / from /C: + // unlike tools/internal/span/uri.go, do not uppercase the drive letter -- let filepath.Clean do what it does. 
+ destination = destination[1:] + } + return filepath.Clean(destination), "" + } + return "", fmt.Sprintf("optimizer logging destination %s was neither %s-prefixed directory nor file://-prefixed file URI", destination, string(filepath.Separator)) +} + // checkLogPath does superficial early checking of the string specifying // the directory to which optimizer logging is directed, and if // it passes the test, stores the string in LO_dir -func checkLogPath(flag, destination string) { - sep := string(os.PathSeparator) - if strings.HasPrefix(destination, "/") || strings.HasPrefix(destination, sep) { - err := os.MkdirAll(destination, 0755) - if err != nil { - log.Fatalf("optimizer logging destination ',' but could not create : err=%v", err) - } - } else if strings.HasPrefix(destination, "file://") { // IKWIAD, or Windows C:\foo\bar\baz - uri, err := url.Parse(destination) - if err != nil { - log.Fatalf("optimizer logging destination looked like file:// URI but failed to parse: err=%v", err) - } - destination = uri.Host + uri.Path - err = os.MkdirAll(destination, 0755) - if err != nil { - log.Fatalf("optimizer logging destination ',' but could not create %s: err=%v", destination, err) - } - } else { - log.Fatalf("optimizer logging destination %s was neither %s-prefixed directory nor file://-prefixed file URI", destination, sep) +func checkLogPath(destination string) string { + path, complaint := parseLogPath(destination) + if complaint != "" { + log.Fatalf(complaint) } - dest = destination + err := os.MkdirAll(path, 0755) + if err != nil { + log.Fatalf("optimizer logging destination ',' but could not create : err=%v", err) + } + return path } var loggedOpts []*LoggedOpt var mu = sync.Mutex{} // mu protects loggedOpts. -func NewLoggedOpt(pos src.XPos, what, pass, fname string, args ...interface{}) *LoggedOpt { +// NewLoggedOpt allocates a new LoggedOpt, to later be passed to either NewLoggedOpt or LogOpt as "args". 
+// Pos is the source position (including inlining), what is the message, pass is which pass created the message, +// funcName is the name of the function +// A typical use for this to accumulate an explanation for a missed optimization, for example, why did something escape? +func NewLoggedOpt(pos src.XPos, what, pass, funcName string, args ...interface{}) *LoggedOpt { pass = strings.Replace(pass, " ", "_", -1) - return &LoggedOpt{pos, pass, fname, what, args} + return &LoggedOpt{pos, pass, funcName, what, args} } -func LogOpt(pos src.XPos, what, pass, fname string, args ...interface{}) { +// Logopt logs information about a (usually missed) optimization performed by the compiler. +// Pos is the source position (including inlining), what is the message, pass is which pass created the message, +// funcName is the name of the function +func LogOpt(pos src.XPos, what, pass, funcName string, args ...interface{}) { if Format == None { return } - lo := NewLoggedOpt(pos, what, pass, fname, args...) + lo := NewLoggedOpt(pos, what, pass, funcName, args...) mu.Lock() defer mu.Unlock() // Because of concurrent calls from back end, no telling what the order will be, but is stable-sorted by outer Pos before use. loggedOpts = append(loggedOpts, lo) } +// Enabled returns whether optimization logging is enabled. func Enabled() bool { switch Format { case None: @@ -459,11 +490,13 @@ func FlushLoggedOpts(ctxt *obj.Link, slashPkgPath string) { } } +// newPointRange returns a single-position Range for the compiler source location p. 
func newPointRange(p src.Pos) Range { return Range{Start: Position{p.Line(), p.Col()}, End: Position{p.Line(), p.Col()}} } +// newLocation returns the Location for the compiler source location p func newLocation(p src.Pos) Location { loc := Location{URI: uriIfy(uprootedPath(p.Filename())), Range: newPointRange(p)} return loc diff --git a/src/cmd/compile/internal/logopt/logopt_test.go b/src/cmd/compile/internal/logopt/logopt_test.go index df3e70a614..b57a07f12c 100644 --- a/src/cmd/compile/internal/logopt/logopt_test.go +++ b/src/cmd/compile/internal/logopt/logopt_test.go @@ -55,6 +55,34 @@ func wantN(t *testing.T, out string, desired string, n int) { } } +func TestPathStuff(t *testing.T) { + sep := string(filepath.Separator) + if path, whine := parseLogPath("file:///c:foo"); path != "c:foo" || whine != "" { // good path + t.Errorf("path='%s', whine='%s'", path, whine) + } + if path, whine := parseLogPath("file:///foo"); path != sep+"foo" || whine != "" { // good path + t.Errorf("path='%s', whine='%s'", path, whine) + } + if path, whine := parseLogPath("foo"); path != "" || whine == "" { // BAD path + t.Errorf("path='%s', whine='%s'", path, whine) + } + if sep == "\\" { // On WINDOWS ONLY + if path, whine := parseLogPath("C:/foo"); path != "C:\\foo" || whine != "" { // good path + t.Errorf("path='%s', whine='%s'", path, whine) + } + if path, whine := parseLogPath("c:foo"); path != "" || whine == "" { // BAD path + t.Errorf("path='%s', whine='%s'", path, whine) + } + if path, whine := parseLogPath("/foo"); path != "" || whine == "" { // BAD path + t.Errorf("path='%s', whine='%s'", path, whine) + } + } else { // ON UNIX ONLY + if path, whine := parseLogPath("/foo"); path != sep+"foo" || whine != "" { // good path + t.Errorf("path='%s', whine='%s'", path, whine) + } + } +} + func TestLogOpt(t *testing.T) { t.Parallel() From 7bb6fed9b53494e9846689520b41b8e679bd121d Mon Sep 17 00:00:00 2001 From: Changkun Ou Date: Thu, 24 Sep 2020 08:57:00 +0200 Subject: [PATCH 038/281] 
os: document and emphasize a potential misuse of File.Fd This CL revises the document of File.Fd that explicitly points its user to runtime.SetFinalizer where contains the information that a file descriptor could be closed in a finalizer and therefore causes a failure in syscall.Write if runtime.KeepAlive is not invoked. The CL also suggests an alternative of File.Fd towards File.SyscallConn. Fixes #41505 Change-Id: I6816f0157add48b649bf1fb793cf19dcea6894b5 Reviewed-on: https://go-review.googlesource.com/c/go/+/256899 Reviewed-by: Rob Pike Trust: Ian Lance Taylor --- src/os/file_plan9.go | 9 +++++++-- src/os/file_unix.go | 9 +++++++-- src/os/file_windows.go | 7 +++++-- src/runtime/mfinal.go | 18 +++++++++--------- 4 files changed, 28 insertions(+), 15 deletions(-) diff --git a/src/os/file_plan9.go b/src/os/file_plan9.go index 043500744b..5e0ad68208 100644 --- a/src/os/file_plan9.go +++ b/src/os/file_plan9.go @@ -29,8 +29,13 @@ type file struct { } // Fd returns the integer Plan 9 file descriptor referencing the open file. -// The file descriptor is valid only until f.Close is called or f is garbage collected. -// On Unix systems this will cause the SetDeadline methods to stop working. +// If f is closed, the file descriptor becomes invalid. +// If f is garbage collected, a finalizer may close the file descriptor, +// making it invalid; see runtime.SetFinalizer for more information on when +// a finalizer might be run. On Unix systems this will cause the SetDeadline +// methods to stop working. +// +// As an alternative, see the f.SyscallCon method. func (f *File) Fd() uintptr { if f == nil { return ^(uintptr(0)) diff --git a/src/os/file_unix.go b/src/os/file_unix.go index dc7d868a32..c4dd4fc6a9 100644 --- a/src/os/file_unix.go +++ b/src/os/file_unix.go @@ -62,8 +62,13 @@ type file struct { } // Fd returns the integer Unix file descriptor referencing the open file. -// The file descriptor is valid only until f.Close is called or f is garbage collected. 
-// On Unix systems this will cause the SetDeadline methods to stop working. +// If f is closed, the file descriptor becomes invalid. +// If f is garbage collected, a finalizer may close the file descriptor, +// making it invalid; see runtime.SetFinalizer for more information on when +// a finalizer might be run. On Unix systems this will cause the SetDeadline +// methods to stop working. +// +// As an alternative, see the f.SyscallCon method. func (f *File) Fd() uintptr { if f == nil { return ^(uintptr(0)) diff --git a/src/os/file_windows.go b/src/os/file_windows.go index cc695fd94c..f744a35023 100644 --- a/src/os/file_windows.go +++ b/src/os/file_windows.go @@ -26,8 +26,11 @@ type file struct { } // Fd returns the Windows handle referencing the open file. -// The handle is valid only until f.Close is called or f is garbage collected. -// On Unix systems this will cause the SetDeadline methods to stop working. +// If f is closed, the file descriptor becomes invalid. +// If f is garbage collected, a finalizer may close the file descriptor, +// making it invalid; see runtime.SetFinalizer for more information on when +// a finalizer might be run. On Unix systems this will cause the SetDeadline +// methods to stop working. func (file *File) Fd() uintptr { if file == nil { return uintptr(syscall.InvalidHandle) diff --git a/src/runtime/mfinal.go b/src/runtime/mfinal.go index d6c85a8b93..cd6196dcab 100644 --- a/src/runtime/mfinal.go +++ b/src/runtime/mfinal.go @@ -293,15 +293,15 @@ func runfinq() { // pass the object to a call of the KeepAlive function to mark the // last point in the function where the object must be reachable. // -// For example, if p points to a struct that contains a file descriptor d, -// and p has a finalizer that closes that file descriptor, and if the last -// use of p in a function is a call to syscall.Write(p.d, buf, size), then -// p may be unreachable as soon as the program enters syscall.Write. 
The -// finalizer may run at that moment, closing p.d, causing syscall.Write -// to fail because it is writing to a closed file descriptor (or, worse, -// to an entirely different file descriptor opened by a different goroutine). -// To avoid this problem, call runtime.KeepAlive(p) after the call to -// syscall.Write. +// For example, if p points to a struct, such as os.File, that contains +// a file descriptor d, and p has a finalizer that closes that file +// descriptor, and if the last use of p in a function is a call to +// syscall.Write(p.d, buf, size), then p may be unreachable as soon as +// the program enters syscall.Write. The finalizer may run at that moment, +// closing p.d, causing syscall.Write to fail because it is writing to +// a closed file descriptor (or, worse, to an entirely different +// file descriptor opened by a different goroutine). To avoid this problem, +// call runtime.KeepAlive(p) after the call to syscall.Write. // // A single goroutine runs all finalizers for a program, sequentially. // If a finalizer must run for a long time, it should do so by starting From 72a9dec156408a87548deb920a67b8bf787062db Mon Sep 17 00:00:00 2001 From: Ainar Garipov Date: Fri, 25 Sep 2020 13:30:18 +0300 Subject: [PATCH 039/281] doc/go1.16: document net.ErrClosed usage in crypto/tls Change-Id: I130cf79b93c6456dbe87f0042209e204c4e319b2 Reviewed-on: https://go-review.googlesource.com/c/go/+/257457 Reviewed-by: Ian Lance Taylor Reviewed-by: Emmanuel Odeke Trust: Emmanuel Odeke --- doc/go1.16.html | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/doc/go1.16.html b/doc/go1.16.html index 3164acbb6d..b2cbb58e1a 100644 --- a/doc/go1.16.html +++ b/doc/go1.16.html @@ -164,6 +164,16 @@ Do not send CLs removing the interior tags from such phrases. TODO

+

crypto/tls

+ +

+ I/O operations on closing or closed TLS connections can now be detected using + the new ErrClosed error. A typical use + would be errors.Is(err, net.ErrClosed). In earlier releases + the only way to reliably detect this case was to match the string returned + by the Error method with "tls: use of closed connection". +

+

net

From 8ab020adb27089fa207d015f2f69600ef3d1d307 Mon Sep 17 00:00:00 2001 From: Benny Siegert Date: Sat, 26 Sep 2020 19:40:17 +0000 Subject: [PATCH 040/281] runtime: netbsd-arm64 fixes Add missing declaration of crosscall1. Fix stack alignment for pipe2 return value. Work around kernel clobbering of r28 on aarch64 by reloading from ucontext. https://nxr.netbsd.org/xref/src/sys/arch/aarch64/aarch64/sig_machdep.c#104 Update #30824 Change-Id: I7f9472939f4c02953f8c207308610118f5d3c54c Reviewed-on: https://go-review.googlesource.com/c/go/+/257645 Reviewed-by: Ian Lance Taylor Trust: Benny Siegert --- src/runtime/cgo/gcc_netbsd_arm64.c | 2 ++ src/runtime/sys_netbsd_arm64.s | 11 +++++++++-- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/src/runtime/cgo/gcc_netbsd_arm64.c b/src/runtime/cgo/gcc_netbsd_arm64.c index b29fab0f8c..694116ce70 100644 --- a/src/runtime/cgo/gcc_netbsd_arm64.c +++ b/src/runtime/cgo/gcc_netbsd_arm64.c @@ -53,6 +53,8 @@ _cgo_sys_thread_start(ThreadStart *ts) } } +extern void crosscall1(void (*fn)(void), void (*setg_gcc)(void*), void *g); + static void* threadentry(void *v) { diff --git a/src/runtime/sys_netbsd_arm64.s b/src/runtime/sys_netbsd_arm64.s index e70be0fa74..f19a8b78f6 100644 --- a/src/runtime/sys_netbsd_arm64.s +++ b/src/runtime/sys_netbsd_arm64.s @@ -169,11 +169,12 @@ pipeok: // func pipe2(flags int32) (r, w int32, errno int32) TEXT runtime·pipe2(SB),NOSPLIT|NOFRAME,$0-20 - ADD $8, RSP, R0 + ADD $16, RSP, R0 MOVW flags+0(FP), R1 SVC $SYS_pipe2 - BCC 2(PC) + BCC pipe2ok NEG R0, R0 +pipe2ok: MOVW R0, errno+16(FP) RET @@ -319,6 +320,12 @@ TEXT runtime·sigtramp(SB),NOSPLIT,$192 MOVD R26, 8*11(RSP) MOVD R27, 8*12(RSP) MOVD g, 8*13(RSP) + // Unclobber g for now (kernel uses it as ucontext ptr) + // See https://github.com/golang/go/issues/30824#issuecomment-492772426 + // This is only correct in the non-cgo case. + // XXX should use lwp_getprivate as suggested. 
+ // 8*36 is ucontext.uc_mcontext.__gregs[_REG_X28] + MOVD 8*36(g), g MOVD R29, 8*14(RSP) FMOVD F8, 8*15(RSP) FMOVD F9, 8*16(RSP) From 5755bad42adc23ad4a0c32149ac8cf78ece5d0b0 Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Sat, 26 Sep 2020 17:12:14 +0200 Subject: [PATCH 041/281] os: remove ENOTSUP special case in Getwd on darwin ENOTSUP was used as a signaling error in the custom implementation of syscall.Getwd to fall back to the slow algorithm. Since CL 257637 Getwd directly calls the respective function from libSystem.dylib which can no longer return ENOTSUP. Change-Id: I8e65e42b3ea069bf78969a29f2af1c55552e2949 Reviewed-on: https://go-review.googlesource.com/c/go/+/257644 Trust: Tobias Klauser Run-TryBot: Tobias Klauser TryBot-Result: Go Bot Reviewed-by: Ian Lance Taylor --- src/os/getwd.go | 8 +------- src/os/getwd_darwin.go | 15 --------------- 2 files changed, 1 insertion(+), 22 deletions(-) delete mode 100644 src/os/getwd_darwin.go diff --git a/src/os/getwd.go b/src/os/getwd.go index f373ce937d..90604cf2f4 100644 --- a/src/os/getwd.go +++ b/src/os/getwd.go @@ -15,10 +15,6 @@ var getwdCache struct { dir string } -// useSyscallwd determines whether to use the return value of -// syscall.Getwd based on its error. -var useSyscallwd = func(error) bool { return true } - // Getwd returns a rooted path name corresponding to the // current directory. If the current directory can be // reached via multiple paths (due to symbolic links), @@ -55,9 +51,7 @@ func Getwd() (dir string, err error) { break } } - if useSyscallwd(e) { - return s, NewSyscallError("getwd", e) - } + return s, NewSyscallError("getwd", e) } // Apply same kludge but to cached dir instead of $PWD. diff --git a/src/os/getwd_darwin.go b/src/os/getwd_darwin.go deleted file mode 100644 index e51ffcd5e7..0000000000 --- a/src/os/getwd_darwin.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package os - -import "syscall" - -func init() { - useSyscallwd = useSyscallwdDarwin -} - -func useSyscallwdDarwin(err error) bool { - return err != syscall.ENOTSUP -} From e572218d1273bf54bf8cafd39f93f22de196dd55 Mon Sep 17 00:00:00 2001 From: Alberto Donizetti Date: Sat, 26 Sep 2020 09:42:59 +0200 Subject: [PATCH 042/281] cmd/compile: fix escape reason for MAKESLICE with no cap When explaining why the slice from a make() call escapes for the -m -m message, we print "non-const size" if any one of Isconst(n.Left) and Isconst(n.Right) return false; but for OMAKESLICE nodes with no cap, n.Right is nil, so Isconst(n.Right, CTINT) will be always false. Only call Isconst on n.Right if it's not nil. Fixes #41635 Change-Id: I8729801a9b234b68ae40adad64d66fa7653adf09 Reviewed-on: https://go-review.googlesource.com/c/go/+/257641 Reviewed-by: Cuong Manh Le Reviewed-by: Keith Randall Trust: Alberto Donizetti --- src/cmd/compile/internal/gc/escape.go | 2 +- test/fixedbugs/issue41635.go | 18 ++++++++++++++++++ 2 files changed, 19 insertions(+), 1 deletion(-) create mode 100644 test/fixedbugs/issue41635.go diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go index f435d8ff6a..d79d32ec48 100644 --- a/src/cmd/compile/internal/gc/escape.go +++ b/src/cmd/compile/internal/gc/escape.go @@ -1053,7 +1053,7 @@ func (e *Escape) newLoc(n *Node, transient bool) *EscLocation { if mustHeapAlloc(n) { why := "too large for stack" - if n.Op == OMAKESLICE && (!Isconst(n.Left, CTINT) || !Isconst(n.Right, CTINT)) { + if n.Op == OMAKESLICE && (!Isconst(n.Left, CTINT) || (n.Right != nil && !Isconst(n.Right, CTINT))) { why = "non-constant size" } e.flow(e.heapHole().addr(n, why), loc) diff --git a/test/fixedbugs/issue41635.go b/test/fixedbugs/issue41635.go new file mode 100644 index 0000000000..b33c1a07e7 --- /dev/null +++ b/test/fixedbugs/issue41635.go @@ 
-0,0 +1,18 @@ +//errorcheck -0 -m -m + +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +func f() { // ERROR "" + b1 := make([]byte, 1<<17) // ERROR "too large for stack" "" + b2 := make([]byte, 100, 1<<17) // ERROR "too large for stack" "" + + n, m := 100, 200 + b1 = make([]byte, n) // ERROR "non-constant size" "" + b2 = make([]byte, 100, m) // ERROR "non-constant size" "" + + _, _ = b1, b2 +} From 05b626e49075d3b9f2fcda65c7cc9054381da047 Mon Sep 17 00:00:00 2001 From: Changkun Ou Date: Mon, 28 Sep 2020 09:46:42 +0200 Subject: [PATCH 043/281] os: fix SyscallConn typos in the File.Fd comments This CL fixes two typos introduced in CL 256899. Change-Id: I47f0a3097deeeec8d6e9bbe7073fcf7a28c5dff9 Reviewed-on: https://go-review.googlesource.com/c/go/+/257997 Trust: Tobias Klauser Trust: Emmanuel Odeke Run-TryBot: Tobias Klauser Reviewed-by: Emmanuel Odeke TryBot-Result: Go Bot --- src/os/file_plan9.go | 2 +- src/os/file_unix.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/os/file_plan9.go b/src/os/file_plan9.go index 5e0ad68208..a1a51a1c06 100644 --- a/src/os/file_plan9.go +++ b/src/os/file_plan9.go @@ -35,7 +35,7 @@ type file struct { // a finalizer might be run. On Unix systems this will cause the SetDeadline // methods to stop working. // -// As an alternative, see the f.SyscallCon method. +// As an alternative, see the f.SyscallConn method. func (f *File) Fd() uintptr { if f == nil { return ^(uintptr(0)) diff --git a/src/os/file_unix.go b/src/os/file_unix.go index c4dd4fc6a9..e0f16d809d 100644 --- a/src/os/file_unix.go +++ b/src/os/file_unix.go @@ -68,7 +68,7 @@ type file struct { // a finalizer might be run. On Unix systems this will cause the SetDeadline // methods to stop working. // -// As an alternative, see the f.SyscallCon method. +// As an alternative, see the f.SyscallConn method. 
func (f *File) Fd() uintptr { if f == nil { return ^(uintptr(0)) From 874b3132a84cf76da6a48978826c04c380a37a50 Mon Sep 17 00:00:00 2001 From: avivklas Date: Fri, 7 Aug 2020 21:50:12 +0300 Subject: [PATCH 044/281] mime/multipart: return overflow errors in Reader.ReadForm Updates Reader.ReadForm to check for overflow errors that may result from a leeway addition of 10MiB to the input argument maxMemory. Fixes #40430 Change-Id: I510b8966c95c51d04695ba9d08fcfe005fd11a5d Reviewed-on: https://go-review.googlesource.com/c/go/+/247477 Run-TryBot: Emmanuel Odeke Trust: Cuong Manh Le Trust: Emmanuel Odeke TryBot-Result: Go Bot Reviewed-by: Emmanuel Odeke --- src/mime/multipart/formdata.go | 4 ++++ src/mime/multipart/formdata_test.go | 18 ++++++++++++++++++ 2 files changed, 22 insertions(+) diff --git a/src/mime/multipart/formdata.go b/src/mime/multipart/formdata.go index 832d0ad693..4eb3101294 100644 --- a/src/mime/multipart/formdata.go +++ b/src/mime/multipart/formdata.go @@ -7,6 +7,7 @@ package multipart import ( "bytes" "errors" + "fmt" "io" "io/ioutil" "net/textproto" @@ -41,6 +42,9 @@ func (r *Reader) readForm(maxMemory int64) (_ *Form, err error) { // Reserve an additional 10 MB for non-file parts. maxValueBytes := maxMemory + int64(10<<20) + if maxValueBytes <= 0 { + return nil, fmt.Errorf("multipart: integer overflow from maxMemory(%d) + 10MiB for non-file parts", maxMemory) + } for { p, err := r.NextPart() if err == io.EOF { diff --git a/src/mime/multipart/formdata_test.go b/src/mime/multipart/formdata_test.go index 7d756c8c24..7112e0d372 100644 --- a/src/mime/multipart/formdata_test.go +++ b/src/mime/multipart/formdata_test.go @@ -7,6 +7,7 @@ package multipart import ( "bytes" "io" + "math" "os" "strings" "testing" @@ -52,6 +53,23 @@ func TestReadFormWithNamelessFile(t *testing.T) { } } +// Issue 40430: Ensure that we report integer overflows in additions of maxMemory, +// instead of silently and subtly failing without indication. 
+func TestReadFormMaxMemoryOverflow(t *testing.T) { + b := strings.NewReader(strings.ReplaceAll(messageWithTextContentType, "\n", "\r\n")) + r := NewReader(b, boundary) + f, err := r.ReadForm(math.MaxInt64) + if err == nil { + t.Fatal("Unexpected a non-nil error") + } + if f != nil { + t.Fatalf("Unexpected returned a non-nil form: %v\n", f) + } + if g, w := err.Error(), "integer overflow from maxMemory"; !strings.Contains(g, w) { + t.Errorf(`Error mismatch\n%q\ndid not contain\n%q`, g, w) + } +} + func TestReadFormWithTextContentType(t *testing.T) { // From https://github.com/golang/go/issues/24041 b := strings.NewReader(strings.ReplaceAll(messageWithTextContentType, "\n", "\r\n")) From a424f6e45e29960c933a7ccc1cd8fc9bb2766f15 Mon Sep 17 00:00:00 2001 From: Lynn Boger Date: Wed, 23 Sep 2020 11:06:39 -0400 Subject: [PATCH 045/281] cmd/asm,cmd/compile,cmd/internal/obj/ppc64: add extswsli support on power9 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This adds support for the extswsli instruction which combines extsw followed by a shift. 
New benchmark demonstrates the improvement: name old time/op new time/op delta ExtShift 1.34µs ± 0% 1.30µs ± 0% -3.15% (p=0.057 n=4+3) Change-Id: I21b410676fdf15d20e0cbbaa75d7c6dcd3bbb7b0 Reviewed-on: https://go-review.googlesource.com/c/go/+/257017 Run-TryBot: Lynn Boger TryBot-Result: Go Bot Reviewed-by: Carlos Eduardo Seo Trust: Lynn Boger --- src/cmd/asm/internal/asm/testdata/ppc64enc.s | 1 + src/cmd/compile/internal/gc/bench_test.go | 12 +++ src/cmd/compile/internal/ppc64/ssa.go | 2 +- src/cmd/compile/internal/ssa/gen/PPC64.rules | 2 + src/cmd/compile/internal/ssa/gen/PPC64Ops.go | 1 + src/cmd/compile/internal/ssa/opGen.go | 15 +++ src/cmd/compile/internal/ssa/rewritePPC64.go | 36 +++++++ src/cmd/internal/obj/ppc64/a.out.go | 2 + src/cmd/internal/obj/ppc64/anames.go | 2 + src/cmd/internal/obj/ppc64/asm9.go | 104 ++++++++++++------- test/codegen/shift.go | 7 +- 11 files changed, 142 insertions(+), 42 deletions(-) diff --git a/src/cmd/asm/internal/asm/testdata/ppc64enc.s b/src/cmd/asm/internal/asm/testdata/ppc64enc.s index e26f6f8933..88a7609ba8 100644 --- a/src/cmd/asm/internal/asm/testdata/ppc64enc.s +++ b/src/cmd/asm/internal/asm/testdata/ppc64enc.s @@ -266,6 +266,7 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$0 SRDCC R3, R4 // 7c841c37 ROTLW $16, R3, R4 // 5464803e ROTLW R3, R4, R5 // 5c85183e + EXTSWSLI $3, R4, R5 // 7c851ef4 RLWMI $7, R3, $65535, R6 // 50663c3e RLWMICC $7, R3, $65535, R6 // 50663c3f RLWNM $3, R4, $7, R6 // 54861f7e diff --git a/src/cmd/compile/internal/gc/bench_test.go b/src/cmd/compile/internal/gc/bench_test.go index 09aaf428c3..a2887f2f7b 100644 --- a/src/cmd/compile/internal/gc/bench_test.go +++ b/src/cmd/compile/internal/gc/bench_test.go @@ -20,6 +20,18 @@ func BenchmarkLoadAdd(b *testing.B) { } } +// Added for ppc64 extswsli on power9 +func BenchmarkExtShift(b *testing.B) { + x := make([]int32, 1024) + for i := 0; i < b.N; i++ { + var s int64 + for i := range x { + s ^= int64(x[i]+32) * 8 + } + globl = s + } +} + func BenchmarkModify(b 
*testing.B) { a := make([]int64, 1024) v := globl diff --git a/src/cmd/compile/internal/ppc64/ssa.go b/src/cmd/compile/internal/ppc64/ssa.go index 4a83a0bdd7..a5fbdaffba 100644 --- a/src/cmd/compile/internal/ppc64/ssa.go +++ b/src/cmd/compile/internal/ppc64/ssa.go @@ -677,7 +677,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Reg = v.Args[0].Reg() case ssa.OpPPC64ADDconst, ssa.OpPPC64ANDconst, ssa.OpPPC64ORconst, ssa.OpPPC64XORconst, - ssa.OpPPC64SRADconst, ssa.OpPPC64SRAWconst, ssa.OpPPC64SRDconst, ssa.OpPPC64SRWconst, ssa.OpPPC64SLDconst, ssa.OpPPC64SLWconst: + ssa.OpPPC64SRADconst, ssa.OpPPC64SRAWconst, ssa.OpPPC64SRDconst, ssa.OpPPC64SRWconst, ssa.OpPPC64SLDconst, ssa.OpPPC64SLWconst, ssa.OpPPC64EXTSWSLconst: p := s.Prog(v.Op.Asm()) p.Reg = v.Args[0].Reg() p.From.Type = obj.TYPE_CONST diff --git a/src/cmd/compile/internal/ssa/gen/PPC64.rules b/src/cmd/compile/internal/ssa/gen/PPC64.rules index 774d5096de..de30d003e6 100644 --- a/src/cmd/compile/internal/ssa/gen/PPC64.rules +++ b/src/cmd/compile/internal/ssa/gen/PPC64.rules @@ -1025,6 +1025,8 @@ (SLWconst [c] z:(MOVWZreg x)) && z.Uses == 1 && c < 24 => (CLRLSLWI [newPPC64ShiftAuxInt(c,8,31,32)] x) (SLWconst [c] z:(ANDconst [d] x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) => (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x) (SLWconst [c] z:(AND (MOVDconst [d]) x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) => (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x) +// special case for power9 +(SL(W|D)const [c] z:(MOVWreg x)) && c < 32 && objabi.GOPPC64 >= 9 => (EXTSWSLconst [c] x) // Lose widening ops fed to stores (MOVBstore [off] {sym} ptr (MOV(B|BZ|H|HZ|W|WZ)reg x) mem) => (MOVBstore [off] {sym} ptr x mem) diff --git a/src/cmd/compile/internal/ssa/gen/PPC64Ops.go b/src/cmd/compile/internal/ssa/gen/PPC64Ops.go index ed99c40cd2..28317928a8 100644 --- a/src/cmd/compile/internal/ssa/gen/PPC64Ops.go +++ b/src/cmd/compile/internal/ssa/gen/PPC64Ops.go @@ 
-223,6 +223,7 @@ func init() { {name: "ROTLconst", argLength: 1, reg: gp11, asm: "ROTL", aux: "Int64"}, // arg0 rotate left by auxInt bits {name: "ROTLWconst", argLength: 1, reg: gp11, asm: "ROTLW", aux: "Int64"}, // uint32(arg0) rotate left by auxInt bits + {name: "EXTSWSLconst", argLength: 1, reg: gp11, asm: "EXTSWSLI", aux: "Int64"}, {name: "CNTLZD", argLength: 1, reg: gp11, asm: "CNTLZD", clobberFlags: true}, // count leading zeros {name: "CNTLZW", argLength: 1, reg: gp11, asm: "CNTLZW", clobberFlags: true}, // count leading zeros (32 bit) diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 1fc0f7ea79..1fe00c7026 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1865,6 +1865,7 @@ const ( OpPPC64SLWconst OpPPC64ROTLconst OpPPC64ROTLWconst + OpPPC64EXTSWSLconst OpPPC64CNTLZD OpPPC64CNTLZW OpPPC64CNTTZD @@ -24849,6 +24850,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "EXTSWSLconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.AEXTSWSLI, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, { name: "CNTLZD", argLen: 1, diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go index 12b08824b5..29ec3992f2 100644 --- a/src/cmd/compile/internal/ssa/rewritePPC64.go +++ b/src/cmd/compile/internal/ssa/rewritePPC64.go @@ -12877,6 +12877,24 @@ func rewriteValuePPC64_OpPPC64SLDconst(v *Value) bool { } break } + // match: (SLDconst [c] z:(MOVWreg x)) + // cond: c < 32 && objabi.GOPPC64 >= 9 + // result: (EXTSWSLconst [c] x) + for { + c := auxIntToInt64(v.AuxInt) + z := v_0 + if z.Op != OpPPC64MOVWreg { + break + } + x := z.Args[0] + if !(c < 32 && 
objabi.GOPPC64 >= 9) { + break + } + v.reset(OpPPC64EXTSWSLconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } return false } func rewriteValuePPC64_OpPPC64SLW(v *Value) bool { @@ -13000,6 +13018,24 @@ func rewriteValuePPC64_OpPPC64SLWconst(v *Value) bool { } break } + // match: (SLWconst [c] z:(MOVWreg x)) + // cond: c < 32 && objabi.GOPPC64 >= 9 + // result: (EXTSWSLconst [c] x) + for { + c := auxIntToInt64(v.AuxInt) + z := v_0 + if z.Op != OpPPC64MOVWreg { + break + } + x := z.Args[0] + if !(c < 32 && objabi.GOPPC64 >= 9) { + break + } + v.reset(OpPPC64EXTSWSLconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } return false } func rewriteValuePPC64_OpPPC64SRAD(v *Value) bool { diff --git a/src/cmd/internal/obj/ppc64/a.out.go b/src/cmd/internal/obj/ppc64/a.out.go index f438803fb5..4c97302f83 100644 --- a/src/cmd/internal/obj/ppc64/a.out.go +++ b/src/cmd/internal/obj/ppc64/a.out.go @@ -733,6 +733,8 @@ const ( ASRAD ASRADCC ASRDCC + AEXTSWSLI + AEXTSWSLICC ASTDCCC ATD diff --git a/src/cmd/internal/obj/ppc64/anames.go b/src/cmd/internal/obj/ppc64/anames.go index accd87fe00..fca4b3e355 100644 --- a/src/cmd/internal/obj/ppc64/anames.go +++ b/src/cmd/internal/obj/ppc64/anames.go @@ -329,6 +329,8 @@ var Anames = []string{ "SRAD", "SRADCC", "SRDCC", + "EXTSWSLI", + "EXTSWSLICC", "STDCCC", "TD", "DWORD", diff --git a/src/cmd/internal/obj/ppc64/asm9.go b/src/cmd/internal/obj/ppc64/asm9.go index 60dda72507..9f06bdf8b3 100644 --- a/src/cmd/internal/obj/ppc64/asm9.go +++ b/src/cmd/internal/obj/ppc64/asm9.go @@ -160,6 +160,8 @@ var optab = []Optab{ {ASLD, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0}, {ASLD, C_SCON, C_REG, C_NONE, C_REG, 25, 4, 0}, {ASLD, C_SCON, C_NONE, C_NONE, C_REG, 25, 4, 0}, + {AEXTSWSLI, C_SCON, C_NONE, C_NONE, C_REG, 25, 4, 0}, + {AEXTSWSLI, C_SCON, C_REG, C_NONE, C_REG, 25, 4, 0}, {ASLW, C_SCON, C_REG, C_NONE, C_REG, 57, 4, 0}, {ASLW, C_SCON, C_NONE, C_NONE, C_REG, 57, 4, 0}, {ASRAW, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0}, @@ 
-1877,6 +1879,9 @@ func buildop(ctxt *obj.Link) { case ASRAW: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */ opset(ASRAWCC, r0) + case AEXTSWSLI: + opset(AEXTSWSLICC, r0) + case ASRAD: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */ opset(ASRADCC, r0) @@ -2189,49 +2194,54 @@ func AOP_RLDIC(op uint32, a uint32, s uint32, sh uint32, m uint32) uint32 { return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1 | (m&31)<<6 | ((m&32)>>5)<<5 } +func AOP_EXTSWSLI(op uint32, a uint32, s uint32, sh uint32) uint32 { + return op | (a&31)<<21 | (s&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1 +} + func AOP_ISEL(op uint32, t uint32, a uint32, b uint32, bc uint32) uint32 { return op | (t&31)<<21 | (a&31)<<16 | (b&31)<<11 | (bc&0x1F)<<6 } const ( /* each rhs is OPVCC(_, _, _, _) */ - OP_ADD = 31<<26 | 266<<1 | 0<<10 | 0 - OP_ADDI = 14<<26 | 0<<1 | 0<<10 | 0 - OP_ADDIS = 15<<26 | 0<<1 | 0<<10 | 0 - OP_ANDI = 28<<26 | 0<<1 | 0<<10 | 0 - OP_EXTSB = 31<<26 | 954<<1 | 0<<10 | 0 - OP_EXTSH = 31<<26 | 922<<1 | 0<<10 | 0 - OP_EXTSW = 31<<26 | 986<<1 | 0<<10 | 0 - OP_ISEL = 31<<26 | 15<<1 | 0<<10 | 0 - OP_MCRF = 19<<26 | 0<<1 | 0<<10 | 0 - OP_MCRFS = 63<<26 | 64<<1 | 0<<10 | 0 - OP_MCRXR = 31<<26 | 512<<1 | 0<<10 | 0 - OP_MFCR = 31<<26 | 19<<1 | 0<<10 | 0 - OP_MFFS = 63<<26 | 583<<1 | 0<<10 | 0 - OP_MFMSR = 31<<26 | 83<<1 | 0<<10 | 0 - OP_MFSPR = 31<<26 | 339<<1 | 0<<10 | 0 - OP_MFSR = 31<<26 | 595<<1 | 0<<10 | 0 - OP_MFSRIN = 31<<26 | 659<<1 | 0<<10 | 0 - OP_MTCRF = 31<<26 | 144<<1 | 0<<10 | 0 - OP_MTFSF = 63<<26 | 711<<1 | 0<<10 | 0 - OP_MTFSFI = 63<<26 | 134<<1 | 0<<10 | 0 - OP_MTMSR = 31<<26 | 146<<1 | 0<<10 | 0 - OP_MTMSRD = 31<<26 | 178<<1 | 0<<10 | 0 - OP_MTSPR = 31<<26 | 467<<1 | 0<<10 | 0 - OP_MTSR = 31<<26 | 210<<1 | 0<<10 | 0 - OP_MTSRIN = 31<<26 | 242<<1 | 0<<10 | 0 - OP_MULLW = 31<<26 | 235<<1 | 0<<10 | 0 - OP_MULLD = 31<<26 | 233<<1 | 0<<10 | 0 - OP_OR = 31<<26 | 444<<1 | 0<<10 | 0 - OP_ORI = 24<<26 | 0<<1 | 0<<10 | 0 - OP_ORIS = 25<<26 | 0<<1 | 0<<10 | 0 - OP_RLWINM = 21<<26 | 0<<1 | 0<<10 | 0 
- OP_RLWNM = 23<<26 | 0<<1 | 0<<10 | 0 - OP_SUBF = 31<<26 | 40<<1 | 0<<10 | 0 - OP_RLDIC = 30<<26 | 4<<1 | 0<<10 | 0 - OP_RLDICR = 30<<26 | 2<<1 | 0<<10 | 0 - OP_RLDICL = 30<<26 | 0<<1 | 0<<10 | 0 - OP_RLDCL = 30<<26 | 8<<1 | 0<<10 | 0 + OP_ADD = 31<<26 | 266<<1 | 0<<10 | 0 + OP_ADDI = 14<<26 | 0<<1 | 0<<10 | 0 + OP_ADDIS = 15<<26 | 0<<1 | 0<<10 | 0 + OP_ANDI = 28<<26 | 0<<1 | 0<<10 | 0 + OP_EXTSB = 31<<26 | 954<<1 | 0<<10 | 0 + OP_EXTSH = 31<<26 | 922<<1 | 0<<10 | 0 + OP_EXTSW = 31<<26 | 986<<1 | 0<<10 | 0 + OP_ISEL = 31<<26 | 15<<1 | 0<<10 | 0 + OP_MCRF = 19<<26 | 0<<1 | 0<<10 | 0 + OP_MCRFS = 63<<26 | 64<<1 | 0<<10 | 0 + OP_MCRXR = 31<<26 | 512<<1 | 0<<10 | 0 + OP_MFCR = 31<<26 | 19<<1 | 0<<10 | 0 + OP_MFFS = 63<<26 | 583<<1 | 0<<10 | 0 + OP_MFMSR = 31<<26 | 83<<1 | 0<<10 | 0 + OP_MFSPR = 31<<26 | 339<<1 | 0<<10 | 0 + OP_MFSR = 31<<26 | 595<<1 | 0<<10 | 0 + OP_MFSRIN = 31<<26 | 659<<1 | 0<<10 | 0 + OP_MTCRF = 31<<26 | 144<<1 | 0<<10 | 0 + OP_MTFSF = 63<<26 | 711<<1 | 0<<10 | 0 + OP_MTFSFI = 63<<26 | 134<<1 | 0<<10 | 0 + OP_MTMSR = 31<<26 | 146<<1 | 0<<10 | 0 + OP_MTMSRD = 31<<26 | 178<<1 | 0<<10 | 0 + OP_MTSPR = 31<<26 | 467<<1 | 0<<10 | 0 + OP_MTSR = 31<<26 | 210<<1 | 0<<10 | 0 + OP_MTSRIN = 31<<26 | 242<<1 | 0<<10 | 0 + OP_MULLW = 31<<26 | 235<<1 | 0<<10 | 0 + OP_MULLD = 31<<26 | 233<<1 | 0<<10 | 0 + OP_OR = 31<<26 | 444<<1 | 0<<10 | 0 + OP_ORI = 24<<26 | 0<<1 | 0<<10 | 0 + OP_ORIS = 25<<26 | 0<<1 | 0<<10 | 0 + OP_RLWINM = 21<<26 | 0<<1 | 0<<10 | 0 + OP_RLWNM = 23<<26 | 0<<1 | 0<<10 | 0 + OP_SUBF = 31<<26 | 40<<1 | 0<<10 | 0 + OP_RLDIC = 30<<26 | 4<<1 | 0<<10 | 0 + OP_RLDICR = 30<<26 | 2<<1 | 0<<10 | 0 + OP_RLDICL = 30<<26 | 0<<1 | 0<<10 | 0 + OP_RLDCL = 30<<26 | 8<<1 | 0<<10 | 0 + OP_EXTSWSLI = 31<<26 | 445<<2 ) func oclass(a *obj.Addr) int { @@ -2965,14 +2975,21 @@ func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) { case AROTL: a = int(0) op = OP_RLDICL + case AEXTSWSLI: + a = int(v) default: c.ctxt.Diag("unexpected op in sldi case\n%v", p) a = 0 
o1 = 0 } - o1 = AOP_RLDIC(op, uint32(p.To.Reg), uint32(r), uint32(v), uint32(a)) - if p.As == ASLDCC || p.As == ASRDCC { + if p.As == AEXTSWSLI || p.As == AEXTSWSLICC { + o1 = AOP_EXTSWSLI(OP_EXTSWSLI, uint32(r), uint32(p.To.Reg), uint32(v)) + + } else { + o1 = AOP_RLDIC(op, uint32(p.To.Reg), uint32(r), uint32(v), uint32(a)) + } + if p.As == ASLDCC || p.As == ASRDCC || p.As == AEXTSWSLICC { o1 |= 1 // Set the condition code bit } @@ -4350,6 +4367,11 @@ func (c *ctxt9) oprrr(a obj.As) uint32 { case ASRADCC: return OPVCC(31, 794, 0, 1) + case AEXTSWSLI: + return OPVCC(31, 445, 0, 0) + case AEXTSWSLICC: + return OPVCC(31, 445, 0, 1) + case ASRW: return OPVCC(31, 536, 0, 0) case ASRWCC: @@ -5013,6 +5035,10 @@ func (c *ctxt9) opirr(a obj.As) uint32 { return OPVCC(31, (413 << 1), 0, 0) case ASRADCC: return OPVCC(31, (413 << 1), 0, 1) + case AEXTSWSLI: + return OPVCC(31, 445, 0, 0) + case AEXTSWSLICC: + return OPVCC(31, 445, 0, 1) case ASTSW: return OPVCC(31, 725, 0, 0) diff --git a/test/codegen/shift.go b/test/codegen/shift.go index 32214851b5..abc4b091c9 100644 --- a/test/codegen/shift.go +++ b/test/codegen/shift.go @@ -182,7 +182,7 @@ func checkUnneededTrunc(tab *[100000]uint32, d uint64, v uint32, h uint16, b byt return f, g } -func checkCombinedShifts(v8 uint8, v16 uint16, v32 uint32, v64 uint64) (uint8, uint16, uint32, uint64) { +func checkCombinedShifts(v8 uint8, v16 uint16, v32 uint32, x32 int32, v64 uint64) (uint8, uint16, uint32, uint64, int64) { // ppc64le:-"AND","CLRLSLWI" // ppc64:-"AND","CLRLSLWI" @@ -202,7 +202,10 @@ func checkCombinedShifts(v8 uint8, v16 uint16, v32 uint32, v64 uint64) (uint8, u // ppc64le:-"AND","CLRLSLDI" // ppc64:-"AND","CLRLSLDI" i := (v64 & 0xFFFFFFFF) << 5 - return f, g, h, i + // ppc64le/power9:-"SLD","EXTSWSLI" + // ppc64/power9:-"SLD","EXTSWSLI" + j := int64(x32+32)*8 + return f, g, h, i, j } func checkWidenAfterShift(v int64, u uint64) (int64, uint64) { From f33263d11aa0f5e2668bfc6a0805e4edee17b03c Mon Sep 17 00:00:00 2001 From: 
Roland Shoemaker Date: Mon, 28 Sep 2020 08:59:13 -0700 Subject: [PATCH 046/281] crypto/x509: hardcode RSA PSS parameters rather than generating them Rather than generating the three possible RSA PSS parameters each time they are needed just hardcode them and pick the required one based on the hash function. Fixes #41407 Change-Id: Id43bdaf40b3ca82c4c04c6588e3b643f63107657 Reviewed-on: https://go-review.googlesource.com/c/go/+/258037 Run-TryBot: Roland Shoemaker TryBot-Result: Go Bot Trust: Roland Shoemaker Reviewed-by: Filippo Valsorda --- src/crypto/x509/x509.go | 60 +++++++++--------------------------- src/crypto/x509/x509_test.go | 52 +++++++++++++++++++++++++++++++ 2 files changed, 66 insertions(+), 46 deletions(-) diff --git a/src/crypto/x509/x509.go b/src/crypto/x509/x509.go index 49ac059a0e..16655a3c70 100644 --- a/src/crypto/x509/x509.go +++ b/src/crypto/x509/x509.go @@ -351,6 +351,19 @@ var signatureAlgorithmDetails = []struct { {PureEd25519, "Ed25519", oidSignatureEd25519, Ed25519, crypto.Hash(0) /* no pre-hashing */}, } +// hashToPSSParameters contains the DER encoded RSA PSS parameters for the +// SHA256, SHA384, and SHA512 hashes as defined in RFC 3447, Appendix A.2.3. 
+// The parameters contain the following values: +// * hashAlgorithm contains the associated hash identifier with NULL parameters +// * maskGenAlgorithm always contains the default mgf1SHA1 identifier +// * saltLength contains the length of the associated hash +// * trailerField always contains the default trailerFieldBC value +var hashToPSSParameters = map[crypto.Hash]asn1.RawValue{ + crypto.SHA256: asn1.RawValue{FullBytes: []byte{48, 52, 160, 15, 48, 13, 6, 9, 96, 134, 72, 1, 101, 3, 4, 2, 1, 5, 0, 161, 28, 48, 26, 6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 8, 48, 13, 6, 9, 96, 134, 72, 1, 101, 3, 4, 2, 1, 5, 0, 162, 3, 2, 1, 32}}, + crypto.SHA384: asn1.RawValue{FullBytes: []byte{48, 52, 160, 15, 48, 13, 6, 9, 96, 134, 72, 1, 101, 3, 4, 2, 2, 5, 0, 161, 28, 48, 26, 6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 8, 48, 13, 6, 9, 96, 134, 72, 1, 101, 3, 4, 2, 2, 5, 0, 162, 3, 2, 1, 48}}, + crypto.SHA512: asn1.RawValue{FullBytes: []byte{48, 52, 160, 15, 48, 13, 6, 9, 96, 134, 72, 1, 101, 3, 4, 2, 3, 5, 0, 161, 28, 48, 26, 6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 8, 48, 13, 6, 9, 96, 134, 72, 1, 101, 3, 4, 2, 3, 5, 0, 162, 3, 2, 1, 64}}, +} + // pssParameters reflects the parameters in an AlgorithmIdentifier that // specifies RSA PSS. See RFC 3447, Appendix A.2.3. type pssParameters struct { @@ -363,51 +376,6 @@ type pssParameters struct { TrailerField int `asn1:"optional,explicit,tag:3,default:1"` } -// rsaPSSParameters returns an asn1.RawValue suitable for use as the Parameters -// in an AlgorithmIdentifier that specifies RSA PSS. 
-func rsaPSSParameters(hashFunc crypto.Hash) asn1.RawValue { - var hashOID asn1.ObjectIdentifier - - switch hashFunc { - case crypto.SHA256: - hashOID = oidSHA256 - case crypto.SHA384: - hashOID = oidSHA384 - case crypto.SHA512: - hashOID = oidSHA512 - } - - params := pssParameters{ - Hash: pkix.AlgorithmIdentifier{ - Algorithm: hashOID, - Parameters: asn1.NullRawValue, - }, - MGF: pkix.AlgorithmIdentifier{ - Algorithm: oidMGF1, - }, - SaltLength: hashFunc.Size(), - TrailerField: 1, - } - - mgf1Params := pkix.AlgorithmIdentifier{ - Algorithm: hashOID, - Parameters: asn1.NullRawValue, - } - - var err error - params.MGF.Parameters.FullBytes, err = asn1.Marshal(mgf1Params) - if err != nil { - panic(err) - } - - serialized, err := asn1.Marshal(params) - if err != nil { - panic(err) - } - - return asn1.RawValue{FullBytes: serialized} -} - func getSignatureAlgorithmFromAI(ai pkix.AlgorithmIdentifier) SignatureAlgorithm { if ai.Algorithm.Equal(oidSignatureEd25519) { // RFC 8410, Section 3 @@ -2015,7 +1983,7 @@ func signingParamsForPublicKey(pub interface{}, requestedSigAlgo SignatureAlgori return } if requestedSigAlgo.isRSAPSS() { - sigAlgo.Parameters = rsaPSSParameters(hashFunc) + sigAlgo.Parameters = hashToPSSParameters[hashFunc] } found = true break diff --git a/src/crypto/x509/x509_test.go b/src/crypto/x509/x509_test.go index 840f535e55..d0315900e4 100644 --- a/src/crypto/x509/x509_test.go +++ b/src/crypto/x509/x509_test.go @@ -2702,3 +2702,55 @@ func TestCreateRevocationList(t *testing.T) { }) } } + +func TestRSAPSAParameters(t *testing.T) { + generateParams := func(hashFunc crypto.Hash) []byte { + var hashOID asn1.ObjectIdentifier + + switch hashFunc { + case crypto.SHA256: + hashOID = oidSHA256 + case crypto.SHA384: + hashOID = oidSHA384 + case crypto.SHA512: + hashOID = oidSHA512 + } + + params := pssParameters{ + Hash: pkix.AlgorithmIdentifier{ + Algorithm: hashOID, + Parameters: asn1.NullRawValue, + }, + MGF: pkix.AlgorithmIdentifier{ + Algorithm: oidMGF1, + }, 
+ SaltLength: hashFunc.Size(), + TrailerField: 1, + } + + mgf1Params := pkix.AlgorithmIdentifier{ + Algorithm: hashOID, + Parameters: asn1.NullRawValue, + } + + var err error + params.MGF.Parameters.FullBytes, err = asn1.Marshal(mgf1Params) + if err != nil { + t.Fatalf("failed to marshal MGF parameters: %s", err) + } + + serialized, err := asn1.Marshal(params) + if err != nil { + t.Fatalf("failed to marshal parameters: %s", err) + } + + return serialized + } + + for h, params := range hashToPSSParameters { + generated := generateParams(h) + if !bytes.Equal(params.FullBytes, generated) { + t.Errorf("hardcoded parameters for %s didn't match generated parameters: got (generated) %x, wanted (hardcoded) %x", h, generated, params.FullBytes) + } + } +} From c4971a14a7cac78849f4d0908e7140263129bdf7 Mon Sep 17 00:00:00 2001 From: Emmanuel T Odeke Date: Thu, 24 Sep 2020 15:33:45 -0700 Subject: [PATCH 047/281] testing: add benchmark for TB.Helper Adds a benchmark for TB.Helper, to use as a judge of future improvements like CL 231717. 
Change-Id: I17c40d482fc12caa3eb2c1cda39fd8c42356b422 Reviewed-on: https://go-review.googlesource.com/c/go/+/257317 Run-TryBot: Emmanuel Odeke TryBot-Result: Go Bot Reviewed-by: Tobias Klauser Trust: Tobias Klauser Trust: Emmanuel Odeke --- src/testing/helper_test.go | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/src/testing/helper_test.go b/src/testing/helper_test.go index 7ce58c67fb..8858196cf0 100644 --- a/src/testing/helper_test.go +++ b/src/testing/helper_test.go @@ -70,3 +70,34 @@ func TestTBHelperParallel(t *T) { t.Errorf("got output line %q; want %q", got, want) } } + +type noopWriter int + +func (nw *noopWriter) Write(b []byte) (int, error) { return len(b), nil } + +func BenchmarkTBHelper(b *B) { + w := noopWriter(0) + ctx := newTestContext(1, newMatcher(regexp.MatchString, "", "")) + t1 := &T{ + common: common{ + signal: make(chan bool), + w: &w, + }, + context: ctx, + } + f1 := func() { + t1.Helper() + } + f2 := func() { + t1.Helper() + } + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + if i&1 == 0 { + f1() + } else { + f2() + } + } +} From ad0ab812f8b80416c92ed227974e3194e98f4cdc Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Mon, 28 Sep 2020 12:19:56 -0700 Subject: [PATCH 048/281] cmd/compile: fix type checking of "make" arguments As part of type checking make's arguments, we were converting untyped float and complex constant arguments to integers. However, we were doing this without concern for whether the argument was a declared constant. Thus a call like "make([]T, n)" could change n from an untyped float or untyped complex to an untyped integer. The fix here is to simply change checkmake to not call SetVal, which will be handled by defaultlit anyway. However, we also need to properly return the defaultlit result value to the caller, so checkmake's *Node parameter is also changed to **Node. Fixes #41680. 
Change-Id: I858927a052f384ec38684570d37b10a6906961f7 Reviewed-on: https://go-review.googlesource.com/c/go/+/257966 Trust: Matthew Dempsky Run-TryBot: Matthew Dempsky Reviewed-by: Cuong Manh Le TryBot-Result: Go Bot --- src/cmd/compile/internal/gc/typecheck.go | 16 +++++++++------- test/fixedbugs/issue41680.go | 21 +++++++++++++++++++++ 2 files changed, 30 insertions(+), 7 deletions(-) create mode 100644 test/fixedbugs/issue41680.go diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 8e87fc9df0..0eb0dae373 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -1770,7 +1770,7 @@ func typecheck1(n *Node, top int) (res *Node) { n.Type = nil return n } - if !checkmake(t, "len", l) || r != nil && !checkmake(t, "cap", r) { + if !checkmake(t, "len", &l) || r != nil && !checkmake(t, "cap", &r) { n.Type = nil return n } @@ -1794,7 +1794,7 @@ func typecheck1(n *Node, top int) (res *Node) { n.Type = nil return n } - if !checkmake(t, "size", l) { + if !checkmake(t, "size", &l) { n.Type = nil return n } @@ -1815,7 +1815,7 @@ func typecheck1(n *Node, top int) (res *Node) { n.Type = nil return n } - if !checkmake(t, "buffer", l) { + if !checkmake(t, "buffer", &l) { n.Type = nil return n } @@ -3729,7 +3729,8 @@ ret: n.SetWalkdef(1) } -func checkmake(t *types.Type, arg string, n *Node) bool { +func checkmake(t *types.Type, arg string, np **Node) bool { + n := *np if !n.Type.IsInteger() && n.Type.Etype != TIDEAL { yyerror("non-integer %s argument in make(%v) - %v", arg, t, n.Type) return false @@ -3739,12 +3740,12 @@ func checkmake(t *types.Type, arg string, n *Node) bool { // to avoid redundant "constant NNN overflows int" errors. 
switch consttype(n) { case CTINT, CTRUNE, CTFLT, CTCPLX: - n.SetVal(toint(n.Val())) - if n.Val().U.(*Mpint).CmpInt64(0) < 0 { + v := toint(n.Val()).U.(*Mpint) + if v.CmpInt64(0) < 0 { yyerror("negative %s argument in make(%v)", arg, t) return false } - if n.Val().U.(*Mpint).Cmp(maxintval[TINT]) > 0 { + if v.Cmp(maxintval[TINT]) > 0 { yyerror("%s argument too large in make(%v)", arg, t) return false } @@ -3756,6 +3757,7 @@ func checkmake(t *types.Type, arg string, n *Node) bool { // for instance, indexlit might be called here and incorporate some // of the bounds checks done for make. n = defaultlit(n, types.Types[TINT]) + *np = n return true } diff --git a/test/fixedbugs/issue41680.go b/test/fixedbugs/issue41680.go new file mode 100644 index 0000000000..9dfeb7d503 --- /dev/null +++ b/test/fixedbugs/issue41680.go @@ -0,0 +1,21 @@ +// compile + +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +func F(s string) bool { + const m = 16 + const n = 1e5 + _ = make([]int, n) + return len(s) < n*m +} + +func G() { + const n = 1e5 + _ = make([]int, n) + f := n + var _ float64 = f +} From af18bce87cc7ee1ffc68f91abefa241ab209539e Mon Sep 17 00:00:00 2001 From: Cherry Zhang Date: Sun, 20 Sep 2020 23:29:20 -0400 Subject: [PATCH 049/281] cmd/link: consider interface conversions only in reachable code The linker prunes methods that are not directly reachable if the receiver type is never converted to interface. Currently, this "never" is too strong: it is invalidated even if the interface conversion is in an unreachable function. This CL improves it by only considering interface conversions in reachable code. To do that, we introduce a marker relocation R_USEIFACE, which marks the target symbol as UsedInIface if the source symbol is reached. 
binary size before after cmd/compile 18897528 18887400 cmd/go 13607372 13470652 Change-Id: I66c6b69eeff9ae02d84d2e6f2bc7f1b29dd53910 Reviewed-on: https://go-review.googlesource.com/c/go/+/256797 Trust: Cherry Zhang Reviewed-by: Jeremy Faller Reviewed-by: Than McIntosh --- src/cmd/compile/internal/gc/pgen.go | 8 ++- src/cmd/compile/internal/gc/sinit.go | 2 +- src/cmd/compile/internal/gc/walk.go | 15 +++-- src/cmd/internal/obj/s390x/asmz.go | 6 +- src/cmd/internal/obj/wasm/wasmobj.go | 2 + src/cmd/internal/obj/x86/asm6.go | 7 +- src/cmd/internal/objabi/reloctype.go | 5 ++ src/cmd/internal/objabi/reloctype_string.go | 66 ++++++++++++++++++- src/cmd/link/internal/ld/data.go | 6 ++ src/cmd/link/internal/ld/deadcode.go | 14 ++++ .../ld/testdata/deadcode/ifacemethod.go | 9 ++- src/cmd/link/internal/loader/loader.go | 1 + src/cmd/link/internal/wasm/asm.go | 3 + 13 files changed, 129 insertions(+), 15 deletions(-) diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go index 74262595b0..52b1ed351d 100644 --- a/src/cmd/compile/internal/gc/pgen.go +++ b/src/cmd/compile/internal/gc/pgen.go @@ -231,6 +231,11 @@ func compile(fn *Node) { return } + // Set up the function's LSym early to avoid data races with the assemblers. + // Do this before walk, as walk needs the LSym to set attributes/relocations + // (e.g. in markTypeUsedInInterface). + fn.Func.initLSym(true) + walk(fn) if nerrors != 0 { return @@ -250,9 +255,6 @@ func compile(fn *Node) { return } - // Set up the function's LSym early to avoid data races with the assemblers. - fn.Func.initLSym(true) - // Make sure type syms are declared for all types that might // be types of stack objects. 
We need to do this here // because symbols must be allocated before the parallel diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go index 71ed558461..af19a96bbc 100644 --- a/src/cmd/compile/internal/gc/sinit.go +++ b/src/cmd/compile/internal/gc/sinit.go @@ -278,7 +278,7 @@ func (s *InitSchedule) staticassign(l *Node, r *Node) bool { return Isconst(val, CTNIL) } - markTypeUsedInInterface(val.Type) + markTypeUsedInInterface(val.Type, l.Sym.Linksym()) var itab *Node if l.Type.IsEmptyInterface() { diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 933f16d9a0..d238cc2f45 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -805,8 +805,8 @@ opswitch: fromType := n.Left.Type toType := n.Type - if !fromType.IsInterface() { - markTypeUsedInInterface(fromType) + if !fromType.IsInterface() && !Curfn.Func.Nname.isBlank() { // skip unnamed functions (func _()) + markTypeUsedInInterface(fromType, Curfn.Func.lsym) } // typeword generates the type word of the interface value. @@ -1621,8 +1621,13 @@ opswitch: // markTypeUsedInInterface marks that type t is converted to an interface. // This information is used in the linker in dead method elimination. -func markTypeUsedInInterface(t *types.Type) { - typenamesym(t).Linksym().Set(obj.AttrUsedInIface, true) +func markTypeUsedInInterface(t *types.Type, from *obj.LSym) { + tsym := typenamesym(t).Linksym() + // Emit a marker relocation. The linker will know the type is converted + // to an interface if "from" is reachable. + r := obj.Addrel(from) + r.Sym = tsym + r.Type = objabi.R_USEIFACE } // rtconvfn returns the parameter and result types that will be used by a @@ -3687,6 +3692,8 @@ func usemethod(n *Node) { // Also need to check for reflect package itself (see Issue #38515). 
if s := res0.Type.Sym; s != nil && s.Name == "Method" && isReflectPkg(s.Pkg) { Curfn.Func.SetReflectMethod(true) + // The LSym is initialized at this point. We need to set the attribute on the LSym. + Curfn.Func.lsym.Set(obj.AttrReflectMethod, true) } } diff --git a/src/cmd/internal/obj/s390x/asmz.go b/src/cmd/internal/obj/s390x/asmz.go index 68f01f1c5d..cb3a2c3196 100644 --- a/src/cmd/internal/obj/s390x/asmz.go +++ b/src/cmd/internal/obj/s390x/asmz.go @@ -461,6 +461,7 @@ func spanz(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { buffer := make([]byte, 0) changed := true loop := 0 + nrelocs0 := len(c.cursym.R) for changed { if loop > 100 { c.ctxt.Diag("stuck in spanz loop") @@ -468,7 +469,10 @@ func spanz(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { } changed = false buffer = buffer[:0] - c.cursym.R = make([]obj.Reloc, 0) + for i := range c.cursym.R[nrelocs0:] { + c.cursym.R[nrelocs0+i] = obj.Reloc{} + } + c.cursym.R = c.cursym.R[:nrelocs0] // preserve marker relocations generated by the compiler for p := c.cursym.Func.Text; p != nil; p = p.Link { pc := int64(len(buffer)) if pc != p.Pc { diff --git a/src/cmd/internal/obj/wasm/wasmobj.go b/src/cmd/internal/obj/wasm/wasmobj.go index 70e8e51e65..a9e093a8ad 100644 --- a/src/cmd/internal/obj/wasm/wasmobj.go +++ b/src/cmd/internal/obj/wasm/wasmobj.go @@ -1007,6 +1007,7 @@ func assemble(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) { panic("bad name for Call") } r := obj.Addrel(s) + r.Siz = 1 // actually variable sized r.Off = int32(w.Len()) r.Type = objabi.R_CALL if p.Mark&WasmImport != 0 { @@ -1033,6 +1034,7 @@ func assemble(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) { case AI32Const, AI64Const: if p.From.Name == obj.NAME_EXTERN { r := obj.Addrel(s) + r.Siz = 1 // actually variable sized r.Off = int32(w.Len()) r.Type = objabi.R_ADDR r.Sym = p.From.Sym diff --git a/src/cmd/internal/obj/x86/asm6.go b/src/cmd/internal/obj/x86/asm6.go index fb99c620ad..4940c79eaa 100644 --- 
a/src/cmd/internal/obj/x86/asm6.go +++ b/src/cmd/internal/obj/x86/asm6.go @@ -2100,14 +2100,15 @@ func span6(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) { var c int32 errors := ctxt.Errors var nops []nopPad // Padding for a particular assembly (reuse slice storage if multiple assemblies) + nrelocs0 := len(s.R) for { // This loop continues while there are reasons to re-assemble // whole block, like the presence of long forward jumps. reAssemble := false - for i := range s.R { - s.R[i] = obj.Reloc{} + for i := range s.R[nrelocs0:] { + s.R[nrelocs0+i] = obj.Reloc{} } - s.R = s.R[:0] + s.R = s.R[:nrelocs0] // preserve marker relocations generated by the compiler s.P = s.P[:0] c = 0 var pPrev *obj.Prog diff --git a/src/cmd/internal/objabi/reloctype.go b/src/cmd/internal/objabi/reloctype.go index f029a3c396..1e328d659f 100644 --- a/src/cmd/internal/objabi/reloctype.go +++ b/src/cmd/internal/objabi/reloctype.go @@ -89,6 +89,11 @@ const ( // should be linked into the final binary, even if there are no other // direct references. (This is used for types reachable by reflection.) R_USETYPE + // R_USEIFACE marks a type is converted to an interface in the function this + // relocation is applied to. The target is a type descriptor. + // This is a marker relocation (0-sized), for the linker's reachabililty + // analysis. + R_USEIFACE // R_METHODOFF resolves to a 32-bit offset from the beginning of the section // holding the data being relocated to the referenced symbol. 
// It is a variant of R_ADDROFF used when linking from the uncommonType of a diff --git a/src/cmd/internal/objabi/reloctype_string.go b/src/cmd/internal/objabi/reloctype_string.go index 83dfe71e07..caf24eea58 100644 --- a/src/cmd/internal/objabi/reloctype_string.go +++ b/src/cmd/internal/objabi/reloctype_string.go @@ -4,9 +4,71 @@ package objabi import "strconv" -const _RelocType_name = "R_ADDRR_ADDRPOWERR_ADDRARM64R_ADDRMIPSR_ADDROFFR_WEAKADDROFFR_SIZER_CALLR_CALLARMR_CALLARM64R_CALLINDR_CALLPOWERR_CALLMIPSR_CALLRISCVR_CONSTR_PCRELR_TLS_LER_TLS_IER_GOTOFFR_PLT0R_PLT1R_PLT2R_USEFIELDR_USETYPER_METHODOFFR_POWER_TOCR_GOTPCRELR_JMPMIPSR_DWARFSECREFR_DWARFFILEREFR_ARM64_TLS_LER_ARM64_TLS_IER_ARM64_GOTPCRELR_ARM64_GOTR_ARM64_PCRELR_ARM64_LDST8R_ARM64_LDST32R_ARM64_LDST64R_ARM64_LDST128R_POWER_TLS_LER_POWER_TLS_IER_POWER_TLSR_ADDRPOWER_DSR_ADDRPOWER_GOTR_ADDRPOWER_PCRELR_ADDRPOWER_TOCRELR_ADDRPOWER_TOCREL_DSR_RISCV_PCREL_ITYPER_RISCV_PCREL_STYPER_PCRELDBLR_ADDRMIPSUR_ADDRMIPSTLSR_ADDRCUOFFR_WASMIMPORTR_XCOFFREF" +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[R_ADDR-1] + _ = x[R_ADDRPOWER-2] + _ = x[R_ADDRARM64-3] + _ = x[R_ADDRMIPS-4] + _ = x[R_ADDROFF-5] + _ = x[R_WEAKADDROFF-6] + _ = x[R_SIZE-7] + _ = x[R_CALL-8] + _ = x[R_CALLARM-9] + _ = x[R_CALLARM64-10] + _ = x[R_CALLIND-11] + _ = x[R_CALLPOWER-12] + _ = x[R_CALLMIPS-13] + _ = x[R_CALLRISCV-14] + _ = x[R_CONST-15] + _ = x[R_PCREL-16] + _ = x[R_TLS_LE-17] + _ = x[R_TLS_IE-18] + _ = x[R_GOTOFF-19] + _ = x[R_PLT0-20] + _ = x[R_PLT1-21] + _ = x[R_PLT2-22] + _ = x[R_USEFIELD-23] + _ = x[R_USETYPE-24] + _ = x[R_USEIFACE-25] + _ = x[R_METHODOFF-26] + _ = x[R_POWER_TOC-27] + _ = x[R_GOTPCREL-28] + _ = x[R_JMPMIPS-29] + _ = x[R_DWARFSECREF-30] + _ = x[R_DWARFFILEREF-31] + _ = x[R_ARM64_TLS_LE-32] + _ = x[R_ARM64_TLS_IE-33] + _ = x[R_ARM64_GOTPCREL-34] + _ = x[R_ARM64_GOT-35] + _ = x[R_ARM64_PCREL-36] + _ = x[R_ARM64_LDST8-37] + _ = x[R_ARM64_LDST32-38] + _ = x[R_ARM64_LDST64-39] + _ = x[R_ARM64_LDST128-40] + _ = x[R_POWER_TLS_LE-41] + _ = x[R_POWER_TLS_IE-42] + _ = x[R_POWER_TLS-43] + _ = x[R_ADDRPOWER_DS-44] + _ = x[R_ADDRPOWER_GOT-45] + _ = x[R_ADDRPOWER_PCREL-46] + _ = x[R_ADDRPOWER_TOCREL-47] + _ = x[R_ADDRPOWER_TOCREL_DS-48] + _ = x[R_RISCV_PCREL_ITYPE-49] + _ = x[R_RISCV_PCREL_STYPE-50] + _ = x[R_PCRELDBL-51] + _ = x[R_ADDRMIPSU-52] + _ = x[R_ADDRMIPSTLS-53] + _ = x[R_ADDRCUOFF-54] + _ = x[R_WASMIMPORT-55] + _ = x[R_XCOFFREF-56] +} -var _RelocType_index = [...]uint16{0, 6, 17, 28, 38, 47, 60, 66, 72, 81, 92, 101, 112, 122, 133, 140, 147, 155, 163, 171, 177, 183, 189, 199, 208, 219, 230, 240, 249, 262, 276, 290, 304, 320, 331, 344, 357, 371, 385, 400, 414, 428, 439, 453, 468, 485, 503, 524, 543, 562, 572, 583, 596, 607, 619, 629} +const _RelocType_name = 
"R_ADDRR_ADDRPOWERR_ADDRARM64R_ADDRMIPSR_ADDROFFR_WEAKADDROFFR_SIZER_CALLR_CALLARMR_CALLARM64R_CALLINDR_CALLPOWERR_CALLMIPSR_CALLRISCVR_CONSTR_PCRELR_TLS_LER_TLS_IER_GOTOFFR_PLT0R_PLT1R_PLT2R_USEFIELDR_USETYPER_USEIFACER_METHODOFFR_POWER_TOCR_GOTPCRELR_JMPMIPSR_DWARFSECREFR_DWARFFILEREFR_ARM64_TLS_LER_ARM64_TLS_IER_ARM64_GOTPCRELR_ARM64_GOTR_ARM64_PCRELR_ARM64_LDST8R_ARM64_LDST32R_ARM64_LDST64R_ARM64_LDST128R_POWER_TLS_LER_POWER_TLS_IER_POWER_TLSR_ADDRPOWER_DSR_ADDRPOWER_GOTR_ADDRPOWER_PCRELR_ADDRPOWER_TOCRELR_ADDRPOWER_TOCREL_DSR_RISCV_PCREL_ITYPER_RISCV_PCREL_STYPER_PCRELDBLR_ADDRMIPSUR_ADDRMIPSTLSR_ADDRCUOFFR_WASMIMPORTR_XCOFFREF" + +var _RelocType_index = [...]uint16{0, 6, 17, 28, 38, 47, 60, 66, 72, 81, 92, 101, 112, 122, 133, 140, 147, 155, 163, 171, 177, 183, 189, 199, 208, 218, 229, 240, 250, 259, 272, 286, 300, 314, 330, 341, 354, 367, 381, 395, 410, 424, 438, 449, 463, 478, 495, 513, 534, 553, 572, 582, 593, 606, 617, 629, 639} func (i RelocType) String() string { i -= 1 diff --git a/src/cmd/link/internal/ld/data.go b/src/cmd/link/internal/ld/data.go index a730125cf2..0a3418bfc9 100644 --- a/src/cmd/link/internal/ld/data.go +++ b/src/cmd/link/internal/ld/data.go @@ -698,6 +698,9 @@ func windynrelocsym(ctxt *Link, rel *loader.SymbolBuilder, s loader.Sym) { relocs := ctxt.loader.Relocs(s) for ri := 0; ri < relocs.Count(); ri++ { r := relocs.At(ri) + if r.IsMarker() { + continue // skip marker relocations + } targ := r.Sym() if targ == 0 { continue @@ -775,6 +778,9 @@ func dynrelocsym(ctxt *Link, s loader.Sym) { relocs := ldr.Relocs(s) for ri := 0; ri < relocs.Count(); ri++ { r := relocs.At(ri) + if r.IsMarker() { + continue // skip marker relocations + } if ctxt.BuildMode == BuildModePIE && ctxt.LinkMode == LinkInternal { // It's expected that some relocations will be done // later by relocsym (R_TLS_LE, R_ADDROFF), so diff --git a/src/cmd/link/internal/ld/deadcode.go b/src/cmd/link/internal/ld/deadcode.go index 7f14aa3d27..816a23b9a7 100644 --- 
a/src/cmd/link/internal/ld/deadcode.go +++ b/src/cmd/link/internal/ld/deadcode.go @@ -153,6 +153,20 @@ func (d *deadcodePass) flood() { // do nothing for now as we still load all type symbols. continue } + if t == objabi.R_USEIFACE { + // R_USEIFACE is a marker relocation that tells the linker the type is + // converted to an interface, i.e. should have UsedInIface set. See the + // comment below for why we need to unset the Reachable bit and re-mark it. + rs := r.Sym() + if !d.ldr.AttrUsedInIface(rs) { + d.ldr.SetAttrUsedInIface(rs, true) + if d.ldr.AttrReachable(rs) { + d.ldr.SetAttrReachable(rs, false) + d.mark(rs, symIdx) + } + } + continue + } rs := r.Sym() if isgotype && usedInIface && d.ldr.IsGoType(rs) && !d.ldr.AttrUsedInIface(rs) { // If a type is converted to an interface, it is possible to obtain an diff --git a/src/cmd/link/internal/ld/testdata/deadcode/ifacemethod.go b/src/cmd/link/internal/ld/testdata/deadcode/ifacemethod.go index b62f18c342..32a24cf6f0 100644 --- a/src/cmd/link/internal/ld/testdata/deadcode/ifacemethod.go +++ b/src/cmd/link/internal/ld/testdata/deadcode/ifacemethod.go @@ -18,6 +18,13 @@ var p *T var e interface{} func main() { - p = new(T) // used T, but never converted to interface + p = new(T) // used T, but never converted to interface in any reachable code e.(I).M() // used I and I.M } + +func Unused() { // convert T to interface, but this function is not reachable + var i I = T(0) + i.M() +} + +var Unused2 interface{} = T(1) // convert T to interface, in an unreachable global initializer diff --git a/src/cmd/link/internal/loader/loader.go b/src/cmd/link/internal/loader/loader.go index 43a0352e0b..ea99233f67 100644 --- a/src/cmd/link/internal/loader/loader.go +++ b/src/cmd/link/internal/loader/loader.go @@ -63,6 +63,7 @@ type Reloc struct { func (rel Reloc) Type() objabi.RelocType { return objabi.RelocType(rel.Reloc.Type()) + rel.typ } func (rel Reloc) Sym() Sym { return rel.l.resolve(rel.r, rel.Reloc.Sym()) } func (rel Reloc) 
SetSym(s Sym) { rel.Reloc.SetSym(goobj.SymRef{PkgIdx: 0, SymIdx: uint32(s)}) } +func (rel Reloc) IsMarker() bool { return rel.Siz() == 0 } func (rel Reloc) SetType(t objabi.RelocType) { if t != objabi.RelocType(uint8(t)) { diff --git a/src/cmd/link/internal/wasm/asm.go b/src/cmd/link/internal/wasm/asm.go index 3bd56a6e3a..31851fbb56 100644 --- a/src/cmd/link/internal/wasm/asm.go +++ b/src/cmd/link/internal/wasm/asm.go @@ -167,6 +167,9 @@ func asmb2(ctxt *ld.Link, ldr *loader.Loader) { off := int32(0) for ri := 0; ri < relocs.Count(); ri++ { r := relocs.At(ri) + if r.Siz() == 0 { + continue // skip marker relocations + } wfn.Write(P[off:r.Off()]) off = r.Off() rs := ldr.ResolveABIAlias(r.Sym()) From 1f4d035178d2d792a74b6b872f6a213bf5fd9326 Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Mon, 28 Sep 2020 10:11:06 +0200 Subject: [PATCH 050/281] runtime: initialise cpu.HWCap on netbsd/arm64 NetBSD does not supply AT_HWCAP, however we still need to initialise cpu.HWCaps. For now specify the bare minimum until we add some form of capabilities detection. See https://golang.org/issue/30824#issuecomment-494901591 Follows CL 174129 which did the same for openbsd/arm64. 
Updates #30824 Change-Id: I43a86b583bc60d259a66772703de06970124bb7f Reviewed-on: https://go-review.googlesource.com/c/go/+/257998 Trust: Tobias Klauser Trust: Benny Siegert Run-TryBot: Tobias Klauser Reviewed-by: Ian Lance Taylor Reviewed-by: Benny Siegert TryBot-Result: Go Bot --- src/runtime/os_netbsd.go | 1 + src/runtime/os_netbsd_386.go | 3 +++ src/runtime/os_netbsd_amd64.go | 3 +++ src/runtime/os_netbsd_arm.go | 3 +++ src/runtime/os_netbsd_arm64.go | 12 +++++++++++- 5 files changed, 21 insertions(+), 1 deletion(-) diff --git a/src/runtime/os_netbsd.go b/src/runtime/os_netbsd.go index f7f90cedc1..c4c3d8e2fe 100644 --- a/src/runtime/os_netbsd.go +++ b/src/runtime/os_netbsd.go @@ -359,6 +359,7 @@ func sysargs(argc int32, argv **byte) { // now argv+n is auxv auxv := (*[1 << 28]uintptr)(add(unsafe.Pointer(argv), uintptr(n)*sys.PtrSize)) sysauxv(auxv[:]) + archauxv(auxv[:]) } const ( diff --git a/src/runtime/os_netbsd_386.go b/src/runtime/os_netbsd_386.go index 037f7e36dc..c203af9cef 100644 --- a/src/runtime/os_netbsd_386.go +++ b/src/runtime/os_netbsd_386.go @@ -14,3 +14,6 @@ func lwp_mcontext_init(mc *mcontextt, stk unsafe.Pointer, mp *m, gp *g, fn uintp mc.__gregs[_REG_EDX] = uint32(uintptr(unsafe.Pointer(gp))) mc.__gregs[_REG_ESI] = uint32(fn) } + +func archauxv(auxv []uintptr) { +} diff --git a/src/runtime/os_netbsd_amd64.go b/src/runtime/os_netbsd_amd64.go index 5118b0c4ff..ea9d125492 100644 --- a/src/runtime/os_netbsd_amd64.go +++ b/src/runtime/os_netbsd_amd64.go @@ -14,3 +14,6 @@ func lwp_mcontext_init(mc *mcontextt, stk unsafe.Pointer, mp *m, gp *g, fn uintp mc.__gregs[_REG_R9] = uint64(uintptr(unsafe.Pointer(gp))) mc.__gregs[_REG_R12] = uint64(fn) } + +func archauxv(auxv []uintptr) { +} diff --git a/src/runtime/os_netbsd_arm.go b/src/runtime/os_netbsd_arm.go index b5ec23e45b..646da9dc0b 100644 --- a/src/runtime/os_netbsd_arm.go +++ b/src/runtime/os_netbsd_arm.go @@ -32,3 +32,6 @@ func cputicks() int64 { // runtime·nanotime() is a poor approximation of CPU 
ticks that is enough for the profiler. return nanotime() } + +func archauxv(auxv []uintptr) { +} diff --git a/src/runtime/os_netbsd_arm64.go b/src/runtime/os_netbsd_arm64.go index 8d21b0a430..ae2638c778 100644 --- a/src/runtime/os_netbsd_arm64.go +++ b/src/runtime/os_netbsd_arm64.go @@ -4,7 +4,10 @@ package runtime -import "unsafe" +import ( + "internal/cpu" + "unsafe" +) func lwp_mcontext_init(mc *mcontextt, stk unsafe.Pointer, mp *m, gp *g, fn uintptr) { // Machine dependent mcontext initialisation for LWP. @@ -21,3 +24,10 @@ func cputicks() int64 { // runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler. return nanotime() } + +func archauxv(auxv []uintptr) { + // NetBSD does not supply AT_HWCAP, however we still need to initialise cpu.HWCaps. + // For now specify the bare minimum until we add some form of capabilities + // detection. See issue https://golang.org/issue/30824#issuecomment-494901591 + cpu.HWCap = 1<<1 | 1<<0 // ASIMD, FP +} From 15c8925df0a1017ee6db96d551f7febae74318bf Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Tue, 29 Sep 2020 00:45:12 +0200 Subject: [PATCH 051/281] cmd/go/internal/lockedfile/internal/filelock: remove stale TODO comment This was addressed by CL 255258. Updates #35618 Change-Id: I8dd5b30a846f2d16a3d4752304861d7d2178d1cf Reviewed-on: https://go-review.googlesource.com/c/go/+/257940 Trust: Tobias Klauser Run-TryBot: Tobias Klauser TryBot-Result: Go Bot Reviewed-by: Bryan C. 
Mills --- .../go/internal/lockedfile/internal/filelock/filelock_fcntl.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/cmd/go/internal/lockedfile/internal/filelock/filelock_fcntl.go b/src/cmd/go/internal/lockedfile/internal/filelock/filelock_fcntl.go index dc7bbe263f..8776c5741c 100644 --- a/src/cmd/go/internal/lockedfile/internal/filelock/filelock_fcntl.go +++ b/src/cmd/go/internal/lockedfile/internal/filelock/filelock_fcntl.go @@ -12,9 +12,6 @@ // Most platforms provide some alternative API, such as an 'flock' system call // or an F_OFD_SETLK command for 'fcntl', that allows for better concurrency and // does not require per-inode bookkeeping in the application. -// -// TODO(golang.org/issue/35618): add a syscall.Flock binding for Illumos and -// switch it over to use filelock_unix.go. package filelock From 6fc094ceaf87659217dd0b2184e0a8749f6e3d39 Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Mon, 17 Aug 2020 18:26:00 +0200 Subject: [PATCH 052/281] crypto/x509: define certDirectories per GOOS Split the list of CA certificate directory locations in root_unix.go by GOOS (aix, *bsd, js, linux, solaris). On solaris, also include /etc/certs/CA as documented here: https://docs.oracle.com/cd/E37838_01/html/E61024/kmf-cacerts.html Same as CL 2208 did for certFiles. 
Change-Id: Id24822d6a674bbbbf4088ebb8fe8437edad232b7 Reviewed-on: https://go-review.googlesource.com/c/go/+/248762 Trust: Tobias Klauser Run-TryBot: Tobias Klauser TryBot-Result: Go Bot Reviewed-by: Filippo Valsorda --- src/crypto/x509/root_aix.go | 6 ++++++ src/crypto/x509/root_bsd.go | 7 +++++++ src/crypto/x509/root_js.go | 4 ++++ src/crypto/x509/root_linux.go | 8 ++++++++ src/crypto/x509/root_solaris.go | 6 ++++++ src/crypto/x509/root_unix.go | 11 ----------- 6 files changed, 31 insertions(+), 11 deletions(-) diff --git a/src/crypto/x509/root_aix.go b/src/crypto/x509/root_aix.go index 6d427739a4..4d50a13473 100644 --- a/src/crypto/x509/root_aix.go +++ b/src/crypto/x509/root_aix.go @@ -8,3 +8,9 @@ package x509 var certFiles = []string{ "/var/ssl/certs/ca-bundle.crt", } + +// Possible directories with certificate files; stop after successfully +// reading at least one file from a directory. +var certDirectories = []string{ + "/var/ssl/certs", +} diff --git a/src/crypto/x509/root_bsd.go b/src/crypto/x509/root_bsd.go index 1371933891..f04b6bd0d6 100644 --- a/src/crypto/x509/root_bsd.go +++ b/src/crypto/x509/root_bsd.go @@ -13,3 +13,10 @@ var certFiles = []string{ "/usr/local/share/certs/ca-root-nss.crt", // DragonFly "/etc/openssl/certs/ca-certificates.crt", // NetBSD } + +// Possible directories with certificate files; stop after successfully +// reading at least one file from a directory. +var certDirectories = []string{ + "/usr/local/share/certs", // FreeBSD + "/etc/openssl/certs", // NetBSD +} diff --git a/src/crypto/x509/root_js.go b/src/crypto/x509/root_js.go index 70abb73f99..4e537a4fe5 100644 --- a/src/crypto/x509/root_js.go +++ b/src/crypto/x509/root_js.go @@ -8,3 +8,7 @@ package x509 // Possible certificate files; stop after finding one. var certFiles = []string{} + +// Possible directories with certificate files; stop after successfully +// reading at least one file from a directory. 
+var certDirectories = []string{} diff --git a/src/crypto/x509/root_linux.go b/src/crypto/x509/root_linux.go index 267775dc5f..ad6ce5cae7 100644 --- a/src/crypto/x509/root_linux.go +++ b/src/crypto/x509/root_linux.go @@ -13,3 +13,11 @@ var certFiles = []string{ "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem", // CentOS/RHEL 7 "/etc/ssl/cert.pem", // Alpine Linux } + +// Possible directories with certificate files; stop after successfully +// reading at least one file from a directory. +var certDirectories = []string{ + "/etc/ssl/certs", // SLES10/SLES11, https://golang.org/issue/12139 + "/etc/pki/tls/certs", // Fedora/RHEL + "/system/etc/security/cacerts", // Android +} diff --git a/src/crypto/x509/root_solaris.go b/src/crypto/x509/root_solaris.go index e6d4e61399..97c19139e3 100644 --- a/src/crypto/x509/root_solaris.go +++ b/src/crypto/x509/root_solaris.go @@ -10,3 +10,9 @@ var certFiles = []string{ "/etc/ssl/certs/ca-certificates.crt", // Joyent SmartOS "/etc/ssl/cacert.pem", // OmniOS } + +// Possible directories with certificate files; stop after successfully +// reading at least one file from a directory. +var certDirectories = []string{ + "/etc/certs/CA", +} diff --git a/src/crypto/x509/root_unix.go b/src/crypto/x509/root_unix.go index b48e618a65..2aa38751f3 100644 --- a/src/crypto/x509/root_unix.go +++ b/src/crypto/x509/root_unix.go @@ -13,17 +13,6 @@ import ( "strings" ) -// Possible directories with certificate files; stop after successfully -// reading at least one file from a directory. -var certDirectories = []string{ - "/etc/ssl/certs", // SLES10/SLES11, https://golang.org/issue/12139 - "/system/etc/security/cacerts", // Android - "/usr/local/share/certs", // FreeBSD - "/etc/pki/tls/certs", // Fedora/RHEL - "/etc/openssl/certs", // NetBSD - "/var/ssl/certs", // AIX -} - const ( // certFileEnv is the environment variable which identifies where to locate // the SSL certificate file. If set this overrides the system default. 
From 9a7a981ab796038e9ddb148dabd97067d9cbbb01 Mon Sep 17 00:00:00 2001 From: Alberto Donizetti Date: Sun, 27 Sep 2020 18:32:18 +0200 Subject: [PATCH 053/281] cmd/compile: convert more amd64 rules to typed aux Passes gotip build -toolexec 'toolstash -cmp' -a std Change-Id: I2927283e444e7075e155cf29680553b92d471667 Reviewed-on: https://go-review.googlesource.com/c/go/+/257897 Trust: Alberto Donizetti Reviewed-by: Keith Randall --- src/cmd/compile/internal/ssa/gen/AMD64.rules | 218 +-- src/cmd/compile/internal/ssa/rewriteAMD64.go | 1446 +++++++++--------- 2 files changed, 832 insertions(+), 832 deletions(-) diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules index 6dfe11dcfa..bfe1b456d4 100644 --- a/src/cmd/compile/internal/ssa/gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules @@ -1994,164 +1994,164 @@ && clobber(x1, x2, mem2) => (MOVQstore [i-4] {s} p (MOVQload [j-4] {s2} p2 mem) mem) -(MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) -> - (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem) -(MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) -> - (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem) -(MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) -> - (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) -(MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) -> - (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem) +(MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) => + (MOVQload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem) +(MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) => + (MOVLload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem) +(MOVWload 
[off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) => + (MOVWload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem) +(MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) => + (MOVBload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem) -(MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) -> - (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) -(MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) -> - (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) -(MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) -> - (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) -(MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) -> - (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) +(MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) => + (MOVQstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem) +(MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) => + (MOVLstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem) +(MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) => + (MOVWstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem) +(MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) => + (MOVBstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem) -(MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) -> - (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, 
sym2)} ptr mem) -(MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) -> - (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) -(MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) -> - (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) -(MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) -> - (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) +(MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && sc.canAdd32(off) => + (MOVQstoreconst [sc.addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem) +(MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && sc.canAdd32(off) => + (MOVLstoreconst [sc.addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem) +(MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && sc.canAdd32(off) => + (MOVWstoreconst [sc.addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem) +(MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && sc.canAdd32(off) => + (MOVBstoreconst [sc.addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem) -(MOVQload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVQload [off1+off2] {sym} ptr mem) -(MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVLload [off1+off2] {sym} ptr mem) -(MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVWload [off1+off2] {sym} ptr mem) -(MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVBload [off1+off2] {sym} ptr mem) -(MOVQstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVQstore [off1+off2] {sym} ptr val mem) -(MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> 
(MOVLstore [off1+off2] {sym} ptr val mem) -(MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVWstore [off1+off2] {sym} ptr val mem) -(MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(off1+off2) -> (MOVBstore [off1+off2] {sym} ptr val mem) -(MOVQstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) -> - (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) -(MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) -> - (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) -(MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) -> - (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) -(MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && ValAndOff(sc).canAdd(off) -> - (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) +(MOVQload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) => (MOVQload [off1+off2] {sym} ptr mem) +(MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) => (MOVLload [off1+off2] {sym} ptr mem) +(MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) => (MOVWload [off1+off2] {sym} ptr mem) +(MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) => (MOVBload [off1+off2] {sym} ptr mem) +(MOVQstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(int64(off1)+int64(off2)) => (MOVQstore [off1+off2] {sym} ptr val mem) +(MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(int64(off1)+int64(off2)) => (MOVLstore [off1+off2] {sym} ptr val mem) +(MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(int64(off1)+int64(off2)) => (MOVWstore [off1+off2] {sym} ptr val mem) +(MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(int64(off1)+int64(off2)) => (MOVBstore [off1+off2] {sym} ptr val mem) +(MOVQstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && sc.canAdd32(off) => + 
(MOVQstoreconst [sc.addOffset32(off)] {s} ptr mem) +(MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && sc.canAdd32(off) => + (MOVLstoreconst [sc.addOffset32(off)] {s} ptr mem) +(MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && sc.canAdd32(off) => + (MOVWstoreconst [sc.addOffset32(off)] {s} ptr mem) +(MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem) && sc.canAdd32(off) => + (MOVBstoreconst [sc.addOffset32(off)] {s} ptr mem) // Merge load and op // TODO: add indexed variants? -((ADD|SUB|AND|OR|XOR)Q x l:(MOVQload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) -> ((ADD|SUB|AND|OR|XOR)Qload x [off] {sym} ptr mem) -((ADD|SUB|AND|OR|XOR)L x l:(MOVLload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) -> ((ADD|SUB|AND|OR|XOR)Lload x [off] {sym} ptr mem) -((ADD|SUB|MUL|DIV)SD x l:(MOVSDload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) -> ((ADD|SUB|MUL|DIV)SDload x [off] {sym} ptr mem) -((ADD|SUB|MUL|DIV)SS x l:(MOVSSload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) -> ((ADD|SUB|MUL|DIV)SSload x [off] {sym} ptr mem) -(MOVLstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Lload x [off] {sym} ptr mem) mem) && y.Uses==1 && clobber(y) -> ((ADD|AND|OR|XOR)Lmodify [off] {sym} ptr x mem) -(MOVLstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)L l:(MOVLload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) -> +((ADD|SUB|AND|OR|XOR)Q x l:(MOVQload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|AND|OR|XOR)Qload x [off] {sym} ptr mem) +((ADD|SUB|AND|OR|XOR)L x l:(MOVLload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|AND|OR|XOR)Lload x [off] {sym} ptr mem) +((ADD|SUB|MUL|DIV)SD x l:(MOVSDload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|MUL|DIV)SDload x [off] {sym} ptr mem) +((ADD|SUB|MUL|DIV)SS x l:(MOVSSload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && 
clobber(l) => ((ADD|SUB|MUL|DIV)SSload x [off] {sym} ptr mem) +(MOVLstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Lload x [off] {sym} ptr mem) mem) && y.Uses==1 && clobber(y) => ((ADD|AND|OR|XOR)Lmodify [off] {sym} ptr x mem) +(MOVLstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)L l:(MOVLload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) => ((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Lmodify [off] {sym} ptr x mem) -(MOVQstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Qload x [off] {sym} ptr mem) mem) && y.Uses==1 && clobber(y) -> ((ADD|AND|OR|XOR)Qmodify [off] {sym} ptr x mem) -(MOVQstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Q l:(MOVQload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) -> +(MOVQstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Qload x [off] {sym} ptr mem) mem) && y.Uses==1 && clobber(y) => ((ADD|AND|OR|XOR)Qmodify [off] {sym} ptr x mem) +(MOVQstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Q l:(MOVQload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) => ((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Qmodify [off] {sym} ptr x mem) // Merge ADDQconst and LEAQ into atomic loads. -(MOV(Q|L|B)atomicload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> +(MOV(Q|L|B)atomicload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) => (MOV(Q|L|B)atomicload [off1+off2] {sym} ptr mem) -(MOV(Q|L|B)atomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> - (MOV(Q|L|B)atomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) +(MOV(Q|L|B)atomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (MOV(Q|L|B)atomicload [off1+off2] {mergeSymTyped(sym1, sym2)} ptr mem) // Merge ADDQconst and LEAQ into atomic stores. 
-(XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> +(XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) => (XCHGQ [off1+off2] {sym} val ptr mem) -(XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB -> - (XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem) -(XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> +(XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB => + (XCHGQ [off1+off2] {mergeSymTyped(sym1,sym2)} val ptr mem) +(XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) => (XCHGL [off1+off2] {sym} val ptr mem) -(XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB -> - (XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem) +(XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB => + (XCHGL [off1+off2] {mergeSymTyped(sym1,sym2)} val ptr mem) // Merge ADDQconst into atomic adds. // TODO: merging LEAQ doesn't work, assembler doesn't like the resulting instructions. -(XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> +(XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) => (XADDQlock [off1+off2] {sym} val ptr mem) -(XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> +(XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) => (XADDLlock [off1+off2] {sym} val ptr mem) // Merge ADDQconst into atomic compare and swaps. // TODO: merging LEAQ doesn't work, assembler doesn't like the resulting instructions. 
-(CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) && is32Bit(off1+off2) -> +(CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) && is32Bit(int64(off1)+int64(off2)) => (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem) -(CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) && is32Bit(off1+off2) -> +(CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) && is32Bit(int64(off1)+int64(off2)) => (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem) // We don't need the conditional move if we know the arg of BSF is not zero. -(CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _)))) && c != 0 -> x +(CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _)))) && c != 0 => x // Extension is unnecessary for trailing zeros. -(BSFQ (ORQconst [1<<8] (MOVBQZX x))) -> (BSFQ (ORQconst [1<<8] x)) -(BSFQ (ORQconst [1<<16] (MOVWQZX x))) -> (BSFQ (ORQconst [1<<16] x)) +(BSFQ (ORQconst [1<<8] (MOVBQZX x))) => (BSFQ (ORQconst [1<<8] x)) +(BSFQ (ORQconst [1<<16] (MOVWQZX x))) => (BSFQ (ORQconst [1<<16] x)) // Redundant sign/zero extensions // Note: see issue 21963. We have to make sure we use the right type on // the resulting extension (the outer type, not the inner type). 
-(MOVLQSX (MOVLQSX x)) -> (MOVLQSX x) -(MOVLQSX (MOVWQSX x)) -> (MOVWQSX x) -(MOVLQSX (MOVBQSX x)) -> (MOVBQSX x) -(MOVWQSX (MOVWQSX x)) -> (MOVWQSX x) -(MOVWQSX (MOVBQSX x)) -> (MOVBQSX x) -(MOVBQSX (MOVBQSX x)) -> (MOVBQSX x) -(MOVLQZX (MOVLQZX x)) -> (MOVLQZX x) -(MOVLQZX (MOVWQZX x)) -> (MOVWQZX x) -(MOVLQZX (MOVBQZX x)) -> (MOVBQZX x) -(MOVWQZX (MOVWQZX x)) -> (MOVWQZX x) -(MOVWQZX (MOVBQZX x)) -> (MOVBQZX x) -(MOVBQZX (MOVBQZX x)) -> (MOVBQZX x) +(MOVLQSX (MOVLQSX x)) => (MOVLQSX x) +(MOVLQSX (MOVWQSX x)) => (MOVWQSX x) +(MOVLQSX (MOVBQSX x)) => (MOVBQSX x) +(MOVWQSX (MOVWQSX x)) => (MOVWQSX x) +(MOVWQSX (MOVBQSX x)) => (MOVBQSX x) +(MOVBQSX (MOVBQSX x)) => (MOVBQSX x) +(MOVLQZX (MOVLQZX x)) => (MOVLQZX x) +(MOVLQZX (MOVWQZX x)) => (MOVWQZX x) +(MOVLQZX (MOVBQZX x)) => (MOVBQZX x) +(MOVWQZX (MOVWQZX x)) => (MOVWQZX x) +(MOVWQZX (MOVBQZX x)) => (MOVBQZX x) +(MOVBQZX (MOVBQZX x)) => (MOVBQZX x) (MOVQstore [off] {sym} ptr a:((ADD|AND|OR|XOR|BTC|BTR|BTS)Qconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) - && isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l, a) -> - ((ADD|AND|OR|XOR|BTC|BTR|BTS)Qconstmodify {sym} [makeValAndOff(c,off)] ptr mem) + && isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a) => + ((ADD|AND|OR|XOR|BTC|BTR|BTS)Qconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem) (MOVLstore [off] {sym} ptr a:((ADD|AND|OR|XOR|BTC|BTR|BTS)Lconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) - && isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l, a) -> - ((ADD|AND|OR|XOR|BTC|BTR|BTS)Lconstmodify {sym} [makeValAndOff(c,off)] ptr mem) + && isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a) => + ((ADD|AND|OR|XOR|BTC|BTR|BTS)Lconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem) // float <-> int register moves, with no conversion. 
// These come up when compiling math.{Float{32,64}bits,Float{32,64}frombits}. -(MOVQload [off] {sym} ptr (MOVSDstore [off] {sym} ptr val _)) -> (MOVQf2i val) -(MOVLload [off] {sym} ptr (MOVSSstore [off] {sym} ptr val _)) -> (MOVLf2i val) -(MOVSDload [off] {sym} ptr (MOVQstore [off] {sym} ptr val _)) -> (MOVQi2f val) -(MOVSSload [off] {sym} ptr (MOVLstore [off] {sym} ptr val _)) -> (MOVLi2f val) +(MOVQload [off] {sym} ptr (MOVSDstore [off] {sym} ptr val _)) => (MOVQf2i val) +(MOVLload [off] {sym} ptr (MOVSSstore [off] {sym} ptr val _)) => (MOVLf2i val) +(MOVSDload [off] {sym} ptr (MOVQstore [off] {sym} ptr val _)) => (MOVQi2f val) +(MOVSSload [off] {sym} ptr (MOVLstore [off] {sym} ptr val _)) => (MOVLi2f val) // Other load-like ops. -(ADDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) -> (ADDQ x (MOVQf2i y)) -(ADDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) -> (ADDL x (MOVLf2i y)) -(SUBQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) -> (SUBQ x (MOVQf2i y)) -(SUBLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) -> (SUBL x (MOVLf2i y)) -(ANDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) -> (ANDQ x (MOVQf2i y)) -(ANDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) -> (ANDL x (MOVLf2i y)) -( ORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) -> ( ORQ x (MOVQf2i y)) -( ORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) -> ( ORL x (MOVLf2i y)) -(XORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) -> (XORQ x (MOVQf2i y)) -(XORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) -> (XORL x (MOVLf2i y)) +(ADDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => (ADDQ x (MOVQf2i y)) +(ADDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => (ADDL x (MOVLf2i y)) +(SUBQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => (SUBQ x (MOVQf2i y)) +(SUBLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => (SUBL x (MOVLf2i y)) +(ANDQload x [off] {sym} ptr 
(MOVSDstore [off] {sym} ptr y _)) => (ANDQ x (MOVQf2i y)) +(ANDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => (ANDL x (MOVLf2i y)) +( ORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => ( ORQ x (MOVQf2i y)) +( ORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => ( ORL x (MOVLf2i y)) +(XORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => (XORQ x (MOVQf2i y)) +(XORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => (XORL x (MOVLf2i y)) -(ADDSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) -> (ADDSD x (MOVQi2f y)) -(ADDSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) -> (ADDSS x (MOVLi2f y)) -(SUBSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) -> (SUBSD x (MOVQi2f y)) -(SUBSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) -> (SUBSS x (MOVLi2f y)) -(MULSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) -> (MULSD x (MOVQi2f y)) -(MULSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) -> (MULSS x (MOVLi2f y)) +(ADDSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) => (ADDSD x (MOVQi2f y)) +(ADDSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) => (ADDSS x (MOVLi2f y)) +(SUBSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) => (SUBSD x (MOVQi2f y)) +(SUBSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) => (SUBSS x (MOVLi2f y)) +(MULSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) => (MULSD x (MOVQi2f y)) +(MULSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) => (MULSS x (MOVLi2f y)) // Redirect stores to use the other register set. 
-(MOVQstore [off] {sym} ptr (MOVQf2i val) mem) -> (MOVSDstore [off] {sym} ptr val mem) -(MOVLstore [off] {sym} ptr (MOVLf2i val) mem) -> (MOVSSstore [off] {sym} ptr val mem) -(MOVSDstore [off] {sym} ptr (MOVQi2f val) mem) -> (MOVQstore [off] {sym} ptr val mem) -(MOVSSstore [off] {sym} ptr (MOVLi2f val) mem) -> (MOVLstore [off] {sym} ptr val mem) +(MOVQstore [off] {sym} ptr (MOVQf2i val) mem) => (MOVSDstore [off] {sym} ptr val mem) +(MOVLstore [off] {sym} ptr (MOVLf2i val) mem) => (MOVSSstore [off] {sym} ptr val mem) +(MOVSDstore [off] {sym} ptr (MOVQi2f val) mem) => (MOVQstore [off] {sym} ptr val mem) +(MOVSSstore [off] {sym} ptr (MOVLi2f val) mem) => (MOVLstore [off] {sym} ptr val mem) // Load args directly into the register class where it will be used. // We do this by just modifying the type of the Arg. -(MOVQf2i (Arg [off] {sym})) && t.Size() == u.Size() -> @b.Func.Entry (Arg [off] {sym}) -(MOVLf2i (Arg [off] {sym})) && t.Size() == u.Size() -> @b.Func.Entry (Arg [off] {sym}) -(MOVQi2f (Arg [off] {sym})) && t.Size() == u.Size() -> @b.Func.Entry (Arg [off] {sym}) -(MOVLi2f (Arg [off] {sym})) && t.Size() == u.Size() -> @b.Func.Entry (Arg [off] {sym}) +(MOVQf2i (Arg [off] {sym})) && t.Size() == u.Size() => @b.Func.Entry (Arg [off] {sym}) +(MOVLf2i (Arg [off] {sym})) && t.Size() == u.Size() => @b.Func.Entry (Arg [off] {sym}) +(MOVQi2f (Arg [off] {sym})) && t.Size() == u.Size() => @b.Func.Entry (Arg [off] {sym}) +(MOVLi2f (Arg [off] {sym})) && t.Size() == u.Size() => @b.Func.Entry (Arg [off] {sym}) // LEAQ is rematerializeable, so this helps to avoid register spill. // See issue 22947 for details -(ADD(Q|L)const [off] x:(SP)) -> (LEA(Q|L) [off] x) +(ADD(Q|L)const [off] x:(SP)) => (LEA(Q|L) [off] x) // HMULx is commutative, but its first argument must go in AX. 
// If possible, put a rematerializeable value in the first argument slot, diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index a7b3635b5e..bb25561507 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1469,16 +1469,16 @@ func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool { if l.Op != OpAMD64MOVLload { continue } - off := l.AuxInt - sym := l.Aux + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] if !(canMergeLoadClobber(v, l, x) && clobber(l)) { continue } v.reset(OpAMD64ADDLload) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) return true } @@ -1660,13 +1660,13 @@ func rewriteValueAMD64_OpAMD64ADDLconst(v *Value) bool { // match: (ADDLconst [off] x:(SP)) // result: (LEAL [off] x) for { - off := v.AuxInt + off := auxIntToInt32(v.AuxInt) x := v_0 if x.Op != OpSP { break } v.reset(OpAMD64LEAL) - v.AuxInt = off + v.AuxInt = int32ToAuxInt(off) v.AddArg(x) return true } @@ -1774,11 +1774,11 @@ func rewriteValueAMD64_OpAMD64ADDLload(v *Value) bool { // match: (ADDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) // result: (ADDL x (MOVLf2i y)) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) x := v_0 ptr := v_1 - if v_2.Op != OpAMD64MOVSSstore || v_2.AuxInt != off || v_2.Aux != sym { + if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym { break } y := v_2.Args[1] @@ -2058,16 +2058,16 @@ func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool { if l.Op != OpAMD64MOVQload { continue } - off := l.AuxInt - sym := l.Aux + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] if !(canMergeLoadClobber(v, l, x) && clobber(l)) { continue } v.reset(OpAMD64ADDQload) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = 
symToAux(sym) v.AddArg3(x, ptr, mem) return true } @@ -2276,13 +2276,13 @@ func rewriteValueAMD64_OpAMD64ADDQconst(v *Value) bool { // match: (ADDQconst [off] x:(SP)) // result: (LEAQ [off] x) for { - off := v.AuxInt + off := auxIntToInt32(v.AuxInt) x := v_0 if x.Op != OpSP { break } v.reset(OpAMD64LEAQ) - v.AuxInt = off + v.AuxInt = int32ToAuxInt(off) v.AddArg(x) return true } @@ -2390,11 +2390,11 @@ func rewriteValueAMD64_OpAMD64ADDQload(v *Value) bool { // match: (ADDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) // result: (ADDQ x (MOVQf2i y)) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) x := v_0 ptr := v_1 - if v_2.Op != OpAMD64MOVSDstore || v_2.AuxInt != off || v_2.Aux != sym { + if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym { break } y := v_2.Args[1] @@ -2473,16 +2473,16 @@ func rewriteValueAMD64_OpAMD64ADDSD(v *Value) bool { if l.Op != OpAMD64MOVSDload { continue } - off := l.AuxInt - sym := l.Aux + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] if !(canMergeLoadClobber(v, l, x) && clobber(l)) { continue } v.reset(OpAMD64ADDSDload) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) return true } @@ -2544,11 +2544,11 @@ func rewriteValueAMD64_OpAMD64ADDSDload(v *Value) bool { // match: (ADDSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) // result: (ADDSD x (MOVQi2f y)) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) x := v_0 ptr := v_1 - if v_2.Op != OpAMD64MOVQstore || v_2.AuxInt != off || v_2.Aux != sym { + if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym { break } y := v_2.Args[1] @@ -2576,16 +2576,16 @@ func rewriteValueAMD64_OpAMD64ADDSS(v *Value) bool { if l.Op != OpAMD64MOVSSload { continue } - off := l.AuxInt - sym := l.Aux + off := 
auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] if !(canMergeLoadClobber(v, l, x) && clobber(l)) { continue } v.reset(OpAMD64ADDSSload) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) return true } @@ -2647,11 +2647,11 @@ func rewriteValueAMD64_OpAMD64ADDSSload(v *Value) bool { // match: (ADDSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) // result: (ADDSS x (MOVLi2f y)) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) x := v_0 ptr := v_1 - if v_2.Op != OpAMD64MOVLstore || v_2.AuxInt != off || v_2.Aux != sym { + if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym { break } y := v_2.Args[1] @@ -2748,16 +2748,16 @@ func rewriteValueAMD64_OpAMD64ANDL(v *Value) bool { if l.Op != OpAMD64MOVLload { continue } - off := l.AuxInt - sym := l.Aux + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] if !(canMergeLoadClobber(v, l, x) && clobber(l)) { continue } v.reset(OpAMD64ANDLload) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) return true } @@ -2971,11 +2971,11 @@ func rewriteValueAMD64_OpAMD64ANDLload(v *Value) bool { // match: (ANDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) // result: (ANDL x (MOVLf2i y)) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) x := v_0 ptr := v_1 - if v_2.Op != OpAMD64MOVSSstore || v_2.AuxInt != off || v_2.Aux != sym { + if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym { break } y := v_2.Args[1] @@ -3127,16 +3127,16 @@ func rewriteValueAMD64_OpAMD64ANDQ(v *Value) bool { if l.Op != OpAMD64MOVQload { continue } - off := l.AuxInt - sym := l.Aux + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] if 
!(canMergeLoadClobber(v, l, x) && clobber(l)) { continue } v.reset(OpAMD64ANDQload) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) return true } @@ -3350,11 +3350,11 @@ func rewriteValueAMD64_OpAMD64ANDQload(v *Value) bool { // match: (ANDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) // result: (ANDQ x (MOVQf2i y)) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) x := v_0 ptr := v_1 - if v_2.Op != OpAMD64MOVSDstore || v_2.AuxInt != off || v_2.Aux != sym { + if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym { break } y := v_2.Args[1] @@ -3430,7 +3430,7 @@ func rewriteValueAMD64_OpAMD64BSFQ(v *Value) bool { break } t := v_0.Type - if v_0.AuxInt != 1<<8 { + if auxIntToInt32(v_0.AuxInt) != 1<<8 { break } v_0_0 := v_0.Args[0] @@ -3440,7 +3440,7 @@ func rewriteValueAMD64_OpAMD64BSFQ(v *Value) bool { x := v_0_0.Args[0] v.reset(OpAMD64BSFQ) v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t) - v0.AuxInt = 1 << 8 + v0.AuxInt = int32ToAuxInt(1 << 8) v0.AddArg(x) v.AddArg(v0) return true @@ -3452,7 +3452,7 @@ func rewriteValueAMD64_OpAMD64BSFQ(v *Value) bool { break } t := v_0.Type - if v_0.AuxInt != 1<<16 { + if auxIntToInt32(v_0.AuxInt) != 1<<16 { break } v_0_0 := v_0.Args[0] @@ -3462,7 +3462,7 @@ func rewriteValueAMD64_OpAMD64BSFQ(v *Value) bool { x := v_0_0.Args[0] v.reset(OpAMD64BSFQ) v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t) - v0.AuxInt = 1 << 16 + v0.AuxInt = int32ToAuxInt(1 << 16) v0.AddArg(x) v.AddArg(v0) return true @@ -5530,7 +5530,7 @@ func rewriteValueAMD64_OpAMD64CMOVQEQ(v *Value) bool { if v_2_0_0.Op != OpAMD64ORQconst { break } - c := v_2_0_0.AuxInt + c := auxIntToInt32(v_2_0_0.AuxInt) if !(c != 0) { break } @@ -8461,25 +8461,25 @@ func rewriteValueAMD64_OpAMD64CMPXCHGLlock(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) - 
// cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] old := v_1 new_ := v_2 mem := v_3 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64CMPXCHGLlock) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg4(ptr, old, new_, mem) return true } @@ -8491,25 +8491,25 @@ func rewriteValueAMD64_OpAMD64CMPXCHGQlock(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] old := v_1 new_ := v_2 mem := v_3 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64CMPXCHGQlock) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg4(ptr, old, new_, mem) return true } @@ -8527,16 +8527,16 @@ func rewriteValueAMD64_OpAMD64DIVSD(v *Value) bool { if l.Op != OpAMD64MOVSDload { break } - off := l.AuxInt - sym := l.Aux + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(OpAMD64DIVSDload) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) return true } @@ -8605,16 +8605,16 @@ func rewriteValueAMD64_OpAMD64DIVSS(v *Value) 
bool { if l.Op != OpAMD64MOVSSload { break } - off := l.AuxInt - sym := l.Aux + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(OpAMD64DIVSSload) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) return true } @@ -10140,45 +10140,45 @@ func rewriteValueAMD64_OpAMD64MOVBatomicload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVBatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (MOVBatomicload [off1+off2] {sym} ptr mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64MOVBatomicload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVBatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVBatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVBatomicload [off1+off2] {mergeSymTyped(sym1, sym2)} ptr mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpAMD64LEAQ { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpAMD64MOVBatomicload) - v.AuxInt = off1 + off2 - v.Aux = 
mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(ptr, mem) return true } @@ -10252,45 +10252,45 @@ func rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool { return true } // match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem) - // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) - // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem) + // cond: canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) + // result: (MOVBload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpAMD64LEAL { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] mem := v_1 - if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { break } v.reset(OpAMD64MOVBload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(base, mem) return true } // match: (MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (MOVBload [off1+off2] {sym} ptr mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDLconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64MOVBload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } @@ -11449,47 +11449,47 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { return true } // match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) - 
// cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) - // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + // cond: canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) + // result: (MOVBstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpAMD64LEAL { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] val := v_1 mem := v_2 - if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { break } v.reset(OpAMD64MOVBstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(base, val, mem) return true } // match: (MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (MOVBstore [off1+off2] {sym} ptr val mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDLconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64MOVBstore) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(ptr, val, mem) return true } @@ -11592,45 +11592,45 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool { return true } // match: (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) - // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) - // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) + // cond: canMergeSym(sym1, sym2) && sc.canAdd32(off) + // result: (MOVBstoreconst 
[sc.addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem) for { - sc := v.AuxInt - sym1 := v.Aux + sc := auxIntToValAndOff(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpAMD64LEAL { break } - off := v_0.AuxInt - sym2 := v_0.Aux + off := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 - if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { + if !(canMergeSym(sym1, sym2) && sc.canAdd32(off)) { break } v.reset(OpAMD64MOVBstoreconst) - v.AuxInt = ValAndOff(sc).add(off) - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off)) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(ptr, mem) return true } // match: (MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem) - // cond: ValAndOff(sc).canAdd(off) - // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) + // cond: sc.canAdd32(off) + // result: (MOVBstoreconst [sc.addOffset32(off)] {s} ptr mem) for { - sc := v.AuxInt - s := v.Aux + sc := auxIntToValAndOff(v.AuxInt) + s := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDLconst { break } - off := v_0.AuxInt + off := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(ValAndOff(sc).canAdd(off)) { + if !(sc.canAdd32(off)) { break } v.reset(OpAMD64MOVBstoreconst) - v.AuxInt = ValAndOff(sc).add(off) - v.Aux = s + v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off)) + v.Aux = symToAux(s) v.AddArg2(ptr, mem) return true } @@ -11897,45 +11897,45 @@ func rewriteValueAMD64_OpAMD64MOVLatomicload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVLatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (MOVLatomicload [off1+off2] {sym} ptr mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(is32Bit(off1 + off2)) { + if 
!(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64MOVLatomicload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVLatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVLatomicload [off1+off2] {mergeSymTyped(sym1, sym2)} ptr mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpAMD64LEAQ { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpAMD64MOVLatomicload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(ptr, mem) return true } @@ -11953,16 +11953,16 @@ func rewriteValueAMD64_OpAMD64MOVLf2i(v *Value) bool { break } u := v_0.Type - off := v_0.AuxInt - sym := v_0.Aux + off := auxIntToInt32(v_0.AuxInt) + sym := auxToSym(v_0.Aux) if !(t.Size() == u.Size()) { break } b = b.Func.Entry v0 := b.NewValue0(v.Pos, OpArg, t) v.copyOf(v0) - v0.AuxInt = off - v0.Aux = sym + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) return true } return false @@ -11979,16 +11979,16 @@ func rewriteValueAMD64_OpAMD64MOVLi2f(v *Value) bool { break } u := v_0.Type - off := v_0.AuxInt - sym := v_0.Aux + off := auxIntToInt32(v_0.AuxInt) + sym := auxToSym(v_0.Aux) if !(t.Size() == u.Size()) { break } b = b.Func.Entry v0 := b.NewValue0(v.Pos, OpArg, t) v.copyOf(v0) - v0.AuxInt = off - v0.Aux = sym + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) return 
true } return false @@ -12063,55 +12063,55 @@ func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool { return true } // match: (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem) - // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) - // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem) + // cond: canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) + // result: (MOVLload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpAMD64LEAL { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] mem := v_1 - if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { break } v.reset(OpAMD64MOVLload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(base, mem) return true } // match: (MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (MOVLload [off1+off2] {sym} ptr mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDLconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64MOVLload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVLload [off] {sym} ptr (MOVSSstore [off] {sym} ptr val _)) // result: (MOVLf2i val) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 - if v_1.Op != OpAMD64MOVSSstore || v_1.AuxInt != off || v_1.Aux != sym { + if v_1.Op 
!= OpAMD64MOVSSstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym { break } val := v_1.Args[1] @@ -12407,47 +12407,47 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { return true } // match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) - // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) - // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + // cond: canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) + // result: (MOVLstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpAMD64LEAL { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] val := v_1 mem := v_2 - if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { break } v.reset(OpAMD64MOVLstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(base, val, mem) return true } // match: (MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (MOVLstore [off1+off2] {sym} ptr val mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDLconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64MOVLstore) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(ptr, val, mem) return true } @@ -12455,11 +12455,11 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { // cond: y.Uses==1 && clobber(y) // result: 
(ADDLmodify [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 y := v_1 - if y.Op != OpAMD64ADDLload || y.AuxInt != off || y.Aux != sym { + if y.Op != OpAMD64ADDLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym { break } mem := y.Args[2] @@ -12468,8 +12468,8 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { break } v.reset(OpAMD64ADDLmodify) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } @@ -12477,11 +12477,11 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { // cond: y.Uses==1 && clobber(y) // result: (ANDLmodify [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 y := v_1 - if y.Op != OpAMD64ANDLload || y.AuxInt != off || y.Aux != sym { + if y.Op != OpAMD64ANDLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym { break } mem := y.Args[2] @@ -12490,8 +12490,8 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { break } v.reset(OpAMD64ANDLmodify) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } @@ -12499,11 +12499,11 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { // cond: y.Uses==1 && clobber(y) // result: (ORLmodify [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 y := v_1 - if y.Op != OpAMD64ORLload || y.AuxInt != off || y.Aux != sym { + if y.Op != OpAMD64ORLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym { break } mem := y.Args[2] @@ -12512,8 +12512,8 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { break } v.reset(OpAMD64ORLmodify) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } @@ -12521,11 +12521,11 @@ 
func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { // cond: y.Uses==1 && clobber(y) // result: (XORLmodify [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 y := v_1 - if y.Op != OpAMD64XORLload || y.AuxInt != off || y.Aux != sym { + if y.Op != OpAMD64XORLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym { break } mem := y.Args[2] @@ -12534,8 +12534,8 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { break } v.reset(OpAMD64XORLmodify) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } @@ -12543,8 +12543,8 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) // result: (ADDLmodify [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 y := v_1 if y.Op != OpAMD64ADDL { @@ -12555,7 +12555,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { y_1 := y.Args[1] for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { l := y_0 - if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym { + if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { continue } mem := l.Args[1] @@ -12567,8 +12567,8 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { continue } v.reset(OpAMD64ADDLmodify) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } @@ -12578,8 +12578,8 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) // result: (SUBLmodify [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 y := v_1 if y.Op != OpAMD64SUBL { @@ -12587,7 +12587,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { } x := 
y.Args[1] l := y.Args[0] - if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym { + if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { break } mem := l.Args[1] @@ -12595,8 +12595,8 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { break } v.reset(OpAMD64SUBLmodify) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } @@ -12604,8 +12604,8 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) // result: (ANDLmodify [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 y := v_1 if y.Op != OpAMD64ANDL { @@ -12616,7 +12616,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { y_1 := y.Args[1] for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { l := y_0 - if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym { + if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { continue } mem := l.Args[1] @@ -12628,8 +12628,8 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { continue } v.reset(OpAMD64ANDLmodify) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } @@ -12639,8 +12639,8 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) // result: (ORLmodify [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 y := v_1 if y.Op != OpAMD64ORL { @@ -12651,7 +12651,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { y_1 := y.Args[1] for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { l := y_0 - if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym { + if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { 
continue } mem := l.Args[1] @@ -12663,8 +12663,8 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { continue } v.reset(OpAMD64ORLmodify) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } @@ -12674,8 +12674,8 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) // result: (XORLmodify [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 y := v_1 if y.Op != OpAMD64XORL { @@ -12686,7 +12686,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { y_1 := y.Args[1] for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { l := y_0 - if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym { + if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { continue } mem := l.Args[1] @@ -12698,8 +12698,8 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { continue } v.reset(OpAMD64XORLmodify) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } @@ -12709,8 +12709,8 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) // result: (BTCLmodify [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 y := v_1 if y.Op != OpAMD64BTCL { @@ -12718,7 +12718,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { } x := y.Args[1] l := y.Args[0] - if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym { + if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { break } mem := l.Args[1] @@ -12726,8 +12726,8 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { break } v.reset(OpAMD64BTCLmodify) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) 
v.AddArg3(ptr, x, mem) return true } @@ -12735,8 +12735,8 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) // result: (BTRLmodify [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 y := v_1 if y.Op != OpAMD64BTRL { @@ -12744,7 +12744,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { } x := y.Args[1] l := y.Args[0] - if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym { + if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { break } mem := l.Args[1] @@ -12752,8 +12752,8 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { break } v.reset(OpAMD64BTRLmodify) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } @@ -12761,8 +12761,8 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) // result: (BTSLmodify [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 y := v_1 if y.Op != OpAMD64BTSL { @@ -12770,7 +12770,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { } x := y.Args[1] l := y.Args[0] - if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym { + if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { break } mem := l.Args[1] @@ -12778,205 +12778,205 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { break } v.reset(OpAMD64BTSLmodify) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } // match: (MOVLstore [off] {sym} ptr a:(ADDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) - // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l, a) - // result: (ADDLconstmodify {sym} 
[makeValAndOff(c,off)] ptr mem) + // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a) + // result: (ADDLconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 a := v_1 if a.Op != OpAMD64ADDLconst { break } - c := a.AuxInt + c := auxIntToInt32(a.AuxInt) l := a.Args[0] - if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym { + if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { break } mem := l.Args[1] ptr2 := l.Args[0] - if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l, a)) { + if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) { break } v.reset(OpAMD64ADDLconstmodify) - v.AuxInt = makeValAndOff(c, off) - v.Aux = sym + v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off)) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVLstore [off] {sym} ptr a:(ANDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) - // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l, a) - // result: (ANDLconstmodify {sym} [makeValAndOff(c,off)] ptr mem) + // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a) + // result: (ANDLconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 a := v_1 if a.Op != OpAMD64ANDLconst { break } - c := a.AuxInt + c := auxIntToInt32(a.AuxInt) l := a.Args[0] - if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym { + if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { break } mem := l.Args[1] ptr2 := l.Args[0] - if mem != v_2 
|| !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l, a)) { + if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) { break } v.reset(OpAMD64ANDLconstmodify) - v.AuxInt = makeValAndOff(c, off) - v.Aux = sym + v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off)) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVLstore [off] {sym} ptr a:(ORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) - // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l, a) - // result: (ORLconstmodify {sym} [makeValAndOff(c,off)] ptr mem) + // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a) + // result: (ORLconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 a := v_1 if a.Op != OpAMD64ORLconst { break } - c := a.AuxInt + c := auxIntToInt32(a.AuxInt) l := a.Args[0] - if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym { + if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { break } mem := l.Args[1] ptr2 := l.Args[0] - if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l, a)) { + if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) { break } v.reset(OpAMD64ORLconstmodify) - v.AuxInt = makeValAndOff(c, off) - v.Aux = sym + v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off)) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVLstore [off] {sym} ptr a:(XORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) - // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l, a) - // result: 
(XORLconstmodify {sym} [makeValAndOff(c,off)] ptr mem) + // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a) + // result: (XORLconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 a := v_1 if a.Op != OpAMD64XORLconst { break } - c := a.AuxInt + c := auxIntToInt32(a.AuxInt) l := a.Args[0] - if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym { + if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { break } mem := l.Args[1] ptr2 := l.Args[0] - if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l, a)) { + if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) { break } v.reset(OpAMD64XORLconstmodify) - v.AuxInt = makeValAndOff(c, off) - v.Aux = sym + v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off)) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVLstore [off] {sym} ptr a:(BTCLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) - // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l, a) - // result: (BTCLconstmodify {sym} [makeValAndOff(c,off)] ptr mem) + // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a) + // result: (BTCLconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 a := v_1 if a.Op != OpAMD64BTCLconst { break } - c := a.AuxInt + c := auxIntToInt8(a.AuxInt) l := a.Args[0] - if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym { + if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { break } mem := l.Args[1] ptr2 := 
l.Args[0] - if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l, a)) { + if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) { break } v.reset(OpAMD64BTCLconstmodify) - v.AuxInt = makeValAndOff(c, off) - v.Aux = sym + v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off)) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVLstore [off] {sym} ptr a:(BTRLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) - // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l, a) - // result: (BTRLconstmodify {sym} [makeValAndOff(c,off)] ptr mem) + // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a) + // result: (BTRLconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 a := v_1 if a.Op != OpAMD64BTRLconst { break } - c := a.AuxInt + c := auxIntToInt8(a.AuxInt) l := a.Args[0] - if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym { + if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { break } mem := l.Args[1] ptr2 := l.Args[0] - if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l, a)) { + if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) { break } v.reset(OpAMD64BTRLconstmodify) - v.AuxInt = makeValAndOff(c, off) - v.Aux = sym + v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off)) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVLstore [off] {sym} ptr a:(BTSLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) - // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && 
clobber(l, a) - // result: (BTSLconstmodify {sym} [makeValAndOff(c,off)] ptr mem) + // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a) + // result: (BTSLconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 a := v_1 if a.Op != OpAMD64BTSLconst { break } - c := a.AuxInt + c := auxIntToInt8(a.AuxInt) l := a.Args[0] - if l.Op != OpAMD64MOVLload || l.AuxInt != off || l.Aux != sym { + if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { break } mem := l.Args[1] ptr2 := l.Args[0] - if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l, a)) { + if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) { break } v.reset(OpAMD64BTSLconstmodify) - v.AuxInt = makeValAndOff(c, off) - v.Aux = sym + v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off)) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVLstore [off] {sym} ptr (MOVLf2i val) mem) // result: (MOVSSstore [off] {sym} ptr val mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64MOVLf2i { break @@ -12984,8 +12984,8 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { val := v_1.Args[0] mem := v_2 v.reset(OpAMD64MOVSSstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, val, mem) return true } @@ -13094,45 +13094,45 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool { return true } // match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) - // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) - // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) + // cond: 
canMergeSym(sym1, sym2) && sc.canAdd32(off) + // result: (MOVLstoreconst [sc.addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem) for { - sc := v.AuxInt - sym1 := v.Aux + sc := auxIntToValAndOff(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpAMD64LEAL { break } - off := v_0.AuxInt - sym2 := v_0.Aux + off := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 - if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { + if !(canMergeSym(sym1, sym2) && sc.canAdd32(off)) { break } v.reset(OpAMD64MOVLstoreconst) - v.AuxInt = ValAndOff(sc).add(off) - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off)) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(ptr, mem) return true } // match: (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem) - // cond: ValAndOff(sc).canAdd(off) - // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) + // cond: sc.canAdd32(off) + // result: (MOVLstoreconst [sc.addOffset32(off)] {s} ptr mem) for { - sc := v.AuxInt - s := v.Aux + sc := auxIntToValAndOff(v.AuxInt) + s := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDLconst { break } - off := v_0.AuxInt + off := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(ValAndOff(sc).canAdd(off)) { + if !(sc.canAdd32(off)) { break } v.reset(OpAMD64MOVLstoreconst) - v.AuxInt = ValAndOff(sc).add(off) - v.Aux = s + v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off)) + v.Aux = symToAux(s) v.AddArg2(ptr, mem) return true } @@ -13278,45 +13278,45 @@ func rewriteValueAMD64_OpAMD64MOVQatomicload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (MOVQatomicload [off1+off2] {sym} ptr mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDQconst { break } - off2 := v_0.AuxInt + off2 := 
auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64MOVQatomicload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVQatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVQatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + // result: (MOVQatomicload [off1+off2] {mergeSymTyped(sym1, sym2)} ptr mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpAMD64LEAQ { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } v.reset(OpAMD64MOVQatomicload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(ptr, mem) return true } @@ -13334,16 +13334,16 @@ func rewriteValueAMD64_OpAMD64MOVQf2i(v *Value) bool { break } u := v_0.Type - off := v_0.AuxInt - sym := v_0.Aux + off := auxIntToInt32(v_0.AuxInt) + sym := auxToSym(v_0.Aux) if !(t.Size() == u.Size()) { break } b = b.Func.Entry v0 := b.NewValue0(v.Pos, OpArg, t) v.copyOf(v0) - v0.AuxInt = off - v0.Aux = sym + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) return true } return false @@ -13360,16 +13360,16 @@ func rewriteValueAMD64_OpAMD64MOVQi2f(v *Value) bool { break } u := v_0.Type - off := v_0.AuxInt - sym := v_0.Aux + off := auxIntToInt32(v_0.AuxInt) + sym := auxToSym(v_0.Aux) if !(t.Size() == u.Size()) { break } b = b.Func.Entry v0 := b.NewValue0(v.Pos, OpArg, t) v.copyOf(v0) - 
v0.AuxInt = off - v0.Aux = sym + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) return true } return false @@ -13443,55 +13443,55 @@ func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool { return true } // match: (MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem) - // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) - // result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem) + // cond: canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) + // result: (MOVQload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpAMD64LEAL { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] mem := v_1 - if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { break } v.reset(OpAMD64MOVQload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(base, mem) return true } // match: (MOVQload [off1] {sym} (ADDLconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (MOVQload [off1+off2] {sym} ptr mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDLconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64MOVQload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVQload [off] {sym} ptr (MOVSDstore [off] {sym} ptr val _)) // result: (MOVQf2i val) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) 
ptr := v_0 - if v_1.Op != OpAMD64MOVSDstore || v_1.AuxInt != off || v_1.Aux != sym { + if v_1.Op != OpAMD64MOVSDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym { break } val := v_1.Args[1] @@ -13588,47 +13588,47 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { return true } // match: (MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) - // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) - // result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + // cond: canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) + // result: (MOVQstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpAMD64LEAL { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] val := v_1 mem := v_2 - if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { break } v.reset(OpAMD64MOVQstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(base, val, mem) return true } // match: (MOVQstore [off1] {sym} (ADDLconst [off2] ptr) val mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (MOVQstore [off1+off2] {sym} ptr val mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDLconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64MOVQstore) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(ptr, val, mem) return true } @@ -13636,11 +13636,11 @@ func 
rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { // cond: y.Uses==1 && clobber(y) // result: (ADDQmodify [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 y := v_1 - if y.Op != OpAMD64ADDQload || y.AuxInt != off || y.Aux != sym { + if y.Op != OpAMD64ADDQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym { break } mem := y.Args[2] @@ -13649,8 +13649,8 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { break } v.reset(OpAMD64ADDQmodify) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } @@ -13658,11 +13658,11 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { // cond: y.Uses==1 && clobber(y) // result: (ANDQmodify [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 y := v_1 - if y.Op != OpAMD64ANDQload || y.AuxInt != off || y.Aux != sym { + if y.Op != OpAMD64ANDQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym { break } mem := y.Args[2] @@ -13671,8 +13671,8 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { break } v.reset(OpAMD64ANDQmodify) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } @@ -13680,11 +13680,11 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { // cond: y.Uses==1 && clobber(y) // result: (ORQmodify [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 y := v_1 - if y.Op != OpAMD64ORQload || y.AuxInt != off || y.Aux != sym { + if y.Op != OpAMD64ORQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym { break } mem := y.Args[2] @@ -13693,8 +13693,8 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { break } v.reset(OpAMD64ORQmodify) - v.AuxInt = off - v.Aux = sym + v.AuxInt = 
int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } @@ -13702,11 +13702,11 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { // cond: y.Uses==1 && clobber(y) // result: (XORQmodify [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 y := v_1 - if y.Op != OpAMD64XORQload || y.AuxInt != off || y.Aux != sym { + if y.Op != OpAMD64XORQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym { break } mem := y.Args[2] @@ -13715,8 +13715,8 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { break } v.reset(OpAMD64XORQmodify) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } @@ -13724,8 +13724,8 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) // result: (ADDQmodify [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 y := v_1 if y.Op != OpAMD64ADDQ { @@ -13736,7 +13736,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { y_1 := y.Args[1] for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { l := y_0 - if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { + if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { continue } mem := l.Args[1] @@ -13748,8 +13748,8 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { continue } v.reset(OpAMD64ADDQmodify) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } @@ -13759,8 +13759,8 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) // result: (SUBQmodify [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 y := v_1 if y.Op != 
OpAMD64SUBQ { @@ -13768,7 +13768,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { } x := y.Args[1] l := y.Args[0] - if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { + if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { break } mem := l.Args[1] @@ -13776,8 +13776,8 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { break } v.reset(OpAMD64SUBQmodify) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } @@ -13785,8 +13785,8 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) // result: (ANDQmodify [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 y := v_1 if y.Op != OpAMD64ANDQ { @@ -13797,7 +13797,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { y_1 := y.Args[1] for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { l := y_0 - if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { + if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { continue } mem := l.Args[1] @@ -13809,8 +13809,8 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { continue } v.reset(OpAMD64ANDQmodify) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } @@ -13820,8 +13820,8 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) // result: (ORQmodify [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 y := v_1 if y.Op != OpAMD64ORQ { @@ -13832,7 +13832,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { y_1 := y.Args[1] for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { l := y_0 - if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux 
!= sym { + if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { continue } mem := l.Args[1] @@ -13844,8 +13844,8 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { continue } v.reset(OpAMD64ORQmodify) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } @@ -13855,8 +13855,8 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) // result: (XORQmodify [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 y := v_1 if y.Op != OpAMD64XORQ { @@ -13867,7 +13867,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { y_1 := y.Args[1] for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 { l := y_0 - if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { + if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { continue } mem := l.Args[1] @@ -13879,8 +13879,8 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { continue } v.reset(OpAMD64XORQmodify) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } @@ -13890,8 +13890,8 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) // result: (BTCQmodify [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 y := v_1 if y.Op != OpAMD64BTCQ { @@ -13899,7 +13899,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { } x := y.Args[1] l := y.Args[0] - if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { + if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { break } mem := l.Args[1] @@ -13907,8 +13907,8 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { break } 
v.reset(OpAMD64BTCQmodify) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } @@ -13916,8 +13916,8 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) // result: (BTRQmodify [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 y := v_1 if y.Op != OpAMD64BTRQ { @@ -13925,7 +13925,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { } x := y.Args[1] l := y.Args[0] - if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { + if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { break } mem := l.Args[1] @@ -13933,8 +13933,8 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { break } v.reset(OpAMD64BTRQmodify) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } @@ -13942,8 +13942,8 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { // cond: y.Uses==1 && l.Uses==1 && clobber(y, l) // result: (BTSQmodify [off] {sym} ptr x mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 y := v_1 if y.Op != OpAMD64BTSQ { @@ -13951,7 +13951,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { } x := y.Args[1] l := y.Args[0] - if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { + if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { break } mem := l.Args[1] @@ -13959,205 +13959,205 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { break } v.reset(OpAMD64BTSQmodify) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } // match: (MOVQstore [off] {sym} ptr a:(ADDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) - // cond: isSamePtr(ptr, ptr2) && a.Uses == 
1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l, a) - // result: (ADDQconstmodify {sym} [makeValAndOff(c,off)] ptr mem) + // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a) + // result: (ADDQconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 a := v_1 if a.Op != OpAMD64ADDQconst { break } - c := a.AuxInt + c := auxIntToInt32(a.AuxInt) l := a.Args[0] - if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { + if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { break } mem := l.Args[1] ptr2 := l.Args[0] - if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l, a)) { + if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) { break } v.reset(OpAMD64ADDQconstmodify) - v.AuxInt = makeValAndOff(c, off) - v.Aux = sym + v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off)) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVQstore [off] {sym} ptr a:(ANDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) - // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l, a) - // result: (ANDQconstmodify {sym} [makeValAndOff(c,off)] ptr mem) + // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a) + // result: (ANDQconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 a := v_1 if a.Op != OpAMD64ANDQconst { break } - c := a.AuxInt + c := auxIntToInt32(a.AuxInt) l := a.Args[0] - if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { + if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) 
!= off || auxToSym(l.Aux) != sym { break } mem := l.Args[1] ptr2 := l.Args[0] - if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l, a)) { + if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) { break } v.reset(OpAMD64ANDQconstmodify) - v.AuxInt = makeValAndOff(c, off) - v.Aux = sym + v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off)) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVQstore [off] {sym} ptr a:(ORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) - // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l, a) - // result: (ORQconstmodify {sym} [makeValAndOff(c,off)] ptr mem) + // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a) + // result: (ORQconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 a := v_1 if a.Op != OpAMD64ORQconst { break } - c := a.AuxInt + c := auxIntToInt32(a.AuxInt) l := a.Args[0] - if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { + if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { break } mem := l.Args[1] ptr2 := l.Args[0] - if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l, a)) { + if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) { break } v.reset(OpAMD64ORQconstmodify) - v.AuxInt = makeValAndOff(c, off) - v.Aux = sym + v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off)) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVQstore [off] {sym} ptr a:(XORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) - // cond: isSamePtr(ptr, ptr2) && 
a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l, a) - // result: (XORQconstmodify {sym} [makeValAndOff(c,off)] ptr mem) + // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a) + // result: (XORQconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 a := v_1 if a.Op != OpAMD64XORQconst { break } - c := a.AuxInt + c := auxIntToInt32(a.AuxInt) l := a.Args[0] - if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { + if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { break } mem := l.Args[1] ptr2 := l.Args[0] - if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l, a)) { + if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) { break } v.reset(OpAMD64XORQconstmodify) - v.AuxInt = makeValAndOff(c, off) - v.Aux = sym + v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off)) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVQstore [off] {sym} ptr a:(BTCQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) - // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l, a) - // result: (BTCQconstmodify {sym} [makeValAndOff(c,off)] ptr mem) + // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a) + // result: (BTCQconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 a := v_1 if a.Op != OpAMD64BTCQconst { break } - c := a.AuxInt + c := auxIntToInt8(a.AuxInt) l := a.Args[0] - if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { + if l.Op != OpAMD64MOVQload || 
auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { break } mem := l.Args[1] ptr2 := l.Args[0] - if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l, a)) { + if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) { break } v.reset(OpAMD64BTCQconstmodify) - v.AuxInt = makeValAndOff(c, off) - v.Aux = sym + v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off)) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVQstore [off] {sym} ptr a:(BTRQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) - // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l, a) - // result: (BTRQconstmodify {sym} [makeValAndOff(c,off)] ptr mem) + // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a) + // result: (BTRQconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 a := v_1 if a.Op != OpAMD64BTRQconst { break } - c := a.AuxInt + c := auxIntToInt8(a.AuxInt) l := a.Args[0] - if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { + if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { break } mem := l.Args[1] ptr2 := l.Args[0] - if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l, a)) { + if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) { break } v.reset(OpAMD64BTRQconstmodify) - v.AuxInt = makeValAndOff(c, off) - v.Aux = sym + v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off)) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVQstore [off] {sym} ptr a:(BTSQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) - // 
cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l, a) - // result: (BTSQconstmodify {sym} [makeValAndOff(c,off)] ptr mem) + // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c),int64(off)) && clobber(l, a) + // result: (BTSQconstmodify {sym} [makeValAndOff32(int32(c),off)] ptr mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 a := v_1 if a.Op != OpAMD64BTSQconst { break } - c := a.AuxInt + c := auxIntToInt8(a.AuxInt) l := a.Args[0] - if l.Op != OpAMD64MOVQload || l.AuxInt != off || l.Aux != sym { + if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym { break } mem := l.Args[1] ptr2 := l.Args[0] - if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l, a)) { + if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(int64(c), int64(off)) && clobber(l, a)) { break } v.reset(OpAMD64BTSQconstmodify) - v.AuxInt = makeValAndOff(c, off) - v.Aux = sym + v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(c), off)) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } // match: (MOVQstore [off] {sym} ptr (MOVQf2i val) mem) // result: (MOVSDstore [off] {sym} ptr val mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64MOVQf2i { break @@ -14165,8 +14165,8 @@ func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { val := v_1.Args[0] mem := v_2 v.reset(OpAMD64MOVSDstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, val, mem) return true } @@ -14248,45 +14248,45 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool { return true } // match: (MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) - // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) - // result: 
(MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) + // cond: canMergeSym(sym1, sym2) && sc.canAdd32(off) + // result: (MOVQstoreconst [sc.addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem) for { - sc := v.AuxInt - sym1 := v.Aux + sc := auxIntToValAndOff(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpAMD64LEAL { break } - off := v_0.AuxInt - sym2 := v_0.Aux + off := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 - if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { + if !(canMergeSym(sym1, sym2) && sc.canAdd32(off)) { break } v.reset(OpAMD64MOVQstoreconst) - v.AuxInt = ValAndOff(sc).add(off) - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off)) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(ptr, mem) return true } // match: (MOVQstoreconst [sc] {s} (ADDLconst [off] ptr) mem) - // cond: ValAndOff(sc).canAdd(off) - // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) + // cond: sc.canAdd32(off) + // result: (MOVQstoreconst [sc.addOffset32(off)] {s} ptr mem) for { - sc := v.AuxInt - s := v.Aux + sc := auxIntToValAndOff(v.AuxInt) + s := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDLconst { break } - off := v_0.AuxInt + off := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(ValAndOff(sc).canAdd(off)) { + if !(sc.canAdd32(off)) { break } v.reset(OpAMD64MOVQstoreconst) - v.AuxInt = ValAndOff(sc).add(off) - v.Aux = s + v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off)) + v.Aux = symToAux(s) v.AddArg2(ptr, mem) return true } @@ -14341,10 +14341,10 @@ func rewriteValueAMD64_OpAMD64MOVSDload(v *Value) bool { // match: (MOVSDload [off] {sym} ptr (MOVQstore [off] {sym} ptr val _)) // result: (MOVQi2f val) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 - if v_1.Op != OpAMD64MOVQstore || v_1.AuxInt != off || v_1.Aux != sym { + if v_1.Op != OpAMD64MOVQstore || auxIntToInt32(v_1.AuxInt) != 
off || auxToSym(v_1.Aux) != sym { break } val := v_1.Args[1] @@ -14409,8 +14409,8 @@ func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value) bool { // match: (MOVSDstore [off] {sym} ptr (MOVQi2f val) mem) // result: (MOVQstore [off] {sym} ptr val mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64MOVQi2f { break @@ -14418,8 +14418,8 @@ func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value) bool { val := v_1.Args[0] mem := v_2 v.reset(OpAMD64MOVQstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, val, mem) return true } @@ -14474,10 +14474,10 @@ func rewriteValueAMD64_OpAMD64MOVSSload(v *Value) bool { // match: (MOVSSload [off] {sym} ptr (MOVLstore [off] {sym} ptr val _)) // result: (MOVLi2f val) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 - if v_1.Op != OpAMD64MOVLstore || v_1.AuxInt != off || v_1.Aux != sym { + if v_1.Op != OpAMD64MOVLstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym { break } val := v_1.Args[1] @@ -14542,8 +14542,8 @@ func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value) bool { // match: (MOVSSstore [off] {sym} ptr (MOVLi2f val) mem) // result: (MOVLstore [off] {sym} ptr val mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64MOVLi2f { break @@ -14551,8 +14551,8 @@ func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value) bool { val := v_1.Args[0] mem := v_2 v.reset(OpAMD64MOVLstore) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, val, mem) return true } @@ -14909,45 +14909,45 @@ func rewriteValueAMD64_OpAMD64MOVWload(v *Value) bool { return true } // match: (MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem) - // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) - // result: (MOVWload [off1+off2] 
{mergeSym(sym1,sym2)} base mem) + // cond: canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) + // result: (MOVWload [off1+off2] {mergeSymTyped(sym1,sym2)} base mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpAMD64LEAL { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] mem := v_1 - if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { break } v.reset(OpAMD64MOVWload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(base, mem) return true } // match: (MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (MOVWload [off1+off2] {sym} ptr mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDLconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64MOVWload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } @@ -15345,47 +15345,47 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { return true } // match: (MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) - // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) - // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + // cond: canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) + // result: (MOVWstore [off1+off2] {mergeSymTyped(sym1,sym2)} base val mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != 
OpAMD64LEAL { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) base := v_0.Args[0] val := v_1 mem := v_2 - if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { break } v.reset(OpAMD64MOVWstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(base, val, mem) return true } // match: (MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (MOVWstore [off1+off2] {sym} ptr val mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDLconst { break } - off2 := v_0.AuxInt + off2 := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64MOVWstore) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(ptr, val, mem) return true } @@ -15488,45 +15488,45 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool { return true } // match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) - // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) - // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) + // cond: canMergeSym(sym1, sym2) && sc.canAdd32(off) + // result: (MOVWstoreconst [sc.addOffset32(off)] {mergeSymTyped(sym1, sym2)} ptr mem) for { - sc := v.AuxInt - sym1 := v.Aux + sc := auxIntToValAndOff(v.AuxInt) + sym1 := auxToSym(v.Aux) if v_0.Op != OpAMD64LEAL { break } - off := v_0.AuxInt - sym2 := v_0.Aux + off := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 - if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { + if 
!(canMergeSym(sym1, sym2) && sc.canAdd32(off)) { break } v.reset(OpAMD64MOVWstoreconst) - v.AuxInt = ValAndOff(sc).add(off) - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off)) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg2(ptr, mem) return true } // match: (MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem) - // cond: ValAndOff(sc).canAdd(off) - // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) + // cond: sc.canAdd32(off) + // result: (MOVWstoreconst [sc.addOffset32(off)] {s} ptr mem) for { - sc := v.AuxInt - s := v.Aux + sc := auxIntToValAndOff(v.AuxInt) + s := auxToSym(v.Aux) if v_0.Op != OpAMD64ADDLconst { break } - off := v_0.AuxInt + off := auxIntToInt32(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(ValAndOff(sc).canAdd(off)) { + if !(sc.canAdd32(off)) { break } v.reset(OpAMD64MOVWstoreconst) - v.AuxInt = ValAndOff(sc).add(off) - v.Aux = s + v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off)) + v.Aux = symToAux(s) v.AddArg2(ptr, mem) return true } @@ -16448,16 +16448,16 @@ func rewriteValueAMD64_OpAMD64MULSD(v *Value) bool { if l.Op != OpAMD64MOVSDload { continue } - off := l.AuxInt - sym := l.Aux + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] if !(canMergeLoadClobber(v, l, x) && clobber(l)) { continue } v.reset(OpAMD64MULSDload) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) return true } @@ -16519,11 +16519,11 @@ func rewriteValueAMD64_OpAMD64MULSDload(v *Value) bool { // match: (MULSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) // result: (MULSD x (MOVQi2f y)) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) x := v_0 ptr := v_1 - if v_2.Op != OpAMD64MOVQstore || v_2.AuxInt != off || v_2.Aux != sym { + if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym { break } y := v_2.Args[1] @@ -16551,16 
+16551,16 @@ func rewriteValueAMD64_OpAMD64MULSS(v *Value) bool { if l.Op != OpAMD64MOVSSload { continue } - off := l.AuxInt - sym := l.Aux + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] if !(canMergeLoadClobber(v, l, x) && clobber(l)) { continue } v.reset(OpAMD64MULSSload) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) return true } @@ -16622,11 +16622,11 @@ func rewriteValueAMD64_OpAMD64MULSSload(v *Value) bool { // match: (MULSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) // result: (MULSS x (MOVLi2f y)) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) x := v_0 ptr := v_1 - if v_2.Op != OpAMD64MOVLstore || v_2.AuxInt != off || v_2.Aux != sym { + if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym { break } y := v_2.Args[1] @@ -18124,16 +18124,16 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { if l.Op != OpAMD64MOVLload { continue } - off := l.AuxInt - sym := l.Aux + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] if !(canMergeLoadClobber(v, l, x) && clobber(l)) { continue } v.reset(OpAMD64ORLload) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) return true } @@ -18325,11 +18325,11 @@ func rewriteValueAMD64_OpAMD64ORLload(v *Value) bool { // match: ( ORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) // result: ( ORL x (MOVLf2i y)) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) x := v_0 ptr := v_1 - if v_2.Op != OpAMD64MOVSSstore || v_2.AuxInt != off || v_2.Aux != sym { + if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym { break } y := v_2.Args[1] @@ -19751,16 +19751,16 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { if l.Op != 
OpAMD64MOVQload { continue } - off := l.AuxInt - sym := l.Aux + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] if !(canMergeLoadClobber(v, l, x) && clobber(l)) { continue } v.reset(OpAMD64ORQload) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) return true } @@ -19952,11 +19952,11 @@ func rewriteValueAMD64_OpAMD64ORQload(v *Value) bool { // match: ( ORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) // result: ( ORQ x (MOVQf2i y)) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) x := v_0 ptr := v_1 - if v_2.Op != OpAMD64MOVSDstore || v_2.AuxInt != off || v_2.Aux != sym { + if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym { break } y := v_2.Args[1] @@ -26380,16 +26380,16 @@ func rewriteValueAMD64_OpAMD64SUBL(v *Value) bool { if l.Op != OpAMD64MOVLload { break } - off := l.AuxInt - sym := l.Aux + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(OpAMD64SUBLload) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) return true } @@ -26474,11 +26474,11 @@ func rewriteValueAMD64_OpAMD64SUBLload(v *Value) bool { // match: (SUBLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) // result: (SUBL x (MOVLf2i y)) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) x := v_0 ptr := v_1 - if v_2.Op != OpAMD64MOVSSstore || v_2.AuxInt != off || v_2.Aux != sym { + if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym { break } y := v_2.Args[1] @@ -26604,16 +26604,16 @@ func rewriteValueAMD64_OpAMD64SUBQ(v *Value) bool { if l.Op != OpAMD64MOVQload { break } - off := l.AuxInt - sym := l.Aux + off := auxIntToInt32(l.AuxInt) + sym 
:= auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(OpAMD64SUBQload) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) return true } @@ -26753,11 +26753,11 @@ func rewriteValueAMD64_OpAMD64SUBQload(v *Value) bool { // match: (SUBQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) // result: (SUBQ x (MOVQf2i y)) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) x := v_0 ptr := v_1 - if v_2.Op != OpAMD64MOVSDstore || v_2.AuxInt != off || v_2.Aux != sym { + if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym { break } y := v_2.Args[1] @@ -26835,16 +26835,16 @@ func rewriteValueAMD64_OpAMD64SUBSD(v *Value) bool { if l.Op != OpAMD64MOVSDload { break } - off := l.AuxInt - sym := l.Aux + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(OpAMD64SUBSDload) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) return true } @@ -26904,11 +26904,11 @@ func rewriteValueAMD64_OpAMD64SUBSDload(v *Value) bool { // match: (SUBSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) // result: (SUBSD x (MOVQi2f y)) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) x := v_0 ptr := v_1 - if v_2.Op != OpAMD64MOVQstore || v_2.AuxInt != off || v_2.Aux != sym { + if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym { break } y := v_2.Args[1] @@ -26935,16 +26935,16 @@ func rewriteValueAMD64_OpAMD64SUBSS(v *Value) bool { if l.Op != OpAMD64MOVSSload { break } - off := l.AuxInt - sym := l.Aux + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] if !(canMergeLoadClobber(v, l, x) && 
clobber(l)) { break } v.reset(OpAMD64SUBSSload) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) return true } @@ -27004,11 +27004,11 @@ func rewriteValueAMD64_OpAMD64SUBSSload(v *Value) bool { // match: (SUBSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) // result: (SUBSS x (MOVLi2f y)) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) x := v_0 ptr := v_1 - if v_2.Op != OpAMD64MOVLstore || v_2.AuxInt != off || v_2.Aux != sym { + if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym { break } y := v_2.Args[1] @@ -27370,24 +27370,24 @@ func rewriteValueAMD64_OpAMD64XADDLlock(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (XADDLlock [off1+off2] {sym} val ptr mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) val := v_0 if v_1.Op != OpAMD64ADDQconst { break } - off2 := v_1.AuxInt + off2 := auxIntToInt32(v_1.AuxInt) ptr := v_1.Args[0] mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64XADDLlock) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(val, ptr, mem) return true } @@ -27398,24 +27398,24 @@ func rewriteValueAMD64_OpAMD64XADDQlock(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (XADDQlock [off1+off2] {sym} val ptr mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) val := v_0 if v_1.Op != OpAMD64ADDQconst { break } - off2 := v_1.AuxInt + off2 := auxIntToInt32(v_1.AuxInt) ptr := 
v_1.Args[0] mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64XADDQlock) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(val, ptr, mem) return true } @@ -27426,47 +27426,47 @@ func rewriteValueAMD64_OpAMD64XCHGL(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (XCHGL [off1+off2] {sym} val ptr mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) val := v_0 if v_1.Op != OpAMD64ADDQconst { break } - off2 := v_1.AuxInt + off2 := auxIntToInt32(v_1.AuxInt) ptr := v_1.Args[0] mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64XCHGL) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(val, ptr, mem) return true } // match: (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB - // result: (XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB + // result: (XCHGL [off1+off2] {mergeSymTyped(sym1,sym2)} val ptr mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) val := v_0 if v_1.Op != OpAMD64LEAQ { break } - off2 := v_1.AuxInt - sym2 := v_1.Aux + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) ptr := v_1.Args[0] mem := v_2 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) { break } v.reset(OpAMD64XCHGL) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + 
v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(val, ptr, mem) return true } @@ -27477,47 +27477,47 @@ func rewriteValueAMD64_OpAMD64XCHGQ(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) + // cond: is32Bit(int64(off1)+int64(off2)) // result: (XCHGQ [off1+off2] {sym} val ptr mem) for { - off1 := v.AuxInt - sym := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) val := v_0 if v_1.Op != OpAMD64ADDQconst { break } - off2 := v_1.AuxInt + off2 := auxIntToInt32(v_1.AuxInt) ptr := v_1.Args[0] mem := v_2 - if !(is32Bit(off1 + off2)) { + if !(is32Bit(int64(off1) + int64(off2))) { break } v.reset(OpAMD64XCHGQ) - v.AuxInt = off1 + off2 - v.Aux = sym + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(sym) v.AddArg3(val, ptr, mem) return true } // match: (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB - // result: (XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem) + // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB + // result: (XCHGQ [off1+off2] {mergeSymTyped(sym1,sym2)} val ptr mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) val := v_0 if v_1.Op != OpAMD64LEAQ { break } - off2 := v_1.AuxInt - sym2 := v_1.Aux + off2 := auxIntToInt32(v_1.AuxInt) + sym2 := auxToSym(v_1.Aux) ptr := v_1.Args[0] mem := v_2 - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) { + if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) { break } v.reset(OpAMD64XCHGQ) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSymTyped(sym1, sym2)) v.AddArg3(val, ptr, mem) return true } @@ -27676,16 +27676,16 @@ func rewriteValueAMD64_OpAMD64XORL(v *Value) bool { if l.Op != OpAMD64MOVLload { 
continue } - off := l.AuxInt - sym := l.Aux + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] if !(canMergeLoadClobber(v, l, x) && clobber(l)) { continue } v.reset(OpAMD64XORLload) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) return true } @@ -27975,11 +27975,11 @@ func rewriteValueAMD64_OpAMD64XORLload(v *Value) bool { // match: (XORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) // result: (XORL x (MOVLf2i y)) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) x := v_0 ptr := v_1 - if v_2.Op != OpAMD64MOVSSstore || v_2.AuxInt != off || v_2.Aux != sym { + if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym { break } y := v_2.Args[1] @@ -28152,16 +28152,16 @@ func rewriteValueAMD64_OpAMD64XORQ(v *Value) bool { if l.Op != OpAMD64MOVQload { continue } - off := l.AuxInt - sym := l.Aux + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] if !(canMergeLoadClobber(v, l, x) && clobber(l)) { continue } v.reset(OpAMD64XORQload) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(x, ptr, mem) return true } @@ -28343,11 +28343,11 @@ func rewriteValueAMD64_OpAMD64XORQload(v *Value) bool { // match: (XORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) // result: (XORQ x (MOVQf2i y)) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) x := v_0 ptr := v_1 - if v_2.Op != OpAMD64MOVSDstore || v_2.AuxInt != off || v_2.Aux != sym { + if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym { break } y := v_2.Args[1] From 79e681d2a291142aa0ac8297229e182b2d1a78ac Mon Sep 17 00:00:00 2001 From: "Chen.Zhidong" Date: Tue, 29 Sep 2020 09:05:41 +0000 Subject: [PATCH 054/281] crypto/tls: make config.Clone return nil 
if the source is nil Fixes #40565 Change-Id: I13a67be193f8cd68df02b8729529e627a73d364b GitHub-Last-Rev: b03d2c04fd88db909b40dfd7bd08fe13d8994ab9 GitHub-Pull-Request: golang/go#40566 Reviewed-on: https://go-review.googlesource.com/c/go/+/246637 Run-TryBot: Emmanuel Odeke TryBot-Result: Go Bot Reviewed-by: Filippo Valsorda Reviewed-by: Emmanuel Odeke Trust: Emmanuel Odeke --- src/crypto/tls/common.go | 5 ++++- src/crypto/tls/tls_test.go | 7 +++++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/src/crypto/tls/common.go b/src/crypto/tls/common.go index e8d009137a..e4f18bf5eb 100644 --- a/src/crypto/tls/common.go +++ b/src/crypto/tls/common.go @@ -727,9 +727,12 @@ func (c *Config) ticketKeyFromBytes(b [32]byte) (key ticketKey) { // ticket, and the lifetime we set for tickets we send. const maxSessionTicketLifetime = 7 * 24 * time.Hour -// Clone returns a shallow clone of c. It is safe to clone a Config that is +// Clone returns a shallow clone of c or nil if c is nil. It is safe to clone a Config that is // being used concurrently by a TLS client or server. func (c *Config) Clone() *Config { + if c == nil { + return nil + } c.mutex.RLock() defer c.mutex.RUnlock() return &Config{ diff --git a/src/crypto/tls/tls_test.go b/src/crypto/tls/tls_test.go index 334bfc411a..4ab8a430ba 100644 --- a/src/crypto/tls/tls_test.go +++ b/src/crypto/tls/tls_test.go @@ -841,6 +841,13 @@ func TestCloneNonFuncFields(t *testing.T) { } } +func TestCloneNilConfig(t *testing.T) { + var config *Config + if cc := config.Clone(); cc != nil { + t.Fatalf("Clone with nil should return nil, got: %+v", cc) + } +} + // changeImplConn is a net.Conn which can change its Write and Close // methods. 
type changeImplConn struct { From a28edbfca276307b228eb4b154bc2d137a3cba4a Mon Sep 17 00:00:00 2001 From: KimMachineGun Date: Tue, 29 Sep 2020 10:03:38 +0000 Subject: [PATCH 055/281] encoding/asn1: error instead of panic on invalid value to Unmarshal Changes Unmarshal to return an error, instead of panicking when its value is nil or not a pointer. This change matches the behavior of other encoding packages like json. Fixes #41509. Change-Id: I92c3af3a960144566e4c2b55d00c3a6fe477c8d5 GitHub-Last-Rev: c668b6e4ad826f84542c2675eb31ccfb010c45bb GitHub-Pull-Request: golang/go#41485 Reviewed-on: https://go-review.googlesource.com/c/go/+/255881 Run-TryBot: Emmanuel Odeke TryBot-Result: Go Bot Reviewed-by: Emmanuel Odeke Reviewed-by: Filippo Valsorda Trust: Emmanuel Odeke --- src/encoding/asn1/asn1.go | 27 ++++++++++++++++++++++++--- src/encoding/asn1/asn1_test.go | 23 +++++++++++++++++++++++ 2 files changed, 47 insertions(+), 3 deletions(-) diff --git a/src/encoding/asn1/asn1.go b/src/encoding/asn1/asn1.go index d809dde278..fa3d4e327b 100644 --- a/src/encoding/asn1/asn1.go +++ b/src/encoding/asn1/asn1.go @@ -1035,7 +1035,8 @@ func setDefaultValue(v reflect.Value, params fieldParameters) (ok bool) { // Unmarshal parses the DER-encoded ASN.1 data structure b // and uses the reflect package to fill in an arbitrary value pointed at by val. // Because Unmarshal uses the reflect package, the structs -// being written to must use upper case field names. +// being written to must use upper case field names. If val +// is nil or not a pointer, Unmarshal returns an error. // // After parsing b, any bytes that were leftover and not used to fill // val will be returned in rest. When parsing a SEQUENCE into a struct, @@ -1095,11 +1096,31 @@ func Unmarshal(b []byte, val interface{}) (rest []byte, err error) { return UnmarshalWithParams(b, val, "") } +// An invalidUnmarshalError describes an invalid argument passed to Unmarshal. +// (The argument to Unmarshal must be a non-nil pointer.) 
+type invalidUnmarshalError struct { + Type reflect.Type +} + +func (e *invalidUnmarshalError) Error() string { + if e.Type == nil { + return "asn1: Unmarshal recipient value is nil" + } + + if e.Type.Kind() != reflect.Ptr { + return "asn1: Unmarshal recipient value is non-pointer " + e.Type.String() + } + return "asn1: Unmarshal recipient value is nil " + e.Type.String() +} + // UnmarshalWithParams allows field parameters to be specified for the // top-level element. The form of the params is the same as the field tags. func UnmarshalWithParams(b []byte, val interface{}, params string) (rest []byte, err error) { - v := reflect.ValueOf(val).Elem() - offset, err := parseField(v, b, 0, parseFieldParameters(params)) + v := reflect.ValueOf(val) + if v.Kind() != reflect.Ptr || v.IsNil() { + return nil, &invalidUnmarshalError{reflect.TypeOf(val)} + } + offset, err := parseField(v.Elem(), b, 0, parseFieldParameters(params)) if err != nil { return nil, err } diff --git a/src/encoding/asn1/asn1_test.go b/src/encoding/asn1/asn1_test.go index 8daae97faa..8985538468 100644 --- a/src/encoding/asn1/asn1_test.go +++ b/src/encoding/asn1/asn1_test.go @@ -518,6 +518,29 @@ func TestUnmarshal(t *testing.T) { } } +func TestUnmarshalWithNilOrNonPointer(t *testing.T) { + tests := []struct { + b []byte + v interface{} + want string + }{ + {b: []byte{0x05, 0x00}, v: nil, want: "asn1: Unmarshal recipient value is nil"}, + {b: []byte{0x05, 0x00}, v: RawValue{}, want: "asn1: Unmarshal recipient value is non-pointer asn1.RawValue"}, + {b: []byte{0x05, 0x00}, v: (*RawValue)(nil), want: "asn1: Unmarshal recipient value is nil *asn1.RawValue"}, + } + + for _, test := range tests { + _, err := Unmarshal(test.b, test.v) + if err == nil { + t.Errorf("Unmarshal expecting error, got nil") + continue + } + if g, w := err.Error(), test.want; g != w { + t.Errorf("InvalidUnmarshalError mismatch\nGot: %q\nWant: %q", g, w) + } + } +} + type Certificate struct { TBSCertificate TBSCertificate 
SignatureAlgorithm AlgorithmIdentifier From 815a5e29f434281c9ae3740ad43aadd4464ae15e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Daniel=20Mart=C3=AD?= Date: Sat, 26 Sep 2020 21:00:37 +0100 Subject: [PATCH 056/281] cmd/go: fix doc math for build cache hashing MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The function takes five 24-bit chunks from the hash, resulting in 120 bits. When base-64 encoded, this results in a 20-byte output string, which is confirmed by "var dst [chunks * 4]byte". It seems like the documented math could have been written for a previous implementation with shorter hashes, using 4 chunks instead of 5, as then the math checks out. Since this code has been working correctly for over three years, let's fix the documentation to reflect the code. Change-Id: I9e908e6bafb5dcc1e3c23915e2b6c8843ed444d6 Reviewed-on: https://go-review.googlesource.com/c/go/+/257646 Trust: Daniel Martí Run-TryBot: Daniel Martí TryBot-Result: Go Bot Reviewed-by: Bryan C. Mills --- src/cmd/go/internal/work/buildid.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/cmd/go/internal/work/buildid.go b/src/cmd/go/internal/work/buildid.go index 6613b6fe3f..a3c9b1a2c1 100644 --- a/src/cmd/go/internal/work/buildid.go +++ b/src/cmd/go/internal/work/buildid.go @@ -110,15 +110,15 @@ func contentID(buildID string) string { // hashToString converts the hash h to a string to be recorded // in package archives and binaries as part of the build ID. -// We use the first 96 bits of the hash and encode it in base64, -// resulting in a 16-byte string. Because this is only used for +// We use the first 120 bits of the hash (5 chunks of 24 bits each) and encode +// it in base64, resulting in a 20-byte string. 
Because this is only used for // detecting the need to rebuild installed files (not for lookups -// in the object file cache), 96 bits are sufficient to drive the +// in the object file cache), 120 bits are sufficient to drive the // probability of a false "do not need to rebuild" decision to effectively zero. // We embed two different hashes in archives and four in binaries, -// so cutting to 16 bytes is a significant savings when build IDs are displayed. -// (16*4+3 = 67 bytes compared to 64*4+3 = 259 bytes for the -// more straightforward option of printing the entire h in hex). +// so cutting to 20 bytes is a significant savings when build IDs are displayed. +// (20*4+3 = 83 bytes compared to 64*4+3 = 259 bytes for the +// more straightforward option of printing the entire h in base64). func hashToString(h [cache.HashSize]byte) string { const b64 = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_" const chunks = 5 @@ -425,7 +425,7 @@ func (b *Builder) useCache(a *Action, actionHash cache.ActionID, target string) // It's important that the overall buildID be unlikely verging on impossible // to appear in the output by chance, but that should be taken care of by // the actionID half; if it also appeared in the input that would be like an - // engineered 96-bit partial SHA256 collision. + // engineered 120-bit partial SHA256 collision. a.actionID = actionHash actionID := hashToString(actionHash) if a.json != nil { From 0163bdae685c1b060f8108ac5af13ea6374555b1 Mon Sep 17 00:00:00 2001 From: Cherry Zhang Date: Mon, 28 Sep 2020 17:43:44 -0400 Subject: [PATCH 057/281] crypto/tls: fix TestLinkerGC test A test that checks if "tls.(*Conn)" appears in any symbol's name. tls.Conn is a type, so the string "tls.(*Conn)" can only appear in the name of a method of Conn. But the test code doesn't use any of the methods. Not sure why this needs to be live. In particular, the linker is now able to prune all methods of Conn. Remove this requirement. 
In fact, just drop the only_conn test case, as simply allocating a type doesn't necessarily bring anything live. Change-Id: I754291b75d38e1465b5291b4dea20806615d21b3 Reviewed-on: https://go-review.googlesource.com/c/go/+/257973 Trust: Cherry Zhang Trust: Tobias Klauser Run-TryBot: Cherry Zhang TryBot-Result: Go Bot Reviewed-by: Filippo Valsorda Reviewed-by: Than McIntosh Reviewed-by: Jeremy Faller --- src/crypto/tls/link_test.go | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/src/crypto/tls/link_test.go b/src/crypto/tls/link_test.go index c1fb57e70e..8224216b5c 100644 --- a/src/crypto/tls/link_test.go +++ b/src/crypto/tls/link_test.go @@ -41,19 +41,6 @@ func main() {} "type.crypto/tls.serverHandshakeState", }, }, - { - name: "only_conn", - program: `package main -import "crypto/tls" -var c = new(tls.Conn) -func main() {} -`, - want: []string{"tls.(*Conn)"}, - bad: []string{ - "type.crypto/tls.clientHandshakeState", - "type.crypto/tls.serverHandshakeState", - }, - }, { name: "client_and_server", program: `package main From 0ab72ed020d0c320b5007987abdf40677db34cfc Mon Sep 17 00:00:00 2001 From: Cherry Zhang Date: Thu, 24 Sep 2020 16:54:31 -0400 Subject: [PATCH 058/281] cmd/link, runtime: use a sentinel value for unreachable method In the method table, the method's code pointer is stored as an offset from the start of the text section. Currently, for an unreachable method, the offset is left as 0, which resolves to the start of the text section at run time. It is possible that there is valid code there. If an unreachable method is ever reached (due to a compiler or linker bug), the execution will jump to a wrong location but may continue to run for a while, until it fails with a seemingly unrelated error. This CL changes it to use -1 for unreachable method instead. At run time this will resolve to an invalid address, which makes it fail immediately if it is ever reached. 
Change-Id: Ied6ed7f1833c4f3b991fdf55d8810d70d307b2e6 Reviewed-on: https://go-review.googlesource.com/c/go/+/257203 Trust: Cherry Zhang Run-TryBot: Cherry Zhang Reviewed-by: Than McIntosh --- src/cmd/link/internal/ld/data.go | 6 ++++++ src/runtime/type.go | 9 ++++++++- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/src/cmd/link/internal/ld/data.go b/src/cmd/link/internal/ld/data.go index 0a3418bfc9..ed948d51b1 100644 --- a/src/cmd/link/internal/ld/data.go +++ b/src/cmd/link/internal/ld/data.go @@ -390,6 +390,12 @@ func (st *relocSymState) relocsym(s loader.Sym, P []byte) { o = ldr.SymValue(rs) + r.Add() - int64(ldr.SymSect(rs).Vaddr) case objabi.R_WEAKADDROFF, objabi.R_METHODOFF: if !ldr.AttrReachable(rs) { + if rt == objabi.R_METHODOFF { + // Set it to a sentinel value. The runtime knows this is not pointing to + // anything valid. + o = -1 + break + } continue } fallthrough diff --git a/src/runtime/type.go b/src/runtime/type.go index 52b6cb30b4..81455f3532 100644 --- a/src/runtime/type.go +++ b/src/runtime/type.go @@ -217,7 +217,9 @@ func (t *_type) nameOff(off nameOff) name { } func resolveTypeOff(ptrInModule unsafe.Pointer, off typeOff) *_type { - if off == 0 { + if off == 0 || off == -1 { + // -1 is the sentinel value for unreachable code. + // See cmd/link/internal/ld/data.go:relocsym. return nil } base := uintptr(ptrInModule) @@ -257,6 +259,11 @@ func (t *_type) typeOff(off typeOff) *_type { } func (t *_type) textOff(off textOff) unsafe.Pointer { + if off == -1 { + // -1 is the sentinel value for unreachable code. + // See cmd/link/internal/ld/data.go:relocsym. 
+ return unsafe.Pointer(^uintptr(0)) + } base := uintptr(unsafe.Pointer(t)) var md *moduledata for next := &firstmoduledata; next != nil; next = next.next { From 39a426d35615da2ef594cd72ea5de54a543305e1 Mon Sep 17 00:00:00 2001 From: Alberto Donizetti Date: Sun, 27 Sep 2020 19:13:24 +0200 Subject: [PATCH 059/281] cmd/compile: convert last amd64 rules to typed aux Passes gotip build -toolexec 'toolstash -cmp' -a std Change-Id: I196d3bdef4a4b650534a4ddd3053e65e0846fdcc Reviewed-on: https://go-review.googlesource.com/c/go/+/257898 Reviewed-by: Keith Randall Trust: Alberto Donizetti --- src/cmd/compile/internal/ssa/gen/AMD64.rules | 45 +-- src/cmd/compile/internal/ssa/rewriteAMD64.go | 286 ++++++++++--------- 2 files changed, 173 insertions(+), 158 deletions(-) diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules index bfe1b456d4..408678f054 100644 --- a/src/cmd/compile/internal/ssa/gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules @@ -1278,9 +1278,12 @@ (CMPBconst (ANDLconst _ [m]) [n]) && 0 <= m && int8(m) < n => (FlagLT_ULT) // TESTQ c c sets flags like CMPQ c 0. -(TEST(Q|L)const [c] (MOV(Q|L)const [c])) && c == 0 -> (FlagEQ) -(TEST(Q|L)const [c] (MOV(Q|L)const [c])) && c < 0 -> (FlagLT_UGT) -(TEST(Q|L)const [c] (MOV(Q|L)const [c])) && c > 0 -> (FlagGT_UGT) +(TESTQconst [c] (MOVQconst [d])) && int64(c) == d && c == 0 => (FlagEQ) +(TESTLconst [c] (MOVLconst [c])) && c == 0 => (FlagEQ) +(TESTQconst [c] (MOVQconst [d])) && int64(c) == d && c < 0 => (FlagLT_UGT) +(TESTLconst [c] (MOVLconst [c])) && c < 0 => (FlagLT_UGT) +(TESTQconst [c] (MOVQconst [d])) && int64(c) == d && c > 0 => (FlagGT_UGT) +(TESTLconst [c] (MOVLconst [c])) && c > 0 => (FlagGT_UGT) // TODO: DIVxU also. @@ -2157,13 +2160,13 @@ // If possible, put a rematerializeable value in the first argument slot, // to reduce the odds that another value will be have to spilled // specifically to free up AX. 
-(HMUL(Q|L) x y) && !x.rematerializeable() && y.rematerializeable() -> (HMUL(Q|L) y x) -(HMUL(Q|L)U x y) && !x.rematerializeable() && y.rematerializeable() -> (HMUL(Q|L)U y x) +(HMUL(Q|L) x y) && !x.rematerializeable() && y.rematerializeable() => (HMUL(Q|L) y x) +(HMUL(Q|L)U x y) && !x.rematerializeable() && y.rematerializeable() => (HMUL(Q|L)U y x) // Fold loads into compares // Note: these may be undone by the flagalloc pass. -(CMP(Q|L|W|B) l:(MOV(Q|L|W|B)load {sym} [off] ptr mem) x) && canMergeLoad(v, l) && clobber(l) -> (CMP(Q|L|W|B)load {sym} [off] ptr x mem) -(CMP(Q|L|W|B) x l:(MOV(Q|L|W|B)load {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) -> (InvertFlags (CMP(Q|L|W|B)load {sym} [off] ptr x mem)) +(CMP(Q|L|W|B) l:(MOV(Q|L|W|B)load {sym} [off] ptr mem) x) && canMergeLoad(v, l) && clobber(l) => (CMP(Q|L|W|B)load {sym} [off] ptr x mem) +(CMP(Q|L|W|B) x l:(MOV(Q|L|W|B)load {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (InvertFlags (CMP(Q|L|W|B)load {sym} [off] ptr x mem)) (CMP(Q|L)const l:(MOV(Q|L)load {sym} [off] ptr mem) [c]) && l.Uses == 1 @@ -2174,22 +2177,22 @@ && clobber(l) => @l.Block (CMP(W|B)constload {sym} [makeValAndOff32(int32(c),off)] ptr mem) -(CMPQload {sym} [off] ptr (MOVQconst [c]) mem) && validValAndOff(c,off) -> (CMPQconstload {sym} [makeValAndOff(c,off)] ptr mem) -(CMPLload {sym} [off] ptr (MOVLconst [c]) mem) && validValAndOff(c,off) -> (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem) -(CMPWload {sym} [off] ptr (MOVLconst [c]) mem) && validValAndOff(int64(int16(c)),off) -> (CMPWconstload {sym} [makeValAndOff(int64(int16(c)),off)] ptr mem) -(CMPBload {sym} [off] ptr (MOVLconst [c]) mem) && validValAndOff(int64(int8(c)),off) -> (CMPBconstload {sym} [makeValAndOff(int64(int8(c)),off)] ptr mem) +(CMPQload {sym} [off] ptr (MOVQconst [c]) mem) && validValAndOff(c,int64(off)) => (CMPQconstload {sym} [makeValAndOff64(c,int64(off))] ptr mem) +(CMPLload {sym} [off] ptr (MOVLconst [c]) mem) && 
validValAndOff(int64(c),int64(off)) => (CMPLconstload {sym} [makeValAndOff32(c,off)] ptr mem) +(CMPWload {sym} [off] ptr (MOVLconst [c]) mem) && validValAndOff(int64(int16(c)),int64(off)) => (CMPWconstload {sym} [makeValAndOff32(int32(int16(c)),off)] ptr mem) +(CMPBload {sym} [off] ptr (MOVLconst [c]) mem) && validValAndOff(int64(int8(c)),int64(off)) => (CMPBconstload {sym} [makeValAndOff32(int32(int8(c)),off)] ptr mem) (TEST(Q|L|W|B) l:(MOV(Q|L|W|B)load {sym} [off] ptr mem) l2) && l == l2 && l.Uses == 2 - && validValAndOff(0,off) - && clobber(l) -> - @l.Block (CMP(Q|L|W|B)constload {sym} [makeValAndOff(0,off)] ptr mem) + && validValAndOff(0, int64(off)) + && clobber(l) => + @l.Block (CMP(Q|L|W|B)constload {sym} [makeValAndOff64(0, int64(off))] ptr mem) -(MOVBload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVLconst [int64(read8(sym, off))]) -(MOVWload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVLconst [int64(read16(sym, off, config.ctxt.Arch.ByteOrder))]) -(MOVLload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVQconst [int64(read32(sym, off, config.ctxt.Arch.ByteOrder))]) -(MOVQload [off] {sym} (SB) _) && symIsRO(sym) -> (MOVQconst [int64(read64(sym, off, config.ctxt.Arch.ByteOrder))]) -(MOVOstore [dstOff] {dstSym} ptr (MOVOload [srcOff] {srcSym} (SB) _) mem) && symIsRO(srcSym) -> - (MOVQstore [dstOff+8] {dstSym} ptr (MOVQconst [int64(read64(srcSym, srcOff+8, config.ctxt.Arch.ByteOrder))]) - (MOVQstore [dstOff] {dstSym} ptr (MOVQconst [int64(read64(srcSym, srcOff, config.ctxt.Arch.ByteOrder))]) mem)) +(MOVBload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read8(sym, int64(off)))]) +(MOVWload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))]) +(MOVLload [off] {sym} (SB) _) && symIsRO(sym) => (MOVQconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))]) +(MOVQload [off] {sym} (SB) _) && symIsRO(sym) => (MOVQconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))]) +(MOVOstore 
[dstOff] {dstSym} ptr (MOVOload [srcOff] {srcSym} (SB) _) mem) && symIsRO(srcSym) => + (MOVQstore [dstOff+8] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder))]) + (MOVQstore [dstOff] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder))]) mem)) diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index bb25561507..3d7eb8c9a4 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -6767,8 +6767,8 @@ func rewriteValueAMD64_OpAMD64CMPB(v *Value) bool { if l.Op != OpAMD64MOVBload { break } - off := l.AuxInt - sym := l.Aux + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] x := v_1 @@ -6776,8 +6776,8 @@ func rewriteValueAMD64_OpAMD64CMPB(v *Value) bool { break } v.reset(OpAMD64CMPBload) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } @@ -6790,8 +6790,8 @@ func rewriteValueAMD64_OpAMD64CMPB(v *Value) bool { if l.Op != OpAMD64MOVBload { break } - off := l.AuxInt - sym := l.Aux + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] if !(canMergeLoad(v, l) && clobber(l)) { @@ -6799,8 +6799,8 @@ func rewriteValueAMD64_OpAMD64CMPB(v *Value) bool { } v.reset(OpAMD64InvertFlags) v0 := b.NewValue0(l.Pos, OpAMD64CMPBload, types.TypeFlags) - v0.AuxInt = off - v0.Aux = sym + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) v0.AddArg3(ptr, x, mem) v.AddArg(v0) return true @@ -7076,23 +7076,23 @@ func rewriteValueAMD64_OpAMD64CMPBload(v *Value) bool { return true } // match: (CMPBload {sym} [off] ptr (MOVLconst [c]) mem) - // cond: validValAndOff(int64(int8(c)),off) - // result: (CMPBconstload {sym} [makeValAndOff(int64(int8(c)),off)] ptr mem) + // cond: validValAndOff(int64(int8(c)),int64(off)) + // result: (CMPBconstload {sym} 
[makeValAndOff32(int32(int8(c)),off)] ptr mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64MOVLconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) mem := v_2 - if !(validValAndOff(int64(int8(c)), off)) { + if !(validValAndOff(int64(int8(c)), int64(off))) { break } v.reset(OpAMD64CMPBconstload) - v.AuxInt = makeValAndOff(int64(int8(c)), off) - v.Aux = sym + v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(int8(c)), off)) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } @@ -7153,8 +7153,8 @@ func rewriteValueAMD64_OpAMD64CMPL(v *Value) bool { if l.Op != OpAMD64MOVLload { break } - off := l.AuxInt - sym := l.Aux + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] x := v_1 @@ -7162,8 +7162,8 @@ func rewriteValueAMD64_OpAMD64CMPL(v *Value) bool { break } v.reset(OpAMD64CMPLload) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } @@ -7176,8 +7176,8 @@ func rewriteValueAMD64_OpAMD64CMPL(v *Value) bool { if l.Op != OpAMD64MOVLload { break } - off := l.AuxInt - sym := l.Aux + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] if !(canMergeLoad(v, l) && clobber(l)) { @@ -7185,8 +7185,8 @@ func rewriteValueAMD64_OpAMD64CMPL(v *Value) bool { } v.reset(OpAMD64InvertFlags) v0 := b.NewValue0(l.Pos, OpAMD64CMPLload, types.TypeFlags) - v0.AuxInt = off - v0.Aux = sym + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) v0.AddArg3(ptr, x, mem) v.AddArg(v0) return true @@ -7477,23 +7477,23 @@ func rewriteValueAMD64_OpAMD64CMPLload(v *Value) bool { return true } // match: (CMPLload {sym} [off] ptr (MOVLconst [c]) mem) - // cond: validValAndOff(c,off) - // result: (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem) + // cond: validValAndOff(int64(c),int64(off)) + // result: (CMPLconstload {sym} [makeValAndOff32(c,off)] ptr 
mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64MOVLconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) mem := v_2 - if !(validValAndOff(c, off)) { + if !(validValAndOff(int64(c), int64(off))) { break } v.reset(OpAMD64CMPLconstload) - v.AuxInt = makeValAndOff(c, off) - v.Aux = sym + v.AuxInt = valAndOffToAuxInt(makeValAndOff32(c, off)) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } @@ -7652,8 +7652,8 @@ func rewriteValueAMD64_OpAMD64CMPQ(v *Value) bool { if l.Op != OpAMD64MOVQload { break } - off := l.AuxInt - sym := l.Aux + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] x := v_1 @@ -7661,8 +7661,8 @@ func rewriteValueAMD64_OpAMD64CMPQ(v *Value) bool { break } v.reset(OpAMD64CMPQload) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } @@ -7675,8 +7675,8 @@ func rewriteValueAMD64_OpAMD64CMPQ(v *Value) bool { if l.Op != OpAMD64MOVQload { break } - off := l.AuxInt - sym := l.Aux + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] if !(canMergeLoad(v, l) && clobber(l)) { @@ -7684,8 +7684,8 @@ func rewriteValueAMD64_OpAMD64CMPQ(v *Value) bool { } v.reset(OpAMD64InvertFlags) v0 := b.NewValue0(l.Pos, OpAMD64CMPQload, types.TypeFlags) - v0.AuxInt = off - v0.Aux = sym + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) v0.AddArg3(ptr, x, mem) v.AddArg(v0) return true @@ -8047,23 +8047,23 @@ func rewriteValueAMD64_OpAMD64CMPQload(v *Value) bool { return true } // match: (CMPQload {sym} [off] ptr (MOVQconst [c]) mem) - // cond: validValAndOff(c,off) - // result: (CMPQconstload {sym} [makeValAndOff(c,off)] ptr mem) + // cond: validValAndOff(c,int64(off)) + // result: (CMPQconstload {sym} [makeValAndOff64(c,int64(off))] ptr mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := 
auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64MOVQconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) mem := v_2 - if !(validValAndOff(c, off)) { + if !(validValAndOff(c, int64(off))) { break } v.reset(OpAMD64CMPQconstload) - v.AuxInt = makeValAndOff(c, off) - v.Aux = sym + v.AuxInt = valAndOffToAuxInt(makeValAndOff64(c, int64(off))) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } @@ -8124,8 +8124,8 @@ func rewriteValueAMD64_OpAMD64CMPW(v *Value) bool { if l.Op != OpAMD64MOVWload { break } - off := l.AuxInt - sym := l.Aux + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] x := v_1 @@ -8133,8 +8133,8 @@ func rewriteValueAMD64_OpAMD64CMPW(v *Value) bool { break } v.reset(OpAMD64CMPWload) - v.AuxInt = off - v.Aux = sym + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) v.AddArg3(ptr, x, mem) return true } @@ -8147,8 +8147,8 @@ func rewriteValueAMD64_OpAMD64CMPW(v *Value) bool { if l.Op != OpAMD64MOVWload { break } - off := l.AuxInt - sym := l.Aux + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] if !(canMergeLoad(v, l) && clobber(l)) { @@ -8156,8 +8156,8 @@ func rewriteValueAMD64_OpAMD64CMPW(v *Value) bool { } v.reset(OpAMD64InvertFlags) v0 := b.NewValue0(l.Pos, OpAMD64CMPWload, types.TypeFlags) - v0.AuxInt = off - v0.Aux = sym + v0.AuxInt = int32ToAuxInt(off) + v0.Aux = symToAux(sym) v0.AddArg3(ptr, x, mem) v.AddArg(v0) return true @@ -8433,23 +8433,23 @@ func rewriteValueAMD64_OpAMD64CMPWload(v *Value) bool { return true } // match: (CMPWload {sym} [off] ptr (MOVLconst [c]) mem) - // cond: validValAndOff(int64(int16(c)),off) - // result: (CMPWconstload {sym} [makeValAndOff(int64(int16(c)),off)] ptr mem) + // cond: validValAndOff(int64(int16(c)),int64(off)) + // result: (CMPWconstload {sym} [makeValAndOff32(int32(int16(c)),off)] ptr mem) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) ptr := v_0 if v_1.Op 
!= OpAMD64MOVLconst { break } - c := v_1.AuxInt + c := auxIntToInt32(v_1.AuxInt) mem := v_2 - if !(validValAndOff(int64(int16(c)), off)) { + if !(validValAndOff(int64(int16(c)), int64(off))) { break } v.reset(OpAMD64CMPWconstload) - v.AuxInt = makeValAndOff(int64(int16(c)), off) - v.Aux = sym + v.AuxInt = valAndOffToAuxInt(makeValAndOff32(int32(int16(c)), off)) + v.Aux = symToAux(sym) v.AddArg2(ptr, mem) return true } @@ -10296,15 +10296,15 @@ func rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool { } // match: (MOVBload [off] {sym} (SB) _) // cond: symIsRO(sym) - // result: (MOVLconst [int64(read8(sym, off))]) + // result: (MOVLconst [int32(read8(sym, int64(off)))]) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpSB || !(symIsRO(sym)) { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = int64(read8(sym, off)) + v.AuxInt = int32ToAuxInt(int32(read8(sym, int64(off)))) return true } return false @@ -12124,15 +12124,15 @@ func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool { } // match: (MOVLload [off] {sym} (SB) _) // cond: symIsRO(sym) - // result: (MOVQconst [int64(read32(sym, off, config.ctxt.Arch.ByteOrder))]) + // result: (MOVQconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))]) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpSB || !(symIsRO(sym)) { break } v.reset(OpAMD64MOVQconst) - v.AuxInt = int64(read32(sym, off, config.ctxt.Arch.ByteOrder)) + v.AuxInt = int64ToAuxInt(int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))) return true } return false @@ -13240,16 +13240,16 @@ func rewriteValueAMD64_OpAMD64MOVOstore(v *Value) bool { } // match: (MOVOstore [dstOff] {dstSym} ptr (MOVOload [srcOff] {srcSym} (SB) _) mem) // cond: symIsRO(srcSym) - // result: (MOVQstore [dstOff+8] {dstSym} ptr (MOVQconst [int64(read64(srcSym, srcOff+8, config.ctxt.Arch.ByteOrder))]) (MOVQstore [dstOff] {dstSym} ptr (MOVQconst 
[int64(read64(srcSym, srcOff, config.ctxt.Arch.ByteOrder))]) mem)) + // result: (MOVQstore [dstOff+8] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder))]) (MOVQstore [dstOff] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder))]) mem)) for { - dstOff := v.AuxInt - dstSym := v.Aux + dstOff := auxIntToInt32(v.AuxInt) + dstSym := auxToSym(v.Aux) ptr := v_0 if v_1.Op != OpAMD64MOVOload { break } - srcOff := v_1.AuxInt - srcSym := v_1.Aux + srcOff := auxIntToInt32(v_1.AuxInt) + srcSym := auxToSym(v_1.Aux) v_1_0 := v_1.Args[0] if v_1_0.Op != OpSB { break @@ -13259,15 +13259,15 @@ func rewriteValueAMD64_OpAMD64MOVOstore(v *Value) bool { break } v.reset(OpAMD64MOVQstore) - v.AuxInt = dstOff + 8 - v.Aux = dstSym + v.AuxInt = int32ToAuxInt(dstOff + 8) + v.Aux = symToAux(dstSym) v0 := b.NewValue0(v_1.Pos, OpAMD64MOVQconst, typ.UInt64) - v0.AuxInt = int64(read64(srcSym, srcOff+8, config.ctxt.Arch.ByteOrder)) + v0.AuxInt = int64ToAuxInt(int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder))) v1 := b.NewValue0(v_1.Pos, OpAMD64MOVQstore, types.TypeMem) - v1.AuxInt = dstOff - v1.Aux = dstSym + v1.AuxInt = int32ToAuxInt(dstOff) + v1.Aux = symToAux(dstSym) v2 := b.NewValue0(v_1.Pos, OpAMD64MOVQconst, typ.UInt64) - v2.AuxInt = int64(read64(srcSym, srcOff, config.ctxt.Arch.ByteOrder)) + v2.AuxInt = int64ToAuxInt(int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder))) v1.AddArg3(ptr, v2, mem) v.AddArg3(ptr, v0, v1) return true @@ -13504,15 +13504,15 @@ func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool { } // match: (MOVQload [off] {sym} (SB) _) // cond: symIsRO(sym) - // result: (MOVQconst [int64(read64(sym, off, config.ctxt.Arch.ByteOrder))]) + // result: (MOVQconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))]) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpSB || !(symIsRO(sym)) { break } 
v.reset(OpAMD64MOVQconst) - v.AuxInt = int64(read64(sym, off, config.ctxt.Arch.ByteOrder)) + v.AuxInt = int64ToAuxInt(int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))) return true } return false @@ -14953,15 +14953,15 @@ func rewriteValueAMD64_OpAMD64MOVWload(v *Value) bool { } // match: (MOVWload [off] {sym} (SB) _) // cond: symIsRO(sym) - // result: (MOVLconst [int64(read16(sym, off, config.ctxt.Arch.ByteOrder))]) + // result: (MOVLconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))]) for { - off := v.AuxInt - sym := v.Aux + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) if v_0.Op != OpSB || !(symIsRO(sym)) { break } v.reset(OpAMD64MOVLconst) - v.AuxInt = int64(read16(sym, off, config.ctxt.Arch.ByteOrder)) + v.AuxInt = int32ToAuxInt(int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))) return true } return false @@ -27044,27 +27044,27 @@ func rewriteValueAMD64_OpAMD64TESTB(v *Value) bool { break } // match: (TESTB l:(MOVBload {sym} [off] ptr mem) l2) - // cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l) - // result: @l.Block (CMPBconstload {sym} [makeValAndOff(0,off)] ptr mem) + // cond: l == l2 && l.Uses == 2 && validValAndOff(0, int64(off)) && clobber(l) + // result: @l.Block (CMPBconstload {sym} [makeValAndOff64(0, int64(off))] ptr mem) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { l := v_0 if l.Op != OpAMD64MOVBload { continue } - off := l.AuxInt - sym := l.Aux + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] l2 := v_1 - if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) { + if !(l == l2 && l.Uses == 2 && validValAndOff(0, int64(off)) && clobber(l)) { continue } b = l.Block v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags) v.copyOf(v0) - v0.AuxInt = makeValAndOff(0, off) - v0.Aux = sym + v0.AuxInt = valAndOffToAuxInt(makeValAndOff64(0, int64(off))) + v0.Aux = symToAux(sym) v0.AddArg2(ptr, mem) return true } @@ 
-27112,27 +27112,27 @@ func rewriteValueAMD64_OpAMD64TESTL(v *Value) bool { break } // match: (TESTL l:(MOVLload {sym} [off] ptr mem) l2) - // cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l) - // result: @l.Block (CMPLconstload {sym} [makeValAndOff(0,off)] ptr mem) + // cond: l == l2 && l.Uses == 2 && validValAndOff(0, int64(off)) && clobber(l) + // result: @l.Block (CMPLconstload {sym} [makeValAndOff64(0, int64(off))] ptr mem) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { l := v_0 if l.Op != OpAMD64MOVLload { continue } - off := l.AuxInt - sym := l.Aux + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] l2 := v_1 - if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) { + if !(l == l2 && l.Uses == 2 && validValAndOff(0, int64(off)) && clobber(l)) { continue } b = l.Block v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags) v.copyOf(v0) - v0.AuxInt = makeValAndOff(0, off) - v0.Aux = sym + v0.AuxInt = valAndOffToAuxInt(makeValAndOff64(0, int64(off))) + v0.Aux = symToAux(sym) v0.AddArg2(ptr, mem) return true } @@ -27146,8 +27146,8 @@ func rewriteValueAMD64_OpAMD64TESTLconst(v *Value) bool { // cond: c == 0 // result: (FlagEQ) for { - c := v.AuxInt - if v_0.Op != OpAMD64MOVLconst || v_0.AuxInt != c || !(c == 0) { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c == 0) { break } v.reset(OpAMD64FlagEQ) @@ -27157,8 +27157,8 @@ func rewriteValueAMD64_OpAMD64TESTLconst(v *Value) bool { // cond: c < 0 // result: (FlagLT_UGT) for { - c := v.AuxInt - if v_0.Op != OpAMD64MOVLconst || v_0.AuxInt != c || !(c < 0) { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c < 0) { break } v.reset(OpAMD64FlagLT_UGT) @@ -27168,8 +27168,8 @@ func rewriteValueAMD64_OpAMD64TESTLconst(v *Value) bool { // cond: c > 0 // result: (FlagGT_UGT) for { - c := v.AuxInt - if v_0.Op != 
OpAMD64MOVLconst || v_0.AuxInt != c || !(c > 0) { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c > 0) { break } v.reset(OpAMD64FlagGT_UGT) @@ -27217,27 +27217,27 @@ func rewriteValueAMD64_OpAMD64TESTQ(v *Value) bool { break } // match: (TESTQ l:(MOVQload {sym} [off] ptr mem) l2) - // cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l) - // result: @l.Block (CMPQconstload {sym} [makeValAndOff(0,off)] ptr mem) + // cond: l == l2 && l.Uses == 2 && validValAndOff(0, int64(off)) && clobber(l) + // result: @l.Block (CMPQconstload {sym} [makeValAndOff64(0, int64(off))] ptr mem) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { l := v_0 if l.Op != OpAMD64MOVQload { continue } - off := l.AuxInt - sym := l.Aux + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] l2 := v_1 - if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) { + if !(l == l2 && l.Uses == 2 && validValAndOff(0, int64(off)) && clobber(l)) { continue } b = l.Block v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags) v.copyOf(v0) - v0.AuxInt = makeValAndOff(0, off) - v0.Aux = sym + v0.AuxInt = valAndOffToAuxInt(makeValAndOff64(0, int64(off))) + v0.Aux = symToAux(sym) v0.AddArg2(ptr, mem) return true } @@ -27247,34 +27247,46 @@ func rewriteValueAMD64_OpAMD64TESTQ(v *Value) bool { } func rewriteValueAMD64_OpAMD64TESTQconst(v *Value) bool { v_0 := v.Args[0] - // match: (TESTQconst [c] (MOVQconst [c])) - // cond: c == 0 + // match: (TESTQconst [c] (MOVQconst [d])) + // cond: int64(c) == d && c == 0 // result: (FlagEQ) for { - c := v.AuxInt - if v_0.Op != OpAMD64MOVQconst || v_0.AuxInt != c || !(c == 0) { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64MOVQconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + if !(int64(c) == d && c == 0) { break } v.reset(OpAMD64FlagEQ) return true } - // match: (TESTQconst [c] (MOVQconst [c])) - // cond: c < 0 + // match: 
(TESTQconst [c] (MOVQconst [d])) + // cond: int64(c) == d && c < 0 // result: (FlagLT_UGT) for { - c := v.AuxInt - if v_0.Op != OpAMD64MOVQconst || v_0.AuxInt != c || !(c < 0) { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64MOVQconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + if !(int64(c) == d && c < 0) { break } v.reset(OpAMD64FlagLT_UGT) return true } - // match: (TESTQconst [c] (MOVQconst [c])) - // cond: c > 0 + // match: (TESTQconst [c] (MOVQconst [d])) + // cond: int64(c) == d && c > 0 // result: (FlagGT_UGT) for { - c := v.AuxInt - if v_0.Op != OpAMD64MOVQconst || v_0.AuxInt != c || !(c > 0) { + c := auxIntToInt32(v.AuxInt) + if v_0.Op != OpAMD64MOVQconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + if !(int64(c) == d && c > 0) { break } v.reset(OpAMD64FlagGT_UGT) @@ -27318,27 +27330,27 @@ func rewriteValueAMD64_OpAMD64TESTW(v *Value) bool { break } // match: (TESTW l:(MOVWload {sym} [off] ptr mem) l2) - // cond: l == l2 && l.Uses == 2 && validValAndOff(0,off) && clobber(l) - // result: @l.Block (CMPWconstload {sym} [makeValAndOff(0,off)] ptr mem) + // cond: l == l2 && l.Uses == 2 && validValAndOff(0, int64(off)) && clobber(l) + // result: @l.Block (CMPWconstload {sym} [makeValAndOff64(0, int64(off))] ptr mem) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { l := v_0 if l.Op != OpAMD64MOVWload { continue } - off := l.AuxInt - sym := l.Aux + off := auxIntToInt32(l.AuxInt) + sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] l2 := v_1 - if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) { + if !(l == l2 && l.Uses == 2 && validValAndOff(0, int64(off)) && clobber(l)) { continue } b = l.Block v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags) v.copyOf(v0) - v0.AuxInt = makeValAndOff(0, off) - v0.Aux = sym + v0.AuxInt = valAndOffToAuxInt(makeValAndOff64(0, int64(off))) + v0.Aux = symToAux(sym) v0.AddArg2(ptr, mem) return true } From 39dde09126be02f5f8c38ddf7590ae8f9825fcaa Mon Sep 17 00:00:00 
2001 From: Cherry Zhang Date: Mon, 21 Sep 2020 20:44:53 -0400 Subject: [PATCH 060/281] cmd/link: retain only used interface methods Currently, in the linker's deadcode pass, when an interface type is live, the linker thinks all its methods are live, and uses them to match methods on concrete types. The interface method may never be used, though. This CL changes it to only keep used interface methods, for matching concrete type methods. To do that, when an interface method is used, the compiler generates a mark relocation. The linker uses the marker relocations to mark used interface methods, and only the used ones. binary size before after cmd/compile 18887400 18812200 cmd/go 13470652 13470492 Change-Id: I3cfd9df4a53783330ba87735853f2a0ec3c42802 Reviewed-on: https://go-review.googlesource.com/c/go/+/256798 Trust: Cherry Zhang Reviewed-by: Than McIntosh Reviewed-by: Jeremy Faller --- src/cmd/compile/internal/gc/reflect.go | 19 +++++- src/cmd/compile/internal/gc/walk.go | 15 +++++ src/cmd/internal/objabi/reloctype.go | 6 ++ src/cmd/internal/objabi/reloctype_string.go | 67 ++++++++++--------- src/cmd/link/internal/ld/deadcode.go | 53 +++++++-------- src/cmd/link/internal/ld/deadcode_test.go | 1 + .../ld/testdata/deadcode/ifacemethod4.go | 23 +++++++ 7 files changed, 120 insertions(+), 64 deletions(-) create mode 100644 src/cmd/link/internal/ld/testdata/deadcode/ifacemethod4.go diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go index 49b2a0ed49..ae3e2f8e65 100644 --- a/src/cmd/compile/internal/gc/reflect.go +++ b/src/cmd/compile/internal/gc/reflect.go @@ -61,8 +61,9 @@ const ( MAXELEMSIZE = 128 ) -func structfieldSize() int { return 3 * Widthptr } // Sizeof(runtime.structfield{}) -func imethodSize() int { return 4 + 4 } // Sizeof(runtime.imethod{}) +func structfieldSize() int { return 3 * Widthptr } // Sizeof(runtime.structfield{}) +func imethodSize() int { return 4 + 4 } // Sizeof(runtime.imethod{}) +func commonSize() int { 
return 4*Widthptr + 8 + 8 } // Sizeof(runtime._type{}) func uncommonSize(t *types.Type) int { // Sizeof(runtime.uncommontype{}) if t.Sym == nil && len(methods(t)) == 0 { @@ -1422,6 +1423,20 @@ func dtypesym(t *types.Type) *obj.LSym { return lsym } +// ifaceMethodOffset returns the offset of the i-th method in the interface +// type descriptor, ityp. +func ifaceMethodOffset(ityp *types.Type, i int64) int64 { + // interface type descriptor layout is struct { + // _type // commonSize + // pkgpath // 1 word + // []imethod // 3 words (pointing to [...]imethod below) + // uncommontype // uncommonSize + // [...]imethod + // } + // The size of imethod is 8. + return int64(commonSize()+4*Widthptr+uncommonSize(ityp)) + i*8 +} + // for each itabEntry, gather the methods on // the concrete type that implement the interface func peekitabs() { diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index d238cc2f45..8e45059eab 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -565,6 +565,7 @@ opswitch: case OCALLINTER, OCALLFUNC, OCALLMETH: if n.Op == OCALLINTER { usemethod(n) + markUsedIfaceMethod(n) } if n.Op == OCALLFUNC && n.Left.Op == OCLOSURE { @@ -1630,6 +1631,20 @@ func markTypeUsedInInterface(t *types.Type, from *obj.LSym) { r.Type = objabi.R_USEIFACE } +// markUsedIfaceMethod marks that an interface method is used in the current +// function. n is OCALLINTER node. +func markUsedIfaceMethod(n *Node) { + ityp := n.Left.Left.Type + tsym := typenamesym(ityp).Linksym() + r := obj.Addrel(Curfn.Func.lsym) + r.Sym = tsym + // n.Left.Xoffset is the method index * Widthptr (the offset of code pointer + // in itab). + midx := n.Left.Xoffset / int64(Widthptr) + r.Add = ifaceMethodOffset(ityp, midx) + r.Type = objabi.R_USEIFACEMETHOD +} + // rtconvfn returns the parameter and result types that will be used by a // runtime function to convert from type src to type dst. 
The runtime function // name can be derived from the names of the returned types. diff --git a/src/cmd/internal/objabi/reloctype.go b/src/cmd/internal/objabi/reloctype.go index 1e328d659f..9e2e4a150a 100644 --- a/src/cmd/internal/objabi/reloctype.go +++ b/src/cmd/internal/objabi/reloctype.go @@ -94,6 +94,12 @@ const ( // This is a marker relocation (0-sized), for the linker's reachabililty // analysis. R_USEIFACE + // R_USEIFACEMETHOD marks an interface method that is used in the function + // this relocation is applied to. The target is an interface type descriptor. + // The addend is the offset of the method in the type descriptor. + // This is a marker relocation (0-sized), for the linker's reachabililty + // analysis. + R_USEIFACEMETHOD // R_METHODOFF resolves to a 32-bit offset from the beginning of the section // holding the data being relocated to the referenced symbol. // It is a variant of R_ADDROFF used when linking from the uncommonType of a diff --git a/src/cmd/internal/objabi/reloctype_string.go b/src/cmd/internal/objabi/reloctype_string.go index caf24eea58..01df4cce62 100644 --- a/src/cmd/internal/objabi/reloctype_string.go +++ b/src/cmd/internal/objabi/reloctype_string.go @@ -33,42 +33,43 @@ func _() { _ = x[R_USEFIELD-23] _ = x[R_USETYPE-24] _ = x[R_USEIFACE-25] - _ = x[R_METHODOFF-26] - _ = x[R_POWER_TOC-27] - _ = x[R_GOTPCREL-28] - _ = x[R_JMPMIPS-29] - _ = x[R_DWARFSECREF-30] - _ = x[R_DWARFFILEREF-31] - _ = x[R_ARM64_TLS_LE-32] - _ = x[R_ARM64_TLS_IE-33] - _ = x[R_ARM64_GOTPCREL-34] - _ = x[R_ARM64_GOT-35] - _ = x[R_ARM64_PCREL-36] - _ = x[R_ARM64_LDST8-37] - _ = x[R_ARM64_LDST32-38] - _ = x[R_ARM64_LDST64-39] - _ = x[R_ARM64_LDST128-40] - _ = x[R_POWER_TLS_LE-41] - _ = x[R_POWER_TLS_IE-42] - _ = x[R_POWER_TLS-43] - _ = x[R_ADDRPOWER_DS-44] - _ = x[R_ADDRPOWER_GOT-45] - _ = x[R_ADDRPOWER_PCREL-46] - _ = x[R_ADDRPOWER_TOCREL-47] - _ = x[R_ADDRPOWER_TOCREL_DS-48] - _ = x[R_RISCV_PCREL_ITYPE-49] - _ = x[R_RISCV_PCREL_STYPE-50] - _ = 
x[R_PCRELDBL-51] - _ = x[R_ADDRMIPSU-52] - _ = x[R_ADDRMIPSTLS-53] - _ = x[R_ADDRCUOFF-54] - _ = x[R_WASMIMPORT-55] - _ = x[R_XCOFFREF-56] + _ = x[R_USEIFACEMETHOD-26] + _ = x[R_METHODOFF-27] + _ = x[R_POWER_TOC-28] + _ = x[R_GOTPCREL-29] + _ = x[R_JMPMIPS-30] + _ = x[R_DWARFSECREF-31] + _ = x[R_DWARFFILEREF-32] + _ = x[R_ARM64_TLS_LE-33] + _ = x[R_ARM64_TLS_IE-34] + _ = x[R_ARM64_GOTPCREL-35] + _ = x[R_ARM64_GOT-36] + _ = x[R_ARM64_PCREL-37] + _ = x[R_ARM64_LDST8-38] + _ = x[R_ARM64_LDST32-39] + _ = x[R_ARM64_LDST64-40] + _ = x[R_ARM64_LDST128-41] + _ = x[R_POWER_TLS_LE-42] + _ = x[R_POWER_TLS_IE-43] + _ = x[R_POWER_TLS-44] + _ = x[R_ADDRPOWER_DS-45] + _ = x[R_ADDRPOWER_GOT-46] + _ = x[R_ADDRPOWER_PCREL-47] + _ = x[R_ADDRPOWER_TOCREL-48] + _ = x[R_ADDRPOWER_TOCREL_DS-49] + _ = x[R_RISCV_PCREL_ITYPE-50] + _ = x[R_RISCV_PCREL_STYPE-51] + _ = x[R_PCRELDBL-52] + _ = x[R_ADDRMIPSU-53] + _ = x[R_ADDRMIPSTLS-54] + _ = x[R_ADDRCUOFF-55] + _ = x[R_WASMIMPORT-56] + _ = x[R_XCOFFREF-57] } -const _RelocType_name = "R_ADDRR_ADDRPOWERR_ADDRARM64R_ADDRMIPSR_ADDROFFR_WEAKADDROFFR_SIZER_CALLR_CALLARMR_CALLARM64R_CALLINDR_CALLPOWERR_CALLMIPSR_CALLRISCVR_CONSTR_PCRELR_TLS_LER_TLS_IER_GOTOFFR_PLT0R_PLT1R_PLT2R_USEFIELDR_USETYPER_USEIFACER_METHODOFFR_POWER_TOCR_GOTPCRELR_JMPMIPSR_DWARFSECREFR_DWARFFILEREFR_ARM64_TLS_LER_ARM64_TLS_IER_ARM64_GOTPCRELR_ARM64_GOTR_ARM64_PCRELR_ARM64_LDST8R_ARM64_LDST32R_ARM64_LDST64R_ARM64_LDST128R_POWER_TLS_LER_POWER_TLS_IER_POWER_TLSR_ADDRPOWER_DSR_ADDRPOWER_GOTR_ADDRPOWER_PCRELR_ADDRPOWER_TOCRELR_ADDRPOWER_TOCREL_DSR_RISCV_PCREL_ITYPER_RISCV_PCREL_STYPER_PCRELDBLR_ADDRMIPSUR_ADDRMIPSTLSR_ADDRCUOFFR_WASMIMPORTR_XCOFFREF" +const _RelocType_name = 
"R_ADDRR_ADDRPOWERR_ADDRARM64R_ADDRMIPSR_ADDROFFR_WEAKADDROFFR_SIZER_CALLR_CALLARMR_CALLARM64R_CALLINDR_CALLPOWERR_CALLMIPSR_CALLRISCVR_CONSTR_PCRELR_TLS_LER_TLS_IER_GOTOFFR_PLT0R_PLT1R_PLT2R_USEFIELDR_USETYPER_USEIFACER_USEIFACEMETHODR_METHODOFFR_POWER_TOCR_GOTPCRELR_JMPMIPSR_DWARFSECREFR_DWARFFILEREFR_ARM64_TLS_LER_ARM64_TLS_IER_ARM64_GOTPCRELR_ARM64_GOTR_ARM64_PCRELR_ARM64_LDST8R_ARM64_LDST32R_ARM64_LDST64R_ARM64_LDST128R_POWER_TLS_LER_POWER_TLS_IER_POWER_TLSR_ADDRPOWER_DSR_ADDRPOWER_GOTR_ADDRPOWER_PCRELR_ADDRPOWER_TOCRELR_ADDRPOWER_TOCREL_DSR_RISCV_PCREL_ITYPER_RISCV_PCREL_STYPER_PCRELDBLR_ADDRMIPSUR_ADDRMIPSTLSR_ADDRCUOFFR_WASMIMPORTR_XCOFFREF" -var _RelocType_index = [...]uint16{0, 6, 17, 28, 38, 47, 60, 66, 72, 81, 92, 101, 112, 122, 133, 140, 147, 155, 163, 171, 177, 183, 189, 199, 208, 218, 229, 240, 250, 259, 272, 286, 300, 314, 330, 341, 354, 367, 381, 395, 410, 424, 438, 449, 463, 478, 495, 513, 534, 553, 572, 582, 593, 606, 617, 629, 639} +var _RelocType_index = [...]uint16{0, 6, 17, 28, 38, 47, 60, 66, 72, 81, 92, 101, 112, 122, 133, 140, 147, 155, 163, 171, 177, 183, 189, 199, 208, 218, 234, 245, 256, 266, 275, 288, 302, 316, 330, 346, 357, 370, 383, 397, 411, 426, 440, 454, 465, 479, 494, 511, 529, 550, 569, 588, 598, 609, 622, 633, 645, 655} func (i RelocType) String() string { i -= 1 diff --git a/src/cmd/link/internal/ld/deadcode.go b/src/cmd/link/internal/ld/deadcode.go index 816a23b9a7..74d61fa495 100644 --- a/src/cmd/link/internal/ld/deadcode.go +++ b/src/cmd/link/internal/ld/deadcode.go @@ -106,25 +106,16 @@ func (d *deadcodePass) flood() { if isgotype { usedInIface = d.ldr.AttrUsedInIface(symIdx) - p := d.ldr.Data(symIdx) - if len(p) != 0 && decodetypeKind(d.ctxt.Arch, p)&kindMask == kindInterface { - for _, sig := range d.decodeIfaceMethods(d.ldr, d.ctxt.Arch, symIdx, &relocs) { - if d.ctxt.Debugvlog > 1 { - d.ctxt.Logf("reached iface method: %v\n", sig) - } - d.ifaceMethod[sig] = true - } - } } methods = methods[:0] for i := 0; i < 
relocs.Count(); i++ { r := relocs.At(i) t := r.Type() - if t == objabi.R_WEAKADDROFF { + switch t { + case objabi.R_WEAKADDROFF: continue - } - if t == objabi.R_METHODOFF { + case objabi.R_METHODOFF: if i+2 >= relocs.Count() { panic("expect three consecutive R_METHODOFF relocs") } @@ -146,14 +137,12 @@ func (d *deadcodePass) flood() { } i += 2 continue - } - if t == objabi.R_USETYPE { + case objabi.R_USETYPE: // type symbol used for DWARF. we need to load the symbol but it may not // be otherwise reachable in the program. // do nothing for now as we still load all type symbols. continue - } - if t == objabi.R_USEIFACE { + case objabi.R_USEIFACE: // R_USEIFACE is a marker relocation that tells the linker the type is // converted to an interface, i.e. should have UsedInIface set. See the // comment below for why we need to unset the Reachable bit and re-mark it. @@ -166,6 +155,18 @@ func (d *deadcodePass) flood() { } } continue + case objabi.R_USEIFACEMETHOD: + // R_USEIFACEMETHOD is a marker relocation that marks an interface + // method as used. + rs := r.Sym() + if d.ldr.SymType(rs) != sym.SDYNIMPORT { // don't decode DYNIMPORT symbol (we'll mark all exported methods anyway) + m := d.decodeIfaceMethod(d.ldr, d.ctxt.Arch, rs, r.Add()) + if d.ctxt.Debugvlog > 1 { + d.ctxt.Logf("reached iface method: %v\n", m) + } + d.ifaceMethod[m] = true + } + continue } rs := r.Sym() if isgotype && usedInIface && d.ldr.IsGoType(rs) && !d.ldr.AttrUsedInIface(rs) { @@ -378,23 +379,17 @@ func (d *deadcodePass) decodeMethodSig(ldr *loader.Loader, arch *sys.Arch, symId return methods } -func (d *deadcodePass) decodeIfaceMethods(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym, relocs *loader.Relocs) []methodsig { +// Decode the method of interface type symbol symIdx at offset off. 
+func (d *deadcodePass) decodeIfaceMethod(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym, off int64) methodsig { p := ldr.Data(symIdx) if decodetypeKind(arch, p)&kindMask != kindInterface { panic(fmt.Sprintf("symbol %q is not an interface", ldr.SymName(symIdx))) } - rel := decodeReloc(ldr, symIdx, relocs, int32(commonsize(arch)+arch.PtrSize)) - s := rel.Sym() - if s == 0 { - return nil - } - if s != symIdx { - panic(fmt.Sprintf("imethod slice pointer in %q leads to a different symbol", ldr.SymName(symIdx))) - } - off := int(rel.Add()) // array of reflect.imethod values - numMethods := int(decodetypeIfaceMethodCount(arch, p)) - sizeofIMethod := 4 + 4 - return d.decodeMethodSig(ldr, arch, symIdx, relocs, off, sizeofIMethod, numMethods) + relocs := ldr.Relocs(symIdx) + var m methodsig + m.name = decodetypeName(ldr, symIdx, &relocs, int(off)) + m.typ = decodeRelocSym(ldr, symIdx, &relocs, int32(off+4)) + return m } func (d *deadcodePass) decodetypeMethods(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym, relocs *loader.Relocs) []methodsig { diff --git a/src/cmd/link/internal/ld/deadcode_test.go b/src/cmd/link/internal/ld/deadcode_test.go index ab836dc8f8..b756091613 100644 --- a/src/cmd/link/internal/ld/deadcode_test.go +++ b/src/cmd/link/internal/ld/deadcode_test.go @@ -33,6 +33,7 @@ func TestDeadcode(t *testing.T) { {"ifacemethod", "", "main.T.M"}, {"ifacemethod2", "main.T.M", ""}, {"ifacemethod3", "main.S.M", ""}, + {"ifacemethod4", "", "main.T.M"}, } for _, test := range tests { test := test diff --git a/src/cmd/link/internal/ld/testdata/deadcode/ifacemethod4.go b/src/cmd/link/internal/ld/testdata/deadcode/ifacemethod4.go new file mode 100644 index 0000000000..52ee2e3d86 --- /dev/null +++ b/src/cmd/link/internal/ld/testdata/deadcode/ifacemethod4.go @@ -0,0 +1,23 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Test that a live type's method is not live even if +// it matches an interface method, as long as the interface +// method is not used. + +package main + +type T int + +func (T) M() {} + +type I interface{ M() } + +var p *T +var pp *I + +func main() { + p = new(T) // use type T + pp = new(I) // use type I +} From 66770f4b1de37d9c5c962edb2980a70102e09ec3 Mon Sep 17 00:00:00 2001 From: Cherry Zhang Date: Mon, 28 Sep 2020 13:10:30 -0400 Subject: [PATCH 061/281] cmd/compile: mark type namedata symbols content-addressable Type namedata symbols are for type/field/method names and package paths. We can use content-addressable symbol mechanism for them. Change-Id: I923fda17b7094c7a0e46aad7c450622eb3826294 Reviewed-on: https://go-review.googlesource.com/c/go/+/257960 Trust: Cherry Zhang Run-TryBot: Cherry Zhang TryBot-Result: Go Bot Reviewed-by: Than McIntosh Reviewed-by: Jeremy Faller --- src/cmd/compile/internal/gc/reflect.go | 2 ++ src/cmd/internal/obj/objfile.go | 7 +++++++ src/cmd/internal/obj/sym.go | 5 ++++- src/cmd/link/internal/loader/loader.go | 4 ++-- 4 files changed, 15 insertions(+), 3 deletions(-) diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go index ae3e2f8e65..21429af782 100644 --- a/src/cmd/compile/internal/gc/reflect.go +++ b/src/cmd/compile/internal/gc/reflect.go @@ -511,6 +511,7 @@ func dimportpath(p *types.Pkg) { s := Ctxt.Lookup("type..importpath." 
+ p.Prefix + ".") ot := dnameData(s, 0, str, "", nil, false) ggloblsym(s, int32(ot), obj.DUPOK|obj.RODATA) + s.Set(obj.AttrContentAddressable, true) p.Pathsym = s } @@ -638,6 +639,7 @@ func dname(name, tag string, pkg *types.Pkg, exported bool) *obj.LSym { } ot := dnameData(s, 0, name, tag, pkg, exported) ggloblsym(s, int32(ot), obj.DUPOK|obj.RODATA) + s.Set(obj.AttrContentAddressable, true) return s } diff --git a/src/cmd/internal/obj/objfile.go b/src/cmd/internal/obj/objfile.go index e4b9620568..186016c970 100644 --- a/src/cmd/internal/obj/objfile.go +++ b/src/cmd/internal/obj/objfile.go @@ -372,6 +372,13 @@ func contentHash64(s *LSym) goobj.Hash64Type { // hashed symbols. func (w *writer) contentHash(s *LSym) goobj.HashType { h := sha1.New() + // Don't dedup type symbols with others, as they are in a different + // section. + if strings.HasPrefix(s.Name, "type.") { + h.Write([]byte{'T'}) + } else { + h.Write([]byte{0}) + } // The compiler trims trailing zeros _sometimes_. We just do // it always. h.Write(bytes.TrimRight(s.P, "\x00")) diff --git a/src/cmd/internal/obj/sym.go b/src/cmd/internal/obj/sym.go index d58877ee15..e5d7b2cbfd 100644 --- a/src/cmd/internal/obj/sym.go +++ b/src/cmd/internal/obj/sym.go @@ -38,6 +38,7 @@ import ( "log" "math" "sort" + "strings" ) func Linknew(arch *LinkArch) *Link { @@ -204,7 +205,9 @@ func (ctxt *Link) NumberSyms() { // if Pkgpath is unknown, cannot hash symbols with relocations, as it // may reference named symbols whose names are not fully expanded. if s.ContentAddressable() && (ctxt.Pkgpath != "" || len(s.R) == 0) { - if len(s.P) <= 8 && len(s.R) == 0 { // we can use short hash only for symbols without relocations + if len(s.P) <= 8 && len(s.R) == 0 && !strings.HasPrefix(s.Name, "type.") { + // We can use short hash only for symbols without relocations. + // Don't use short hash for type symbols, as they need special handling. 
s.PkgIdx = goobj.PkgIdxHashed64 s.SymIdx = hashed64idx if hashed64idx != int32(len(ctxt.hashed64defs)) { diff --git a/src/cmd/link/internal/loader/loader.go b/src/cmd/link/internal/loader/loader.go index ea99233f67..4025edc23f 100644 --- a/src/cmd/link/internal/loader/loader.go +++ b/src/cmd/link/internal/loader/loader.go @@ -2153,11 +2153,11 @@ func (l *Loader) LoadNonpkgSyms(arch *sys.Arch) { l.npkgsyms = l.NSym() // Preallocate some space (a few hundreds KB) for some symbols. // As of Go 1.15, linking cmd/compile has ~8000 hashed64 symbols and - // ~13000 hashed symbols. + // ~27000 hashed symbols. st := loadState{ l: l, hashed64Syms: make(map[uint64]symAndSize, 10000), - hashedSyms: make(map[goobj.HashType]symAndSize, 15000), + hashedSyms: make(map[goobj.HashType]symAndSize, 30000), } for _, o := range l.objs[goObjStart:] { st.preloadSyms(o.r, hashed64Def) From 567ef8bd8e76bdbc00df6b1903976b89b34a84d8 Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Tue, 29 Sep 2020 15:13:17 +0200 Subject: [PATCH 062/281] runtime: correct arguments to pipe2 syscall in pipe on netbsd/arm64 Follow the implementation of pipe2 to pass arguments and handle return values. With this fix the runtime tests pass on netbsd/arm64 (tested using gomote on the netbsd-arm64-bsiegert builder). 
Update #30824 Change-Id: I346db68fc9dde8dc7f11351af05c1ad3105f4a32 Reviewed-on: https://go-review.googlesource.com/c/go/+/258000 Trust: Tobias Klauser Trust: Benny Siegert Run-TryBot: Tobias Klauser Reviewed-by: Benny Siegert TryBot-Result: Go Bot --- src/runtime/sys_netbsd_arm64.s | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/src/runtime/sys_netbsd_arm64.s b/src/runtime/sys_netbsd_arm64.s index f19a8b78f6..4d9b05478f 100644 --- a/src/runtime/sys_netbsd_arm64.s +++ b/src/runtime/sys_netbsd_arm64.s @@ -152,19 +152,13 @@ ok: // func pipe() (r, w int32, errno int32) TEXT runtime·pipe(SB),NOSPLIT|NOFRAME,$0-12 - MOVW $0, R0 + ADD $8, RSP, R0 + MOVW $0, R1 SVC $SYS_pipe2 BCC pipeok - MOVW $-1,R1 - MOVW R1, r+0(FP) - MOVW R1, w+4(FP) NEG R0, R0 - MOVW R0, errno+8(FP) - RET pipeok: - MOVW R0, r+0(FP) - MOVW R1, w+4(FP) - MOVW ZR, errno+8(FP) + MOVW R0, errno+8(FP) RET // func pipe2(flags int32) (r, w int32, errno int32) From af9c5e5dbc3a5abc49aa3ac45da1b533b0d238a6 Mon Sep 17 00:00:00 2001 From: Roland Shoemaker Date: Fri, 8 May 2020 15:57:25 -0700 Subject: [PATCH 063/281] crypto/x509: prioritize potential parents in chain building When building a x509 chain the algorithm currently looks for parents that have a subject key identifier (SKID) that matches the child authority key identifier (AKID), if it is present, and returns all matches. If the child doesn't have an AKID, or there are no parents with matching SKID it will instead return all parents that have a subject DN matching the child's issuer DN. Prioritizing AKID/SKID matches over issuer/subject matches means that later in buildChains we have to throw away any pairs where these DNs do not match. This also prevents validation when a child has a SKID with two possible parents, one with matching AKID but mismatching subject DN, and one with a matching subject but missing AKID. In this case the former will be chosen and the latter ignored, meaning a valid chain cannot be built. 
This change alters how possible parents are chosen. Instead of doing a two step search it instead only consults the CertPool.byName subject DN map, avoiding issues where possible parents may be shadowed by parents that have SKID but bad subject DNs. Additionally it orders the list of possible parents by the likelihood that they are in fact a match. This ordering follows this pattern: * AKID and SKID match * AKID present, SKID missing / AKID missing, SKID present * AKID and SKID don't match In an ideal world this should save a handful of cycles when there are multiple possible matching parents by prioritizing parents that have the highest likelihood. This does diverge from past behavior in that it also means there are cases where _more_ parents will be considered than in the past. Another version of this change could just retain the past behavior, and only consider parents where both the subject and issuer DNs match, and if both parent and child have SKID and AKID also compare those, without any prioritization of the candidate parents. This change removes an existing test case as it assumes that the CertPool will return a possible candidate where the issuer/subject DNs do not match. Fixes #30079 Change-Id: I629f579cabb0b3d0c8cae5ad0429cc5a536b3e58 Reviewed-on: https://go-review.googlesource.com/c/go/+/232993 Trust: Roland Shoemaker Run-TryBot: Roland Shoemaker TryBot-Result: Go Bot Reviewed-by: Filippo Valsorda --- src/crypto/x509/cert_pool.go | 58 ++++++++++++++++++------------- src/crypto/x509/verify_test.go | 62 ++++++++++++++++++++++++---------- 2 files changed, 79 insertions(+), 41 deletions(-) diff --git a/src/crypto/x509/cert_pool.go b/src/crypto/x509/cert_pool.go index 59ec4b6894..167390da9f 100644 --- a/src/crypto/x509/cert_pool.go +++ b/src/crypto/x509/cert_pool.go @@ -5,6 +5,7 @@ package x509 import ( + "bytes" "encoding/pem" "errors" "runtime" @@ -12,29 +13,21 @@ import ( // CertPool is a set of certificates. 
type CertPool struct { - bySubjectKeyId map[string][]int - byName map[string][]int - certs []*Certificate + byName map[string][]int + certs []*Certificate } // NewCertPool returns a new, empty CertPool. func NewCertPool() *CertPool { return &CertPool{ - bySubjectKeyId: make(map[string][]int), - byName: make(map[string][]int), + byName: make(map[string][]int), } } func (s *CertPool) copy() *CertPool { p := &CertPool{ - bySubjectKeyId: make(map[string][]int, len(s.bySubjectKeyId)), - byName: make(map[string][]int, len(s.byName)), - certs: make([]*Certificate, len(s.certs)), - } - for k, v := range s.bySubjectKeyId { - indexes := make([]int, len(v)) - copy(indexes, v) - p.bySubjectKeyId[k] = indexes + byName: make(map[string][]int, len(s.byName)), + certs: make([]*Certificate, len(s.certs)), } for k, v := range s.byName { indexes := make([]int, len(v)) @@ -70,19 +63,42 @@ func SystemCertPool() (*CertPool, error) { } // findPotentialParents returns the indexes of certificates in s which might -// have signed cert. The caller must not modify the returned slice. +// have signed cert. func (s *CertPool) findPotentialParents(cert *Certificate) []int { if s == nil { return nil } - var candidates []int - if len(cert.AuthorityKeyId) > 0 { - candidates = s.bySubjectKeyId[string(cert.AuthorityKeyId)] + // consider all candidates where cert.Issuer matches cert.Subject. 
+ // when picking possible candidates the list is built in the order + // of match plausibility as to save cycles in buildChains: + // AKID and SKID match + // AKID present, SKID missing / AKID missing, SKID present + // AKID and SKID don't match + var matchingKeyID, oneKeyID, mismatchKeyID []int + for _, c := range s.byName[string(cert.RawIssuer)] { + candidate := s.certs[c] + kidMatch := bytes.Equal(candidate.SubjectKeyId, cert.AuthorityKeyId) + switch { + case kidMatch: + matchingKeyID = append(matchingKeyID, c) + case (len(candidate.SubjectKeyId) == 0 && len(cert.AuthorityKeyId) > 0) || + (len(candidate.SubjectKeyId) > 0 && len(cert.AuthorityKeyId) == 0): + oneKeyID = append(oneKeyID, c) + default: + mismatchKeyID = append(mismatchKeyID, c) + } } - if len(candidates) == 0 { - candidates = s.byName[string(cert.RawIssuer)] + + found := len(matchingKeyID) + len(oneKeyID) + len(mismatchKeyID) + if found == 0 { + return nil } + candidates := make([]int, 0, found) + candidates = append(candidates, matchingKeyID...) + candidates = append(candidates, oneKeyID...) + candidates = append(candidates, mismatchKeyID...) + return candidates } @@ -115,10 +131,6 @@ func (s *CertPool) AddCert(cert *Certificate) { n := len(s.certs) s.certs = append(s.certs, cert) - if len(cert.SubjectKeyId) > 0 { - keyId := string(cert.SubjectKeyId) - s.bySubjectKeyId[keyId] = append(s.bySubjectKeyId[keyId], n) - } name := string(cert.RawSubject) s.byName[name] = append(s.byName[name], n) } diff --git a/src/crypto/x509/verify_test.go b/src/crypto/x509/verify_test.go index 76d1ab9a47..c7a715bbcb 100644 --- a/src/crypto/x509/verify_test.go +++ b/src/crypto/x509/verify_test.go @@ -284,18 +284,6 @@ var verifyTests = []verifyTest{ errorCallback: expectHostnameError("certificate is valid for"), }, - { - // The issuer name in the leaf doesn't exactly match the - // subject name in the root. Go does not perform - // canonicalization and so should reject this. See issue 14955. 
- name: "IssuerSubjectMismatch", - leaf: issuerSubjectMatchLeaf, - roots: []string{issuerSubjectMatchRoot}, - currentTime: 1475787715, - systemSkip: true, // does not chain to a system root - - errorCallback: expectSubjectIssuerMismatcthError, - }, { // An X.509 v1 certificate should not be accepted as an // intermediate. @@ -430,6 +418,20 @@ var verifyTests = []verifyTest{ {"Acme LLC", "Acme Co"}, }, }, + { + // When there are two parents, one with a incorrect subject but matching SKID + // and one with a correct subject but missing SKID, the latter should be + // considered as a possible parent. + leaf: leafMatchingAKIDMatchingIssuer, + roots: []string{rootMatchingSKIDMismatchingSubject, rootMismatchingSKIDMatchingSubject}, + currentTime: 1550000000, + dnsName: "example", + systemSkip: true, + + expectedChains: [][]string{ + {"Leaf", "Root B"}, + }, + }, } func expectHostnameError(msg string) func(*testing.T, error) { @@ -474,12 +476,6 @@ func expectHashError(t *testing.T, err error) { } } -func expectSubjectIssuerMismatcthError(t *testing.T, err error) { - if inval, ok := err.(CertificateInvalidError); !ok || inval.Reason != NameMismatch { - t.Fatalf("error was not a NameMismatch: %v", err) - } -} - func expectNameConstraintsError(t *testing.T, err error) { if inval, ok := err.(CertificateInvalidError); !ok || inval.Reason != CANotAuthorizedForThisName { t.Fatalf("error was not a CANotAuthorizedForThisName: %v", err) @@ -1615,6 +1611,36 @@ ssWvTAveakIwEgYDVR0RBAswCYIHZXhhbXBsZTAKBggqhkjOPQQDAgNHADBEAiBk ZZMqeJS7JldLx91sPUArY5A= -----END CERTIFICATE-----` +const rootMatchingSKIDMismatchingSubject = `-----BEGIN CERTIFICATE----- +MIIBQjCB6aADAgECAgEAMAoGCCqGSM49BAMCMBExDzANBgNVBAMTBlJvb3QgQTAe +Fw0wOTExMTAyMzAwMDBaFw0xOTExMDgyMzAwMDBaMBExDzANBgNVBAMTBlJvb3Qg +QTBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABPK4p1uXq2aAeDtKDHIokg2rTcPM +2gq3N9Y96wiW6/7puBK1+INEW//cO9x6FpzkcsHw/TriAqy4sck/iDAvf9WjMjAw +MA8GA1UdJQQIMAYGBFUdJQAwDwYDVR0TAQH/BAUwAwEB/zAMBgNVHQ4EBQQDAQID 
+MAoGCCqGSM49BAMCA0gAMEUCIQDgtAp7iVHxMnKxZPaLQPC+Tv2r7+DJc88k2SKH +MPs/wQIgFjjNvBoQEl7vSHTcRGCCcFMdlN4l0Dqc9YwGa9fyrQs= +-----END CERTIFICATE-----` + +const rootMismatchingSKIDMatchingSubject = `-----BEGIN CERTIFICATE----- +MIIBNDCB26ADAgECAgEAMAoGCCqGSM49BAMCMBExDzANBgNVBAMTBlJvb3QgQjAe +Fw0wOTExMTAyMzAwMDBaFw0xOTExMDgyMzAwMDBaMBExDzANBgNVBAMTBlJvb3Qg +QjBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABI1YRFcIlkWzm9BdEVrIsEQJ2dT6 +qiW8/WV9GoIhmDtX9SEDHospc0Cgm+TeD2QYW2iMrS5mvNe4GSw0Jezg/bOjJDAi +MA8GA1UdJQQIMAYGBFUdJQAwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNI +ADBFAiEAukWOiuellx8bugRiwCS5XQ6IOJ1SZcjuZxj76WojwxkCIHqa71qNw8FM +DtA5yoL9M2pDFF6ovFWnaCe+KlzSwAW/ +-----END CERTIFICATE-----` + +const leafMatchingAKIDMatchingIssuer = `-----BEGIN CERTIFICATE----- +MIIBNTCB26ADAgECAgEAMAoGCCqGSM49BAMCMBExDzANBgNVBAMTBlJvb3QgQjAe +Fw0wOTExMTAyMzAwMDBaFw0xOTExMDgyMzAwMDBaMA8xDTALBgNVBAMTBExlYWYw +WTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAASNWERXCJZFs5vQXRFayLBECdnU+qol +vP1lfRqCIZg7V/UhAx6LKXNAoJvk3g9kGFtojK0uZrzXuBksNCXs4P2zoyYwJDAO +BgNVHSMEBzAFgAMBAgMwEgYDVR0RBAswCYIHZXhhbXBsZTAKBggqhkjOPQQDAgNJ +ADBGAiEAnV9XV7a4h0nfJB8pWv+pBUXRlRFA2uZz3mXEpee8NYACIQCWa+wL70GL +ePBQCV1F9sE2q4ZrnsT9TZoNrSe/bMDjzA== +-----END CERTIFICATE-----` + var unknownAuthorityErrorTests = []struct { cert string expected string From 0e85fd7561de869add933801c531bf25dee9561c Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Tue, 29 Sep 2020 02:11:10 -0700 Subject: [PATCH 064/281] cmd/compile: report type loop for invalid recursive types Similar to how we report initialization loops in initorder.go and type alias loops in typecheck.go, this CL updates align.go to warn about invalid recursive types. The code is based on the loop code from initorder.go, with minimal changes to adapt from detecting variable/function initialization loops to detecting type declaration loops. Thanks to Cuong Manh Le for investigating this, helping come up with test cases, and exploring solutions. Fixes #41575 Updates #41669. 
Change-Id: Idb2cb8c5e1d645e62900e178fcb50af33e1700a1 Reviewed-on: https://go-review.googlesource.com/c/go/+/258177 Run-TryBot: Matthew Dempsky TryBot-Result: Go Bot Reviewed-by: Robert Griesemer Reviewed-by: Cuong Manh Le Trust: Matthew Dempsky Trust: Cuong Manh Le --- src/cmd/compile/internal/gc/align.go | 98 +++++++++++++++++++++++++--- src/cmd/compile/internal/gc/subr.go | 10 +++ test/fixedbugs/bug195.go | 16 ++--- test/fixedbugs/issue22904.go | 4 +- test/fixedbugs/issue23823.go | 3 +- test/fixedbugs/issue41575.go | 36 ++++++++++ 6 files changed, 147 insertions(+), 20 deletions(-) create mode 100644 test/fixedbugs/issue41575.go diff --git a/src/cmd/compile/internal/gc/align.go b/src/cmd/compile/internal/gc/align.go index ab578ee8c7..5af403afa3 100644 --- a/src/cmd/compile/internal/gc/align.go +++ b/src/cmd/compile/internal/gc/align.go @@ -5,7 +5,9 @@ package gc import ( + "bytes" "cmd/compile/internal/types" + "fmt" "sort" ) @@ -173,6 +175,91 @@ func widstruct(errtype *types.Type, t *types.Type, o int64, flag int) int64 { return o } +// findTypeLoop searches for an invalid type declaration loop involving +// type t and reports whether one is found. If so, path contains the +// loop. +// +// path points to a slice used for tracking the sequence of types +// visited. Using a pointer to a slice allows the slice capacity to +// grow and limit reallocations. +func findTypeLoop(t *types.Type, path *[]*types.Type) bool { + // We implement a simple DFS loop-finding algorithm. This + // could be faster, but type cycles are rare. + + if t.Sym != nil { + // Declared type. Check for loops and otherwise + // recurse on the type expression used in the type + // declaration. + + for i, x := range *path { + if x == t { + *path = (*path)[i:] + return true + } + } + + *path = append(*path, t) + if findTypeLoop(asNode(t.Nod).Name.Param.Ntype.Type, path) { + return true + } + *path = (*path)[:len(*path)-1] + } else { + // Anonymous type. Recurse on contained types. 
+ + switch t.Etype { + case TARRAY: + if findTypeLoop(t.Elem(), path) { + return true + } + case TSTRUCT: + for _, f := range t.Fields().Slice() { + if findTypeLoop(f.Type, path) { + return true + } + } + case TINTER: + for _, m := range t.Methods().Slice() { + if m.Type.IsInterface() { // embedded interface + if findTypeLoop(m.Type, path) { + return true + } + } + } + } + } + + return false +} + +func reportTypeLoop(t *types.Type) { + if t.Broke() { + return + } + + var l []*types.Type + if !findTypeLoop(t, &l) { + Fatalf("failed to find type loop for: %v", t) + } + + // Rotate loop so that the earliest type declaration is first. + i := 0 + for j, t := range l[1:] { + if typePos(t).Before(typePos(l[i])) { + i = j + 1 + } + } + l = append(l[i:], l[:i]...) + + var msg bytes.Buffer + fmt.Fprintf(&msg, "invalid recursive type %v\n", l[0]) + for _, t := range l { + fmt.Fprintf(&msg, "\t%v: %v refers to\n", linestr(typePos(t)), t) + t.SetBroke(true) + } + fmt.Fprintf(&msg, "\t%v: %v", linestr(typePos(l[0])), l[0]) + yyerrorl(typePos(l[0]), msg.String()) +} + // dowidth calculates and stores the size and alignment for t. // If sizeCalculationDisabled is set, and the size/alignment // have not already been calculated, it calls Fatal. 
@@ -192,11 +279,7 @@ func dowidth(t *types.Type) { } if t.Width == -2 { - if !t.Broke() { - t.SetBroke(true) - yyerrorl(asNode(t.Nod).Pos, "invalid recursive type %v", t) - } - + reportTypeLoop(t) t.Width = 0 t.Align = 1 return @@ -308,10 +391,7 @@ func dowidth(t *types.Type) { checkwidth(t.Key()) case TFORW: // should have been filled in - if !t.Broke() { - t.SetBroke(true) - yyerror("invalid recursive type %v", t) - } + reportTypeLoop(t) w = 1 // anything will do case TANY: diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index b5527e2f83..07547df36e 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -1921,3 +1921,13 @@ func ifaceData(pos src.XPos, n *Node, t *types.Type) *Node { ind.SetBounded(true) return ind } + +// typePos returns the position associated with t. +// This is where t was declared or where it appeared as a type expression. +func typePos(t *types.Type) src.XPos { + n := asNode(t.Nod) + if n == nil || !n.Pos.IsKnown() { + Fatalf("bad type: %v", t) + } + return n.Pos +} diff --git a/test/fixedbugs/bug195.go b/test/fixedbugs/bug195.go index 496c0be610..aef7bd2d89 100644 --- a/test/fixedbugs/bug195.go +++ b/test/fixedbugs/bug195.go @@ -6,22 +6,22 @@ package main -type I1 interface { I2 } // ERROR "interface" +type I1 interface{ I2 } // ERROR "interface" type I2 int -type I3 interface { int } // ERROR "interface" +type I3 interface{ int } // ERROR "interface" type S struct { - x interface{ S } // ERROR "interface" + x interface{ S } // ERROR "interface" } -type I4 interface { // GC_ERROR "invalid recursive type" - I4 // GCCGO_ERROR "interface" +type I4 interface { // GC_ERROR "invalid recursive type I4\n\tLINE: I4 refers to\n\tLINE: I4$" + I4 // GCCGO_ERROR "interface" } -type I5 interface { // GC_ERROR "invalid recursive type" - I6 // GCCGO_ERROR "interface" +type I5 interface { // GC_ERROR "invalid recursive type I5\n\tLINE: I5 refers to\n\tLINE+4: I6 refers to\n\tLINE: 
I5$" + I6 // GCCGO_ERROR "interface" } type I6 interface { - I5 // GCCGO_ERROR "interface" + I5 // GCCGO_ERROR "interface" } diff --git a/test/fixedbugs/issue22904.go b/test/fixedbugs/issue22904.go index 46cb7c048a..09f4a2118e 100644 --- a/test/fixedbugs/issue22904.go +++ b/test/fixedbugs/issue22904.go @@ -9,8 +9,8 @@ package p -type a struct{ b } -type b struct{ a } // ERROR "invalid recursive type" +type a struct{ b } // ERROR "invalid recursive type" +type b struct{ a } var x interface{} diff --git a/test/fixedbugs/issue23823.go b/test/fixedbugs/issue23823.go index 2f802d0988..fe6cef1fb4 100644 --- a/test/fixedbugs/issue23823.go +++ b/test/fixedbugs/issue23823.go @@ -10,6 +10,7 @@ type I1 = interface { I2 } -type I2 interface { // ERROR "invalid recursive type" +// BAD: type loop should mention I1; see also #41669 +type I2 interface { // ERROR "invalid recursive type I2\n\tLINE: I2 refers to\n\tLINE: I2$" I1 } diff --git a/test/fixedbugs/issue41575.go b/test/fixedbugs/issue41575.go new file mode 100644 index 0000000000..d03d1c8b3e --- /dev/null +++ b/test/fixedbugs/issue41575.go @@ -0,0 +1,36 @@ +// errorcheck + +// Copyright 2020 The Go Authors. All rights reserved. Use of this +// source code is governed by a BSD-style license that can be found in +// the LICENSE file. 
+ +package p + +type T1 struct { // ERROR "invalid recursive type T1\n\tLINE: T1 refers to\n\tLINE+4: T2 refers to\n\tLINE: T1$" + f2 T2 +} + +type T2 struct { + f1 T1 +} + +type a b +type b c // ERROR "invalid recursive type b\n\tLINE: b refers to\n\tLINE+1: c refers to\n\tLINE: b$" +type c b + +type d e +type e f +type f f // ERROR "invalid recursive type f\n\tLINE: f refers to\n\tLINE: f$" + +type g struct { // ERROR "invalid recursive type g\n\tLINE: g refers to\n\tLINE: g$" + h struct { + g + } +} + +type w x +type x y // ERROR "invalid recursive type x\n\tLINE: x refers to\n\tLINE+1: y refers to\n\tLINE+2: z refers to\n\tLINE: x$" +type y struct{ z } +type z [10]x + +type w2 w // refer to the type loop again From 3caaaddffd605c0ec1787d68295b732fff73026b Mon Sep 17 00:00:00 2001 From: Andrei Vagin Date: Tue, 29 Sep 2020 04:45:55 +0000 Subject: [PATCH 065/281] runtime: don't crash if vsyscall and vdso are disabled on x86_64 If vdso is disabled, the Go runtime calls gettimeofday from vsyscall, but if vsyscall is disabled too, all golang binaries crash: SIGSEGV {si_signo=SIGSEGV, si_code=SEGV_MAPERR, si_addr=0xffffffffff600000} --- killed by SIGSEGV (core dumped) ++ vsyscall doesn't work as it was designed for a long time due to security reasons and now vsyscall is a little more expensive than real syscalls: https://github.com/torvalds/linux/commit/5cec93c216db This patch reworks the code to call syscalls if the vdso library isn't available.
Change-Id: I16cbf3f49871bea91e26af1f49aa0ae2fbd3a01d GitHub-Last-Rev: 1d133cd30a5dee1fea9aee0fb4ea0b07e0e87f2a GitHub-Pull-Request: golang/go#41681 Reviewed-on: https://go-review.googlesource.com/c/go/+/257982 Run-TryBot: Tobias Klauser TryBot-Result: Go Bot Reviewed-by: Ian Lance Taylor Reviewed-by: Michael Pratt Trust: Michael Pratt --- src/runtime/sys_linux_amd64.s | 31 +++++++++++-------------------- src/runtime/vdso_linux_amd64.go | 5 ++--- src/syscall/asm_linux_amd64.s | 10 +++++++++- 3 files changed, 22 insertions(+), 24 deletions(-) diff --git a/src/runtime/sys_linux_amd64.s b/src/runtime/sys_linux_amd64.s index 8d90813589..681cd20274 100644 --- a/src/runtime/sys_linux_amd64.s +++ b/src/runtime/sys_linux_amd64.s @@ -40,6 +40,7 @@ #define SYS_futex 202 #define SYS_sched_getaffinity 204 #define SYS_epoll_create 213 +#define SYS_clock_gettime 228 #define SYS_exit_group 231 #define SYS_epoll_ctl 233 #define SYS_tgkill 234 @@ -241,15 +242,15 @@ noswitch: SUBQ $16, SP // Space for results ANDQ $~15, SP // Align for C code + MOVL $0, DI // CLOCK_REALTIME + LEAQ 0(SP), SI MOVQ runtime·vdsoClockgettimeSym(SB), AX CMPQ AX, $0 JEQ fallback - MOVL $0, DI // CLOCK_REALTIME - LEAQ 0(SP), SI CALL AX +ret: MOVQ 0(SP), AX // sec MOVQ 8(SP), DX // nsec -ret: MOVQ R12, SP // Restore real SP // Restore vdsoPC, vdsoSP // We don't worry about being signaled between the two stores. 
@@ -264,13 +265,8 @@ ret: MOVL DX, nsec+8(FP) RET fallback: - LEAQ 0(SP), DI - MOVQ $0, SI - MOVQ runtime·vdsoGettimeofdaySym(SB), AX - CALL AX - MOVQ 0(SP), AX // sec - MOVL 8(SP), DX // usec - IMULQ $1000, DX + MOVQ $SYS_clock_gettime, AX + SYSCALL JMP ret // func nanotime1() int64 @@ -306,15 +302,15 @@ noswitch: SUBQ $16, SP // Space for results ANDQ $~15, SP // Align for C code + MOVL $1, DI // CLOCK_MONOTONIC + LEAQ 0(SP), SI MOVQ runtime·vdsoClockgettimeSym(SB), AX CMPQ AX, $0 JEQ fallback - MOVL $1, DI // CLOCK_MONOTONIC - LEAQ 0(SP), SI CALL AX +ret: MOVQ 0(SP), AX // sec MOVQ 8(SP), DX // nsec -ret: MOVQ R12, SP // Restore real SP // Restore vdsoPC, vdsoSP // We don't worry about being signaled between the two stores. @@ -332,13 +328,8 @@ ret: MOVQ AX, ret+0(FP) RET fallback: - LEAQ 0(SP), DI - MOVQ $0, SI - MOVQ runtime·vdsoGettimeofdaySym(SB), AX - CALL AX - MOVQ 0(SP), AX // sec - MOVL 8(SP), DX // usec - IMULQ $1000, DX + MOVQ $SYS_clock_gettime, AX + SYSCALL JMP ret TEXT runtime·rtsigprocmask(SB),NOSPLIT,$0-28 diff --git a/src/runtime/vdso_linux_amd64.go b/src/runtime/vdso_linux_amd64.go index d9ab4ab3c6..4e9f748f4a 100644 --- a/src/runtime/vdso_linux_amd64.go +++ b/src/runtime/vdso_linux_amd64.go @@ -17,8 +17,7 @@ var vdsoSymbolKeys = []vdsoSymbolKey{ {"__vdso_clock_gettime", 0xd35ec75, 0x6e43a318, &vdsoClockgettimeSym}, } -// initialize with vsyscall fallbacks var ( - vdsoGettimeofdaySym uintptr = 0xffffffffff600000 - vdsoClockgettimeSym uintptr = 0 + vdsoGettimeofdaySym uintptr + vdsoClockgettimeSym uintptr ) diff --git a/src/syscall/asm_linux_amd64.s b/src/syscall/asm_linux_amd64.s index 2c3374338f..ba22179dc2 100644 --- a/src/syscall/asm_linux_amd64.s +++ b/src/syscall/asm_linux_amd64.s @@ -9,6 +9,8 @@ // System calls for AMD64, Linux // +#define SYS_gettimeofday 96 + // func Syscall(trap int64, a1, a2, a3 uintptr) (r1, r2, err uintptr); // Trap # in AX, args in DI SI DX R10 R8 R9, return in AX DX // Note that this differs from "standard" ABI 
convention, which @@ -144,13 +146,19 @@ TEXT ·gettimeofday(SB),NOSPLIT,$0-16 MOVQ tv+0(FP), DI MOVQ $0, SI MOVQ runtime·vdsoGettimeofdaySym(SB), AX + TESTQ AX, AX + JZ fallback CALL AX - +ret: CMPQ AX, $0xfffffffffffff001 JLS ok7 NEGQ AX MOVQ AX, err+8(FP) RET +fallback: + MOVL $SYS_gettimeofday, AX + SYSCALL + JMP ret ok7: MOVQ $0, err+8(FP) RET From 8b0d00b1645c47076f5b20dc692b2ca6d9bac19b Mon Sep 17 00:00:00 2001 From: Jay Conrod Date: Tue, 29 Sep 2020 16:40:57 -0400 Subject: [PATCH 066/281] cmd/go: error if -modfile used with 'go install pkg@version' 'go install pkg@version' runs without a main module or a module root directory. The -modfile flag cannot be used to set the module root directory or to substitute a different go.mod file. This error won't be reported if -modfile is set in GOFLAGS. Unsupported flags there are generally ignored. For #40276 Change-Id: I0b39b1fa9184c15c6e863b647d43c328710920f4 Reviewed-on: https://go-review.googlesource.com/c/go/+/258297 Trust: Jay Conrod Run-TryBot: Jay Conrod Reviewed-by: Bryan C. Mills TryBot-Result: Go Bot --- src/cmd/go/internal/modload/init.go | 5 +++-- .../go/testdata/script/mod_install_pkg_version.txt | 11 +++++++++++ 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/src/cmd/go/internal/modload/init.go b/src/cmd/go/internal/modload/init.go index 9d05eadda5..3344242489 100644 --- a/src/cmd/go/internal/modload/init.go +++ b/src/cmd/go/internal/modload/init.go @@ -163,8 +163,9 @@ func Init() { // Running 'go mod init': go.mod will be created in current directory. modRoot = base.Cwd } else if RootMode == NoRoot { - // TODO(jayconrod): report an error if -mod -modfile is explicitly set on - // the command line. Ignore those flags if they come from GOFLAGS. 
+ if cfg.ModFile != "" && !base.InGOFLAGS("-modfile") { + base.Fatalf("go: -modfile cannot be used with commands that ignore the current module") + } modRoot = "" } else { modRoot = findModuleRoot(base.Cwd) diff --git a/src/cmd/go/testdata/script/mod_install_pkg_version.txt b/src/cmd/go/testdata/script/mod_install_pkg_version.txt index 7e6d4e8e7c..dc4a329688 100644 --- a/src/cmd/go/testdata/script/mod_install_pkg_version.txt +++ b/src/cmd/go/testdata/script/mod_install_pkg_version.txt @@ -26,6 +26,17 @@ rm $GOPATH/bin/a cd .. +# 'go install -modfile=x.mod pkg@version' reports an error, but only if +# -modfile is specified explicitly on the command line. +cd m +env GOFLAGS=-modfile=go.mod +go install example.com/cmd/a@latest # same as above +env GOFLAGS= +! go install -modfile=go.mod example.com/cmd/a@latest +stderr '^go: -modfile cannot be used with commands that ignore the current module$' +cd .. + + # Every test case requires linking, so we only cover the most important cases # when -short is set. [short] stop From 54a112d7197ec320527614e7502a3243eab93b6e Mon Sep 17 00:00:00 2001 From: Ruixin Bao Date: Tue, 29 Sep 2020 15:55:19 -0400 Subject: [PATCH 067/281] crypto/ecdsa: use FillBytes on s390x MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Originally, zeroExtendAndCopy is used to pad src with leading zeros and copy the padded src into the destination. It is no longer needed after CL 230397 introduced FillBytes. We can simply use that and remove the zeroExtendAndCopy function. It is cleaner and reduces some allocation. In addition, this patch tries to avoid calling hashToInt function in both Sign and Verify function so some allocation is reduced. 
Benchmarks: name old alloc/op new alloc/op delta SignP256-8 1.60kB ± 0% 1.49kB ± 0% -7.23% (p=0.000 n=20+20) SignP384-8 1.74kB ± 0% 1.59kB ± 0% -8.50% (p=0.000 n=20+18) VerifyP256-8 176B ± 0% 0B -100.00% (p=0.000 n=20+20) KeyGeneration-8 640B ± 0% 640B ± 0% ~ (all equal) name old allocs/op new allocs/op delta SignP256-8 22.0 ± 0% 17.0 ± 0% -22.73% (p=0.000 n=20+20) SignP384-8 22.0 ± 0% 17.0 ± 0% -22.73% (p=0.000 n=20+20) VerifyP256-8 7.00 ± 0% 0.00 -100.00% (p=0.000 n=20+20) KeyGeneration-8 13.0 ± 0% 13.0 ± 0% ~ (all equal) Change-Id: Ic4c95191eded55deb3420d97db501689f3b173c9 Reviewed-on: https://go-review.googlesource.com/c/go/+/232297 Reviewed-by: Michael Munday Run-TryBot: Michael Munday TryBot-Result: Go Bot Trust: Filippo Valsorda --- src/crypto/ecdsa/ecdsa_s390x.go | 59 +++++++++++++++------------------ 1 file changed, 27 insertions(+), 32 deletions(-) diff --git a/src/crypto/ecdsa/ecdsa_s390x.go b/src/crypto/ecdsa/ecdsa_s390x.go index d8d2c716db..0a1d73e7a4 100644 --- a/src/crypto/ecdsa/ecdsa_s390x.go +++ b/src/crypto/ecdsa/ecdsa_s390x.go @@ -41,26 +41,29 @@ func canUseKDSA(c elliptic.Curve) (functionCode uint64, blockSize int, ok bool) return 0, 0, false // A mismatch } -// zeroExtendAndCopy pads src with leading zeros until it has the size given. -// It then copies the padded src into the dst. Bytes beyond size in dst are -// not modified. 
-func zeroExtendAndCopy(dst, src []byte, size int) { - nz := size - len(src) - if nz < 0 { - panic("src is too long") +func hashToBytes(dst, hash []byte, c elliptic.Curve) { + l := len(dst) + if n := c.Params().N.BitLen(); n == l*8 { + // allocation free path for curves with a length that is a whole number of bytes + if len(hash) >= l { + // truncate hash + copy(dst, hash[:l]) + return + } + // pad hash with leading zeros + p := l - len(hash) + for i := 0; i < p; i++ { + dst[i] = 0 + } + copy(dst[p:], hash) + return } - // the compiler should replace this loop with a memclr call - z := dst[:nz] - for i := range z { - z[i] = 0 - } - copy(dst[nz:size], src[:size-nz]) - return + // TODO(mundaym): avoid hashToInt call here + hashToInt(hash, c).FillBytes(dst) } func sign(priv *PrivateKey, csprng *cipher.StreamReader, c elliptic.Curve, hash []byte) (r, s *big.Int, err error) { if functionCode, blockSize, ok := canUseKDSA(c); ok { - e := hashToInt(hash, c) for { var k *big.Int k, err = randFieldElement(c, *csprng) @@ -89,17 +92,12 @@ func sign(priv *PrivateKey, csprng *cipher.StreamReader, c elliptic.Curve, hash // different curves and is set by canUseKDSA function. var params [4096]byte - startingOffset := 2 * blockSize // Set the starting location for copying // Copy content into the parameter block. In the sign case, // we copy hashed message, private key and random number into - // the parameter block. Since those are consecutive components in the parameter - // block, we use a for loop here. - for i, v := range []*big.Int{e, priv.D, k} { - startPosition := startingOffset + i*blockSize - endPosition := startPosition + blockSize - zeroExtendAndCopy(params[startPosition:endPosition], v.Bytes(), blockSize) - } - + // the parameter block. + hashToBytes(params[2*blockSize:3*blockSize], hash, c) + priv.D.FillBytes(params[3*blockSize : 4*blockSize]) + k.FillBytes(params[4*blockSize : 5*blockSize]) // Convert verify function code into a sign function code by adding 8. 
// We also need to set the 'deterministic' bit in the function code, by // adding 128, in order to stop the instruction using its own random number @@ -124,7 +122,6 @@ func sign(priv *PrivateKey, csprng *cipher.StreamReader, c elliptic.Curve, hash func verify(pub *PublicKey, c elliptic.Curve, hash []byte, r, s *big.Int) bool { if functionCode, blockSize, ok := canUseKDSA(c); ok { - e := hashToInt(hash, c) // The parameter block looks like the following for verify: // +---------------------+ // | Signature(R) | @@ -149,13 +146,11 @@ func verify(pub *PublicKey, c elliptic.Curve, hash []byte, r, s *big.Int) bool { // Copy content into the parameter block. In the verify case, // we copy signature (r), signature(s), hashed message, public key x component, // and public key y component into the parameter block. - // Since those are consecutive components in the parameter block, we use a for loop here. - for i, v := range []*big.Int{r, s, e, pub.X, pub.Y} { - startPosition := i * blockSize - endPosition := startPosition + blockSize - zeroExtendAndCopy(params[startPosition:endPosition], v.Bytes(), blockSize) - } - + r.FillBytes(params[0*blockSize : 1*blockSize]) + s.FillBytes(params[1*blockSize : 2*blockSize]) + hashToBytes(params[2*blockSize:3*blockSize], hash, c) + pub.X.FillBytes(params[3*blockSize : 4*blockSize]) + pub.Y.FillBytes(params[4*blockSize : 5*blockSize]) return kdsa(functionCode, ¶ms) == 0 } return verifyGeneric(pub, c, hash, r, s) From 27280d8c14331c1c46cd90206be9f3c924f6b4c4 Mon Sep 17 00:00:00 2001 From: Paschalis Tsilias Date: Wed, 2 Sep 2020 13:44:36 +0300 Subject: [PATCH 068/281] crypto/x509: return errors instead of panicking Eliminate a panic in x509.CreateCertificate when passing templates with unknown ExtKeyUsage; return an error instead. 
Fixes #41169 Change-Id: Ia229d3b0d4a1bdeef05928439d97dab228687b3c Reviewed-on: https://go-review.googlesource.com/c/go/+/252557 Reviewed-by: Roland Shoemaker Reviewed-by: Filippo Valsorda Run-TryBot: Roland Shoemaker TryBot-Result: Go Bot --- src/crypto/x509/x509.go | 3 ++- src/crypto/x509/x509_test.go | 19 +++++++++++++++++++ 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/src/crypto/x509/x509.go b/src/crypto/x509/x509.go index 16655a3c70..5fd4f6fa17 100644 --- a/src/crypto/x509/x509.go +++ b/src/crypto/x509/x509.go @@ -1689,7 +1689,8 @@ func buildExtensions(template *Certificate, subjectIsEmpty bool, authorityKeyId if oid, ok := oidFromExtKeyUsage(u); ok { oids = append(oids, oid) } else { - panic("internal error") + err = errors.New("x509: unknown extended key usage") + return } } diff --git a/src/crypto/x509/x509_test.go b/src/crypto/x509/x509_test.go index d0315900e4..6345c3f5ab 100644 --- a/src/crypto/x509/x509_test.go +++ b/src/crypto/x509/x509_test.go @@ -2754,3 +2754,22 @@ func TestRSAPSAParameters(t *testing.T) { } } } + +func TestUnknownExtKey(t *testing.T) { + const errorContains = "unknown extended key usage" + + template := &Certificate{ + SerialNumber: big.NewInt(10), + DNSNames: []string{"foo"}, + ExtKeyUsage: []ExtKeyUsage{ExtKeyUsage(-1)}, + } + signer, err := rsa.GenerateKey(rand.Reader, 1024) + if err != nil { + t.Errorf("failed to generate key for TestUnknownExtKey") + } + + _, err = CreateCertificate(rand.Reader, template, template, signer.Public(), signer) + if !strings.Contains(err.Error(), errorContains) { + t.Errorf("expected error containing %q, got %s", errorContains, err) + } +} From ae329abec0f78743ab2fbf30ef5b488376fe3c85 Mon Sep 17 00:00:00 2001 From: "Bryan C. 
Mills" Date: Wed, 23 Sep 2020 12:28:29 -0400 Subject: [PATCH 069/281] cmd/go: add another test case for package/module ambiguity in 'go get' For #37438 Change-Id: Iae00ef7f97144e85f4f710cdb3087c2548b4b8f0 Reviewed-on: https://go-review.googlesource.com/c/go/+/256799 Trust: Bryan C. Mills Trust: Jay Conrod Run-TryBot: Bryan C. Mills TryBot-Result: Go Bot Reviewed-by: Michael Matloob Reviewed-by: Jay Conrod --- .../mod/example.net_pkgremoved_v0.1.0.txt | 16 ++++++++ .../mod/example.net_pkgremoved_v0.2.0.txt | 15 ++++++++ .../mod/example.net_pkgremoved_v0.2.1.txt | 15 ++++++++ .../go/testdata/script/mod_get_patchmod.txt | 38 +++++++++++++++++++ 4 files changed, 84 insertions(+) create mode 100644 src/cmd/go/testdata/mod/example.net_pkgremoved_v0.1.0.txt create mode 100644 src/cmd/go/testdata/mod/example.net_pkgremoved_v0.2.0.txt create mode 100644 src/cmd/go/testdata/mod/example.net_pkgremoved_v0.2.1.txt create mode 100644 src/cmd/go/testdata/script/mod_get_patchmod.txt diff --git a/src/cmd/go/testdata/mod/example.net_pkgremoved_v0.1.0.txt b/src/cmd/go/testdata/mod/example.net_pkgremoved_v0.1.0.txt new file mode 100644 index 0000000000..f5e76b00c9 --- /dev/null +++ b/src/cmd/go/testdata/mod/example.net_pkgremoved_v0.1.0.txt @@ -0,0 +1,16 @@ +Written by hand. +Test module with a root package added in v0.1.0 and removed in v0.2.0. + +-- .mod -- +module example.net/pkgremoved + +go 1.16 +-- .info -- +{"Version": "v0.1.0"} +-- go.mod -- +module example.net/pkgremoved + +go 1.16 +-- pkgremoved.go -- +// Package pkgremoved exists in v0.1.0. +package pkgremoved diff --git a/src/cmd/go/testdata/mod/example.net_pkgremoved_v0.2.0.txt b/src/cmd/go/testdata/mod/example.net_pkgremoved_v0.2.0.txt new file mode 100644 index 0000000000..f1fc9fb61f --- /dev/null +++ b/src/cmd/go/testdata/mod/example.net_pkgremoved_v0.2.0.txt @@ -0,0 +1,15 @@ +Written by hand. +Test module with a root package added in v0.1.0 and removed in v0.2.0. 
+ +-- .mod -- +module example.net/pkgremoved + +go 1.16 +-- .info -- +{"Version": "v0.2.0"} +-- go.mod -- +module example.net/pkgremoved + +go 1.16 +-- README.txt -- +Package pkgremove was removed in v0.2.0. diff --git a/src/cmd/go/testdata/mod/example.net_pkgremoved_v0.2.1.txt b/src/cmd/go/testdata/mod/example.net_pkgremoved_v0.2.1.txt new file mode 100644 index 0000000000..0e961853d5 --- /dev/null +++ b/src/cmd/go/testdata/mod/example.net_pkgremoved_v0.2.1.txt @@ -0,0 +1,15 @@ +Written by hand. +Test module with a root package added in v0.1.0 and removed in v0.2.0. + +-- .mod -- +module example.net/pkgremoved + +go 1.16 +-- .info -- +{"Version": "v0.2.1"} +-- go.mod -- +module example.net/pkgremoved + +go 1.16 +-- README.txt -- +Package pkgremove was removed in v0.2.0. diff --git a/src/cmd/go/testdata/script/mod_get_patchmod.txt b/src/cmd/go/testdata/script/mod_get_patchmod.txt new file mode 100644 index 0000000000..45d680d021 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_get_patchmod.txt @@ -0,0 +1,38 @@ +# example.net/pkgremoved@v0.1.0 refers to a package. +go get -d example.net/pkgremoved@v0.1.0 + +go list example.net/pkgremoved +stdout '^example.net/pkgremoved' + +# When we resolve a new dependency on example.net/other, +# it will change the meaning of the path "example.net/pkgremoved" +# from a package (at v0.1.0) to only a module (at v0.2.0). +# +# If we simultaneously 'get' that module at the query "patch", the module should +# be upgraded to its patch release (v0.2.1) even though it no longer matches a +# package. +# +# BUG(#37438): Today, the pattern is only interpreted as its initial kind +# (a package), so the 'go get' invocation fails. + +! 
go get -d example.net/pkgremoved@patch example.net/other@v0.1.0 + +stderr '^go get example.net/pkgremoved@patch: module example.net/pkgremoved@latest found \(v0.2.1\), but does not contain package example.net/pkgremoved$' + + +-- go.mod -- +module example + +go 1.16 + +replace ( + example.net/other v0.1.0 => ./other +) +-- other/go.mod -- +module example.net/other + +go 1.16 + +require example.net/pkgremoved v0.2.0 +-- other/other.go -- +package other From 1eeaff75f9e02c65d29d9910c1884c6c0ecc1430 Mon Sep 17 00:00:00 2001 From: Roland Shoemaker Date: Sat, 23 May 2020 10:15:46 -0700 Subject: [PATCH 070/281] crypto/x509: enforce SAN IA5String encoding restrictions Extends the IA5String encoding restrictions that are currently applied to name constraints to dNSName, rfc822Name, and uniformResourceIdentifier elements of the SAN. The utility function isIA5String is updated to use unicode.MaxASCII rather than utf8.RuneSelf as it is somewhat more readable. Certificates that include these badly encoded names do exist, but are exceedingly rare. zlint and other linters enforce this encoding and searching censys.io reveals only three currently trusted certificates with this particular encoding issue. Fixes #26362 Change-Id: I7a4f3e165a1754e5b4bfaeabc03e01eb7367f3c9 Reviewed-on: https://go-review.googlesource.com/c/go/+/235078 Run-TryBot: Roland Shoemaker TryBot-Result: Go Bot Trust: Roland Shoemaker Reviewed-by: Filippo Valsorda --- doc/go1.16.html | 10 ++++ src/crypto/x509/x509.go | 39 ++++++++++++---- src/crypto/x509/x509_test.go | 90 ++++++++++++++++++++++++++++++++++++ 3 files changed, 131 insertions(+), 8 deletions(-) diff --git a/doc/go1.16.html b/doc/go1.16.html index b2cbb58e1a..2ecf7db7c7 100644 --- a/doc/go1.16.html +++ b/doc/go1.16.html @@ -174,6 +174,16 @@ Do not send CLs removing the interior tags from such phrases. by the Error method with "tls: use of closed connection".

+

crypto/x509

+ +

+ ParseCertificate and + CreateCertificate both + now enforce string encoding restrictions for the fields DNSNames, + EmailAddresses, and URIs. These fields can only + contain strings with characters within the ASCII range. +

+

net

diff --git a/src/crypto/x509/x509.go b/src/crypto/x509/x509.go index 5fd4f6fa17..93dca03840 100644 --- a/src/crypto/x509/x509.go +++ b/src/crypto/x509/x509.go @@ -28,7 +28,7 @@ import ( "strconv" "strings" "time" - "unicode/utf8" + "unicode" "golang.org/x/crypto/cryptobyte" cryptobyte_asn1 "golang.org/x/crypto/cryptobyte/asn1" @@ -1085,17 +1085,29 @@ func parseSANExtension(value []byte) (dnsNames, emailAddresses []string, ipAddre err = forEachSAN(value, func(tag int, data []byte) error { switch tag { case nameTypeEmail: - emailAddresses = append(emailAddresses, string(data)) + email := string(data) + if err := isIA5String(email); err != nil { + return errors.New("x509: SAN rfc822Name is malformed") + } + emailAddresses = append(emailAddresses, email) case nameTypeDNS: - dnsNames = append(dnsNames, string(data)) + name := string(data) + if err := isIA5String(name); err != nil { + return errors.New("x509: SAN dNSName is malformed") + } + dnsNames = append(dnsNames, string(name)) case nameTypeURI: - uri, err := url.Parse(string(data)) + uriStr := string(data) + if err := isIA5String(uriStr); err != nil { + return errors.New("x509: SAN uniformResourceIdentifier is malformed") + } + uri, err := url.Parse(uriStr) if err != nil { - return fmt.Errorf("x509: cannot parse URI %q: %s", string(data), err) + return fmt.Errorf("x509: cannot parse URI %q: %s", uriStr, err) } if len(uri.Host) > 0 { if _, ok := domainToReverseLabels(uri.Host); !ok { - return fmt.Errorf("x509: cannot parse URI %q: invalid domain", string(data)) + return fmt.Errorf("x509: cannot parse URI %q: invalid domain", uriStr) } } uris = append(uris, uri) @@ -1625,9 +1637,15 @@ func oidInExtensions(oid asn1.ObjectIdentifier, extensions []pkix.Extension) boo func marshalSANs(dnsNames, emailAddresses []string, ipAddresses []net.IP, uris []*url.URL) (derBytes []byte, err error) { var rawValues []asn1.RawValue for _, name := range dnsNames { + if err := isIA5String(name); err != nil { + return nil, err + } 
rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeDNS, Class: 2, Bytes: []byte(name)}) } for _, email := range emailAddresses { + if err := isIA5String(email); err != nil { + return nil, err + } rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeEmail, Class: 2, Bytes: []byte(email)}) } for _, rawIP := range ipAddresses { @@ -1639,14 +1657,19 @@ func marshalSANs(dnsNames, emailAddresses []string, ipAddresses []net.IP, uris [ rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeIP, Class: 2, Bytes: ip}) } for _, uri := range uris { - rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeURI, Class: 2, Bytes: []byte(uri.String())}) + uriStr := uri.String() + if err := isIA5String(uriStr); err != nil { + return nil, err + } + rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeURI, Class: 2, Bytes: []byte(uriStr)}) } return asn1.Marshal(rawValues) } func isIA5String(s string) error { for _, r := range s { - if r >= utf8.RuneSelf { + // Per RFC5280 "IA5String is limited to the set of ASCII characters" + if r > unicode.MaxASCII { return fmt.Errorf("x509: %q cannot be encoded as an IA5String", s) } } diff --git a/src/crypto/x509/x509_test.go b/src/crypto/x509/x509_test.go index 6345c3f5ab..e87294bde5 100644 --- a/src/crypto/x509/x509_test.go +++ b/src/crypto/x509/x509_test.go @@ -2773,3 +2773,93 @@ func TestUnknownExtKey(t *testing.T) { t.Errorf("expected error containing %q, got %s", errorContains, err) } } + +func TestIA5SANEnforcement(t *testing.T) { + k, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatalf("ecdsa.GenerateKey failed: %s", err) + } + + testURL, err := url.Parse("https://example.com/") + if err != nil { + t.Fatalf("url.Parse failed: %s", err) + } + testURL.RawQuery = "∞" + + marshalTests := []struct { + name string + template *Certificate + expectedError string + }{ + { + name: "marshal: unicode dNSName", + template: &Certificate{ + SerialNumber: big.NewInt(0), + DNSNames: []string{"∞"}, + }, + 
expectedError: "x509: \"∞\" cannot be encoded as an IA5String", + }, + { + name: "marshal: unicode rfc822Name", + template: &Certificate{ + SerialNumber: big.NewInt(0), + EmailAddresses: []string{"∞"}, + }, + expectedError: "x509: \"∞\" cannot be encoded as an IA5String", + }, + { + name: "marshal: unicode uniformResourceIdentifier", + template: &Certificate{ + SerialNumber: big.NewInt(0), + URIs: []*url.URL{testURL}, + }, + expectedError: "x509: \"https://example.com/?∞\" cannot be encoded as an IA5String", + }, + } + + for _, tc := range marshalTests { + t.Run(tc.name, func(t *testing.T) { + _, err := CreateCertificate(rand.Reader, tc.template, tc.template, k.Public(), k) + if err == nil { + t.Errorf("expected CreateCertificate to fail with template: %v", tc.template) + } else if err.Error() != tc.expectedError { + t.Errorf("unexpected error: got %q, want %q", err.Error(), tc.expectedError) + } + }) + } + + unmarshalTests := []struct { + name string + cert string + expectedError string + }{ + { + name: "unmarshal: unicode dNSName", + cert: "308201083081aea003020102020100300a06082a8648ce3d04030230003022180f30303031303130313030303030305a180f30303031303130313030303030305a30003059301306072a8648ce3d020106082a8648ce3d0301070342000424bcc48180d8d9db794028f2575ebe3cac79f04d7b0d0151c5292e588aac3668c495f108c626168462e0668c9705e08a211dd103a659d2684e0adf8c2bfd47baa315301330110603551d110101ff040730058203e2889e300a06082a8648ce3d04030203490030460221008ac7827ac326a6ee0fa70b2afe99af575ec60b975f820f3c25f60fff43fbccd0022100bffeed93556722d43d13e461d5b3e33efc61f6349300327d3a0196cb6da501c2", + expectedError: "x509: SAN dNSName is malformed", + }, + { + name: "unmarshal: unicode rfc822Name", + cert: 
"308201083081aea003020102020100300a06082a8648ce3d04030230003022180f30303031303130313030303030305a180f30303031303130313030303030305a30003059301306072a8648ce3d020106082a8648ce3d0301070342000405cb4c4ba72aac980f7b11b0285191425e29e196ce7c5df1c83f56886566e517f196657cc1b73de89ab84ce503fd634e2f2af88fde24c63ca536dc3a5eed2665a315301330110603551d110101ff040730058103e2889e300a06082a8648ce3d0403020349003046022100ed1431cd4b9bb03d88d1511a0ec128a51204375764c716280dc36e2a60142c8902210088c96d25cfaf97eea851ff17d87bb6fe619d6546656e1739f35c3566051c3d0f", + expectedError: "x509: SAN rfc822Name is malformed", + }, + { + name: "unmarshal: unicode uniformResourceIdentifier", + cert: "3082011b3081c3a003020102020100300a06082a8648ce3d04030230003022180f30303031303130313030303030305a180f30303031303130313030303030305a30003059301306072a8648ce3d020106082a8648ce3d03010703420004ce0a79b511701d9188e1ea76bcc5907f1db51de6cc1a037b803f256e8588145ca409d120288bfeb4e38f3088104674d374b35bb91fc80d768d1d519dbe2b0b5aa32a302830260603551d110101ff041c301a861868747470733a2f2f6578616d706c652e636f6d2f3fe2889e300a06082a8648ce3d0403020347003044022044f4697779fd1dae1e382d2452413c5c5ca67851e267d6bc64a8d164977c172c0220505015e657637aa1945d46e7650b6f59b968fc1508ca8b152c99f782446dfc81", + expectedError: "x509: SAN uniformResourceIdentifier is malformed", + }, + } + + for _, tc := range unmarshalTests { + der, err := hex.DecodeString(tc.cert) + if err != nil { + t.Fatalf("failed to decode test cert: %s", err) + } + _, err = ParseCertificate(der) + if err == nil { + t.Error("expected CreateCertificate to fail") + } else if err.Error() != tc.expectedError { + t.Errorf("unexpected error: got %q, want %q", err.Error(), tc.expectedError) + } + } +} From bb9b319228760f9e4b2517114b6eecc6fe0cef30 Mon Sep 17 00:00:00 2001 From: "Bryan C. 
Mills" Date: Wed, 23 Sep 2020 16:32:59 -0400 Subject: [PATCH 071/281] cmd/go: add yet another test case for ambiguous arguments to 'go get' For #37438 Change-Id: Ie40971ff677d36ddadbf9834bba2d366a0fc34d0 Reviewed-on: https://go-review.googlesource.com/c/go/+/256922 Trust: Bryan C. Mills Trust: Jay Conrod Run-TryBot: Bryan C. Mills TryBot-Result: Go Bot Reviewed-by: Michael Matloob Reviewed-by: Jay Conrod --- .../example.net_ambiguous_nested_v0.1.0.txt | 19 ++++ .../mod/example.net_ambiguous_v0.1.0.txt | 19 ++++ .../mod/example.net_ambiguous_v0.2.0.txt | 18 ++++ .../testdata/script/mod_get_ambiguous_arg.txt | 8 +- .../testdata/script/mod_get_ambiguous_pkg.txt | 101 ++++++++++++++++++ 5 files changed, 161 insertions(+), 4 deletions(-) create mode 100644 src/cmd/go/testdata/mod/example.net_ambiguous_nested_v0.1.0.txt create mode 100644 src/cmd/go/testdata/mod/example.net_ambiguous_v0.1.0.txt create mode 100644 src/cmd/go/testdata/mod/example.net_ambiguous_v0.2.0.txt create mode 100644 src/cmd/go/testdata/script/mod_get_ambiguous_pkg.txt diff --git a/src/cmd/go/testdata/mod/example.net_ambiguous_nested_v0.1.0.txt b/src/cmd/go/testdata/mod/example.net_ambiguous_nested_v0.1.0.txt new file mode 100644 index 0000000000..8c9de7a5f4 --- /dev/null +++ b/src/cmd/go/testdata/mod/example.net_ambiguous_nested_v0.1.0.txt @@ -0,0 +1,19 @@ +Written by hand. + +Test module containing a package that is also provided by a nested module tagged +with the same version. 
+ +-- .mod -- +module example.net/ambiguous/nested + +go 1.16 +-- .info -- +{"Version": "v0.1.0"} +-- go.mod -- +module example.net/ambiguous/nested + +go 1.16 +-- pkg/pkg.go -- +// Package pkg exists in both example.net/ambiguous v0.1.0 +// and example.net/ambiguous/nested v0.1.0 +package pkg diff --git a/src/cmd/go/testdata/mod/example.net_ambiguous_v0.1.0.txt b/src/cmd/go/testdata/mod/example.net_ambiguous_v0.1.0.txt new file mode 100644 index 0000000000..8fa6d83346 --- /dev/null +++ b/src/cmd/go/testdata/mod/example.net_ambiguous_v0.1.0.txt @@ -0,0 +1,19 @@ +Written by hand. + +Test module containing a package that is also provided by a nested module tagged +with the same version. + +-- .mod -- +module example.net/ambiguous + +go 1.16 +-- .info -- +{"Version": "v0.1.0"} +-- go.mod -- +module example.net/ambiguous + +go 1.16 +-- nested/pkg/pkg.go -- +// Package pkg exists in both example.net/ambiguous v0.1.0 +// and example.net/ambiguous/nested v0.1.0 +package pkg diff --git a/src/cmd/go/testdata/mod/example.net_ambiguous_v0.2.0.txt b/src/cmd/go/testdata/mod/example.net_ambiguous_v0.2.0.txt new file mode 100644 index 0000000000..7589ad76a3 --- /dev/null +++ b/src/cmd/go/testdata/mod/example.net_ambiguous_v0.2.0.txt @@ -0,0 +1,18 @@ +Written by hand. + +Test module containing a package that is also provided by a nested module tagged +with the same version. + +-- .mod -- +module example.net/ambiguous + +go 1.16 +-- .info -- +{"Version": "v0.2.0"} +-- go.mod -- +module example.net/ambiguous + +go 1.16 +-- nested/pkg/README.txt -- +// Package pkg no longer exists in this module at v0.2.0. +// Find it in module example.net/ambiguous/nested instead. 
diff --git a/src/cmd/go/testdata/script/mod_get_ambiguous_arg.txt b/src/cmd/go/testdata/script/mod_get_ambiguous_arg.txt index 7729f29ced..f64da3a3fd 100644 --- a/src/cmd/go/testdata/script/mod_get_ambiguous_arg.txt +++ b/src/cmd/go/testdata/script/mod_get_ambiguous_arg.txt @@ -70,7 +70,7 @@ module m go 1.16 -- m01/README.txt -- -Module m at v0.3.0 does not yet contain package p. +Module m at v0.1.0 does not yet contain package p. -- m02/go.mod -- module m @@ -107,18 +107,18 @@ module m/p go 1.16 -- mp01/README.txt -- This module is m/p. -Package m/p no longer exists. +Package m/p does not exist in this module. -- mp02/go.mod -- module m/p go 1.16 -- mp02/README.txt -- This module is m/p. -Package m/p no longer exists. +Package m/p does not exist in this module. -- mp03/go.mod -- module m/p go 1.16 -- mp03/README.txt -- This module is m/p. -Package m/p no longer exists. +Package m/p does not exist in this module. diff --git a/src/cmd/go/testdata/script/mod_get_ambiguous_pkg.txt b/src/cmd/go/testdata/script/mod_get_ambiguous_pkg.txt new file mode 100644 index 0000000000..f00f99ee8c --- /dev/null +++ b/src/cmd/go/testdata/script/mod_get_ambiguous_pkg.txt @@ -0,0 +1,101 @@ +# Both example.net/ambiguous v0.1.0 and example.net/ambiguous/pkg v0.1.0 exist. +# 'go mod tidy' would arbitrarily choose the one with the longer path, +# but 'go mod tidy' also arbitrarily chooses the latest version. + +cp go.mod go.mod.orig + + +# From a clean slate, 'go get' currently does the same thing as 'go mod tidy': +# it resolves the package from the module with the longest matching prefix. + +go get -d example.net/ambiguous/nested/pkg@v0.1.0 +go list -m all +stdout '^example.net/ambiguous/nested v0.1.0$' +! stdout '^example.net/ambiguous ' + + +# From an initial state that already depends on the shorter path, +# the same 'go get' command attempts to add the longer path and fails. +# +# TODO(bcmills): What should really happen here? 
+# Should we match the versioned package path against the existing package +# (reducing unexpected errors), or give it the same meaning regardless of the +# initial state? + +cp go.mod.orig go.mod +go mod edit -require=example.net/ambiguous@v0.1.0 + +! go get -d example.net/ambiguous/nested/pkg@v0.1.0 +stderr '^go get example.net/ambiguous/nested/pkg@v0.1.0: ambiguous import: found package example.net/ambiguous/nested/pkg in multiple modules:\n\texample.net/ambiguous v0.1.0 \(.*\)\n\texample.net/ambiguous/nested v0.1.0 \(.*\)\n\z' + + +# The user should be able to fix the aforementioned failure by explicitly +# upgrading the conflicting module. + +go get -d example.net/ambiguous@v0.2.0 example.net/ambiguous/nested/pkg@v0.1.0 +go list -m all +stdout '^example.net/ambiguous/nested v0.1.0$' +stdout '^example.net/ambiguous v0.2.0$' + + +# ...or by explicitly NOT adding the conflicting module. +# +# BUG(#37438): Today, this does not work: explicit module version constraints do +# not affect the package-to-module mapping during package upgrades, so the +# arguments are interpreted as specifying conflicting versions of the longer +# module path. + +cp go.mod.orig go.mod +go mod edit -require=example.net/ambiguous@v0.1.0 + +! go get -d example.net/ambiguous/nested/pkg@v0.1.0 example.net/ambiguous/nested@none +stderr '^go get: conflicting versions for module example.net/ambiguous/nested: v0.1.0 and none$' + + # go list -m all + # ! stdout '^example.net/ambiguous/nested ' + # stdout '^example.net/ambiguous v0.1.0$' + + +# The user should also be able to fix it by *downgrading* the conflicting module +# away. +# +# BUG(#37438): Today, this does not work: the "ambiguous import" error causes +# 'go get' to fail before applying the requested downgrade. + +cp go.mod.orig go.mod +go mod edit -require=example.net/ambiguous@v0.1.0 + +! 
go get -d example.net/ambiguous@none example.net/ambiguous/nested/pkg@v0.1.0 +stderr '^go get example.net/ambiguous/nested/pkg@v0.1.0: ambiguous import: found package example.net/ambiguous/nested/pkg in multiple modules:\n\texample.net/ambiguous v0.1.0 \(.*\)\n\texample.net/ambiguous/nested v0.1.0 \(.*\)\n\z' + + # go list -m all + # stdout '^example.net/ambiguous/nested v0.1.0$' + # !stdout '^example.net/ambiguous ' + + +# In contrast, if we do the same thing tacking a wildcard pattern ('/...') on +# the end of the package path, we get different behaviors depending on the +# initial state, and no error. (This seems to contradict the “same meaning +# regardless of the initial state” point above, but maybe that's ok?) + +cp go.mod.orig go.mod + +go get -d example.net/ambiguous/nested/pkg/...@v0.1.0 +go list -m all +stdout '^example.net/ambiguous/nested v0.1.0$' +! stdout '^example.net/ambiguous ' + + +cp go.mod.orig go.mod +go mod edit -require=example.net/ambiguous@v0.1.0 + +go get -d example.net/ambiguous/nested/pkg/...@v0.1.0 +go list -m all +! stdout '^example.net/ambiguous/nested ' +stdout '^example.net/ambiguous v0.1.0$' + + +-- go.mod -- +module test + +go 1.16 From f811663f0483b05bb9986ce648bb653564217c6e Mon Sep 17 00:00:00 2001 From: "Bryan C. Mills" Date: Tue, 29 Sep 2020 10:23:07 -0400 Subject: [PATCH 072/281] cmd/go: test more commands in mod_build_info_error For #26909 For #41688 Change-Id: I22f28d426ce499fce6f0f1295dbde425998042aa Reviewed-on: https://go-review.googlesource.com/c/go/+/258219 Trust: Bryan C. Mills Trust: Jay Conrod Run-TryBot: Bryan C. 
Mills TryBot-Result: Go Bot Reviewed-by: Jay Conrod Reviewed-by: Michael Matloob --- src/cmd/go/testdata/script/mod_build_info_err.txt | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/src/cmd/go/testdata/script/mod_build_info_err.txt b/src/cmd/go/testdata/script/mod_build_info_err.txt index a6853b5c86..4a6ee9e8bb 100644 --- a/src/cmd/go/testdata/script/mod_build_info_err.txt +++ b/src/cmd/go/testdata/script/mod_build_info_err.txt @@ -1,8 +1,19 @@ # This test verifies that line numbers are included in module import errors. # Verifies golang.org/issue/34393. -go list -e -deps -f '{{with .Error}}{{.Pos}}: {{.Err}}{{end}}' ./main -stdout 'bad[/\\]bad.go:3:8: malformed import path "🐧.example.com/string": invalid char ''🐧''' +go list -e -mod=mod -deps -f '{{with .Error}}{{.Pos}}: {{.Err}}{{end}}' ./main +stdout '^bad[/\\]bad.go:3:8: malformed import path "🐧.example.com/string": invalid char ''🐧''$' + +# TODO(#26909): This should include an import stack. +# (Today it includes only a file and line.) +! go build ./main +stderr '^bad[/\\]bad.go:3:8: malformed import path "🐧.example.com/string": invalid char ''🐧''$' + +# TODO(#41688): This should include a file and line, and report the reason for the error.. +# (Today it includes only an import stack, and does not indicate the actual problem.) +! 
go get -d ./main +stderr '^m/main imports\n\tm/bad imports\n\t🐧.example.com/string: import missing$' + -- go.mod -- module m From 2f6e7f0ed16004e1ac40e7108878fd4ec99bb15d Mon Sep 17 00:00:00 2001 From: Emmanuel T Odeke Date: Wed, 30 Sep 2020 00:47:48 -0700 Subject: [PATCH 073/281] src/go.mod, net/http: update bundled and latest golang.org/x/net Updates x/net/http2 to git rev 5d4f7005572804eaf7f5ecdd2473a62557f733ba http2: send WINDOW_UPDATE on a body's write failure https://golang.org/cl/245158 (fixes #40423) also updates the vendored version of golang.org/x/net as per $ go get golang.org/x/net@5d4f700557 $ go mod tidy $ go mod vendor $ go generate -run bundle std For #40423. Change-Id: I3270d0fb6f28889266596f7365d36d30ef2bb368 Reviewed-on: https://go-review.googlesource.com/c/go/+/258359 Run-TryBot: Emmanuel Odeke TryBot-Result: Go Bot Trust: Emmanuel Odeke Reviewed-by: Dmitri Shuralyov --- src/go.mod | 2 +- src/go.sum | 4 ++-- src/net/http/h2_bundle.go | 1 + src/vendor/modules.txt | 2 +- 4 files changed, 5 insertions(+), 4 deletions(-) diff --git a/src/go.mod b/src/go.mod index 86e3c8c5b7..3bdfbef052 100644 --- a/src/go.mod +++ b/src/go.mod @@ -4,7 +4,7 @@ go 1.16 require ( golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a - golang.org/x/net v0.0.0-20200925080053-05aa5d4ee321 + golang.org/x/net v0.0.0-20200927032502-5d4f70055728 golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d // indirect golang.org/x/text v0.3.4-0.20200826142016-a8b467125457 // indirect ) diff --git a/src/go.sum b/src/go.sum index 86a8c4be2a..6a03887409 100644 --- a/src/go.sum +++ b/src/go.sum @@ -3,8 +3,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a h1:vclmkQCjlDX5OydZ9wv8rBCcS0QyQY66Mpf/7BZbInM= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20200925080053-05aa5d4ee321 h1:lleNcKRbcaC8MqgLwghIkzZ2JBQAb7QQ9MiwRt1BisA= -golang.org/x/net v0.0.0-20200925080053-05aa5d4ee321/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200927032502-5d4f70055728 h1:5wtQIAulKU5AbLQOkjxl32UufnIOqgBX72pS0AV14H0= +golang.org/x/net v0.0.0-20200927032502-5d4f70055728/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/src/net/http/h2_bundle.go b/src/net/http/h2_bundle.go index 458e0b7646..5b92eb234b 100644 --- a/src/net/http/h2_bundle.go +++ b/src/net/http/h2_bundle.go @@ -5265,6 +5265,7 @@ func (sc *http2serverConn) processData(f *http2DataFrame) error { if len(data) > 0 { wrote, err := st.body.Write(data) if err != nil { + sc.sendWindowUpdate(nil, int(f.Length)-wrote) return http2streamError(id, http2ErrCodeStreamClosed) } if wrote != len(data) { diff --git a/src/vendor/modules.txt b/src/vendor/modules.txt index d53b647310..36d76e77b5 100644 --- a/src/vendor/modules.txt +++ b/src/vendor/modules.txt @@ -8,7 +8,7 @@ golang.org/x/crypto/curve25519 golang.org/x/crypto/hkdf golang.org/x/crypto/internal/subtle golang.org/x/crypto/poly1305 -# golang.org/x/net v0.0.0-20200925080053-05aa5d4ee321 +# golang.org/x/net v0.0.0-20200927032502-5d4f70055728 ## explicit golang.org/x/net/dns/dnsmessage golang.org/x/net/http/httpguts From e674b7703e48e357cd46939413620f21cb84027d Mon Sep 17 00:00:00 2001 From: Jeremy Faller Date: Mon, 28 Sep 2020 14:11:02 -0400 Subject: [PATCH 074/281] [dev.link] cmd/link run generators in parallel MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 
Content-Transfer-Encoding: 8bit Small runtime win: Stats for darwin, building cmd/compile: Asmb 20.7ms ±14% 18.3ms ±14% -11.54% (p=0.002 n=10+10) TotalTime 365ms ±10% 351ms ± 2% ~ (p=0.211 n=10+9) Change-Id: Ia8afdf6948111d59b0c89e52cb50557a10f33c40 Reviewed-on: https://go-review.googlesource.com/c/go/+/257964 Trust: Jeremy Faller Run-TryBot: Jeremy Faller TryBot-Result: Go Bot Reviewed-by: Austin Clements --- src/cmd/link/internal/ld/main.go | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/src/cmd/link/internal/ld/main.go b/src/cmd/link/internal/ld/main.go index 6f4ccbfb7a..778b0e9245 100644 --- a/src/cmd/link/internal/ld/main.go +++ b/src/cmd/link/internal/ld/main.go @@ -36,12 +36,14 @@ import ( "cmd/internal/objabi" "cmd/internal/sys" "cmd/link/internal/benchmark" + "cmd/link/internal/loader" "flag" "log" "os" "runtime" "runtime/pprof" "strings" + "sync" ) var ( @@ -324,9 +326,15 @@ func Main(arch *sys.Arch, theArch Arch) { bench.Start("Asmb") asmb(ctxt) // Generate large symbols. 
+ var wg sync.WaitGroup for s, f := range ctxt.generatorSyms { - f(ctxt, s) + wg.Add(1) + go func(f generatorFunc, s loader.Sym) { + defer wg.Done() + f(ctxt, s) + }(f, s) } + wg.Wait() bench.Start("Asmb2") asmb2(ctxt) From 846dce9d05f19a1f53465e62a304dea21b99f910 Mon Sep 17 00:00:00 2001 From: mengxiaodong <920432478@qq.com> Date: Mon, 28 Sep 2020 17:38:13 +0800 Subject: [PATCH 075/281] runtime: code cleanup about map 1.Revise ambiguous comments: "all current buckets" means buckets in hmap.buckets, actually current bucket and all the overflow buckets connected to it are full 2.All the pointer address add use src/runtime/stubs.go:add, keep the code style uniform Change-Id: Idc7224dbe6c391e1b03bf5d009c3734bc75187ce Reviewed-on: https://go-review.googlesource.com/c/go/+/257979 Reviewed-by: Austin Clements Reviewed-by: Keith Randall Run-TryBot: Austin Clements TryBot-Result: Go Bot --- src/runtime/map.go | 4 ++-- src/runtime/map_fast32.go | 8 ++++---- src/runtime/map_fast64.go | 8 ++++---- src/runtime/map_faststr.go | 4 ++-- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/src/runtime/map.go b/src/runtime/map.go index 6f31f23d6f..5ac3a9958b 100644 --- a/src/runtime/map.go +++ b/src/runtime/map.go @@ -599,7 +599,7 @@ again: if h.growing() { growWork(t, h, bucket) } - b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize))) + b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize))) top := tophash(hash) var inserti *uint8 @@ -650,7 +650,7 @@ bucketloop: } if inserti == nil { - // all current buckets are full, allocate a new one. + // The current bucket and all the overflow buckets connected to it are full, allocate a new one. 
newb := h.newoverflow(t, b) inserti = &newb.tophash[0] insertk = add(unsafe.Pointer(newb), dataOffset) diff --git a/src/runtime/map_fast32.go b/src/runtime/map_fast32.go index d80f5eac78..8d52dad217 100644 --- a/src/runtime/map_fast32.go +++ b/src/runtime/map_fast32.go @@ -114,7 +114,7 @@ again: if h.growing() { growWork_fast32(t, h, bucket) } - b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize))) + b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize))) var insertb *bmap var inserti uintptr @@ -158,7 +158,7 @@ bucketloop: } if insertb == nil { - // all current buckets are full, allocate a new one. + // The current bucket and all the overflow buckets connected to it are full, allocate a new one. insertb = h.newoverflow(t, b) inserti = 0 // not necessary, but avoids needlessly spilling inserti } @@ -204,7 +204,7 @@ again: if h.growing() { growWork_fast32(t, h, bucket) } - b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize))) + b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize))) var insertb *bmap var inserti uintptr @@ -248,7 +248,7 @@ bucketloop: } if insertb == nil { - // all current buckets are full, allocate a new one. + // The current bucket and all the overflow buckets connected to it are full, allocate a new one. insertb = h.newoverflow(t, b) inserti = 0 // not necessary, but avoids needlessly spilling inserti } diff --git a/src/runtime/map_fast64.go b/src/runtime/map_fast64.go index 3bc84bbdd3..f1368dc774 100644 --- a/src/runtime/map_fast64.go +++ b/src/runtime/map_fast64.go @@ -114,7 +114,7 @@ again: if h.growing() { growWork_fast64(t, h, bucket) } - b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize))) + b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize))) var insertb *bmap var inserti uintptr @@ -158,7 +158,7 @@ bucketloop: } if insertb == nil { - // all current buckets are full, allocate a new one. 
+ // The current bucket and all the overflow buckets connected to it are full, allocate a new one. insertb = h.newoverflow(t, b) inserti = 0 // not necessary, but avoids needlessly spilling inserti } @@ -204,7 +204,7 @@ again: if h.growing() { growWork_fast64(t, h, bucket) } - b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize))) + b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize))) var insertb *bmap var inserti uintptr @@ -248,7 +248,7 @@ bucketloop: } if insertb == nil { - // all current buckets are full, allocate a new one. + // The current bucket and all the overflow buckets connected to it are full, allocate a new one. insertb = h.newoverflow(t, b) inserti = 0 // not necessary, but avoids needlessly spilling inserti } diff --git a/src/runtime/map_faststr.go b/src/runtime/map_faststr.go index 108c502394..2d1ac762a8 100644 --- a/src/runtime/map_faststr.go +++ b/src/runtime/map_faststr.go @@ -225,7 +225,7 @@ again: if h.growing() { growWork_faststr(t, h, bucket) } - b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize))) + b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize))) top := tophash(hash) var insertb *bmap @@ -274,7 +274,7 @@ bucketloop: } if insertb == nil { - // all current buckets are full, allocate a new one. + // The current bucket and all the overflow buckets connected to it are full, allocate a new one. insertb = h.newoverflow(t, b) inserti = 0 // not necessary, but avoids needlessly spilling inserti } From c863e14a6c15e174ac0979ddd7f9530d6a4ec9cc Mon Sep 17 00:00:00 2001 From: Jeremy Faller Date: Tue, 18 Aug 2020 13:38:04 -0400 Subject: [PATCH 076/281] [dev.link] cmd/link: use generator symbols for the rest of pclntab MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Move the rest of pclntab creation to generator symbols. Any savings in pclntab generation CPU time is eaten by the generators run in Asmb phase. 
Stats for Darwin, cmd/compile: alloc/op: Pclntab_GC 13.9MB ± 0% 6.4MB ± 0% -53.68% (p=0.000 n=10+10) allocs/op Pclntab_GC 86.5k ± 0% 61.5k ± 0% -28.90% (p=0.000 n=10+10) liveB: Pclntab_GC 24.3M ± 0% 22.9M ± 0% -5.57% (p=0.000 n=10+10) Timing: Pclntab 32.1ms ± 2% 24.2ms ± 2% -24.35% (p=0.000 n=9+9) Asmb 18.3ms ±14% 27.4ms ± 9% +49.55% (p=0.000 n=10+10) TotalTime 351ms ± 2% 347ms ± 3% ~ (p=0.200 n=9+8) Change-Id: I5c6b6df5953f6f255240e07578f1c9f8c5f68500 Reviewed-on: https://go-review.googlesource.com/c/go/+/249023 Trust: Jeremy Faller Run-TryBot: Jeremy Faller TryBot-Result: Go Bot Reviewed-by: Austin Clements --- src/cmd/link/internal/ld/data.go | 4 +- src/cmd/link/internal/ld/pcln.go | 558 +++++++++++++++++++------------ 2 files changed, 350 insertions(+), 212 deletions(-) diff --git a/src/cmd/link/internal/ld/data.go b/src/cmd/link/internal/ld/data.go index 23357e4c1b..5aecdf29b7 100644 --- a/src/cmd/link/internal/ld/data.go +++ b/src/cmd/link/internal/ld/data.go @@ -1932,7 +1932,7 @@ func (state *dodataState) allocateDataSections(ctxt *Link) { ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.cutab", 0), sect) ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.filetab", 0), sect) ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.pctab", 0), sect) - ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.pclntab_old", 0), sect) + ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.functab", 0), sect) ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.epclntab", 0), sect) if ctxt.HeadType == objabi.Haix { xcoffUpdateOuterSize(ctxt, int64(sect.Length), sym.SPCLNTAB) @@ -2519,7 +2519,7 @@ func (ctxt *Link) address() []*sym.Segment { ctxt.defineInternal("runtime.cutab", sym.SRODATA) ctxt.defineInternal("runtime.filetab", sym.SRODATA) ctxt.defineInternal("runtime.pctab", sym.SRODATA) - ctxt.defineInternal("runtime.pclntab_old", sym.SRODATA) + ctxt.defineInternal("runtime.functab", sym.SRODATA) ctxt.xdefine("runtime.epclntab", sym.SRODATA, int64(pclntab.Vaddr+pclntab.Length)) 
ctxt.xdefine("runtime.noptrdata", sym.SNOPTRDATA, int64(noptr.Vaddr)) ctxt.xdefine("runtime.enoptrdata", sym.SNOPTRDATA, int64(noptr.Vaddr+noptr.Length)) diff --git a/src/cmd/link/internal/ld/pcln.go b/src/cmd/link/internal/ld/pcln.go index 33476ec292..75e63248df 100644 --- a/src/cmd/link/internal/ld/pcln.go +++ b/src/cmd/link/internal/ld/pcln.go @@ -18,6 +18,9 @@ import ( // pclntab holds the state needed for pclntab generation. type pclntab struct { + // The size of the func object in the runtime. + funcSize uint32 + // The first and last functions found. firstFunc, lastFunc loader.Sym @@ -66,7 +69,10 @@ func (state *pclntab) addGeneratedSym(ctxt *Link, name string, size int64, f gen func makePclntab(ctxt *Link, container loader.Bitmap) (*pclntab, []*sym.CompilationUnit, []loader.Sym) { ldr := ctxt.loader - state := &pclntab{} + state := &pclntab{ + // This is the size of the _func object in runtime/runtime2.go. + funcSize: uint32(ctxt.Arch.PtrSize + 9*4), + } // Gather some basic stats and info. seenCUs := make(map[*sym.CompilationUnit]struct{}) @@ -216,6 +222,22 @@ func genInlTreeSym(ctxt *Link, cu *sym.CompilationUnit, fi loader.FuncInfo, arch return its } +// makeInlSyms returns a map of loader.Sym that are created inlSyms. +func makeInlSyms(ctxt *Link, funcs []loader.Sym, nameOffsets map[loader.Sym]uint32) map[loader.Sym]loader.Sym { + ldr := ctxt.loader + // Create the inline symbols we need. + inlSyms := make(map[loader.Sym]loader.Sym) + for _, s := range funcs { + if fi := ldr.FuncInfo(s); fi.Valid() { + fi.Preload() + if fi.NumInlTree() > 0 { + inlSyms[s] = genInlTreeSym(ctxt, ldr.SymUnit(s), fi, ctxt.Arch, nameOffsets) + } + } + } + return inlSyms +} + // generatePCHeader creates the runtime.pcheader symbol, setting it up as a // generator to fill in its data later. 
func (state *pclntab) generatePCHeader(ctxt *Link) { @@ -488,6 +510,327 @@ func (state *pclntab) generatePctab(ctxt *Link, funcs []loader.Sym) { state.pctab = state.addGeneratedSym(ctxt, "runtime.pctab", size, writePctab) } +// numPCData returns the number of PCData syms for the FuncInfo. +// NB: Preload must be called on valid FuncInfos before calling this function. +func numPCData(fi loader.FuncInfo) uint32 { + if !fi.Valid() { + return 0 + } + numPCData := uint32(len(fi.Pcdata())) + if fi.NumInlTree() > 0 { + if numPCData < objabi.PCDATA_InlTreeIndex+1 { + numPCData = objabi.PCDATA_InlTreeIndex + 1 + } + } + return numPCData +} + +// Helper types for iterating pclntab. +type pclnSetAddr func(*loader.SymbolBuilder, *sys.Arch, int64, loader.Sym, int64) int64 +type pclnSetUint func(*loader.SymbolBuilder, *sys.Arch, int64, uint64) int64 + +// generateFunctab creates the runtime.functab +// +// runtime.functab contains two things: +// +// - pc->func look up table. +// - array of func objects, interleaved with pcdata and funcdata +// +// Because of timing in the linker, generating this table takes two passes. +// The first pass is executed early in the link, and it creates any needed +// relocations to layout the data. The pieces that need relocations are: +// 1) the PC->func table. +// 2) The entry points in the func objects. +// 3) The funcdata. +// (1) and (2) are handled in walkPCToFunc. (3) is handled in walkFuncdata. +// +// After relocations, once we know where to write things in the output buffer, +// we execute the second pass, which is actually writing the data. +func (state *pclntab) generateFunctab(ctxt *Link, funcs []loader.Sym, inlSyms map[loader.Sym]loader.Sym, cuOffsets []uint32, nameOffsets map[loader.Sym]uint32) { + // Calculate the size of the table. 
+ size, startLocations := state.calculateFunctabSize(ctxt, funcs) + + // If we are internally linking a static executable, the function addresses + // are known, so we can just use them instead of emitting relocations. For + // other cases we still need to emit relocations. + // + // This boolean just helps us figure out which callback to use. + useSymValue := ctxt.IsExe() && ctxt.IsInternal() + + writePcln := func(ctxt *Link, s loader.Sym) { + ldr := ctxt.loader + sb := ldr.MakeSymbolUpdater(s) + + // Create our callbacks. + var setAddr pclnSetAddr + if useSymValue { + // We need to write the offset. + setAddr = func(s *loader.SymbolBuilder, arch *sys.Arch, off int64, tgt loader.Sym, add int64) int64 { + if v := ldr.SymValue(tgt); v != 0 { + s.SetUint(arch, off, uint64(v+add)) + } + return 0 + } + } else { + // We already wrote relocations. + setAddr = func(s *loader.SymbolBuilder, arch *sys.Arch, off int64, tgt loader.Sym, add int64) int64 { return 0 } + } + + // Write the data. + writePcToFunc(ctxt, sb, funcs, startLocations, setAddr, (*loader.SymbolBuilder).SetUint) + writeFuncs(ctxt, sb, funcs, inlSyms, startLocations, cuOffsets, nameOffsets) + state.writeFuncData(ctxt, sb, funcs, inlSyms, startLocations, setAddr, (*loader.SymbolBuilder).SetUint) + } + + state.pclntab = state.addGeneratedSym(ctxt, "runtime.functab", size, writePcln) + + // Create the relocations we need. + ldr := ctxt.loader + sb := ldr.MakeSymbolUpdater(state.pclntab) + + var setAddr pclnSetAddr + if useSymValue { + // If we should use the symbol value, and we don't have one, write a relocation. + setAddr = func(sb *loader.SymbolBuilder, arch *sys.Arch, off int64, tgt loader.Sym, add int64) int64 { + if v := ldr.SymValue(tgt); v == 0 { + sb.SetAddrPlus(arch, off, tgt, add) + } + return 0 + } + } else { + // If we're externally linking, write a relocation. 
+ setAddr = (*loader.SymbolBuilder).SetAddrPlus + } + setUintNOP := func(*loader.SymbolBuilder, *sys.Arch, int64, uint64) int64 { return 0 } + writePcToFunc(ctxt, sb, funcs, startLocations, setAddr, setUintNOP) + if !useSymValue { + // Generate relocations for funcdata when externally linking. + state.writeFuncData(ctxt, sb, funcs, inlSyms, startLocations, setAddr, setUintNOP) + } +} + +// funcData returns the funcdata and offsets for the FuncInfo. +// The funcdata and offsets are written into runtime.functab after each func +// object. This is a helper function to make querying the FuncInfo object +// cleaner. +// +// Note, the majority of fdOffsets are 0, meaning there is no offset between +// the compiler's generated symbol, and what the runtime needs. They are +// plumbed through for no loss of generality. +// +// NB: Preload must be called on the FuncInfo before calling. +// NB: fdSyms and fdOffs are used as scratch space. +func funcData(fi loader.FuncInfo, inlSym loader.Sym, fdSyms []loader.Sym, fdOffs []int64) ([]loader.Sym, []int64) { + fdSyms, fdOffs = fdSyms[:0], fdOffs[:0] + if fi.Valid() { + numOffsets := int(fi.NumFuncdataoff()) + for i := 0; i < numOffsets; i++ { + fdOffs = append(fdOffs, fi.Funcdataoff(i)) + } + fdSyms = fi.Funcdata(fdSyms) + if fi.NumInlTree() > 0 { + if len(fdSyms) < objabi.FUNCDATA_InlTree+1 { + fdSyms = append(fdSyms, make([]loader.Sym, objabi.FUNCDATA_InlTree+1-len(fdSyms))...) + fdOffs = append(fdOffs, make([]int64, objabi.FUNCDATA_InlTree+1-len(fdOffs))...) + } + fdSyms[objabi.FUNCDATA_InlTree] = inlSym + } + } + return fdSyms, fdOffs +} + +// calculateFunctabSize calculates the size of the pclntab, and the offsets in +// the output buffer for individual func entries. +func (state pclntab) calculateFunctabSize(ctxt *Link, funcs []loader.Sym) (int64, []uint32) { + ldr := ctxt.loader + startLocations := make([]uint32, len(funcs)) + + // Allocate space for the pc->func table. 
This structure consists of a pc + // and an offset to the func structure. After that, we have a single pc + // value that marks the end of the last function in the binary. + size := int64(int(state.nfunc)*2*ctxt.Arch.PtrSize + ctxt.Arch.PtrSize) + + // Now find the space for the func objects. We do this in a running manner, + // so that we can find individual starting locations, and because funcdata + // requires alignment. + for i, s := range funcs { + size = Rnd(size, int64(ctxt.Arch.PtrSize)) + startLocations[i] = uint32(size) + fi := ldr.FuncInfo(s) + size += int64(state.funcSize) + if fi.Valid() { + fi.Preload() + numFuncData := int(fi.NumFuncdataoff()) + if fi.NumInlTree() > 0 { + if numFuncData < objabi.FUNCDATA_InlTree+1 { + numFuncData = objabi.FUNCDATA_InlTree + 1 + } + } + size += int64(numPCData(fi) * 4) + if numFuncData > 0 { // Func data is aligned. + size = Rnd(size, int64(ctxt.Arch.PtrSize)) + } + size += int64(numFuncData * ctxt.Arch.PtrSize) + } + } + + return size, startLocations +} + +// writePcToFunc writes the PC->func lookup table. +// This function walks the pc->func lookup table, executing callbacks +// to generate relocations and writing the values for the table. +func writePcToFunc(ctxt *Link, sb *loader.SymbolBuilder, funcs []loader.Sym, startLocations []uint32, setAddr pclnSetAddr, setUint pclnSetUint) { + ldr := ctxt.loader + var prevFunc loader.Sym + prevSect := ldr.SymSect(funcs[0]) + funcIndex := 0 + for i, s := range funcs { + if thisSect := ldr.SymSect(s); thisSect != prevSect { + // With multiple text sections, there may be a hole here in the + // address space. We use an invalid funcoff value to mark the hole. 
+ // See also runtime/symtab.go:findfunc + prevFuncSize := int64(ldr.SymSize(prevFunc)) + setAddr(sb, ctxt.Arch, int64(funcIndex*2*ctxt.Arch.PtrSize), prevFunc, prevFuncSize) + setUint(sb, ctxt.Arch, int64((funcIndex*2+1)*ctxt.Arch.PtrSize), ^uint64(0)) + funcIndex++ + prevSect = thisSect + } + prevFunc = s + // TODO: We don't actually need these relocations, provided we go to a + // module->func look-up-table like we do for filenames. We could have a + // single relocation for the module, and have them all laid out as + // offsets from the beginning of that module. + setAddr(sb, ctxt.Arch, int64(funcIndex*2*ctxt.Arch.PtrSize), s, 0) + setUint(sb, ctxt.Arch, int64((funcIndex*2+1)*ctxt.Arch.PtrSize), uint64(startLocations[i])) + funcIndex++ + + // Write the entry location. + setAddr(sb, ctxt.Arch, int64(startLocations[i]), s, 0) + } + + // Final entry of table is just end pc. + setAddr(sb, ctxt.Arch, int64(funcIndex)*2*int64(ctxt.Arch.PtrSize), prevFunc, ldr.SymSize(prevFunc)) +} + +// writeFuncData writes the funcdata tables. +// +// This function executes a callback for each funcdata needed in +// runtime.functab. It should be called once for internally linked static +// binaries, or twice (once to generate the needed relocations) for other +// build modes. +// +// Note the output of this function is interwoven with writeFuncs, but this is +// a separate function, because it's needed in different passes in +// generateFunctab. +func (state *pclntab) writeFuncData(ctxt *Link, sb *loader.SymbolBuilder, funcs []loader.Sym, inlSyms map[loader.Sym]loader.Sym, startLocations []uint32, setAddr pclnSetAddr, setUint pclnSetUint) { + ldr := ctxt.loader + funcdata, funcdataoff := []loader.Sym{}, []int64{} + for i, s := range funcs { + fi := ldr.FuncInfo(s) + if !fi.Valid() { + continue + } + fi.Preload() + + // funcdata, must be pointer-aligned and we're only int32-aligned. + // Missing funcdata will be 0 (nil pointer). 
+ funcdata, funcdataoff := funcData(fi, inlSyms[s], funcdata, funcdataoff) + if len(funcdata) > 0 { + off := int64(startLocations[i] + state.funcSize + numPCData(fi)*4) + off = Rnd(off, int64(ctxt.Arch.PtrSize)) + for j := range funcdata { + dataoff := off + int64(ctxt.Arch.PtrSize*j) + if funcdata[j] == 0 { + setUint(sb, ctxt.Arch, dataoff, uint64(funcdataoff[j])) + continue + } + // TODO: Does this need deduping? + setAddr(sb, ctxt.Arch, dataoff, funcdata[j], funcdataoff[j]) + } + } + } +} + +// writeFuncs writes the func structures and pcdata to runtime.functab. +func writeFuncs(ctxt *Link, sb *loader.SymbolBuilder, funcs []loader.Sym, inlSyms map[loader.Sym]loader.Sym, startLocations, cuOffsets []uint32, nameOffsets map[loader.Sym]uint32) { + ldr := ctxt.loader + deferReturnSym := ldr.Lookup("runtime.deferreturn", sym.SymVerABIInternal) + funcdata, funcdataoff := []loader.Sym{}, []int64{} + + // Write the individual func objects. + for i, s := range funcs { + fi := ldr.FuncInfo(s) + if fi.Valid() { + fi.Preload() + } + + // Note we skip the space for the entry value -- that's handled inn + // walkPCToFunc. We don't write it here, because it might require a + // relocation. + off := startLocations[i] + uint32(ctxt.Arch.PtrSize) // entry + + // name int32 + nameoff, ok := nameOffsets[s] + if !ok { + panic("couldn't find function name offset") + } + off = uint32(sb.SetUint32(ctxt.Arch, int64(off), uint32(nameoff))) + + // args int32 + // TODO: Move into funcinfo. 
+ args := uint32(0) + if fi.Valid() { + args = uint32(fi.Args()) + } + off = uint32(sb.SetUint32(ctxt.Arch, int64(off), args)) + + // deferreturn + deferreturn := computeDeferReturn(ctxt, deferReturnSym, s) + off = uint32(sb.SetUint32(ctxt.Arch, int64(off), deferreturn)) + + // pcdata + if fi.Valid() { + off = uint32(sb.SetUint32(ctxt.Arch, int64(off), uint32(ldr.SymValue(fi.Pcsp())))) + off = uint32(sb.SetUint32(ctxt.Arch, int64(off), uint32(ldr.SymValue(fi.Pcfile())))) + off = uint32(sb.SetUint32(ctxt.Arch, int64(off), uint32(ldr.SymValue(fi.Pcline())))) + } else { + off += 12 + } + off = uint32(sb.SetUint32(ctxt.Arch, int64(off), uint32(numPCData(fi)))) + + // Store the offset to compilation unit's file table. + cuIdx := ^uint32(0) + if cu := ldr.SymUnit(s); cu != nil { + cuIdx = cuOffsets[cu.PclnIndex] + } + off = uint32(sb.SetUint32(ctxt.Arch, int64(off), cuIdx)) + + // funcID uint8 + var funcID objabi.FuncID + if fi.Valid() { + funcID = fi.FuncID() + } + off = uint32(sb.SetUint8(ctxt.Arch, int64(off), uint8(funcID))) + + off += 2 // pad + + // nfuncdata must be the final entry. + funcdata, funcdataoff = funcData(fi, 0, funcdata, funcdataoff) + off = uint32(sb.SetUint8(ctxt.Arch, int64(off), uint8(len(funcdata)))) + + // Output the pcdata. + if fi.Valid() { + for j, pcSym := range fi.Pcdata() { + sb.SetUint32(ctxt.Arch, int64(off+uint32(j*4)), uint32(ldr.SymValue(pcSym))) + } + if fi.NumInlTree() > 0 { + sb.SetUint32(ctxt.Arch, int64(off+objabi.PCDATA_InlTreeIndex*4), uint32(ldr.SymValue(fi.Pcinline()))) + } + } + } +} + // pclntab initializes the pclntab symbol with // runtime function and file name information. @@ -522,10 +865,10 @@ func (ctxt *Link) pclntab(container loader.Bitmap) *pclntab { // runtime.pctab // []byte of deduplicated pc data. // - // runtime.pclntab_old + // runtime.functab // function table, alternating PC and offset to func struct [each entry thearch.ptrsize bytes] // end PC [thearch.ptrsize bytes] - // func structures, pcdata tables. 
+ // func structures, pcdata offsets, func data. state, compUnits, funcs := makePclntab(ctxt, container) @@ -534,217 +877,12 @@ func (ctxt *Link) pclntab(container loader.Bitmap) *pclntab { ldr.MakeSymbolUpdater(state.carrier).SetType(sym.SPCLNTAB) ldr.SetAttrReachable(state.carrier, true) - // runtime.pclntab_old is just a placeholder,and will eventually be deleted. - // It contains the pieces of runtime.pclntab that haven't moved to a more - // rational form. - state.pclntab = ldr.LookupOrCreateSym("runtime.pclntab_old", 0) state.generatePCHeader(ctxt) nameOffsets := state.generateFuncnametab(ctxt, funcs) cuOffsets := state.generateFilenameTabs(ctxt, compUnits, funcs) state.generatePctab(ctxt, funcs) - - // Used to when computing defer return. - deferReturnSym := ldr.Lookup("runtime.deferreturn", sym.SymVerABIInternal) - - funcdataBytes := int64(0) - ldr.SetCarrierSym(state.pclntab, state.carrier) - ldr.SetAttrNotInSymbolTable(state.pclntab, true) - ftab := ldr.MakeSymbolUpdater(state.pclntab) - ftab.SetValue(state.size) - ftab.SetType(sym.SPCLNTAB) - ftab.SetReachable(true) - - ftab.Grow(int64(state.nfunc)*2*int64(ctxt.Arch.PtrSize) + int64(ctxt.Arch.PtrSize) + 4) - - setAddr := (*loader.SymbolBuilder).SetAddrPlus - if ctxt.IsExe() && ctxt.IsInternal() { - // Internal linking static executable. At this point the function - // addresses are known, so we can just use them instead of emitting - // relocations. - // For other cases we are generating a relocatable binary so we - // still need to emit relocations. 
- setAddr = func(s *loader.SymbolBuilder, arch *sys.Arch, off int64, tgt loader.Sym, add int64) int64 { - if v := ldr.SymValue(tgt); v != 0 { - return s.SetUint(arch, off, uint64(v+add)) - } - return s.SetAddrPlus(arch, off, tgt, add) - } - } - - funcdata := []loader.Sym{} - funcdataoff := []int64{} - - var nfunc int32 - prevFunc := ctxt.Textp[0] - for _, s := range funcs { - thisSect := ldr.SymSect(s) - prevSect := ldr.SymSect(prevFunc) - if thisSect != prevSect { - // With multiple text sections, there may be a hole here - // in the address space (see the comment above). We use an - // invalid funcoff value to mark the hole. See also - // runtime/symtab.go:findfunc - prevFuncSize := int64(ldr.SymSize(prevFunc)) - setAddr(ftab, ctxt.Arch, int64(nfunc)*2*int64(ctxt.Arch.PtrSize), prevFunc, prevFuncSize) - ftab.SetUint(ctxt.Arch, int64(nfunc)*2*int64(ctxt.Arch.PtrSize)+int64(ctxt.Arch.PtrSize), ^uint64(0)) - nfunc++ - } - prevFunc = s - - var numPCData int32 - funcdataoff = funcdataoff[:0] - funcdata = funcdata[:0] - fi := ldr.FuncInfo(s) - if fi.Valid() { - fi.Preload() - numPCData = int32(len(fi.Pcdata())) - nfd := fi.NumFuncdataoff() - for i := uint32(0); i < nfd; i++ { - funcdataoff = append(funcdataoff, fi.Funcdataoff(int(i))) - } - funcdata = fi.Funcdata(funcdata) - } - - writeInlPCData := false - if fi.Valid() && fi.NumInlTree() > 0 { - writeInlPCData = true - if numPCData <= objabi.PCDATA_InlTreeIndex { - numPCData = objabi.PCDATA_InlTreeIndex + 1 - } - if len(funcdataoff) <= objabi.FUNCDATA_InlTree { - // Create inline tree funcdata. 
- newfuncdata := make([]loader.Sym, objabi.FUNCDATA_InlTree+1) - newfuncdataoff := make([]int64, objabi.FUNCDATA_InlTree+1) - copy(newfuncdata, funcdata) - copy(newfuncdataoff, funcdataoff) - funcdata = newfuncdata - funcdataoff = newfuncdataoff - } - } - - dSize := len(ftab.Data()) - funcstart := int32(dSize) - funcstart += int32(-dSize) & (int32(ctxt.Arch.PtrSize) - 1) // align to ptrsize - - setAddr(ftab, ctxt.Arch, int64(nfunc)*2*int64(ctxt.Arch.PtrSize), s, 0) - ftab.SetUint(ctxt.Arch, int64(nfunc)*2*int64(ctxt.Arch.PtrSize)+int64(ctxt.Arch.PtrSize), uint64(funcstart)) - - // Write runtime._func. Keep in sync with ../../../../runtime/runtime2.go:/_func - // and package debug/gosym. - - // fixed size of struct, checked below - off := funcstart - - end := funcstart + int32(ctxt.Arch.PtrSize) + 3*4 + 6*4 + numPCData*4 + int32(len(funcdata))*int32(ctxt.Arch.PtrSize) - if len(funcdata) > 0 && (end&int32(ctxt.Arch.PtrSize-1) != 0) { - end += 4 - } - ftab.Grow(int64(end)) - - // entry uintptr - off = int32(setAddr(ftab, ctxt.Arch, int64(off), s, 0)) - - // name int32 - nameoff, ok := nameOffsets[s] - if !ok { - panic("couldn't find function name offset") - } - off = int32(ftab.SetUint32(ctxt.Arch, int64(off), uint32(nameoff))) - - // args int32 - // TODO: Move into funcinfo. 
- args := uint32(0) - if fi.Valid() { - args = uint32(fi.Args()) - } - off = int32(ftab.SetUint32(ctxt.Arch, int64(off), args)) - - // deferreturn - deferreturn := computeDeferReturn(ctxt, deferReturnSym, s) - off = int32(ftab.SetUint32(ctxt.Arch, int64(off), deferreturn)) - - cu := ldr.SymUnit(s) - - if fi.Valid() && fi.NumInlTree() > 0 { - its := genInlTreeSym(ctxt, cu, fi, ctxt.Arch, nameOffsets) - funcdata[objabi.FUNCDATA_InlTree] = its - } - - // pcdata - if fi.Valid() { - off = int32(ftab.SetUint32(ctxt.Arch, int64(off), uint32(ldr.SymValue(fi.Pcsp())))) - off = int32(ftab.SetUint32(ctxt.Arch, int64(off), uint32(ldr.SymValue(fi.Pcfile())))) - off = int32(ftab.SetUint32(ctxt.Arch, int64(off), uint32(ldr.SymValue(fi.Pcline())))) - } else { - off += 12 - } - off = int32(ftab.SetUint32(ctxt.Arch, int64(off), uint32(numPCData))) - - // Store the offset to compilation unit's file table. - cuIdx := ^uint32(0) - if cu := ldr.SymUnit(s); cu != nil { - cuIdx = cuOffsets[cu.PclnIndex] - } - off = int32(ftab.SetUint32(ctxt.Arch, int64(off), cuIdx)) - - // funcID uint8 - var funcID objabi.FuncID - if fi.Valid() { - funcID = fi.FuncID() - } - off = int32(ftab.SetUint8(ctxt.Arch, int64(off), uint8(funcID))) - - off += 2 // pad - - // nfuncdata must be the final entry. - off = int32(ftab.SetUint8(ctxt.Arch, int64(off), uint8(len(funcdata)))) - - // Output the pcdata. - if fi.Valid() { - for i, pcSym := range fi.Pcdata() { - ftab.SetUint32(ctxt.Arch, int64(off+int32(i*4)), uint32(ldr.SymValue(pcSym))) - } - if writeInlPCData { - ftab.SetUint32(ctxt.Arch, int64(off+objabi.PCDATA_InlTreeIndex*4), uint32(ldr.SymValue(fi.Pcinline()))) - } - } - off += numPCData * 4 - - // funcdata, must be pointer-aligned and we're only int32-aligned. - // Missing funcdata will be 0 (nil pointer). 
- if len(funcdata) > 0 { - if off&int32(ctxt.Arch.PtrSize-1) != 0 { - off += 4 - } - for i := range funcdata { - dataoff := int64(off) + int64(ctxt.Arch.PtrSize)*int64(i) - if funcdata[i] == 0 { - ftab.SetUint(ctxt.Arch, dataoff, uint64(funcdataoff[i])) - continue - } - // TODO: Dedup. - funcdataBytes += int64(len(ldr.Data(funcdata[i]))) - setAddr(ftab, ctxt.Arch, dataoff, funcdata[i], funcdataoff[i]) - } - off += int32(len(funcdata)) * int32(ctxt.Arch.PtrSize) - } - - if off != end { - ctxt.Errorf(s, "bad math in functab: funcstart=%d off=%d but end=%d (npcdata=%d nfuncdata=%d ptrsize=%d)", funcstart, off, end, numPCData, len(funcdata), ctxt.Arch.PtrSize) - errorexit() - } - - nfunc++ - } - - // Final entry of table is just end pc. - setAddr(ftab, ctxt.Arch, int64(nfunc)*2*int64(ctxt.Arch.PtrSize), state.lastFunc, ldr.SymSize(state.lastFunc)) - - ftab.SetSize(int64(len(ftab.Data()))) - - if ctxt.Debugvlog != 0 { - ctxt.Logf("pclntab=%d bytes, funcdata total %d bytes\n", ftab.Size(), funcdataBytes) - } + inlSyms := makeInlSyms(ctxt, funcs, nameOffsets) + state.generateFunctab(ctxt, funcs, inlSyms, cuOffsets, nameOffsets) return state } From b8ec1d5f49ec5f9350f2b0bd99560e4aadfcb70c Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor Date: Wed, 30 Sep 2020 17:37:24 -0700 Subject: [PATCH 077/281] internal/poll: use ignoringEINTR in Darwin Fsync Also add comment explaining why we don't use ignoringEINTR around call to close. Fixes #41115 Change-Id: Ia7bbe01eaf26003f70d184b7e82803efef2b2c18 Reviewed-on: https://go-review.googlesource.com/c/go/+/258542 Trust: Ian Lance Taylor Run-TryBot: Ian Lance Taylor TryBot-Result: Go Bot Reviewed-by: Bryan C. 
Mills --- src/internal/poll/fd_fsync_darwin.go | 7 ++++--- src/internal/poll/fd_unix.go | 7 +++++++ 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/src/internal/poll/fd_fsync_darwin.go b/src/internal/poll/fd_fsync_darwin.go index 91751496a4..48e7596922 100644 --- a/src/internal/poll/fd_fsync_darwin.go +++ b/src/internal/poll/fd_fsync_darwin.go @@ -14,7 +14,8 @@ func (fd *FD) Fsync() error { return err } defer fd.decref() - - _, e1 := fcntl(fd.Sysfd, syscall.F_FULLFSYNC, 0) - return e1 + return ignoringEINTR(func() error { + _, err := fcntl(fd.Sysfd, syscall.F_FULLFSYNC, 0) + return err + }) } diff --git a/src/internal/poll/fd_unix.go b/src/internal/poll/fd_unix.go index f6f6c52f31..2e77e76c87 100644 --- a/src/internal/poll/fd_unix.go +++ b/src/internal/poll/fd_unix.go @@ -74,7 +74,14 @@ func (fd *FD) destroy() error { // Poller may want to unregister fd in readiness notification mechanism, // so this must be executed before CloseFunc. fd.pd.close() + + // We don't use ignoringEINTR here because POSIX does not define + // whether the descriptor is closed if close returns EINTR. + // If the descriptor is indeed closed, using a loop would race + // with some other goroutine opening a new descriptor. + // (The Linux kernel guarantees that it is closed on an EINTR error.) err := CloseFunc(fd.Sysfd) + fd.Sysfd = -1 runtime_Semrelease(&fd.csema) return err From 9b1518aeda297f87d6d06218ddb744c71fefb80d Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Wed, 30 Sep 2020 15:48:14 -0400 Subject: [PATCH 078/281] io: make clear that EOF should not be wrapped For #40827. 
Change-Id: Ifd108421abd8d0988dd7b985e4f9e2bd5356964a Reviewed-on: https://go-review.googlesource.com/c/go/+/258524 Trust: Russ Cox Reviewed-by: Rob Pike --- src/io/io.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/io/io.go b/src/io/io.go index 3dea70b947..adc0c0d550 100644 --- a/src/io/io.go +++ b/src/io/io.go @@ -31,6 +31,8 @@ var ErrShortWrite = errors.New("short write") var ErrShortBuffer = errors.New("short buffer") // EOF is the error returned by Read when no more input is available. +// (Read must return EOF itself, not an error wrapping EOF, +// because callers will test for EOF using ==.) // Functions should return EOF only to signal a graceful end of input. // If the EOF occurs unexpectedly in a structured data stream, // the appropriate error is either ErrUnexpectedEOF or some other error From 069aef4067480ab29f5788b31171054954577661 Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Thu, 1 Oct 2020 10:25:20 +0200 Subject: [PATCH 079/281] syscall: use correct cmsg alignment for netbsd/arm64 netbsd/arm64 requires 128-bit alignment for cmsgs. Re-submit of CL 258437 which was dropped due to #41718. Change-Id: I898043d79f513bebe1a5eb931e7ebd8e291a5aec Reviewed-on: https://go-review.googlesource.com/c/go/+/258677 Trust: Tobias Klauser Trust: Benny Siegert Run-TryBot: Tobias Klauser TryBot-Result: Go Bot Reviewed-by: Benny Siegert --- src/syscall/sockcmsg_unix_other.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/syscall/sockcmsg_unix_other.go b/src/syscall/sockcmsg_unix_other.go index 3aaf7c3616..40f03142a6 100644 --- a/src/syscall/sockcmsg_unix_other.go +++ b/src/syscall/sockcmsg_unix_other.go @@ -32,6 +32,10 @@ func cmsgAlignOf(salen int) int { if runtime.GOARCH == "arm" { salign = 8 } + // NetBSD aarch64 requires 128-bit alignment. 
+ if runtime.GOOS == "netbsd" && runtime.GOARCH == "arm64" { + salign = 16 + } } return (salen + salign - 1) & ^(salign - 1) From 734790716469c7dd887a1f31b8700d42e9cb3e29 Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Mon, 28 Sep 2020 11:13:14 -0400 Subject: [PATCH 080/281] go/types: turn TestBenchmark into a normal benchmark TestBenchmark doesn't use the -bench flag, so that it can format custom output -- the number of checked lines per second. This is a barrier both to discoverability, and to piping benchmark output into analysis tools such as benchstat. Using testing.B.ReportMetric and a bit of manual timing, we can achieve similar results while conforming to normal benchmark output. Do this, and rename the test func to BenchmarkCheck (for symmetry with TestCheck). Change-Id: Ie8f2259c1ca9e6986f0137287acf8eb2843f96b8 Reviewed-on: https://go-review.googlesource.com/c/go/+/257958 Run-TryBot: Robert Findley TryBot-Result: Go Bot Trust: Robert Findley Reviewed-by: Robert Griesemer --- src/go/types/self_test.go | 80 ++++++++++++++++++++++----------------- 1 file changed, 45 insertions(+), 35 deletions(-) diff --git a/src/go/types/self_test.go b/src/go/types/self_test.go index 04c9cd3458..b5f6bfe532 100644 --- a/src/go/types/self_test.go +++ b/src/go/types/self_test.go @@ -5,12 +5,11 @@ package types_test import ( - "flag" - "fmt" "go/ast" "go/importer" "go/parser" "go/token" + "path" "path/filepath" "testing" "time" @@ -18,8 +17,6 @@ import ( . "go/types" ) -var benchmark = flag.Bool("b", false, "run benchmarks") - func TestSelf(t *testing.T) { fset := token.NewFileSet() files, err := pkgFiles(fset, ".") @@ -39,46 +36,39 @@ func TestSelf(t *testing.T) { } } -func TestBenchmark(t *testing.T) { - if !*benchmark { - return - } - - // We're not using testing's benchmarking mechanism directly - // because we want custom output. 
- +func BenchmarkCheck(b *testing.B) { for _, p := range []string{ "net/http", "go/parser", "go/constant", filepath.Join("go", "internal", "gcimporter"), } { - path := filepath.Join("..", "..", p) - runbench(t, path, false) - runbench(t, path, true) - fmt.Println() + b.Run(path.Base(p), func(b *testing.B) { + path := filepath.Join("..", "..", p) + for _, ignoreFuncBodies := range []bool{false, true} { + name := "funcbodies" + if ignoreFuncBodies { + name = "nofuncbodies" + } + b.Run(name, func(b *testing.B) { + b.Run("info", func(b *testing.B) { + runbench(b, path, ignoreFuncBodies, true) + }) + b.Run("noinfo", func(b *testing.B) { + runbench(b, path, ignoreFuncBodies, false) + }) + }) + } + }) } } -func runbench(t *testing.T, path string, ignoreFuncBodies bool) { +func runbench(b *testing.B, path string, ignoreFuncBodies, writeInfo bool) { fset := token.NewFileSet() files, err := pkgFiles(fset, path) if err != nil { - t.Fatal(err) + b.Fatal(err) } - - b := testing.Benchmark(func(b *testing.B) { - for i := 0; i < b.N; i++ { - conf := Config{ - IgnoreFuncBodies: ignoreFuncBodies, - Importer: importer.Default(), - } - if _, err := conf.Check(path, fset, files, nil); err != nil { - t.Fatal(err) - } - } - }) - // determine line count lines := 0 fset.Iterate(func(f *token.File) bool { @@ -86,10 +76,30 @@ func runbench(t *testing.T, path string, ignoreFuncBodies bool) { return true }) - d := time.Duration(b.NsPerOp()) - fmt.Printf("%s (ignoreFuncBodies = %v):\n", filepath.Base(path), ignoreFuncBodies) - fmt.Printf("\t%s for %d lines (%.0f lines/s)\n", d, lines, float64(lines)/d.Seconds()) - fmt.Printf("\t%s\n", b.MemString()) + b.ResetTimer() + start := time.Now() + for i := 0; i < b.N; i++ { + conf := Config{ + IgnoreFuncBodies: ignoreFuncBodies, + Importer: importer.Default(), + } + var info *Info + if writeInfo { + info = &Info{ + Types: make(map[ast.Expr]TypeAndValue), + Defs: make(map[*ast.Ident]Object), + Uses: make(map[*ast.Ident]Object), + Implicits: 
make(map[ast.Node]Object), + Selections: make(map[*ast.SelectorExpr]*Selection), + Scopes: make(map[ast.Node]*Scope), + } + } + if _, err := conf.Check(path, fset, files, info); err != nil { + b.Fatal(err) + } + } + b.StopTimer() + b.ReportMetric(float64(lines)*float64(b.N)/time.Since(start).Seconds(), "lines/s") } func pkgFiles(fset *token.FileSet, path string) ([]*ast.File, error) { From 56dac60074698d23dc6acc047e61d2ad59c9610d Mon Sep 17 00:00:00 2001 From: Quim Date: Thu, 17 Sep 2020 01:59:14 +0200 Subject: [PATCH 081/281] cmd/link: enable ASLR on windows binaries built with -buildmode=c-shared Windows binaries built with -buildmode=c-shared set will have IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE flag set, and IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA flag set for windows/amd64. ASLR can be disabled on windows by using the new linker -aslr flag. RELNOTE=yes Fixes #41421 Change-Id: I62bd88c6d7e0f87173b093a0ad8e1a4d269ec790 Reviewed-on: https://go-review.googlesource.com/c/go/+/255259 Reviewed-by: Alex Brainman Reviewed-by: Cherry Zhang Trust: Alex Brainman Trust: Cherry Zhang Run-TryBot: Alex Brainman TryBot-Result: Go Bot --- src/cmd/link/internal/ld/ld_test.go | 70 +++++++++++++++++++++++++++++ src/cmd/link/internal/ld/lib.go | 24 +++++++--- src/cmd/link/internal/ld/main.go | 6 +++ 3 files changed, 93 insertions(+), 7 deletions(-) diff --git a/src/cmd/link/internal/ld/ld_test.go b/src/cmd/link/internal/ld/ld_test.go index db339b484d..4367c1028e 100644 --- a/src/cmd/link/internal/ld/ld_test.go +++ b/src/cmd/link/internal/ld/ld_test.go @@ -5,6 +5,7 @@ package ld import ( + "debug/pe" "fmt" "internal/testenv" "io/ioutil" @@ -167,3 +168,72 @@ func TestPPC64LargeTextSectionSplitting(t *testing.T) { t.Fatal(err) } } + +func TestWindowsBuildmodeCSharedASLR(t *testing.T) { + platform := fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH) + switch platform { + case "windows/amd64", "windows/386": + default: + t.Skip("skipping windows amd64/386 only test") + } + + 
t.Run("aslr", func(t *testing.T) { + testWindowsBuildmodeCSharedASLR(t, true) + }) + t.Run("no-aslr", func(t *testing.T) { + testWindowsBuildmodeCSharedASLR(t, false) + }) +} + +func testWindowsBuildmodeCSharedASLR(t *testing.T, useASLR bool) { + t.Parallel() + testenv.MustHaveGoBuild(t) + + dir, err := ioutil.TempDir("", "go-build") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + + srcfile := filepath.Join(dir, "test.go") + objfile := filepath.Join(dir, "test.dll") + if err := ioutil.WriteFile(srcfile, []byte(`package main; func main() { print("hello") }`), 0666); err != nil { + t.Fatal(err) + } + argv := []string{"build", "-buildmode=c-shared"} + if !useASLR { + argv = append(argv, "-ldflags", "-aslr=false") + } + argv = append(argv, "-o", objfile, srcfile) + out, err := exec.Command(testenv.GoToolPath(t), argv...).CombinedOutput() + if err != nil { + t.Fatalf("build failure: %s\n%s\n", err, string(out)) + } + + f, err := pe.Open(objfile) + if err != nil { + t.Fatal(err) + } + defer f.Close() + var dc uint16 + switch oh := f.OptionalHeader.(type) { + case *pe.OptionalHeader32: + dc = oh.DllCharacteristics + case *pe.OptionalHeader64: + dc = oh.DllCharacteristics + hasHEVA := (dc & pe.IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA) != 0 + if useASLR && !hasHEVA { + t.Error("IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA flag is not set") + } else if !useASLR && hasHEVA { + t.Error("IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA flag should not be set") + } + default: + t.Fatalf("unexpected optional header type of %T", f.OptionalHeader) + } + hasASLR := (dc & pe.IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE) != 0 + if useASLR && !hasASLR { + t.Error("IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE flag is not set") + } else if !useASLR && hasASLR { + t.Error("IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE flag should not be set") + } +} diff --git a/src/cmd/link/internal/ld/lib.go b/src/cmd/link/internal/ld/lib.go index b2ca658c3c..0cce98a447 100644 --- a/src/cmd/link/internal/ld/lib.go +++ 
b/src/cmd/link/internal/ld/lib.go @@ -1290,6 +1290,17 @@ func (ctxt *Link) hostlink() { argv = append(argv, "-Wl,-bbigtoc") } + // Enable ASLR on Windows. + addASLRargs := func(argv []string) []string { + // Enable ASLR. + argv = append(argv, "-Wl,--dynamicbase") + // enable high-entropy ASLR on 64-bit. + if ctxt.Arch.PtrSize >= 8 { + argv = append(argv, "-Wl,--high-entropy-va") + } + return argv + } + switch ctxt.BuildMode { case BuildModeExe: if ctxt.HeadType == objabi.Hdarwin { @@ -1302,12 +1313,7 @@ func (ctxt *Link) hostlink() { switch ctxt.HeadType { case objabi.Hdarwin, objabi.Haix: case objabi.Hwindows: - // Enable ASLR. - argv = append(argv, "-Wl,--dynamicbase") - // enable high-entropy ASLR on 64-bit. - if ctxt.Arch.PtrSize >= 8 { - argv = append(argv, "-Wl,--high-entropy-va") - } + argv = addASLRargs(argv) // Work around binutils limitation that strips relocation table for dynamicbase. // See https://sourceware.org/bugzilla/show_bug.cgi?id=19011 argv = append(argv, "-Wl,--export-all-symbols") @@ -1331,7 +1337,11 @@ func (ctxt *Link) hostlink() { argv = append(argv, "-Wl,-z,relro") } argv = append(argv, "-shared") - if ctxt.HeadType != objabi.Hwindows { + if ctxt.HeadType == objabi.Hwindows { + if *flagAslr { + argv = addASLRargs(argv) + } + } else { // Pass -z nodelete to mark the shared library as // non-closeable: a dlclose will do nothing. 
argv = append(argv, "-Wl,-z,nodelete") diff --git a/src/cmd/link/internal/ld/main.go b/src/cmd/link/internal/ld/main.go index 6f4ccbfb7a..0e030218c5 100644 --- a/src/cmd/link/internal/ld/main.go +++ b/src/cmd/link/internal/ld/main.go @@ -65,6 +65,7 @@ var ( flagDumpDep = flag.Bool("dumpdep", false, "dump symbol dependency graph") flagRace = flag.Bool("race", false, "enable race detector") flagMsan = flag.Bool("msan", false, "enable MSan interface") + flagAslr = flag.Bool("aslr", true, "enable ASLR for buildmode=c-shared on windows") flagFieldTrack = flag.String("k", "", "set field tracking `symbol`") flagLibGCC = flag.String("libgcc", "", "compiler support lib for internal linking; use \"none\" to disable") @@ -157,6 +158,11 @@ func Main(arch *sys.Arch, theArch Arch) { ctxt.HeadType.Set(objabi.GOOS) } + if !*flagAslr && ctxt.BuildMode != BuildModeCShared { + Errorf(nil, "-aslr=false is only allowed for -buildmode=c-shared") + usage() + } + checkStrictDups = *FlagStrictDups startProfile() From 2ca2e94731b1cb2ffe7f3cc68d6afdbbe2fd99ef Mon Sep 17 00:00:00 2001 From: Roland Shoemaker Date: Thu, 1 Oct 2020 07:57:00 -0700 Subject: [PATCH 082/281] doc/go1.16: fix crypto typo Change-Id: Icf962098cc22f16b0acf75db1e82eaddb9fa0c80 Reviewed-on: https://go-review.googlesource.com/c/go/+/258777 Trust: Roland Shoemaker Run-TryBot: Roland Shoemaker Reviewed-by: Brad Fitzpatrick --- doc/go1.16.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/go1.16.html b/doc/go1.16.html index 2ecf7db7c7..c6e3d92cc6 100644 --- a/doc/go1.16.html +++ b/doc/go1.16.html @@ -164,7 +164,7 @@ Do not send CLs removing the interior tags from such phrases. TODO

-

crypto/tls

+

crypto/tls

I/O operations on closing or closed TLS connections can now be detected using From ad8447bed94ccb89338b05e7e38f7d53874f0340 Mon Sep 17 00:00:00 2001 From: David Chase Date: Mon, 27 Jul 2020 16:46:35 -0400 Subject: [PATCH 083/281] cmd/compile: fix late call expansion for SSA-able aggregate results and arguments This change incorporates the decision that it should be possible to run call expansion relatively late in the optimization chain, so that (1) calls themselves can be exposed to useful optimizations (2) the effect of selectors on aggregates is seen at the rewrite, so that assignment of parts into registers is less complicated (at least I hope it works that way). That means that selectors feeding into SelectN need to be processed, and Make* feeding into call parameters need to be processed. This does however require that call expansion run before decompose builtins. This doesn't yet handle rewrites of strings, slices, interfaces, and complex numbers. Passes run.bash and race.bash Change-Id: I71ff23d3c491043beb30e926949970c4f63ef1a4 Reviewed-on: https://go-review.googlesource.com/c/go/+/245133 Trust: David Chase Run-TryBot: David Chase TryBot-Result: Go Bot Reviewed-by: Cherry Zhang --- src/cmd/compile/internal/gc/ssa.go | 11 +- src/cmd/compile/internal/ssa/compile.go | 2 +- src/cmd/compile/internal/ssa/config.go | 8 + src/cmd/compile/internal/ssa/expand_calls.go | 400 ++++++++++++++++--- src/cmd/compile/internal/ssa/writebarrier.go | 6 +- 5 files changed, 364 insertions(+), 63 deletions(-) diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 815ff7f99f..aebb40568c 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -10,7 +10,6 @@ import ( "html" "os" "sort" - "strings" "bufio" "bytes" @@ -2560,19 +2559,19 @@ func (s *state) expr(n *Node) *ssa.Value { if s.prevCall == nil || s.prevCall.Op != ssa.OpStaticLECall { // Do the old thing addr := s.constOffPtrSP(types.NewPtr(n.Type), 
n.Xoffset) - return s.load(n.Type, addr) + return s.rawLoad(n.Type, addr) } which := s.prevCall.Aux.(*ssa.AuxCall).ResultForOffset(n.Xoffset) if which == -1 { // Do the old thing // TODO: Panic instead. addr := s.constOffPtrSP(types.NewPtr(n.Type), n.Xoffset) - return s.load(n.Type, addr) + return s.rawLoad(n.Type, addr) } if canSSAType(n.Type) { return s.newValue1I(ssa.OpSelectN, n.Type, which, s.prevCall) } else { addr := s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(n.Type), which, s.prevCall) - return s.load(n.Type, addr) + return s.rawLoad(n.Type, addr) } case ODEREF: @@ -4377,7 +4376,7 @@ func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value { case OCALLFUNC: if k == callNormal && fn.Op == ONAME && fn.Class() == PFUNC { sym = fn.Sym - if !returnResultAddr && strings.Contains(sym.Name, "testLateExpansion") { + if !returnResultAddr && ssa.LateCallExpansionEnabledWithin(s.f) { testLateExpansion = true } break @@ -4394,7 +4393,7 @@ func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value { } if k == callNormal { sym = fn.Sym - if !returnResultAddr && strings.Contains(sym.Name, "testLateExpansion") { + if !returnResultAddr && ssa.LateCallExpansionEnabledWithin(s.f) { testLateExpansion = true } break diff --git a/src/cmd/compile/internal/ssa/compile.go b/src/cmd/compile/internal/ssa/compile.go index 4eed612977..3dec1cd85b 100644 --- a/src/cmd/compile/internal/ssa/compile.go +++ b/src/cmd/compile/internal/ssa/compile.go @@ -431,9 +431,9 @@ var passes = [...]pass{ {name: "nilcheckelim", fn: nilcheckelim}, {name: "prove", fn: prove}, {name: "early fuse", fn: fuseEarly}, + {name: "expand calls", fn: expandCalls, required: true}, {name: "decompose builtin", fn: decomposeBuiltIn, required: true}, {name: "softfloat", fn: softfloat, required: true}, - {name: "expand calls", fn:expandCalls, required: true}, {name: "late opt", fn: opt, required: true}, // TODO: split required rules and optimizing rules {name: "dead auto elim", fn: 
elimDeadAutosGeneric}, {name: "generic deadcode", fn: deadcode, required: true}, // remove dead stores, which otherwise mess up store chain diff --git a/src/cmd/compile/internal/ssa/config.go b/src/cmd/compile/internal/ssa/config.go index 7f01f8047f..a73bcf8fca 100644 --- a/src/cmd/compile/internal/ssa/config.go +++ b/src/cmd/compile/internal/ssa/config.go @@ -196,6 +196,14 @@ const ( ClassParamOut // return value ) +const go116lateCallExpansion = true + +// LateCallExpansionEnabledWithin returns true if late call expansion should be tested +// within compilation of a function/method triggered by GOSSAHASH (defaults to "yes"). +func LateCallExpansionEnabledWithin(f *Func) bool { + return go116lateCallExpansion && f.DebugTest // Currently set up for GOSSAHASH bug searches +} + // NewConfig returns a new configuration object for the given architecture. func NewConfig(arch string, types Types, ctxt *obj.Link, optimize bool) *Config { c := &Config{arch: arch, Types: types} diff --git a/src/cmd/compile/internal/ssa/expand_calls.go b/src/cmd/compile/internal/ssa/expand_calls.go index 34cff51c00..8456dbab8d 100644 --- a/src/cmd/compile/internal/ssa/expand_calls.go +++ b/src/cmd/compile/internal/ssa/expand_calls.go @@ -4,14 +4,59 @@ package ssa -import "cmd/compile/internal/types" +import ( + "cmd/compile/internal/types" + "cmd/internal/src" + "fmt" + "sort" +) // expandCalls converts LE (Late Expansion) calls that act like they receive value args into a lower-level form -// that is more oriented to a platform's ABI. The SelectN operations that extract results are also rewritten into -// more appropriate forms. +// that is more oriented to a platform's ABI. 
The SelectN operations that extract results are rewritten into +// more appropriate forms, and any StructMake or ArrayMake inputs are decomposed until non-struct values are +// reached (for now, Strings, Slices, Complex, and Interface are not decomposed because they are rewritten in +// a subsequent phase, but that may need to change for a register ABI in case one of those composite values is +// split between registers and memory). +// +// TODO: when it comes time to use registers, might want to include builtin selectors as well, but currently that happens in lower. func expandCalls(f *Func) { + if !LateCallExpansionEnabledWithin(f) { + return + } canSSAType := f.fe.CanSSA + regSize := f.Config.RegSize sp, _ := f.spSb() + + debug := f.pass.debug > 0 + + // For 32-bit, need to deal with decomposition of 64-bit integers + tUint32 := types.Types[types.TUINT32] + tInt32 := types.Types[types.TINT32] + var hiOffset, lowOffset int64 + if f.Config.BigEndian { + lowOffset = 4 + } else { + hiOffset = 4 + } + pairTypes := func(et types.EType) (tHi, tLo *types.Type) { + tHi = tUint32 + if et == types.TINT64 { + tHi = tInt32 + } + tLo = tUint32 + return + } + + // isAlreadyExpandedAggregateType returns whether a type is an SSA-able "aggregate" (multiple register) type + // that was expanded in an earlier phase (small user-defined arrays and structs, lowered in decomposeUser). + // Other aggregate types are expanded in decomposeBuiltin, which comes later. + isAlreadyExpandedAggregateType := func(t *types.Type) bool { + if !canSSAType(t) { + return false + } + return t.IsStruct() || t.IsArray() || regSize == 4 && t.Size() > 4 && t.IsInteger() + } + // Calls that need lowering have some number of inputs, including a memory input, // and produce a tuple of (value1, value2, ..., mem) where valueK may or may not be SSA-able. 
@@ -21,31 +66,258 @@ func expandCalls(f *Func) { // With the current ABI, the outputs need to be converted to loads, which will all use the call's // memory output as their input. - // Step 1: find all references to calls as values and rewrite those. + // rewriteSelect recursively walks leaf selector to a root (OpSelectN) through + // a chain of Struct/Array Select operations. If the chain of selectors does not + // end in OpSelectN, it does nothing (this can happen depending on compiler phase ordering). + // It emits the code necessary to implement the leaf select operation that leads to the call. + // TODO when registers really arrive, must also decompose anything split across two registers or registers and memory. + var rewriteSelect func(leaf *Value, selector *Value, offset int64) + rewriteSelect = func(leaf *Value, selector *Value, offset int64) { + switch selector.Op { + case OpSelectN: + // TODO these may be duplicated. Should memoize. Intermediate selectors will go dead, no worries there. + call := selector.Args[0] + aux := call.Aux.(*AuxCall) + which := selector.AuxInt + if which == aux.NResults() { // mem is after the results. + // rewrite v as a Copy of call -- the replacement call will produce a mem. + leaf.copyOf(call) + } else { + leafType := leaf.Type + pt := types.NewPtr(leafType) + if canSSAType(leafType) { + off := f.ConstOffPtrSP(pt, offset+aux.OffsetOfResult(which), sp) + // Any selection right out of the arg area/registers has to be same Block as call, use call as mem input. 
+ if leaf.Block == call.Block { + leaf.reset(OpLoad) + leaf.SetArgs2(off, call) + } else { + w := call.Block.NewValue2(leaf.Pos, OpLoad, leafType, off, call) + leaf.copyOf(w) + } + } else { + panic("Should not have non-SSA-able OpSelectN") + } + } + case OpStructSelect: + w := selector.Args[0] + if w.Type.Etype != types.TSTRUCT { + fmt.Printf("Bad type for w:\nv=%v\nsel=%v\nw=%v\n,f=%s\n", leaf.LongString(), selector.LongString(), w.LongString(), f.Name) + } + rewriteSelect(leaf, w, offset+w.Type.FieldOff(int(selector.AuxInt))) + + case OpInt64Hi: + w := selector.Args[0] + rewriteSelect(leaf, w, offset+hiOffset) + + case OpInt64Lo: + w := selector.Args[0] + rewriteSelect(leaf, w, offset+lowOffset) + + case OpArraySelect: + w := selector.Args[0] + rewriteSelect(leaf, w, offset+selector.Type.Size()*selector.AuxInt) + default: + // Ignore dead ends; on 32-bit, these can occur running before decompose builtins. + } + } + + // storeArg converts stores of SSA-able aggregates into a series of stores of smaller types into + // individual parameter slots. + // TODO when registers really arrive, must also decompose anything split across two registers or registers and memory. 
+ var storeArg func(pos src.XPos, b *Block, a *Value, t *types.Type, offset int64, mem *Value) *Value + storeArg = func(pos src.XPos, b *Block, a *Value, t *types.Type, offset int64, mem *Value) *Value { + switch a.Op { + case OpArrayMake0, OpStructMake0: + return mem + case OpStructMake1, OpStructMake2, OpStructMake3, OpStructMake4: + for i := 0; i < t.NumFields(); i++ { + fld := t.Field(i) + mem = storeArg(pos, b, a.Args[i], fld.Type, offset+fld.Offset, mem) + } + return mem + case OpArrayMake1: + return storeArg(pos, b, a.Args[0], t.Elem(), offset, mem) + + case OpInt64Make: + tHi, tLo := pairTypes(t.Etype) + mem = storeArg(pos, b, a.Args[0], tHi, offset+hiOffset, mem) + return storeArg(pos, b, a.Args[1], tLo, offset+lowOffset, mem) + } + dst := f.ConstOffPtrSP(types.NewPtr(t), offset, sp) + x := b.NewValue3A(pos, OpStore, types.TypeMem, t, dst, a, mem) + if debug { + fmt.Printf("storeArg(%v) returns %s\n", a, x.LongString()) + } + return x + } + + // offsetFrom creates an offset from a pointer, simplifying chained offsets and offsets from SP + // TODO should also optimize offsets from SB? + offsetFrom := func(dst *Value, offset int64, t *types.Type) *Value { + pt := types.NewPtr(t) + if offset == 0 && dst.Type == pt { // this is not actually likely + return dst + } + if dst.Op != OpOffPtr { + return dst.Block.NewValue1I(dst.Pos.WithNotStmt(), OpOffPtr, pt, offset, dst) + } + // Simplify OpOffPtr + from := dst.Args[0] + offset += dst.AuxInt + if from == sp { + return f.ConstOffPtrSP(pt, offset, sp) + } + return dst.Block.NewValue1I(dst.Pos.WithNotStmt(), OpOffPtr, pt, offset, from) + } + + // splitStore converts a store of an SSA-able aggregate into a series of smaller stores, emitting + // appropriate Struct/Array Select operations (which will soon go dead) to obtain the parts. 
+ var splitStore func(dst, src, mem, v *Value, t *types.Type, offset int64, firstStorePos src.XPos) *Value + splitStore = func(dst, src, mem, v *Value, t *types.Type, offset int64, firstStorePos src.XPos) *Value { + // TODO might be worth commoning up duplicate selectors, but since they go dead, maybe no point. + pos := v.Pos.WithNotStmt() + switch t.Etype { + case types.TINT64, types.TUINT64: + if t.Width == regSize { + break + } + tHi, tLo := pairTypes(t.Etype) + sel := src.Block.NewValue1(pos, OpInt64Hi, tHi, src) + mem = splitStore(dst, sel, mem, v, tHi, offset+hiOffset, firstStorePos) + firstStorePos = firstStorePos.WithNotStmt() + sel = src.Block.NewValue1(pos, OpInt64Lo, tLo, src) + return splitStore(dst, sel, mem, v, tLo, offset+lowOffset, firstStorePos) + + case types.TARRAY: + elt := t.Elem() + for i := int64(0); i < t.NumElem(); i++ { + sel := src.Block.NewValue1I(pos, OpArraySelect, elt, i, src) + mem = splitStore(dst, sel, mem, v, elt, offset+i*elt.Width, firstStorePos) + firstStorePos = firstStorePos.WithNotStmt() + } + return mem + case types.TSTRUCT: + if src.Op == OpIData && t.NumFields() == 1 && t.Field(0).Type.Width == t.Width && t.Width == regSize { + // This peculiar test deals with accesses to immediate interface data. + // It works okay because everything is the same size. + // Example code that triggers this can be found in go/constant/value.go, function ToComplex + // v119 (+881) = IData v6 + // v121 (+882) = StaticLECall {AuxCall{"".itof([intVal,0])[floatVal,8]}} [16] v119 v1 + // This corresponds to the generic rewrite rule "(StructSelect [0] (IData x)) => (IData x)" + // Guard against "struct{struct{*foo}}" + for t.Etype == types.TSTRUCT && t.NumFields() == 1 { + t = t.Field(0).Type + } + if t.Etype == types.TSTRUCT || t.Etype == types.TARRAY { + f.Fatalf("Did not expect to find IDATA-immediate with non-trivial struct in it") + } + break // handle the leaf type. 
+ } + for i := 0; i < t.NumFields(); i++ { + fld := t.Field(i) + sel := src.Block.NewValue1I(pos, OpStructSelect, fld.Type, int64(i), src) + mem = splitStore(dst, sel, mem, v, fld.Type, offset+fld.Offset, firstStorePos) + firstStorePos = firstStorePos.WithNotStmt() + } + return mem + } + // Default, including for aggregates whose single element exactly fills their container + // TODO this will be a problem for cast interfaces containing floats when we move to registers. + x := v.Block.NewValue3A(firstStorePos, OpStore, types.TypeMem, t, offsetFrom(dst, offset, t), src, mem) + if debug { + fmt.Printf("splitStore(%v, %v, %v, %v) returns %s\n", dst, src, mem, v, x.LongString()) + } + return x + } + + // Step 0: rewrite the calls to convert incoming args to stores. for _, b := range f.Blocks { for _, v := range b.Values { switch v.Op { - case OpSelectN: - call := v.Args[0] - aux := call.Aux.(*AuxCall) - which := v.AuxInt - t := v.Type - if which == aux.NResults() { // mem is after the results. - // rewrite v as a Copy of call -- the replacement call will produce a mem. - v.copyOf(call) - } else { - pt := types.NewPtr(t) - if canSSAType(t) { - off := f.ConstOffPtrSP(pt, aux.OffsetOfResult(which), sp) - v.reset(OpLoad) - v.SetArgs2(off, call) + case OpStaticLECall: + // Thread the stores on the memory arg + m0 := v.MemoryArg() + mem := m0 + pos := v.Pos.WithNotStmt() + aux := v.Aux.(*AuxCall) + for i, a := range v.Args { + if a == m0 { // mem is last. + break + } + if a.Op == OpDereference { + // "Dereference" of addressed (probably not-SSA-eligible) value becomes Move + // TODO this will be more complicated with registers in the picture. 
+ if a.MemoryArg() != m0 { + f.Fatalf("Op...LECall and OpDereference have mismatched mem, %s and %s", v.LongString(), a.LongString()) + } + src := a.Args[0] + dst := f.ConstOffPtrSP(src.Type, aux.OffsetOfArg(int64(i)), sp) + if a.Uses == 1 { + a.reset(OpMove) + a.Pos = pos + a.Type = types.TypeMem + a.Aux = aux.TypeOfArg(int64(i)) + a.AuxInt = aux.SizeOfArg(int64(i)) + a.SetArgs3(dst, src, mem) + mem = a + } else { + mem = a.Block.NewValue3A(pos, OpMove, types.TypeMem, aux.TypeOfArg(int64(i)), dst, src, mem) + mem.AuxInt = aux.SizeOfArg(int64(i)) + } } else { - panic("Should not have non-SSA-able OpSelectN") + mem = storeArg(pos, b, a, aux.TypeOfArg(int64(i)), aux.OffsetOfArg(int64(i)), mem) } } - v.Type = t // not right for the mem operand yet, but will be when call is rewritten. + v.resetArgs() + v.SetArgs1(mem) + } + } + } + // Step 1: any stores of aggregates remaining are believed to be sourced from call results. + // Decompose those stores into a series of smaller stores, adding selection ops as necessary. + for _, b := range f.Blocks { + for _, v := range b.Values { + if v.Op == OpStore { + t := v.Aux.(*types.Type) + if isAlreadyExpandedAggregateType(t) { + dst, src, mem := v.Args[0], v.Args[1], v.Args[2] + mem = splitStore(dst, src, mem, v, t, 0, v.Pos) + v.copyOf(mem) + } + } + } + } + + val2Preds := make(map[*Value]int32) // Used to accumulate dependency graph of selection operations for topological ordering. + + // Step 2: accumulate selection operations for rewrite in topological order. + // Any select-for-addressing applied to call results can be transformed directly. + // TODO this is overkill; with the transformation of aggregate references into series of leaf references, it is only necessary to remember and recurse on the leaves. 
+ for _, b := range f.Blocks { + for _, v := range b.Values { + // Accumulate chains of selectors for processing in topological order + switch v.Op { + case OpStructSelect, OpArraySelect, OpInt64Hi, OpInt64Lo: + w := v.Args[0] + switch w.Op { + case OpStructSelect, OpArraySelect, OpInt64Hi, OpInt64Lo, OpSelectN: + val2Preds[w] += 1 + if debug { + fmt.Printf("v2p[%s] = %d\n", w.LongString(), val2Preds[w]) + } + } + fallthrough + case OpSelectN: + if _, ok := val2Preds[v]; !ok { + val2Preds[v] = 0 + if debug { + fmt.Printf("v2p[%s] = %d\n", v.LongString(), val2Preds[v]) + } + } case OpSelectNAddr: + // Do these directly, there are no chains of selectors. call := v.Args[0] which := v.AuxInt aux := call.Aux.(*AuxCall) @@ -56,44 +328,66 @@ func expandCalls(f *Func) { } } - // Step 2: rewrite the calls + // Compilation must be deterministic + var ordered []*Value + less := func(i, j int) bool { return ordered[i].ID < ordered[j].ID } + + // Step 3: Rewrite in topological order. All chains of selectors end up in same block as the call. + for len(val2Preds) > 0 { + ordered = ordered[:0] + for v, n := range val2Preds { + if n == 0 { + ordered = append(ordered, v) + } + } + sort.Slice(ordered, less) + for _, v := range ordered { + for { + w := v.Args[0] + if debug { + fmt.Printf("About to rewrite %s, args[0]=%s\n", v.LongString(), w.LongString()) + } + delete(val2Preds, v) + rewriteSelect(v, v, 0) + v = w + n, ok := val2Preds[v] + if !ok { + break + } + if n != 1 { + val2Preds[v] = n - 1 + break + } + // Loop on new v; val2Preds[v] == 1 will be deleted in that iteration, no need to store zero. 
+ } + } + } + + // Step 4: rewrite the calls themselves, correcting the type for _, b := range f.Blocks { for _, v := range b.Values { switch v.Op { case OpStaticLECall: - // Thread the stores on the memory arg - m0 := v.Args[len(v.Args)-1] - mem := m0 - pos := v.Pos.WithNotStmt() - aux := v.Aux.(*AuxCall) - auxInt := v.AuxInt - for i, a := range v.Args { - if a == m0 { - break - } - if a.Op == OpDereference { - // "Dereference" of addressed (probably not-SSA-eligible) value becomes Move - src := a.Args[0] - dst := f.ConstOffPtrSP(src.Type, aux.OffsetOfArg(int64(i)), sp) - a.reset(OpMove) - a.Pos = pos - a.Type = types.TypeMem - a.Aux = aux.TypeOfArg(int64(i)) - a.AuxInt = aux.SizeOfArg(int64(i)) - a.SetArgs3(dst, src, mem) - mem = a - } else { - // Add a new store. - t := aux.TypeOfArg(int64(i)) - dst := f.ConstOffPtrSP(types.NewPtr(t), aux.OffsetOfArg(int64(i)), sp) - mem = b.NewValue3A(pos, OpStore, types.TypeMem, t, dst, a, mem) - } - } - v.reset(OpStaticCall) + v.Op = OpStaticCall v.Type = types.TypeMem - v.Aux = aux - v.AuxInt = auxInt - v.SetArgs1(mem) + } + } + } + + // Step 5: elide any copies introduced. + for _, b := range f.Blocks { + for _, v := range b.Values { + for i, a := range v.Args { + if a.Op != OpCopy { + continue + } + aa := copySource(a) + v.SetArg(i, aa) + for a.Uses == 0 { + b := a.Args[0] + a.reset(OpInvalid) + a = b + } } } } diff --git a/src/cmd/compile/internal/ssa/writebarrier.go b/src/cmd/compile/internal/ssa/writebarrier.go index df54a45b0f..849c9e8967 100644 --- a/src/cmd/compile/internal/ssa/writebarrier.go +++ b/src/cmd/compile/internal/ssa/writebarrier.go @@ -527,7 +527,7 @@ func IsStackAddr(v *Value) bool { v = v.Args[0] } switch v.Op { - case OpSP, OpLocalAddr: + case OpSP, OpLocalAddr, OpSelectNAddr: return true } return false @@ -593,7 +593,7 @@ func IsSanitizerSafeAddr(v *Value) bool { v = v.Args[0] } switch v.Op { - case OpSP, OpLocalAddr: + case OpSP, OpLocalAddr, OpSelectNAddr: // Stack addresses are always safe. 
return true case OpITab, OpStringPtr, OpGetClosurePtr: @@ -609,7 +609,7 @@ func IsSanitizerSafeAddr(v *Value) bool { // isVolatile reports whether v is a pointer to argument region on stack which // will be clobbered by a function call. func isVolatile(v *Value) bool { - for v.Op == OpOffPtr || v.Op == OpAddPtr || v.Op == OpPtrIndex || v.Op == OpCopy { + for v.Op == OpOffPtr || v.Op == OpAddPtr || v.Op == OpPtrIndex || v.Op == OpCopy || v.Op == OpSelectNAddr { v = v.Args[0] } return v.Op == OpSP From 75ea9953a812dcb2f64ea949054e529d9748d553 Mon Sep 17 00:00:00 2001 From: David Chase Date: Wed, 5 Aug 2020 11:42:44 -0400 Subject: [PATCH 084/281] cmd/compile: enable late expansion for address-of static calls passes run.bash and race.bash (on Darwin-amd64) Change-Id: I2abda9636b681d050e85e88fc357ebe5220d2ba2 Reviewed-on: https://go-review.googlesource.com/c/go/+/246938 Trust: David Chase Run-TryBot: David Chase Reviewed-by: Cherry Zhang --- src/cmd/compile/internal/gc/ssa.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index aebb40568c..e2fbd6f096 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -4376,7 +4376,7 @@ func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value { case OCALLFUNC: if k == callNormal && fn.Op == ONAME && fn.Class() == PFUNC { sym = fn.Sym - if !returnResultAddr && ssa.LateCallExpansionEnabledWithin(s.f) { + if ssa.LateCallExpansionEnabledWithin(s.f) { testLateExpansion = true } break @@ -4393,7 +4393,7 @@ func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value { } if k == callNormal { sym = fn.Sym - if !returnResultAddr && ssa.LateCallExpansionEnabledWithin(s.f) { + if ssa.LateCallExpansionEnabledWithin(s.f) { testLateExpansion = true } break @@ -4605,7 +4605,11 @@ func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value { } fp := res.Field(0) 
if returnResultAddr { - return s.constOffPtrSP(types.NewPtr(fp.Type), fp.Offset+Ctxt.FixedFrameSize()) + pt := types.NewPtr(fp.Type) + if testLateExpansion { + return s.newValue1I(ssa.OpSelectNAddr, pt, 0, call) + } + return s.constOffPtrSP(pt, fp.Offset+Ctxt.FixedFrameSize()) } if testLateExpansion { From adef4deeb85ede59201f37f5145763ed55a807f7 Mon Sep 17 00:00:00 2001 From: David Chase Date: Fri, 7 Aug 2020 22:46:43 -0400 Subject: [PATCH 085/281] cmd/compile: enable late expansion for interface calls Includes a few tweaks to Value.copyOf(a) (make it a no-op for a self-copy) and new pattern hack "___" (3 underscores) is like ellipsis, except the replacement doesn't need to have matching ellipsis/underscores. Moved the arg-length check in generated pattern-matching code BEFORE the args are probed, because not all instances of variable length OpFoo will have all the args mentioned in some rule for OpFoo, and when that happens, the compiler panics without the early check. Change-Id: I66de40672b3794a6427890ff96c805a488d783f4 Reviewed-on: https://go-review.googlesource.com/c/go/+/247537 Trust: David Chase Run-TryBot: David Chase TryBot-Result: Go Bot Reviewed-by: Cherry Zhang --- src/cmd/compile/internal/gc/ssa.go | 22 +++-- src/cmd/compile/internal/ssa/expand_calls.go | 93 ++++++++++++------- .../compile/internal/ssa/gen/generic.rules | 7 ++ .../compile/internal/ssa/gen/genericOps.go | 10 +- src/cmd/compile/internal/ssa/gen/rulegen.go | 22 ++++- src/cmd/compile/internal/ssa/op.go | 11 +++ src/cmd/compile/internal/ssa/opGen.go | 16 ++++ src/cmd/compile/internal/ssa/rewrite.go | 30 ++++++ .../compile/internal/ssa/rewritegeneric.go | 62 ++++++++++++- src/cmd/compile/internal/ssa/value.go | 3 + 10 files changed, 221 insertions(+), 55 deletions(-) diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index e2fbd6f096..e01ebd6e89 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -2556,7 +2556,7 @@ 
func (s *state) expr(n *Node) *ssa.Value { return s.addr(n.Left) case ORESULT: - if s.prevCall == nil || s.prevCall.Op != ssa.OpStaticLECall { + if s.prevCall == nil || s.prevCall.Op != ssa.OpStaticLECall && s.prevCall.Op != ssa.OpInterLECall && s.prevCall.Op != ssa.OpClosureLECall { // Do the old thing addr := s.constOffPtrSP(types.NewPtr(n.Type), n.Xoffset) return s.rawLoad(n.Type, addr) @@ -4409,6 +4409,9 @@ func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value { iclosure, rcvr = s.getClosureAndRcvr(fn) if k == callNormal { codeptr = s.load(types.Types[TUINTPTR], iclosure) + if ssa.LateCallExpansionEnabledWithin(s.f) { + testLateExpansion = true + } } else { closure = iclosure } @@ -4555,16 +4558,17 @@ func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value { codeptr = s.rawLoad(types.Types[TUINTPTR], closure) call = s.newValue3A(ssa.OpClosureCall, types.TypeMem, ssa.ClosureAuxCall(ACArgs, ACResults), codeptr, closure, s.mem()) case codeptr != nil: - call = s.newValue2A(ssa.OpInterCall, types.TypeMem, ssa.InterfaceAuxCall(ACArgs, ACResults), codeptr, s.mem()) + if testLateExpansion { + aux := ssa.InterfaceAuxCall(ACArgs, ACResults) + call = s.newValue1A(ssa.OpInterLECall, aux.LateExpansionResultType(), aux, codeptr) + call.AddArgs(callArgs...) + } else { + call = s.newValue2A(ssa.OpInterCall, types.TypeMem, ssa.InterfaceAuxCall(ACArgs, ACResults), codeptr, s.mem()) + } case sym != nil: if testLateExpansion { - var tys []*types.Type aux := ssa.StaticAuxCall(sym.Linksym(), ACArgs, ACResults) - for i := int64(0); i < aux.NResults(); i++ { - tys = append(tys, aux.TypeOfResult(i)) - } - tys = append(tys, types.TypeMem) - call = s.newValue0A(ssa.OpStaticLECall, types.NewResults(tys), aux) + call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux) call.AddArgs(callArgs...) 
} else { call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(sym.Linksym(), ACArgs, ACResults), s.mem()) @@ -4713,7 +4717,7 @@ func (s *state) addr(n *Node) *ssa.Value { } case ORESULT: // load return from callee - if s.prevCall == nil || s.prevCall.Op != ssa.OpStaticLECall { + if s.prevCall == nil || s.prevCall.Op != ssa.OpStaticLECall && s.prevCall.Op != ssa.OpInterLECall && s.prevCall.Op != ssa.OpClosureLECall { return s.constOffPtrSP(t, n.Xoffset) } which := s.prevCall.Aux.(*ssa.AuxCall).ResultForOffset(n.Xoffset) diff --git a/src/cmd/compile/internal/ssa/expand_calls.go b/src/cmd/compile/internal/ssa/expand_calls.go index 8456dbab8d..7b1d656b64 100644 --- a/src/cmd/compile/internal/ssa/expand_calls.go +++ b/src/cmd/compile/internal/ssa/expand_calls.go @@ -38,6 +38,7 @@ func expandCalls(f *Func) { } else { hiOffset = 4 } + pairTypes := func(et types.EType) (tHi, tLo *types.Type) { tHi = tUint32 if et == types.TINT64 { @@ -231,46 +232,64 @@ func expandCalls(f *Func) { return x } + rewriteArgs := func(v *Value, firstArg int) *Value { + // Thread the stores on the memory arg + aux := v.Aux.(*AuxCall) + pos := v.Pos.WithNotStmt() + m0 := v.Args[len(v.Args)-1] + mem := m0 + for i, a := range v.Args { + if i < firstArg { + continue + } + if a == m0 { // mem is last. + break + } + auxI := int64(i - firstArg) + if a.Op == OpDereference { + if a.MemoryArg() != m0 { + f.Fatalf("Op...LECall and OpDereference have mismatched mem, %s and %s", v.LongString(), a.LongString()) + } + // "Dereference" of addressed (probably not-SSA-eligible) value becomes Move + // TODO this will be more complicated with registers in the picture. 
+ src := a.Args[0] + dst := f.ConstOffPtrSP(src.Type, aux.OffsetOfArg(auxI), sp) + if a.Uses == 1 { + a.reset(OpMove) + a.Pos = pos + a.Type = types.TypeMem + a.Aux = aux.TypeOfArg(auxI) + a.AuxInt = aux.SizeOfArg(auxI) + a.SetArgs3(dst, src, mem) + mem = a + } else { + mem = a.Block.NewValue3A(pos, OpMove, types.TypeMem, aux.TypeOfArg(auxI), dst, src, mem) + mem.AuxInt = aux.SizeOfArg(auxI) + } + } else { + mem = storeArg(pos, v.Block, a, aux.TypeOfArg(auxI), aux.OffsetOfArg(auxI), mem) + } + } + v.resetArgs() + return mem + } + // Step 0: rewrite the calls to convert incoming args to stores. for _, b := range f.Blocks { for _, v := range b.Values { switch v.Op { case OpStaticLECall: - // Thread the stores on the memory arg - m0 := v.MemoryArg() - mem := m0 - pos := v.Pos.WithNotStmt() - aux := v.Aux.(*AuxCall) - for i, a := range v.Args { - if a == m0 { // mem is last. - break - } - if a.Op == OpDereference { - // "Dereference" of addressed (probably not-SSA-eligible) value becomes Move - // TODO this will be more complicated with registers in the picture. 
- if a.MemoryArg() != m0 { - f.Fatalf("Op...LECall and OpDereference have mismatched mem, %s and %s", v.LongString(), a.LongString()) - } - src := a.Args[0] - dst := f.ConstOffPtrSP(src.Type, aux.OffsetOfArg(int64(i)), sp) - if a.Uses == 1 { - a.reset(OpMove) - a.Pos = pos - a.Type = types.TypeMem - a.Aux = aux.TypeOfArg(int64(i)) - a.AuxInt = aux.SizeOfArg(int64(i)) - a.SetArgs3(dst, src, mem) - mem = a - } else { - mem = a.Block.NewValue3A(pos, OpMove, types.TypeMem, aux.TypeOfArg(int64(i)), dst, src, mem) - mem.AuxInt = aux.SizeOfArg(int64(i)) - } - } else { - mem = storeArg(pos, b, a, aux.TypeOfArg(int64(i)), aux.OffsetOfArg(int64(i)), mem) - } - } - v.resetArgs() + mem := rewriteArgs(v, 0) v.SetArgs1(mem) + case OpClosureLECall: + code := v.Args[0] + context := v.Args[1] + mem := rewriteArgs(v, 2) + v.SetArgs3(code, context, mem) + case OpInterLECall: + code := v.Args[0] + mem := rewriteArgs(v, 1) + v.SetArgs2(code, mem) } } } @@ -370,6 +389,12 @@ func expandCalls(f *Func) { case OpStaticLECall: v.Op = OpStaticCall v.Type = types.TypeMem + case OpClosureLECall: + v.Op = OpClosureCall + v.Type = types.TypeMem + case OpInterLECall: + v.Op = OpInterCall + v.Type = types.TypeMem } } } diff --git a/src/cmd/compile/internal/ssa/gen/generic.rules b/src/cmd/compile/internal/ssa/gen/generic.rules index 39f8cc8889..588077422c 100644 --- a/src/cmd/compile/internal/ssa/gen/generic.rules +++ b/src/cmd/compile/internal/ssa/gen/generic.rules @@ -2024,6 +2024,13 @@ (InterCall [argsize] {auxCall} (Load (OffPtr [off] (ITab (IMake (Addr {itab} (SB)) _))) _) mem) && devirt(v, auxCall, itab, off) != nil => (StaticCall [int32(argsize)] {devirt(v, auxCall, itab, off)} mem) +// De-virtualize late-expanded interface calls into late-expanded static calls. +// Note that (ITab (IMake)) doesn't get rewritten until after the first opt pass, +// so this rule should trigger reliably. 
+// devirtLECall removes the first argument, adds the devirtualized symbol to the AuxCall, and changes the opcode +(InterLECall [argsize] {auxCall} (Load (OffPtr [off] (ITab (IMake (Addr {itab} (SB)) _))) _) ___) && devirtLESym(v, auxCall, itab, off) != + nil => devirtLECall(v, devirtLESym(v, auxCall, itab, off)) + // Move and Zero optimizations. // Move source and destination may overlap. diff --git a/src/cmd/compile/internal/ssa/gen/genericOps.go b/src/cmd/compile/internal/ssa/gen/genericOps.go index 95edff4c8c..3518dd1e3c 100644 --- a/src/cmd/compile/internal/ssa/gen/genericOps.go +++ b/src/cmd/compile/internal/ssa/gen/genericOps.go @@ -389,10 +389,12 @@ var genericOps = []opData{ // TODO(josharian): ClosureCall and InterCall should have Int32 aux // to match StaticCall's 32 bit arg size limit. // TODO(drchase,josharian): could the arg size limit be bundled into the rules for CallOff? - {name: "ClosureCall", argLength: 3, aux: "CallOff", call: true}, // arg0=code pointer, arg1=context ptr, arg2=memory. auxint=arg size. Returns memory. - {name: "StaticCall", argLength: 1, aux: "CallOff", call: true}, // call function aux.(*obj.LSym), arg0=memory. auxint=arg size. Returns memory. - {name: "InterCall", argLength: 2, aux: "CallOff", call: true}, // interface call. arg0=code pointer, arg1=memory, auxint=arg size. Returns memory. - {name: "StaticLECall", argLength: -1, aux: "CallOff", call: true}, // late-expanded static call function aux.(*ssa.AuxCall.Fn). arg0..argN-1 are inputs, argN is mem. auxint = arg size. Result is tuple of result(s), plus mem. + {name: "ClosureCall", argLength: 3, aux: "CallOff", call: true}, // arg0=code pointer, arg1=context ptr, arg2=memory. auxint=arg size. Returns memory. + {name: "StaticCall", argLength: 1, aux: "CallOff", call: true}, // call function aux.(*obj.LSym), arg0=memory. auxint=arg size. Returns memory. + {name: "InterCall", argLength: 2, aux: "CallOff", call: true}, // interface call. 
arg0=code pointer, arg1=memory, auxint=arg size. Returns memory. + {name: "ClosureLECall", argLength: -1, aux: "CallOff", call: true}, // late-expanded closure call. arg0=code pointer, arg1=context ptr, arg2..argN-1 are inputs, argN is mem. auxint = arg size. Result is tuple of result(s), plus mem. + {name: "StaticLECall", argLength: -1, aux: "CallOff", call: true}, // late-expanded static call function aux.(*ssa.AuxCall.Fn). arg0..argN-1 are inputs, argN is mem. auxint = arg size. Result is tuple of result(s), plus mem. + {name: "InterLECall", argLength: -1, aux: "CallOff", call: true}, // late-expanded interface call. arg0=code pointer, arg1..argN-1 are inputs, argN is mem. auxint = arg size. Result is tuple of result(s), plus mem. // Conversions: signed extensions, zero (unsigned) extensions, truncations {name: "SignExt8to16", argLength: 1, typ: "Int16"}, diff --git a/src/cmd/compile/internal/ssa/gen/rulegen.go b/src/cmd/compile/internal/ssa/gen/rulegen.go index be51a7c5f8..504ee2bd0a 100644 --- a/src/cmd/compile/internal/ssa/gen/rulegen.go +++ b/src/cmd/compile/internal/ssa/gen/rulegen.go @@ -50,8 +50,12 @@ import ( // variable ::= some token // opcode ::= one of the opcodes from the *Ops.go files +// special rules: trailing ellipsis "..." (in the outermost sexpr?) must match on both sides of a rule. +// trailing three underscore "___" in the outermost match sexpr indicate the presence of +// extra ignored args that need not appear in the replacement + // extra conditions is just a chunk of Go that evaluates to a boolean. It may use -// variables declared in the matching sexpr. The variable "v" is predefined to be +// variables declared in the matching tsexpr. The variable "v" is predefined to be // the value matched by the entire rule. // If multiple rules match, the first one in file order is selected. 
@@ -1019,6 +1023,19 @@ func genMatch0(rr *RuleRewrite, arch arch, match, v string, cnt map[string]int, pos = v + ".Pos" } + // If the last argument is ___, it means "don't care about trailing arguments, really" + // The likely/intended use is for rewrites that are too tricky to express in the existing pattern language + // Do a length check early because long patterns fed short (ultimately not-matching) inputs will + // do an indexing error in pattern-matching. + if op.argLength == -1 { + l := len(args) + if l == 0 || args[l-1] != "___" { + rr.add(breakf("len(%s.Args) != %d", v, l)) + } else if l > 1 && args[l-1] == "___" { + rr.add(breakf("len(%s.Args) < %d", v, l-1)) + } + } + for _, e := range []struct { name, field, dclType string }{ @@ -1159,9 +1176,6 @@ func genMatch0(rr *RuleRewrite, arch arch, match, v string, cnt map[string]int, } } - if op.argLength == -1 { - rr.add(breakf("len(%s.Args) != %d", v, len(args))) - } return pos, checkOp } diff --git a/src/cmd/compile/internal/ssa/op.go b/src/cmd/compile/internal/ssa/op.go index 9b45dd53c7..62f5cddcfc 100644 --- a/src/cmd/compile/internal/ssa/op.go +++ b/src/cmd/compile/internal/ssa/op.go @@ -127,6 +127,17 @@ func (a *AuxCall) NResults() int64 { return int64(len(a.results)) } +// LateExpansionResultType returns the result type (including trailing mem) +// for a call that will be expanded later in the SSA phase. 
+func (a *AuxCall) LateExpansionResultType() *types.Type { + var tys []*types.Type + for i := int64(0); i < a.NResults(); i++ { + tys = append(tys, a.TypeOfResult(i)) + } + tys = append(tys, types.TypeMem) + return types.NewResults(tys) +} + // NArgs returns the number of arguments func (a *AuxCall) NArgs() int64 { return int64(len(a.args)) diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 1fe00c7026..9fe943c2e0 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -2732,7 +2732,9 @@ const ( OpClosureCall OpStaticCall OpInterCall + OpClosureLECall OpStaticLECall + OpInterLECall OpSignExt8to16 OpSignExt8to32 OpSignExt8to64 @@ -34851,6 +34853,13 @@ var opcodeTable = [...]opInfo{ call: true, generic: true, }, + { + name: "ClosureLECall", + auxType: auxCallOff, + argLen: -1, + call: true, + generic: true, + }, { name: "StaticLECall", auxType: auxCallOff, @@ -34858,6 +34867,13 @@ var opcodeTable = [...]opInfo{ call: true, generic: true, }, + { + name: "InterLECall", + auxType: auxCallOff, + argLen: -1, + call: true, + generic: true, + }, { name: "SignExt8to16", argLen: 1, diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go index d9c3e455a0..9f4de83a77 100644 --- a/src/cmd/compile/internal/ssa/rewrite.go +++ b/src/cmd/compile/internal/ssa/rewrite.go @@ -764,6 +764,36 @@ func devirt(v *Value, aux interface{}, sym Sym, offset int64) *AuxCall { return StaticAuxCall(lsym, va.args, va.results) } +// de-virtualize an InterLECall +// 'sym' is the symbol for the itab +func devirtLESym(v *Value, aux interface{}, sym Sym, offset int64) *obj.LSym { + n, ok := sym.(*obj.LSym) + if !ok { + return nil + } + + f := v.Block.Func + lsym := f.fe.DerefItab(n, offset) + if f.pass.debug > 0 { + if lsym != nil { + f.Warnl(v.Pos, "de-virtualizing call") + } else { + f.Warnl(v.Pos, "couldn't de-virtualize call") + } + } + if lsym == nil { + return nil + } + 
return lsym +} + +func devirtLECall(v *Value, sym *obj.LSym) *Value { + v.Op = OpStaticLECall + v.Aux.(*AuxCall).Fn = sym + v.RemoveArg(0) + return v +} + // isSamePtr reports whether p1 and p2 point to the same address. func isSamePtr(p1, p2 *Value) bool { if p1 == p2 { diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go index 925ff53fd1..ade0a69a10 100644 --- a/src/cmd/compile/internal/ssa/rewritegeneric.go +++ b/src/cmd/compile/internal/ssa/rewritegeneric.go @@ -124,6 +124,8 @@ func rewriteValuegeneric(v *Value) bool { return rewriteValuegeneric_OpIMake(v) case OpInterCall: return rewriteValuegeneric_OpInterCall(v) + case OpInterLECall: + return rewriteValuegeneric_OpInterLECall(v) case OpIsInBounds: return rewriteValuegeneric_OpIsInBounds(v) case OpIsNonNil: @@ -8522,6 +8524,46 @@ func rewriteValuegeneric_OpInterCall(v *Value) bool { } return false } +func rewriteValuegeneric_OpInterLECall(v *Value) bool { + // match: (InterLECall [argsize] {auxCall} (Load (OffPtr [off] (ITab (IMake (Addr {itab} (SB)) _))) _) ___) + // cond: devirtLESym(v, auxCall, itab, off) != nil + // result: devirtLECall(v, devirtLESym(v, auxCall, itab, off)) + for { + if len(v.Args) < 1 { + break + } + auxCall := auxToCall(v.Aux) + v_0 := v.Args[0] + if v_0.Op != OpLoad { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpOffPtr { + break + } + off := auxIntToInt64(v_0_0.AuxInt) + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpITab { + break + } + v_0_0_0_0 := v_0_0_0.Args[0] + if v_0_0_0_0.Op != OpIMake { + break + } + v_0_0_0_0_0 := v_0_0_0_0.Args[0] + if v_0_0_0_0_0.Op != OpAddr { + break + } + itab := auxToSym(v_0_0_0_0_0.Aux) + v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] + if v_0_0_0_0_0_0.Op != OpSB || !(devirtLESym(v, auxCall, itab, off) != nil) { + break + } + v.copyOf(devirtLECall(v, devirtLESym(v, auxCall, itab, off))) + return true + } + return false +} func rewriteValuegeneric_OpIsInBounds(v *Value) bool { v_1 := v.Args[1] v_0 
:= v.Args[0] @@ -18549,6 +18591,9 @@ func rewriteValuegeneric_OpPhi(v *Value) bool { // match: (Phi (Const8 [c]) (Const8 [c])) // result: (Const8 [c]) for { + if len(v.Args) != 2 { + break + } _ = v.Args[1] v_0 := v.Args[0] if v_0.Op != OpConst8 { @@ -18556,7 +18601,7 @@ func rewriteValuegeneric_OpPhi(v *Value) bool { } c := auxIntToInt8(v_0.AuxInt) v_1 := v.Args[1] - if v_1.Op != OpConst8 || auxIntToInt8(v_1.AuxInt) != c || len(v.Args) != 2 { + if v_1.Op != OpConst8 || auxIntToInt8(v_1.AuxInt) != c { break } v.reset(OpConst8) @@ -18566,6 +18611,9 @@ func rewriteValuegeneric_OpPhi(v *Value) bool { // match: (Phi (Const16 [c]) (Const16 [c])) // result: (Const16 [c]) for { + if len(v.Args) != 2 { + break + } _ = v.Args[1] v_0 := v.Args[0] if v_0.Op != OpConst16 { @@ -18573,7 +18621,7 @@ func rewriteValuegeneric_OpPhi(v *Value) bool { } c := auxIntToInt16(v_0.AuxInt) v_1 := v.Args[1] - if v_1.Op != OpConst16 || auxIntToInt16(v_1.AuxInt) != c || len(v.Args) != 2 { + if v_1.Op != OpConst16 || auxIntToInt16(v_1.AuxInt) != c { break } v.reset(OpConst16) @@ -18583,6 +18631,9 @@ func rewriteValuegeneric_OpPhi(v *Value) bool { // match: (Phi (Const32 [c]) (Const32 [c])) // result: (Const32 [c]) for { + if len(v.Args) != 2 { + break + } _ = v.Args[1] v_0 := v.Args[0] if v_0.Op != OpConst32 { @@ -18590,7 +18641,7 @@ func rewriteValuegeneric_OpPhi(v *Value) bool { } c := auxIntToInt32(v_0.AuxInt) v_1 := v.Args[1] - if v_1.Op != OpConst32 || auxIntToInt32(v_1.AuxInt) != c || len(v.Args) != 2 { + if v_1.Op != OpConst32 || auxIntToInt32(v_1.AuxInt) != c { break } v.reset(OpConst32) @@ -18600,6 +18651,9 @@ func rewriteValuegeneric_OpPhi(v *Value) bool { // match: (Phi (Const64 [c]) (Const64 [c])) // result: (Const64 [c]) for { + if len(v.Args) != 2 { + break + } _ = v.Args[1] v_0 := v.Args[0] if v_0.Op != OpConst64 { @@ -18607,7 +18661,7 @@ func rewriteValuegeneric_OpPhi(v *Value) bool { } c := auxIntToInt64(v_0.AuxInt) v_1 := v.Args[1] - if v_1.Op != OpConst64 || 
auxIntToInt64(v_1.AuxInt) != c || len(v.Args) != 2 { + if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != c { break } v.reset(OpConst64) diff --git a/src/cmd/compile/internal/ssa/value.go b/src/cmd/compile/internal/ssa/value.go index 94b8763d5d..edc43aaae7 100644 --- a/src/cmd/compile/internal/ssa/value.go +++ b/src/cmd/compile/internal/ssa/value.go @@ -348,6 +348,9 @@ func (v *Value) reset(op Op) { // It modifies v to be (Copy a). //go:noinline func (v *Value) copyOf(a *Value) { + if v == a { + return + } if v.InCache { v.Block.Func.unCache(v) } From 8c84dcfe8c6a795ed6ae6be540ffc638841144ce Mon Sep 17 00:00:00 2001 From: David Chase Date: Mon, 10 Aug 2020 18:30:11 -0400 Subject: [PATCH 086/281] cmd/compile: enable late expansion for closure calls This works for "normal" calls. Defer func() and Go func() still pending. RT calls still pending. Change-Id: I29cbdad8c877d12c08bbf7f3f0696611de877da9 Reviewed-on: https://go-review.googlesource.com/c/go/+/247771 Trust: David Chase Run-TryBot: David Chase TryBot-Result: Go Bot Reviewed-by: Cherry Zhang --- src/cmd/compile/internal/gc/ssa.go | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index e01ebd6e89..7e377f9b84 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -4386,6 +4386,9 @@ func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value { // Deferred nil function needs to panic when the function is invoked, // not the point of defer statement. s.maybeNilCheckClosure(closure, k) + if k == callNormal && ssa.LateCallExpansionEnabledWithin(s.f) { + testLateExpansion = true + } } case OCALLMETH: if fn.Op != ODOTMETH { @@ -4556,7 +4559,13 @@ func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value { // critical that we not clobber any arguments already // stored onto the stack. 
codeptr = s.rawLoad(types.Types[TUINTPTR], closure) - call = s.newValue3A(ssa.OpClosureCall, types.TypeMem, ssa.ClosureAuxCall(ACArgs, ACResults), codeptr, closure, s.mem()) + if testLateExpansion { + aux := ssa.ClosureAuxCall(ACArgs, ACResults) + call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, closure) + call.AddArgs(callArgs...) + } else { + call = s.newValue3A(ssa.OpClosureCall, types.TypeMem, ssa.ClosureAuxCall(ACArgs, ACResults), codeptr, closure, s.mem()) + } case codeptr != nil: if testLateExpansion { aux := ssa.InterfaceAuxCall(ACArgs, ACResults) From 4ad5dd63a7b5bc57312a95bd7dcdb6c209456a6f Mon Sep 17 00:00:00 2001 From: David Chase Date: Tue, 11 Aug 2020 08:39:17 -0400 Subject: [PATCH 087/281] cmd/compile: late call expansion for go func and simple defer func Passes run.bash and race.bash on darwin/amd64. Change-Id: Icbccaa2f2e7c3eac7c328c5253f331e598e11542 Reviewed-on: https://go-review.googlesource.com/c/go/+/247898 Trust: David Chase Run-TryBot: David Chase TryBot-Result: Go Bot Reviewed-by: Cherry Zhang --- src/cmd/compile/internal/gc/ssa.go | 37 +++++++++++++++++++----------- 1 file changed, 23 insertions(+), 14 deletions(-) diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 7e377f9b84..7effa9bd4b 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -4374,11 +4374,11 @@ func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value { switch n.Op { case OCALLFUNC: + if k != callDeferStack && ssa.LateCallExpansionEnabledWithin(s.f) { + testLateExpansion = true + } if k == callNormal && fn.Op == ONAME && fn.Class() == PFUNC { sym = fn.Sym - if ssa.LateCallExpansionEnabledWithin(s.f) { - testLateExpansion = true - } break } closure = s.expr(fn) @@ -4386,19 +4386,16 @@ func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value { // Deferred nil function needs to panic when the function is invoked, // not the 
point of defer statement. s.maybeNilCheckClosure(closure, k) - if k == callNormal && ssa.LateCallExpansionEnabledWithin(s.f) { - testLateExpansion = true - } } case OCALLMETH: if fn.Op != ODOTMETH { s.Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", fn) } + if k != callDeferStack && ssa.LateCallExpansionEnabledWithin(s.f) { + testLateExpansion = true + } if k == callNormal { sym = fn.Sym - if ssa.LateCallExpansionEnabledWithin(s.f) { - testLateExpansion = true - } break } closure = s.getMethodClosure(fn) @@ -4408,13 +4405,13 @@ func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value { if fn.Op != ODOTINTER { s.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op) } + if k != callDeferStack && ssa.LateCallExpansionEnabledWithin(s.f) { + testLateExpansion = true + } var iclosure *ssa.Value iclosure, rcvr = s.getClosureAndRcvr(fn) if k == callNormal { codeptr = s.load(types.Types[TUINTPTR], iclosure) - if ssa.LateCallExpansionEnabledWithin(s.f) { - testLateExpansion = true - } } else { closure = iclosure } @@ -4549,9 +4546,21 @@ func (s *state) call(n *Node, k callKind, returnResultAddr bool) *ssa.Value { // call target switch { case k == callDefer: - call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(deferproc, ACArgs, ACResults), s.mem()) + aux := ssa.StaticAuxCall(deferproc, ACArgs, ACResults) + if testLateExpansion { + call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux) + call.AddArgs(callArgs...) + } else { + call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, aux, s.mem()) + } case k == callGo: - call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(newproc, ACArgs, ACResults), s.mem()) + aux := ssa.StaticAuxCall(newproc, ACArgs, ACResults) + if testLateExpansion { + call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux) + call.AddArgs(callArgs...) 
+ } else { + call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, aux, s.mem()) + } case closure != nil: // rawLoad because loading the code pointer from a // closure is always safe, but IsSanitizerSafeAddr From cc2a5cf4b8b0aeaccd3dd439f8d3d68f25eef358 Mon Sep 17 00:00:00 2001 From: Lynn Boger Date: Mon, 28 Sep 2020 18:20:12 -0400 Subject: [PATCH 088/281] cmd/compile,cmd/internal/obj/ppc64: fix some shift rules due to a regression A recent change to improve shifts was generating some invalid cases when the rule was based on an AND. The extended mnemonics CLRLSLDI and CLRLSLWI only allow certain values for the operands and in the mask case those values were not being checked properly. This adds a check to those rules to verify that the 'b' and 'n' values used when an AND was part of the rule have correct values. There was a bug in some diag messages in asm9. The message expected 3 values but only provided 2. Those are corrected here also. The test/codegen/shift.go was updated to add a few more cases to check for the case mentioned here. Some of the comments that mention the order of operands in these extended mnemonics were wrong and those have been corrected. Fixes #41683. 
Change-Id: If5bb860acaa5051b9e0cd80784b2868b85898c31 Reviewed-on: https://go-review.googlesource.com/c/go/+/258138 Run-TryBot: Lynn Boger Reviewed-by: Paul Murphy Reviewed-by: Carlos Eduardo Seo TryBot-Result: Go Bot Trust: Lynn Boger --- src/cmd/asm/internal/asm/testdata/ppc64enc.s | 4 +-- src/cmd/compile/internal/ppc64/ssa.go | 12 +++---- src/cmd/compile/internal/ssa/gen/PPC64.rules | 9 +++-- src/cmd/compile/internal/ssa/rewrite.go | 4 +-- src/cmd/compile/internal/ssa/rewritePPC64.go | 34 +++++-------------- src/cmd/internal/obj/ppc64/asm9.go | 35 ++++++++++---------- test/codegen/shift.go | 17 ++++++---- 7 files changed, 50 insertions(+), 65 deletions(-) diff --git a/src/cmd/asm/internal/asm/testdata/ppc64enc.s b/src/cmd/asm/internal/asm/testdata/ppc64enc.s index 88a7609ba8..869f8c2d4f 100644 --- a/src/cmd/asm/internal/asm/testdata/ppc64enc.s +++ b/src/cmd/asm/internal/asm/testdata/ppc64enc.s @@ -287,8 +287,8 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$0 RLDICRCC $0, R4, $15, R6 // 788603c5 RLDIC $0, R4, $15, R6 // 788603c8 RLDICCC $0, R4, $15, R6 // 788603c9 - CLRLSLWI $16, R5, $8, R4 // 54a4861e - CLRLSLDI $2, R4, $24, R3 // 78831588 + CLRLSLWI $8, R5, $6, R4 // 54a430b2 + CLRLSLDI $24, R4, $4, R3 // 78832508 BEQ 0(PC) // 41820000 BGE 0(PC) // 40800000 diff --git a/src/cmd/compile/internal/ppc64/ssa.go b/src/cmd/compile/internal/ppc64/ssa.go index a5fbdaffba..d83b2df379 100644 --- a/src/cmd/compile/internal/ppc64/ssa.go +++ b/src/cmd/compile/internal/ppc64/ssa.go @@ -570,9 +570,9 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { r1 := v.Args[0].Reg() shifts := v.AuxInt p := s.Prog(v.Op.Asm()) - // clrlslwi ra,rs,sh,mb will become rlwinm ra,rs,sh,mb-sh,31-n as described in ISA - p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: ssa.GetPPC64Shiftsh(shifts)} - p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: ssa.GetPPC64Shiftmb(shifts)}) + // clrlslwi ra,rs,mb,sh will become rlwinm ra,rs,sh,mb-sh,31-sh as described in ISA + p.From = obj.Addr{Type: obj.TYPE_CONST, 
Offset: ssa.GetPPC64Shiftmb(shifts)} + p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: ssa.GetPPC64Shiftsh(shifts)}) p.Reg = r1 p.To.Type = obj.TYPE_REG p.To.Reg = r @@ -582,9 +582,9 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { r1 := v.Args[0].Reg() shifts := v.AuxInt p := s.Prog(v.Op.Asm()) - // clrlsldi ra,rs,sh,mb will become rldic ra,rs,sh,mb-sh - p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: ssa.GetPPC64Shiftsh(shifts)} - p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: ssa.GetPPC64Shiftmb(shifts)}) + // clrlsldi ra,rs,mb,sh will become rldic ra,rs,sh,mb-sh + p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: ssa.GetPPC64Shiftmb(shifts)} + p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: ssa.GetPPC64Shiftsh(shifts)}) p.Reg = r1 p.To.Type = obj.TYPE_REG p.To.Reg = r diff --git a/src/cmd/compile/internal/ssa/gen/PPC64.rules b/src/cmd/compile/internal/ssa/gen/PPC64.rules index de30d003e6..83ee4c499b 100644 --- a/src/cmd/compile/internal/ssa/gen/PPC64.rules +++ b/src/cmd/compile/internal/ssa/gen/PPC64.rules @@ -1018,13 +1018,12 @@ (SLDconst [c] z:(MOVHZreg x)) && c < 16 && z.Uses == 1 => (CLRLSLDI [newPPC64ShiftAuxInt(c,48,63,64)] x) (SLDconst [c] z:(MOVWZreg x)) && c < 32 && z.Uses == 1 => (CLRLSLDI [newPPC64ShiftAuxInt(c,32,63,64)] x) -(SLDconst [c] z:(ANDconst [d] x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) => (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x) -(SLDconst [c] z:(AND (MOVDconst [d]) x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) => (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x) +(SLDconst [c] z:(ANDconst [d] x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (64-getPPC64ShiftMaskLength(d)) => (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x) +(SLDconst [c] z:(AND (MOVDconst [d]) x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(64-getPPC64ShiftMaskLength(d)) => (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x) (SLWconst [c] 
z:(MOVBZreg x)) && z.Uses == 1 && c < 8 => (CLRLSLWI [newPPC64ShiftAuxInt(c,24,31,32)] x) (SLWconst [c] z:(MOVHZreg x)) && z.Uses == 1 && c < 16 => (CLRLSLWI [newPPC64ShiftAuxInt(c,16,31,32)] x) -(SLWconst [c] z:(MOVWZreg x)) && z.Uses == 1 && c < 24 => (CLRLSLWI [newPPC64ShiftAuxInt(c,8,31,32)] x) -(SLWconst [c] z:(ANDconst [d] x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) => (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x) -(SLWconst [c] z:(AND (MOVDconst [d]) x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) => (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x) +(SLWconst [c] z:(ANDconst [d] x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(32-getPPC64ShiftMaskLength(d)) => (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x) +(SLWconst [c] z:(AND (MOVDconst [d]) x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(32-getPPC64ShiftMaskLength(d)) => (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x) // special case for power9 (SL(W|D)const [c] z:(MOVWreg x)) && c < 32 && objabi.GOPPC64 >= 9 => (EXTSWSLconst [c] x) diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go index 9f4de83a77..5d8b3ddc4e 100644 --- a/src/cmd/compile/internal/ssa/rewrite.go +++ b/src/cmd/compile/internal/ssa/rewrite.go @@ -1380,8 +1380,8 @@ func GetPPC64Shiftme(auxint int64) int64 { return int64(int8(auxint)) } -// Catch the simple ones first -// TODO: Later catch more cases +// This verifies that the mask occupies the +// rightmost bits. 
func isPPC64ValidShiftMask(v int64) bool { if ((v + 1) & v) == 0 { return true diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go index 29ec3992f2..9822637b05 100644 --- a/src/cmd/compile/internal/ssa/rewritePPC64.go +++ b/src/cmd/compile/internal/ssa/rewritePPC64.go @@ -12831,7 +12831,7 @@ func rewriteValuePPC64_OpPPC64SLDconst(v *Value) bool { return true } // match: (SLDconst [c] z:(ANDconst [d] x)) - // cond: z.Uses == 1 && isPPC64ValidShiftMask(d) + // cond: z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (64-getPPC64ShiftMaskLength(d)) // result: (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x) for { c := auxIntToInt64(v.AuxInt) @@ -12841,7 +12841,7 @@ func rewriteValuePPC64_OpPPC64SLDconst(v *Value) bool { } d := auxIntToInt64(z.AuxInt) x := z.Args[0] - if !(z.Uses == 1 && isPPC64ValidShiftMask(d)) { + if !(z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (64-getPPC64ShiftMaskLength(d))) { break } v.reset(OpPPC64CLRLSLDI) @@ -12850,7 +12850,7 @@ func rewriteValuePPC64_OpPPC64SLDconst(v *Value) bool { return true } // match: (SLDconst [c] z:(AND (MOVDconst [d]) x)) - // cond: z.Uses == 1 && isPPC64ValidShiftMask(d) + // cond: z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(64-getPPC64ShiftMaskLength(d)) // result: (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x) for { c := auxIntToInt64(v.AuxInt) @@ -12867,7 +12867,7 @@ func rewriteValuePPC64_OpPPC64SLDconst(v *Value) bool { } d := auxIntToInt64(z_0.AuxInt) x := z_1 - if !(z.Uses == 1 && isPPC64ValidShiftMask(d)) { + if !(z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (64-getPPC64ShiftMaskLength(d))) { continue } v.reset(OpPPC64CLRLSLDI) @@ -12953,26 +12953,8 @@ func rewriteValuePPC64_OpPPC64SLWconst(v *Value) bool { v.AddArg(x) return true } - // match: (SLWconst [c] z:(MOVWZreg x)) - // cond: z.Uses == 1 && c < 24 - // result: (CLRLSLWI [newPPC64ShiftAuxInt(c,8,31,32)] x) - for { - c := 
auxIntToInt64(v.AuxInt) - z := v_0 - if z.Op != OpPPC64MOVWZreg { - break - } - x := z.Args[0] - if !(z.Uses == 1 && c < 24) { - break - } - v.reset(OpPPC64CLRLSLWI) - v.AuxInt = int32ToAuxInt(newPPC64ShiftAuxInt(c, 8, 31, 32)) - v.AddArg(x) - return true - } // match: (SLWconst [c] z:(ANDconst [d] x)) - // cond: z.Uses == 1 && isPPC64ValidShiftMask(d) + // cond: z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(32-getPPC64ShiftMaskLength(d)) // result: (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x) for { c := auxIntToInt64(v.AuxInt) @@ -12982,7 +12964,7 @@ func rewriteValuePPC64_OpPPC64SLWconst(v *Value) bool { } d := auxIntToInt64(z.AuxInt) x := z.Args[0] - if !(z.Uses == 1 && isPPC64ValidShiftMask(d)) { + if !(z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (32-getPPC64ShiftMaskLength(d))) { break } v.reset(OpPPC64CLRLSLWI) @@ -12991,7 +12973,7 @@ func rewriteValuePPC64_OpPPC64SLWconst(v *Value) bool { return true } // match: (SLWconst [c] z:(AND (MOVDconst [d]) x)) - // cond: z.Uses == 1 && isPPC64ValidShiftMask(d) + // cond: z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(32-getPPC64ShiftMaskLength(d)) // result: (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x) for { c := auxIntToInt64(v.AuxInt) @@ -13008,7 +12990,7 @@ func rewriteValuePPC64_OpPPC64SLWconst(v *Value) bool { } d := auxIntToInt64(z_0.AuxInt) x := z_1 - if !(z.Uses == 1 && isPPC64ValidShiftMask(d)) { + if !(z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (32-getPPC64ShiftMaskLength(d))) { continue } v.reset(OpPPC64CLRLSLWI) diff --git a/src/cmd/internal/obj/ppc64/asm9.go b/src/cmd/internal/obj/ppc64/asm9.go index 9f06bdf8b3..928e299f43 100644 --- a/src/cmd/internal/obj/ppc64/asm9.go +++ b/src/cmd/internal/obj/ppc64/asm9.go @@ -2749,7 +2749,7 @@ func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) { me := int(d) sh := c.regoff(&p.From) if me < 0 || me > 63 || sh > 63 { - c.ctxt.Diag("Invalid me or sh for RLDICR: %x %x\n%v", int(d), sh) + 
c.ctxt.Diag("Invalid me or sh for RLDICR: %x %x\n%v", int(d), sh, p) } o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(me)) @@ -2757,19 +2757,19 @@ func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) { mb := int(d) sh := c.regoff(&p.From) if mb < 0 || mb > 63 || sh > 63 { - c.ctxt.Diag("Invalid mb or sh for RLDIC, RLDICL: %x %x\n%v", mb, sh) + c.ctxt.Diag("Invalid mb or sh for RLDIC, RLDICL: %x %x\n%v", mb, sh, p) } o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(mb)) case ACLRLSLDI: // This is an extended mnemonic defined in the ISA section C.8.1 - // clrlsldi ra,rs,n,b --> rldic ra,rs,n,b-n + // clrlsldi ra,rs,b,n --> rldic ra,rs,n,b-n // It maps onto RLDIC so is directly generated here based on the operands from // the clrlsldi. - b := int(d) - n := c.regoff(&p.From) - if n > int32(b) || b > 63 { - c.ctxt.Diag("Invalid n or b for CLRLSLDI: %x %x\n%v", n, b) + n := int32(d) + b := c.regoff(&p.From) + if n > b || b > 63 { + c.ctxt.Diag("Invalid n or b for CLRLSLDI: %x %x\n%v", n, b, p) } o1 = AOP_RLDIC(OP_RLDIC, uint32(p.To.Reg), uint32(r), uint32(n), uint32(b)-uint32(n)) @@ -3395,14 +3395,15 @@ func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) { v := c.regoff(&p.From) switch p.As { case ACLRLSLWI: - b := c.regoff(p.GetFrom3()) + n := c.regoff(p.GetFrom3()) // This is an extended mnemonic described in the ISA C.8.2 - // clrlslwi ra,rs,n,b -> rlwinm ra,rs,n,b-n,31-n + // clrlslwi ra,rs,b,n -> rlwinm ra,rs,n,b-n,31-n // It maps onto rlwinm which is directly generated here. 
- if v < 0 || v > 32 || b > 32 { - c.ctxt.Diag("Invalid n or b for CLRLSLWI: %x %x\n%v", v, b) + if n > v || v >= 32 { + c.ctxt.Diag("Invalid n or b for CLRLSLWI: %x %x\n%v", v, n, p) } - o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.Reg), uint32(v), uint32(b-v), uint32(31-v)) + + o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.Reg), uint32(n), uint32(v-n), uint32(31-n)) default: var mask [2]uint8 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3()))) @@ -3414,16 +3415,16 @@ func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) { v := c.regoff(&p.From) switch p.As { case ACLRLSLWI: - b := c.regoff(p.GetFrom3()) - if v > b || b > 32 { + n := c.regoff(p.GetFrom3()) + if n > v || v >= 32 { // Message will match operands from the ISA even though in the // code it uses 'v' - c.ctxt.Diag("Invalid n or b for CLRLSLWI: %x %x\n%v", v, b) + c.ctxt.Diag("Invalid n or b for CLRLSLWI: %x %x\n%v", v, n, p) } // This is an extended mnemonic described in the ISA C.8.2 - // clrlslwi ra,rs,n,b -> rlwinm ra,rs,n,b-n,31-n + // clrlslwi ra,rs,b,n -> rlwinm ra,rs,n,b-n,31-n // It generates the rlwinm directly here. 
- o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.Reg), uint32(v), uint32(b-v), uint32(31-v)) + o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.Reg), uint32(n), uint32(v-n), uint32(31-n)) default: var mask [2]uint8 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3()))) diff --git a/test/codegen/shift.go b/test/codegen/shift.go index abc4b091c9..bbfc85ffbb 100644 --- a/test/codegen/shift.go +++ b/test/codegen/shift.go @@ -187,8 +187,8 @@ func checkCombinedShifts(v8 uint8, v16 uint16, v32 uint32, x32 int32, v64 uint64 // ppc64le:-"AND","CLRLSLWI" // ppc64:-"AND","CLRLSLWI" f := (v8 &0xF) << 2 - // ppc64le:-"AND","CLRLSLWI" - // ppc64:-"AND","CLRLSLWI" + // ppc64le:"CLRLSLWI" + // ppc64:"CLRLSLWI" f += byte(v16)<<3 // ppc64le:-"AND","CLRLSLWI" // ppc64:-"AND","CLRLSLWI" @@ -196,12 +196,15 @@ func checkCombinedShifts(v8 uint8, v16 uint16, v32 uint32, x32 int32, v64 uint64 // ppc64le:-"AND","CLRLSLWI" // ppc64:-"AND","CLRLSLWI" h := (v32 & 0xFFFFF) << 2 - // ppc64le:-"AND","CLRLSLWI" - // ppc64:-"AND","CLRLSLWI" - h += uint32(v64)<<4 - // ppc64le:-"AND","CLRLSLDI" - // ppc64:-"AND","CLRLSLDI" + // ppc64le:"CLRLSLDI" + // ppc64:"CLRLSLDI" i := (v64 & 0xFFFFFFFF) << 5 + // ppc64le:-"CLRLSLDI" + // ppc64:-"CLRLSLDI" + i += (v64 & 0xFFFFFFF) << 38 + // ppc64le/power9:-"CLRLSLDI" + // ppc64/power9:-"CLRLSLDI" + i += (v64 & 0xFFFF00) << 10 // ppc64le/power9:-"SLD","EXTSWSLI" // ppc64/power9:-"SLD","EXTSWSLI" j := int64(x32+32)*8 From 5756b3560141d0c09c4a27d2025f5438f49f59f2 Mon Sep 17 00:00:00 2001 From: Michael Anthony Knyszek Date: Thu, 10 Sep 2020 21:20:46 +0000 Subject: [PATCH 089/281] runtime: align 12-byte objects to 8 bytes on 32-bit systems Currently on 32-bit systems 8-byte fields in a struct have an alignment of 4 bytes, which means that atomic instructions may fault. This issue is tracked in #36606. Our current workaround is to allocate memory and put any such atomically accessed fields at the beginning of the object. 
This workaround fails because the tiny allocator might not align the object right. This case specifically only happens with 12-byte objects because a type's size is rounded up to its alignment. So if e.g. we have a type like: type obj struct { a uint64 b byte } then its size will be 12 bytes, because "a" will require a 4 byte alignment. This argument may be extended to all objects of size 9-15 bytes. So, make this workaround work by specifically aligning such objects to 8 bytes on 32-bit systems. This change leaves a TODO to remove the code once #36606 gets resolved. It also adds a test which will presumably no longer be necessary (the compiler should enforce the right alignment) when it gets resolved as well. Fixes #37262. Change-Id: I3a34e5b014b3c37ed2e5e75e62d71d8640aa42bc Reviewed-on: https://go-review.googlesource.com/c/go/+/254057 Reviewed-by: Cherry Zhang Reviewed-by: Austin Clements Run-TryBot: Cherry Zhang TryBot-Result: Go Bot Trust: Michael Knyszek --- src/runtime/malloc.go | 8 ++++++ src/runtime/malloc_test.go | 57 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 65 insertions(+) diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go index 4fa14996c2..c71f856f09 100644 --- a/src/runtime/malloc.go +++ b/src/runtime/malloc.go @@ -1016,6 +1016,14 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer { // Align tiny pointer for required (conservative) alignment. if size&7 == 0 { off = alignUp(off, 8) + } else if sys.PtrSize == 4 && size == 12 { + // Conservatively align 12-byte objects to 8 bytes on 32-bit + // systems so that objects whose first field is a 64-bit + // value is aligned to 8 bytes and does not cause a fault on + // atomic access. See issue 37262. + // TODO(mknyszek): Remove this workaround if/when issue 36606 + // is resolved. 
+ off = alignUp(off, 8) } else if size&3 == 0 { off = alignUp(off, 4) } else if size&1 == 0 { diff --git a/src/runtime/malloc_test.go b/src/runtime/malloc_test.go index 5c97f548fd..4ba94d0494 100644 --- a/src/runtime/malloc_test.go +++ b/src/runtime/malloc_test.go @@ -12,8 +12,10 @@ import ( "os" "os/exec" "reflect" + "runtime" . "runtime" "strings" + "sync/atomic" "testing" "time" "unsafe" @@ -168,6 +170,61 @@ func TestTinyAlloc(t *testing.T) { } } +var ( + tinyByteSink *byte + tinyUint32Sink *uint32 + tinyObj12Sink *obj12 +) + +type obj12 struct { + a uint64 + b uint32 +} + +func TestTinyAllocIssue37262(t *testing.T) { + // Try to cause an alignment access fault + // by atomically accessing the first 64-bit + // value of a tiny-allocated object. + // See issue 37262 for details. + + // GC twice, once to reach a stable heap state + // and again to make sure we finish the sweep phase. + runtime.GC() + runtime.GC() + + // Make 1-byte allocations until we get a fresh tiny slot. + aligned := false + for i := 0; i < 16; i++ { + tinyByteSink = new(byte) + if uintptr(unsafe.Pointer(tinyByteSink))&0xf == 0xf { + aligned = true + break + } + } + if !aligned { + t.Fatal("unable to get a fresh tiny slot") + } + + // Create a 4-byte object so that the current + // tiny slot is partially filled. + tinyUint32Sink = new(uint32) + + // Create a 12-byte object, which fits into the + // tiny slot. If it actually gets place there, + // then the field "a" will be improperly aligned + // for atomic access on 32-bit architectures. + // This won't be true if issue 36606 gets resolved. + tinyObj12Sink = new(obj12) + + // Try to atomically access "x.a". + atomic.StoreUint64(&tinyObj12Sink.a, 10) + + // Clear the sinks. 
+ tinyByteSink = nil + tinyUint32Sink = nil + tinyObj12Sink = nil +} + func TestPageCacheLeak(t *testing.T) { defer GOMAXPROCS(GOMAXPROCS(1)) leaked := PageCachePagesLeaked() From c78c79f152dff010820a22e9a79cfffe48322914 Mon Sep 17 00:00:00 2001 From: David Chase Date: Thu, 1 Oct 2020 16:22:19 -0400 Subject: [PATCH 090/281] cmd/compile: activate revert switch for late call expansion regression Not a fix, but things will work while I fix it. Credit @andybons "for we revert switches for scary stuff". Updates #41736 Change-Id: I55f90860eae919765aac4f6d9f108a54139027e1 Reviewed-on: https://go-review.googlesource.com/c/go/+/258897 Trust: David Chase Run-TryBot: David Chase Reviewed-by: Cherry Zhang TryBot-Result: Go Bot --- src/cmd/compile/internal/ssa/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cmd/compile/internal/ssa/config.go b/src/cmd/compile/internal/ssa/config.go index a73bcf8fca..88a406deb9 100644 --- a/src/cmd/compile/internal/ssa/config.go +++ b/src/cmd/compile/internal/ssa/config.go @@ -196,7 +196,7 @@ const ( ClassParamOut // return value ) -const go116lateCallExpansion = true +const go116lateCallExpansion = false // LateCallExpansionEnabledWithin returns true if late call expansion should be tested // within compilation of a function/method triggered by GOSSAHASH (defaults to "yes"). From f4cbf3477f1456b4d28a7b74b31820ee60b7e6d1 Mon Sep 17 00:00:00 2001 From: David Chase Date: Mon, 31 Aug 2020 14:29:58 -0400 Subject: [PATCH 091/281] cmd/compile: allow directory specification for GOSSAFUNC output This was useful for debugging failures occurring during make.bash. The added flush also ensures that any hints in the GOSSAFUNC output are flushed before fatal exit. The environment variable GOSSADIR specifies where the SSA html debugging files should be placed. 
To avoid collisions, each one is written into the [package].[functionOrMethod].html, where [package] is the filepath separator separated package name, function is the function name, and method is either (*Type).Method, or Type.Method, as appropriate. Directories are created as necessary to make this work. Change-Id: I420927426b618b633bb1ffc51cf0f223b8f6d49c Reviewed-on: https://go-review.googlesource.com/c/go/+/252338 Trust: David Chase Run-TryBot: David Chase Reviewed-by: Cherry Zhang --- src/cmd/compile/internal/gc/main.go | 1 + src/cmd/compile/internal/gc/ssa.go | 10 +++++++++- src/cmd/compile/internal/ssa/compile.go | 10 ++++++++++ src/cmd/compile/internal/ssa/html.go | 13 +++++++++---- 4 files changed, 29 insertions(+), 5 deletions(-) diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 7ad3bfe0c8..e4e4ce72fd 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -516,6 +516,7 @@ func Main(archInit func(*Arch)) { } ssaDump = os.Getenv("GOSSAFUNC") + ssaDir = os.Getenv("GOSSADIR") if ssaDump != "" { if strings.HasSuffix(ssaDump, "+") { ssaDump = ssaDump[:len(ssaDump)-1] diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 7effa9bd4b..1d50cefe54 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -9,6 +9,7 @@ import ( "fmt" "html" "os" + "path/filepath" "sort" "bufio" @@ -26,6 +27,7 @@ var ssaConfig *ssa.Config var ssaCaches []ssa.Cache var ssaDump string // early copy of $GOSSAFUNC; the func name to dump output for +var ssaDir string // optional destination for ssa dump file var ssaDumpStdout bool // whether to dump to stdout var ssaDumpCFG string // generate CFGs for these phases const ssaDumpFile = "ssa.html" @@ -346,7 +348,13 @@ func buildssa(fn *Node, worker int) *ssa.Func { s.f.Entry.Pos = fn.Pos if printssa { - s.f.HTMLWriter = ssa.NewHTMLWriter(ssaDumpFile, s.f, ssaDumpCFG) + ssaDF := ssaDumpFile + 
if ssaDir != "" { + ssaDF = filepath.Join(ssaDir, myimportpath+"."+name+".html") + ssaD := filepath.Dir(ssaDF) + os.MkdirAll(ssaD, 0755) + } + s.f.HTMLWriter = ssa.NewHTMLWriter(ssaDF, s.f, ssaDumpCFG) // TODO: generate and print a mapping from nodes to values and blocks dumpSourcesColumn(s.f.HTMLWriter, fn) s.f.HTMLWriter.WriteAST("AST", astBuf) diff --git a/src/cmd/compile/internal/ssa/compile.go b/src/cmd/compile/internal/ssa/compile.go index 3dec1cd85b..0664c0ba46 100644 --- a/src/cmd/compile/internal/ssa/compile.go +++ b/src/cmd/compile/internal/ssa/compile.go @@ -47,6 +47,9 @@ func Compile(f *Func) { stack := make([]byte, 16384) n := runtime.Stack(stack, false) stack = stack[:n] + if f.HTMLWriter != nil { + f.HTMLWriter.flushPhases() + } f.Fatalf("panic during %s while compiling %s:\n\n%v\n\n%s\n", phaseName, f.Name, err, stack) } }() @@ -201,6 +204,13 @@ func (p *pass) addDump(s string) { p.dump[s] = true } +func (p *pass) String() string { + if p == nil { + return "nil pass" + } + return p.name +} + // Run consistency checker between each phase var ( checkEnabled = false diff --git a/src/cmd/compile/internal/ssa/html.go b/src/cmd/compile/internal/ssa/html.go index c781ca92cc..a9d52fa4ee 100644 --- a/src/cmd/compile/internal/ssa/html.go +++ b/src/cmd/compile/internal/ssa/html.go @@ -28,18 +28,23 @@ type HTMLWriter struct { } func NewHTMLWriter(path string, f *Func, cfgMask string) *HTMLWriter { + path = strings.Replace(path, "/", string(filepath.Separator), -1) out, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) if err != nil { f.Fatalf("%v", err) } - pwd, err := os.Getwd() - if err != nil { - f.Fatalf("%v", err) + reportPath := path + if !filepath.IsAbs(reportPath) { + pwd, err := os.Getwd() + if err != nil { + f.Fatalf("%v", err) + } + reportPath = filepath.Join(pwd, path) } html := HTMLWriter{ w: out, Func: f, - path: filepath.Join(pwd, path), + path: reportPath, dot: newDotWriter(cfgMask), } html.start() From 
507a88c39bb1089b9d44facb7dd3449a9b5a3e10 Mon Sep 17 00:00:00 2001 From: Jay Conrod Date: Thu, 1 Oct 2020 13:37:06 -0400 Subject: [PATCH 092/281] cmd/go/internal/modfetch: always extract module directories in place Previously by default, we extracted modules to a temporary directory, then renamed it into place. This failed with ERROR_ACCESS_DENIED on Windows if another process (usually an anti-virus scanner) opened files in the temporary directory. Since Go 1.15, users have been able to set GODEBUG=modcacheunzipinplace=1 to opt into new behavior: we extract modules at their final location, and we create and later delete a .partial file to prevent the directory from being used if we crash. .partial files are recognized by Go 1.14.2 and later. With this change, the new behavior is the only behavior. modcacheunzipinplace is no longer recognized. Fixes #36568 Change-Id: Iff19fca5cd6eaa3597975a69fa05c4cb1b834bd6 Reviewed-on: https://go-review.googlesource.com/c/go/+/258798 Run-TryBot: Jay Conrod TryBot-Result: Go Bot Trust: Jay Conrod Reviewed-by: Bryan C. Mills --- src/cmd/go/internal/modfetch/fetch.go | 107 ++++++------------ .../script/mod_concurrent_unzipinplace.txt | 17 --- .../script/mod_download_concurrent_read.txt | 23 ++-- .../testdata/script/mod_download_partial.txt | 1 - 4 files changed, 39 insertions(+), 109 deletions(-) delete mode 100644 src/cmd/go/testdata/script/mod_concurrent_unzipinplace.txt diff --git a/src/cmd/go/internal/modfetch/fetch.go b/src/cmd/go/internal/modfetch/fetch.go index 01d8f007ac..1d90002faa 100644 --- a/src/cmd/go/internal/modfetch/fetch.go +++ b/src/cmd/go/internal/modfetch/fetch.go @@ -63,12 +63,9 @@ func download(ctx context.Context, mod module.Version) (dir string, err error) { ctx, span := trace.StartSpan(ctx, "modfetch.download "+mod.String()) defer span.Done() - // If the directory exists, and no .partial file exists, the module has - // already been completely extracted. 
.partial files may be created when a - // module zip directory is extracted in place instead of being extracted to a - // temporary directory and renamed. dir, err = DownloadDir(mod) if err == nil { + // The directory has already been completely extracted (no .partial file exists). return dir, nil } else if dir == "" || !errors.Is(err, os.ErrNotExist) { return "", err @@ -88,6 +85,9 @@ func download(ctx context.Context, mod module.Version) (dir string, err error) { } defer unlock() + ctx, span = trace.StartSpan(ctx, "unzip "+zipfile) + defer span.Done() + // Check whether the directory was populated while we were waiting on the lock. _, dirErr := DownloadDir(mod) if dirErr == nil { @@ -95,10 +95,11 @@ func download(ctx context.Context, mod module.Version) (dir string, err error) { } _, dirExists := dirErr.(*DownloadDirPartialError) - // Clean up any remaining temporary directories from previous runs, as well - // as partially extracted diectories created by future versions of cmd/go. - // This is only safe to do because the lock file ensures that their writers - // are no longer active. + // Clean up any remaining temporary directories created by old versions + // (before 1.16), as well as partially extracted directories (indicated by + // DownloadDirPartialError, usually because of a .partial file). This is only + // safe to do because the lock file ensures that their writers are no longer + // active. parentDir := filepath.Dir(dir) tmpPrefix := filepath.Base(dir) + ".tmp-" if old, err := filepath.Glob(filepath.Join(parentDir, tmpPrefix+"*")); err == nil { @@ -116,88 +117,44 @@ func download(ctx context.Context, mod module.Version) (dir string, err error) { if err != nil { return "", err } - if err := os.Remove(partialPath); err != nil && !os.IsNotExist(err) { - return "", err - } - // Extract the module zip directory. + // Extract the module zip directory at its final location. 
// - // By default, we extract to a temporary directory, then atomically rename to - // its final location. We use the existence of the source directory to signal - // that it has been extracted successfully (see DownloadDir). If someone - // deletes the entire directory (e.g., as an attempt to prune out file - // corruption), the module cache will still be left in a recoverable - // state. + // To prevent other processes from reading the directory if we crash, + // create a .partial file before extracting the directory, and delete + // the .partial file afterward (all while holding the lock). // - // Unfortunately, os.Rename may fail with ERROR_ACCESS_DENIED on Windows if - // another process opens files in the temporary directory. This is partially - // mitigated by using robustio.Rename, which retries os.Rename for a short - // time. + // Before Go 1.16, we extracted to a temporary directory with a random name + // then renamed it into place with os.Rename. On Windows, this failed with + // ERROR_ACCESS_DENIED when another process (usually an anti-virus scanner) + // opened files in the temporary directory. // - // To avoid this error completely, if unzipInPlace is set, we instead create a - // .partial file (indicating the directory isn't fully extracted), then we - // extract the directory at its final location, then we delete the .partial - // file. This is not the default behavior because older versions of Go may - // simply stat the directory to check whether it exists without looking for a - // .partial file. If multiple versions run concurrently, the older version may - // assume a partially extracted directory is complete. - // TODO(golang.org/issue/36568): when these older versions are no longer - // supported, remove the old default behavior and the unzipInPlace flag. + // Go 1.14.2 and higher respect .partial files. Older versions may use + // partially extracted directories. 'go mod verify' can detect this, + // and 'go clean -modcache' can fix it. 
if err := os.MkdirAll(parentDir, 0777); err != nil { return "", err } - - ctx, span = trace.StartSpan(ctx, "unzip "+zipfile) - if unzipInPlace { - if err := ioutil.WriteFile(partialPath, nil, 0666); err != nil { - return "", err - } - if err := modzip.Unzip(dir, mod, zipfile); err != nil { - fmt.Fprintf(os.Stderr, "-> %s\n", err) - if rmErr := RemoveAll(dir); rmErr == nil { - os.Remove(partialPath) - } - return "", err - } - if err := os.Remove(partialPath); err != nil { - return "", err - } - } else { - tmpDir, err := ioutil.TempDir(parentDir, tmpPrefix) - if err != nil { - return "", err - } - if err := modzip.Unzip(tmpDir, mod, zipfile); err != nil { - fmt.Fprintf(os.Stderr, "-> %s\n", err) - RemoveAll(tmpDir) - return "", err - } - if err := robustio.Rename(tmpDir, dir); err != nil { - RemoveAll(tmpDir) - return "", err - } + if err := ioutil.WriteFile(partialPath, nil, 0666); err != nil { + return "", err + } + if err := modzip.Unzip(dir, mod, zipfile); err != nil { + fmt.Fprintf(os.Stderr, "-> %s\n", err) + if rmErr := RemoveAll(dir); rmErr == nil { + os.Remove(partialPath) + } + return "", err + } + if err := os.Remove(partialPath); err != nil { + return "", err } - defer span.Done() if !cfg.ModCacheRW { - // Make dir read-only only *after* renaming it. - // os.Rename was observed to fail for read-only directories on macOS. 
makeDirsReadOnly(dir) } return dir, nil } -var unzipInPlace bool - -func init() { - for _, f := range strings.Split(os.Getenv("GODEBUG"), ",") { - if f == "modcacheunzipinplace=1" { - unzipInPlace = true - break - } - } -} - var downloadZipCache par.Cache // DownloadZip downloads the specific module version to the diff --git a/src/cmd/go/testdata/script/mod_concurrent_unzipinplace.txt b/src/cmd/go/testdata/script/mod_concurrent_unzipinplace.txt deleted file mode 100644 index 473be71c9c..0000000000 --- a/src/cmd/go/testdata/script/mod_concurrent_unzipinplace.txt +++ /dev/null @@ -1,17 +0,0 @@ -# This tests checks the GODEBUG=modcacheunzipinplace=1 flag, used as part of -# a migration in golang.org/issue/36568. -# -# Concurrent downloads with and without GODEBUG=modcacheunzipinplace=1 should -# not conflict. This is meant to simulate an old version and a new version -# of Go accessing the cache concurrently. -go mod download & -env GODEBUG=modcacheunzipinplace=1 -go mod download -wait - --- go.mod -- -module golang.org/issue/36568 - -go 1.14 - -require rsc.io/quote v1.5.2 diff --git a/src/cmd/go/testdata/script/mod_download_concurrent_read.txt b/src/cmd/go/testdata/script/mod_download_concurrent_read.txt index bb9c588896..caf105c6e5 100644 --- a/src/cmd/go/testdata/script/mod_download_concurrent_read.txt +++ b/src/cmd/go/testdata/script/mod_download_concurrent_read.txt @@ -1,27 +1,18 @@ # This test simulates a process watching for changes and reading files in # module cache as a module is extracted. # -# By default, we unzip a downloaded module into a temporary directory with a -# random name, then rename the directory into place. On Windows, this fails -# with ERROR_ACCESS_DENIED if another process (e.g., antivirus) opens files -# in the directory. +# Before Go 1.16, we extracted each module zip to a temporary directory with +# a random name, then renamed that into place with os.Rename. 
On Windows, +# this failed with ERROR_ACCESS_DENIED when another process (usually an +# anti-virus scanner) opened files in the temporary directory. This test +# simulates that behavior, verifying golang.org/issue/36568. # -# Setting GODEBUG=modcacheunzipinplace=1 opts into new behavior: a downloaded -# module is unzipped in place. A .partial file is created elsewhere to indicate -# that the extraction is incomplete. -# -# Verifies golang.org/issue/36568. +# Since 1.16, we extract to the final directory, but we create a .partial file +# so that if we crash, other processes know the directory is incomplete. [!windows] skip [short] skip -# Control case: check that the default behavior fails. -# This is commented out to avoid flakiness. We can't reproduce the failure -# 100% of the time. -# ! go run downloader.go - -# Experiment: check that the new behavior does not fail. -env GODEBUG=modcacheunzipinplace=1 go run downloader.go -- go.mod -- diff --git a/src/cmd/go/testdata/script/mod_download_partial.txt b/src/cmd/go/testdata/script/mod_download_partial.txt index 8d31970160..0aab60ddaf 100644 --- a/src/cmd/go/testdata/script/mod_download_partial.txt +++ b/src/cmd/go/testdata/script/mod_download_partial.txt @@ -46,7 +46,6 @@ rm $GOPATH/pkg/mod/rsc.io/quote@v1.5.2/go.mod # 'go mod download' should not leave behind a directory or a .partial file # if there is an error extracting the zip file. -env GODEBUG=modcacheunzipinplace=1 rm $GOPATH/pkg/mod/rsc.io/quote@v1.5.2 cp empty $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.zip ! 
go mod download From 41df0e22184a0fcfb1e67e994c993239e9c2efc7 Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor Date: Thu, 1 Oct 2020 14:55:11 -0700 Subject: [PATCH 093/281] reflect: add test for variadic reflect.Type.Method For #41737 Change-Id: Id065880dd7da54dec1b45662c202aeb7f8397c60 Reviewed-on: https://go-review.googlesource.com/c/go/+/258819 Trust: Ian Lance Taylor Run-TryBot: Ian Lance Taylor TryBot-Result: Go Bot Reviewed-by: Cherry Zhang --- src/reflect/all_test.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/reflect/all_test.go b/src/reflect/all_test.go index abdfe41908..ec87ec0c8a 100644 --- a/src/reflect/all_test.go +++ b/src/reflect/all_test.go @@ -2405,8 +2405,14 @@ func TestVariadicMethodValue(t *testing.T) { points := []Point{{20, 21}, {22, 23}, {24, 25}} want := int64(p.TotalDist(points[0], points[1], points[2])) + // Variadic method of type. + tfunc := TypeOf((func(Point, ...Point) int)(nil)) + if tt := TypeOf(p).Method(4).Type; tt != tfunc { + t.Errorf("Variadic Method Type from TypeOf is %s; want %s", tt, tfunc) + } + // Curried method of value. - tfunc := TypeOf((func(...Point) int)(nil)) + tfunc = TypeOf((func(...Point) int)(nil)) v := ValueOf(p).Method(4) if tt := v.Type(); tt != tfunc { t.Errorf("Variadic Method Type is %s; want %s", tt, tfunc) From fe2cfb74ba6352990f5b41260b99e80f78e4a90a Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Thu, 1 Oct 2020 14:49:33 -0700 Subject: [PATCH 094/281] all: drop 387 support My last 387 CL. So sad ... ... ... ... not! 
Fixes #40255 Change-Id: I8d4ddb744b234b8adc735db2f7c3c7b6d8bbdfa4 Reviewed-on: https://go-review.googlesource.com/c/go/+/258957 Trust: Keith Randall Run-TryBot: Keith Randall TryBot-Result: Go Bot Reviewed-by: Cherry Zhang --- src/cmd/asm/internal/asm/endtoend_test.go | 7 +- src/cmd/compile/internal/gc/float_test.go | 19 - src/cmd/compile/internal/gc/go.go | 5 - src/cmd/compile/internal/gc/ssa.go | 15 +- src/cmd/compile/internal/ssa/config.go | 6 - src/cmd/compile/internal/ssa/gen/386.rules | 10 +- src/cmd/compile/internal/ssa/gen/386Ops.go | 14 - src/cmd/compile/internal/ssa/opGen.go | 13 - src/cmd/compile/internal/ssa/regalloc.go | 14 - src/cmd/compile/internal/ssa/rewrite386.go | 84 +---- src/cmd/compile/internal/x86/387.go | 403 --------------------- src/cmd/compile/internal/x86/galign.go | 17 +- src/cmd/compile/internal/x86/ssa.go | 2 - src/cmd/dist/build.go | 15 - src/cmd/dist/buildruntime.go | 2 - src/cmd/dist/cpuid_386.s | 16 - src/cmd/dist/cpuid_amd64.s | 16 - src/cmd/dist/cpuid_default.s | 10 - src/cmd/dist/util_gc.go | 12 - src/cmd/dist/util_gccgo.go | 13 - src/cmd/go/alldocs.go | 3 - src/cmd/go/internal/cfg/cfg.go | 3 - src/cmd/go/internal/envcmd/env.go | 5 +- src/cmd/go/internal/help/helpdoc.go | 3 - src/cmd/go/internal/work/exec.go | 4 +- src/cmd/internal/objabi/util.go | 9 +- src/internal/cfg/cfg.go | 1 - src/reflect/all_test.go | 18 - src/runtime/mkpreempt.go | 33 +- src/runtime/preempt_386.s | 45 +-- src/runtime/vlrt.go | 5 +- test/codegen/arithmetic.go | 6 +- test/codegen/floats.go | 19 +- test/codegen/math.go | 2 +- test/codegen/memops.go | 32 +- test/run.go | 12 +- 36 files changed, 97 insertions(+), 796 deletions(-) delete mode 100644 src/cmd/compile/internal/x86/387.go delete mode 100644 src/cmd/dist/cpuid_386.s delete mode 100644 src/cmd/dist/cpuid_amd64.s delete mode 100644 src/cmd/dist/cpuid_default.s diff --git a/src/cmd/asm/internal/asm/endtoend_test.go b/src/cmd/asm/internal/asm/endtoend_test.go index 0759b7d10f..15202dc5dc 100644 --- 
a/src/cmd/asm/internal/asm/endtoend_test.go +++ b/src/cmd/asm/internal/asm/endtoend_test.go @@ -353,12 +353,7 @@ func testErrors(t *testing.T, goarch, file string) { } func Test386EndToEnd(t *testing.T) { - defer func(old string) { objabi.GO386 = old }(objabi.GO386) - for _, go386 := range []string{"387", "sse2"} { - t.Logf("GO386=%v", go386) - objabi.GO386 = go386 - testEndToEnd(t, "386", "386") - } + testEndToEnd(t, "386", "386") } func TestARMEndToEnd(t *testing.T) { diff --git a/src/cmd/compile/internal/gc/float_test.go b/src/cmd/compile/internal/gc/float_test.go index 6ae363be22..c619d25705 100644 --- a/src/cmd/compile/internal/gc/float_test.go +++ b/src/cmd/compile/internal/gc/float_test.go @@ -6,17 +6,9 @@ package gc import ( "math" - "os" - "runtime" "testing" ) -// For GO386=387, make sure fucomi* opcodes are not used -// for comparison operations. -// Note that this test will fail only on a Pentium MMX -// processor (with GOARCH=386 GO386=387), as it just runs -// some code and looks for an unimplemented instruction fault. - //go:noinline func compare1(a, b float64) bool { return a < b @@ -137,9 +129,6 @@ func TestFloatCompareFolded(t *testing.T) { } } -// For GO386=387, make sure fucomi* opcodes are not used -// for float->int conversions. - //go:noinline func cvt1(a float64) uint64 { return uint64(a) @@ -370,14 +359,6 @@ func TestFloat32StoreToLoadConstantFold(t *testing.T) { // are not converted to quiet NaN (qNaN) values during compilation. // See issue #27193 for more information. - // TODO: this method for detecting 387 won't work if the compiler has been - // built using GOARCH=386 GO386=387 and either the target is a different - // architecture or the GO386=387 environment variable is not set when the - // test is run. 
- if runtime.GOARCH == "386" && os.Getenv("GO386") == "387" { - t.Skip("signaling NaNs are not propagated on 387 (issue #27516)") - } - // signaling NaNs { const nan = uint32(0x7f800001) // sNaN diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go index 9079ce2afc..2fbdf71055 100644 --- a/src/cmd/compile/internal/gc/go.go +++ b/src/cmd/compile/internal/gc/go.go @@ -259,7 +259,6 @@ type Arch struct { REGSP int MAXWIDTH int64 - Use387 bool // should 386 backend use 387 FP instructions instead of sse2. SoftFloat bool PadFrame func(int64) int64 @@ -328,10 +327,6 @@ var ( BoundsCheckFunc [ssa.BoundsKindCount]*obj.LSym ExtendCheckFunc [ssa.BoundsKindCount]*obj.LSym - // GO386=387 - ControlWord64trunc, - ControlWord32 *obj.LSym - // Wasm WasmMove, WasmZero, diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 1d50cefe54..32394c4b1a 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -62,9 +62,6 @@ func initssaconfig() { _ = types.NewPtr(types.Errortype) // *error types.NewPtrCacheEnabled = false ssaConfig = ssa.NewConfig(thearch.LinkArch.Name, *types_, Ctxt, Debug['N'] == 0) - if thearch.LinkArch.Name == "386" { - ssaConfig.Set387(thearch.Use387) - } ssaConfig.SoftFloat = thearch.SoftFloat ssaConfig.Race = flag_race ssaCaches = make([]ssa.Cache, nBackendWorkers) @@ -175,10 +172,6 @@ func initssaconfig() { ExtendCheckFunc[ssa.BoundsSlice3CU] = sysvar("panicExtendSlice3CU") } - // GO386=387 runtime definitions - ControlWord64trunc = sysvar("controlWord64trunc") // uint16 - ControlWord32 = sysvar("controlWord32") // uint16 - // Wasm (all asm funcs with special ABIs) WasmMove = sysvar("wasmMove") WasmZero = sysvar("wasmZero") @@ -5946,9 +5939,7 @@ type SSAGenState struct { // bstart remembers where each block starts (indexed by block ID) bstart []*obj.Prog - // 387 port: maps from SSE registers (REG_X?) to 387 registers (REG_F?) 
- SSEto387 map[int16]int16 - // Some architectures require a 64-bit temporary for FP-related register shuffling. Examples include x86-387, PPC, and Sparc V8. + // Some architectures require a 64-bit temporary for FP-related register shuffling. Examples include PPC and Sparc V8. ScratchFpMem *Node maxarg int64 // largest frame size for arguments to calls made by the function @@ -6115,10 +6106,6 @@ func genssa(f *ssa.Func, pp *Progs) { progToBlock[s.pp.next] = f.Blocks[0] } - if thearch.Use387 { - s.SSEto387 = map[int16]int16{} - } - s.ScratchFpMem = e.scratchFpMem if Ctxt.Flag_locationlists { diff --git a/src/cmd/compile/internal/ssa/config.go b/src/cmd/compile/internal/ssa/config.go index 88a406deb9..649b5ba820 100644 --- a/src/cmd/compile/internal/ssa/config.go +++ b/src/cmd/compile/internal/ssa/config.go @@ -38,7 +38,6 @@ type Config struct { useSSE bool // Use SSE for non-float operations useAvg bool // Use optimizations that need Avg* operations useHmul bool // Use optimizations that need Hmul* operations - use387 bool // GO386=387 SoftFloat bool // Race bool // race detector enabled NeedsFpScratch bool // No direct move between GP and FP register sets @@ -387,9 +386,4 @@ func NewConfig(arch string, types Types, ctxt *obj.Link, optimize bool) *Config return c } -func (c *Config) Set387(b bool) { - c.NeedsFpScratch = b - c.use387 = b -} - func (c *Config) Ctxt() *obj.Link { return c.ctxt } diff --git a/src/cmd/compile/internal/ssa/gen/386.rules b/src/cmd/compile/internal/ssa/gen/386.rules index 4a8244eb27..6a0b87cab4 100644 --- a/src/cmd/compile/internal/ssa/gen/386.rules +++ b/src/cmd/compile/internal/ssa/gen/386.rules @@ -38,10 +38,8 @@ (Xor(32|16|8) ...) => (XORL ...) (Neg(32|16|8) ...) => (NEGL ...) 
-(Neg32F x) && !config.use387 => (PXOR x (MOVSSconst [float32(math.Copysign(0, -1))])) -(Neg64F x) && !config.use387 => (PXOR x (MOVSDconst [math.Copysign(0, -1)])) -(Neg32F x) && config.use387 => (FCHS x) -(Neg64F x) && config.use387 => (FCHS x) +(Neg32F x) => (PXOR x (MOVSSconst [float32(math.Copysign(0, -1))])) +(Neg64F x) => (PXOR x (MOVSDconst [math.Copysign(0, -1)])) (Com(32|16|8) ...) => (NOTL ...) @@ -670,8 +668,8 @@ // Merge load/store to op ((ADD|AND|OR|XOR|SUB|MUL)L x l:(MOVLload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|AND|OR|XOR|SUB|MUL)Lload x [off] {sym} ptr mem) -((ADD|SUB|MUL|DIV)SD x l:(MOVSDload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) => ((ADD|SUB|MUL|DIV)SDload x [off] {sym} ptr mem) -((ADD|SUB|MUL|DIV)SS x l:(MOVSSload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) => ((ADD|SUB|MUL|DIV)SSload x [off] {sym} ptr mem) +((ADD|SUB|MUL|DIV)SD x l:(MOVSDload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|MUL|DIV)SDload x [off] {sym} ptr mem) +((ADD|SUB|MUL|DIV)SS x l:(MOVSSload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|MUL|DIV)SSload x [off] {sym} ptr mem) (MOVLstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Lload x [off] {sym} ptr mem) mem) && y.Uses==1 && clobber(y) => ((ADD|AND|OR|XOR)Lmodify [off] {sym} ptr x mem) (MOVLstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR)L l:(MOVLload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) => ((ADD|SUB|AND|OR|XOR)Lmodify [off] {sym} ptr x mem) diff --git a/src/cmd/compile/internal/ssa/gen/386Ops.go b/src/cmd/compile/internal/ssa/gen/386Ops.go index ddabde7d3d..737b99c371 100644 --- a/src/cmd/compile/internal/ssa/gen/386Ops.go +++ b/src/cmd/compile/internal/ssa/gen/386Ops.go @@ -51,17 +51,6 @@ var regNames386 = []string{ "SB", } -// Notes on 387 support. 
-// - The 387 has a weird stack-register setup for floating-point registers. -// We use these registers when SSE registers are not available (when GO386=387). -// - We use the same register names (X0-X7) but they refer to the 387 -// floating-point registers. That way, most of the SSA backend is unchanged. -// - The instruction generation pass maintains an SSE->387 register mapping. -// This mapping is updated whenever the FP stack is pushed or popped so that -// we can always find a given SSE register even when the TOS pointer has changed. -// - To facilitate the mapping from SSE to 387, we enforce that -// every basic block starts and ends with an empty floating-point stack. - func init() { // Make map from reg names to reg integers. if len(regNames386) > 64 { @@ -552,9 +541,6 @@ func init() { {name: "FlagGT_UGT"}, // signed > and unsigned < {name: "FlagGT_ULT"}, // signed > and unsigned > - // Special op for -x on 387 - {name: "FCHS", argLength: 1, reg: fp11}, - // Special ops for PIC floating-point constants. // MOVSXconst1 loads the address of the constant-pool entry into a register. // MOVSXconst2 loads the constant from that address. 
diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 9fe943c2e0..d7d2b24a48 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -536,7 +536,6 @@ const ( Op386FlagLT_UGT Op386FlagGT_UGT Op386FlagGT_ULT - Op386FCHS Op386MOVSSconst1 Op386MOVSDconst1 Op386MOVSSconst2 @@ -6060,18 +6059,6 @@ var opcodeTable = [...]opInfo{ argLen: 0, reg: regInfo{}, }, - { - name: "FCHS", - argLen: 1, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 - }, - outputs: []outputInfo{ - {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 - }, - }, - }, { name: "MOVSSconst1", auxType: auxFloat32, diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go index 64c6aed3e7..691530ec0b 100644 --- a/src/cmd/compile/internal/ssa/regalloc.go +++ b/src/cmd/compile/internal/ssa/regalloc.go @@ -625,9 +625,6 @@ func (s *regAllocState) init(f *Func) { s.f.fe.Fatalf(src.NoXPos, "arch %s not implemented", s.f.Config.arch) } } - if s.f.Config.use387 { - s.allocatable &^= 1 << 15 // X7 disallowed (one 387 register is used as scratch space during SSE->387 generation in ../x86/387.go) - } // Linear scan register allocation can be influenced by the order in which blocks appear. // Decouple the register allocation order from the generated block order. @@ -1024,9 +1021,6 @@ func (s *regAllocState) regalloc(f *Func) { if phiRegs[i] != noRegister { continue } - if s.f.Config.use387 && v.Type.IsFloat() { - continue // 387 can't handle floats in registers between blocks - } m := s.compatRegs(v.Type) &^ phiUsed &^ s.used if m != 0 { r := pickReg(m) @@ -1528,11 +1522,6 @@ func (s *regAllocState) regalloc(f *Func) { s.freeUseRecords = u } - // Spill any values that can't live across basic block boundaries. 
- if s.f.Config.use387 { - s.freeRegs(s.f.Config.fpRegMask) - } - // If we are approaching a merge point and we are the primary // predecessor of it, find live values that we use soon after // the merge point and promote them to registers now. @@ -1562,9 +1551,6 @@ func (s *regAllocState) regalloc(f *Func) { continue } v := s.orig[vid] - if s.f.Config.use387 && v.Type.IsFloat() { - continue // 387 can't handle floats in registers between blocks - } m := s.compatRegs(v.Type) &^ s.used if m&^desired.avoid != 0 { m &^= desired.avoid diff --git a/src/cmd/compile/internal/ssa/rewrite386.go b/src/cmd/compile/internal/ssa/rewrite386.go index fc1e0541b2..0f08160f44 100644 --- a/src/cmd/compile/internal/ssa/rewrite386.go +++ b/src/cmd/compile/internal/ssa/rewrite386.go @@ -1310,10 +1310,8 @@ func rewriteValue386_Op386ADDLmodify(v *Value) bool { func rewriteValue386_Op386ADDSD(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - config := b.Func.Config // match: (ADDSD x l:(MOVSDload [off] {sym} ptr mem)) - // cond: canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ADDSDload x [off] {sym} ptr mem) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { @@ -1326,7 +1324,7 @@ func rewriteValue386_Op386ADDSD(v *Value) bool { sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] - if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { continue } v.reset(Op386ADDSDload) @@ -1395,10 +1393,8 @@ func rewriteValue386_Op386ADDSDload(v *Value) bool { func rewriteValue386_Op386ADDSS(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - config := b.Func.Config // match: (ADDSS x l:(MOVSSload [off] {sym} ptr mem)) - // cond: canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ADDSSload x [off] {sym} ptr mem) for { for _i0 := 0; _i0 <= 
1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { @@ -1411,7 +1407,7 @@ func rewriteValue386_Op386ADDSS(v *Value) bool { sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] - if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { continue } v.reset(Op386ADDSSload) @@ -2640,10 +2636,8 @@ func rewriteValue386_Op386CMPWload(v *Value) bool { func rewriteValue386_Op386DIVSD(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - config := b.Func.Config // match: (DIVSD x l:(MOVSDload [off] {sym} ptr mem)) - // cond: canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (DIVSDload x [off] {sym} ptr mem) for { x := v_0 @@ -2655,7 +2649,7 @@ func rewriteValue386_Op386DIVSD(v *Value) bool { sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] - if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(Op386DIVSDload) @@ -2722,10 +2716,8 @@ func rewriteValue386_Op386DIVSDload(v *Value) bool { func rewriteValue386_Op386DIVSS(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - config := b.Func.Config // match: (DIVSS x l:(MOVSSload [off] {sym} ptr mem)) - // cond: canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (DIVSSload x [off] {sym} ptr mem) for { x := v_0 @@ -2737,7 +2729,7 @@ func rewriteValue386_Op386DIVSS(v *Value) bool { sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] - if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(Op386DIVSSload) @@ -6104,10 +6096,8 @@ func rewriteValue386_Op386MULLload(v *Value) bool { func rewriteValue386_Op386MULSD(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - config := b.Func.Config // match: (MULSD x 
l:(MOVSDload [off] {sym} ptr mem)) - // cond: canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (MULSDload x [off] {sym} ptr mem) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { @@ -6120,7 +6110,7 @@ func rewriteValue386_Op386MULSD(v *Value) bool { sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] - if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { continue } v.reset(Op386MULSDload) @@ -6189,10 +6179,8 @@ func rewriteValue386_Op386MULSDload(v *Value) bool { func rewriteValue386_Op386MULSS(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - config := b.Func.Config // match: (MULSS x l:(MOVSSload [off] {sym} ptr mem)) - // cond: canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (MULSSload x [off] {sym} ptr mem) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { @@ -6205,7 +6193,7 @@ func rewriteValue386_Op386MULSS(v *Value) bool { sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] - if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { continue } v.reset(Op386MULSSload) @@ -8187,10 +8175,8 @@ func rewriteValue386_Op386SUBLmodify(v *Value) bool { func rewriteValue386_Op386SUBSD(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - config := b.Func.Config // match: (SUBSD x l:(MOVSDload [off] {sym} ptr mem)) - // cond: canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (SUBSDload x [off] {sym} ptr mem) for { x := v_0 @@ -8202,7 +8188,7 @@ func rewriteValue386_Op386SUBSD(v *Value) bool { sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] - if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) { + if 
!(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(Op386SUBSDload) @@ -8269,10 +8255,8 @@ func rewriteValue386_Op386SUBSDload(v *Value) bool { func rewriteValue386_Op386SUBSS(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - config := b.Func.Config // match: (SUBSS x l:(MOVSSload [off] {sym} ptr mem)) - // cond: canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (SUBSSload x [off] {sym} ptr mem) for { x := v_0 @@ -8284,7 +8268,7 @@ func rewriteValue386_Op386SUBSS(v *Value) bool { sym := auxToSym(l.Aux) mem := l.Args[1] ptr := l.Args[0] - if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(Op386SUBSSload) @@ -10043,68 +10027,32 @@ func rewriteValue386_OpMove(v *Value) bool { func rewriteValue386_OpNeg32F(v *Value) bool { v_0 := v.Args[0] b := v.Block - config := b.Func.Config typ := &b.Func.Config.Types // match: (Neg32F x) - // cond: !config.use387 // result: (PXOR x (MOVSSconst [float32(math.Copysign(0, -1))])) for { x := v_0 - if !(!config.use387) { - break - } v.reset(Op386PXOR) v0 := b.NewValue0(v.Pos, Op386MOVSSconst, typ.Float32) v0.AuxInt = float32ToAuxInt(float32(math.Copysign(0, -1))) v.AddArg2(x, v0) return true } - // match: (Neg32F x) - // cond: config.use387 - // result: (FCHS x) - for { - x := v_0 - if !(config.use387) { - break - } - v.reset(Op386FCHS) - v.AddArg(x) - return true - } - return false } func rewriteValue386_OpNeg64F(v *Value) bool { v_0 := v.Args[0] b := v.Block - config := b.Func.Config typ := &b.Func.Config.Types // match: (Neg64F x) - // cond: !config.use387 // result: (PXOR x (MOVSDconst [math.Copysign(0, -1)])) for { x := v_0 - if !(!config.use387) { - break - } v.reset(Op386PXOR) v0 := b.NewValue0(v.Pos, Op386MOVSDconst, typ.Float64) v0.AuxInt = float64ToAuxInt(math.Copysign(0, -1)) v.AddArg2(x, v0) return true } - // match: (Neg64F x) - // 
cond: config.use387 - // result: (FCHS x) - for { - x := v_0 - if !(config.use387) { - break - } - v.reset(Op386FCHS) - v.AddArg(x) - return true - } - return false } func rewriteValue386_OpNeq16(v *Value) bool { v_1 := v.Args[1] diff --git a/src/cmd/compile/internal/x86/387.go b/src/cmd/compile/internal/x86/387.go deleted file mode 100644 index 594adb2cd5..0000000000 --- a/src/cmd/compile/internal/x86/387.go +++ /dev/null @@ -1,403 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package x86 - -import ( - "cmd/compile/internal/gc" - "cmd/compile/internal/ssa" - "cmd/compile/internal/types" - "cmd/internal/obj" - "cmd/internal/obj/x86" - "math" -) - -// Generates code for v using 387 instructions. -func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) { - // The SSA compiler pretends that it has an SSE backend. - // If we don't have one of those, we need to translate - // all the SSE ops to equivalent 387 ops. That's what this - // function does. 
- - switch v.Op { - case ssa.Op386MOVSSconst, ssa.Op386MOVSDconst: - iv := uint64(v.AuxInt) - if iv == 0x0000000000000000 { // +0.0 - s.Prog(x86.AFLDZ) - } else if iv == 0x3ff0000000000000 { // +1.0 - s.Prog(x86.AFLD1) - } else if iv == 0x8000000000000000 { // -0.0 - s.Prog(x86.AFLDZ) - s.Prog(x86.AFCHS) - } else if iv == 0xbff0000000000000 { // -1.0 - s.Prog(x86.AFLD1) - s.Prog(x86.AFCHS) - } else if iv == 0x400921fb54442d18 { // +pi - s.Prog(x86.AFLDPI) - } else if iv == 0xc00921fb54442d18 { // -pi - s.Prog(x86.AFLDPI) - s.Prog(x86.AFCHS) - } else { // others - p := s.Prog(loadPush(v.Type)) - p.From.Type = obj.TYPE_FCONST - p.From.Val = math.Float64frombits(iv) - p.To.Type = obj.TYPE_REG - p.To.Reg = x86.REG_F0 - } - popAndSave(s, v) - - case ssa.Op386MOVSSconst2, ssa.Op386MOVSDconst2: - p := s.Prog(loadPush(v.Type)) - p.From.Type = obj.TYPE_MEM - p.From.Reg = v.Args[0].Reg() - p.To.Type = obj.TYPE_REG - p.To.Reg = x86.REG_F0 - popAndSave(s, v) - - case ssa.Op386MOVSSload, ssa.Op386MOVSDload, ssa.Op386MOVSSloadidx1, ssa.Op386MOVSDloadidx1, ssa.Op386MOVSSloadidx4, ssa.Op386MOVSDloadidx8: - p := s.Prog(loadPush(v.Type)) - p.From.Type = obj.TYPE_MEM - p.From.Reg = v.Args[0].Reg() - gc.AddAux(&p.From, v) - switch v.Op { - case ssa.Op386MOVSSloadidx1, ssa.Op386MOVSDloadidx1: - p.From.Scale = 1 - p.From.Index = v.Args[1].Reg() - if p.From.Index == x86.REG_SP { - p.From.Reg, p.From.Index = p.From.Index, p.From.Reg - } - case ssa.Op386MOVSSloadidx4: - p.From.Scale = 4 - p.From.Index = v.Args[1].Reg() - case ssa.Op386MOVSDloadidx8: - p.From.Scale = 8 - p.From.Index = v.Args[1].Reg() - } - p.To.Type = obj.TYPE_REG - p.To.Reg = x86.REG_F0 - popAndSave(s, v) - - case ssa.Op386MOVSSstore, ssa.Op386MOVSDstore: - // Push to-be-stored value on top of stack. - push(s, v.Args[1]) - - // Pop and store value. 
- var op obj.As - switch v.Op { - case ssa.Op386MOVSSstore: - op = x86.AFMOVFP - case ssa.Op386MOVSDstore: - op = x86.AFMOVDP - } - p := s.Prog(op) - p.From.Type = obj.TYPE_REG - p.From.Reg = x86.REG_F0 - p.To.Type = obj.TYPE_MEM - p.To.Reg = v.Args[0].Reg() - gc.AddAux(&p.To, v) - - case ssa.Op386MOVSSstoreidx1, ssa.Op386MOVSDstoreidx1, ssa.Op386MOVSSstoreidx4, ssa.Op386MOVSDstoreidx8: - push(s, v.Args[2]) - var op obj.As - switch v.Op { - case ssa.Op386MOVSSstoreidx1, ssa.Op386MOVSSstoreidx4: - op = x86.AFMOVFP - case ssa.Op386MOVSDstoreidx1, ssa.Op386MOVSDstoreidx8: - op = x86.AFMOVDP - } - p := s.Prog(op) - p.From.Type = obj.TYPE_REG - p.From.Reg = x86.REG_F0 - p.To.Type = obj.TYPE_MEM - p.To.Reg = v.Args[0].Reg() - gc.AddAux(&p.To, v) - switch v.Op { - case ssa.Op386MOVSSstoreidx1, ssa.Op386MOVSDstoreidx1: - p.To.Scale = 1 - p.To.Index = v.Args[1].Reg() - if p.To.Index == x86.REG_SP { - p.To.Reg, p.To.Index = p.To.Index, p.To.Reg - } - case ssa.Op386MOVSSstoreidx4: - p.To.Scale = 4 - p.To.Index = v.Args[1].Reg() - case ssa.Op386MOVSDstoreidx8: - p.To.Scale = 8 - p.To.Index = v.Args[1].Reg() - } - - case ssa.Op386ADDSS, ssa.Op386ADDSD, ssa.Op386SUBSS, ssa.Op386SUBSD, - ssa.Op386MULSS, ssa.Op386MULSD, ssa.Op386DIVSS, ssa.Op386DIVSD: - if v.Reg() != v.Args[0].Reg() { - v.Fatalf("input[0] and output not in same register %s", v.LongString()) - } - - // Push arg1 on top of stack - push(s, v.Args[1]) - - // Set precision if needed. 64 bits is the default. - switch v.Op { - case ssa.Op386ADDSS, ssa.Op386SUBSS, ssa.Op386MULSS, ssa.Op386DIVSS: - // Save AX so we can use it as scratch space. - p := s.Prog(x86.AMOVL) - p.From.Type = obj.TYPE_REG - p.From.Reg = x86.REG_AX - s.AddrScratch(&p.To) - // Install a 32-bit version of the control word. - installControlWord(s, gc.ControlWord32, x86.REG_AX) - // Restore AX. 
- p = s.Prog(x86.AMOVL) - s.AddrScratch(&p.From) - p.To.Type = obj.TYPE_REG - p.To.Reg = x86.REG_AX - } - - var op obj.As - switch v.Op { - case ssa.Op386ADDSS, ssa.Op386ADDSD: - op = x86.AFADDDP - case ssa.Op386SUBSS, ssa.Op386SUBSD: - op = x86.AFSUBDP - case ssa.Op386MULSS, ssa.Op386MULSD: - op = x86.AFMULDP - case ssa.Op386DIVSS, ssa.Op386DIVSD: - op = x86.AFDIVDP - } - p := s.Prog(op) - p.From.Type = obj.TYPE_REG - p.From.Reg = x86.REG_F0 - p.To.Type = obj.TYPE_REG - p.To.Reg = s.SSEto387[v.Reg()] + 1 - - // Restore precision if needed. - switch v.Op { - case ssa.Op386ADDSS, ssa.Op386SUBSS, ssa.Op386MULSS, ssa.Op386DIVSS: - restoreControlWord(s) - } - - case ssa.Op386UCOMISS, ssa.Op386UCOMISD: - push(s, v.Args[0]) - - // Compare. - p := s.Prog(x86.AFUCOMP) - p.From.Type = obj.TYPE_REG - p.From.Reg = x86.REG_F0 - p.To.Type = obj.TYPE_REG - p.To.Reg = s.SSEto387[v.Args[1].Reg()] + 1 - - // Save AX. - p = s.Prog(x86.AMOVL) - p.From.Type = obj.TYPE_REG - p.From.Reg = x86.REG_AX - s.AddrScratch(&p.To) - - // Move status word into AX. - p = s.Prog(x86.AFSTSW) - p.To.Type = obj.TYPE_REG - p.To.Reg = x86.REG_AX - - // Then move the flags we need to the integer flags. - s.Prog(x86.ASAHF) - - // Restore AX. - p = s.Prog(x86.AMOVL) - s.AddrScratch(&p.From) - p.To.Type = obj.TYPE_REG - p.To.Reg = x86.REG_AX - - case ssa.Op386SQRTSD: - push(s, v.Args[0]) - s.Prog(x86.AFSQRT) - popAndSave(s, v) - - case ssa.Op386FCHS: - push(s, v.Args[0]) - s.Prog(x86.AFCHS) - popAndSave(s, v) - - case ssa.Op386CVTSL2SS, ssa.Op386CVTSL2SD: - p := s.Prog(x86.AMOVL) - p.From.Type = obj.TYPE_REG - p.From.Reg = v.Args[0].Reg() - s.AddrScratch(&p.To) - p = s.Prog(x86.AFMOVL) - s.AddrScratch(&p.From) - p.To.Type = obj.TYPE_REG - p.To.Reg = x86.REG_F0 - popAndSave(s, v) - - case ssa.Op386CVTTSD2SL, ssa.Op386CVTTSS2SL: - push(s, v.Args[0]) - - // Load control word which truncates (rounds towards zero). - installControlWord(s, gc.ControlWord64trunc, v.Reg()) - - // Now do the conversion. 
- p := s.Prog(x86.AFMOVLP) - p.From.Type = obj.TYPE_REG - p.From.Reg = x86.REG_F0 - s.AddrScratch(&p.To) - p = s.Prog(x86.AMOVL) - s.AddrScratch(&p.From) - p.To.Type = obj.TYPE_REG - p.To.Reg = v.Reg() - - // Restore control word. - restoreControlWord(s) - - case ssa.Op386CVTSS2SD: - // float32 -> float64 is a nop - push(s, v.Args[0]) - popAndSave(s, v) - - case ssa.Op386CVTSD2SS: - // Round to nearest float32. - push(s, v.Args[0]) - p := s.Prog(x86.AFMOVFP) - p.From.Type = obj.TYPE_REG - p.From.Reg = x86.REG_F0 - s.AddrScratch(&p.To) - p = s.Prog(x86.AFMOVF) - s.AddrScratch(&p.From) - p.To.Type = obj.TYPE_REG - p.To.Reg = x86.REG_F0 - popAndSave(s, v) - - case ssa.OpLoadReg: - if !v.Type.IsFloat() { - ssaGenValue(s, v) - return - } - // Load+push the value we need. - p := s.Prog(loadPush(v.Type)) - gc.AddrAuto(&p.From, v.Args[0]) - p.To.Type = obj.TYPE_REG - p.To.Reg = x86.REG_F0 - // Move the value to its assigned register. - popAndSave(s, v) - - case ssa.OpStoreReg: - if !v.Type.IsFloat() { - ssaGenValue(s, v) - return - } - push(s, v.Args[0]) - var op obj.As - switch v.Type.Size() { - case 4: - op = x86.AFMOVFP - case 8: - op = x86.AFMOVDP - } - p := s.Prog(op) - p.From.Type = obj.TYPE_REG - p.From.Reg = x86.REG_F0 - gc.AddrAuto(&p.To, v) - - case ssa.OpCopy: - if !v.Type.IsFloat() { - ssaGenValue(s, v) - return - } - push(s, v.Args[0]) - popAndSave(s, v) - - case ssa.Op386CALLstatic, ssa.Op386CALLclosure, ssa.Op386CALLinter: - flush387(s) // Calls must empty the FP stack. - fallthrough // then issue the call as normal - default: - ssaGenValue(s, v) - } -} - -// push pushes v onto the floating-point stack. v must be in a register. -func push(s *gc.SSAGenState, v *ssa.Value) { - p := s.Prog(x86.AFMOVD) - p.From.Type = obj.TYPE_REG - p.From.Reg = s.SSEto387[v.Reg()] - p.To.Type = obj.TYPE_REG - p.To.Reg = x86.REG_F0 -} - -// popAndSave pops a value off of the floating-point stack and stores -// it in the register assigned to v. 
-func popAndSave(s *gc.SSAGenState, v *ssa.Value) { - r := v.Reg() - if _, ok := s.SSEto387[r]; ok { - // Pop value, write to correct register. - p := s.Prog(x86.AFMOVDP) - p.From.Type = obj.TYPE_REG - p.From.Reg = x86.REG_F0 - p.To.Type = obj.TYPE_REG - p.To.Reg = s.SSEto387[v.Reg()] + 1 - } else { - // Don't actually pop value. This 387 register is now the - // new home for the not-yet-assigned-a-home SSE register. - // Increase the register mapping of all other registers by one. - for rSSE, r387 := range s.SSEto387 { - s.SSEto387[rSSE] = r387 + 1 - } - s.SSEto387[r] = x86.REG_F0 - } -} - -// loadPush returns the opcode for load+push of the given type. -func loadPush(t *types.Type) obj.As { - if t.Size() == 4 { - return x86.AFMOVF - } - return x86.AFMOVD -} - -// flush387 removes all entries from the 387 floating-point stack. -func flush387(s *gc.SSAGenState) { - for k := range s.SSEto387 { - p := s.Prog(x86.AFMOVDP) - p.From.Type = obj.TYPE_REG - p.From.Reg = x86.REG_F0 - p.To.Type = obj.TYPE_REG - p.To.Reg = x86.REG_F0 - delete(s.SSEto387, k) - } -} - -func ssaGenBlock387(s *gc.SSAGenState, b, next *ssa.Block) { - // Empty the 387's FP stack before the block ends. - flush387(s) - - ssaGenBlock(s, b, next) -} - -// installControlWord saves the current floating-point control -// word and installs a new one loaded from cw. -// scratchReg must be an unused register. -// This call must be paired with restoreControlWord. -// Bytes 4-5 of the scratch space (s.AddrScratch) are used between -// this call and restoreControlWord. -func installControlWord(s *gc.SSAGenState, cw *obj.LSym, scratchReg int16) { - // Save current control word. - p := s.Prog(x86.AFSTCW) - s.AddrScratch(&p.To) - p.To.Offset += 4 - - // Materialize address of new control word. - // Note: this must be a seperate instruction to handle PIE correctly. - // See issue 41503. 
- p = s.Prog(x86.ALEAL) - p.From.Type = obj.TYPE_MEM - p.From.Name = obj.NAME_EXTERN - p.From.Sym = cw - p.To.Type = obj.TYPE_REG - p.To.Reg = scratchReg - - // Load replacement control word. - p = s.Prog(x86.AFLDCW) - p.From.Type = obj.TYPE_MEM - p.From.Reg = scratchReg -} -func restoreControlWord(s *gc.SSAGenState) { - p := s.Prog(x86.AFLDCW) - s.AddrScratch(&p.From) - p.From.Offset += 4 -} diff --git a/src/cmd/compile/internal/x86/galign.go b/src/cmd/compile/internal/x86/galign.go index 56c6989d93..2d20b6a6d0 100644 --- a/src/cmd/compile/internal/x86/galign.go +++ b/src/cmd/compile/internal/x86/galign.go @@ -7,26 +7,13 @@ package x86 import ( "cmd/compile/internal/gc" "cmd/internal/obj/x86" - "cmd/internal/objabi" - "fmt" - "os" ) func Init(arch *gc.Arch) { arch.LinkArch = &x86.Link386 arch.REGSP = x86.REGSP - switch v := objabi.GO386; v { - case "387": - arch.Use387 = true - arch.SSAGenValue = ssaGenValue387 - arch.SSAGenBlock = ssaGenBlock387 - case "sse2": - arch.SSAGenValue = ssaGenValue - arch.SSAGenBlock = ssaGenBlock - default: - fmt.Fprintf(os.Stderr, "unsupported setting GO386=%s\n", v) - gc.Exit(1) - } + arch.SSAGenValue = ssaGenValue + arch.SSAGenBlock = ssaGenBlock arch.MAXWIDTH = (1 << 32) - 1 arch.ZeroRange = zerorange diff --git a/src/cmd/compile/internal/x86/ssa.go b/src/cmd/compile/internal/x86/ssa.go index c21ac32297..74a4570770 100644 --- a/src/cmd/compile/internal/x86/ssa.go +++ b/src/cmd/compile/internal/x86/ssa.go @@ -852,8 +852,6 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers gc.Warnl(v.Pos, "generated nil check") } - case ssa.Op386FCHS: - v.Fatalf("FCHS in non-387 mode") case ssa.OpClobber: p := s.Prog(x86.AMOVL) p.From.Type = obj.TYPE_CONST diff --git a/src/cmd/dist/build.go b/src/cmd/dist/build.go index 3ac742fa55..5d62c1e8fa 100644 --- a/src/cmd/dist/build.go +++ b/src/cmd/dist/build.go @@ -30,7 +30,6 @@ var ( gohostos string goos string 
goarm string - go386 string gomips string gomips64 string goppc64 string @@ -142,16 +141,6 @@ func xinit() { } goarm = b - b = os.Getenv("GO386") - if b == "" { - if cansse2() { - b = "sse2" - } else { - b = "387" - } - } - go386 = b - b = os.Getenv("GOMIPS") if b == "" { b = "hardfloat" @@ -223,7 +212,6 @@ func xinit() { defaultldso = os.Getenv("GO_LDSO") // For tools being invoked but also for os.ExpandEnv. - os.Setenv("GO386", go386) os.Setenv("GOARCH", goarch) os.Setenv("GOARM", goarm) os.Setenv("GOHOSTARCH", gohostarch) @@ -1165,9 +1153,6 @@ func cmdenv() { if goarch == "arm" { xprintf(format, "GOARM", goarm) } - if goarch == "386" { - xprintf(format, "GO386", go386) - } if goarch == "mips" || goarch == "mipsle" { xprintf(format, "GOMIPS", gomips) } diff --git a/src/cmd/dist/buildruntime.go b/src/cmd/dist/buildruntime.go index 2744951597..67d1d72db4 100644 --- a/src/cmd/dist/buildruntime.go +++ b/src/cmd/dist/buildruntime.go @@ -41,7 +41,6 @@ func mkzversion(dir, file string) { // package objabi // // const defaultGOROOT = -// const defaultGO386 = // const defaultGOARM = // const defaultGOMIPS = // const defaultGOMIPS64 = @@ -70,7 +69,6 @@ func mkzbootstrap(file string) { fmt.Fprintln(&buf) fmt.Fprintf(&buf, "import \"runtime\"\n") fmt.Fprintln(&buf) - fmt.Fprintf(&buf, "const defaultGO386 = `%s`\n", go386) fmt.Fprintf(&buf, "const defaultGOARM = `%s`\n", goarm) fmt.Fprintf(&buf, "const defaultGOMIPS = `%s`\n", gomips) fmt.Fprintf(&buf, "const defaultGOMIPS64 = `%s`\n", gomips64) diff --git a/src/cmd/dist/cpuid_386.s b/src/cmd/dist/cpuid_386.s deleted file mode 100644 index 65fbb2dcb7..0000000000 --- a/src/cmd/dist/cpuid_386.s +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !gccgo - -TEXT ·cpuid(SB),$0-8 - MOVL ax+4(FP), AX - CPUID - MOVL info+0(FP), DI - MOVL AX, 0(DI) - MOVL BX, 4(DI) - MOVL CX, 8(DI) - MOVL DX, 12(DI) - RET - diff --git a/src/cmd/dist/cpuid_amd64.s b/src/cmd/dist/cpuid_amd64.s deleted file mode 100644 index ea0b9d4dc9..0000000000 --- a/src/cmd/dist/cpuid_amd64.s +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !gccgo - -TEXT ·cpuid(SB),$0-12 - MOVL ax+8(FP), AX - CPUID - MOVQ info+0(FP), DI - MOVL AX, 0(DI) - MOVL BX, 4(DI) - MOVL CX, 8(DI) - MOVL DX, 12(DI) - RET - diff --git a/src/cmd/dist/cpuid_default.s b/src/cmd/dist/cpuid_default.s deleted file mode 100644 index 6412a507a9..0000000000 --- a/src/cmd/dist/cpuid_default.s +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !386,!amd64,!gccgo - -#include "textflag.h" - -TEXT ·cpuid(SB),NOSPLIT,$0-0 - RET diff --git a/src/cmd/dist/util_gc.go b/src/cmd/dist/util_gc.go index 698beef704..17a0e6fbb5 100644 --- a/src/cmd/dist/util_gc.go +++ b/src/cmd/dist/util_gc.go @@ -6,18 +6,6 @@ package main -func cpuid(info *[4]uint32, ax uint32) - -func cansse2() bool { - if gohostarch != "386" && gohostarch != "amd64" { - return false - } - - var info [4]uint32 - cpuid(&info, 1) - return info[3]&(1<<26) != 0 // SSE2 -} - // useVFPv1 tries to execute one VFPv1 instruction on ARM. // It will crash the current process if VFPv1 is missing. 
func useVFPv1() diff --git a/src/cmd/dist/util_gccgo.go b/src/cmd/dist/util_gccgo.go index f9f01dc048..dc897236fb 100644 --- a/src/cmd/dist/util_gccgo.go +++ b/src/cmd/dist/util_gccgo.go @@ -6,19 +6,6 @@ package main -/* -int supports_sse2() { -#if defined(__i386__) || defined(__x86_64__) - return __builtin_cpu_supports("sse2"); -#else - return 0; -#endif -} -*/ -import "C" - -func cansse2() bool { return C.supports_sse2() != 0 } - func useVFPv1() {} func useVFPv3() {} diff --git a/src/cmd/go/alldocs.go b/src/cmd/go/alldocs.go index 4bc87008ff..500682ed02 100644 --- a/src/cmd/go/alldocs.go +++ b/src/cmd/go/alldocs.go @@ -1853,9 +1853,6 @@ // GOARM // For GOARCH=arm, the ARM architecture for which to compile. // Valid values are 5, 6, 7. -// GO386 -// For GOARCH=386, the floating point instruction set. -// Valid values are 387, sse2. // GOMIPS // For GOARCH=mips{,le}, whether to use floating point instructions. // Valid values are hardfloat (default), softfloat. diff --git a/src/cmd/go/internal/cfg/cfg.go b/src/cmd/go/internal/cfg/cfg.go index 9bf1db73ef..ebbaf04115 100644 --- a/src/cmd/go/internal/cfg/cfg.go +++ b/src/cmd/go/internal/cfg/cfg.go @@ -244,7 +244,6 @@ var ( // Used in envcmd.MkEnv and build ID computations. 
GOARM = envOr("GOARM", fmt.Sprint(objabi.GOARM)) - GO386 = envOr("GO386", objabi.GO386) GOMIPS = envOr("GOMIPS", objabi.GOMIPS) GOMIPS64 = envOr("GOMIPS64", objabi.GOMIPS64) GOPPC64 = envOr("GOPPC64", fmt.Sprintf("%s%d", "power", objabi.GOPPC64)) @@ -268,8 +267,6 @@ func GetArchEnv() (key, val string) { switch Goarch { case "arm": return "GOARM", GOARM - case "386": - return "GO386", GO386 case "mips", "mipsle": return "GOMIPS", GOMIPS case "mips64", "mips64le": diff --git a/src/cmd/go/internal/envcmd/env.go b/src/cmd/go/internal/envcmd/env.go index 7bd75f7305..ee0bb0d0b2 100644 --- a/src/cmd/go/internal/envcmd/env.go +++ b/src/cmd/go/internal/envcmd/env.go @@ -497,7 +497,10 @@ func lineToKey(line string) string { } // sortKeyValues sorts a sequence of lines by key. -// It differs from sort.Strings in that GO386= sorts after GO=. +// It differs from sort.Strings in that keys which are GOx where x is an ASCII +// character smaller than = sort after GO=. +// (There are no such keys currently. It used to matter for GO386 which was +// removed in Go 1.16.) func sortKeyValues(lines []string) { sort.Slice(lines, func(i, j int) bool { return lineToKey(lines[i]) < lineToKey(lines[j]) diff --git a/src/cmd/go/internal/help/helpdoc.go b/src/cmd/go/internal/help/helpdoc.go index 0ae5fd7ca9..befa10a0e4 100644 --- a/src/cmd/go/internal/help/helpdoc.go +++ b/src/cmd/go/internal/help/helpdoc.go @@ -581,9 +581,6 @@ Architecture-specific environment variables: GOARM For GOARCH=arm, the ARM architecture for which to compile. Valid values are 5, 6, 7. - GO386 - For GOARCH=386, the floating point instruction set. - Valid values are 387, sse2. GOMIPS For GOARCH=mips{,le}, whether to use floating point instructions. Valid values are hardfloat (default), softfloat. 
diff --git a/src/cmd/go/internal/work/exec.go b/src/cmd/go/internal/work/exec.go index 51fc2b588d..e68b322c7d 100644 --- a/src/cmd/go/internal/work/exec.go +++ b/src/cmd/go/internal/work/exec.go @@ -271,7 +271,7 @@ func (b *Builder) buildActionID(a *Action) cache.ActionID { fmt.Fprintf(h, "asm %q %q %q\n", b.toolID("asm"), forcedAsmflags, p.Internal.Asmflags) } - // GO386, GOARM, GOMIPS, etc. + // GOARM, GOMIPS, etc. key, val := cfg.GetArchEnv() fmt.Fprintf(h, "%s=%s\n", key, val) @@ -1175,7 +1175,7 @@ func (b *Builder) printLinkerConfig(h io.Writer, p *load.Package) { fmt.Fprintf(h, "linkflags %q\n", p.Internal.Ldflags) } - // GO386, GOARM, GOMIPS, etc. + // GOARM, GOMIPS, etc. key, val := cfg.GetArchEnv() fmt.Fprintf(h, "%s=%s\n", key, val) diff --git a/src/cmd/internal/objabi/util.go b/src/cmd/internal/objabi/util.go index b81b73a022..cedb2d0a26 100644 --- a/src/cmd/internal/objabi/util.go +++ b/src/cmd/internal/objabi/util.go @@ -24,7 +24,6 @@ var ( GOROOT = envOr("GOROOT", defaultGOROOT) GOARCH = envOr("GOARCH", defaultGOARCH) GOOS = envOr("GOOS", defaultGOOS) - GO386 = envOr("GO386", defaultGO386) GOAMD64 = goamd64() GOARM = goarm() GOMIPS = gomips() @@ -136,6 +135,14 @@ func init() { if GOARCH != "amd64" { Regabi_enabled = 0 } + + if v := os.Getenv("GO386"); v != "" && v != "sse2" { + msg := fmt.Sprintf("unsupported setting GO386=%s", v) + if v == "387" { + msg += ". 387 support was dropped in Go 1.16. Consider using gccgo instead." + } + log.Fatal(msg) + } } // Note: must agree with runtime.framepointer_enabled. 
diff --git a/src/internal/cfg/cfg.go b/src/internal/cfg/cfg.go index bdbe9df3e7..023429e441 100644 --- a/src/internal/cfg/cfg.go +++ b/src/internal/cfg/cfg.go @@ -32,7 +32,6 @@ const KnownEnv = ` FC GCCGO GO111MODULE - GO386 GOARCH GOARM GOBIN diff --git a/src/reflect/all_test.go b/src/reflect/all_test.go index ec87ec0c8a..0684eab973 100644 --- a/src/reflect/all_test.go +++ b/src/reflect/all_test.go @@ -4265,24 +4265,6 @@ var gFloat32 float32 func TestConvertNaNs(t *testing.T) { const snan uint32 = 0x7f800001 - - // Test to see if a store followed by a load of a signaling NaN - // maintains the signaling bit. The only platform known to fail - // this test is 386,GO386=387. The real test below will always fail - // if the platform can't even store+load a float without mucking - // with the bits. - gFloat32 = math.Float32frombits(snan) - runtime.Gosched() // make sure we don't optimize the store/load away - r := math.Float32bits(gFloat32) - if r != snan { - // This should only happen on 386,GO386=387. We have no way to - // test for 387, so we just make sure we're at least on 386. - if runtime.GOARCH != "386" { - t.Errorf("store/load of sNaN not faithful") - } - t.Skip("skipping test, float store+load not faithful") - } - type myFloat32 float32 x := V(myFloat32(math.Float32frombits(snan))) y := x.Convert(TypeOf(float32(0))) diff --git a/src/runtime/mkpreempt.go b/src/runtime/mkpreempt.go index c2e14cdcd6..c5bfb0f207 100644 --- a/src/runtime/mkpreempt.go +++ b/src/runtime/mkpreempt.go @@ -190,40 +190,25 @@ func (l *layout) restore() { func gen386() { p("PUSHFL") - // Save general purpose registers. + // Assign stack offsets. var l = layout{sp: "SP"} for _, reg := range regNames386 { - if reg == "SP" || strings.HasPrefix(reg, "X") { + if reg == "SP" { continue } - l.add("MOVL", reg, 4) + if strings.HasPrefix(reg, "X") { + l.add("MOVUPS", reg, 16) + } else { + l.add("MOVL", reg, 4) + } } - // Save the 387 state. 
- l.addSpecial( - "FSAVE %d(SP)\nFLDCW runtime·controlWord64(SB)", - "FRSTOR %d(SP)", - 108) - - // Save SSE state only if supported. - lSSE := layout{stack: l.stack, sp: "SP"} - for i := 0; i < 8; i++ { - lSSE.add("MOVUPS", fmt.Sprintf("X%d", i), 16) - } - - p("ADJSP $%d", lSSE.stack) + p("ADJSP $%d", l.stack) p("NOP SP") l.save() - p("CMPB internal∕cpu·X86+const_offsetX86HasSSE2(SB), $1\nJNE nosse") - lSSE.save() - label("nosse:") p("CALL ·asyncPreempt2(SB)") - p("CMPB internal∕cpu·X86+const_offsetX86HasSSE2(SB), $1\nJNE nosse2") - lSSE.restore() - label("nosse2:") l.restore() - p("ADJSP $%d", -lSSE.stack) - + p("ADJSP $%d", -l.stack) p("POPFL") p("RET") } diff --git a/src/runtime/preempt_386.s b/src/runtime/preempt_386.s index a00ac8f385..5c9b8ea224 100644 --- a/src/runtime/preempt_386.s +++ b/src/runtime/preempt_386.s @@ -5,7 +5,7 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0 PUSHFL - ADJSP $264 + ADJSP $156 NOP SP MOVL AX, 0(SP) MOVL CX, 4(SP) @@ -14,32 +14,23 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0 MOVL BP, 16(SP) MOVL SI, 20(SP) MOVL DI, 24(SP) - FSAVE 28(SP) - FLDCW runtime·controlWord64(SB) - CMPB internal∕cpu·X86+const_offsetX86HasSSE2(SB), $1 - JNE nosse - MOVUPS X0, 136(SP) - MOVUPS X1, 152(SP) - MOVUPS X2, 168(SP) - MOVUPS X3, 184(SP) - MOVUPS X4, 200(SP) - MOVUPS X5, 216(SP) - MOVUPS X6, 232(SP) - MOVUPS X7, 248(SP) -nosse: + MOVUPS X0, 28(SP) + MOVUPS X1, 44(SP) + MOVUPS X2, 60(SP) + MOVUPS X3, 76(SP) + MOVUPS X4, 92(SP) + MOVUPS X5, 108(SP) + MOVUPS X6, 124(SP) + MOVUPS X7, 140(SP) CALL ·asyncPreempt2(SB) - CMPB internal∕cpu·X86+const_offsetX86HasSSE2(SB), $1 - JNE nosse2 - MOVUPS 248(SP), X7 - MOVUPS 232(SP), X6 - MOVUPS 216(SP), X5 - MOVUPS 200(SP), X4 - MOVUPS 184(SP), X3 - MOVUPS 168(SP), X2 - MOVUPS 152(SP), X1 - MOVUPS 136(SP), X0 -nosse2: - FRSTOR 28(SP) + MOVUPS 140(SP), X7 + MOVUPS 124(SP), X6 + MOVUPS 108(SP), X5 + MOVUPS 92(SP), X4 + MOVUPS 76(SP), X3 + MOVUPS 60(SP), X2 + MOVUPS 44(SP), X1 + MOVUPS 28(SP), X0 MOVL 24(SP), DI MOVL 
20(SP), SI MOVL 16(SP), BP @@ -47,6 +38,6 @@ nosse2: MOVL 8(SP), DX MOVL 4(SP), CX MOVL 0(SP), AX - ADJSP $-264 + ADJSP $-156 POPFL RET diff --git a/src/runtime/vlrt.go b/src/runtime/vlrt.go index 38e0b32801..996c0611fd 100644 --- a/src/runtime/vlrt.go +++ b/src/runtime/vlrt.go @@ -263,7 +263,7 @@ func slowdodiv(n, d uint64) (q, r uint64) { return q, n } -// Floating point control word values for GOARCH=386 GO386=387. +// Floating point control word values. // Bits 0-5 are bits to disable floating-point exceptions. // Bits 8-9 are the precision control: // 0 = single precision a.k.a. float32 @@ -273,6 +273,5 @@ func slowdodiv(n, d uint64) (q, r uint64) { // 3 = round toward zero var ( controlWord64 uint16 = 0x3f + 2<<8 + 0<<10 - controlWord32 = 0x3f + 0<<8 + 0<<10 - controlWord64trunc = 0x3f + 2<<8 + 3<<10 + controlWord64trunc uint16 = 0x3f + 2<<8 + 3<<10 ) diff --git a/test/codegen/arithmetic.go b/test/codegen/arithmetic.go index 0bdb66a376..30f39a8da1 100644 --- a/test/codegen/arithmetic.go +++ b/test/codegen/arithmetic.go @@ -125,7 +125,7 @@ func Mul_n120(n int) int { func MulMemSrc(a []uint32, b []float32) { // 386:`IMULL\s4\([A-Z]+\),\s[A-Z]+` a[0] *= a[1] - // 386/sse2:`MULSS\s4\([A-Z]+\),\sX[0-9]+` + // 386:`MULSS\s4\([A-Z]+\),\sX[0-9]+` // amd64:`MULSS\s4\([A-Z]+\),\sX[0-9]+` b[0] *= b[1] } @@ -167,7 +167,7 @@ func MergeMuls5(a, n int) int { // -------------- // func DivMemSrc(a []float64) { - // 386/sse2:`DIVSD\s8\([A-Z]+\),\sX[0-9]+` + // 386:`DIVSD\s8\([A-Z]+\),\sX[0-9]+` // amd64:`DIVSD\s8\([A-Z]+\),\sX[0-9]+` a[0] /= a[1] } @@ -211,7 +211,7 @@ func ConstDivs(n1 uint, n2 int) (uint, int) { func FloatDivs(a []float32) float32 { // amd64:`DIVSS\s8\([A-Z]+\),\sX[0-9]+` - // 386/sse2:`DIVSS\s8\([A-Z]+\),\sX[0-9]+` + // 386:`DIVSS\s8\([A-Z]+\),\sX[0-9]+` return a[1] / a[2] } diff --git a/test/codegen/floats.go b/test/codegen/floats.go index 3fae1a327c..d115800a67 100644 --- a/test/codegen/floats.go +++ b/test/codegen/floats.go @@ -6,8 +6,6 @@ package codegen 
-import "math" - // This file contains codegen tests related to arithmetic // simplifications and optimizations on float types. // For codegen tests on integer types, see arithmetic.go. @@ -17,8 +15,7 @@ import "math" // --------------------- // func Mul2(f float64) float64 { - // 386/sse2:"ADDSD",-"MULSD" - // 386/387:"FADDDP",-"FMULDP" + // 386:"ADDSD",-"MULSD" // amd64:"ADDSD",-"MULSD" // arm/7:"ADDD",-"MULD" // arm64:"FADDD",-"FMULD" @@ -28,8 +25,7 @@ func Mul2(f float64) float64 { } func DivPow2(f1, f2, f3 float64) (float64, float64, float64) { - // 386/sse2:"MULSD",-"DIVSD" - // 386/387:"FMULDP",-"FDIVDP" + // 386:"MULSD",-"DIVSD" // amd64:"MULSD",-"DIVSD" // arm/7:"MULD",-"DIVD" // arm64:"FMULD",-"FDIVD" @@ -37,8 +33,7 @@ func DivPow2(f1, f2, f3 float64) (float64, float64, float64) { // ppc64le:"FMUL",-"FDIV" x := f1 / 16.0 - // 386/sse2:"MULSD",-"DIVSD" - // 386/387:"FMULDP",-"FDIVDP" + // 386:"MULSD",-"DIVSD" // amd64:"MULSD",-"DIVSD" // arm/7:"MULD",-"DIVD" // arm64:"FMULD",-"FDIVD" @@ -46,8 +41,7 @@ func DivPow2(f1, f2, f3 float64) (float64, float64, float64) { // ppc64le:"FMUL",-"FDIVD" y := f2 / 0.125 - // 386/sse2:"ADDSD",-"DIVSD",-"MULSD" - // 386/387:"FADDDP",-"FDIVDP",-"FMULDP" + // 386:"ADDSD",-"DIVSD",-"MULSD" // amd64:"ADDSD",-"DIVSD",-"MULSD" // arm/7:"ADDD",-"MULD",-"DIVD" // arm64:"FADDD",-"FMULD",-"FDIVD" @@ -58,11 +52,6 @@ func DivPow2(f1, f2, f3 float64) (float64, float64, float64) { return x, y, z } -func getPi() float64 { - // 386/387:"FLDPI" - return math.Pi -} - func indexLoad(b0 []float32, b1 float32, idx int) float32 { // arm64:`FMOVS\s\(R[0-9]+\)\(R[0-9]+\),\sF[0-9]+` return b0[idx] * b1 diff --git a/test/codegen/math.go b/test/codegen/math.go index 1ebfda0405..fe678eea23 100644 --- a/test/codegen/math.go +++ b/test/codegen/math.go @@ -46,7 +46,7 @@ func approx(x float64) { func sqrt(x float64) float64 { // amd64:"SQRTSD" - // 386/387:"FSQRT" 386/sse2:"SQRTSD" + // 386:"SQRTSD" // arm64:"FSQRTD" // arm/7:"SQRTD" // 
mips/hardfloat:"SQRTD" mips/softfloat:-"SQRTD" diff --git a/test/codegen/memops.go b/test/codegen/memops.go index a234283146..4b003ad861 100644 --- a/test/codegen/memops.go +++ b/test/codegen/memops.go @@ -175,33 +175,33 @@ func idxInt64(x, y []int64, i int) { func idxFloat32(x, y []float32, i int) { var t float32 - // amd64: `MOVSS\t4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), X[0-9]+` - // 386/sse2: `MOVSS\t4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), X[0-9]+` + // amd64: `MOVSS\t4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), X[0-9]+` + // 386: `MOVSS\t4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), X[0-9]+` t = x[i+1] - // amd64: `MOVSS\tX[0-9]+, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\)` - // 386/sse2: `MOVSS\tX[0-9]+, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\)` + // amd64: `MOVSS\tX[0-9]+, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\)` + // 386: `MOVSS\tX[0-9]+, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\)` y[i+1] = t - // amd64: `MOVSS\t4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[14]\), X[0-9]+` - // 386/sse2: `MOVSS\t4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[14]\), X[0-9]+` + // amd64: `MOVSS\t4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[14]\), X[0-9]+` + // 386: `MOVSS\t4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[14]\), X[0-9]+` t = x[16*i+1] - // amd64: `MOVSS\tX[0-9]+, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[14]\)` - // 386/sse2: `MOVSS\tX[0-9]+, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[14]\)` + // amd64: `MOVSS\tX[0-9]+, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[14]\)` + // 386: `MOVSS\tX[0-9]+, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[14]\)` y[16*i+1] = t } func idxFloat64(x, y []float64, i int) { var t float64 - // amd64: `MOVSD\t8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\), X[0-9]+` - // 386/sse2: `MOVSD\t8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\), X[0-9]+` + // amd64: `MOVSD\t8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\), X[0-9]+` + // 386: `MOVSD\t8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\), X[0-9]+` t = x[i+1] - // amd64: `MOVSD\tX[0-9]+, 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\)` - // 386/sse2: `MOVSD\tX[0-9]+, 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\)` + // amd64: `MOVSD\tX[0-9]+, 
8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\)` + // 386: `MOVSD\tX[0-9]+, 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\)` y[i+1] = t - // amd64: `MOVSD\t8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[18]\), X[0-9]+` - // 386/sse2: `MOVSD\t8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[18]\), X[0-9]+` + // amd64: `MOVSD\t8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[18]\), X[0-9]+` + // 386: `MOVSD\t8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[18]\), X[0-9]+` t = x[16*i+1] - // amd64: `MOVSD\tX[0-9]+, 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[18]\)` - // 386/sse2: `MOVSD\tX[0-9]+, 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[18]\)` + // amd64: `MOVSD\tX[0-9]+, 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[18]\)` + // 386: `MOVSD\tX[0-9]+, 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[18]\)` y[16*i+1] = t } diff --git a/test/run.go b/test/run.go index 95b94b7277..77710fd89a 100644 --- a/test/run.go +++ b/test/run.go @@ -1489,7 +1489,7 @@ var ( // value[0] is the variant-changing environment variable, and values[1:] // are the supported variants. archVariants = map[string][]string{ - "386": {"GO386", "387", "sse2"}, + "386": {}, "amd64": {}, "arm": {"GOARM", "5", "6", "7"}, "arm64": {}, @@ -1511,12 +1511,12 @@ type wantedAsmOpcode struct { found bool // true if the opcode check matched at least one in the output } -// A build environment triplet separated by slashes (eg: linux/386/sse2). +// A build environment triplet separated by slashes (eg: linux/arm/7). 
// The third field can be empty if the arch does not support variants (eg: "plan9/amd64/") type buildEnv string // Environ returns the environment it represents in cmd.Environ() "key=val" format -// For instance, "linux/386/sse2".Environ() returns {"GOOS=linux", "GOARCH=386", "GO386=sse2"} +// For instance, "linux/arm/7".Environ() returns {"GOOS=linux", "GOARCH=arm", "GOARM=7"} func (b buildEnv) Environ() []string { fields := strings.Split(string(b), "/") if len(fields) != 3 { @@ -1571,11 +1571,11 @@ func (t *test) wantedAsmOpcodes(fn string) asmChecks { var arch, subarch, os string switch { - case archspec[2] != "": // 3 components: "linux/386/sse2" + case archspec[2] != "": // 3 components: "linux/arm/7" os, arch, subarch = archspec[0], archspec[1][1:], archspec[2][1:] - case archspec[1] != "": // 2 components: "386/sse2" + case archspec[1] != "": // 2 components: "arm/7" os, arch, subarch = "linux", archspec[0], archspec[1][1:] - default: // 1 component: "386" + default: // 1 component: "arm" os, arch, subarch = "linux", archspec[0], "" if arch == "wasm" { os = "js" From 15bf061b699593953b6350236eb93c316f8741d4 Mon Sep 17 00:00:00 2001 From: Alberto Donizetti Date: Fri, 2 Oct 2020 09:58:45 +0200 Subject: [PATCH 095/281] cmd/go: remove test checking GO386=387 invalidates cache Since GO386=387 is no longer supported, this change deletes a cmd/go test checking that building something with GO386=387, and then with sse2, invalidated the build cache. Fixes the longtest builders. 
Change-Id: I5f645ef4ddf1cddb26dcf9390cee94907fc45a70 Reviewed-on: https://go-review.googlesource.com/c/go/+/259017 Trust: Alberto Donizetti Trust: Tobias Klauser Reviewed-by: Tobias Klauser --- src/cmd/go/testdata/script/build_cache_arch_mode.txt | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/src/cmd/go/testdata/script/build_cache_arch_mode.txt b/src/cmd/go/testdata/script/build_cache_arch_mode.txt index 68e662555f..931827fbde 100644 --- a/src/cmd/go/testdata/script/build_cache_arch_mode.txt +++ b/src/cmd/go/testdata/script/build_cache_arch_mode.txt @@ -1,15 +1,7 @@ -# Issue 9737: verify that GOARM and GO386 affect the computed build ID +# Issue 9737: verify that GOARM affects the computed build ID [short] skip -# 386 -env GOOS=linux -env GOARCH=386 -env GO386=387 -go install mycmd -env GO386=sse2 -stale mycmd - # arm env GOOS=linux env GOARCH=arm From 8f1c99035d7797993a5ee393704603f21bb6f10c Mon Sep 17 00:00:00 2001 From: Filippo Valsorda Date: Mon, 28 Sep 2020 19:29:57 +0200 Subject: [PATCH 096/281] crypto/dsa,crypto/x509: deprecate DSA and remove crypto/x509 support Updates #40337 Change-Id: I5c1218df3ae7e13144a1d9f7d4a4b456e4475c0a Reviewed-on: https://go-review.googlesource.com/c/go/+/257939 Trust: Filippo Valsorda Trust: Roland Shoemaker Reviewed-by: Roland Shoemaker Reviewed-by: Katie Hockman --- doc/go1.16.html | 19 +++++++++++++++ src/crypto/dsa/dsa.go | 6 +++++ src/crypto/x509/x509.go | 37 +++++----------------------- src/crypto/x509/x509_test.go | 47 ++---------------------------------- 4 files changed, 33 insertions(+), 76 deletions(-) diff --git a/doc/go1.16.html b/doc/go1.16.html index c6e3d92cc6..5e0fa60e2f 100644 --- a/doc/go1.16.html +++ b/doc/go1.16.html @@ -229,6 +229,25 @@ Do not send CLs removing the interior tags from such phrases. TODO

+
crypto/dsa
+
+

+ The crypto/dsa package is now deprecated. + See issue #40337. +

+
+
+ +
crypto/x509
+
+

+ DSA signature verification is no longer supported. Note that DSA signature + generation was never supported. + See issue #40337. +

+
+
+
net/http

diff --git a/src/crypto/dsa/dsa.go b/src/crypto/dsa/dsa.go index 43826bcb55..a83359996d 100644 --- a/src/crypto/dsa/dsa.go +++ b/src/crypto/dsa/dsa.go @@ -5,6 +5,12 @@ // Package dsa implements the Digital Signature Algorithm, as defined in FIPS 186-3. // // The DSA operations in this package are not implemented using constant-time algorithms. +// +// Deprecated: DSA is a legacy algorithm, and modern alternatives such as +// Ed25519 (implemented by package crypto/ed25519) should be used instead. Keys +// with 1024-bit moduli (L1024N160 parameters) are cryptographically weak, while +// bigger keys are not widely supported. Note that FIPS 186-5 no longer approves +// DSA for signature generation. package dsa import ( diff --git a/src/crypto/x509/x509.go b/src/crypto/x509/x509.go index 93dca03840..58c4aa360f 100644 --- a/src/crypto/x509/x509.go +++ b/src/crypto/x509/x509.go @@ -159,10 +159,6 @@ type dsaAlgorithmParameters struct { P, Q, G *big.Int } -type dsaSignature struct { - R, S *big.Int -} - type validity struct { NotBefore, NotAfter time.Time } @@ -182,14 +178,15 @@ type SignatureAlgorithm int const ( UnknownSignatureAlgorithm SignatureAlgorithm = iota - MD2WithRSA - MD5WithRSA + + MD2WithRSA // Unsupported. + MD5WithRSA // Only supported for signing, not verification. SHA1WithRSA SHA256WithRSA SHA384WithRSA SHA512WithRSA - DSAWithSHA1 - DSAWithSHA256 + DSAWithSHA1 // Unsupported. + DSAWithSHA256 // Unsupported. ECDSAWithSHA1 ECDSAWithSHA256 ECDSAWithSHA384 @@ -223,7 +220,7 @@ type PublicKeyAlgorithm int const ( UnknownPublicKeyAlgorithm PublicKeyAlgorithm = iota RSA - DSA + DSA // Unsupported. 
ECDSA Ed25519 ) @@ -845,28 +842,6 @@ func checkSignature(algo SignatureAlgorithm, signed, signature []byte, publicKey } else { return rsa.VerifyPKCS1v15(pub, hashType, signed, signature) } - case *dsa.PublicKey: - if pubKeyAlgo != DSA { - return signaturePublicKeyAlgoMismatchError(pubKeyAlgo, pub) - } - dsaSig := new(dsaSignature) - if rest, err := asn1.Unmarshal(signature, dsaSig); err != nil { - return err - } else if len(rest) != 0 { - return errors.New("x509: trailing data after DSA signature") - } - if dsaSig.R.Sign() <= 0 || dsaSig.S.Sign() <= 0 { - return errors.New("x509: DSA signature contained zero or negative values") - } - // According to FIPS 186-3, section 4.6, the hash must be truncated if it is longer - // than the key length, but crypto/dsa doesn't do it automatically. - if maxHashLen := pub.Q.BitLen() / 8; maxHashLen < len(signed) { - signed = signed[:maxHashLen] - } - if !dsa.Verify(pub, signed, dsaSig.R, dsaSig.S) { - return errors.New("x509: DSA verification failure") - } - return case *ecdsa.PublicKey: if pubKeyAlgo != ECDSA { return signaturePublicKeyAlgoMismatchError(pubKeyAlgo, pub) diff --git a/src/crypto/x509/x509_test.go b/src/crypto/x509/x509_test.go index e87294bde5..2d9ace4a16 100644 --- a/src/crypto/x509/x509_test.go +++ b/src/crypto/x509/x509_test.go @@ -988,51 +988,8 @@ func TestVerifyCertificateWithDSASignature(t *testing.T) { t.Fatalf("Failed to parse certificate: %s", err) } // test cert is self-signed - if err = cert.CheckSignatureFrom(cert); err != nil { - t.Fatalf("DSA Certificate verification failed: %s", err) - } -} - -const dsaCert1024WithSha256 = `-----BEGIN CERTIFICATE----- -MIIDKzCCAumgAwIBAgIUOXWPK4gTRZVVY7OSXTU00QEWQU8wCwYJYIZIAWUDBAMC -MEUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJ -bnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwIBcNMTkxMDAxMDYxODUyWhgPMzAxOTAy -MDEwNjE4NTJaMEUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw -HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggG4MIIBLAYHKoZIzjgE 
-ATCCAR8CgYEAr79m/1ypU1aUbbLX1jikTyX7w2QYP+EkxNtXUiiTuxkC1KBqqxT3 -0Aht2vxFR47ODEK4B79rHO+UevhaqDaAHSH7Z/9umS0h0aS32KLDLb+LI5AneCrn -eW5YbVhfD03N7uR4kKUCKOnWj5hAk9xiE3y7oFR0bBXzqrrHJF9LMd0CFQCB6lSj -HSW0rGmNxIZsBl72u7JFLQKBgQCOFd1PGEQmddn0cdFgby5QQfjrqmoD1zNlFZEt -L0x1EbndFwelLlF1ChNh3NPNUkjwRbla07FDlONs1GMJq6w4vW11ns+pUvAZ2+RM -EVFjugip8az2ncn3UujGTVdFxnSTLBsRlMP/tFDK3ky//8zn/5ha9SKKw4v1uv6M -JuoIbwOBhQACgYEAoeKeR90nwrnoPi5MOUPBLQvuzB87slfr+3kL8vFCmgjA6MtB -7TxQKoBTOo5aVgWDp0lMIMxLd6btzBrm6r3VdRlh/cL8/PtbxkFwBa+Upe4o5NAh -ISCe2/f2leT1PxtF8xxYjz/fszeUeHsJbVMilE2cuB2SYrR5tMExiqy+QpqjUzBR -MB0GA1UdDgQWBBQDMIEL8Z3jc1d9wCxWtksUWc8RkjAfBgNVHSMEGDAWgBQDMIEL -8Z3jc1d9wCxWtksUWc8RkjAPBgNVHRMBAf8EBTADAQH/MAsGCWCGSAFlAwQDAgMv -ADAsAhQFehZgI4OyKBGpfnXvyJ0Z/0a6nAIUTO265Ane87LfJuQr3FrqvuCI354= ------END CERTIFICATE----- -` - -func TestVerifyCertificateWithDSATooLongHash(t *testing.T) { - pemBlock, _ := pem.Decode([]byte(dsaCert1024WithSha256)) - cert, err := ParseCertificate(pemBlock.Bytes) - if err != nil { - t.Fatalf("Failed to parse certificate: %s", err) - } - - // test cert is self-signed - if err = cert.CheckSignatureFrom(cert); err != nil { - t.Fatalf("DSA Certificate self-signature verification failed: %s", err) - } - - signed := []byte("A wild Gopher appears!\n") - signature, _ := hex.DecodeString("302c0214417aca7ff458f5b566e43e7b82f994953da84be50214625901e249e33f4e4838f8b5966020c286dd610e") - - // This signature is using SHA256, but only has 1024 DSA key. The hash has to be truncated - // in CheckSignature, otherwise it won't pass. 
- if err = cert.CheckSignature(DSAWithSHA256, signed, signature); err != nil { - t.Fatalf("DSA signature verification failed: %s", err) + if err = cert.CheckSignatureFrom(cert); err == nil { + t.Fatalf("Expected error verifying DSA certificate") } } From 44a15a7262b14d517fefab5b7c13ca97ab099a30 Mon Sep 17 00:00:00 2001 From: Dai Jie Date: Fri, 2 Oct 2020 09:09:24 +0000 Subject: [PATCH 097/281] net/http: remove duplicate declaration of error there is no need to declare a error variable here. Change-Id: I9ea5bcf568d800efed19c90caf751aaf9abe5555 GitHub-Last-Rev: 538d1f9cee0b8564a8bec262529f567da847f1b0 GitHub-Pull-Request: golang/go#41751 Reviewed-on: https://go-review.googlesource.com/c/go/+/259037 Reviewed-by: Rob Pike Trust: Alberto Donizetti --- src/net/rpc/client.go | 1 - 1 file changed, 1 deletion(-) diff --git a/src/net/rpc/client.go b/src/net/rpc/client.go index 25f2a004e4..60bb2cc99f 100644 --- a/src/net/rpc/client.go +++ b/src/net/rpc/client.go @@ -245,7 +245,6 @@ func DialHTTP(network, address string) (*Client, error) { // DialHTTPPath connects to an HTTP RPC server // at the specified network address and path. func DialHTTPPath(network, address, path string) (*Client, error) { - var err error conn, err := net.Dial(network, address) if err != nil { return nil, err From 79dbdf2a4c2ad93d3de493956f8bbca1465ba932 Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Mon, 21 Sep 2020 08:47:34 -0400 Subject: [PATCH 098/281] go/types: add Checker.walkDecl to simplify checking declarations Handling ast.GenDecls while typechecking is repetitive, and a source of diffs compared to typechecking using the cmd/compile/internal/syntax package, which unpacks declaration groups into individual declarations. Refactor to extract the logic for walking declarations. This introduces a new AST abstraction: types.decl, which comes at some minor performance cost. 
However, if we are to fully abstract the AST we will be paying this cost anyway, and benchmarking suggests that the cost is negligible. Change-Id: If73c30c3d08053ccf7bf21ef886f0452fdbf142e Reviewed-on: https://go-review.googlesource.com/c/go/+/256298 Run-TryBot: Robert Findley TryBot-Result: Go Bot Reviewed-by: Robert Griesemer Trust: Robert Findley --- src/go/types/decl.go | 284 +++++++++++++++++++++---------------- src/go/types/resolver.go | 294 +++++++++++++++++---------------------- 2 files changed, 292 insertions(+), 286 deletions(-) diff --git a/src/go/types/decl.go b/src/go/types/decl.go index 5c0e611c51..a022ec5678 100644 --- a/src/go/types/decl.go +++ b/src/go/types/decl.go @@ -381,6 +381,76 @@ func firstInSrc(path []Object) int { return fst } +type ( + decl interface { + node() ast.Node + } + + importDecl struct{ spec *ast.ImportSpec } + constDecl struct { + spec *ast.ValueSpec + iota int + typ ast.Expr + init []ast.Expr + } + varDecl struct{ spec *ast.ValueSpec } + typeDecl struct{ spec *ast.TypeSpec } + funcDecl struct{ decl *ast.FuncDecl } +) + +func (d importDecl) node() ast.Node { return d.spec } +func (d constDecl) node() ast.Node { return d.spec } +func (d varDecl) node() ast.Node { return d.spec } +func (d typeDecl) node() ast.Node { return d.spec } +func (d funcDecl) node() ast.Node { return d.decl } + +func (check *Checker) walkDecls(decls []ast.Decl, f func(decl)) { + for _, d := range decls { + check.walkDecl(d, f) + } +} + +func (check *Checker) walkDecl(d ast.Decl, f func(decl)) { + switch d := d.(type) { + case *ast.BadDecl: + // ignore + case *ast.GenDecl: + var last *ast.ValueSpec // last ValueSpec with type or init exprs seen + for iota, s := range d.Specs { + switch s := s.(type) { + case *ast.ImportSpec: + f(importDecl{s}) + case *ast.ValueSpec: + switch d.Tok { + case token.CONST: + // determine which initialization expressions to use + switch { + case s.Type != nil || len(s.Values) > 0: + last = s + case last == nil: + last = 
new(ast.ValueSpec) // make sure last exists + } + check.arityMatch(s, last) + f(constDecl{spec: s, iota: iota, init: last.Values, typ: last.Type}) + case token.VAR: + check.arityMatch(s, nil) + f(varDecl{s}) + default: + check.invalidAST(s.Pos(), "invalid token %s", d.Tok) + } + case *ast.TypeSpec: + f(typeDecl{s}) + default: + check.invalidAST(s.Pos(), "unknown ast.Spec node %T", s) + } + } + case *ast.FuncDecl: + f(funcDecl{d}) + default: + check.invalidAST(d.Pos(), "unknown ast.Decl node %T", d) + } +} + func (check *Checker) constDecl(obj *Const, typ, init ast.Expr) { assert(obj.typ == nil) @@ -664,133 +734,105 @@ func (check *Checker) funcDecl(obj *Func, decl *declInfo) { } } -func (check *Checker) declStmt(decl ast.Decl) { +func (check *Checker) declStmt(d ast.Decl) { pkg := check.pkg - switch d := decl.(type) { - case *ast.BadDecl: - // ignore + check.walkDecl(d, func(d decl) { + switch d := d.(type) { + case constDecl: + top := len(check.delayed) - case *ast.GenDecl: - var last *ast.ValueSpec // last ValueSpec with type or init exprs seen - for iota, spec := range d.Specs { - switch s := spec.(type) { - case *ast.ValueSpec: - switch d.Tok { - case token.CONST: - top := len(check.delayed) + // declare all constants + lhs := make([]*Const, len(d.spec.Names)) + for i, name := range d.spec.Names { + obj := NewConst(name.Pos(), pkg, name.Name, nil, constant.MakeInt64(int64(d.iota))) + lhs[i] = obj - // determine which init exprs to use - switch { - case s.Type != nil || len(s.Values) > 0: - last = s - case last == nil: - last = new(ast.ValueSpec) // make sure last exists - } - - // declare all constants - lhs := make([]*Const, len(s.Names)) - for i, name := range s.Names { - obj := NewConst(name.Pos(), pkg, name.Name, nil, constant.MakeInt64(int64(iota))) - lhs[i] = obj - - var init ast.Expr - if i < len(last.Values) { - init = last.Values[i] - } - - check.constDecl(obj, last.Type, init) - } - - check.arityMatch(s, last) - - // process function literals in init 
expressions before scope changes - check.processDelayed(top) - - // spec: "The scope of a constant or variable identifier declared - // inside a function begins at the end of the ConstSpec or VarSpec - // (ShortVarDecl for short variable declarations) and ends at the - // end of the innermost containing block." - scopePos := s.End() - for i, name := range s.Names { - check.declare(check.scope, name, lhs[i], scopePos) - } - - case token.VAR: - top := len(check.delayed) - - lhs0 := make([]*Var, len(s.Names)) - for i, name := range s.Names { - lhs0[i] = NewVar(name.Pos(), pkg, name.Name, nil) - } - - // initialize all variables - for i, obj := range lhs0 { - var lhs []*Var - var init ast.Expr - switch len(s.Values) { - case len(s.Names): - // lhs and rhs match - init = s.Values[i] - case 1: - // rhs is expected to be a multi-valued expression - lhs = lhs0 - init = s.Values[0] - default: - if i < len(s.Values) { - init = s.Values[i] - } - } - check.varDecl(obj, lhs, s.Type, init) - if len(s.Values) == 1 { - // If we have a single lhs variable we are done either way. - // If we have a single rhs expression, it must be a multi- - // valued expression, in which case handling the first lhs - // variable will cause all lhs variables to have a type - // assigned, and we are done as well. 
- if debug { - for _, obj := range lhs0 { - assert(obj.typ != nil) - } - } - break - } - } - - check.arityMatch(s, nil) - - // process function literals in init expressions before scope changes - check.processDelayed(top) - - // declare all variables - // (only at this point are the variable scopes (parents) set) - scopePos := s.End() // see constant declarations - for i, name := range s.Names { - // see constant declarations - check.declare(check.scope, name, lhs0[i], scopePos) - } - - default: - check.invalidAST(s.Pos(), "invalid token %s", d.Tok) + var init ast.Expr + if i < len(d.init) { + init = d.init[i] } - case *ast.TypeSpec: - obj := NewTypeName(s.Name.Pos(), pkg, s.Name.Name, nil) - // spec: "The scope of a type identifier declared inside a function - // begins at the identifier in the TypeSpec and ends at the end of - // the innermost containing block." - scopePos := s.Name.Pos() - check.declare(check.scope, s.Name, obj, scopePos) - // mark and unmark type before calling typeDecl; its type is still nil (see Checker.objDecl) - obj.setColor(grey + color(check.push(obj))) - check.typeDecl(obj, s.Type, nil, s.Assign.IsValid()) - check.pop().setColor(black) - default: - check.invalidAST(s.Pos(), "const, type, or var declaration expected") + check.constDecl(obj, d.typ, init) } - } - default: - check.invalidAST(d.Pos(), "unknown ast.Decl node %T", d) - } + // process function literals in init expressions before scope changes + check.processDelayed(top) + + // spec: "The scope of a constant or variable identifier declared + // inside a function begins at the end of the ConstSpec or VarSpec + // (ShortVarDecl for short variable declarations) and ends at the + // end of the innermost containing block." 
+ scopePos := d.spec.End() + for i, name := range d.spec.Names { + check.declare(check.scope, name, lhs[i], scopePos) + } + + case varDecl: + top := len(check.delayed) + + lhs0 := make([]*Var, len(d.spec.Names)) + for i, name := range d.spec.Names { + lhs0[i] = NewVar(name.Pos(), pkg, name.Name, nil) + } + + // initialize all variables + for i, obj := range lhs0 { + var lhs []*Var + var init ast.Expr + switch len(d.spec.Values) { + case len(d.spec.Names): + // lhs and rhs match + init = d.spec.Values[i] + case 1: + // rhs is expected to be a multi-valued expression + lhs = lhs0 + init = d.spec.Values[0] + default: + if i < len(d.spec.Values) { + init = d.spec.Values[i] + } + } + check.varDecl(obj, lhs, d.spec.Type, init) + if len(d.spec.Values) == 1 { + // If we have a single lhs variable we are done either way. + // If we have a single rhs expression, it must be a multi- + // valued expression, in which case handling the first lhs + // variable will cause all lhs variables to have a type + // assigned, and we are done as well. + if debug { + for _, obj := range lhs0 { + assert(obj.typ != nil) + } + } + break + } + } + + // process function literals in init expressions before scope changes + check.processDelayed(top) + + // declare all variables + // (only at this point are the variable scopes (parents) set) + scopePos := d.spec.End() // see constant declarations + for i, name := range d.spec.Names { + // see constant declarations + check.declare(check.scope, name, lhs0[i], scopePos) + } + + case typeDecl: + obj := NewTypeName(d.spec.Name.Pos(), pkg, d.spec.Name.Name, nil) + // spec: "The scope of a type identifier declared inside a function + // begins at the identifier in the TypeSpec and ends at the end of + // the innermost containing block." 
+ scopePos := d.spec.Name.Pos() + check.declare(check.scope, d.spec.Name, obj, scopePos) + // mark and unmark type before calling typeDecl; its type is still nil (see Checker.objDecl) + obj.setColor(grey + color(check.push(obj))) + check.typeDecl(obj, d.spec.Type, nil, d.spec.Assign.IsValid()) + check.pop().setColor(black) + default: + check.invalidAST(d.node().Pos(), "unknown ast.Decl node %T", d.node()) + } + }) } diff --git a/src/go/types/resolver.go b/src/go/types/resolver.go index 078adc5ec7..cce222cbc5 100644 --- a/src/go/types/resolver.go +++ b/src/go/types/resolver.go @@ -235,179 +235,147 @@ func (check *Checker) collectObjects() { // we get "." as the directory which is what we would want. fileDir := dir(check.fset.Position(file.Name.Pos()).Filename) - for _, decl := range file.Decls { - switch d := decl.(type) { - case *ast.BadDecl: - // ignore + check.walkDecls(file.Decls, func(d decl) { + switch d := d.(type) { + case importDecl: + // import package + path, err := validatedImportPath(d.spec.Path.Value) + if err != nil { + check.errorf(d.spec.Path.Pos(), "invalid import path (%s)", err) + return + } - case *ast.GenDecl: - var last *ast.ValueSpec // last ValueSpec with type or init exprs seen - for iota, spec := range d.Specs { - switch s := spec.(type) { - case *ast.ImportSpec: - // import package - path, err := validatedImportPath(s.Path.Value) - if err != nil { - check.errorf(s.Path.Pos(), "invalid import path (%s)", err) - continue - } + imp := check.importPackage(d.spec.Path.Pos(), path, fileDir) + if imp == nil { + return + } - imp := check.importPackage(s.Path.Pos(), path, fileDir) - if imp == nil { - continue - } + // add package to list of explicit imports + // (this functionality is provided as a convenience + // for clients; it is not needed for type-checking) + if !pkgImports[imp] { + pkgImports[imp] = true + pkg.imports = append(pkg.imports, imp) + } - // add package to list of explicit imports - // (this functionality is provided as a 
convenience - // for clients; it is not needed for type-checking) - if !pkgImports[imp] { - pkgImports[imp] = true - pkg.imports = append(pkg.imports, imp) - } - - // local name overrides imported package name - name := imp.name - if s.Name != nil { - name = s.Name.Name - if path == "C" { - // match cmd/compile (not prescribed by spec) - check.errorf(s.Name.Pos(), `cannot rename import "C"`) - continue - } - if name == "init" { - check.errorf(s.Name.Pos(), "cannot declare init - must be func") - continue - } - } - - obj := NewPkgName(s.Pos(), pkg, name, imp) - if s.Name != nil { - // in a dot-import, the dot represents the package - check.recordDef(s.Name, obj) - } else { - check.recordImplicit(s, obj) - } - - if path == "C" { - // match cmd/compile (not prescribed by spec) - obj.used = true - } - - // add import to file scope - if name == "." { - // merge imported scope with file scope - for _, obj := range imp.scope.elems { - // A package scope may contain non-exported objects, - // do not import them! - if obj.Exported() { - // declare dot-imported object - // (Do not use check.declare because it modifies the object - // via Object.setScopePos, which leads to a race condition; - // the object may be imported into more than one file scope - // concurrently. See issue #32154.) 
- if alt := fileScope.Insert(obj); alt != nil { - check.errorf(s.Name.Pos(), "%s redeclared in this block", obj.Name()) - check.reportAltDecl(alt) - } - } - } - // add position to set of dot-import positions for this file - // (this is only needed for "imported but not used" errors) - check.addUnusedDotImport(fileScope, imp, s.Pos()) - } else { - // declare imported package object in file scope - // (no need to provide s.Name since we called check.recordDef earlier) - check.declare(fileScope, nil, obj, token.NoPos) - } - - case *ast.ValueSpec: - switch d.Tok { - case token.CONST: - // determine which initialization expressions to use - switch { - case s.Type != nil || len(s.Values) > 0: - last = s - case last == nil: - last = new(ast.ValueSpec) // make sure last exists - } - - // declare all constants - for i, name := range s.Names { - obj := NewConst(name.Pos(), pkg, name.Name, nil, constant.MakeInt64(int64(iota))) - - var init ast.Expr - if i < len(last.Values) { - init = last.Values[i] - } - - d := &declInfo{file: fileScope, typ: last.Type, init: init} - check.declarePkgObj(name, obj, d) - } - - check.arityMatch(s, last) - - case token.VAR: - lhs := make([]*Var, len(s.Names)) - // If there's exactly one rhs initializer, use - // the same declInfo d1 for all lhs variables - // so that each lhs variable depends on the same - // rhs initializer (n:1 var declaration). - var d1 *declInfo - if len(s.Values) == 1 { - // The lhs elements are only set up after the for loop below, - // but that's ok because declareVar only collects the declInfo - // for a later phase. 
- d1 = &declInfo{file: fileScope, lhs: lhs, typ: s.Type, init: s.Values[0]} - } - - // declare all variables - for i, name := range s.Names { - obj := NewVar(name.Pos(), pkg, name.Name, nil) - lhs[i] = obj - - d := d1 - if d == nil { - // individual assignments - var init ast.Expr - if i < len(s.Values) { - init = s.Values[i] - } - d = &declInfo{file: fileScope, typ: s.Type, init: init} - } - - check.declarePkgObj(name, obj, d) - } - - check.arityMatch(s, nil) - - default: - check.invalidAST(s.Pos(), "invalid token %s", d.Tok) - } - - case *ast.TypeSpec: - obj := NewTypeName(s.Name.Pos(), pkg, s.Name.Name, nil) - check.declarePkgObj(s.Name, obj, &declInfo{file: fileScope, typ: s.Type, alias: s.Assign.IsValid()}) - - default: - check.invalidAST(s.Pos(), "unknown ast.Spec node %T", s) + // local name overrides imported package name + name := imp.name + if d.spec.Name != nil { + name = d.spec.Name.Name + if path == "C" { + // match cmd/compile (not prescribed by spec) + check.errorf(d.spec.Name.Pos(), `cannot rename import "C"`) + return + } + if name == "init" { + check.errorf(d.spec.Name.Pos(), "cannot declare init - must be func") + return } } - case *ast.FuncDecl: - name := d.Name.Name - obj := NewFunc(d.Name.Pos(), pkg, name, nil) - if d.Recv == nil { + obj := NewPkgName(d.spec.Pos(), pkg, name, imp) + if d.spec.Name != nil { + // in a dot-import, the dot represents the package + check.recordDef(d.spec.Name, obj) + } else { + check.recordImplicit(d.spec, obj) + } + + if path == "C" { + // match cmd/compile (not prescribed by spec) + obj.used = true + } + + // add import to file scope + if name == "." { + // merge imported scope with file scope + for _, obj := range imp.scope.elems { + // A package scope may contain non-exported objects, + // do not import them! 
+ if obj.Exported() { + // declare dot-imported object + // (Do not use check.declare because it modifies the object + // via Object.setScopePos, which leads to a race condition; + // the object may be imported into more than one file scope + // concurrently. See issue #32154.) + if alt := fileScope.Insert(obj); alt != nil { + check.errorf(d.spec.Name.Pos(), "%s redeclared in this block", obj.Name()) + check.reportAltDecl(alt) + } + } + } + // add position to set of dot-import positions for this file + // (this is only needed for "imported but not used" errors) + check.addUnusedDotImport(fileScope, imp, d.spec.Pos()) + } else { + // declare imported package object in file scope + // (no need to provide s.Name since we called check.recordDef earlier) + check.declare(fileScope, nil, obj, token.NoPos) + } + case constDecl: + // declare all constants + for i, name := range d.spec.Names { + obj := NewConst(name.Pos(), pkg, name.Name, nil, constant.MakeInt64(int64(d.iota))) + + var init ast.Expr + if i < len(d.init) { + init = d.init[i] + } + + d := &declInfo{file: fileScope, typ: d.typ, init: init} + check.declarePkgObj(name, obj, d) + } + + case varDecl: + lhs := make([]*Var, len(d.spec.Names)) + // If there's exactly one rhs initializer, use + // the same declInfo d1 for all lhs variables + // so that each lhs variable depends on the same + // rhs initializer (n:1 var declaration). + var d1 *declInfo + if len(d.spec.Values) == 1 { + // The lhs elements are only set up after the for loop below, + // but that's ok because declareVar only collects the declInfo + // for a later phase. 
+ d1 = &declInfo{file: fileScope, lhs: lhs, typ: d.spec.Type, init: d.spec.Values[0]} + } + + // declare all variables + for i, name := range d.spec.Names { + obj := NewVar(name.Pos(), pkg, name.Name, nil) + lhs[i] = obj + + di := d1 + if di == nil { + // individual assignments + var init ast.Expr + if i < len(d.spec.Values) { + init = d.spec.Values[i] + } + di = &declInfo{file: fileScope, typ: d.spec.Type, init: init} + } + + check.declarePkgObj(name, obj, di) + } + case typeDecl: + obj := NewTypeName(d.spec.Name.Pos(), pkg, d.spec.Name.Name, nil) + check.declarePkgObj(d.spec.Name, obj, &declInfo{file: fileScope, typ: d.spec.Type, alias: d.spec.Assign.IsValid()}) + case funcDecl: + info := &declInfo{file: fileScope, fdecl: d.decl} + name := d.decl.Name.Name + obj := NewFunc(d.decl.Name.Pos(), pkg, name, nil) + if d.decl.Recv == nil { // regular function if name == "init" { // don't declare init functions in the package scope - they are invisible obj.parent = pkg.scope - check.recordDef(d.Name, obj) + check.recordDef(d.decl.Name, obj) // init functions must have a body - if d.Body == nil { + if d.decl.Body == nil { check.softErrorf(obj.pos, "missing function body") } } else { - check.declare(pkg.scope, d.Name, obj, token.NoPos) + check.declare(pkg.scope, d.decl.Name, obj, token.NoPos) } } else { // method @@ -417,20 +385,16 @@ func (check *Checker) collectObjects() { if name != "_" { methods = append(methods, obj) } - check.recordDef(d.Name, obj) + check.recordDef(d.decl.Name, obj) } - info := &declInfo{file: fileScope, fdecl: d} // Methods are not package-level objects but we still track them in the // object map so that we can handle them like regular functions (if the // receiver is invalid); also we need their fdecl info when associating // them with their receiver base type, below. 
check.objMap[obj] = info obj.setOrder(uint32(len(check.objMap))) - - default: - check.invalidAST(d.Pos(), "unknown ast.Decl node %T", d) } - } + }) } // verify that objects in package and file scopes have different names From d888f1d5c06828e9d7b0166f770a443f6315c2d1 Mon Sep 17 00:00:00 2001 From: Austin Clements Date: Thu, 1 Oct 2020 16:06:03 -0400 Subject: [PATCH 099/281] runtime: add debugging to TestTimePprof We've seen timeouts of TestTimePprof, but the tracebacks aren't useful because goroutines are running on other threads. Add GOTRACEBACK=crash to catch these in the future. For #41120. Change-Id: I97318172ef78d0cbab10df5e4ffcbfeadff579e3 Reviewed-on: https://go-review.googlesource.com/c/go/+/258802 Trust: Austin Clements Run-TryBot: Austin Clements TryBot-Result: Go Bot Reviewed-by: Bryan C. Mills --- src/runtime/crash_test.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/runtime/crash_test.go b/src/runtime/crash_test.go index 34f30c9a37..eae4f538c1 100644 --- a/src/runtime/crash_test.go +++ b/src/runtime/crash_test.go @@ -667,7 +667,9 @@ func TestBadTraceback(t *testing.T) { } func TestTimePprof(t *testing.T) { - fn := runTestProg(t, "testprog", "TimeProf") + // Pass GOTRACEBACK for issue #41120 to try to get more + // information on timeout. + fn := runTestProg(t, "testprog", "TimeProf", "GOTRACEBACK=crash") fn = strings.TrimSpace(fn) defer os.Remove(fn) From 21eb3dcf93fc3698c9b8cd3ba83c9ddbef31e880 Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Fri, 2 Oct 2020 22:13:14 +0200 Subject: [PATCH 100/281] doc/go1.16: announce netbsd/arm64 support netbsd/arm64 now complies with all the requirements for a port as specified on https://golang.org/wiki/PortingPolicy Note that this was preliminarily announced in the Go 1.13 release notes (CL 183637) but then removed again due to the port lacking a builder at that time (CL 192997). 
Updates #30824 Change-Id: I2f40fabc84fe9cb699282e6a9d13ed9b64478e36 Reviewed-on: https://go-review.googlesource.com/c/go/+/259277 Trust: Tobias Klauser Reviewed-by: Ian Lance Taylor --- doc/go1.16.html | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/doc/go1.16.html b/doc/go1.16.html index 5e0fa60e2f..f7bcb9e94f 100644 --- a/doc/go1.16.html +++ b/doc/go1.16.html @@ -31,8 +31,11 @@ Do not send CLs removing the interior tags from such phrases.

Ports

-

- TODO +

NetBSD

+ +

+ Go now supports the 64-bit ARM architecture on NetBSD (the + netbsd/arm64 port).

Tools

From f89d05eb7ba1885474d03bb62f0a36a2d3cf56ea Mon Sep 17 00:00:00 2001 From: Austin Clements Date: Thu, 1 Oct 2020 10:58:47 -0400 Subject: [PATCH 101/281] runtime: update and tidy cgo callback description The documentation on how cgo callbacks (C -> Go calls) works internally has gotten somewhat stale. This CL refreshes it. Change-Id: I1ab66225c9da52d698d97ebeb4f3c7b9b5ee97db Reviewed-on: https://go-review.googlesource.com/c/go/+/258937 Trust: Austin Clements Reviewed-by: Ian Lance Taylor Reviewed-by: Cherry Zhang --- src/runtime/cgocall.go | 42 +++++++++++++++++++++++------------------- 1 file changed, 23 insertions(+), 19 deletions(-) diff --git a/src/runtime/cgocall.go b/src/runtime/cgocall.go index 427ed0ffb9..0b69ff3233 100644 --- a/src/runtime/cgocall.go +++ b/src/runtime/cgocall.go @@ -35,44 +35,48 @@ // cgo writes a gcc-compiled function named GoF (not p.GoF, since gcc doesn't // know about packages). The gcc-compiled C function f calls GoF. // -// GoF calls crosscall2(_cgoexp_GoF, frame, framesize). Crosscall2 -// (in cgo/gcc_$GOARCH.S, a gcc-compiled assembly file) is a two-argument -// adapter from the gcc function call ABI to the 6c function call ABI. -// It is called from gcc to call 6c functions. In this case it calls -// _cgoexp_GoF(frame, framesize), still running on m->g0's stack +// GoF calls crosscall2(_cgoexp_GoF, frame, framesize, ctxt). +// Crosscall2 (in cgo/asm_$GOARCH.s) is a four-argument adapter from +// the gcc function call ABI to the gc function call ABI. +// It is called from gcc to call gc functions. In this case it calls +// _cgoexp_GoF(frame, framesize), still running on m.g0's stack // and outside the $GOMAXPROCS limit. Thus, this code cannot yet // call arbitrary Go code directly and must be careful not to allocate -// memory or use up m->g0's stack. +// memory or use up m.g0's stack. // -// _cgoexp_GoF calls runtime.cgocallback(p.GoF, frame, framesize, ctxt). 
+// _cgoexp_GoF (generated by cmd/cgo) calls +// runtime.cgocallback(funcPC(p.GoF), frame, framesize, ctxt). // (The reason for having _cgoexp_GoF instead of writing a crosscall3 // to make this call directly is that _cgoexp_GoF, because it is compiled -// with 6c instead of gcc, can refer to dotted names like +// with gc instead of gcc, can refer to dotted names like // runtime.cgocallback and p.GoF.) // -// runtime.cgocallback (in asm_$GOARCH.s) switches from m->g0's -// stack to the original g (m->curg)'s stack, on which it calls +// runtime.cgocallback (in asm_$GOARCH.s) turns the raw PC of p.GoF +// into a Go function value and calls runtime.cgocallback_gofunc. +// +// runtime.cgocallback_gofunc (in asm_$GOARCH.s) switches from m.g0's +// stack to the original g (m.curg)'s stack, on which it calls // runtime.cgocallbackg(p.GoF, frame, framesize). // As part of the stack switch, runtime.cgocallback saves the current -// SP as m->g0->sched.sp, so that any use of m->g0's stack during the +// SP as m.g0.sched.sp, so that any use of m.g0's stack during the // execution of the callback will be done below the existing stack frames. -// Before overwriting m->g0->sched.sp, it pushes the old value on the -// m->g0 stack, so that it can be restored later. +// Before overwriting m.g0.sched.sp, it pushes the old value on the +// m.g0 stack, so that it can be restored later. // // runtime.cgocallbackg (below) is now running on a real goroutine -// stack (not an m->g0 stack). First it calls runtime.exitsyscall, which will +// stack (not an m.g0 stack). First it calls runtime.exitsyscall, which will // block until the $GOMAXPROCS limit allows running this goroutine. // Once exitsyscall has returned, it is safe to do things like call the memory // allocator or invoke the Go callback function p.GoF. 
runtime.cgocallbackg -// first defers a function to unwind m->g0.sched.sp, so that if p.GoF -// panics, m->g0.sched.sp will be restored to its old value: the m->g0 stack -// and the m->curg stack will be unwound in lock step. +// first defers a function to unwind m.g0.sched.sp, so that if p.GoF +// panics, m.g0.sched.sp will be restored to its old value: the m.g0 stack +// and the m.curg stack will be unwound in lock step. // Then it calls p.GoF. Finally it pops but does not execute the deferred // function, calls runtime.entersyscall, and returns to runtime.cgocallback. // // After it regains control, runtime.cgocallback switches back to -// m->g0's stack (the pointer is still in m->g0.sched.sp), restores the old -// m->g0.sched.sp value from the stack, and returns to _cgoexp_GoF. +// m.g0's stack (the pointer is still in m.g0.sched.sp), restores the old +// m.g0.sched.sp value from the stack, and returns to _cgoexp_GoF. // // _cgoexp_GoF immediately returns to crosscall2, which restores the // callee-save registers for gcc and returns to GoF, which returns to f. From 095e0f48a19fa3bd7901f79420374b9cb50940e9 Mon Sep 17 00:00:00 2001 From: Alberto Donizetti Date: Thu, 1 Oct 2020 12:03:27 +0200 Subject: [PATCH 102/281] cmd/compile: change mustHeapAlloc to return a reason why This change renames mustHeapAlloc to heapAllocReason, and changes it to return the reason why the argument must escape, so we don't have to re-deduce it in its callers just to print the escape reason. It also embeds isSmallMakeSlice body in heapAllocReason, since the former was only used by the latter, and deletes isSmallMakeSlice. An outdated TODO to remove smallintconst, which the TODO claimed was only used in one place, was also removed, since grepping shows we currently call smallintconst in 11 different places. 
Change-Id: I0bd11bf29b92c4126f5bb455877ff73217d5a155 Reviewed-on: https://go-review.googlesource.com/c/go/+/258678 Run-TryBot: Alberto Donizetti TryBot-Result: Go Bot Trust: Alberto Donizetti Trust: Cuong Manh Le Reviewed-by: Cuong Manh Le Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/const.go | 1 - src/cmd/compile/internal/gc/esc.go | 31 ++++++++++++++++++--------- src/cmd/compile/internal/gc/escape.go | 6 +----- src/cmd/compile/internal/gc/walk.go | 17 ++------------- test/fixedbugs/issue41635.go | 11 +++++----- 5 files changed, 29 insertions(+), 37 deletions(-) diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go index c0ed8192d9..d881be485e 100644 --- a/src/cmd/compile/internal/gc/const.go +++ b/src/cmd/compile/internal/gc/const.go @@ -1134,7 +1134,6 @@ func strlit(n *Node) string { return n.Val().U.(string) } -// TODO(gri) smallintconst is only used in one place - can we used indexconst? func smallintconst(n *Node) bool { if n.Op == OLITERAL && Isconst(n, CTINT) && n.Type != nil { switch simtype[n.Type.Etype] { diff --git a/src/cmd/compile/internal/gc/esc.go b/src/cmd/compile/internal/gc/esc.go index 375331d1f5..d7aa72b450 100644 --- a/src/cmd/compile/internal/gc/esc.go +++ b/src/cmd/compile/internal/gc/esc.go @@ -169,36 +169,47 @@ func mayAffectMemory(n *Node) bool { } } -func mustHeapAlloc(n *Node) bool { +// heapAllocReason returns the reason the given Node must be heap +// allocated, or the empty string if it doesn't. +func heapAllocReason(n *Node) string { if n.Type == nil { - return false + return "" } // Parameters are always passed via the stack. 
if n.Op == ONAME && (n.Class() == PPARAM || n.Class() == PPARAMOUT) { - return false + return "" } if n.Type.Width > maxStackVarSize { - return true + return "too large for stack" } if (n.Op == ONEW || n.Op == OPTRLIT) && n.Type.Elem().Width >= maxImplicitStackVarSize { - return true + return "too large for stack" } if n.Op == OCLOSURE && closureType(n).Size() >= maxImplicitStackVarSize { - return true + return "too large for stack" } if n.Op == OCALLPART && partialCallType(n).Size() >= maxImplicitStackVarSize { - return true + return "too large for stack" } - if n.Op == OMAKESLICE && !isSmallMakeSlice(n) { - return true + if n.Op == OMAKESLICE { + r := n.Right + if r == nil { + r = n.Left + } + if !smallintconst(r) { + return "non-constant size" + } + if t := n.Type; t.Elem().Width != 0 && r.Int64() >= maxImplicitStackVarSize/t.Elem().Width { + return "too large for stack" + } } - return false + return "" } // addrescapes tags node n as having had its address taken diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go index d79d32ec48..79df584ab1 100644 --- a/src/cmd/compile/internal/gc/escape.go +++ b/src/cmd/compile/internal/gc/escape.go @@ -1051,11 +1051,7 @@ func (e *Escape) newLoc(n *Node, transient bool) *EscLocation { } n.SetOpt(loc) - if mustHeapAlloc(n) { - why := "too large for stack" - if n.Op == OMAKESLICE && (!Isconst(n.Left, CTINT) || (n.Right != nil && !Isconst(n.Right, CTINT))) { - why = "non-constant size" - } + if why := heapAllocReason(n); why != "" { e.flow(e.heapHole().addr(n, why), loc) } } diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 8e45059eab..3fe7c3e089 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -336,19 +336,6 @@ func walkstmt(n *Node) *Node { return n } -func isSmallMakeSlice(n *Node) bool { - if n.Op != OMAKESLICE { - return false - } - r := n.Right - if r == nil { - r = n.Left - } - t := n.Type - - 
return smallintconst(r) && (t.Elem().Width == 0 || r.Int64() < maxImplicitStackVarSize/t.Elem().Width) -} - // walk the whole tree of the body of an // expression or simple statement. // the types expressions are calculated. @@ -1339,8 +1326,8 @@ opswitch: yyerror("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem()) } if n.Esc == EscNone { - if !isSmallMakeSlice(n) { - Fatalf("non-small OMAKESLICE with EscNone: %v", n) + if why := heapAllocReason(n); why != "" { + Fatalf("%v has EscNone, but %v", n, why) } // var arr [r]T // n = arr[:l] diff --git a/test/fixedbugs/issue41635.go b/test/fixedbugs/issue41635.go index b33c1a07e7..35c0034cdd 100644 --- a/test/fixedbugs/issue41635.go +++ b/test/fixedbugs/issue41635.go @@ -7,12 +7,11 @@ package p func f() { // ERROR "" - b1 := make([]byte, 1<<17) // ERROR "too large for stack" "" - b2 := make([]byte, 100, 1<<17) // ERROR "too large for stack" "" - n, m := 100, 200 - b1 = make([]byte, n) // ERROR "non-constant size" "" - b2 = make([]byte, 100, m) // ERROR "non-constant size" "" + _ = make([]byte, 1<<17) // ERROR "too large for stack" "" + _ = make([]byte, 100, 1<<17) // ERROR "too large for stack" "" + _ = make([]byte, n, 1<<17) // ERROR "too large for stack" "" - _, _ = b1, b2 + _ = make([]byte, n) // ERROR "non-constant size" "" + _ = make([]byte, 100, m) // ERROR "non-constant size" "" } From bb48f9925cf541e7b5f4bfafb9d008671c4ace47 Mon Sep 17 00:00:00 2001 From: Joel Sing Date: Tue, 25 Aug 2020 20:19:55 +1000 Subject: [PATCH 103/281] cmd/link: add support for openbsd/mips64 Update #40995 Change-Id: I2cf9b85a960f479eaa59bf58081d03a0467bc2b8 Reviewed-on: https://go-review.googlesource.com/c/go/+/250582 Trust: Joel Sing Run-TryBot: Joel Sing Reviewed-by: Matthew Dempsky TryBot-Result: Go Bot --- src/cmd/link/internal/mips64/obj.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/cmd/link/internal/mips64/obj.go b/src/cmd/link/internal/mips64/obj.go index 
d2dc20f5c1..01d89a209c 100644 --- a/src/cmd/link/internal/mips64/obj.go +++ b/src/cmd/link/internal/mips64/obj.go @@ -60,7 +60,7 @@ func Init() (*sys.Arch, ld.Arch) { Linuxdynld: "/lib64/ld64.so.1", Freebsddynld: "XXX", - Openbsddynld: "XXX", + Openbsddynld: "/usr/libexec/ld.so", Netbsddynld: "XXX", Dragonflydynld: "XXX", Solarisdynld: "XXX", @@ -84,7 +84,8 @@ func archinit(ctxt *ld.Link) { *ld.FlagRound = 16 * 1024 } - case objabi.Hlinux: /* mips64 elf */ + case objabi.Hlinux, /* mips64 elf */ + objabi.Hopenbsd: ld.Elfinit(ctxt) ld.HEADR = ld.ELFRESERVE if *ld.FlagTextAddr == -1 { From 869c02ce1f635960bfc2f06bb52e2b4e17eaa199 Mon Sep 17 00:00:00 2001 From: Elias Naur Date: Wed, 16 Sep 2020 15:23:58 +0200 Subject: [PATCH 104/281] misc/ios: add support for running programs on the iOS simulator Update the README to mention the emulator. Remove reference to gomobile while here; there are multiple ways to develop for iOS today, including using the c-archive buildmode directly. Updates #38485 Change-Id: Iccef75e646ea8e1b9bc3fc37419cc2d6bf3dfdf4 Reviewed-on: https://go-review.googlesource.com/c/go/+/255257 Run-TryBot: Elias Naur TryBot-Result: Go Bot Trust: Elias Naur Reviewed-by: Cherry Zhang --- misc/ios/README | 31 +- misc/ios/clangwrap.sh | 20 +- misc/ios/detect.go | 2 +- .../{go_darwin_arm_exec.go => go_ios_exec.go} | 287 ++++++++++++------ src/cmd/dist/build.go | 2 +- src/iostest.bash | 2 +- src/runtime/cgo/gcc_darwin_arm64.c | 2 +- 7 files changed, 218 insertions(+), 128 deletions(-) rename misc/ios/{go_darwin_arm_exec.go => go_ios_exec.go} (81%) diff --git a/misc/ios/README b/misc/ios/README index d7df191414..433bcdfd8f 100644 --- a/misc/ios/README +++ b/misc/ios/README @@ -1,13 +1,20 @@ Go on iOS ========= -For details on developing Go for iOS on macOS, see the documentation in the mobile -subrepository: +To run the standard library tests, run all.bash as usual, but with the compiler +set to the clang wrapper that invokes clang for iOS. 
For example, this command runs + all.bash on the iOS emulator: - https://github.com/golang/mobile + GOOS=ios GOARCH=amd64 CGO_ENABLED=1 CC_FOR_TARGET=$(pwd)/../misc/ios/clangwrap.sh ./all.bash -It is necessary to set up the environment before running tests or programs directly on a -device. +To use the go tool to run individual programs and tests, put $GOROOT/bin into PATH to ensure +the go_ios_$GOARCH_exec wrapper is found. For example, to run the archive/tar tests: + + export PATH=$GOROOT/bin:$PATH + GOOS=ios GOARCH=amd64 CGO_ENABLED=1 go test archive/tar + +The go_ios_exec wrapper uses GOARCH to select the emulator (amd64) or the device (arm64). +However, further setup is required to run tests or programs directly on a device. First make sure you have a valid developer certificate and have setup your device properly to run apps signed by your developer certificate. Then install the libimobiledevice and @@ -29,18 +36,10 @@ which will output something similar to export GOIOS_TEAM_ID=ZZZZZZZZ If you have multiple devices connected, specify the device UDID with the GOIOS_DEVICE_ID -variable. Use `idevice_id -l` to list all available UDIDs. +variable. Use `idevice_id -l` to list all available UDIDs. Then, setting GOARCH to arm64 +will select the device: -Finally, to run the standard library tests, run all.bash as usual, but with the compiler -set to the clang wrapper that invokes clang for iOS. For example, - - GOARCH=arm64 CGO_ENABLED=1 CC_FOR_TARGET=$(pwd)/../misc/ios/clangwrap.sh ./all.bash - -To use the go tool directly to run programs and tests, put $GOROOT/bin into PATH to ensure -the go_darwin_$GOARCH_exec wrapper is found. 
For example, to run the archive/tar tests - - export PATH=$GOROOT/bin:$PATH - GOARCH=arm64 CGO_ENABLED=1 go test archive/tar + GOOS=ios GOARCH=arm64 CGO_ENABLED=1 CC_FOR_TARGET=$(pwd)/../misc/ios/clangwrap.sh ./all.bash Note that the go_darwin_$GOARCH_exec wrapper uninstalls any existing app identified by the bundle id before installing a new app. If the uninstalled app is the last app by diff --git a/misc/ios/clangwrap.sh b/misc/ios/clangwrap.sh index 1d6dee28a8..dca3fcc904 100755 --- a/misc/ios/clangwrap.sh +++ b/misc/ios/clangwrap.sh @@ -2,17 +2,19 @@ # This uses the latest available iOS SDK, which is recommended. # To select a specific SDK, run 'xcodebuild -showsdks' # to see the available SDKs and replace iphoneos with one of them. -SDK=iphoneos +if [ "$GOARCH" == "arm64" ]; then + SDK=iphoneos + PLATFORM=ios + CLANGARCH="arm64" +else + SDK=iphonesimulator + PLATFORM=ios-simulator + CLANGARCH="x86_64" +fi + SDK_PATH=`xcrun --sdk $SDK --show-sdk-path` export IPHONEOS_DEPLOYMENT_TARGET=5.1 # cmd/cgo doesn't support llvm-gcc-4.2, so we have to use clang. CLANG=`xcrun --sdk $SDK --find clang` -if [ "$GOARCH" == "arm64" ]; then - CLANGARCH="arm64" -else - echo "unknown GOARCH=$GOARCH" >&2 - exit 1 -fi - -exec "$CLANG" -arch $CLANGARCH -isysroot "$SDK_PATH" -mios-version-min=10.0 "$@" +exec "$CLANG" -arch $CLANGARCH -isysroot "$SDK_PATH" -m${PLATFORM}-version-min=10.0 "$@" diff --git a/misc/ios/detect.go b/misc/ios/detect.go index 1d47e47c86..b4651dfbb8 100644 --- a/misc/ios/detect.go +++ b/misc/ios/detect.go @@ -6,7 +6,7 @@ // detect attempts to autodetect the correct // values of the environment variables -// used by go_darwin_arm_exec. +// used by go_io_exec. // detect shells out to ideviceinfo, a third party program that can // be obtained by following the instructions at // https://github.com/libimobiledevice/libimobiledevice. 
diff --git a/misc/ios/go_darwin_arm_exec.go b/misc/ios/go_ios_exec.go similarity index 81% rename from misc/ios/go_darwin_arm_exec.go rename to misc/ios/go_ios_exec.go index cdf4b07d0a..063c19ec58 100644 --- a/misc/ios/go_darwin_arm_exec.go +++ b/misc/ios/go_ios_exec.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// This program can be used as go_darwin_arm_exec by the Go tool. +// This program can be used as go_ios_$GOARCH_exec by the Go tool. // It executes binaries on an iOS device using the XCode toolchain // and the ios-deploy program: https://github.com/phonegap/ios-deploy // @@ -34,6 +34,7 @@ import ( "os/signal" "path/filepath" "runtime" + "strconv" "strings" "syscall" "time" @@ -66,26 +67,8 @@ func main() { log.Fatal("usage: go_darwin_arm_exec a.out") } - // e.g. B393DDEB490947F5A463FD074299B6C0AXXXXXXX - devID = getenv("GOIOS_DEV_ID") - - // e.g. Z8B3JBXXXX.org.golang.sample, Z8B3JBXXXX prefix is available at - // https://developer.apple.com/membercenter/index.action#accountSummary as Team ID. - appID = getenv("GOIOS_APP_ID") - - // e.g. Z8B3JBXXXX, available at - // https://developer.apple.com/membercenter/index.action#accountSummary as Team ID. - teamID = getenv("GOIOS_TEAM_ID") - - // Device IDs as listed with ios-deploy -c. 
- deviceID = os.Getenv("GOIOS_DEVICE_ID") - - parts := strings.SplitN(appID, ".", 2) // For compatibility with the old builders, use a fallback bundle ID bundleID = "golang.gotest" - if len(parts) == 2 { - bundleID = parts[1] - } exitCode, err := runMain() if err != nil { @@ -126,28 +109,12 @@ func runMain() (int, error) { return 1, err } - if err := uninstall(bundleID); err != nil { - return 1, err + if goarch := os.Getenv("GOARCH"); goarch == "arm64" { + err = runOnDevice(appdir) + } else { + err = runOnSimulator(appdir) } - - if err := install(appdir); err != nil { - return 1, err - } - - if err := mountDevImage(); err != nil { - return 1, err - } - - // Kill any hanging debug bridges that might take up port 3222. - exec.Command("killall", "idevicedebugserverproxy").Run() - - closer, err := startDebugBridge() if err != nil { - return 1, err - } - defer closer() - - if err := run(appdir, bundleID, os.Args[2:]); err != nil { // If the lldb driver completed with an exit code, use that. if err, ok := err.(*exec.ExitError); ok { if ws, ok := err.Sys().(interface{ ExitStatus() int }); ok { @@ -159,6 +126,62 @@ func runMain() (int, error) { return 0, nil } +func runOnSimulator(appdir string) error { + if err := installSimulator(appdir); err != nil { + return err + } + + return runSimulator(appdir, bundleID, os.Args[2:]) +} + +func runOnDevice(appdir string) error { + // e.g. B393DDEB490947F5A463FD074299B6C0AXXXXXXX + devID = getenv("GOIOS_DEV_ID") + + // e.g. Z8B3JBXXXX.org.golang.sample, Z8B3JBXXXX prefix is available at + // https://developer.apple.com/membercenter/index.action#accountSummary as Team ID. + appID = getenv("GOIOS_APP_ID") + + // e.g. Z8B3JBXXXX, available at + // https://developer.apple.com/membercenter/index.action#accountSummary as Team ID. + teamID = getenv("GOIOS_TEAM_ID") + + // Device IDs as listed with ios-deploy -c. 
+ deviceID = os.Getenv("GOIOS_DEVICE_ID") + + parts := strings.SplitN(appID, ".", 2) + if len(parts) == 2 { + bundleID = parts[1] + } + + if err := signApp(appdir); err != nil { + return err + } + + if err := uninstallDevice(bundleID); err != nil { + return err + } + + if err := installDevice(appdir); err != nil { + return err + } + + if err := mountDevImage(); err != nil { + return err + } + + // Kill any hanging debug bridges that might take up port 3222. + exec.Command("killall", "idevicedebugserverproxy").Run() + + closer, err := startDebugBridge() + if err != nil { + return err + } + defer closer() + + return runDevice(appdir, bundleID, os.Args[2:]) +} + func getenv(envvar string) string { s := os.Getenv(envvar) if s == "" { @@ -191,7 +214,11 @@ func assembleApp(appdir, bin string) error { if err := ioutil.WriteFile(filepath.Join(appdir, "ResourceRules.plist"), []byte(resourceRules), 0744); err != nil { return err } + return nil +} +func signApp(appdir string) error { + entitlementsPath := filepath.Join(tmpdir, "Entitlements.plist") cmd := exec.Command( "codesign", "-f", @@ -421,7 +448,20 @@ func parsePlistDict(dict []byte) (map[string]string, error) { return values, nil } -func uninstall(bundleID string) error { +func installSimulator(appdir string) error { + cmd := exec.Command( + "xcrun", "simctl", "install", + "booted", // Install to the booted simulator. 
+ appdir, + ) + if out, err := cmd.CombinedOutput(); err != nil { + os.Stderr.Write(out) + return fmt.Errorf("xcrun simctl install booted %q: %v", appdir, err) + } + return nil +} + +func uninstallDevice(bundleID string) error { cmd := idevCmd(exec.Command( "ideviceinstaller", "-U", bundleID, @@ -433,7 +473,7 @@ func uninstall(bundleID string) error { return nil } -func install(appdir string) error { +func installDevice(appdir string) error { attempt := 0 for { cmd := idevCmd(exec.Command( @@ -464,15 +504,28 @@ func idevCmd(cmd *exec.Cmd) *exec.Cmd { return cmd } -func run(appdir, bundleID string, args []string) error { - var env []string - for _, e := range os.Environ() { - // Don't override TMPDIR, HOME, GOCACHE on the device. - if strings.HasPrefix(e, "TMPDIR=") || strings.HasPrefix(e, "HOME=") || strings.HasPrefix(e, "GOCACHE=") { - continue - } - env = append(env, e) +func runSimulator(appdir, bundleID string, args []string) error { + cmd := exec.Command( + "xcrun", "simctl", "launch", + "--wait-for-debugger", + "booted", + bundleID, + ) + out, err := cmd.CombinedOutput() + if err != nil { + os.Stderr.Write(out) + return fmt.Errorf("xcrun simctl launch booted %q: %v", bundleID, err) } + var processID int + var ignore string + if _, err := fmt.Sscanf(string(out), "%s %d", &ignore, &processID); err != nil { + return fmt.Errorf("runSimulator: couldn't find processID from `simctl launch`: %v (%q)", err, out) + } + _, err = runLLDB("ios-simulator", appdir, strconv.Itoa(processID), args) + return err +} + +func runDevice(appdir, bundleID string, args []string) error { attempt := 0 for { // The device app path reported by the device might be stale, so retry @@ -487,37 +540,10 @@ func run(appdir, bundleID string, args []string) error { time.Sleep(5 * time.Second) continue } - lldb := exec.Command( - "python", - "-", // Read script from stdin. - appdir, - deviceapp, - ) - lldb.Args = append(lldb.Args, args...) 
- lldb.Env = env - lldb.Stdin = strings.NewReader(lldbDriver) - lldb.Stdout = os.Stdout - var out bytes.Buffer - lldb.Stderr = io.MultiWriter(&out, os.Stderr) - err = lldb.Start() - if err == nil { - // Forward SIGQUIT to the lldb driver which in turn will forward - // to the running program. - sigs := make(chan os.Signal, 1) - signal.Notify(sigs, syscall.SIGQUIT) - proc := lldb.Process - go func() { - for sig := range sigs { - proc.Signal(sig) - } - }() - err = lldb.Wait() - signal.Stop(sigs) - close(sigs) - } + out, err := runLLDB("remote-ios", appdir, deviceapp, args) // If the program was not started it can be retried without papering over // real test failures. - started := bytes.HasPrefix(out.Bytes(), []byte("lldb: running program")) + started := bytes.HasPrefix(out, []byte("lldb: running program")) if started || err == nil || attempt == 5 { return err } @@ -528,6 +554,47 @@ func run(appdir, bundleID string, args []string) error { } } +func runLLDB(target, appdir, deviceapp string, args []string) ([]byte, error) { + var env []string + for _, e := range os.Environ() { + // Don't override TMPDIR, HOME, GOCACHE on the device. + if strings.HasPrefix(e, "TMPDIR=") || strings.HasPrefix(e, "HOME=") || strings.HasPrefix(e, "GOCACHE=") { + continue + } + env = append(env, e) + } + lldb := exec.Command( + "python", + "-", // Read script from stdin. + target, + appdir, + deviceapp, + ) + lldb.Args = append(lldb.Args, args...) + lldb.Env = env + lldb.Stdin = strings.NewReader(lldbDriver) + lldb.Stdout = os.Stdout + var out bytes.Buffer + lldb.Stderr = io.MultiWriter(&out, os.Stderr) + err := lldb.Start() + if err == nil { + // Forward SIGQUIT to the lldb driver which in turn will forward + // to the running program. 
+ sigs := make(chan os.Signal, 1) + signal.Notify(sigs, syscall.SIGQUIT) + proc := lldb.Process + go func() { + for sig := range sigs { + proc.Signal(sig) + } + }() + err = lldb.Wait() + signal.Stop(sigs) + close(sigs) + } + return out.Bytes(), err +} + func copyLocalDir(dst, src string) error { if err := os.Mkdir(dst, 0755); err != nil { return err @@ -679,6 +746,7 @@ func infoPlist(pkgpath string) string { CFBundleSupportedPlatformsiPhoneOS CFBundleExecutablegotest CFBundleVersion1.0 +CFBundleShortVersionString1.0 CFBundleIdentifier` + bundleID + ` CFBundleResourceSpecificationResourceRules.plist LSRequiresIPhoneOS @@ -739,7 +807,7 @@ import sys import os import signal -exe, device_exe, args = sys.argv[1], sys.argv[2], sys.argv[3:] +platform, exe, device_exe_or_pid, args = sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4:] env = [] for k, v in os.environ.items(): @@ -754,17 +822,21 @@ debugger.SetAsync(True) debugger.SkipLLDBInitFiles(True) err = lldb.SBError() -target = debugger.CreateTarget(exe, None, 'remote-ios', True, err) +target = debugger.CreateTarget(exe, None, platform, True, err) if not target.IsValid() or not err.Success(): sys.stderr.write("lldb: failed to setup up target: %s\n" % (err)) sys.exit(1) -target.modules[0].SetPlatformFileSpec(lldb.SBFileSpec(device_exe)) - listener = debugger.GetListener() -process = target.ConnectRemote(listener, 'connect://localhost:3222', None, err) + +if platform == 'remote-ios': + target.modules[0].SetPlatformFileSpec(lldb.SBFileSpec(device_exe_or_pid)) + process = target.ConnectRemote(listener, 'connect://localhost:3222', None, err) +else: + process = target.AttachToProcessWithID(listener, int(device_exe_or_pid), err) + if not err.Success(): - sys.stderr.write("lldb: failed to connect to remote target: %s\n" % (err)) + sys.stderr.write("lldb: failed to connect to remote target %s: %s\n" % (device_exe_or_pid, err)) sys.exit(1) # Don't stop on signals. 
@@ -777,6 +849,25 @@ for i in range(0, sigs.GetNumSignals()): event = lldb.SBEvent() running = False prev_handler = None + +def signal_handler(signal, frame): + process.Signal(signal) + +def run_program(): + # Forward SIGQUIT to the program. + prev_handler = signal.signal(signal.SIGQUIT, signal_handler) + # Tell the Go driver that the program is running and should not be retried. + sys.stderr.write("lldb: running program\n") + running = True + # Process is stopped at attach/launch. Let it run. + process.Continue() + +if platform != 'remote-ios': + # For the local emulator the program is ready to run. + # For remote device runs, we need to wait for eStateConnected, + # below. + run_program() + while True: if not listener.WaitForEvent(1, event): continue @@ -800,24 +891,22 @@ while True: signal.signal(signal.SIGQUIT, prev_handler) break elif state == lldb.eStateConnected: - process.RemoteLaunch(args, env, None, None, None, None, 0, False, err) - if not err.Success(): - sys.stderr.write("lldb: failed to launch remote process: %s\n" % (err)) - process.Kill() - debugger.Terminate() - sys.exit(1) - # Forward SIGQUIT to the program. - def signal_handler(signal, frame): - process.Signal(signal) - prev_handler = signal.signal(signal.SIGQUIT, signal_handler) - # Tell the Go driver that the program is running and should not be retried. - sys.stderr.write("lldb: running program\n") - running = True - # Process stops once at the beginning. Continue. - process.Continue() + if platform == 'remote-ios': + process.RemoteLaunch(args, env, None, None, None, None, 0, False, err) + if not err.Success(): + sys.stderr.write("lldb: failed to launch remote process: %s\n" % (err)) + process.Kill() + debugger.Terminate() + sys.exit(1) + run_program() exitStatus = process.GetExitStatus() +exitDesc = process.GetExitDescription() process.Kill() debugger.Terminate() +if exitStatus == 0 and exitDesc is not None: + # Ensure tests fail when killed by a signal. 
+ exitStatus = 123 + sys.exit(exitStatus) ` diff --git a/src/cmd/dist/build.go b/src/cmd/dist/build.go index 5d62c1e8fa..3b3eb113b1 100644 --- a/src/cmd/dist/build.go +++ b/src/cmd/dist/build.go @@ -1453,7 +1453,7 @@ func wrapperPathFor(goos, goarch string) string { } case (goos == "darwin" || goos == "ios") && goarch == "arm64": if gohostos != "darwin" || gohostarch != "arm64" { - return pathf("%s/misc/ios/go_darwin_arm_exec.go", goroot) + return pathf("%s/misc/ios/go_ios_exec.go", goroot) } } return "" diff --git a/src/iostest.bash b/src/iostest.bash index 5fa6744979..33b8c101ff 100755 --- a/src/iostest.bash +++ b/src/iostest.bash @@ -38,7 +38,7 @@ if [ "$1" = "-restart" ]; then sleep 30 # Poll until the device has restarted. until idevicediagnostics $IDEVARGS diagnostics; do - # TODO(crawshaw): replace with a test app using go_darwin_arm_exec. + # TODO(crawshaw): replace with a test app using go_ios_exec. echo "waiting for idevice to come online" sleep 10 done diff --git a/src/runtime/cgo/gcc_darwin_arm64.c b/src/runtime/cgo/gcc_darwin_arm64.c index fd7d4084c9..9ea43ae4af 100644 --- a/src/runtime/cgo/gcc_darwin_arm64.c +++ b/src/runtime/cgo/gcc_darwin_arm64.c @@ -131,7 +131,7 @@ init_working_dir() fprintf(stderr, "runtime/cgo: chdir(%s) failed\n", dir); } - // The test harness in go_darwin_arm_exec passes the relative working directory + // The test harness in go_ios_exec passes the relative working directory // in the GoExecWrapperWorkingDirectory property of the app bundle. CFStringRef wd_ref = CFBundleGetValueForInfoDictionaryKey(bundle, CFSTR("GoExecWrapperWorkingDirectory")); if (wd_ref != NULL) { From 6f2c92e1e11b61dfede9ce861f2fe6217b5ce130 Mon Sep 17 00:00:00 2001 From: Elias Naur Date: Sat, 3 Oct 2020 11:27:04 +0200 Subject: [PATCH 105/281] iostest.bash: remove There are no tethered iOS builders left, and should they appear in the future, they should use all.bash. 
Change-Id: I3217789514ffa725e4d2584e4991d899c5fda995 Reviewed-on: https://go-review.googlesource.com/c/go/+/259278 Trust: Elias Naur Reviewed-by: Dmitri Shuralyov --- src/iostest.bash | 66 ------------------------------------------------ 1 file changed, 66 deletions(-) delete mode 100755 src/iostest.bash diff --git a/src/iostest.bash b/src/iostest.bash deleted file mode 100755 index 33b8c101ff..0000000000 --- a/src/iostest.bash +++ /dev/null @@ -1,66 +0,0 @@ -#!/usr/bin/env bash -# Copyright 2015 The Go Authors. All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - -# For testing darwin/arm64 on iOS. - -set -e -ulimit -c 0 # no core files - -if [ ! -f make.bash ]; then - echo 'iostest.bash must be run from $GOROOT/src' 1>&2 - exit 1 -fi - -if [ -z $GOOS ]; then - export GOOS=darwin -fi -if [ "$GOOS" != "darwin" ]; then - echo "iostest.bash requires GOOS=darwin, got GOOS=$GOOS" 1>&2 - exit 1 -fi -if [ "$GOARCH" != "arm64" ]; then - echo "iostest.bash requires GOARCH=arm64, got GOARCH=$GOARCH" 1>&2 - exit 1 -fi - -if [ "$1" = "-restart" ]; then - # Reboot to make sure previous runs do not interfere with the current run. - # It is reasonably easy for a bad program leave an iOS device in an - # almost unusable state. - IDEVARGS= - if [ -n "$GOIOS_DEVICE_ID" ]; then - IDEVARGS="-u $GOIOS_DEVICE_ID" - fi - idevicediagnostics $IDEVARGS restart - # Initial sleep to make sure we are restarting before we start polling. - sleep 30 - # Poll until the device has restarted. - until idevicediagnostics $IDEVARGS diagnostics; do - # TODO(crawshaw): replace with a test app using go_ios_exec. - echo "waiting for idevice to come online" - sleep 10 - done - # Diagnostics are reported during boot before the device can start an - # app. Wait a little longer before trying to use the device. 
- sleep 30 -fi - -unset GOBIN -export GOROOT=$(dirname $(pwd)) -export PATH=$GOROOT/bin:$PATH -export CGO_ENABLED=1 -export CC_FOR_TARGET=$GOROOT/misc/ios/clangwrap.sh - -# Run the build for the host bootstrap, so we can build detect.go. -# Also lets us fail early before the (slow) ios-deploy if the build is broken. -./make.bash - -if [ "$GOIOS_DEV_ID" = "" ]; then - echo "detecting iOS development identity" - eval $(GOOS=$GOHOSTOS GOARCH=$GOHOSTARCH go run ../misc/ios/detect.go) -fi - -# Run standard tests. -bash run.bash --no-rebuild From 059ca6185c19404e991cc7714b1df047fd78785f Mon Sep 17 00:00:00 2001 From: Cherry Zhang Date: Fri, 2 Oct 2020 21:57:24 -0400 Subject: [PATCH 106/281] cmd/dist: detect gohostarch on ios/arm64 Add a case for gohostos == "ios" along with "darwin". Updates #38485. Change-Id: Ic7310e6c97d405f78a5e5db1a639860455e61327 Reviewed-on: https://go-review.googlesource.com/c/go/+/259337 Trust: Cherry Zhang Run-TryBot: Cherry Zhang TryBot-Result: Go Bot Reviewed-by: Dmitri Shuralyov --- src/cmd/dist/main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cmd/dist/main.go b/src/cmd/dist/main.go index 224b6c0c3e..37fc522356 100644 --- a/src/cmd/dist/main.go +++ b/src/cmd/dist/main.go @@ -129,7 +129,7 @@ func main() { gohostarch = "riscv64" case strings.Contains(out, "s390x"): gohostarch = "s390x" - case gohostos == "darwin": + case gohostos == "darwin", gohostos == "ios": if strings.Contains(run("", CheckExit, "uname", "-v"), "RELEASE_ARM64_") { gohostarch = "arm64" } From 39d562ecea74bb41aa8fbb9d016fa64165e84bb3 Mon Sep 17 00:00:00 2001 From: Elias Naur Date: Mon, 5 Oct 2020 17:51:54 +0200 Subject: [PATCH 107/281] misc/ios: fixup review comments from CL 255257 Change-Id: I247fc9e0e26e706e6af07367f953eaa1b7e544c1 Reviewed-on: https://go-review.googlesource.com/c/go/+/259577 Trust: Elias Naur Run-TryBot: Elias Naur Reviewed-by: Dmitri Shuralyov TryBot-Result: Go Bot --- misc/ios/detect.go | 2 +- misc/ios/go_ios_exec.go | 8 
++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/misc/ios/detect.go b/misc/ios/detect.go index b4651dfbb8..d32bcc3202 100644 --- a/misc/ios/detect.go +++ b/misc/ios/detect.go @@ -6,7 +6,7 @@ // detect attempts to autodetect the correct // values of the environment variables -// used by go_io_exec. +// used by go_ios_exec. // detect shells out to ideviceinfo, a third party program that can // be obtained by following the instructions at // https://github.com/libimobiledevice/libimobiledevice. diff --git a/misc/ios/go_ios_exec.go b/misc/ios/go_ios_exec.go index 063c19ec58..0acf1b259c 100644 --- a/misc/ios/go_ios_exec.go +++ b/misc/ios/go_ios_exec.go @@ -59,12 +59,12 @@ var lock *os.File func main() { log.SetFlags(0) - log.SetPrefix("go_darwin_arm_exec: ") + log.SetPrefix("go_ios_exec: ") if debug { log.Println(strings.Join(os.Args, " ")) } if len(os.Args) < 2 { - log.Fatal("usage: go_darwin_arm_exec a.out") + log.Fatal("usage: go_ios_exec a.out") } // For compatibility with the old builders, use a fallback bundle ID @@ -79,7 +79,7 @@ func main() { func runMain() (int, error) { var err error - tmpdir, err = ioutil.TempDir("", "go_darwin_arm_exec_") + tmpdir, err = ioutil.TempDir("", "go_ios_exec_") if err != nil { return 1, err } @@ -100,7 +100,7 @@ func runMain() (int, error) { // // The lock file is never deleted, to avoid concurrent locks on distinct // files with the same path. - lockName := filepath.Join(os.TempDir(), "go_darwin_arm_exec-"+deviceID+".lock") + lockName := filepath.Join(os.TempDir(), "go_ios_exec-"+deviceID+".lock") lock, err = os.OpenFile(lockName, os.O_CREATE|os.O_RDONLY, 0666) if err != nil { return 1, err From e70bbc702f093ab2d5e305ddb33b8dca2baf8104 Mon Sep 17 00:00:00 2001 From: Roland Bracewell Shoemaker Date: Mon, 5 Oct 2020 15:46:23 +0000 Subject: [PATCH 108/281] encoding/asn1: clarify use of SET suffix This change clarifies the usage of the SET type name suffix. 
Previously the documentation was somewhat confusing about where the suffix should be used, and when used what it applied to. For instance the previous language could be interpreted such that []exampleSET would be parsed as a SEQUENCE OF SET, which is incorrect as the SET suffix only applies to slice types, such as type exampleSET []struct{} which is parsed as a SET OF SEQUENCE. Change-Id: I74201d9969f931f69391c236559f66cb460569ec GitHub-Last-Rev: d0d2ddc587df4564a265c800efb9d8e204002624 GitHub-Pull-Request: golang/go#38543 Reviewed-on: https://go-review.googlesource.com/c/go/+/229078 Trust: Roland Shoemaker Reviewed-by: Filippo Valsorda --- src/encoding/asn1/asn1.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/encoding/asn1/asn1.go b/src/encoding/asn1/asn1.go index fa3d4e327b..068594e2a1 100644 --- a/src/encoding/asn1/asn1.go +++ b/src/encoding/asn1/asn1.go @@ -1086,9 +1086,10 @@ func setDefaultValue(v reflect.Value, params fieldParameters) (ok bool) { // If the type of the first field of a structure is RawContent then the raw // ASN1 contents of the struct will be stored in it. // -// If the type name of a slice element ends with "SET" then it's treated as if -// the "set" tag was set on it. This can be used with nested slices where a -// struct tag cannot be given. +// If the name of a slice type ends with "SET" then it's treated as if +// the "set" tag was set on it. This results in interpreting the type as a +// SET OF x rather than a SEQUENCE OF x. This can be used with nested slices +// where a struct tag cannot be given. // // Other ASN.1 types are not supported; if it encounters them, // Unmarshal returns a parse error. From 9dc65d7dc9268d5150174ec55cc4753fe18f554c Mon Sep 17 00:00:00 2001 From: Austin Clements Date: Sat, 3 Oct 2020 16:44:22 -0400 Subject: [PATCH 109/281] runtime: correct signature of call16 The signature of call16 is currently missing the "typ" parameter. This CL fixes this. 
This wasn't caught by vet because call16 is defined by macro expansion (see #17544), and we didn't notice the mismatch with the other call* functions because call16 is defined only on 32-bit architectures and lives alone in stubs32.go. Unfortunately, this means its GC signature is also wrong: the "arg" parameter is treated as a scalar rather than a pointer, so GC won't trace it and stack copying won't adjust it. This turns out to matter in exactly one case right now: on 32-bit architectures (which are the only architectures where call16 is defined), a stack-allocated defer of a function with a 16-byte or smaller argument frame including a non-empty result area can corrupt memory if the deferred function grows the stack and is invoked during a panic. Whew. All other current uses of reflectcall pass a heap-allocated "arg" frame (which happens to be reachable from other stack roots, so tracing isn't a problem). Curiously, in 2016, the signatures of all call* functions were wrong in exactly this way. CL 31654 fixed all of them in stubs.go, but missed the one in stubs32.go. Fixes #41795. Change-Id: I31e3c0df201f79ee5707eeb8dc4ff0d13fc10ada Reviewed-on: https://go-review.googlesource.com/c/go/+/259338 Trust: Austin Clements Run-TryBot: Austin Clements TryBot-Result: Go Bot Reviewed-by: Cherry Zhang --- src/runtime/stubs32.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/runtime/stubs32.go b/src/runtime/stubs32.go index a7f52f6b9e..c4715fe989 100644 --- a/src/runtime/stubs32.go +++ b/src/runtime/stubs32.go @@ -11,4 +11,4 @@ import "unsafe" // Declarations for runtime services implemented in C or assembly that // are only present on 32 bit systems. 
-func call16(fn, arg unsafe.Pointer, n, retoffset uint32) +func call16(typ, fn, arg unsafe.Pointer, n, retoffset uint32) From 40bff82885b8de850f909f38357c53670562f815 Mon Sep 17 00:00:00 2001 From: Austin Clements Date: Sat, 3 Oct 2020 20:40:49 -0400 Subject: [PATCH 110/281] runtime: define and use call16 everywhere Currently, runtime.call16 is defined and used only on 32-bit architectures, while 64-bit architectures all start at call32 and go up from there. This led to unnecessary complexity because call16's prototype needed to be in a different file, separate from all of the other call* prototypes, which in turn led to it getting out of sync with the other call* prototypes. This CL adds call16 on 64-bit architectures, bringing them all into sync, and moves the call16 prototype to live with the others. Prior to CL 31655 (in 2016), call16 couldn't be implemented on 64-bit architectures because it needed at least four words of argument space to invoke "callwritebarrier" after copying back the results. CL 31655 changed the way call* invoked the write barrier in preparation for the hybrid barrier; since the hybrid barrier had to be invoked prior to copying back results, it needed a different solution that didn't reuse call*'s stack space. At this point, call16 was no longer a problem on 64-bit, but we never added it. Until now. 
Change-Id: Id10ade0e4f75c6ea76afa6229ddaee2b994c27dd Reviewed-on: https://go-review.googlesource.com/c/go/+/259339 Trust: Austin Clements Reviewed-by: Cherry Zhang --- src/runtime/asm_amd64.s | 2 ++ src/runtime/asm_arm64.s | 2 ++ src/runtime/asm_mips64x.s | 1 + src/runtime/asm_ppc64x.s | 2 ++ src/runtime/asm_riscv64.s | 1 + src/runtime/asm_s390x.s | 2 ++ src/runtime/asm_wasm.s | 2 ++ src/runtime/stubs.go | 1 + src/runtime/stubs32.go | 14 -------------- 9 files changed, 13 insertions(+), 14 deletions(-) delete mode 100644 src/runtime/stubs32.go diff --git a/src/runtime/asm_amd64.s b/src/runtime/asm_amd64.s index fa25c55b96..256f4112cd 100644 --- a/src/runtime/asm_amd64.s +++ b/src/runtime/asm_amd64.s @@ -470,6 +470,7 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT,$0 TEXT ·reflectcall(SB), NOSPLIT, $0-32 MOVLQZX argsize+24(FP), CX + DISPATCH(runtime·call16, 16) DISPATCH(runtime·call32, 32) DISPATCH(runtime·call64, 64) DISPATCH(runtime·call128, 128) @@ -537,6 +538,7 @@ TEXT callRet<>(SB), NOSPLIT, $32-0 CALL runtime·reflectcallmove(SB) RET +CALLFN(·call16, 16) CALLFN(·call32, 32) CALLFN(·call64, 64) CALLFN(·call128, 128) diff --git a/src/runtime/asm_arm64.s b/src/runtime/asm_arm64.s index 6b3d1e779e..5eda3063d7 100644 --- a/src/runtime/asm_arm64.s +++ b/src/runtime/asm_arm64.s @@ -331,6 +331,7 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0 TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-32 MOVWU argsize+24(FP), R16 + DISPATCH(runtime·call16, 16) DISPATCH(runtime·call32, 32) DISPATCH(runtime·call64, 64) DISPATCH(runtime·call128, 128) @@ -416,6 +417,7 @@ TEXT callRet<>(SB), NOSPLIT, $40-0 // These have 8 added to make the overall frame size a multiple of 16, // as required by the ABI. (There is another +8 for the saved LR.) 
+CALLFN(·call16, 24 ) CALLFN(·call32, 40 ) CALLFN(·call64, 72 ) CALLFN(·call128, 136 ) diff --git a/src/runtime/asm_mips64x.s b/src/runtime/asm_mips64x.s index 7330f40e85..0ff1b24225 100644 --- a/src/runtime/asm_mips64x.s +++ b/src/runtime/asm_mips64x.s @@ -294,6 +294,7 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0 TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-32 MOVWU argsize+24(FP), R1 + DISPATCH(runtime·call16, 16) DISPATCH(runtime·call32, 32) DISPATCH(runtime·call64, 64) DISPATCH(runtime·call128, 128) diff --git a/src/runtime/asm_ppc64x.s b/src/runtime/asm_ppc64x.s index 23387a2165..603058a61b 100644 --- a/src/runtime/asm_ppc64x.s +++ b/src/runtime/asm_ppc64x.s @@ -372,6 +372,7 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0 TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-32 MOVWZ argsize+24(FP), R3 + DISPATCH(runtime·call16, 16) DISPATCH(runtime·call32, 32) DISPATCH(runtime·call64, 64) DISPATCH(runtime·call128, 128) @@ -478,6 +479,7 @@ TEXT callRet<>(SB), NOSPLIT, $32-0 BL runtime·reflectcallmove(SB) RET +CALLFN(·call16, 16) CALLFN(·call32, 32) CALLFN(·call64, 64) CALLFN(·call128, 128) diff --git a/src/runtime/asm_riscv64.s b/src/runtime/asm_riscv64.s index 8f6c8773eb..4084ced7f8 100644 --- a/src/runtime/asm_riscv64.s +++ b/src/runtime/asm_riscv64.s @@ -342,6 +342,7 @@ TEXT reflect·call(SB), NOSPLIT, $0-0 // func reflectcall(argtype *_type, fn, arg unsafe.Pointer, argsize uint32, retoffset uint32) TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-32 MOVWU argsize+24(FP), T0 + DISPATCH(runtime·call16, 16) DISPATCH(runtime·call32, 32) DISPATCH(runtime·call64, 64) DISPATCH(runtime·call128, 128) diff --git a/src/runtime/asm_s390x.s b/src/runtime/asm_s390x.s index cb39451faa..46a434119b 100644 --- a/src/runtime/asm_s390x.s +++ b/src/runtime/asm_s390x.s @@ -383,6 +383,7 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0 TEXT ·reflectcall(SB), NOSPLIT, $-8-32 MOVWZ argsize+24(FP), R3 + DISPATCH(runtime·call16, 16) DISPATCH(runtime·call32, 32) 
DISPATCH(runtime·call64, 64) DISPATCH(runtime·call128, 128) @@ -461,6 +462,7 @@ TEXT callRet<>(SB), NOSPLIT, $32-0 BL runtime·reflectcallmove(SB) RET +CALLFN(·call16, 16) CALLFN(·call32, 32) CALLFN(·call64, 64) CALLFN(·call128, 128) diff --git a/src/runtime/asm_wasm.s b/src/runtime/asm_wasm.s index 7d88beb537..1275af136b 100644 --- a/src/runtime/asm_wasm.s +++ b/src/runtime/asm_wasm.s @@ -308,6 +308,7 @@ TEXT ·reflectcall(SB), NOSPLIT, $0-32 MOVW argsize+24(FP), R0 + DISPATCH(runtime·call16, 16) DISPATCH(runtime·call32, 32) DISPATCH(runtime·call64, 64) DISPATCH(runtime·call128, 128) @@ -398,6 +399,7 @@ TEXT callRet<>(SB), NOSPLIT, $32-0 CALL runtime·reflectcallmove(SB) RET +CALLFN(·call16, 16) CALLFN(·call32, 32) CALLFN(·call64, 64) CALLFN(·call128, 128) diff --git a/src/runtime/stubs.go b/src/runtime/stubs.go index b891a12fdd..bd2514e862 100644 --- a/src/runtime/stubs.go +++ b/src/runtime/stubs.go @@ -271,6 +271,7 @@ func return0() // in asm_*.s // not called directly; definitions here supply type information for traceback. +func call16(typ, fn, arg unsafe.Pointer, n, retoffset uint32) func call32(typ, fn, arg unsafe.Pointer, n, retoffset uint32) func call64(typ, fn, arg unsafe.Pointer, n, retoffset uint32) func call128(typ, fn, arg unsafe.Pointer, n, retoffset uint32) diff --git a/src/runtime/stubs32.go b/src/runtime/stubs32.go deleted file mode 100644 index c4715fe989..0000000000 --- a/src/runtime/stubs32.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build 386 arm mips mipsle - -package runtime - -import "unsafe" - -// Declarations for runtime services implemented in C or assembly that -// are only present on 32 bit systems. 
- -func call16(typ, fn, arg unsafe.Pointer, n, retoffset uint32) From a517c3422e808ae51533a0700e05d59e8a799136 Mon Sep 17 00:00:00 2001 From: Austin Clements Date: Mon, 5 Oct 2020 12:17:30 -0400 Subject: [PATCH 111/281] runtime: clean up runtime.call* frame sizes on ARM64 ARM64 used to require that all assembly frame sizes were of the form 16*N+8 because ARM64 requires 16-byte SP alignment and the assembler added an 8 byte LR slot. This made all of the runtime.call* frame sizes wonky. The assembler now rounds up the frame size appropriately after adding any additional slots it needs, so this is no longer necessary. This CL cleans up the frame sizes of these functions so they look the way you'd expect and match all other architectures. Change-Id: I47819092296b8983c43eadf2e66c7c1e0d518555 Reviewed-on: https://go-review.googlesource.com/c/go/+/259448 Trust: Austin Clements Reviewed-by: Cherry Zhang --- src/runtime/asm_arm64.s | 56 ++++++++++++++++++++--------------------- 1 file changed, 27 insertions(+), 29 deletions(-) diff --git a/src/runtime/asm_arm64.s b/src/runtime/asm_arm64.s index 5eda3063d7..1f46d1962c 100644 --- a/src/runtime/asm_arm64.s +++ b/src/runtime/asm_arm64.s @@ -415,35 +415,33 @@ TEXT callRet<>(SB), NOSPLIT, $40-0 BL runtime·reflectcallmove(SB) RET -// These have 8 added to make the overall frame size a multiple of 16, -// as required by the ABI. (There is another +8 for the saved LR.) 
-CALLFN(·call16, 24 ) -CALLFN(·call32, 40 ) -CALLFN(·call64, 72 ) -CALLFN(·call128, 136 ) -CALLFN(·call256, 264 ) -CALLFN(·call512, 520 ) -CALLFN(·call1024, 1032 ) -CALLFN(·call2048, 2056 ) -CALLFN(·call4096, 4104 ) -CALLFN(·call8192, 8200 ) -CALLFN(·call16384, 16392 ) -CALLFN(·call32768, 32776 ) -CALLFN(·call65536, 65544 ) -CALLFN(·call131072, 131080 ) -CALLFN(·call262144, 262152 ) -CALLFN(·call524288, 524296 ) -CALLFN(·call1048576, 1048584 ) -CALLFN(·call2097152, 2097160 ) -CALLFN(·call4194304, 4194312 ) -CALLFN(·call8388608, 8388616 ) -CALLFN(·call16777216, 16777224 ) -CALLFN(·call33554432, 33554440 ) -CALLFN(·call67108864, 67108872 ) -CALLFN(·call134217728, 134217736 ) -CALLFN(·call268435456, 268435464 ) -CALLFN(·call536870912, 536870920 ) -CALLFN(·call1073741824, 1073741832 ) +CALLFN(·call16, 16) +CALLFN(·call32, 32) +CALLFN(·call64, 64) +CALLFN(·call128, 128) +CALLFN(·call256, 256) +CALLFN(·call512, 512) +CALLFN(·call1024, 1024) +CALLFN(·call2048, 2048) +CALLFN(·call4096, 4096) +CALLFN(·call8192, 8192) +CALLFN(·call16384, 16384) +CALLFN(·call32768, 32768) +CALLFN(·call65536, 65536) +CALLFN(·call131072, 131072) +CALLFN(·call262144, 262144) +CALLFN(·call524288, 524288) +CALLFN(·call1048576, 1048576) +CALLFN(·call2097152, 2097152) +CALLFN(·call4194304, 4194304) +CALLFN(·call8388608, 8388608) +CALLFN(·call16777216, 16777216) +CALLFN(·call33554432, 33554432) +CALLFN(·call67108864, 67108864) +CALLFN(·call134217728, 134217728) +CALLFN(·call268435456, 268435456) +CALLFN(·call536870912, 536870912) +CALLFN(·call1073741824, 1073741824) // func memhash32(p unsafe.Pointer, h uintptr) uintptr TEXT runtime·memhash32(SB),NOSPLIT|NOFRAME,$0-24 From 9f24388a7d57a79d0d68c1c04cf3fa4f86338e21 Mon Sep 17 00:00:00 2001 From: Cherry Zhang Date: Sun, 4 Oct 2020 00:25:17 -0400 Subject: [PATCH 112/281] cmd/dist: test c-archive mode on ios/arm64 It is tested on darwin/arm64. Don't lose it when using GOOS=ios. Updates #38485. 
Change-Id: I7157d6b6f2850f2fd361e35ae310dd1ba9f31aa4 Reviewed-on: https://go-review.googlesource.com/c/go/+/259439 Trust: Cherry Zhang Run-TryBot: Cherry Zhang TryBot-Result: Go Bot Reviewed-by: Ian Lance Taylor --- src/cmd/dist/test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cmd/dist/test.go b/src/cmd/dist/test.go index f953a76963..da894e3eef 100644 --- a/src/cmd/dist/test.go +++ b/src/cmd/dist/test.go @@ -982,7 +982,7 @@ func (t *tester) supportedBuildmode(mode string) bool { } switch pair { case "aix-ppc64", - "darwin-amd64", "darwin-arm64", + "darwin-amd64", "darwin-arm64", "ios-arm64", "linux-amd64", "linux-386", "linux-ppc64le", "linux-s390x", "freebsd-amd64", "windows-amd64", "windows-386": From b064eb7e1bb1b138405b9c8da1d90c476a266ef5 Mon Sep 17 00:00:00 2001 From: Michael Matloob Date: Thu, 24 Sep 2020 11:26:23 -0400 Subject: [PATCH 113/281] cmd/go: update go_windows_test to use test go binary Most of the cmd/go tests build the cmd/go binary and run that binary to test it, but TestAbsolutePath used the GOROOT's cmd/go instead, which makes debugging confusing and means that make.bash has to be run in each iteration cycle. Update TestAbsolutePath to use the same go binary as the rest of the cmd/go tests. Change-Id: Ib4e8ae707b66f1f75ceb346b98358f5604fd28c1 Reviewed-on: https://go-review.googlesource.com/c/go/+/256979 Trust: Michael Matloob Run-TryBot: Michael Matloob TryBot-Result: Go Bot Reviewed-by: Jay Conrod Reviewed-by: Bryan C. Mills --- src/cmd/go/go_windows_test.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/cmd/go/go_windows_test.go b/src/cmd/go/go_windows_test.go index 3999166ed9..02634f19f5 100644 --- a/src/cmd/go/go_windows_test.go +++ b/src/cmd/go/go_windows_test.go @@ -2,10 +2,9 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-package main +package main_test import ( - "internal/testenv" "io/ioutil" "os" "os/exec" @@ -17,7 +16,9 @@ import ( ) func TestAbsolutePath(t *testing.T) { - t.Parallel() + tg := testgo(t) + defer tg.cleanup() + tg.parallel() tmp, err := ioutil.TempDir("", "TestAbsolutePath") if err != nil { @@ -38,7 +39,7 @@ func TestAbsolutePath(t *testing.T) { noVolume := file[len(filepath.VolumeName(file)):] wrongPath := filepath.Join(dir, noVolume) - cmd := exec.Command(testenv.GoToolPath(t), "build", noVolume) + cmd := exec.Command(tg.goTool(), "build", noVolume) cmd.Dir = dir output, err := cmd.CombinedOutput() if err == nil { From 72ee5bad9f9bd8979e14fab02fb07e39c5e9fd8c Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor Date: Fri, 2 Oct 2020 16:03:37 -0700 Subject: [PATCH 114/281] cmd/cgo: split gofrontend mangling checks into cmd/internal/pkgpath This is a step toward porting https://golang.org/cl/219817 from the gofrontend repo to the main repo. Note that this also corrects the implementation of the v2 mangling scheme to use ..u and ..U where appropriate. 
For #37272 Change-Id: I64a1e7ca1c84348efcbf1cf62049eeb05c830ed8 Reviewed-on: https://go-review.googlesource.com/c/go/+/259298 Trust: Ian Lance Taylor Run-TryBot: Ian Lance Taylor TryBot-Result: Go Bot Reviewed-by: Than McIntosh --- src/cmd/cgo/main.go | 3 +- src/cmd/cgo/out.go | 118 +++------------------- src/cmd/dist/buildtool.go | 1 + src/cmd/internal/pkgpath/pkgpath.go | 114 +++++++++++++++++++++ src/cmd/internal/pkgpath/pkgpath_test.go | 121 +++++++++++++++++++++++ 5 files changed, 252 insertions(+), 105 deletions(-) create mode 100644 src/cmd/internal/pkgpath/pkgpath.go create mode 100644 src/cmd/internal/pkgpath/pkgpath_test.go diff --git a/src/cmd/cgo/main.go b/src/cmd/cgo/main.go index ef3ed968e4..5c44fb72f4 100644 --- a/src/cmd/cgo/main.go +++ b/src/cmd/cgo/main.go @@ -224,8 +224,7 @@ var exportHeader = flag.String("exportheader", "", "where to write export header var gccgo = flag.Bool("gccgo", false, "generate files for use with gccgo") var gccgoprefix = flag.String("gccgoprefix", "", "-fgo-prefix option used with gccgo") var gccgopkgpath = flag.String("gccgopkgpath", "", "-fgo-pkgpath option used with gccgo") -var gccgoMangleCheckDone bool -var gccgoNewmanglingInEffect bool +var gccgoMangler func(string) string var importRuntimeCgo = flag.Bool("import_runtime_cgo", true, "import runtime/cgo in generated code") var importSyscall = flag.Bool("import_syscall", true, "import syscall in generated code") var goarch, goos string diff --git a/src/cmd/cgo/out.go b/src/cmd/cgo/out.go index 03b8333b10..b447b07645 100644 --- a/src/cmd/cgo/out.go +++ b/src/cmd/cgo/out.go @@ -6,6 +6,7 @@ package main import ( "bytes" + "cmd/internal/pkgpath" "debug/elf" "debug/macho" "debug/pe" @@ -15,7 +16,6 @@ import ( "go/token" "internal/xcoff" "io" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -1282,112 +1282,24 @@ func (p *Package) writeExportHeader(fgcch io.Writer) { fmt.Fprintf(fgcch, "%s\n", p.gccExportHeaderProlog()) } -// gccgoUsesNewMangling reports whether gccgo uses 
the new collision-free -// packagepath mangling scheme (see determineGccgoManglingScheme for more -// info). -func gccgoUsesNewMangling() bool { - if !gccgoMangleCheckDone { - gccgoNewmanglingInEffect = determineGccgoManglingScheme() - gccgoMangleCheckDone = true - } - return gccgoNewmanglingInEffect -} - -const mangleCheckCode = ` -package läufer -func Run(x int) int { - return 1 -} -` - -// determineGccgoManglingScheme performs a runtime test to see which -// flavor of packagepath mangling gccgo is using. Older versions of -// gccgo use a simple mangling scheme where there can be collisions -// between packages whose paths are different but mangle to the same -// string. More recent versions of gccgo use a new mangler that avoids -// these collisions. Return value is whether gccgo uses the new mangling. -func determineGccgoManglingScheme() bool { - - // Emit a small Go file for gccgo to compile. - filepat := "*_gccgo_manglecheck.go" - var f *os.File - var err error - if f, err = ioutil.TempFile(*objDir, filepat); err != nil { - fatalf("%v", err) - } - gofilename := f.Name() - defer os.Remove(gofilename) - - if err = ioutil.WriteFile(gofilename, []byte(mangleCheckCode), 0666); err != nil { - fatalf("%v", err) - } - - // Compile with gccgo, capturing generated assembly. - gccgocmd := os.Getenv("GCCGO") - if gccgocmd == "" { - gpath, gerr := exec.LookPath("gccgo") - if gerr != nil { - fatalf("unable to locate gccgo: %v", gerr) - } - gccgocmd = gpath - } - cmd := exec.Command(gccgocmd, "-S", "-o", "-", gofilename) - buf, cerr := cmd.CombinedOutput() - if cerr != nil { - fatalf("%s", cerr) - } - - // New mangling: expect go.l..u00e4ufer.Run - // Old mangling: expect go.l__ufer.Run - return regexp.MustCompile(`go\.l\.\.u00e4ufer\.Run`).Match(buf) -} - -// gccgoPkgpathToSymbolNew converts a package path to a gccgo-style -// package symbol. 
-func gccgoPkgpathToSymbolNew(ppath string) string { - bsl := []byte{} - changed := false - for _, c := range []byte(ppath) { - switch { - case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z', - '0' <= c && c <= '9', c == '_': - bsl = append(bsl, c) - case c == '.': - bsl = append(bsl, ".x2e"...) - default: - changed = true - encbytes := []byte(fmt.Sprintf("..z%02x", c)) - bsl = append(bsl, encbytes...) - } - } - if !changed { - return ppath - } - return string(bsl) -} - -// gccgoPkgpathToSymbolOld converts a package path to a gccgo-style -// package symbol using the older mangling scheme. -func gccgoPkgpathToSymbolOld(ppath string) string { - clean := func(r rune) rune { - switch { - case 'A' <= r && r <= 'Z', 'a' <= r && r <= 'z', - '0' <= r && r <= '9': - return r - } - return '_' - } - return strings.Map(clean, ppath) -} - // gccgoPkgpathToSymbol converts a package path to a mangled packagepath // symbol. func gccgoPkgpathToSymbol(ppath string) string { - if gccgoUsesNewMangling() { - return gccgoPkgpathToSymbolNew(ppath) - } else { - return gccgoPkgpathToSymbolOld(ppath) + if gccgoMangler == nil { + var err error + cmd := os.Getenv("GCCGO") + if cmd == "" { + cmd, err = exec.LookPath("gccgo") + if err != nil { + fatalf("unable to locate gccgo: %v", err) + } + } + gccgoMangler, err = pkgpath.ToSymbolFunc(cmd, *objDir) + if err != nil { + fatalf("%v", err) + } } + return gccgoMangler(ppath) } // Return the package prefix when using gccgo. 
diff --git a/src/cmd/dist/buildtool.go b/src/cmd/dist/buildtool.go index 79eab24d29..37b3d45977 100644 --- a/src/cmd/dist/buildtool.go +++ b/src/cmd/dist/buildtool.go @@ -67,6 +67,7 @@ var bootstrapDirs = []string{ "cmd/internal/obj/s390x", "cmd/internal/obj/x86", "cmd/internal/obj/wasm", + "cmd/internal/pkgpath", "cmd/internal/src", "cmd/internal/sys", "cmd/link", diff --git a/src/cmd/internal/pkgpath/pkgpath.go b/src/cmd/internal/pkgpath/pkgpath.go new file mode 100644 index 0000000000..0b24468be6 --- /dev/null +++ b/src/cmd/internal/pkgpath/pkgpath.go @@ -0,0 +1,114 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package pkgpath determines the package path used by gccgo/GoLLVM symbols. +// This package is not used for the gc compiler. +package pkgpath + +import ( + "bytes" + "errors" + "fmt" + "io/ioutil" + "os" + "os/exec" + "strings" +) + +// ToSymbolFunc returns a function that may be used to convert a +// package path into a string suitable for use as a symbol. +// cmd is the gccgo/GoLLVM compiler in use, and tmpdir is a temporary +// directory to pass to ioutil.TempFile. +// For example, this returns a function that converts "net/http" +// into a string like "net..z2fhttp". The actual string varies for +// different gccgo/GoLLVM versions, which is why this returns a function +// that does the conversion appropriate for the compiler in use. +func ToSymbolFunc(cmd, tmpdir string) (func(string) string, error) { + // To determine the scheme used by cmd, we compile a small + // file and examine the assembly code. Older versions of gccgo + // use a simple mangling scheme where there can be collisions + // between packages whose paths are different but mangle to + // the same string. More recent versions use a new mangler + // that avoids these collisions. 
+ const filepat = "*_gccgo_manglechck.go" + f, err := ioutil.TempFile(tmpdir, filepat) + if err != nil { + return nil, err + } + gofilename := f.Name() + f.Close() + defer os.Remove(gofilename) + + if err := ioutil.WriteFile(gofilename, []byte(mangleCheckCode), 0644); err != nil { + return nil, err + } + + command := exec.Command(cmd, "-S", "-o", "-", gofilename) + buf, err := command.Output() + if err != nil { + return nil, err + } + + // New mangling: expect go.l..u00e4ufer.Run + // Old mangling: expect go.l__ufer.Run + if bytes.Contains(buf, []byte("go.l..u00e4ufer.Run")) { + return toSymbolV2, nil + } else if bytes.Contains(buf, []byte("go.l__ufer.Run")) { + return toSymbolV1, nil + } else { + return nil, errors.New(cmd + ": unrecognized mangling scheme") + } +} + +// mangleCheckCode is the package we compile to determine the mangling scheme. +const mangleCheckCode = ` +package läufer +func Run(x int) int { + return 1 +} +` + +// toSymbolV1 converts a package path using the original mangling scheme. +func toSymbolV1(ppath string) string { + clean := func(r rune) rune { + switch { + case 'A' <= r && r <= 'Z', 'a' <= r && r <= 'z', + '0' <= r && r <= '9': + return r + } + return '_' + } + return strings.Map(clean, ppath) +} + +// toSymbolV2 converts a package path using the newer mangling scheme. +func toSymbolV2(ppath string) string { + // This has to build at boostrap time, so it has to build + // with Go 1.4, so we don't use strings.Builder. + bsl := make([]byte, 0, len(ppath)) + changed := false + for _, c := range ppath { + if ('A' <= c && c <= 'Z') || ('a' <= c && c <= 'z') || ('0' <= c && c <= '9') || c == '_' { + bsl = append(bsl, byte(c)) + continue + } + var enc string + switch { + case c == '.': + enc = ".x2e" + case c < 0x80: + enc = fmt.Sprintf("..z%02x", c) + case c < 0x10000: + enc = fmt.Sprintf("..u%04x", c) + default: + enc = fmt.Sprintf("..U%08x", c) + } + bsl = append(bsl, enc...) 
+ changed = true + } + if !changed { + return ppath + } + return string(bsl) +} diff --git a/src/cmd/internal/pkgpath/pkgpath_test.go b/src/cmd/internal/pkgpath/pkgpath_test.go new file mode 100644 index 0000000000..7355f81bae --- /dev/null +++ b/src/cmd/internal/pkgpath/pkgpath_test.go @@ -0,0 +1,121 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgpath + +import ( + "os" + "testing" +) + +const testEnvName = "GO_PKGPATH_TEST_COMPILER" + +// This init function supports TestToSymbolFunc. For simplicity, +// we use the test binary itself as a sample gccgo driver. +// We set an environment variable to specify how it should behave. +func init() { + switch os.Getenv(testEnvName) { + case "": + return + case "v1": + os.Stdout.WriteString(`.string "go.l__ufer.Run"`) + os.Exit(0) + case "v2": + os.Stdout.WriteString(`.string "go.l..u00e4ufer.Run"`) + os.Exit(0) + case "error": + os.Stdout.WriteString(`unknown string`) + os.Exit(0) + } +} + +func TestToSymbolFunc(t *testing.T) { + const input = "pä世🜃" + tests := []struct { + env string + fail bool + mangled string + }{ + { + env: "v1", + mangled: "p___", + }, + { + env: "v2", + mangled: "p..u00e4..u4e16..U0001f703", + }, + { + env: "error", + fail: true, + }, + } + + cmd := os.Args[0] + tmpdir := t.TempDir() + + defer os.Unsetenv(testEnvName) + + for _, test := range tests { + t.Run(test.env, func(t *testing.T) { + os.Setenv(testEnvName, test.env) + + fn, err := ToSymbolFunc(cmd, tmpdir) + if err != nil { + if !test.fail { + t.Errorf("ToSymbolFunc(%q, %q): unexpected error %v", cmd, tmpdir, err) + } + } else if test.fail { + t.Errorf("ToSymbolFunc(%q, %q) succeeded but expected to fail", cmd, tmpdir) + } else if got, want := fn(input), test.mangled; got != want { + t.Errorf("ToSymbolFunc(%q, %q)(%q) = %q, want %q", cmd, tmpdir, input, got, want) + } + }) + } +} + +var symbolTests = []struct { + 
input, v1, v2 string +}{ + { + "", + "", + "", + }, + { + "bytes", + "bytes", + "bytes", + }, + { + "net/http", + "net_http", + "net..z2fhttp", + }, + { + "golang.org/x/net/http", + "golang_org_x_net_http", + "golang.x2eorg..z2fx..z2fnet..z2fhttp", + }, + { + "pä世.🜃", + "p____", + "p..u00e4..u4e16.x2e..U0001f703", + }, +} + +func TestV1(t *testing.T) { + for _, test := range symbolTests { + if got, want := toSymbolV1(test.input), test.v1; got != want { + t.Errorf("toSymbolV1(%q) = %q, want %q", test.input, got, want) + } + } +} + +func TestV2(t *testing.T) { + for _, test := range symbolTests { + if got, want := toSymbolV2(test.input), test.v2; got != want { + t.Errorf("toSymbolV2(%q) = %q, want %q", test.input, got, want) + } + } +} From a65bc048bf388e399af9bcfd726cd0f11bba7c8e Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor Date: Fri, 2 Oct 2020 16:17:30 -0700 Subject: [PATCH 115/281] cmd/go: use cmd/internal/pkgpath for gccgo pkgpath symbol Fixes #37272 Change-Id: I6554fd5e5400acb20c5a7e96b1d6cb1a1afb9871 Reviewed-on: https://go-review.googlesource.com/c/go/+/259299 Trust: Ian Lance Taylor Run-TryBot: Ian Lance Taylor TryBot-Result: Go Bot Reviewed-by: Than McIntosh --- src/cmd/go/internal/work/gccgo.go | 29 ++++++++++++++++++----------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/src/cmd/go/internal/work/gccgo.go b/src/cmd/go/internal/work/gccgo.go index 4c1f36dbd6..dd5adf2d7b 100644 --- a/src/cmd/go/internal/work/gccgo.go +++ b/src/cmd/go/internal/work/gccgo.go @@ -11,11 +11,13 @@ import ( "os/exec" "path/filepath" "strings" + "sync" "cmd/go/internal/base" "cmd/go/internal/cfg" "cmd/go/internal/load" "cmd/go/internal/str" + "cmd/internal/pkgpath" ) // The Gccgo toolchain. 
@@ -174,7 +176,7 @@ func (tools gccgoToolchain) asm(b *Builder, a *Action, sfiles []string) ([]strin ofiles = append(ofiles, ofile) sfile = mkAbs(p.Dir, sfile) defs := []string{"-D", "GOOS_" + cfg.Goos, "-D", "GOARCH_" + cfg.Goarch} - if pkgpath := gccgoCleanPkgpath(p); pkgpath != "" { + if pkgpath := tools.gccgoCleanPkgpath(b, p); pkgpath != "" { defs = append(defs, `-D`, `GOPKGPATH=`+pkgpath) } defs = tools.maybePIC(defs) @@ -531,7 +533,7 @@ func (tools gccgoToolchain) cc(b *Builder, a *Action, ofile, cfile string) error cfile = mkAbs(p.Dir, cfile) defs := []string{"-D", "GOOS_" + cfg.Goos, "-D", "GOARCH_" + cfg.Goarch} defs = append(defs, b.gccArchArgs()...) - if pkgpath := gccgoCleanPkgpath(p); pkgpath != "" { + if pkgpath := tools.gccgoCleanPkgpath(b, p); pkgpath != "" { defs = append(defs, `-D`, `GOPKGPATH="`+pkgpath+`"`) } compiler := envList("CC", cfg.DefaultCC(cfg.Goos, cfg.Goarch)) @@ -568,14 +570,19 @@ func gccgoPkgpath(p *load.Package) string { return p.ImportPath } -func gccgoCleanPkgpath(p *load.Package) string { - clean := func(r rune) rune { - switch { - case 'A' <= r && r <= 'Z', 'a' <= r && r <= 'z', - '0' <= r && r <= '9': - return r +var gccgoToSymbolFuncOnce sync.Once +var gccgoToSymbolFunc func(string) string + +func (tools gccgoToolchain) gccgoCleanPkgpath(b *Builder, p *load.Package) string { + gccgoToSymbolFuncOnce.Do(func() { + fn, err := pkgpath.ToSymbolFunc(tools.compiler(), b.WorkDir) + if err != nil { + fmt.Fprintf(os.Stderr, "cmd/go: %v\n", err) + base.SetExitStatus(2) + base.Exit() } - return '_' - } - return strings.Map(clean, gccgoPkgpath(p)) + gccgoToSymbolFunc = fn + }) + + return gccgoToSymbolFunc(gccgoPkgpath(p)) } From a9c75ecd3da2d87ce08b2e75bd4f332185cd7fc8 Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Fri, 2 Oct 2020 15:48:50 -0700 Subject: [PATCH 116/281] cmd/compile: export notinheap annotation to object file In the rare case when a cgo type makes it into an object file, we need the go:notinheap annotation to go with 
it. Fixes #41761 Change-Id: I541500cb1a03de954881aef659f96fc0b7738848 Reviewed-on: https://go-review.googlesource.com/c/go/+/259297 Trust: Keith Randall Run-TryBot: Keith Randall TryBot-Result: Go Bot Reviewed-by: Cherry Zhang --- misc/cgo/test/testdata/issue41761.go | 20 ++++++++++++++++++++ misc/cgo/test/testdata/issue41761a/a.go | 14 ++++++++++++++ src/cmd/compile/internal/gc/iexport.go | 2 ++ src/cmd/compile/internal/gc/iimport.go | 2 +- src/cmd/compile/internal/gc/lex.go | 2 +- 5 files changed, 38 insertions(+), 2 deletions(-) create mode 100644 misc/cgo/test/testdata/issue41761.go create mode 100644 misc/cgo/test/testdata/issue41761a/a.go diff --git a/misc/cgo/test/testdata/issue41761.go b/misc/cgo/test/testdata/issue41761.go new file mode 100644 index 0000000000..919c749251 --- /dev/null +++ b/misc/cgo/test/testdata/issue41761.go @@ -0,0 +1,20 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cgotest + +/* + typedef struct S S; +*/ +import "C" + +import ( + "cgotest/issue41761a" + "testing" +) + +func test41761(t *testing.T) { + var x issue41761a.T + _ = (*C.struct_S)(x.X) +} diff --git a/misc/cgo/test/testdata/issue41761a/a.go b/misc/cgo/test/testdata/issue41761a/a.go new file mode 100644 index 0000000000..ca5c18191e --- /dev/null +++ b/misc/cgo/test/testdata/issue41761a/a.go @@ -0,0 +1,14 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package issue41761a + +/* + typedef struct S S; +*/ +import "C" + +type T struct { + X *C.S +} diff --git a/src/cmd/compile/internal/gc/iexport.go b/src/cmd/compile/internal/gc/iexport.go index b3f50b63af..3be3b0a213 100644 --- a/src/cmd/compile/internal/gc/iexport.go +++ b/src/cmd/compile/internal/gc/iexport.go @@ -1017,6 +1017,8 @@ func (w *exportWriter) symIdx(s *types.Sym) { } func (w *exportWriter) typeExt(t *types.Type) { + // Export whether this type is marked notinheap. + w.bool(t.NotInHeap()) // For type T, export the index of type descriptor symbols of T and *T. if i, ok := typeSymIdx[t]; ok { w.int64(i[0]) diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go index 4169222c14..0c5e469c57 100644 --- a/src/cmd/compile/internal/gc/iimport.go +++ b/src/cmd/compile/internal/gc/iimport.go @@ -596,7 +596,6 @@ func (r *importReader) typ1() *types.Type { // Ensure we expand the interface in the frontend (#25055). checkwidth(t) - return t } } @@ -711,6 +710,7 @@ func (r *importReader) symIdx(s *types.Sym) { } func (r *importReader) typeExt(t *types.Type) { + t.SetNotInHeap(r.bool()) i, pi := r.int64(), r.int64() if i != -1 && pi != -1 { typeSymIdx[t] = [2]int64{i, pi} diff --git a/src/cmd/compile/internal/gc/lex.go b/src/cmd/compile/internal/gc/lex.go index 1a344c6566..25bc0399ce 100644 --- a/src/cmd/compile/internal/gc/lex.go +++ b/src/cmd/compile/internal/gc/lex.go @@ -48,7 +48,7 @@ const ( Nowritebarrierrec // error on write barrier in this or recursive callees Yeswritebarrierrec // cancels Nowritebarrierrec in this function and callees - // Runtime-only type pragmas + // Runtime and cgo type pragmas NotInHeap // values of this type must not be heap allocated ) From 56284f9d29d17869cd70847693c51319408710b3 Mon Sep 17 00:00:00 2001 From: Dmitri Shuralyov Date: Fri, 25 Sep 2020 17:10:24 -0400 Subject: [PATCH 117/281] src/buildall.bash: remove mobile filter Mobile targets are not supported by misc-compile trybots, as 
tracked in golang.org/issue/25963, and need to be filtered out. The buildall.bash script was created in CL 9438, back when it was a single all-compile builder, and it was easier to filter out mobile targets in the script than to come up with a pattern that matches all non-mobile targets. As of CL 254740, all mobile targets (Android and iOS) have unique GOOS values. That makes it it easy to filter them out in x/build/dashboard. This was done in CL 258057. As a result, it's now viable to simplify this script and perform all misc-compile target selection in x/build, rather than having it spread it across two places. Also, as of CL 10750, the all-compile builder has turned into multiple misc-compile builders, so update the script description accordingly. Updates #41610. Change-Id: I1e33260ac18cf0a70bb68cd8e3db5587100c7e87 Reviewed-on: https://go-review.googlesource.com/c/go/+/257962 Run-TryBot: Dmitri Shuralyov TryBot-Result: Go Bot Reviewed-by: Alexander Rakoczy Trust: Dmitri Shuralyov --- src/buildall.bash | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/buildall.bash b/src/buildall.bash index dc67c0630f..19ea172c5b 100755 --- a/src/buildall.bash +++ b/src/buildall.bash @@ -3,10 +3,10 @@ # Use of this source code is governed by a BSD-style # license that can be found in the LICENSE file. -# Usage: buildall.sh [-e] [pattern] +# Usage: buildall.bash [-e] [pattern] # # buildall.bash builds the standard library for all Go-supported -# architectures. It is used by the "all-compile" trybot builder, +# architectures. It is used by the "misc-compile" trybot builders, # as a smoke test to quickly flag portability issues. # # Options: @@ -42,7 +42,7 @@ gettargets() { } selectedtargets() { - gettargets | egrep -v 'android-arm|darwin-arm64' | egrep "$pattern" + gettargets | egrep "$pattern" } # put linux first in the target list to get all the architectures up front. 
From e7a7a403f92aef0eda8bf9f00091c8b21e2223a3 Mon Sep 17 00:00:00 2001 From: Dmitri Shuralyov Date: Mon, 28 Sep 2020 14:17:37 -0400 Subject: [PATCH 118/281] src/buildall.bash: remove linux-386-387 target Support for GO386=387 is being dropped in Go 1.16. There is no need for the target to be available for testing on the master branch (where Go 1.16 development is ongoing). For #40255. Change-Id: I4a4ee80b0c0a535b6b0b246fe991f26964eb07ca Reviewed-on: https://go-review.googlesource.com/c/go/+/257963 Reviewed-by: Ian Lance Taylor Trust: Dmitri Shuralyov --- src/buildall.bash | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/src/buildall.bash b/src/buildall.bash index 19ea172c5b..7b3751f42e 100755 --- a/src/buildall.bash +++ b/src/buildall.bash @@ -37,7 +37,6 @@ GOROOT="$(cd .. && pwd)" gettargets() { ../bin/go tool dist list | sed -e 's|/|-|' - echo linux-386-387 echo linux-arm-arm5 } @@ -64,15 +63,11 @@ do echo "### Building $target" export GOOS=$(echo $target | sed 's/-.*//') export GOARCH=$(echo $target | sed 's/.*-//') - unset GO386 GOARM + unset GOARM if [ "$GOARCH" = "arm5" ]; then export GOARCH=arm export GOARM=5 fi - if [ "$GOARCH" = "387" ]; then - export GOARCH=386 - export GO386=387 - fi # Build and vet everything. 
# cmd/go/internal/work/exec.go enables the same vet flags during go test of std cmd From 5d12434eee031e3db9e0bfe753c663b565b6a0f9 Mon Sep 17 00:00:00 2001 From: Alexey Vilenskiy Date: Fri, 14 Aug 2020 11:37:31 +0300 Subject: [PATCH 119/281] reflect: support multiple keys in struct tags Fixes #40281 Change-Id: Ie624bce3a78a06d7ed71bba1f501e66802dffd13 Reviewed-on: https://go-review.googlesource.com/c/go/+/248341 Reviewed-by: Ian Lance Taylor Trust: Dmitri Shuralyov --- src/reflect/all_test.go | 170 ++++++++++++++++++++++++++++++++++++++++ src/reflect/type.go | 27 ++++++- 2 files changed, 194 insertions(+), 3 deletions(-) diff --git a/src/reflect/all_test.go b/src/reflect/all_test.go index 0684eab973..a12712d254 100644 --- a/src/reflect/all_test.go +++ b/src/reflect/all_test.go @@ -7165,6 +7165,176 @@ func TestMapIterDelete1(t *testing.T) { } } +func TestStructTagLookup(t *testing.T) { + var tests = []struct { + tag StructTag + key string + expectedValue string + expectedOK bool + }{ + { + tag: `json:"json_value_1"`, + key: "json", + expectedValue: "json_value_1", + expectedOK: true, + }, + { + tag: `json:"json_value_2" xml:"xml_value_2"`, + key: "json", + expectedValue: "json_value_2", + expectedOK: true, + }, + { + tag: `json:"json_value_3" xml:"xml_value_3"`, + key: "xml", + expectedValue: "xml_value_3", + expectedOK: true, + }, + { + tag: `bson json:"shared_value_4"`, + key: "json", + expectedValue: "shared_value_4", + expectedOK: true, + }, + { + tag: `bson json:"shared_value_5"`, + key: "bson", + expectedValue: "shared_value_5", + expectedOK: true, + }, + { + tag: `json bson xml form:"field_1,omitempty" other:"value_1"`, + key: "xml", + expectedValue: "field_1,omitempty", + expectedOK: true, + }, + { + tag: `json bson xml form:"field_2,omitempty" other:"value_2"`, + key: "form", + expectedValue: "field_2,omitempty", + expectedOK: true, + }, + { + tag: `json bson xml form:"field_3,omitempty" other:"value_3"`, + key: "other", + expectedValue: "value_3", + 
expectedOK: true, + }, + { + tag: `json bson xml form:"field_4" other:"value_4"`, + key: "json", + expectedValue: "field_4", + expectedOK: true, + }, + { + tag: `json bson xml form:"field_5" other:"value_5"`, + key: "non_existing", + expectedValue: "", + expectedOK: false, + }, + { + tag: `json "json_6"`, + key: "json", + expectedValue: "", + expectedOK: false, + }, + { + tag: `json:"json_7" bson "bson_7"`, + key: "json", + expectedValue: "json_7", + expectedOK: true, + }, + { + tag: `json:"json_8" xml "xml_8"`, + key: "xml", + expectedValue: "", + expectedOK: false, + }, + { + tag: `json bson xml form "form_9" other:"value_9"`, + key: "bson", + expectedValue: "", + expectedOK: false, + }, + { + tag: `json bson xml form "form_10" other:"value_10"`, + key: "other", + expectedValue: "", + expectedOK: false, + }, + { + tag: `json bson xml form:"form_11" other "value_11"`, + key: "json", + expectedValue: "form_11", + expectedOK: true, + }, + { + tag: `tag1`, + key: "tag1", + expectedValue: "", + expectedOK: false, + }, + { + tag: `tag2 :"hello_2"`, + key: "tag2", + expectedValue: "", + expectedOK: false, + }, + { + tag: `tag3: "hello_3"`, + key: "tag3", + expectedValue: "", + expectedOK: false, + }, + { + tag: "json\x7fbson: \"hello_4\"", + key: "json", + expectedValue: "", + expectedOK: false, + }, + { + tag: "json\x7fbson: \"hello_5\"", + key: "bson", + expectedValue: "", + expectedOK: false, + }, + { + tag: "json bson:\x7f\"hello_6\"", + key: "json", + expectedValue: "", + expectedOK: false, + }, + { + tag: "json bson:\x7f\"hello_7\"", + key: "bson", + expectedValue: "", + expectedOK: false, + }, + { + tag: "json\x09bson:\"hello_8\"", + key: "json", + expectedValue: "", + expectedOK: false, + }, + { + tag: "a\x7fb json:\"val\"", + key: "json", + expectedValue: "", + expectedOK: false, + }, + } + + for _, test := range tests { + v, ok := test.tag.Lookup(test.key) + if v != test.expectedValue { + t.Errorf("struct tag lookup failed, got %s, want %s", v, 
test.expectedValue) + } + if ok != test.expectedOK { + t.Errorf("struct tag lookup failed, got %t, want %t", ok, test.expectedOK) + } + } +} + // iterateToString returns the set of elements // returned by an iterator in readable form. func iterateToString(it *MapIter) string { diff --git a/src/reflect/type.go b/src/reflect/type.go index 44c96fea82..a3a616701b 100644 --- a/src/reflect/type.go +++ b/src/reflect/type.go @@ -1130,6 +1130,9 @@ func (tag StructTag) Lookup(key string) (value string, ok bool) { // When modifying this code, also update the validateStructTag code // in cmd/vet/structtag.go. + // keyFound indicates that such key on the left side has already been found. + var keyFound bool + for tag != "" { // Skip leading space. i := 0 @@ -1149,11 +1152,29 @@ func (tag StructTag) Lookup(key string) (value string, ok bool) { for i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '"' && tag[i] != 0x7f { i++ } - if i == 0 || i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' { + if i == 0 || i+1 >= len(tag) || tag[i] < ' ' || tag[i] == 0x7f { break } name := string(tag[:i]) - tag = tag[i+1:] + tag = tag[i:] + + // If we found a space char here - assume that we have a tag with + // multiple keys. + if tag[0] == ' ' { + if name == key { + keyFound = true + } + continue + } + + // Spaces were filtered above so we assume that here we have + // only valid tag value started with `:"`. + if tag[0] != ':' || tag[1] != '"' { + break + } + + // Remove the colon leaving tag at the start of the quoted string. + tag = tag[1:] // Scan quoted string to find value. 
i = 1 @@ -1169,7 +1190,7 @@ func (tag StructTag) Lookup(key string) (value string, ok bool) { qvalue := string(tag[:i+1]) tag = tag[i+1:] - if key == name { + if key == name || keyFound { value, err := strconv.Unquote(qvalue) if err != nil { break From 694025e74f861bf48a737a8b42612d6397f1879b Mon Sep 17 00:00:00 2001 From: David Chase Date: Mon, 5 Oct 2020 12:07:00 -0400 Subject: [PATCH 120/281] cmd/compile: avoid applying ARM CMP->CMN rewrite in unsigned context Fixes #41780. Change-Id: I1dc7c19a9f057650905da3a96214c2ff4abb51be Reviewed-on: https://go-review.googlesource.com/c/go/+/259450 Trust: David Chase Run-TryBot: David Chase TryBot-Result: Go Bot Reviewed-by: Cherry Zhang --- src/cmd/compile/internal/ssa/gen/ARM.rules | 4 +- src/cmd/compile/internal/ssa/rewriteARM.go | 243 ++++++++++++++++++--- test/fixedbugs/issue41780.go | 39 ++++ 3 files changed, 257 insertions(+), 29 deletions(-) create mode 100644 test/fixedbugs/issue41780.go diff --git a/src/cmd/compile/internal/ssa/gen/ARM.rules b/src/cmd/compile/internal/ssa/gen/ARM.rules index 9490805f46..aad7236d59 100644 --- a/src/cmd/compile/internal/ssa/gen/ARM.rules +++ b/src/cmd/compile/internal/ssa/gen/ARM.rules @@ -1263,8 +1263,8 @@ (SRLconst (SLLconst x [c]) [d]) && objabi.GOARM==7 && uint64(d)>=uint64(c) && uint64(d)<=31 => (BFXU [(d-c)|(32-d)<<8] x) // comparison simplification -(CMP x (RSBconst [0] y)) => (CMN x y) -(CMN x (RSBconst [0] y)) => (CMP x y) +((LT|LE|EQ|NE|GE|GT) (CMP x (RSBconst [0] y))) => ((LT|LE|EQ|NE|GE|GT) (CMN x y)) // sense of carry bit not preserved +((LT|LE|EQ|NE|GE|GT) (CMN x (RSBconst [0] y))) => ((LT|LE|EQ|NE|GE|GT) (CMP x y)) // sense of carry bit not preserved (EQ (CMPconst [0] l:(SUB x y)) yes no) && l.Uses==1 => (EQ (CMP x y) yes no) (EQ (CMPconst [0] l:(MULS x y a)) yes no) && l.Uses==1 => (EQ (CMP a (MUL x y)) yes no) (EQ (CMPconst [0] l:(SUBconst [c] x)) yes no) && l.Uses==1 => (EQ (CMPconst [c] x) yes no) diff --git a/src/cmd/compile/internal/ssa/rewriteARM.go 
b/src/cmd/compile/internal/ssa/rewriteARM.go index 4e44165169..435da688b7 100644 --- a/src/cmd/compile/internal/ssa/rewriteARM.go +++ b/src/cmd/compile/internal/ssa/rewriteARM.go @@ -3362,21 +3362,6 @@ func rewriteValueARM_OpARMCMN(v *Value) bool { } break } - // match: (CMN x (RSBconst [0] y)) - // result: (CMP x y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - if v_1.Op != OpARMRSBconst || auxIntToInt32(v_1.AuxInt) != 0 { - continue - } - y := v_1.Args[0] - v.reset(OpARMCMP) - v.AddArg2(x, y) - return true - } - break - } return false } func rewriteValueARM_OpARMCMNconst(v *Value) bool { @@ -3938,18 +3923,6 @@ func rewriteValueARM_OpARMCMP(v *Value) bool { v.AddArg(v0) return true } - // match: (CMP x (RSBconst [0] y)) - // result: (CMN x y) - for { - x := v_0 - if v_1.Op != OpARMRSBconst || auxIntToInt32(v_1.AuxInt) != 0 { - break - } - y := v_1.Args[0] - v.reset(OpARMCMN) - v.AddArg2(x, y) - return true - } return false } func rewriteValueARM_OpARMCMPD(v *Value) bool { @@ -16002,6 +15975,42 @@ func rewriteBlockARM(b *Block) bool { b.resetWithControl(BlockARMEQ, cmp) return true } + // match: (EQ (CMP x (RSBconst [0] y))) + // result: (EQ (CMN x y)) + for b.Controls[0].Op == OpARMCMP { + v_0 := b.Controls[0] + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 { + break + } + y := v_0_1.Args[0] + v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMEQ, v0) + return true + } + // match: (EQ (CMN x (RSBconst [0] y))) + // result: (EQ (CMP x y)) + for b.Controls[0].Op == OpARMCMN { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 { + continue + } + y := v_0_1.Args[0] + v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) + 
v0.AddArg2(x, y) + b.resetWithControl(BlockARMEQ, v0) + return true + } + break + } // match: (EQ (CMPconst [0] l:(SUB x y)) yes no) // cond: l.Uses==1 // result: (EQ (CMP x y) yes no) @@ -16848,6 +16857,42 @@ func rewriteBlockARM(b *Block) bool { b.resetWithControl(BlockARMLE, cmp) return true } + // match: (GE (CMP x (RSBconst [0] y))) + // result: (GE (CMN x y)) + for b.Controls[0].Op == OpARMCMP { + v_0 := b.Controls[0] + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 { + break + } + y := v_0_1.Args[0] + v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGE, v0) + return true + } + // match: (GE (CMN x (RSBconst [0] y))) + // result: (GE (CMP x y)) + for b.Controls[0].Op == OpARMCMN { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 { + continue + } + y := v_0_1.Args[0] + v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGE, v0) + return true + } + break + } // match: (GE (CMPconst [0] l:(SUB x y)) yes no) // cond: l.Uses==1 // result: (GEnoov (CMP x y) yes no) @@ -17728,6 +17773,42 @@ func rewriteBlockARM(b *Block) bool { b.resetWithControl(BlockARMLT, cmp) return true } + // match: (GT (CMP x (RSBconst [0] y))) + // result: (GT (CMN x y)) + for b.Controls[0].Op == OpARMCMP { + v_0 := b.Controls[0] + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 { + break + } + y := v_0_1.Args[0] + v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGT, v0) + return true + } + // match: (GT (CMN x (RSBconst [0] y))) + // result: (GT (CMP x y)) + for b.Controls[0].Op == OpARMCMN { + v_0 := 
b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 { + continue + } + y := v_0_1.Args[0] + v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMGT, v0) + return true + } + break + } // match: (GT (CMPconst [0] l:(SUB x y)) yes no) // cond: l.Uses==1 // result: (GTnoov (CMP x y) yes no) @@ -18699,6 +18780,42 @@ func rewriteBlockARM(b *Block) bool { b.resetWithControl(BlockARMGE, cmp) return true } + // match: (LE (CMP x (RSBconst [0] y))) + // result: (LE (CMN x y)) + for b.Controls[0].Op == OpARMCMP { + v_0 := b.Controls[0] + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 { + break + } + y := v_0_1.Args[0] + v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLE, v0) + return true + } + // match: (LE (CMN x (RSBconst [0] y))) + // result: (LE (CMP x y)) + for b.Controls[0].Op == OpARMCMN { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 { + continue + } + y := v_0_1.Args[0] + v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLE, v0) + return true + } + break + } // match: (LE (CMPconst [0] l:(SUB x y)) yes no) // cond: l.Uses==1 // result: (LEnoov (CMP x y) yes no) @@ -19579,6 +19696,42 @@ func rewriteBlockARM(b *Block) bool { b.resetWithControl(BlockARMGT, cmp) return true } + // match: (LT (CMP x (RSBconst [0] y))) + // result: (LT (CMN x y)) + for b.Controls[0].Op == OpARMCMP { + v_0 := b.Controls[0] + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != 
OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 { + break + } + y := v_0_1.Args[0] + v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLT, v0) + return true + } + // match: (LT (CMN x (RSBconst [0] y))) + // result: (LT (CMP x y)) + for b.Controls[0].Op == OpARMCMN { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 { + continue + } + y := v_0_1.Args[0] + v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMLT, v0) + return true + } + break + } // match: (LT (CMPconst [0] l:(SUB x y)) yes no) // cond: l.Uses==1 // result: (LTnoov (CMP x y) yes no) @@ -20609,6 +20762,42 @@ func rewriteBlockARM(b *Block) bool { b.resetWithControl(BlockARMNE, cmp) return true } + // match: (NE (CMP x (RSBconst [0] y))) + // result: (NE (CMN x y)) + for b.Controls[0].Op == OpARMCMP { + v_0 := b.Controls[0] + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 { + break + } + y := v_0_1.Args[0] + v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMNE, v0) + return true + } + // match: (NE (CMN x (RSBconst [0] y))) + // result: (NE (CMP x y)) + for b.Controls[0].Op == OpARMCMN { + v_0 := b.Controls[0] + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 { + continue + } + y := v_0_1.Args[0] + v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags) + v0.AddArg2(x, y) + b.resetWithControl(BlockARMNE, v0) + return true + } + break + } // match: (NE (CMPconst [0] l:(SUB x y)) yes no) // cond: l.Uses==1 // result: (NE (CMP x y) yes no) 
diff --git a/test/fixedbugs/issue41780.go b/test/fixedbugs/issue41780.go new file mode 100644 index 0000000000..632c144a48 --- /dev/null +++ b/test/fixedbugs/issue41780.go @@ -0,0 +1,39 @@ +// run + +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Checks that conversion of CMP(x,-y) -> CMN(x,y) is only applied in correct context. + +package main + +type decimal struct { + d [8]byte // digits, big-endian representation + dp int // decimal point +} + +var powtab = []int{1, 3, 6, 9, 13, 16, 19, 23, 26} + +//go:noinline +func foo(d *decimal) int { + exp := int(d.d[1]) + if d.dp < 0 || d.dp == 0 && d.d[0] < '5' { + var n int + if -d.dp >= len(powtab) { + n = 27 + } else { + n = powtab[-d.dp] // incorrect CMP -> CMN substitution causes indexing panic. + } + exp += n + } + return exp +} + +func main() { + var d decimal + d.d[0] = '1' + if foo(&d) != 1 { + println("FAILURE (though not the one this test was written to catch)") + } +} From 8e203884dcd5c525208ffb137fed76fd2d09ffc4 Mon Sep 17 00:00:00 2001 From: Alberto Donizetti Date: Thu, 1 Oct 2020 14:00:48 +0200 Subject: [PATCH 121/281] doc: fix typo in contribute.html Change-Id: Ica27c4a9e4c364d94250aebfc4c2b59cff7f4a8f Reviewed-on: https://go-review.googlesource.com/c/go/+/258679 Trust: Alberto Donizetti Reviewed-by: Dmitri Shuralyov --- doc/contribute.html | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/doc/contribute.html b/doc/contribute.html index 3fb617b863..09d43313ff 100644 --- a/doc/contribute.html +++ b/doc/contribute.html @@ -806,10 +806,9 @@ tracker will automatically mark the issue as fixed.

If the change is a partial step towards the resolution of the issue, -uses the notation "Updates #12345". -This will leave a comment in the issue -linking back to the change in Gerrit, but it will not close the issue -when the change is applied. +write "Updates #12345" instead. +This will leave a comment in the issue linking back to the change in +Gerrit, but it will not close the issue when the change is applied.

From d2a80f3fb5b44450e0b304ac5a718f99c053d82a Mon Sep 17 00:00:00 2001 From: Luca Spiller Date: Tue, 6 Oct 2020 08:12:45 +0000 Subject: [PATCH 122/281] crypto/tls: fix typo in spelling of permanentError Change-Id: I819c121ff388460ec348af773ef94b44416a2ea9 GitHub-Last-Rev: 98dd8fb25cecb73e88d107e0a35e3e63a53dfd09 GitHub-Pull-Request: golang/go#41785 Reviewed-on: https://go-review.googlesource.com/c/go/+/259517 Run-TryBot: Emmanuel Odeke TryBot-Result: Go Bot Reviewed-by: Emmanuel Odeke Reviewed-by: Filippo Valsorda Trust: Emmanuel Odeke --- src/crypto/tls/conn.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/crypto/tls/conn.go b/src/crypto/tls/conn.go index 5dff76c988..f1d4cb926c 100644 --- a/src/crypto/tls/conn.go +++ b/src/crypto/tls/conn.go @@ -168,18 +168,18 @@ type halfConn struct { trafficSecret []byte // current TLS 1.3 traffic secret } -type permamentError struct { +type permanentError struct { err net.Error } -func (e *permamentError) Error() string { return e.err.Error() } -func (e *permamentError) Unwrap() error { return e.err } -func (e *permamentError) Timeout() bool { return e.err.Timeout() } -func (e *permamentError) Temporary() bool { return false } +func (e *permanentError) Error() string { return e.err.Error() } +func (e *permanentError) Unwrap() error { return e.err } +func (e *permanentError) Timeout() bool { return e.err.Timeout() } +func (e *permanentError) Temporary() bool { return false } func (hc *halfConn) setErrorLocked(err error) error { if e, ok := err.(net.Error); ok { - hc.err = &permamentError{err: e} + hc.err = &permanentError{err: e} } else { hc.err = err } From f8d80977b784fd4879963e61dc9fca1fc9bf2193 Mon Sep 17 00:00:00 2001 From: David Chase Date: Fri, 2 Oct 2020 14:53:48 -0400 Subject: [PATCH 123/281] cmd/compile: correct leaf type when "selecting" singleton register-sized struct Two part fix: 1) bring the type "correction" forward from a later CL in the expand calls series 2) when a 
leaf-selwect is rewritten in place, update the type (it might have been changed by the type correction in 1). Fixes #41736. Change-Id: Id097efd10481bf0ad92aaead81a7207221c144b5 Reviewed-on: https://go-review.googlesource.com/c/go/+/259203 Trust: David Chase Run-TryBot: David Chase TryBot-Result: Go Bot Reviewed-by: Cherry Zhang --- src/cmd/compile/internal/ssa/config.go | 2 +- src/cmd/compile/internal/ssa/expand_calls.go | 41 ++++++-- test/fixedbugs/issue41736.go | 105 +++++++++++++++++++ 3 files changed, 141 insertions(+), 7 deletions(-) create mode 100644 test/fixedbugs/issue41736.go diff --git a/src/cmd/compile/internal/ssa/config.go b/src/cmd/compile/internal/ssa/config.go index 649b5ba820..f1a748309c 100644 --- a/src/cmd/compile/internal/ssa/config.go +++ b/src/cmd/compile/internal/ssa/config.go @@ -195,7 +195,7 @@ const ( ClassParamOut // return value ) -const go116lateCallExpansion = false +const go116lateCallExpansion = true // LateCallExpansionEnabledWithin returns true if late call expansion should be tested // within compilation of a function/method triggered by GOSSAHASH (defaults to "yes"). diff --git a/src/cmd/compile/internal/ssa/expand_calls.go b/src/cmd/compile/internal/ssa/expand_calls.go index 7b1d656b64..992936b2d3 100644 --- a/src/cmd/compile/internal/ssa/expand_calls.go +++ b/src/cmd/compile/internal/ssa/expand_calls.go @@ -58,6 +58,29 @@ func expandCalls(f *Func) { return t.IsStruct() || t.IsArray() || regSize == 4 && t.Size() > 4 && t.IsInteger() } + // removeTrivialWrapperTypes unwraps layers of + // struct { singleField SomeType } and [1]SomeType + // until a non-wrapper type is reached. This is useful + // for working with assignments to/from interface data + // fields (either second operand to OpIMake or OpIData) + // where the wrapping or type conversion can be elided + // because of type conversions/assertions in source code + // that do not appear in SSA. 
+ removeTrivialWrapperTypes := func(t *types.Type) *types.Type { + for { + if t.IsStruct() && t.NumFields() == 1 { + t = t.Field(0).Type + continue + } + if t.IsArray() && t.NumElem() == 1 { + t = t.Elem() + continue + } + break + } + return t + } + // Calls that need lowering have some number of inputs, including a memory input, // and produce a tuple of (value1, value2, ..., mem) where valueK may or may not be SSA-able. @@ -84,7 +107,7 @@ func expandCalls(f *Func) { // rewrite v as a Copy of call -- the replacement call will produce a mem. leaf.copyOf(call) } else { - leafType := leaf.Type + leafType := removeTrivialWrapperTypes(leaf.Type) pt := types.NewPtr(leafType) if canSSAType(leafType) { off := f.ConstOffPtrSP(pt, offset+aux.OffsetOfResult(which), sp) @@ -92,6 +115,7 @@ func expandCalls(f *Func) { if leaf.Block == call.Block { leaf.reset(OpLoad) leaf.SetArgs2(off, call) + leaf.Type = leafType } else { w := call.Block.NewValue2(leaf.Pos, OpLoad, leafType, off, call) leaf.copyOf(w) @@ -192,6 +216,13 @@ func expandCalls(f *Func) { case types.TARRAY: elt := t.Elem() + if src.Op == OpIData && t.NumElem() == 1 && t.Width == regSize && elt.Width == regSize { + t = removeTrivialWrapperTypes(t) + if t.Etype == types.TSTRUCT || t.Etype == types.TARRAY { + f.Fatalf("Did not expect to find IDATA-immediate with non-trivial struct/array in it") + } + break // handle the leaf type. + } for i := int64(0); i < t.NumElem(); i++ { sel := src.Block.NewValue1I(pos, OpArraySelect, elt, i, src) mem = splitStore(dst, sel, mem, v, elt, offset+i*elt.Width, firstStorePos) @@ -199,7 +230,7 @@ func expandCalls(f *Func) { } return mem case types.TSTRUCT: - if src.Op == OpIData && t.NumFields() == 1 && t.Field(0).Type.Width == t.Width && t.Width == regSize { + if src.Op == OpIData && t.NumFields() == 1 && t.Field(0).Type.Width == t.Width && t.Width == regSize { // This peculiar test deals with accesses to immediate interface data. // It works okay because everything is the same size. 
// Example code that triggers this can be found in go/constant/value.go, function ToComplex @@ -207,11 +238,9 @@ func expandCalls(f *Func) { // v121 (+882) = StaticLECall {AuxCall{"".itof([intVal,0])[floatVal,8]}} [16] v119 v1 // This corresponds to the generic rewrite rule "(StructSelect [0] (IData x)) => (IData x)" // Guard against "struct{struct{*foo}}" - for t.Etype == types.TSTRUCT && t.NumFields() == 1 { - t = t.Field(0).Type - } + t = removeTrivialWrapperTypes(t) if t.Etype == types.TSTRUCT || t.Etype == types.TARRAY { - f.Fatalf("Did not expect to find IDATA-immediate with non-trivial struct in it") + f.Fatalf("Did not expect to find IDATA-immediate with non-trivial struct/array in it") } break // handle the leaf type. } diff --git a/test/fixedbugs/issue41736.go b/test/fixedbugs/issue41736.go new file mode 100644 index 0000000000..36f127f4fb --- /dev/null +++ b/test/fixedbugs/issue41736.go @@ -0,0 +1,105 @@ +// compile + +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package p + +type I struct { + x int64 +} + +type F struct { + x float64 +} + +type C struct { + x *complex128 +} + +type D struct { + x complex64 +} + +type A [1]*complex128 + +//go:noinline +func (i I) X() C { + cx := complex(0, float64(i.x)) + return C{&cx} +} + +//go:noinline +func (f F) X() C { + cx := complex(f.x, 0) + return C{&cx} +} + +//go:noinline +func (c C) X() C { + cx := complex(imag(*c.x), real(*c.x)) + return C{&cx} +} + +//go:noinline +func (d D) X() C { + cx := complex(float64(imag(d.x)), -float64(real(d.x))) + return C{&cx} +} + +//go:noinline +func (a A) X() C { + cx := complex(-float64(imag(*a[0])), float64(real(*a[0]))) + return C{&cx} +} + +//go:noinline +func (i I) id() I { + return i +} + +//go:noinline +func (f F) id() F { + return f +} + +//go:noinline +func (c C) id() C { + return c +} + +//go:noinline +func (d D) id() D { + return d +} + +//go:noinline +func (a A) id() A { + return a +} + +type T interface { + X() C +} + +func G(x []T) []T { + var y []T + for _, a := range x { + var v T + switch u := a.(type) { + case I: + v = u.id() + case F: + v = u.id() + case C: + v = u.id() + case D: + v = u.id() + case A: + v = u.id() + } + y = append(y, v) + } + return y +} From ab2a5b48665eed6d670d719cdef5335bc3602359 Mon Sep 17 00:00:00 2001 From: Michael Matloob Date: Tue, 11 Aug 2020 12:57:01 -0400 Subject: [PATCH 124/281] cmd/go: add basic support for overlays This CL adds basic support for listing packages with overlays. The new cmd/go/internal/fs package adds an abstraction for communicating with the file system that will open files according to their overlaid paths, and provides functions to override those in the build context to open overlaid files. There is also some support for executing builds on packages with overlays. In cmd/go/internal/work.(*Builder).build, paths are mapped to their overlaid paths before they are given as arguments to tools. 
For #39958 Change-Id: I5ec0eb9ebbca303e2f1e7dbe22ec32613bc1fd17 Reviewed-on: https://go-review.googlesource.com/c/go/+/253747 Trust: Michael Matloob Trust: Jay Conrod Run-TryBot: Michael Matloob TryBot-Result: Go Bot Reviewed-by: Jay Conrod Reviewed-by: Bryan C. Mills --- src/cmd/go/internal/cfg/cfg.go | 12 + src/cmd/go/internal/envcmd/env.go | 5 + src/cmd/go/internal/fsys/fsys.go | 426 +++++++++++++++++ src/cmd/go/internal/fsys/fsys_test.go | 479 +++++++++++++++++++ src/cmd/go/internal/imports/scan.go | 7 +- src/cmd/go/internal/modload/import.go | 51 +- src/cmd/go/internal/modload/init.go | 5 + src/cmd/go/internal/search/search.go | 1 + src/cmd/go/internal/work/build.go | 3 + src/cmd/go/internal/work/gc.go | 23 +- src/cmd/go/internal/work/init.go | 4 + src/cmd/go/testdata/script/build_overlay.txt | 64 +++ src/cmd/go/testdata/script/list_overlay.txt | 54 +++ 13 files changed, 1081 insertions(+), 53 deletions(-) create mode 100644 src/cmd/go/internal/fsys/fsys.go create mode 100644 src/cmd/go/internal/fsys/fsys_test.go create mode 100644 src/cmd/go/testdata/script/build_overlay.txt create mode 100644 src/cmd/go/testdata/script/list_overlay.txt diff --git a/src/cmd/go/internal/cfg/cfg.go b/src/cmd/go/internal/cfg/cfg.go index ebbaf04115..9169c12d8f 100644 --- a/src/cmd/go/internal/cfg/cfg.go +++ b/src/cmd/go/internal/cfg/cfg.go @@ -11,6 +11,7 @@ import ( "fmt" "go/build" "internal/cfg" + "io" "io/ioutil" "os" "path/filepath" @@ -18,6 +19,8 @@ import ( "strings" "sync" + "cmd/go/internal/fsys" + "cmd/internal/objabi" ) @@ -104,6 +107,15 @@ func defaultContext() build.Context { // Nothing to do here. 
} + ctxt.OpenFile = func(path string) (io.ReadCloser, error) { + return fsys.Open(path) + } + ctxt.ReadDir = fsys.ReadDir + ctxt.IsDir = func(path string) bool { + isDir, err := fsys.IsDir(path) + return err == nil && isDir + } + return ctxt } diff --git a/src/cmd/go/internal/envcmd/env.go b/src/cmd/go/internal/envcmd/env.go index ee0bb0d0b2..e1f2400f60 100644 --- a/src/cmd/go/internal/envcmd/env.go +++ b/src/cmd/go/internal/envcmd/env.go @@ -21,6 +21,7 @@ import ( "cmd/go/internal/base" "cmd/go/internal/cache" "cmd/go/internal/cfg" + "cmd/go/internal/fsys" "cmd/go/internal/load" "cmd/go/internal/modload" "cmd/go/internal/work" @@ -197,6 +198,10 @@ func runEnv(ctx context.Context, cmd *base.Command, args []string) { env := cfg.CmdEnv env = append(env, ExtraEnvVars()...) + if err := fsys.Init(base.Cwd); err != nil { + base.Fatalf("go: %v", err) + } + // Do we need to call ExtraEnvVarsCostly, which is a bit expensive? // Only if we're listing all environment variables ("go env") // or the variables being requested are in the extra list. diff --git a/src/cmd/go/internal/fsys/fsys.go b/src/cmd/go/internal/fsys/fsys.go new file mode 100644 index 0000000000..d64ce0aba1 --- /dev/null +++ b/src/cmd/go/internal/fsys/fsys.go @@ -0,0 +1,426 @@ +// Package fsys is an abstraction for reading files that +// allows for virtual overlays on top of the files on disk. +package fsys + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strings" + "time" +) + +// OverlayFile is the path to a text file in the OverlayJSON format. +// It is the value of the -overlay flag. +var OverlayFile string + +// OverlayJSON is the format overlay files are expected to be in. +// The Replace map maps from overlaid paths to replacement paths: +// the Go command will forward all reads trying to open +// each overlaid path to its replacement path, or consider the overlaid +// path not to exist if the replacement path is empty. 
+type OverlayJSON struct { + Replace map[string]string +} + +type node struct { + actualFilePath string // empty if a directory + children map[string]*node // path element → file or directory +} + +func (n *node) isDir() bool { + return n.actualFilePath == "" && n.children != nil +} + +func (n *node) isDeleted() bool { + return n.actualFilePath == "" && n.children == nil +} + +// TODO(matloob): encapsulate these in an io/fs-like interface +var overlay map[string]*node // path -> file or directory node +var cwd string // copy of base.Cwd to avoid dependency + +// Canonicalize a path for looking it up in the overlay. +// Important: filepath.Join(cwd, path) doesn't always produce +// the correct absolute path if path is relative, because on +// Windows producing the correct absolute path requires making +// a syscall. So this should only be used when looking up paths +// in the overlay, or canonicalizing the paths in the overlay. +func canonicalize(path string) string { + if path == "" { + return "" + } + if filepath.IsAbs(path) { + return filepath.Clean(path) + } + + if v := filepath.VolumeName(cwd); v != "" && path[0] == filepath.Separator { + // On Windows filepath.Join(cwd, path) doesn't always work. In general + // filepath.Abs needs to make a syscall on Windows. Elsewhere in cmd/go + // use filepath.Join(cwd, path), but cmd/go specifically supports Windows + // paths that start with "\" which implies the path is relative to the + // volume of the working directory. See golang.org/issue/8130. + return filepath.Join(v, path) + } + + // Make the path absolute. + return filepath.Join(cwd, path) +} + +// Init initializes the overlay, if one is being used. 
+func Init(wd string) error { + if overlay != nil { + // already initialized + return nil + } + + cwd = wd + + if OverlayFile == "" { + return nil + } + + b, err := ioutil.ReadFile(OverlayFile) + if err != nil { + return fmt.Errorf("reading overlay file: %v", err) + } + + var overlayJSON OverlayJSON + if err := json.Unmarshal(b, &overlayJSON); err != nil { + return fmt.Errorf("parsing overlay JSON: %v", err) + } + + return initFromJSON(overlayJSON) +} + +func initFromJSON(overlayJSON OverlayJSON) error { + // Canonicalize the paths in in the overlay map. + // Use reverseCanonicalized to check for collisions: + // no two 'from' paths should canonicalize to the same path. + overlay = make(map[string]*node) + reverseCanonicalized := make(map[string]string) // inverse of canonicalize operation, to check for duplicates + // Build a table of file and directory nodes from the replacement map. + + // Remove any potential non-determinism from iterating over map by sorting it. + replaceFrom := make([]string, 0, len(overlayJSON.Replace)) + for k := range overlayJSON.Replace { + replaceFrom = append(replaceFrom, k) + } + sort.Strings(replaceFrom) + + for _, from := range replaceFrom { + to := overlayJSON.Replace[from] + // Canonicalize paths and check for a collision. + if from == "" { + return fmt.Errorf("empty string key in overlay file Replace map") + } + cfrom := canonicalize(from) + if to != "" { + // Don't canonicalize "", meaning to delete a file, because then it will turn into ".". + to = canonicalize(to) + } + if otherFrom, seen := reverseCanonicalized[cfrom]; seen { + return fmt.Errorf( + "paths %q and %q both canonicalize to %q in overlay file Replace map", otherFrom, from, cfrom) + } + reverseCanonicalized[cfrom] = from + from = cfrom + + // Create node for overlaid file. + dir, base := filepath.Dir(from), filepath.Base(from) + if n, ok := overlay[from]; ok { + // All 'from' paths in the overlay are file paths. 
Since the from paths + // are in a map, they are unique, so if the node already exists we added + // it below when we create parent directory nodes. That is, that + // both a file and a path to one of its parent directories exist as keys + // in the Replace map. + // + // This only applies if the overlay directory has any files or directories + // in it: placeholder directories that only contain deleted files don't + // count. They are safe to be overwritten with actual files. + for _, f := range n.children { + if !f.isDeleted() { + return fmt.Errorf("invalid overlay: path %v is used as both file and directory", from) + } + } + } + overlay[from] = &node{actualFilePath: to} + + // Add parent directory nodes to overlay structure. + childNode := overlay[from] + for { + dirNode := overlay[dir] + if dirNode == nil || dirNode.isDeleted() { + dirNode = &node{children: make(map[string]*node)} + overlay[dir] = dirNode + } + if childNode.isDeleted() { + // Only create one parent for a deleted file: + // the directory only conditionally exists if + // there are any non-deleted children, so + // we don't create their parents. + if dirNode.isDir() { + dirNode.children[base] = childNode + } + break + } + if !dirNode.isDir() { + // This path already exists as a file, so it can't be a parent + // directory. See comment at error above. + return fmt.Errorf("invalid overlay: path %v is used as both file and directory", dir) + } + dirNode.children[base] = childNode + parent := filepath.Dir(dir) + if parent == dir { + break // reached the top; there is no parent + } + dir, base = parent, filepath.Base(dir) + childNode = dirNode + } + } + + return nil +} + +// IsDir returns true if path is a directory on disk or in the +// overlay. 
+func IsDir(path string) (bool, error) { + path = canonicalize(path) + + if _, ok := parentIsOverlayFile(path); ok { + return false, nil + } + + if n, ok := overlay[path]; ok { + return n.isDir(), nil + } + + fi, err := os.Stat(path) + if err != nil { + return false, err + } + + return fi.IsDir(), nil +} + +// parentIsOverlayFile returns whether name or any of +// its parents are directories in the overlay, and the first parent found, +// including name itself, that's a directory in the overlay. +func parentIsOverlayFile(name string) (string, bool) { + if overlay != nil { + // Check if name can't possibly be a directory because + // it or one of its parents is overlaid with a file. + // TODO(matloob): Maybe save this to avoid doing it every time? + prefix := name + for { + node := overlay[prefix] + if node != nil && !node.isDir() { + return prefix, true + } + parent := filepath.Dir(prefix) + if parent == prefix { + break + } + prefix = parent + } + } + + return "", false +} + +// errNotDir is used to communicate from ReadDir to IsDirWithGoFiles +// that the argument is not a directory, so that IsDirWithGoFiles doesn't +// return an error. +var errNotDir = errors.New("not a directory") + +// readDir reads a dir on disk, returning an error that is errNotDir if the dir is not a directory. +// Unfortunately, the error returned by ioutil.ReadDir if dir is not a directory +// can vary depending on the OS (Linux, Mac, Windows return ENOTDIR; BSD returns EINVAL). +func readDir(dir string) ([]os.FileInfo, error) { + fis, err := ioutil.ReadDir(dir) + if err == nil { + return fis, nil + } + + if os.IsNotExist(err) { + return nil, err + } else if dirfi, staterr := os.Stat(dir); staterr == nil && !dirfi.IsDir() { + return nil, &os.PathError{Op: "ReadDir", Path: dir, Err: errNotDir} + } else { + return nil, err + } +} + +// ReadDir provides a slice of os.FileInfo entries corresponding +// to the overlaid files in the directory. 
+func ReadDir(dir string) ([]os.FileInfo, error) { + dir = canonicalize(dir) + if _, ok := parentIsOverlayFile(dir); ok { + return nil, &os.PathError{Op: "ReadDir", Path: dir, Err: errNotDir} + } + + dirNode := overlay[dir] + if dirNode == nil { + return readDir(dir) + } else if dirNode.isDeleted() { + return nil, &os.PathError{Op: "ReadDir", Path: dir, Err: os.ErrNotExist} + } + diskfis, err := readDir(dir) + if err != nil && !os.IsNotExist(err) && !errors.Is(err, errNotDir) { + return nil, err + } + + // Stat files in overlay to make composite list of fileinfos + files := make(map[string]os.FileInfo) + for _, f := range diskfis { + files[f.Name()] = f + } + for name, to := range dirNode.children { + switch { + case to.isDir(): + files[name] = fakeDir(name) + case to.isDeleted(): + delete(files, name) + default: + // This is a regular file. + f, err := os.Lstat(to.actualFilePath) + if err != nil { + files[name] = missingFile(name) + continue + } else if f.IsDir() { + return nil, fmt.Errorf("for overlay of %q to %q: overlay Replace entries can't point to dirctories", + filepath.Join(dir, name), to.actualFilePath) + } + // Add a fileinfo for the overlaid file, so that it has + // the original file's name, but the overlaid file's metadata. + files[name] = fakeFile{name, f} + } + } + sortedFiles := diskfis[:0] + for _, f := range files { + sortedFiles = append(sortedFiles, f) + } + sort.Slice(sortedFiles, func(i, j int) bool { return sortedFiles[i].Name() < sortedFiles[j].Name() }) + return sortedFiles, nil +} + +// OverlayPath returns the path to the overlaid contents of the +// file, the empty string if the overlay deletes the file, or path +// itself if the file is not in the overlay, the file is a directory +// in the overlay, or there is no overlay. +// It returns true if the path is overlaid with a regular file +// or deleted, and false otherwise. 
+func OverlayPath(path string) (string, bool) { + if p, ok := overlay[canonicalize(path)]; ok && !p.isDir() { + return p.actualFilePath, ok + } + + return path, false +} + +// Open opens the file at or overlaid on the given path. +func Open(path string) (*os.File, error) { + cpath := canonicalize(path) + if node, ok := overlay[cpath]; ok { + if node.isDir() { + return nil, &os.PathError{Op: "Open", Path: path, Err: errors.New("fsys.Open doesn't support opening directories yet")} + } + return os.Open(node.actualFilePath) + } else if parent, ok := parentIsOverlayFile(filepath.Dir(cpath)); ok { + // The file is deleted explicitly in the Replace map, + // or implicitly because one of its parent directories was + // replaced by a file. + return nil, &os.PathError{ + Op: "Open", + Path: path, + Err: fmt.Errorf("file %s does not exist: parent directory %s is replaced by a file in overlay", path, parent)} + } else { + return os.Open(cpath) + } +} + +// IsDirWithGoFiles reports whether dir is a directory containing Go files +// either on disk or in the overlay. +func IsDirWithGoFiles(dir string) (bool, error) { + fis, err := ReadDir(dir) + if os.IsNotExist(err) || errors.Is(err, errNotDir) { + return false, nil + } else if err != nil { + return false, err + } + + var firstErr error + for _, fi := range fis { + if fi.IsDir() { + continue + } + + // TODO(matloob): this enforces that the "from" in the map + // has a .go suffix, but the actual destination file + // doesn't need to have a .go suffix. Is this okay with the + // compiler? + if !strings.HasSuffix(fi.Name(), ".go") { + continue + } + if fi.Mode().IsRegular() { + return true, nil + } + + // fi is the result of an Lstat, so it doesn't follow symlinks. + // But it's okay if the file is a symlink pointing to a regular + // file, so use os.Stat to follow symlinks and check that. 
+ actualFilePath, _ := OverlayPath(filepath.Join(dir, fi.Name())) + if fi, err := os.Stat(actualFilePath); err == nil && fi.Mode().IsRegular() { + return true, nil + } else if err != nil && firstErr == nil { + firstErr = err + } + } + + // No go files found in directory. + return false, firstErr +} + +// fakeFile provides an os.FileInfo implementation for an overlaid file, +// so that the file has the name of the overlaid file, but takes all +// other characteristics of the replacement file. +type fakeFile struct { + name string + real os.FileInfo +} + +func (f fakeFile) Name() string { return f.name } +func (f fakeFile) Size() int64 { return f.real.Size() } +func (f fakeFile) Mode() os.FileMode { return f.real.Mode() } +func (f fakeFile) ModTime() time.Time { return f.real.ModTime() } +func (f fakeFile) IsDir() bool { return f.real.IsDir() } +func (f fakeFile) Sys() interface{} { return f.real.Sys() } + +// missingFile provides an os.FileInfo for an overlaid file where the +// destination file in the overlay doesn't exist. It returns zero values +// for the fileInfo methods other than Name, set to the file's name, and Mode +// set to ModeIrregular. +type missingFile string + +func (f missingFile) Name() string { return string(f) } +func (f missingFile) Size() int64 { return 0 } +func (f missingFile) Mode() os.FileMode { return os.ModeIrregular } +func (f missingFile) ModTime() time.Time { return time.Unix(0, 0) } +func (f missingFile) IsDir() bool { return false } +func (f missingFile) Sys() interface{} { return nil } + +// fakeDir provides an os.FileInfo implementation for directories that are +// implicitly created by overlaid files. Each directory in the +// path of an overlaid file is considered to exist in the overlay filesystem. 
+type fakeDir string + +func (f fakeDir) Name() string { return string(f) } +func (f fakeDir) Size() int64 { return 0 } +func (f fakeDir) Mode() os.FileMode { return os.ModeDir | 0500 } +func (f fakeDir) ModTime() time.Time { return time.Unix(0, 0) } +func (f fakeDir) IsDir() bool { return true } +func (f fakeDir) Sys() interface{} { return nil } diff --git a/src/cmd/go/internal/fsys/fsys_test.go b/src/cmd/go/internal/fsys/fsys_test.go new file mode 100644 index 0000000000..4b53059427 --- /dev/null +++ b/src/cmd/go/internal/fsys/fsys_test.go @@ -0,0 +1,479 @@ +package fsys + +import ( + "cmd/go/internal/txtar" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +// initOverlay resets the overlay state to reflect the config. +// config should be a text archive string. The comment is the overlay config +// json, and the files in the archive are laid out in a temp directory +// that cwd is set to. +func initOverlay(t *testing.T, config string) { + t.Helper() + + // Create a temporary directory and chdir to it. 
+ prevwd, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + cwd = t.TempDir() + if err := os.Chdir(cwd); err != nil { + t.Fatal(err) + } + t.Cleanup(func() { + overlay = nil + if err := os.Chdir(prevwd); err != nil { + t.Fatal(err) + } + }) + + a := txtar.Parse([]byte(config)) + for _, f := range a.Files { + name := filepath.Join(cwd, f.Name) + if err := os.MkdirAll(filepath.Dir(name), 0777); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(name, f.Data, 0666); err != nil { + t.Fatal(err) + } + } + + var overlayJSON OverlayJSON + if err := json.Unmarshal(a.Comment, &overlayJSON); err != nil { + t.Fatal(fmt.Errorf("parsing overlay JSON: %v", err)) + } + + initFromJSON(overlayJSON) +} + +func TestIsDir(t *testing.T) { + initOverlay(t, ` +{ + "Replace": { + "subdir2/file2.txt": "overlayfiles/subdir2_file2.txt", + "subdir4": "overlayfiles/subdir4", + "subdir3/file3b.txt": "overlayfiles/subdir3_file3b.txt", + "subdir5": "", + "subdir6": "" + } +} +-- subdir1/file1.txt -- + +-- subdir3/file3a.txt -- +33 +-- subdir4/file4.txt -- +444 +-- overlayfiles/subdir2_file2.txt -- +2 +-- overlayfiles/subdir3_file3b.txt -- +66666 +-- overlayfiles/subdir4 -- +x +-- subdir6/file6.txt -- +six +`) + + testCases := []struct { + path string + want, wantErr bool + }{ + {"", true, true}, + {".", true, false}, + {cwd, true, false}, + {cwd + string(filepath.Separator), true, false}, + // subdir1 is only on disk + {filepath.Join(cwd, "subdir1"), true, false}, + {"subdir1", true, false}, + {"subdir1" + string(filepath.Separator), true, false}, + {"subdir1/file1.txt", false, false}, + {"subdir1/doesntexist.txt", false, true}, + {"doesntexist", false, true}, + // subdir2 is only in overlay + {filepath.Join(cwd, "subdir2"), true, false}, + {"subdir2", true, false}, + {"subdir2" + string(filepath.Separator), true, false}, + {"subdir2/file2.txt", false, false}, + {"subdir2/doesntexist.txt", false, true}, + // subdir3 has files on disk and in overlay + {filepath.Join(cwd, 
"subdir3"), true, false}, + {"subdir3", true, false}, + {"subdir3" + string(filepath.Separator), true, false}, + {"subdir3/file3a.txt", false, false}, + {"subdir3/file3b.txt", false, false}, + {"subdir3/doesntexist.txt", false, true}, + // subdir4 is overlaid with a file + {filepath.Join(cwd, "subdir4"), false, false}, + {"subdir4", false, false}, + {"subdir4" + string(filepath.Separator), false, false}, + {"subdir4/file4.txt", false, false}, + {"subdir4/doesntexist.txt", false, false}, + // subdir5 doesn't exist, and is overlaid with a "delete" entry + {filepath.Join(cwd, "subdir5"), false, false}, + {"subdir5", false, false}, + {"subdir5" + string(filepath.Separator), false, false}, + {"subdir5/file5.txt", false, false}, + {"subdir5/doesntexist.txt", false, false}, + // subdir6 does exist, and is overlaid with a "delete" entry + {filepath.Join(cwd, "subdir6"), false, false}, + {"subdir6", false, false}, + {"subdir6" + string(filepath.Separator), false, false}, + {"subdir6/file6.txt", false, false}, + {"subdir6/doesntexist.txt", false, false}, + } + + for _, tc := range testCases { + got, err := IsDir(tc.path) + if err != nil { + if !tc.wantErr { + t.Errorf("IsDir(%q): got error with string %q, want no error", tc.path, err.Error()) + } + continue + } + if tc.wantErr { + t.Errorf("IsDir(%q): got no error, want error", tc.path) + } + if tc.want != got { + t.Errorf("IsDir(%q) = %v, want %v", tc.path, got, tc.want) + } + } +} + +func TestReadDir(t *testing.T) { + initOverlay(t, ` +{ + "Replace": { + "subdir2/file2.txt": "overlayfiles/subdir2_file2.txt", + "subdir4": "overlayfiles/subdir4", + "subdir3/file3b.txt": "overlayfiles/subdir3_file3b.txt", + "subdir5": "", + "subdir6/asubsubdir/afile.txt": "overlayfiles/subdir6_asubsubdir_afile.txt", + "subdir6/asubsubdir/zfile.txt": "overlayfiles/subdir6_asubsubdir_zfile.txt", + "subdir6/zsubsubdir/file.txt": "overlayfiles/subdir6_zsubsubdir_file.txt", + "subdir7/asubsubdir/file.txt": 
"overlayfiles/subdir7_asubsubdir_file.txt", + "subdir7/zsubsubdir/file.txt": "overlayfiles/subdir7_zsubsubdir_file.txt", + "subdir8/doesntexist": "this_file_doesnt_exist_anywhere", + "other/pointstodir": "overlayfiles/this_is_a_directory", + "parentoverwritten/subdir1": "overlayfiles/parentoverwritten_subdir1", + "subdir9/this_file_is_overlaid.txt": "overlayfiles/subdir9_this_file_is_overlaid.txt", + "subdir10/only_deleted_file.txt": "", + "subdir11/deleted.txt": "", + "subdir11": "overlayfiles/subdir11", + "textfile.txt/file.go": "overlayfiles/textfile_txt_file.go" + } +} +-- subdir1/file1.txt -- + +-- subdir3/file3a.txt -- +33 +-- subdir4/file4.txt -- +444 +-- subdir6/file.txt -- +-- subdir6/asubsubdir/file.txt -- +-- subdir6/anothersubsubdir/file.txt -- +-- subdir9/this_file_is_overlaid.txt -- +-- subdir10/only_deleted_file.txt -- +this will be deleted in overlay +-- subdir11/deleted.txt -- +-- parentoverwritten/subdir1/subdir2/subdir3/file.txt -- +-- textfile.txt -- +this will be overridden by textfile.txt/file.go +-- overlayfiles/subdir2_file2.txt -- +2 +-- overlayfiles/subdir3_file3b.txt -- +66666 +-- overlayfiles/subdir4 -- +x +-- overlayfiles/subdir6_asubsubdir_afile.txt -- +-- overlayfiles/subdir6_asubsubdir_zfile.txt -- +-- overlayfiles/subdir6_zsubsubdir_file.txt -- +-- overlayfiles/subdir7_asubsubdir_file.txt -- +-- overlayfiles/subdir7_zsubsubdir_file.txt -- +-- overlayfiles/parentoverwritten_subdir1 -- +x +-- overlayfiles/subdir9_this_file_is_overlaid.txt -- +99999999 +-- overlayfiles/subdir11 -- +-- overlayfiles/this_is_a_directory/file.txt -- +-- overlayfiles/textfile_txt_file.go -- +x +`) + + testCases := map[string][]struct { + name string + size int64 + isDir bool + }{ + ".": { + {"other", 0, true}, + {"overlayfiles", 0, true}, + {"parentoverwritten", 0, true}, + {"subdir1", 0, true}, + {"subdir10", 0, true}, + {"subdir11", 0, false}, + {"subdir2", 0, true}, + {"subdir3", 0, true}, + {"subdir4", 2, false}, + // no subdir5. 
+ {"subdir6", 0, true}, + {"subdir7", 0, true}, + {"subdir8", 0, true}, + {"subdir9", 0, true}, + {"textfile.txt", 0, true}, + }, + "subdir1": {{"file1.txt", 1, false}}, + "subdir2": {{"file2.txt", 2, false}}, + "subdir3": {{"file3a.txt", 3, false}, {"file3b.txt", 6, false}}, + "subdir6": { + {"anothersubsubdir", 0, true}, + {"asubsubdir", 0, true}, + {"file.txt", 0, false}, + {"zsubsubdir", 0, true}, + }, + "subdir6/asubsubdir": {{"afile.txt", 0, false}, {"file.txt", 0, false}, {"zfile.txt", 0, false}}, + "subdir8": {{"doesntexist", 0, false}}, // entry is returned even if destination file doesn't exist + // check that read dir actually redirects files that already exist + // the original this_file_is_overlaid.txt is empty + "subdir9": {{"this_file_is_overlaid.txt", 9, false}}, + "subdir10": {}, + "parentoverwritten": {{"subdir1", 2, false}}, + "textfile.txt": {{"file.go", 2, false}}, + } + + for dir, want := range testCases { + fis, err := ReadDir(dir) + if err != nil { + t.Fatalf("ReadDir(%q): got error %q, want no error", dir, err) + } + if len(fis) != len(want) { + t.Fatalf("ReadDir(%q) result: got %v entries; want %v entries", dir, len(fis), len(want)) + } + for i := range fis { + if fis[i].Name() != want[i].name { + t.Fatalf("ReadDir(%q) entry %v: got Name() = %v, want %v", dir, i, fis[i].Name(), want[i].name) + } + if fis[i].IsDir() != want[i].isDir { + t.Fatalf("ReadDir(%q) entry %v: got IsDir() = %v, want %v", dir, i, fis[i].IsDir(), want[i].isDir) + } + if want[i].isDir { + // We don't try to get size right for directories + continue + } + if fis[i].Size() != want[i].size { + t.Fatalf("ReadDir(%q) entry %v: got Size() = %v, want %v", dir, i, fis[i].Size(), want[i].size) + } + } + } + + errCases := []string{ + "subdir1/file1.txt", // regular file on disk + "subdir2/file2.txt", // regular file in overlay + "subdir4", // directory overlaid with regular file + "subdir5", // directory deleted in overlay + "parentoverwritten/subdir1/subdir2/subdir3", // 
parentoverwritten/subdir1 overlaid with regular file + "parentoverwritten/subdir1/subdir2", // parentoverwritten/subdir1 overlaid with regular file + "subdir11", // directory with deleted child, overlaid with regular file + "other/pointstodir", + } + + for _, dir := range errCases { + _, gotErr := ReadDir(dir) + if gotErr == nil { + t.Errorf("ReadDir(%q): got no error, want error", dir) + } else if _, ok := gotErr.(*os.PathError); !ok { + t.Errorf("ReadDir(%q): got error with string %q and type %T, want os.PathError", dir, gotErr.Error(), gotErr) + } + } +} + +func TestOverlayPath(t *testing.T) { + initOverlay(t, ` +{ + "Replace": { + "subdir2/file2.txt": "overlayfiles/subdir2_file2.txt", + "subdir3/doesntexist": "this_file_doesnt_exist_anywhere", + "subdir4/this_file_is_overlaid.txt": "overlayfiles/subdir4_this_file_is_overlaid.txt", + "subdir5/deleted.txt": "", + "parentoverwritten/subdir1": "" + } +} +-- subdir1/file1.txt -- +file 1 +-- subdir4/this_file_is_overlaid.txt -- +these contents are replaced by the overlay +-- parentoverwritten/subdir1/subdir2/subdir3/file.txt -- +-- subdir5/deleted.txt -- +deleted +-- overlayfiles/subdir2_file2.txt -- +file 2 +-- overlayfiles/subdir4_this_file_is_overlaid.txt -- +99999999 +`) + + testCases := []struct { + path string + wantPath string + wantOK bool + }{ + {"subdir1/file1.txt", "subdir1/file1.txt", false}, + // OverlayPath returns false for directories + {"subdir2", "subdir2", false}, + {"subdir2/file2.txt", filepath.Join(cwd, "overlayfiles/subdir2_file2.txt"), true}, + // OverlayPath doesn't stat a file to see if it exists, so it happily returns + // the 'to' path and true even if the 'to' path doesn't exist on disk. + {"subdir3/doesntexist", filepath.Join(cwd, "this_file_doesnt_exist_anywhere"), true}, + // Like the subdir2/file2.txt case above, but subdir4 exists on disk, but subdir2 does not. 
+ {"subdir4/this_file_is_overlaid.txt", filepath.Join(cwd, "overlayfiles/subdir4_this_file_is_overlaid.txt"), true}, + {"subdir5", "subdir5", false}, + {"subdir5/deleted.txt", "", true}, + } + + for _, tc := range testCases { + gotPath, gotOK := OverlayPath(tc.path) + if gotPath != tc.wantPath || gotOK != tc.wantOK { + t.Errorf("OverlayPath(%q): got %v, %v; want %v, %v", + tc.path, gotPath, gotOK, tc.wantPath, tc.wantOK) + } + } +} + +func TestOpen(t *testing.T) { + initOverlay(t, ` +{ + "Replace": { + "subdir2/file2.txt": "overlayfiles/subdir2_file2.txt", + "subdir3/doesntexist": "this_file_doesnt_exist_anywhere", + "subdir4/this_file_is_overlaid.txt": "overlayfiles/subdir4_this_file_is_overlaid.txt", + "subdir5/deleted.txt": "", + "parentoverwritten/subdir1": "", + "childoverlay/subdir1.txt/child.txt": "overlayfiles/child.txt", + "subdir11/deleted.txt": "", + "subdir11": "overlayfiles/subdir11", + "parentdeleted": "", + "parentdeleted/file.txt": "overlayfiles/parentdeleted_file.txt" + } +} +-- subdir11/deleted.txt -- +-- subdir1/file1.txt -- +file 1 +-- subdir4/this_file_is_overlaid.txt -- +these contents are replaced by the overlay +-- parentoverwritten/subdir1/subdir2/subdir3/file.txt -- +-- childoverlay/subdir1.txt -- +this file doesn't exist because the path +childoverlay/subdir1.txt/child.txt is in the overlay +-- subdir5/deleted.txt -- +deleted +-- parentdeleted -- +this will be deleted so that parentdeleted/file.txt can exist +-- overlayfiles/subdir2_file2.txt -- +file 2 +-- overlayfiles/subdir4_this_file_is_overlaid.txt -- +99999999 +-- overlayfiles/child.txt -- +-- overlayfiles/subdir11 -- +11 +-- overlayfiles/parentdeleted_file.txt -- +this can exist because the parent directory is deleted +`) + + testCases := []struct { + path string + wantContents string + isErr bool + }{ + {"subdir1/file1.txt", "file 1\n", false}, + {"subdir2/file2.txt", "file 2\n", false}, + {"subdir3/doesntexist", "", true}, + {"subdir4/this_file_is_overlaid.txt", "99999999\n", 
false}, + {"subdir5/deleted.txt", "", true}, + {"parentoverwritten/subdir1/subdir2/subdir3/file.txt", "", true}, + {"childoverlay/subdir1.txt", "", true}, + {"subdir11", "11\n", false}, + {"parentdeleted/file.txt", "this can exist because the parent directory is deleted\n", false}, + } + + for _, tc := range testCases { + f, err := Open(tc.path) + if tc.isErr { + if err == nil { + f.Close() + t.Errorf("Open(%q): got no error, but want error", tc.path) + } + continue + } + if err != nil { + t.Errorf("Open(%q): got error %v, want nil", tc.path, err) + continue + } + contents, err := ioutil.ReadAll(f) + if err != nil { + t.Errorf("unexpected error reading contents of file: %v", err) + } + if string(contents) != tc.wantContents { + t.Errorf("contents of file opened with Open(%q): got %q, want %q", + tc.path, contents, tc.wantContents) + } + f.Close() + } +} + +func TestIsDirWithGoFiles(t *testing.T) { + initOverlay(t, ` +{ + "Replace": { + "goinoverlay/file.go": "dummy", + "directory/removed/by/file": "dummy", + "directory_with_go_dir/dir.go/file.txt": "dummy", + "otherdirectory/deleted.go": "", + "nonexistentdirectory/deleted.go": "", + "textfile.txt/file.go": "dummy" + } +} +-- dummy -- +a destination file for the overlay entries to point to +contents don't matter for this test +-- nogo/file.txt -- +-- goondisk/file.go -- +-- goinoverlay/file.txt -- +-- directory/removed/by/file/in/overlay/file.go -- +-- otherdirectory/deleted.go -- +-- textfile.txt -- +`) + + testCases := []struct { + dir string + want bool + wantErr bool + }{ + {"nogo", false, false}, + {"goondisk", true, false}, + {"goinoverlay", true, false}, + {"directory/removed/by/file/in/overlay", false, false}, + {"directory_with_go_dir", false, false}, + {"otherdirectory", false, false}, + {"nonexistentdirectory", false, false}, + {"textfile.txt", true, false}, + } + + for _, tc := range testCases { + got, gotErr := IsDirWithGoFiles(tc.dir) + if tc.wantErr { + if gotErr == nil { + 
t.Errorf("IsDirWithGoFiles(%q): got %v, %v; want non-nil error", tc.dir, got, gotErr) + } + continue + } + if gotErr != nil { + t.Errorf("IsDirWithGoFiles(%q): got %v, %v; want nil error", tc.dir, got, gotErr) + } + if got != tc.want { + t.Errorf("IsDirWithGoFiles(%q) = %v; want %v", tc.dir, got, tc.want) + } + } +} diff --git a/src/cmd/go/internal/imports/scan.go b/src/cmd/go/internal/imports/scan.go index 3d9b6132b1..42ee49aaaa 100644 --- a/src/cmd/go/internal/imports/scan.go +++ b/src/cmd/go/internal/imports/scan.go @@ -6,16 +6,17 @@ package imports import ( "fmt" - "io/ioutil" "os" "path/filepath" "sort" "strconv" "strings" + + "cmd/go/internal/fsys" ) func ScanDir(dir string, tags map[string]bool) ([]string, []string, error) { - infos, err := ioutil.ReadDir(dir) + infos, err := fsys.ReadDir(dir) if err != nil { return nil, nil, err } @@ -49,7 +50,7 @@ func scanFiles(files []string, tags map[string]bool, explicitFiles bool) ([]stri numFiles := 0 Files: for _, name := range files { - r, err := os.Open(name) + r, err := fsys.Open(name) if err != nil { return nil, nil, err } diff --git a/src/cmd/go/internal/modload/import.go b/src/cmd/go/internal/modload/import.go index c36c8bd29b..3642de851a 100644 --- a/src/cmd/go/internal/modload/import.go +++ b/src/cmd/go/internal/modload/import.go @@ -17,6 +17,7 @@ import ( "time" "cmd/go/internal/cfg" + "cmd/go/internal/fsys" "cmd/go/internal/modfetch" "cmd/go/internal/par" "cmd/go/internal/search" @@ -438,57 +439,9 @@ func dirInModule(path, mpath, mdir string, isLocal bool) (dir string, haveGoFile // We don't care about build tags, not even "+build ignore". // We're just looking for a plausible directory. 
res := haveGoFilesCache.Do(dir, func() interface{} { - ok, err := isDirWithGoFiles(dir) + ok, err := fsys.IsDirWithGoFiles(dir) return goFilesEntry{haveGoFiles: ok, err: err} }).(goFilesEntry) return dir, res.haveGoFiles, res.err } - -func isDirWithGoFiles(dir string) (bool, error) { - f, err := os.Open(dir) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - defer f.Close() - - names, firstErr := f.Readdirnames(-1) - if firstErr != nil { - if fi, err := f.Stat(); err == nil && !fi.IsDir() { - return false, nil - } - - // Rewrite the error from ReadDirNames to include the path if not present. - // See https://golang.org/issue/38923. - var pe *os.PathError - if !errors.As(firstErr, &pe) { - firstErr = &os.PathError{Op: "readdir", Path: dir, Err: firstErr} - } - } - - for _, name := range names { - if strings.HasSuffix(name, ".go") { - info, err := os.Stat(filepath.Join(dir, name)) - if err == nil && info.Mode().IsRegular() { - // If any .go source file exists, the package exists regardless of - // errors for other source files. Leave further error reporting for - // later. - return true, nil - } - if firstErr == nil { - if os.IsNotExist(err) { - // If the file was concurrently deleted, or was a broken symlink, - // convert the error to an opaque error instead of one matching - // os.IsNotExist. 
- err = errors.New(err.Error()) - } - firstErr = err - } - } - } - - return false, firstErr -} diff --git a/src/cmd/go/internal/modload/init.go b/src/cmd/go/internal/modload/init.go index 3344242489..e1b784860b 100644 --- a/src/cmd/go/internal/modload/init.go +++ b/src/cmd/go/internal/modload/init.go @@ -22,6 +22,7 @@ import ( "cmd/go/internal/base" "cmd/go/internal/cfg" + "cmd/go/internal/fsys" "cmd/go/internal/lockedfile" "cmd/go/internal/modconv" "cmd/go/internal/modfetch" @@ -132,6 +133,10 @@ func Init() { return } + if err := fsys.Init(base.Cwd); err != nil { + base.Fatalf("go: %v", err) + } + // Disable any prompting for passwords by Git. // Only has an effect for 2.3.0 or later, but avoiding // the prompt in earlier versions is just too hard. diff --git a/src/cmd/go/internal/search/search.go b/src/cmd/go/internal/search/search.go index 4efef24152..868dbf5f9d 100644 --- a/src/cmd/go/internal/search/search.go +++ b/src/cmd/go/internal/search/search.go @@ -264,6 +264,7 @@ func (m *Match) MatchDirs() { } err := filepath.Walk(dir, func(path string, fi os.FileInfo, err error) error { + // TODO(#39958): Handle walk for overlays. if err != nil { return err // Likely a permission error, which could interfere with matching. } diff --git a/src/cmd/go/internal/work/build.go b/src/cmd/go/internal/work/build.go index 86423f118c..21342ac8ba 100644 --- a/src/cmd/go/internal/work/build.go +++ b/src/cmd/go/internal/work/build.go @@ -19,6 +19,7 @@ import ( "cmd/go/internal/base" "cmd/go/internal/cfg" + "cmd/go/internal/fsys" "cmd/go/internal/load" "cmd/go/internal/modfetch" "cmd/go/internal/modload" @@ -277,6 +278,8 @@ func AddBuildFlags(cmd *base.Command, mask BuildFlagMask) { cmd.Flag.BoolVar(&cfg.BuildTrimpath, "trimpath", false, "") cmd.Flag.BoolVar(&cfg.BuildWork, "work", false, "") + cmd.Flag.StringVar(&fsys.OverlayFile, "overlay", "", "") + // Undocumented, unstable debugging flags. 
cmd.Flag.StringVar(&cfg.DebugActiongraph, "debug-actiongraph", "", "") cmd.Flag.StringVar(&cfg.DebugTrace, "debug-trace", "", "") diff --git a/src/cmd/go/internal/work/gc.go b/src/cmd/go/internal/work/gc.go index d76574932e..1f15654c79 100644 --- a/src/cmd/go/internal/work/gc.go +++ b/src/cmd/go/internal/work/gc.go @@ -18,6 +18,7 @@ import ( "cmd/go/internal/base" "cmd/go/internal/cfg" + "cmd/go/internal/fsys" "cmd/go/internal/load" "cmd/go/internal/str" "cmd/internal/objabi" @@ -145,7 +146,25 @@ func (gcToolchain) gc(b *Builder, a *Action, archive string, importcfg []byte, s } for _, f := range gofiles { - args = append(args, mkAbs(p.Dir, f)) + f := mkAbs(p.Dir, f) + + // Handle overlays. Convert path names using OverlayPath + // so these paths can be handed directly to tools. + // Deleted files won't show up when scanning directories earlier, + // so OverlayPath will never return "" (meaning a deleted file) here. + // TODO(#39958): Handle -trimprefix and other cases where + // tools depend on the names of the files that are passed in. + // TODO(#39958): Handle cases where the package directory + // doesn't exist on disk (this can happen when all the package's + // files are in an overlay): the code expects the package directory + // to exist and runs some tools in that directory. + // TODO(#39958): Process the overlays when the + // gofiles, cgofiles, cfiles, sfiles, and cxxfiles variables are + // created in (*Builder).build. Doing that requires rewriting the + // code that uses those values to expect absolute paths. + f, _ = fsys.OverlayPath(f) + + args = append(args, f) } output, err = b.runOut(a, p.Dir, nil, args...) @@ -247,6 +266,8 @@ func (a *Action) trimpath() string { } } + // TODO(#39958): Add rewrite rules for overlaid files. 
+ return rewrite } diff --git a/src/cmd/go/internal/work/init.go b/src/cmd/go/internal/work/init.go index b0d6133768..bab1935aca 100644 --- a/src/cmd/go/internal/work/init.go +++ b/src/cmd/go/internal/work/init.go @@ -9,6 +9,7 @@ package work import ( "cmd/go/internal/base" "cmd/go/internal/cfg" + "cmd/go/internal/fsys" "cmd/go/internal/modload" "cmd/internal/objabi" "cmd/internal/sys" @@ -24,6 +25,9 @@ func BuildInit() { modload.Init() instrumentInit() buildModeInit() + if err := fsys.Init(base.Cwd); err != nil { + base.Fatalf("go: %v", err) + } // Make sure -pkgdir is absolute, because we run commands // in different directories. diff --git a/src/cmd/go/testdata/script/build_overlay.txt b/src/cmd/go/testdata/script/build_overlay.txt new file mode 100644 index 0000000000..3b039901fa --- /dev/null +++ b/src/cmd/go/testdata/script/build_overlay.txt @@ -0,0 +1,64 @@ +[short] skip + +# Test building in overlays. +# TODO(matloob): add a test case where the destination file in the replace map +# isn't a go file. Either completely exclude that case in fs.IsDirWithGoFiles +# if the compiler doesn't allow it, or test that it works all the way. + +# The main package (m) is contained in an overlay. It imports m/dir2 which has one +# file in an overlay and one file outside the overlay, which in turn imports m/dir, +# which only has source files in the overlay. + +! go build . +go build -overlay overlay.json -o main$GOEXE . +exec ./main$goexe +stdout '^hello$' + +-- go.mod -- +// TODO(matloob): how do overlays work with go.mod (especially if mod=readonly) +module m + +go 1.16 + +-- dir2/h.go -- +package dir2 + +func PrintMessage() { + printMessage() +} +-- dir/foo.txt -- +The build action code currently expects the package directory +to exist, so it can run the compiler in that directory. +TODO(matloob): Remove this requirement. 
+-- overlay.json -- +{ + "Replace": { + "f.go": "overlay/f.go", + "dir/g.go": "overlay/dir_g.go", + "dir2/i.go": "overlay/dir2_i.go" + } +} +-- overlay/f.go -- +package main + +import "m/dir2" + +func main() { + dir2.PrintMessage() +} +-- overlay/dir_g.go -- +package dir + +import "fmt" + +func PrintMessage() { + fmt.Println("hello") +} +-- overlay/dir2_i.go -- +package dir2 + +import "m/dir" + +func printMessage() { + dir.PrintMessage() +} diff --git a/src/cmd/go/testdata/script/list_overlay.txt b/src/cmd/go/testdata/script/list_overlay.txt new file mode 100644 index 0000000000..7d0e3c2c81 --- /dev/null +++ b/src/cmd/go/testdata/script/list_overlay.txt @@ -0,0 +1,54 @@ +# Test listing with overlays + +# Overlay in an existing directory +go list -overlay overlay.json -f '{{.GoFiles}}' . +stdout '^\[f.go\]$' + +# Overlays in a non-existing directory +go list -overlay overlay.json -f '{{.GoFiles}}' ./dir +stdout '^\[g.go\]$' + +# Overlays in an existing directory with already existing files +go list -overlay overlay.json -f '{{.GoFiles}}' ./dir2 +stdout '^\[h.go i.go\]$' + +# Overlay that removes a file from a directory +! 
go list ./dir3 # contains a file without a package statement +go list -overlay overlay.json -f '{{.GoFiles}}' ./dir3 # overlay removes that file + +# TODO(#39958): assembly files, C files, files that require cgo preprocessing + +-- go.mod -- +// TODO(#39958): Support and test overlays including go.mod itself (especially if mod=readonly) +module m + +go 1.16 + +-- dir2/h.go -- +package dir2 + +-- dir3/good.go -- +package dir3 +-- dir3/bad.go -- +// no package statement +-- overlay.json -- +{ + "Replace": { + "f.go": "overlay/f.go", + "dir/g.go": "overlay/dir_g.go", + "dir2/i.go": "overlay/dir2_i.go", + "dir3/bad.go": "" + } +} +-- overlay/f.go -- +package m + +func f() { +} +-- overlay/dir_g.go -- +package m + +func g() { +} +-- overlay/dir2_i.go -- +package dir2 From 1fb149fd640f2e83f17206aa6eb530d664b0b5ed Mon Sep 17 00:00:00 2001 From: witchard Date: Fri, 25 Sep 2020 14:09:42 +0000 Subject: [PATCH 125/281] cmd/go/internal/get: improve -insecure deprecation docs Updates #37519 Change-Id: I212607f1839b729d7da24b1258e56997b13ad830 GitHub-Last-Rev: db6d3c835bdf867a0b18f115276210e3a05902ed GitHub-Pull-Request: golang/go#41613 Reviewed-on: https://go-review.googlesource.com/c/go/+/257157 Run-TryBot: Bryan C. Mills TryBot-Result: Go Bot Trust: Jay Conrod Trust: Bryan C. Mills Reviewed-by: Bryan C. Mills Reviewed-by: Jay Conrod --- doc/go1.16.html | 13 +++++++------ src/cmd/go/alldocs.go | 15 +++++++-------- src/cmd/go/internal/get/get.go | 4 ++-- src/cmd/go/internal/modget/get.go | 11 +++++------ 4 files changed, 21 insertions(+), 22 deletions(-) diff --git a/doc/go1.16.html b/doc/go1.16.html index f7bcb9e94f..2fb7222482 100644 --- a/doc/go1.16.html +++ b/doc/go1.16.html @@ -90,12 +90,13 @@ Do not send CLs removing the interior tags from such phrases.

The go get -insecure flag is - deprecated and will be removed in a future version. The GOINSECURE - environment variable should be used instead, since it provides control - over which modules may be retrieved using an insecure scheme. Unlike the - -insecure flag, GOINSECURE does not disable module - sum validation using the checksum database. The GOPRIVATE or - GONOSUMDB environment variables may be used instead. + deprecated and will be removed in a future version. This flag permits + fetching from repositories and resolving custom domains using insecure + schemes such as HTTP, and also bypasses module sum validation using the + checksum database. To permit the use of insecure schemes, use the + GOINSECURE environment variable instead. To bypass module + sum validation, use GOPRIVATE or GONOSUMDB. + See go help environment for details.

The all pattern

diff --git a/src/cmd/go/alldocs.go b/src/cmd/go/alldocs.go index 500682ed02..14840efb22 100644 --- a/src/cmd/go/alldocs.go +++ b/src/cmd/go/alldocs.go @@ -662,13 +662,12 @@ // this automatically as well. // // The -insecure flag permits fetching from repositories and resolving -// custom domains using insecure schemes such as HTTP. Use with caution. +// custom domains using insecure schemes such as HTTP, and also bypasses +// module sum validation using the checksum database. Use with caution. // This flag is deprecated and will be removed in a future version of go. -// The GOINSECURE environment variable is usually a better alternative, since -// it provides control over which modules may be retrieved using an insecure -// scheme. It should be noted that the -insecure flag also turns the module -// checksum validation off. GOINSECURE does not do that, use GONOSUMDB. -// See 'go help environment' for details. +// To permit the use of insecure schemes, use the GOINSECURE environment +// variable instead. To bypass module sum validation, use GOPRIVATE or +// GONOSUMDB. See 'go help environment' for details. // // The second step is to download (if needed), build, and install // the named packages. @@ -2211,8 +2210,8 @@ // The -insecure flag permits fetching from repositories and resolving // custom domains using insecure schemes such as HTTP. Use with caution. // This flag is deprecated and will be removed in a future version of go. -// The GOINSECURE environment variable is usually a better alternative, since -// it provides control over which modules may be retrieved using an insecure +// The GOINSECURE environment variable should be used instead, since it +// provides control over which packages may be retrieved using an insecure // scheme. See 'go help environment' for details. 
// // The -t flag instructs get to also download the packages required to build diff --git a/src/cmd/go/internal/get/get.go b/src/cmd/go/internal/get/get.go index ed2786879c..268962eca8 100644 --- a/src/cmd/go/internal/get/get.go +++ b/src/cmd/go/internal/get/get.go @@ -46,8 +46,8 @@ before resolving dependencies or building the code. The -insecure flag permits fetching from repositories and resolving custom domains using insecure schemes such as HTTP. Use with caution. This flag is deprecated and will be removed in a future version of go. -The GOINSECURE environment variable is usually a better alternative, since -it provides control over which modules may be retrieved using an insecure +The GOINSECURE environment variable should be used instead, since it +provides control over which packages may be retrieved using an insecure scheme. See 'go help environment' for details. The -t flag instructs get to also download the packages required to build diff --git a/src/cmd/go/internal/modget/get.go b/src/cmd/go/internal/modget/get.go index f1cf8b17a8..ea0e99af7d 100644 --- a/src/cmd/go/internal/modget/get.go +++ b/src/cmd/go/internal/modget/get.go @@ -115,13 +115,12 @@ require downgrading other dependencies, and 'go get' does this automatically as well. The -insecure flag permits fetching from repositories and resolving -custom domains using insecure schemes such as HTTP. Use with caution. +custom domains using insecure schemes such as HTTP, and also bypasses +module sum validation using the checksum database. This flag is deprecated and will be removed in a future version of go. -The GOINSECURE environment variable is usually a better alternative, since -it provides control over which modules may be retrieved using an insecure -scheme. It should be noted that the -insecure flag also turns the module -checksum validation off. GOINSECURE does not do that, use GONOSUMDB. -See 'go help environment' for details. 
+To permit the use of insecure schemes, use the GOINSECURE environment +variable instead. To bypass module sum validation, use GOPRIVATE or +GONOSUMDB. See 'go help environment' for details. The second step is to download (if needed), build, and install the named packages. From bdab5df40f474c7768a945ef4fcf5aab634f7af5 Mon Sep 17 00:00:00 2001 From: Lynn Boger Date: Fri, 2 Oct 2020 17:51:13 -0400 Subject: [PATCH 126/281] cmd/compile,cmd/internal/obj/ppc64: use mulli where possible MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This adds support to allow the use of mulli when one of the multiply operands is a constant that fits in 16 bits. This especially helps in the case where this instruction appears in a loop since the load of the constant is not being moved out of the loop. Some improvements seen in compress/flate on power9: Decode/Digits/Huffman/1e4 259µs ± 0% 261µs ± 0% +0.57% (p=1.000 n=1+1) Decode/Digits/Huffman/1e5 2.43ms ± 0% 2.45ms ± 0% +0.79% (p=1.000 n=1+1) Decode/Digits/Huffman/1e6 23.9ms ± 0% 24.2ms ± 0% +0.86% (p=1.000 n=1+1) Decode/Digits/Speed/1e4 278µs ± 0% 279µs ± 0% +0.34% (p=1.000 n=1+1) Decode/Digits/Speed/1e5 2.80ms ± 0% 2.81ms ± 0% +0.29% (p=1.000 n=1+1) Decode/Digits/Speed/1e6 28.0ms ± 0% 28.1ms ± 0% +0.28% (p=1.000 n=1+1) Decode/Digits/Default/1e4 278µs ± 0% 278µs ± 0% +0.28% (p=1.000 n=1+1) Decode/Digits/Default/1e5 2.68ms ± 0% 2.69ms ± 0% +0.19% (p=1.000 n=1+1) Decode/Digits/Default/1e6 26.6ms ± 0% 26.6ms ± 0% +0.21% (p=1.000 n=1+1) Decode/Digits/Compression/1e4 278µs ± 0% 278µs ± 0% +0.00% (p=1.000 n=1+1) Decode/Digits/Compression/1e5 2.68ms ± 0% 2.69ms ± 0% +0.21% (p=1.000 n=1+1) Decode/Digits/Compression/1e6 26.6ms ± 0% 26.6ms ± 0% +0.07% (p=1.000 n=1+1) Decode/Newton/Huffman/1e4 322µs ± 0% 312µs ± 0% -2.84% (p=1.000 n=1+1) Decode/Newton/Huffman/1e5 3.11ms ± 0% 2.91ms ± 0% -6.41% (p=1.000 n=1+1) Decode/Newton/Huffman/1e6 31.4ms ± 0% 29.3ms ± 0% -6.85% (p=1.000 n=1+1) Decode/Newton/Speed/1e4 
282µs ± 0% 269µs ± 0% -4.69% (p=1.000 n=1+1) Decode/Newton/Speed/1e5 2.29ms ± 0% 2.20ms ± 0% -4.13% (p=1.000 n=1+1) Decode/Newton/Speed/1e6 22.7ms ± 0% 21.3ms ± 0% -6.06% (p=1.000 n=1+1) Decode/Newton/Default/1e4 254µs ± 0% 237µs ± 0% -6.60% (p=1.000 n=1+1) Decode/Newton/Default/1e5 1.86ms ± 0% 1.75ms ± 0% -5.99% (p=1.000 n=1+1) Decode/Newton/Default/1e6 18.1ms ± 0% 17.4ms ± 0% -4.10% (p=1.000 n=1+1) Decode/Newton/Compression/1e4 254µs ± 0% 244µs ± 0% -3.91% (p=1.000 n=1+1) Decode/Newton/Compression/1e5 1.85ms ± 0% 1.79ms ± 0% -3.10% (p=1.000 n=1+1) Decode/Newton/Compression/1e6 18.0ms ± 0% 17.3ms ± 0% -3.88% (p=1.000 n=1+1) Change-Id: I840320fab1c4bf64c76b001c2651ab79f23df4eb Reviewed-on: https://go-review.googlesource.com/c/go/+/259444 Run-TryBot: Lynn Boger TryBot-Result: Go Bot Reviewed-by: Paul Murphy Reviewed-by: Carlos Eduardo Seo Trust: Lynn Boger --- src/cmd/asm/internal/asm/testdata/ppc64enc.s | 4 ++ src/cmd/compile/internal/gc/bench_test.go | 12 +++++ src/cmd/compile/internal/ppc64/ssa.go | 3 +- src/cmd/compile/internal/ssa/gen/PPC64.rules | 2 + src/cmd/compile/internal/ssa/gen/PPC64Ops.go | 2 + src/cmd/compile/internal/ssa/opGen.go | 30 +++++++++++ src/cmd/compile/internal/ssa/rewritePPC64.go | 54 ++++++++++++++++++++ src/cmd/internal/obj/ppc64/asm9.go | 9 ++-- 8 files changed, 111 insertions(+), 5 deletions(-) diff --git a/src/cmd/asm/internal/asm/testdata/ppc64enc.s b/src/cmd/asm/internal/asm/testdata/ppc64enc.s index 869f8c2d4f..c6d7b59aad 100644 --- a/src/cmd/asm/internal/asm/testdata/ppc64enc.s +++ b/src/cmd/asm/internal/asm/testdata/ppc64enc.s @@ -204,12 +204,16 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$0 MULLW R3, R4 // 7c8419d6 MULLW R3, R4, R5 // 7ca419d6 + MULLW $10, R3 // 1c63000a + MULLW $10000000, R3 // 641f009863ff96807c7f19d6 MULLWCC R3, R4, R5 // 7ca419d7 MULHW R3, R4, R5 // 7ca41896 MULHWU R3, R4, R5 // 7ca41816 MULLD R3, R4 // 7c8419d2 MULLD R4, R4, R5 // 7ca421d2 + MULLD $20, R4 // 1c840014 + MULLD $200000000, R4 // 641f0beb63ffc2007c9f21d2 
MULLDCC R3, R4, R5 // 7ca419d3 MULHD R3, R4, R5 // 7ca41892 MULHDCC R3, R4, R5 // 7ca41893 diff --git a/src/cmd/compile/internal/gc/bench_test.go b/src/cmd/compile/internal/gc/bench_test.go index a2887f2f7b..8c4288128f 100644 --- a/src/cmd/compile/internal/gc/bench_test.go +++ b/src/cmd/compile/internal/gc/bench_test.go @@ -7,6 +7,7 @@ package gc import "testing" var globl int64 +var globl32 int32 func BenchmarkLoadAdd(b *testing.B) { x := make([]int64, 1024) @@ -42,6 +43,17 @@ func BenchmarkModify(b *testing.B) { } } +func BenchmarkMullImm(b *testing.B) { + x := make([]int32, 1024) + for i := 0; i < b.N; i++ { + var s int32 + for i := range x { + s += x[i] * 100 + } + globl32 = s + } +} + func BenchmarkConstModify(b *testing.B) { a := make([]int64, 1024) for i := 0; i < b.N; i++ { diff --git a/src/cmd/compile/internal/ppc64/ssa.go b/src/cmd/compile/internal/ppc64/ssa.go index d83b2df379..1ece4d999f 100644 --- a/src/cmd/compile/internal/ppc64/ssa.go +++ b/src/cmd/compile/internal/ppc64/ssa.go @@ -677,7 +677,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Reg = v.Args[0].Reg() case ssa.OpPPC64ADDconst, ssa.OpPPC64ANDconst, ssa.OpPPC64ORconst, ssa.OpPPC64XORconst, - ssa.OpPPC64SRADconst, ssa.OpPPC64SRAWconst, ssa.OpPPC64SRDconst, ssa.OpPPC64SRWconst, ssa.OpPPC64SLDconst, ssa.OpPPC64SLWconst, ssa.OpPPC64EXTSWSLconst: + ssa.OpPPC64SRADconst, ssa.OpPPC64SRAWconst, ssa.OpPPC64SRDconst, ssa.OpPPC64SRWconst, + ssa.OpPPC64SLDconst, ssa.OpPPC64SLWconst, ssa.OpPPC64EXTSWSLconst, ssa.OpPPC64MULLWconst, ssa.OpPPC64MULLDconst: p := s.Prog(v.Op.Asm()) p.Reg = v.Args[0].Reg() p.From.Type = obj.TYPE_CONST diff --git a/src/cmd/compile/internal/ssa/gen/PPC64.rules b/src/cmd/compile/internal/ssa/gen/PPC64.rules index 83ee4c499b..a05cfee654 100644 --- a/src/cmd/compile/internal/ssa/gen/PPC64.rules +++ b/src/cmd/compile/internal/ssa/gen/PPC64.rules @@ -821,6 +821,8 @@ (ADDconst [c] (MOVDaddr [d] {sym} x)) && is32Bit(c+int64(d)) => (MOVDaddr [int32(c+int64(d))] {sym} x) 
+(MULL(W|D) x (MOVDconst [c])) && is16Bit(c) => (MULL(W|D)const [int32(c)] x) + // Subtract from (with carry, but ignored) constant. // Note, these clobber the carry bit. (SUB (MOVDconst [c]) x) && is32Bit(c) => (SUBFCconst [c] x) diff --git a/src/cmd/compile/internal/ssa/gen/PPC64Ops.go b/src/cmd/compile/internal/ssa/gen/PPC64Ops.go index 28317928a8..5885660597 100644 --- a/src/cmd/compile/internal/ssa/gen/PPC64Ops.go +++ b/src/cmd/compile/internal/ssa/gen/PPC64Ops.go @@ -181,6 +181,8 @@ func init() { {name: "MULLD", argLength: 2, reg: gp21, asm: "MULLD", typ: "Int64", commutative: true}, // arg0*arg1 (signed 64-bit) {name: "MULLW", argLength: 2, reg: gp21, asm: "MULLW", typ: "Int32", commutative: true}, // arg0*arg1 (signed 32-bit) + {name: "MULLDconst", argLength: 1, reg: gp11, asm: "MULLD", aux: "Int32", typ: "Int64"}, // arg0*auxInt (signed 64-bit) + {name: "MULLWconst", argLength: 1, reg: gp11, asm: "MULLW", aux: "Int32", typ: "Int64"}, // arg0*auxInt (signed 64-bit) {name: "MADDLD", argLength: 3, reg: gp31, asm: "MADDLD", typ: "Int64"}, // (arg0*arg1)+arg2 (signed 64-bit) {name: "MULHD", argLength: 2, reg: gp21, asm: "MULHD", commutative: true}, // (arg0 * arg1) >> 64, signed diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index d7d2b24a48..051550fb17 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1832,6 +1832,8 @@ const ( OpPPC64FSUBS OpPPC64MULLD OpPPC64MULLW + OpPPC64MULLDconst + OpPPC64MULLWconst OpPPC64MADDLD OpPPC64MULHD OpPPC64MULHW @@ -24377,6 +24379,34 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "MULLDconst", + auxType: auxInt32, + argLen: 1, + asm: ppc64.AMULLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 
R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MULLWconst", + auxType: auxInt32, + argLen: 1, + asm: ppc64.AMULLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, { name: "MADDLD", argLen: 3, diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go index 9822637b05..1b8a5a78ca 100644 --- a/src/cmd/compile/internal/ssa/rewritePPC64.go +++ b/src/cmd/compile/internal/ssa/rewritePPC64.go @@ -568,6 +568,10 @@ func rewriteValuePPC64(v *Value) bool { return rewriteValuePPC64_OpPPC64MOVWstorezero(v) case OpPPC64MTVSRD: return rewriteValuePPC64_OpPPC64MTVSRD(v) + case OpPPC64MULLD: + return rewriteValuePPC64_OpPPC64MULLD(v) + case OpPPC64MULLW: + return rewriteValuePPC64_OpPPC64MULLW(v) case OpPPC64NEG: return rewriteValuePPC64_OpPPC64NEG(v) case OpPPC64NOR: @@ -11003,6 +11007,56 @@ func rewriteValuePPC64_OpPPC64MTVSRD(v *Value) bool { } return false } +func rewriteValuePPC64_OpPPC64MULLD(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MULLD x (MOVDconst [c])) + // cond: is16Bit(c) + // result: (MULLDconst [int32(c)] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(is16Bit(c)) { + continue + } + v.reset(OpPPC64MULLDconst) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg(x) + return true + } + break + } + return false +} +func rewriteValuePPC64_OpPPC64MULLW(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MULLW x (MOVDconst [c])) + // cond: is16Bit(c) + // result: (MULLWconst [int32(c)] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != 
OpPPC64MOVDconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(is16Bit(c)) { + continue + } + v.reset(OpPPC64MULLWconst) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg(x) + return true + } + break + } + return false +} func rewriteValuePPC64_OpPPC64NEG(v *Value) bool { v_0 := v.Args[0] // match: (NEG (ADDconst [c] x)) diff --git a/src/cmd/internal/obj/ppc64/asm9.go b/src/cmd/internal/obj/ppc64/asm9.go index 928e299f43..c2e8e9e9d0 100644 --- a/src/cmd/internal/obj/ppc64/asm9.go +++ b/src/cmd/internal/obj/ppc64/asm9.go @@ -1279,6 +1279,9 @@ func buildop(ctxt *obj.Link) { case AREMD: opset(AREMDU, r0) + case AMULLW: + opset(AMULLD, r0) + case ADIVW: /* op Rb[,Ra],Rd */ opset(AMULHW, r0) @@ -1312,7 +1315,6 @@ func buildop(ctxt *obj.Link) { opset(AMULHDCC, r0) opset(AMULHDU, r0) opset(AMULHDUCC, r0) - opset(AMULLD, r0) opset(AMULLDCC, r0) opset(AMULLDVCC, r0) opset(AMULLDV, r0) @@ -1996,7 +1998,6 @@ func buildop(ctxt *obj.Link) { AMOVB, /* macro: move byte with sign extension */ AMOVBU, /* macro: move byte with sign extension & update */ AMOVFL, - AMULLW, /* op $s[,r2],r3; op r1[,r2],r3; no cc/v */ ASUBC, /* op r1,$s,r3; op r1[,r2],r3 */ ASTSW, @@ -4990,8 +4991,8 @@ func (c *ctxt9) opirr(a obj.As) uint32 { case ADARN: return OPVCC(31, 755, 0, 0) /* darn - v3.00 */ - case AMULLW: - return OPVCC(7, 0, 0, 0) + case AMULLW, AMULLD: + return OPVCC(7, 0, 0, 0) /* mulli works with MULLW or MULLD */ case AOR: return OPVCC(24, 0, 0, 0) From 04c7e32517faf6257986e7c4cdd3f5f03eeae37b Mon Sep 17 00:00:00 2001 From: Dan Scales Date: Wed, 30 Sep 2020 16:34:47 -0700 Subject: [PATCH 127/281] compress/flate: remove unneeded zeroing of bytes array in (*huffmanBitWriter).reset There is no correctness reason to zero out the w.bytes array in (w *huffmanBitWriter).reset, since w.nbytes is correctly set to zero. 
The elements of the bytes array are always written sequentially, with nbytes indicating how many elements have been written, and are only read up to the current value of nbytes. We have a pprof profile of a web server that compresses its request/responses, and the zeroing in reset() is taking up 2.6% of the CPU time of the server (and could be causing more slowdowns elsewhere due to its effects on the cache). This overhead may be showing up especially because there are many request/responses that are all fairly small. I'm not sure if the zeroing of the bytes array was intended as extra protection of data across reset uses in the same program, but no protection is needed as long as the huffman_bit_writer code remains correct. Change-Id: I67f2b2f56cff9dcc38d8fc0aea885bb010aeedbf Reviewed-on: https://go-review.googlesource.com/c/go/+/258577 Run-TryBot: Dan Scales Run-TryBot: Joe Tsai TryBot-Result: Go Bot Reviewed-by: Klaus Post Reviewed-by: Joe Tsai Trust: Joe Tsai Trust: Dan Scales --- src/compress/flate/huffman_bit_writer.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/compress/flate/huffman_bit_writer.go b/src/compress/flate/huffman_bit_writer.go index f111f9f592..b3ae76d082 100644 --- a/src/compress/flate/huffman_bit_writer.go +++ b/src/compress/flate/huffman_bit_writer.go @@ -75,7 +75,8 @@ type huffmanBitWriter struct { writer io.Writer // Data waiting to be written is bytes[0:nbytes] - // and then the low nbits of bits. + // and then the low nbits of bits. Data is always written + // sequentially into the bytes array. 
bits uint64 nbits uint bytes [bufferSize]byte @@ -105,7 +106,6 @@ func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter { func (w *huffmanBitWriter) reset(writer io.Writer) { w.writer = writer w.bits, w.nbits, w.nbytes, w.err = 0, 0, 0, nil - w.bytes = [bufferSize]byte{} } func (w *huffmanBitWriter) flush() { From 28e549dec3954b36d0c83442be913d8709d7e5ae Mon Sep 17 00:00:00 2001 From: Cherry Zhang Date: Sat, 12 Sep 2020 12:33:24 -0400 Subject: [PATCH 128/281] runtime: use sigaltstack on macOS/ARM64 Currently we don't use sigaltstack on darwin/arm64, as is not supported on iOS. However, it is supported on macOS. Use it. (iOS remains unchanged.) Change-Id: Icc154c5e2edf2dbdc8ca68741ad9157fc15a72ee Reviewed-on: https://go-review.googlesource.com/c/go/+/256917 Trust: Cherry Zhang Reviewed-by: Ian Lance Taylor --- misc/cgo/test/sigaltstack.go | 2 +- src/cmd/internal/obj/arm64/obj7.go | 2 +- src/runtime/mkpreempt.go | 7 ++----- src/runtime/os_darwin.go | 8 ++++---- src/runtime/preempt_arm64.s | 3 --- src/runtime/stack.go | 2 +- src/runtime/sys_darwin_arm64.s | 22 ++++++++++++++++++---- 7 files changed, 27 insertions(+), 19 deletions(-) diff --git a/misc/cgo/test/sigaltstack.go b/misc/cgo/test/sigaltstack.go index 27b753a147..034cc4b371 100644 --- a/misc/cgo/test/sigaltstack.go +++ b/misc/cgo/test/sigaltstack.go @@ -62,7 +62,7 @@ import ( func testSigaltstack(t *testing.T) { switch { - case runtime.GOOS == "solaris", runtime.GOOS == "illumos", (runtime.GOOS == "darwin" || runtime.GOOS == "ios") && runtime.GOARCH == "arm64": + case runtime.GOOS == "solaris", runtime.GOOS == "illumos", runtime.GOOS == "ios" && runtime.GOARCH == "arm64": t.Skipf("switching signal stack not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) } diff --git a/src/cmd/internal/obj/arm64/obj7.go b/src/cmd/internal/obj/arm64/obj7.go index 56da854f16..f1bc2583cb 100644 --- a/src/cmd/internal/obj/arm64/obj7.go +++ b/src/cmd/internal/obj/arm64/obj7.go @@ -589,7 +589,7 @@ func preprocess(ctxt 
*obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { q1.To.Reg = REGSP q1.Spadj = c.autosize - if c.ctxt.Headtype == objabi.Hdarwin { + if objabi.GOOS == "ios" { // iOS does not support SA_ONSTACK. We will run the signal handler // on the G stack. If we write below SP, it may be clobbered by // the signal handler. So we save LR after decrementing SP. diff --git a/src/runtime/mkpreempt.go b/src/runtime/mkpreempt.go index c5bfb0f207..40683bb9d9 100644 --- a/src/runtime/mkpreempt.go +++ b/src/runtime/mkpreempt.go @@ -340,12 +340,9 @@ func genARM64() { p("MOVD R29, -8(RSP)") // save frame pointer (only used on Linux) p("SUB $8, RSP, R29") // set up new frame pointer p("#endif") - // On darwin, save the LR again after decrementing SP. We run the - // signal handler on the G stack (as it doesn't support SA_ONSTACK), + // On iOS, save the LR again after decrementing SP. We run the + // signal handler on the G stack (as it doesn't support sigaltstack), // so any writes below SP may be clobbered. - p("#ifdef GOOS_darwin") - p("MOVD R30, (RSP)") - p("#endif") p("#ifdef GOOS_ios") p("MOVD R30, (RSP)") p("#endif") diff --git a/src/runtime/os_darwin.go b/src/runtime/os_darwin.go index 01c40b4813..394bd6fb0f 100644 --- a/src/runtime/os_darwin.go +++ b/src/runtime/os_darwin.go @@ -289,9 +289,9 @@ func mpreinit(mp *m) { // Called to initialize a new m (including the bootstrap m). // Called on the new thread, cannot allocate memory. func minit() { - // The alternate signal stack is buggy on arm64. + // iOS does not support alternate signal stack. // The signal handler handles it directly. - if GOARCH != "arm64" { + if !(GOOS == "ios" && GOARCH == "arm64") { minitSignalStack() } minitSignalMask() @@ -301,9 +301,9 @@ func minit() { // Called from dropm to undo the effect of an minit. //go:nosplit func unminit() { - // The alternate signal stack is buggy on arm64. + // iOS does not support alternate signal stack. // See minit. 
- if GOARCH != "arm64" { + if !(GOOS == "ios" && GOARCH == "arm64") { unminitSignals() } } diff --git a/src/runtime/preempt_arm64.s b/src/runtime/preempt_arm64.s index d0e77659c3..36ee13282c 100644 --- a/src/runtime/preempt_arm64.s +++ b/src/runtime/preempt_arm64.s @@ -10,9 +10,6 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0 MOVD R29, -8(RSP) SUB $8, RSP, R29 #endif - #ifdef GOOS_darwin - MOVD R30, (RSP) - #endif #ifdef GOOS_ios MOVD R30, (RSP) #endif diff --git a/src/runtime/stack.go b/src/runtime/stack.go index 3802cd049e..2afc2635aa 100644 --- a/src/runtime/stack.go +++ b/src/runtime/stack.go @@ -66,7 +66,7 @@ const ( // to each stack below the usual guard area for OS-specific // purposes like signal handling. Used on Windows, Plan 9, // and iOS because they do not use a separate stack. - _StackSystem = sys.GoosWindows*512*sys.PtrSize + sys.GoosPlan9*512 + (sys.GoosDarwin+sys.GoosIos)*sys.GoarchArm64*1024 + _StackSystem = sys.GoosWindows*512*sys.PtrSize + sys.GoosPlan9*512 + sys.GoosIos*sys.GoarchArm64*1024 // The minimum size of stack used by Go code _StackMin = 2048 diff --git a/src/runtime/sys_darwin_arm64.s b/src/runtime/sys_darwin_arm64.s index 585d4f2c64..427cb17781 100644 --- a/src/runtime/sys_darwin_arm64.s +++ b/src/runtime/sys_darwin_arm64.s @@ -202,6 +202,7 @@ TEXT runtime·sigtramp(SB),NOSPLIT,$192 BEQ 2(PC) BL runtime·load_g(SB) +#ifdef GOOS_ios MOVD RSP, R6 CMP $0, g BEQ nog @@ -226,16 +227,21 @@ nog: // Switch to gsignal stack. MOVD R6, RSP - // Call sigtrampgo. + // Save arguments. MOVW R0, (8*1)(RSP) MOVD R1, (8*2)(RSP) MOVD R2, (8*3)(RSP) +#endif + + // Call sigtrampgo. MOVD $runtime·sigtrampgo(SB), R11 BL (R11) +#ifdef GOOS_ios // Switch to old stack. MOVD (8*4)(RSP), R5 MOVD R5, RSP +#endif // Restore callee-save registers. 
MOVD (8*4)(RSP), R19 @@ -329,12 +335,20 @@ TEXT runtime·fcntl_trampoline(SB),NOSPLIT,$0 ADD $16, RSP RET -// sigaltstack on iOS is not supported and will always -// run the signal handler on the main stack, so our sigtramp has -// to do the stack switch ourselves. TEXT runtime·sigaltstack_trampoline(SB),NOSPLIT,$0 +#ifdef GOOS_ios + // sigaltstack on iOS is not supported and will always + // run the signal handler on the main stack, so our sigtramp has + // to do the stack switch ourselves. MOVW $43, R0 BL libc_exit(SB) +#else + MOVD 8(R0), R1 // arg 2 old + MOVD 0(R0), R0 // arg 1 new + CALL libc_sigaltstack(SB) + CBZ R0, 2(PC) + BL notok<>(SB) +#endif RET // Thread related functions From a739306ca7d9ea3a98acca59b853fe889f04c28c Mon Sep 17 00:00:00 2001 From: Cherry Zhang Date: Thu, 17 Sep 2020 10:53:10 -0400 Subject: [PATCH 129/281] runtime: enable more address bits on macOS/ARM64 Apparently macOS/ARM64 has 47-bit addresses, instead of 33-bit as on ios/ARM64. Enable more address bits. Updates #38485. Change-Id: I8aa64ba22a3933e3d9c4fffd17d902b5f31c30e3 Reviewed-on: https://go-review.googlesource.com/c/go/+/256918 Trust: Cherry Zhang Reviewed-by: Ian Lance Taylor Reviewed-by: Michael Knyszek --- src/runtime/malloc.go | 8 ++++---- src/runtime/mpagealloc_32bit.go | 4 ++-- src/runtime/mpagealloc_64bit.go | 4 ++-- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go index c71f856f09..f7e9b7c4b4 100644 --- a/src/runtime/malloc.go +++ b/src/runtime/malloc.go @@ -198,7 +198,7 @@ const ( // mips32 only has access to the low 2GB of virtual memory, so // we further limit it to 31 bits. // - // On darwin/arm64, although 64-bit pointers are presumably + // On ios/arm64, although 64-bit pointers are presumably // available, pointers are truncated to 33 bits. 
Furthermore, // only the top 4 GiB of the address space are actually available // to the application, but we allow the whole 33 bits anyway for @@ -207,7 +207,7 @@ const ( // arenaBaseOffset to offset into the top 4 GiB. // // WebAssembly currently has a limit of 4GB linear memory. - heapAddrBits = (_64bit*(1-sys.GoarchWasm)*(1-(sys.GoosDarwin+sys.GoosIos)*sys.GoarchArm64))*48 + (1-_64bit+sys.GoarchWasm)*(32-(sys.GoarchMips+sys.GoarchMipsle)) + 33*(sys.GoosDarwin+sys.GoosIos)*sys.GoarchArm64 + heapAddrBits = (_64bit*(1-sys.GoarchWasm)*(1-sys.GoosIos*sys.GoarchArm64))*48 + (1-_64bit+sys.GoarchWasm)*(32-(sys.GoarchMips+sys.GoarchMipsle)) + 33*sys.GoosIos*sys.GoarchArm64 // maxAlloc is the maximum size of an allocation. On 64-bit, // it's theoretically possible to allocate 1<= 0; i-- { var p uintptr switch { - case GOARCH == "arm64" && (GOOS == "darwin" || GOOS == "ios"): + case GOARCH == "arm64" && GOOS == "ios": p = uintptr(i)<<40 | uintptrMask&(0x0013<<28) case GOARCH == "arm64": p = uintptr(i)<<40 | uintptrMask&(0x0040<<32) diff --git a/src/runtime/mpagealloc_32bit.go b/src/runtime/mpagealloc_32bit.go index 6658a900ac..90f1e54d6c 100644 --- a/src/runtime/mpagealloc_32bit.go +++ b/src/runtime/mpagealloc_32bit.go @@ -2,14 +2,14 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build 386 arm mips mipsle wasm darwin,arm64 +// +build 386 arm mips mipsle wasm ios,arm64 // wasm is a treated as a 32-bit architecture for the purposes of the page // allocator, even though it has 64-bit pointers. This is because any wasm // pointer always has its top 32 bits as zero, so the effective heap address // space is only 2^32 bytes in size (see heapAddrBits). -// darwin/arm64 is treated as a 32-bit architecture for the purposes of the +// ios/arm64 is treated as a 32-bit architecture for the purposes of the // page allocator, even though it has 64-bit pointers and a 33-bit address // space (see heapAddrBits). 
The 33 bit address space cannot be rounded up // to 64 bits because there are too many summary levels to fit in just 33 diff --git a/src/runtime/mpagealloc_64bit.go b/src/runtime/mpagealloc_64bit.go index 831626e4b2..a1691ba802 100644 --- a/src/runtime/mpagealloc_64bit.go +++ b/src/runtime/mpagealloc_64bit.go @@ -2,9 +2,9 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build amd64 !darwin,arm64 mips64 mips64le ppc64 ppc64le riscv64 s390x +// +build amd64 !ios,arm64 mips64 mips64le ppc64 ppc64le riscv64 s390x -// See mpagealloc_32bit.go for why darwin/arm64 is excluded here. +// See mpagealloc_32bit.go for why ios/arm64 is excluded here. package runtime From 2e4ceaf963fc2a0ce95a198769012e62ec4e28ae Mon Sep 17 00:00:00 2001 From: Cherry Zhang Date: Thu, 17 Sep 2020 12:39:43 -0400 Subject: [PATCH 130/281] cmd/dist: enable more tests on macOS/ARM64 Unlike iOS, macOS ARM64 is more of a fully featured OS. Enable more tests. Updates #38485. Change-Id: I2e2240c848d21996db2b950a4a6856987f7a652c Reviewed-on: https://go-review.googlesource.com/c/go/+/256919 Trust: Cherry Zhang Reviewed-by: Ian Lance Taylor --- src/cmd/dist/test.go | 2 +- test/fixedbugs/bug429_run.go | 7 ++++++- test/fixedbugs/issue21576.go | 7 ++++++- test/nilptr.go | 3 ++- 4 files changed, 15 insertions(+), 4 deletions(-) diff --git a/src/cmd/dist/test.go b/src/cmd/dist/test.go index da894e3eef..abe496fdee 100644 --- a/src/cmd/dist/test.go +++ b/src/cmd/dist/test.go @@ -903,7 +903,7 @@ func (t *tester) addCmd(dt *distTest, dir string, cmdline ...interface{}) *exec. 
} func (t *tester) iOS() bool { - return (goos == "darwin" || goos == "ios") && goarch == "arm64" + return goos == "ios" } func (t *tester) out(v string) { diff --git a/test/fixedbugs/bug429_run.go b/test/fixedbugs/bug429_run.go index c6a02aae5e..60cc5b62de 100644 --- a/test/fixedbugs/bug429_run.go +++ b/test/fixedbugs/bug429_run.go @@ -1,6 +1,11 @@ -// +build !nacl,!js // run +// +build !nacl,!js +// +build !darwin !arm64 + +// Skip on darwin/arm64 as it requires external linking, which brings in +// cgo, causing deadlock detection not working. + // Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/test/fixedbugs/issue21576.go b/test/fixedbugs/issue21576.go index b7a32f07ac..3797a8c9ba 100644 --- a/test/fixedbugs/issue21576.go +++ b/test/fixedbugs/issue21576.go @@ -1,6 +1,11 @@ -// +build !nacl,!js // run +// +build !nacl,!js +// +build !darwin !arm64 + +// Skip on darwin/arm64 as it requires external linking, which brings in +// cgo, causing deadlock detection not working. + // Copyright 2019 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/test/nilptr.go b/test/nilptr.go index 90f57c54b6..c9a044dd36 100644 --- a/test/nilptr.go +++ b/test/nilptr.go @@ -8,7 +8,8 @@ // in a large address space. // +build !aix -// Address space starts at 1<<32 on AIX, so dummy is too far. +// +build !darwin !arm64 +// Address space starts at 1<<32 on AIX and on darwin/arm64, so dummy is too far. package main From db428ad7b61ed757671162054252b4326045e96c Mon Sep 17 00:00:00 2001 From: Cherry Zhang Date: Thu, 17 Sep 2020 15:02:26 -0400 Subject: [PATCH 131/281] all: enable more tests on macOS/ARM64 Updates #38485. 
Change-Id: Iac96f5ffe88521fcb11eab306d0df6463bdce046 Reviewed-on: https://go-review.googlesource.com/c/go/+/256920 Trust: Cherry Zhang Reviewed-by: Dmitri Shuralyov Reviewed-by: Ian Lance Taylor --- misc/cgo/testcarchive/carchive_test.go | 2 +- src/cmd/doc/doc_test.go | 2 +- src/cmd/go/internal/work/build_test.go | 6 ++---- src/go/build/build_test.go | 4 ++-- src/log/syslog/syslog_test.go | 7 +------ src/net/dial_test.go | 2 +- src/net/platform_test.go | 2 +- src/path/filepath/path_test.go | 4 ++-- src/runtime/debug/panic_test.go | 4 ++-- src/syscall/syscall_unix_test.go | 2 +- 10 files changed, 14 insertions(+), 21 deletions(-) diff --git a/misc/cgo/testcarchive/carchive_test.go b/misc/cgo/testcarchive/carchive_test.go index 2e223ea369..6ed25d8948 100644 --- a/misc/cgo/testcarchive/carchive_test.go +++ b/misc/cgo/testcarchive/carchive_test.go @@ -603,7 +603,7 @@ func TestExtar(t *testing.T) { if runtime.Compiler == "gccgo" { t.Skip("skipping -extar test when using gccgo") } - if (runtime.GOOS == "darwin" || runtime.GOOS == "ios") && runtime.GOARCH == "arm64" { + if runtime.GOOS == "ios" { t.Skip("shell scripts are not executable on iOS hosts") } diff --git a/src/cmd/doc/doc_test.go b/src/cmd/doc/doc_test.go index 47602833d3..39530e3c2d 100644 --- a/src/cmd/doc/doc_test.go +++ b/src/cmd/doc/doc_test.go @@ -36,7 +36,7 @@ func TestMain(m *testing.M) { } func maybeSkip(t *testing.T) { - if (runtime.GOOS == "darwin" || runtime.GOOS == "ios") && runtime.GOARCH == "arm64" { + if runtime.GOOS == "ios" { t.Skip("iOS does not have a full file tree") } } diff --git a/src/cmd/go/internal/work/build_test.go b/src/cmd/go/internal/work/build_test.go index afed0fba72..904aee0684 100644 --- a/src/cmd/go/internal/work/build_test.go +++ b/src/cmd/go/internal/work/build_test.go @@ -221,10 +221,8 @@ func pkgImportPath(pkgpath string) *load.Package { // See https://golang.org/issue/18878. 
func TestRespectSetgidDir(t *testing.T) { switch runtime.GOOS { - case "darwin", "ios": - if runtime.GOARCH == "arm64" { - t.Skip("can't set SetGID bit with chmod on iOS") - } + case "ios": + t.Skip("can't set SetGID bit with chmod on iOS") case "windows", "plan9": t.Skip("chown/chmod setgid are not supported on Windows or Plan 9") } diff --git a/src/go/build/build_test.go b/src/go/build/build_test.go index 22c62ce87d..2f2e80b5a8 100644 --- a/src/go/build/build_test.go +++ b/src/go/build/build_test.go @@ -120,7 +120,7 @@ func TestMultiplePackageImport(t *testing.T) { } func TestLocalDirectory(t *testing.T) { - if (runtime.GOOS == "darwin" || runtime.GOOS == "ios") && runtime.GOARCH == "arm64" { + if runtime.GOOS == "ios" { t.Skipf("skipping on %s/%s, no valid GOROOT", runtime.GOOS, runtime.GOARCH) } @@ -250,7 +250,7 @@ func TestMatchFile(t *testing.T) { } func TestImportCmd(t *testing.T) { - if (runtime.GOOS == "darwin" || runtime.GOOS == "ios") && runtime.GOARCH == "arm64" { + if runtime.GOOS == "ios" { t.Skipf("skipping on %s/%s, no valid GOROOT", runtime.GOOS, runtime.GOARCH) } diff --git a/src/log/syslog/syslog_test.go b/src/log/syslog/syslog_test.go index dd2f83e04f..30abfae550 100644 --- a/src/log/syslog/syslog_test.go +++ b/src/log/syslog/syslog_test.go @@ -51,12 +51,7 @@ func testableNetwork(network string) bool { switch network { case "unix", "unixgram": switch runtime.GOOS { - case "darwin", "ios": - switch runtime.GOARCH { - case "arm64": - return false - } - case "android": + case "ios", "android": return false } } diff --git a/src/net/dial_test.go b/src/net/dial_test.go index 2706de4442..57cf5554ad 100644 --- a/src/net/dial_test.go +++ b/src/net/dial_test.go @@ -990,7 +990,7 @@ func TestDialerControl(t *testing.T) { // except that it won't skip testing on non-mobile builders. 
func mustHaveExternalNetwork(t *testing.T) { t.Helper() - mobile := runtime.GOOS == "android" || (runtime.GOOS == "darwin" || runtime.GOOS == "ios") && runtime.GOARCH == "arm64" + mobile := runtime.GOOS == "android" || runtime.GOOS == "ios" if testenv.Builder() == "" || mobile { testenv.MustHaveExternalNetwork(t) } diff --git a/src/net/platform_test.go b/src/net/platform_test.go index 4b92bb6df0..2da23dedce 100644 --- a/src/net/platform_test.go +++ b/src/net/platform_test.go @@ -82,7 +82,7 @@ func testableNetwork(network string) bool { } func iOS() bool { - return (runtime.GOOS == "darwin" || runtime.GOOS == "ios") && runtime.GOARCH == "arm64" + return runtime.GOOS == "ios" } // testableAddress reports whether address of network is testable on diff --git a/src/path/filepath/path_test.go b/src/path/filepath/path_test.go index ca100ff071..6a8700e413 100644 --- a/src/path/filepath/path_test.go +++ b/src/path/filepath/path_test.go @@ -431,7 +431,7 @@ func chtmpdir(t *testing.T) (restore func()) { } func TestWalk(t *testing.T) { - if (runtime.GOOS == "darwin" || runtime.GOOS == "ios") && runtime.GOARCH == "arm64" { + if runtime.GOOS == "ios" { restore := chtmpdir(t) defer restore() } @@ -1278,7 +1278,7 @@ func TestDriveLetterInEvalSymlinks(t *testing.T) { } func TestBug3486(t *testing.T) { // https://golang.org/issue/3486 - if (runtime.GOOS == "darwin" || runtime.GOOS == "ios") && runtime.GOARCH == "arm64" { + if runtime.GOOS == "ios" { t.Skipf("skipping on %s/%s", runtime.GOOS, runtime.GOARCH) } root, err := filepath.EvalSymlinks(runtime.GOROOT() + "/test") diff --git a/src/runtime/debug/panic_test.go b/src/runtime/debug/panic_test.go index 93be216985..b67a3de4f9 100644 --- a/src/runtime/debug/panic_test.go +++ b/src/runtime/debug/panic_test.go @@ -20,8 +20,8 @@ func TestPanicOnFault(t *testing.T) { if runtime.GOARCH == "s390x" { t.Skip("s390x fault addresses are missing the low order bits") } - if (runtime.GOOS == "darwin" || runtime.GOOS == "ios") && runtime.GOARCH 
== "arm64" { - t.Skip("darwin/arm64 doesn't provide fault addresses") + if runtime.GOOS == "ios" { + t.Skip("iOS doesn't provide fault addresses") } m, err := syscall.Mmap(-1, 0, 0x1000, syscall.PROT_READ /* Note: no PROT_WRITE */, syscall.MAP_SHARED|syscall.MAP_ANON) if err != nil { diff --git a/src/syscall/syscall_unix_test.go b/src/syscall/syscall_unix_test.go index 7e9bb0c3ac..d754c075f1 100644 --- a/src/syscall/syscall_unix_test.go +++ b/src/syscall/syscall_unix_test.go @@ -70,7 +70,7 @@ func _() { // Thus this test also verifies that the Flock_t structure can be // roundtripped with F_SETLK and F_GETLK. func TestFcntlFlock(t *testing.T) { - if (runtime.GOOS == "darwin" || runtime.GOOS == "ios") && runtime.GOARCH == "arm64" { + if runtime.GOOS == "ios" { t.Skip("skipping; no child processes allowed on iOS") } flock := syscall.Flock_t{ From 930fa890c9b6a75700bda3dc4043de81350749ea Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 6 Oct 2020 10:53:11 -0700 Subject: [PATCH 132/281] net/http: add Transport.GetProxyConnectHeader Fixes golang/go#41048 Change-Id: I38e01605bffb6f85100c098051b0c416dd77f261 Reviewed-on: https://go-review.googlesource.com/c/go/+/259917 Trust: Brad Fitzpatrick Run-TryBot: Brad Fitzpatrick TryBot-Result: Go Bot Reviewed-by: Damien Neil --- src/net/http/transport.go | 23 ++++++++++++++- src/net/http/transport_test.go | 52 ++++++++++++++++++++++++++++++++++ 2 files changed, 74 insertions(+), 1 deletion(-) diff --git a/src/net/http/transport.go b/src/net/http/transport.go index b97c4268b5..4546166430 100644 --- a/src/net/http/transport.go +++ b/src/net/http/transport.go @@ -240,8 +240,18 @@ type Transport struct { // ProxyConnectHeader optionally specifies headers to send to // proxies during CONNECT requests. + // To set the header dynamically, see GetProxyConnectHeader. 
ProxyConnectHeader Header + // GetProxyConnectHeader optionally specifies a func to return + // headers to send to proxyURL during a CONNECT request to the + // ip:port target. + // If it returns an error, the Transport's RoundTrip fails with + // that error. It can return (nil, nil) to not add headers. + // If GetProxyConnectHeader is non-nil, ProxyConnectHeader is + // ignored. + GetProxyConnectHeader func(ctx context.Context, proxyURL *url.URL, target string) (Header, error) + // MaxResponseHeaderBytes specifies a limit on how many // response bytes are allowed in the server's response // header. @@ -313,6 +323,7 @@ func (t *Transport) Clone() *Transport { ResponseHeaderTimeout: t.ResponseHeaderTimeout, ExpectContinueTimeout: t.ExpectContinueTimeout, ProxyConnectHeader: t.ProxyConnectHeader.Clone(), + GetProxyConnectHeader: t.GetProxyConnectHeader, MaxResponseHeaderBytes: t.MaxResponseHeaderBytes, ForceAttemptHTTP2: t.ForceAttemptHTTP2, WriteBufferSize: t.WriteBufferSize, @@ -1623,7 +1634,17 @@ func (t *Transport) dialConn(ctx context.Context, cm connectMethod) (pconn *pers } case cm.targetScheme == "https": conn := pconn.conn - hdr := t.ProxyConnectHeader + var hdr Header + if t.GetProxyConnectHeader != nil { + var err error + hdr, err = t.GetProxyConnectHeader(ctx, cm.proxyURL, cm.targetAddr) + if err != nil { + conn.Close() + return nil, err + } + } else { + hdr = t.ProxyConnectHeader + } if hdr == nil { hdr = make(Header) } diff --git a/src/net/http/transport_test.go b/src/net/http/transport_test.go index f4b7623630..a1c9e822b4 100644 --- a/src/net/http/transport_test.go +++ b/src/net/http/transport_test.go @@ -5174,6 +5174,57 @@ func TestTransportProxyConnectHeader(t *testing.T) { } } +func TestTransportProxyGetConnectHeader(t *testing.T) { + defer afterTest(t) + reqc := make(chan *Request, 1) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + if r.Method != "CONNECT" { + t.Errorf("method = %q; want CONNECT", r.Method) + } + reqc 
<- r + c, _, err := w.(Hijacker).Hijack() + if err != nil { + t.Errorf("Hijack: %v", err) + return + } + c.Close() + })) + defer ts.Close() + + c := ts.Client() + c.Transport.(*Transport).Proxy = func(r *Request) (*url.URL, error) { + return url.Parse(ts.URL) + } + // These should be ignored: + c.Transport.(*Transport).ProxyConnectHeader = Header{ + "User-Agent": {"foo"}, + "Other": {"bar"}, + } + c.Transport.(*Transport).GetProxyConnectHeader = func(ctx context.Context, proxyURL *url.URL, target string) (Header, error) { + return Header{ + "User-Agent": {"foo2"}, + "Other": {"bar2"}, + }, nil + } + + res, err := c.Get("https://dummy.tld/") // https to force a CONNECT + if err == nil { + res.Body.Close() + t.Errorf("unexpected success") + } + select { + case <-time.After(3 * time.Second): + t.Fatal("timeout") + case r := <-reqc: + if got, want := r.Header.Get("User-Agent"), "foo2"; got != want { + t.Errorf("CONNECT request User-Agent = %q; want %q", got, want) + } + if got, want := r.Header.Get("Other"), "bar2"; got != want { + t.Errorf("CONNECT request Other = %q; want %q", got, want) + } + } +} + var errFakeRoundTrip = errors.New("fake roundtrip") type funcRoundTripper func() @@ -5842,6 +5893,7 @@ func TestTransportClone(t *testing.T) { ResponseHeaderTimeout: time.Second, ExpectContinueTimeout: time.Second, ProxyConnectHeader: Header{}, + GetProxyConnectHeader: func(context.Context, *url.URL, string) (Header, error) { return nil, nil }, MaxResponseHeaderBytes: 1, ForceAttemptHTTP2: true, TLSNextProto: map[string]func(authority string, c *tls.Conn) RoundTripper{ From 04b8a9fea57e37589d82410281f22ebde0027808 Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Tue, 6 Oct 2020 14:42:15 -0700 Subject: [PATCH 133/281] all: implement GO386=softfloat Backstop support for non-sse2 chips now that 387 is gone. 
RELNOTE=yes Change-Id: Ib10e69c4a3654c15a03568f93393437e1939e013 Reviewed-on: https://go-review.googlesource.com/c/go/+/260017 Trust: Keith Randall Run-TryBot: Keith Randall TryBot-Result: Go Bot Reviewed-by: Ian Lance Taylor --- src/cmd/compile/internal/x86/galign.go | 15 ++++++++++++ src/cmd/dist/build.go | 11 +++++++++ src/cmd/dist/buildruntime.go | 2 ++ src/cmd/go/alldocs.go | 3 +++ src/cmd/go/internal/cfg/cfg.go | 3 +++ src/cmd/go/internal/help/helpdoc.go | 3 +++ src/cmd/internal/objabi/util.go | 9 +------- src/internal/cfg/cfg.go | 1 + test/codegen/arithmetic.go | 6 ++--- test/codegen/floats.go | 8 +++---- test/codegen/math.go | 2 +- test/codegen/memops.go | 32 +++++++++++++------------- test/run.go | 12 +++++----- 13 files changed, 69 insertions(+), 38 deletions(-) diff --git a/src/cmd/compile/internal/x86/galign.go b/src/cmd/compile/internal/x86/galign.go index 2d20b6a6d0..e137daa3fc 100644 --- a/src/cmd/compile/internal/x86/galign.go +++ b/src/cmd/compile/internal/x86/galign.go @@ -7,6 +7,9 @@ package x86 import ( "cmd/compile/internal/gc" "cmd/internal/obj/x86" + "cmd/internal/objabi" + "fmt" + "os" ) func Init(arch *gc.Arch) { @@ -15,6 +18,18 @@ func Init(arch *gc.Arch) { arch.SSAGenValue = ssaGenValue arch.SSAGenBlock = ssaGenBlock arch.MAXWIDTH = (1 << 32) - 1 + switch v := objabi.GO386; v { + case "sse2": + case "softfloat": + arch.SoftFloat = true + case "387": + fmt.Fprintf(os.Stderr, "unsupported setting GO386=387. 
Consider using GO386=softfloat instead.\n") + gc.Exit(1) + default: + fmt.Fprintf(os.Stderr, "unsupported setting GO386=%s\n", v) + gc.Exit(1) + + } arch.ZeroRange = zerorange arch.Ginsnop = ginsnop diff --git a/src/cmd/dist/build.go b/src/cmd/dist/build.go index 3b3eb113b1..69a66abd2d 100644 --- a/src/cmd/dist/build.go +++ b/src/cmd/dist/build.go @@ -30,6 +30,7 @@ var ( gohostos string goos string goarm string + go386 string gomips string gomips64 string goppc64 string @@ -141,6 +142,12 @@ func xinit() { } goarm = b + b = os.Getenv("GO386") + if b == "" { + b = "sse2" + } + go386 = b + b = os.Getenv("GOMIPS") if b == "" { b = "hardfloat" @@ -212,6 +219,7 @@ func xinit() { defaultldso = os.Getenv("GO_LDSO") // For tools being invoked but also for os.ExpandEnv. + os.Setenv("GO386", go386) os.Setenv("GOARCH", goarch) os.Setenv("GOARM", goarm) os.Setenv("GOHOSTARCH", gohostarch) @@ -1153,6 +1161,9 @@ func cmdenv() { if goarch == "arm" { xprintf(format, "GOARM", goarm) } + if goarch == "386" { + xprintf(format, "GO386", go386) + } if goarch == "mips" || goarch == "mipsle" { xprintf(format, "GOMIPS", gomips) } diff --git a/src/cmd/dist/buildruntime.go b/src/cmd/dist/buildruntime.go index 67d1d72db4..2744951597 100644 --- a/src/cmd/dist/buildruntime.go +++ b/src/cmd/dist/buildruntime.go @@ -41,6 +41,7 @@ func mkzversion(dir, file string) { // package objabi // // const defaultGOROOT = +// const defaultGO386 = // const defaultGOARM = // const defaultGOMIPS = // const defaultGOMIPS64 = @@ -69,6 +70,7 @@ func mkzbootstrap(file string) { fmt.Fprintln(&buf) fmt.Fprintf(&buf, "import \"runtime\"\n") fmt.Fprintln(&buf) + fmt.Fprintf(&buf, "const defaultGO386 = `%s`\n", go386) fmt.Fprintf(&buf, "const defaultGOARM = `%s`\n", goarm) fmt.Fprintf(&buf, "const defaultGOMIPS = `%s`\n", gomips) fmt.Fprintf(&buf, "const defaultGOMIPS64 = `%s`\n", gomips64) diff --git a/src/cmd/go/alldocs.go b/src/cmd/go/alldocs.go index 14840efb22..5cb32c80e9 100644 --- a/src/cmd/go/alldocs.go +++ 
b/src/cmd/go/alldocs.go @@ -1852,6 +1852,9 @@ // GOARM // For GOARCH=arm, the ARM architecture for which to compile. // Valid values are 5, 6, 7. +// GO386 +// For GOARCH=386, how to implement floating point instructions. +// Valid values are sse2 (default), softfloat. // GOMIPS // For GOARCH=mips{,le}, whether to use floating point instructions. // Valid values are hardfloat (default), softfloat. diff --git a/src/cmd/go/internal/cfg/cfg.go b/src/cmd/go/internal/cfg/cfg.go index 9169c12d8f..67d581f6e6 100644 --- a/src/cmd/go/internal/cfg/cfg.go +++ b/src/cmd/go/internal/cfg/cfg.go @@ -256,6 +256,7 @@ var ( // Used in envcmd.MkEnv and build ID computations. GOARM = envOr("GOARM", fmt.Sprint(objabi.GOARM)) + GO386 = envOr("GO386", objabi.GO386) GOMIPS = envOr("GOMIPS", objabi.GOMIPS) GOMIPS64 = envOr("GOMIPS64", objabi.GOMIPS64) GOPPC64 = envOr("GOPPC64", fmt.Sprintf("%s%d", "power", objabi.GOPPC64)) @@ -279,6 +280,8 @@ func GetArchEnv() (key, val string) { switch Goarch { case "arm": return "GOARM", GOARM + case "386": + return "GO386", GO386 case "mips", "mipsle": return "GOMIPS", GOMIPS case "mips64", "mips64le": diff --git a/src/cmd/go/internal/help/helpdoc.go b/src/cmd/go/internal/help/helpdoc.go index befa10a0e4..8dfabbaa4a 100644 --- a/src/cmd/go/internal/help/helpdoc.go +++ b/src/cmd/go/internal/help/helpdoc.go @@ -581,6 +581,9 @@ Architecture-specific environment variables: GOARM For GOARCH=arm, the ARM architecture for which to compile. Valid values are 5, 6, 7. + GO386 + For GOARCH=386, how to implement floating point instructions. + Valid values are sse2 (default), softfloat. GOMIPS For GOARCH=mips{,le}, whether to use floating point instructions. Valid values are hardfloat (default), softfloat. 
diff --git a/src/cmd/internal/objabi/util.go b/src/cmd/internal/objabi/util.go index cedb2d0a26..b81b73a022 100644 --- a/src/cmd/internal/objabi/util.go +++ b/src/cmd/internal/objabi/util.go @@ -24,6 +24,7 @@ var ( GOROOT = envOr("GOROOT", defaultGOROOT) GOARCH = envOr("GOARCH", defaultGOARCH) GOOS = envOr("GOOS", defaultGOOS) + GO386 = envOr("GO386", defaultGO386) GOAMD64 = goamd64() GOARM = goarm() GOMIPS = gomips() @@ -135,14 +136,6 @@ func init() { if GOARCH != "amd64" { Regabi_enabled = 0 } - - if v := os.Getenv("GO386"); v != "" && v != "sse2" { - msg := fmt.Sprintf("unsupported setting GO386=%s", v) - if v == "387" { - msg += ". 387 support was dropped in Go 1.16. Consider using gccgo instead." - } - log.Fatal(msg) - } } // Note: must agree with runtime.framepointer_enabled. diff --git a/src/internal/cfg/cfg.go b/src/internal/cfg/cfg.go index 023429e441..bdbe9df3e7 100644 --- a/src/internal/cfg/cfg.go +++ b/src/internal/cfg/cfg.go @@ -32,6 +32,7 @@ const KnownEnv = ` FC GCCGO GO111MODULE + GO386 GOARCH GOARM GOBIN diff --git a/test/codegen/arithmetic.go b/test/codegen/arithmetic.go index 30f39a8da1..0bdb66a376 100644 --- a/test/codegen/arithmetic.go +++ b/test/codegen/arithmetic.go @@ -125,7 +125,7 @@ func Mul_n120(n int) int { func MulMemSrc(a []uint32, b []float32) { // 386:`IMULL\s4\([A-Z]+\),\s[A-Z]+` a[0] *= a[1] - // 386:`MULSS\s4\([A-Z]+\),\sX[0-9]+` + // 386/sse2:`MULSS\s4\([A-Z]+\),\sX[0-9]+` // amd64:`MULSS\s4\([A-Z]+\),\sX[0-9]+` b[0] *= b[1] } @@ -167,7 +167,7 @@ func MergeMuls5(a, n int) int { // -------------- // func DivMemSrc(a []float64) { - // 386:`DIVSD\s8\([A-Z]+\),\sX[0-9]+` + // 386/sse2:`DIVSD\s8\([A-Z]+\),\sX[0-9]+` // amd64:`DIVSD\s8\([A-Z]+\),\sX[0-9]+` a[0] /= a[1] } @@ -211,7 +211,7 @@ func ConstDivs(n1 uint, n2 int) (uint, int) { func FloatDivs(a []float32) float32 { // amd64:`DIVSS\s8\([A-Z]+\),\sX[0-9]+` - // 386:`DIVSS\s8\([A-Z]+\),\sX[0-9]+` + // 386/sse2:`DIVSS\s8\([A-Z]+\),\sX[0-9]+` return a[1] / a[2] } diff --git 
a/test/codegen/floats.go b/test/codegen/floats.go index d115800a67..83b4a358a5 100644 --- a/test/codegen/floats.go +++ b/test/codegen/floats.go @@ -15,7 +15,7 @@ package codegen // --------------------- // func Mul2(f float64) float64 { - // 386:"ADDSD",-"MULSD" + // 386/sse2:"ADDSD",-"MULSD" // amd64:"ADDSD",-"MULSD" // arm/7:"ADDD",-"MULD" // arm64:"FADDD",-"FMULD" @@ -25,7 +25,7 @@ func Mul2(f float64) float64 { } func DivPow2(f1, f2, f3 float64) (float64, float64, float64) { - // 386:"MULSD",-"DIVSD" + // 386/sse2:"MULSD",-"DIVSD" // amd64:"MULSD",-"DIVSD" // arm/7:"MULD",-"DIVD" // arm64:"FMULD",-"FDIVD" @@ -33,7 +33,7 @@ func DivPow2(f1, f2, f3 float64) (float64, float64, float64) { // ppc64le:"FMUL",-"FDIV" x := f1 / 16.0 - // 386:"MULSD",-"DIVSD" + // 386/sse2:"MULSD",-"DIVSD" // amd64:"MULSD",-"DIVSD" // arm/7:"MULD",-"DIVD" // arm64:"FMULD",-"FDIVD" @@ -41,7 +41,7 @@ func DivPow2(f1, f2, f3 float64) (float64, float64, float64) { // ppc64le:"FMUL",-"FDIVD" y := f2 / 0.125 - // 386:"ADDSD",-"DIVSD",-"MULSD" + // 386/sse2:"ADDSD",-"DIVSD",-"MULSD" // amd64:"ADDSD",-"DIVSD",-"MULSD" // arm/7:"ADDD",-"MULD",-"DIVD" // arm64:"FADDD",-"FMULD",-"FDIVD" diff --git a/test/codegen/math.go b/test/codegen/math.go index fe678eea23..ac8071400e 100644 --- a/test/codegen/math.go +++ b/test/codegen/math.go @@ -46,7 +46,7 @@ func approx(x float64) { func sqrt(x float64) float64 { // amd64:"SQRTSD" - // 386:"SQRTSD" + // 386/sse2:"SQRTSD" 386/softfloat:-"SQRTD" // arm64:"FSQRTD" // arm/7:"SQRTD" // mips/hardfloat:"SQRTD" mips/softfloat:-"SQRTD" diff --git a/test/codegen/memops.go b/test/codegen/memops.go index 4b003ad861..a234283146 100644 --- a/test/codegen/memops.go +++ b/test/codegen/memops.go @@ -175,33 +175,33 @@ func idxInt64(x, y []int64, i int) { func idxFloat32(x, y []float32, i int) { var t float32 - // amd64: `MOVSS\t4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), X[0-9]+` - // 386: `MOVSS\t4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), X[0-9]+` + // amd64: 
`MOVSS\t4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), X[0-9]+` + // 386/sse2: `MOVSS\t4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\), X[0-9]+` t = x[i+1] - // amd64: `MOVSS\tX[0-9]+, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\)` - // 386: `MOVSS\tX[0-9]+, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\)` + // amd64: `MOVSS\tX[0-9]+, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\)` + // 386/sse2: `MOVSS\tX[0-9]+, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*4\)` y[i+1] = t - // amd64: `MOVSS\t4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[14]\), X[0-9]+` - // 386: `MOVSS\t4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[14]\), X[0-9]+` + // amd64: `MOVSS\t4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[14]\), X[0-9]+` + // 386/sse2: `MOVSS\t4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[14]\), X[0-9]+` t = x[16*i+1] - // amd64: `MOVSS\tX[0-9]+, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[14]\)` - // 386: `MOVSS\tX[0-9]+, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[14]\)` + // amd64: `MOVSS\tX[0-9]+, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[14]\)` + // 386/sse2: `MOVSS\tX[0-9]+, 4\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[14]\)` y[16*i+1] = t } func idxFloat64(x, y []float64, i int) { var t float64 - // amd64: `MOVSD\t8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\), X[0-9]+` - // 386: `MOVSD\t8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\), X[0-9]+` + // amd64: `MOVSD\t8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\), X[0-9]+` + // 386/sse2: `MOVSD\t8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\), X[0-9]+` t = x[i+1] - // amd64: `MOVSD\tX[0-9]+, 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\)` - // 386: `MOVSD\tX[0-9]+, 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\)` + // amd64: `MOVSD\tX[0-9]+, 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\)` + // 386/sse2: `MOVSD\tX[0-9]+, 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*8\)` y[i+1] = t - // amd64: `MOVSD\t8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[18]\), X[0-9]+` - // 386: `MOVSD\t8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[18]\), X[0-9]+` + // amd64: `MOVSD\t8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[18]\), X[0-9]+` + // 386/sse2: `MOVSD\t8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[18]\), X[0-9]+` t = x[16*i+1] - // amd64: `MOVSD\tX[0-9]+, 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[18]\)` - // 386: 
`MOVSD\tX[0-9]+, 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[18]\)` + // amd64: `MOVSD\tX[0-9]+, 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[18]\)` + // 386/sse2: `MOVSD\tX[0-9]+, 8\([A-Z]+[0-9]*\)\([A-Z]+[0-9]*\*[18]\)` y[16*i+1] = t } diff --git a/test/run.go b/test/run.go index 77710fd89a..672861c8d7 100644 --- a/test/run.go +++ b/test/run.go @@ -1489,7 +1489,7 @@ var ( // value[0] is the variant-changing environment variable, and values[1:] // are the supported variants. archVariants = map[string][]string{ - "386": {}, + "386": {"GO386", "sse2", "softfloat"}, "amd64": {}, "arm": {"GOARM", "5", "6", "7"}, "arm64": {}, @@ -1511,12 +1511,12 @@ type wantedAsmOpcode struct { found bool // true if the opcode check matched at least one in the output } -// A build environment triplet separated by slashes (eg: linux/arm/7). +// A build environment triplet separated by slashes (eg: linux/386/sse2). // The third field can be empty if the arch does not support variants (eg: "plan9/amd64/") type buildEnv string // Environ returns the environment it represents in cmd.Environ() "key=val" format -// For instance, "linux/arm/7".Environ() returns {"GOOS=linux", "GOARCH=arm", "GOARM=7"} +// For instance, "linux/386/sse2".Environ() returns {"GOOS=linux", "GOARCH=386", "GO386=sse2"} func (b buildEnv) Environ() []string { fields := strings.Split(string(b), "/") if len(fields) != 3 { @@ -1571,11 +1571,11 @@ func (t *test) wantedAsmOpcodes(fn string) asmChecks { var arch, subarch, os string switch { - case archspec[2] != "": // 3 components: "linux/arm/7" + case archspec[2] != "": // 3 components: "linux/386/sse2" os, arch, subarch = archspec[0], archspec[1][1:], archspec[2][1:] - case archspec[1] != "": // 2 components: "arm/7" + case archspec[1] != "": // 2 components: "386/sse2" os, arch, subarch = "linux", archspec[0], archspec[1][1:] - default: // 1 component: "arm" + default: // 1 component: "386" os, arch, subarch = "linux", archspec[0], "" if arch == "wasm" { os = "js" From 
3923460dda205721d9bee2714a7f0dd403082a90 Mon Sep 17 00:00:00 2001 From: Cherry Zhang Date: Sat, 3 Oct 2020 16:18:43 -0400 Subject: [PATCH 134/281] runtime/cgo: only build xx_cgo_panicmem on iOS On iOS, when running under lldb, we install xx_cgo_panicmem as EXC_BAD_ACCESS handler so we can get a proper Go panic for SIGSEGV. Only build it on iOS. Updates #38485. Change-Id: I801c477439e05920a4bb8fdf5eae6f4923ab8274 Reviewed-on: https://go-review.googlesource.com/c/go/+/259440 Trust: Cherry Zhang Reviewed-by: Ian Lance Taylor --- .../cgo/{gcc_signal2_darwin_arm64.c => gcc_signal2_ios_arm64.c} | 0 .../{gcc_signal_darwin_lldb.c => gcc_signal_darwin_nolldb.c} | 2 +- .../cgo/{gcc_signal_darwin_arm64.c => gcc_signal_ios_arm64.c} | 0 src/runtime/cgo/{signal_darwin_arm64.go => signal_ios_arm64.go} | 0 src/runtime/cgo/{signal_darwin_arm64.s => signal_ios_arm64.s} | 0 5 files changed, 1 insertion(+), 1 deletion(-) rename src/runtime/cgo/{gcc_signal2_darwin_arm64.c => gcc_signal2_ios_arm64.c} (100%) rename src/runtime/cgo/{gcc_signal_darwin_lldb.c => gcc_signal_darwin_nolldb.c} (93%) rename src/runtime/cgo/{gcc_signal_darwin_arm64.c => gcc_signal_ios_arm64.c} (100%) rename src/runtime/cgo/{signal_darwin_arm64.go => signal_ios_arm64.go} (100%) rename src/runtime/cgo/{signal_darwin_arm64.s => signal_ios_arm64.s} (100%) diff --git a/src/runtime/cgo/gcc_signal2_darwin_arm64.c b/src/runtime/cgo/gcc_signal2_ios_arm64.c similarity index 100% rename from src/runtime/cgo/gcc_signal2_darwin_arm64.c rename to src/runtime/cgo/gcc_signal2_ios_arm64.c diff --git a/src/runtime/cgo/gcc_signal_darwin_lldb.c b/src/runtime/cgo/gcc_signal_darwin_nolldb.c similarity index 93% rename from src/runtime/cgo/gcc_signal_darwin_lldb.c rename to src/runtime/cgo/gcc_signal_darwin_nolldb.c index 0ccdae324e..26be71bd1d 100644 --- a/src/runtime/cgo/gcc_signal_darwin_lldb.c +++ b/src/runtime/cgo/gcc_signal_darwin_nolldb.c @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can 
be found in the LICENSE file. -// +build !lldb +// +build !lldb !ios // +build darwin // +build arm64 diff --git a/src/runtime/cgo/gcc_signal_darwin_arm64.c b/src/runtime/cgo/gcc_signal_ios_arm64.c similarity index 100% rename from src/runtime/cgo/gcc_signal_darwin_arm64.c rename to src/runtime/cgo/gcc_signal_ios_arm64.c diff --git a/src/runtime/cgo/signal_darwin_arm64.go b/src/runtime/cgo/signal_ios_arm64.go similarity index 100% rename from src/runtime/cgo/signal_darwin_arm64.go rename to src/runtime/cgo/signal_ios_arm64.go diff --git a/src/runtime/cgo/signal_darwin_arm64.s b/src/runtime/cgo/signal_ios_arm64.s similarity index 100% rename from src/runtime/cgo/signal_darwin_arm64.s rename to src/runtime/cgo/signal_ios_arm64.s From 7d6b304f123b6d11784b48179059f843493c4790 Mon Sep 17 00:00:00 2001 From: Cherry Zhang Date: Sat, 3 Oct 2020 16:26:37 -0400 Subject: [PATCH 135/281] cmd/link: support plugin on macOS/ARM64 Updates #38485. Change-Id: I8295f7fad55b1f9701162f9d2902b3499137c64d Reviewed-on: https://go-review.googlesource.com/c/go/+/259441 Trust: Cherry Zhang Reviewed-by: Than McIntosh --- src/cmd/dist/test.go | 2 +- src/cmd/internal/sys/supported.go | 2 +- src/cmd/link/internal/arm64/asm.go | 20 +++++++++- src/cmd/link/internal/ld/config.go | 8 +++- src/cmd/link/internal/ld/lib.go | 4 +- src/cmd/link/internal/ld/macho.go | 62 +++++++++++++++--------------- 6 files changed, 62 insertions(+), 36 deletions(-) diff --git a/src/cmd/dist/test.go b/src/cmd/dist/test.go index abe496fdee..4b07501b6d 100644 --- a/src/cmd/dist/test.go +++ b/src/cmd/dist/test.go @@ -1011,7 +1011,7 @@ func (t *tester) supportedBuildmode(mode string) bool { switch pair { case "linux-386", "linux-amd64", "linux-arm", "linux-s390x", "linux-ppc64le": return true - case "darwin-amd64": + case "darwin-amd64", "darwin-arm64": return true case "freebsd-amd64": return true diff --git a/src/cmd/internal/sys/supported.go b/src/cmd/internal/sys/supported.go index b2b3b02bf6..94fc92146c 100644 --- 
a/src/cmd/internal/sys/supported.go +++ b/src/cmd/internal/sys/supported.go @@ -104,7 +104,7 @@ func BuildModeSupported(compiler, buildmode, goos, goarch string) bool { switch platform { case "linux/amd64", "linux/arm", "linux/arm64", "linux/386", "linux/s390x", "linux/ppc64le", "android/amd64", "android/arm", "android/arm64", "android/386", - "darwin/amd64", + "darwin/amd64", "darwin/arm64", "freebsd/amd64": return true } diff --git a/src/cmd/link/internal/arm64/asm.go b/src/cmd/link/internal/arm64/asm.go index 945b83822c..1d2aa591d7 100644 --- a/src/cmd/link/internal/arm64/asm.go +++ b/src/cmd/link/internal/arm64/asm.go @@ -371,7 +371,7 @@ func machoreloc1(arch *sys.Arch, out *ld.OutBuf, ldr *loader.Loader, s loader.Sy rt := r.Type siz := r.Size - if ldr.SymType(rs) == sym.SHOSTOBJ || rt == objabi.R_CALLARM64 || rt == objabi.R_ADDRARM64 { + if ldr.SymType(rs) == sym.SHOSTOBJ || rt == objabi.R_CALLARM64 || rt == objabi.R_ADDRARM64 || rt == objabi.R_ARM64_GOTPCREL { if ldr.SymDynid(rs) < 0 { ldr.Errorf(s, "reloc %d (%s) to non-macho symbol %s type=%d (%s)", rt, sym.RelocName(arch, rt), ldr.SymName(rs), ldr.SymType(rs), ldr.SymType(rs)) return false @@ -415,6 +415,22 @@ func machoreloc1(arch *sys.Arch, out *ld.OutBuf, ldr *loader.Loader, s loader.Sy } v |= 1 << 24 // pc-relative bit v |= ld.MACHO_ARM64_RELOC_PAGE21 << 28 + case objabi.R_ARM64_GOTPCREL: + siz = 4 + // Two relocation entries: MACHO_ARM64_RELOC_GOT_LOAD_PAGEOFF12 MACHO_ARM64_RELOC_GOT_LOAD_PAGE21 + // if r.Xadd is non-zero, add two MACHO_ARM64_RELOC_ADDEND. 
+ if r.Xadd != 0 { + out.Write32(uint32(sectoff + 4)) + out.Write32((ld.MACHO_ARM64_RELOC_ADDEND << 28) | (2 << 25) | uint32(r.Xadd&0xffffff)) + } + out.Write32(uint32(sectoff + 4)) + out.Write32(v | (ld.MACHO_ARM64_RELOC_GOT_LOAD_PAGEOFF12 << 28) | (2 << 25)) + if r.Xadd != 0 { + out.Write32(uint32(sectoff)) + out.Write32((ld.MACHO_ARM64_RELOC_ADDEND << 28) | (2 << 25) | uint32(r.Xadd&0xffffff)) + } + v |= 1 << 24 // pc-relative bit + v |= ld.MACHO_ARM64_RELOC_GOT_LOAD_PAGE21 << 28 } switch siz { @@ -457,7 +473,7 @@ func archreloc(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, r loade } nExtReloc = 2 // need two ELF/Mach-O relocations. see elfreloc1/machoreloc1 - if target.IsDarwin() && rt == objabi.R_ADDRARM64 && xadd != 0 { + if target.IsDarwin() && xadd != 0 { nExtReloc = 4 // need another two relocations for non-zero addend } diff --git a/src/cmd/link/internal/ld/config.go b/src/cmd/link/internal/ld/config.go index 9aa59fa3e3..a3ed5f2307 100644 --- a/src/cmd/link/internal/ld/config.go +++ b/src/cmd/link/internal/ld/config.go @@ -95,7 +95,13 @@ func (mode *BuildMode) Set(s string) error { default: return badmode() } - case "darwin", "freebsd": + case "darwin": + switch objabi.GOARCH { + case "amd64", "arm64": + default: + return badmode() + } + case "freebsd": switch objabi.GOARCH { case "amd64": default: diff --git a/src/cmd/link/internal/ld/lib.go b/src/cmd/link/internal/ld/lib.go index cd630e9eae..9fb85becec 100644 --- a/src/cmd/link/internal/ld/lib.go +++ b/src/cmd/link/internal/ld/lib.go @@ -1254,7 +1254,9 @@ func (ctxt *Link) hostlink() { // -headerpad is incompatible with -fembed-bitcode. argv = append(argv, "-Wl,-headerpad,1144") } - if ctxt.DynlinkingGo() && !ctxt.Arch.InFamily(sys.ARM, sys.ARM64) { + if ctxt.DynlinkingGo() && objabi.GOOS != "ios" { + // -flat_namespace is deprecated on iOS. + // It is useful for supporting plugins. We don't support plugins on iOS. 
argv = append(argv, "-Wl,-flat_namespace") } if !combineDwarf { diff --git a/src/cmd/link/internal/ld/macho.go b/src/cmd/link/internal/ld/macho.go index 9765ce18d3..80a753438e 100644 --- a/src/cmd/link/internal/ld/macho.go +++ b/src/cmd/link/internal/ld/macho.go @@ -76,36 +76,38 @@ const ( ) const ( - MACHO_CPU_AMD64 = 1<<24 | 7 - MACHO_CPU_386 = 7 - MACHO_SUBCPU_X86 = 3 - MACHO_CPU_ARM = 12 - MACHO_SUBCPU_ARM = 0 - MACHO_SUBCPU_ARMV7 = 9 - MACHO_CPU_ARM64 = 1<<24 | 12 - MACHO_SUBCPU_ARM64_ALL = 0 - MACHO32SYMSIZE = 12 - MACHO64SYMSIZE = 16 - MACHO_X86_64_RELOC_UNSIGNED = 0 - MACHO_X86_64_RELOC_SIGNED = 1 - MACHO_X86_64_RELOC_BRANCH = 2 - MACHO_X86_64_RELOC_GOT_LOAD = 3 - MACHO_X86_64_RELOC_GOT = 4 - MACHO_X86_64_RELOC_SUBTRACTOR = 5 - MACHO_X86_64_RELOC_SIGNED_1 = 6 - MACHO_X86_64_RELOC_SIGNED_2 = 7 - MACHO_X86_64_RELOC_SIGNED_4 = 8 - MACHO_ARM_RELOC_VANILLA = 0 - MACHO_ARM_RELOC_PAIR = 1 - MACHO_ARM_RELOC_SECTDIFF = 2 - MACHO_ARM_RELOC_BR24 = 5 - MACHO_ARM64_RELOC_UNSIGNED = 0 - MACHO_ARM64_RELOC_BRANCH26 = 2 - MACHO_ARM64_RELOC_PAGE21 = 3 - MACHO_ARM64_RELOC_PAGEOFF12 = 4 - MACHO_ARM64_RELOC_ADDEND = 10 - MACHO_GENERIC_RELOC_VANILLA = 0 - MACHO_FAKE_GOTPCREL = 100 + MACHO_CPU_AMD64 = 1<<24 | 7 + MACHO_CPU_386 = 7 + MACHO_SUBCPU_X86 = 3 + MACHO_CPU_ARM = 12 + MACHO_SUBCPU_ARM = 0 + MACHO_SUBCPU_ARMV7 = 9 + MACHO_CPU_ARM64 = 1<<24 | 12 + MACHO_SUBCPU_ARM64_ALL = 0 + MACHO32SYMSIZE = 12 + MACHO64SYMSIZE = 16 + MACHO_X86_64_RELOC_UNSIGNED = 0 + MACHO_X86_64_RELOC_SIGNED = 1 + MACHO_X86_64_RELOC_BRANCH = 2 + MACHO_X86_64_RELOC_GOT_LOAD = 3 + MACHO_X86_64_RELOC_GOT = 4 + MACHO_X86_64_RELOC_SUBTRACTOR = 5 + MACHO_X86_64_RELOC_SIGNED_1 = 6 + MACHO_X86_64_RELOC_SIGNED_2 = 7 + MACHO_X86_64_RELOC_SIGNED_4 = 8 + MACHO_ARM_RELOC_VANILLA = 0 + MACHO_ARM_RELOC_PAIR = 1 + MACHO_ARM_RELOC_SECTDIFF = 2 + MACHO_ARM_RELOC_BR24 = 5 + MACHO_ARM64_RELOC_UNSIGNED = 0 + MACHO_ARM64_RELOC_BRANCH26 = 2 + MACHO_ARM64_RELOC_PAGE21 = 3 + MACHO_ARM64_RELOC_PAGEOFF12 = 4 + 
MACHO_ARM64_RELOC_GOT_LOAD_PAGE21 = 5 + MACHO_ARM64_RELOC_GOT_LOAD_PAGEOFF12 = 6 + MACHO_ARM64_RELOC_ADDEND = 10 + MACHO_GENERIC_RELOC_VANILLA = 0 + MACHO_FAKE_GOTPCREL = 100 ) const ( From 234de9e1c2afc518e33c0adcf2928a2a9ebf5ce1 Mon Sep 17 00:00:00 2001 From: Cherry Zhang Date: Sat, 3 Oct 2020 23:36:58 -0400 Subject: [PATCH 136/281] cmd/link: support PIE on macOS/ARM64 On macOS/ARM64 everything must be PIE, and we already build PIE in exe buildmode. Support PIE buildmode as well. Updates #38485. Change-Id: I10b68c2f6eb77714e31c26116c61a0e28bf9a358 Reviewed-on: https://go-review.googlesource.com/c/go/+/259442 Trust: Cherry Zhang Reviewed-by: Than McIntosh --- src/cmd/dist/test.go | 2 +- src/cmd/internal/sys/supported.go | 2 +- src/cmd/link/internal/ld/config.go | 8 +++++++- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/src/cmd/dist/test.go b/src/cmd/dist/test.go index 4b07501b6d..d37454c651 100644 --- a/src/cmd/dist/test.go +++ b/src/cmd/dist/test.go @@ -1023,7 +1023,7 @@ func (t *tester) supportedBuildmode(mode string) bool { "linux-386", "linux-amd64", "linux-arm", "linux-arm64", "linux-ppc64le", "linux-s390x", "android-amd64", "android-arm", "android-arm64", "android-386": return true - case "darwin-amd64": + case "darwin-amd64", "darwin-arm64": return true case "windows-amd64", "windows-386", "windows-arm": return true diff --git a/src/cmd/internal/sys/supported.go b/src/cmd/internal/sys/supported.go index 94fc92146c..f97f663f2a 100644 --- a/src/cmd/internal/sys/supported.go +++ b/src/cmd/internal/sys/supported.go @@ -86,7 +86,7 @@ func BuildModeSupported(compiler, buildmode, goos, goarch string) bool { case "linux/386", "linux/amd64", "linux/arm", "linux/arm64", "linux/ppc64le", "linux/s390x", "android/amd64", "android/arm", "android/arm64", "android/386", "freebsd/amd64", - "darwin/amd64", + "darwin/amd64", "darwin/arm64", "aix/ppc64", "windows/386", "windows/amd64", "windows/arm": return true diff --git a/src/cmd/link/internal/ld/config.go 
b/src/cmd/link/internal/ld/config.go index a3ed5f2307..aaf74b58de 100644 --- a/src/cmd/link/internal/ld/config.go +++ b/src/cmd/link/internal/ld/config.go @@ -39,7 +39,13 @@ func (mode *BuildMode) Set(s string) error { case "pie": switch objabi.GOOS { case "aix", "android", "linux", "windows": - case "darwin", "freebsd": + case "darwin": + switch objabi.GOARCH { + case "amd64", "arm64": + default: + return badmode() + } + case "freebsd": switch objabi.GOARCH { case "amd64": default: From f8e554021b7de4bf1150f64d047091b429c92b39 Mon Sep 17 00:00:00 2001 From: Cherry Zhang Date: Sat, 3 Oct 2020 23:58:29 -0400 Subject: [PATCH 137/281] cmd/link: support C-shared buildmode on macOS/ARM64 It just works, after the plugin work. Updates #38485. Change-Id: I55aa11b380a33a729fccb731b77f48bc7d0dea2e Reviewed-on: https://go-review.googlesource.com/c/go/+/259443 Trust: Cherry Zhang Reviewed-by: Than McIntosh --- src/cmd/dist/test.go | 2 +- src/cmd/internal/sys/supported.go | 2 +- src/cmd/link/internal/ld/lib.go | 3 --- 3 files changed, 2 insertions(+), 5 deletions(-) diff --git a/src/cmd/dist/test.go b/src/cmd/dist/test.go index d37454c651..03e6866d62 100644 --- a/src/cmd/dist/test.go +++ b/src/cmd/dist/test.go @@ -992,7 +992,7 @@ func (t *tester) supportedBuildmode(mode string) bool { case "c-shared": switch pair { case "linux-386", "linux-amd64", "linux-arm", "linux-arm64", "linux-ppc64le", "linux-s390x", - "darwin-amd64", + "darwin-amd64", "darwin-arm64", "freebsd-amd64", "android-arm", "android-arm64", "android-386", "windows-amd64", "windows-386": diff --git a/src/cmd/internal/sys/supported.go b/src/cmd/internal/sys/supported.go index f97f663f2a..8d87e95655 100644 --- a/src/cmd/internal/sys/supported.go +++ b/src/cmd/internal/sys/supported.go @@ -69,7 +69,7 @@ func BuildModeSupported(compiler, buildmode, goos, goarch string) bool { case "linux/amd64", "linux/arm", "linux/arm64", "linux/386", "linux/ppc64le", "linux/s390x", "android/amd64", "android/arm", "android/arm64", 
"android/386", "freebsd/amd64", - "darwin/amd64", + "darwin/amd64", "darwin/arm64", "windows/amd64", "windows/386": return true } diff --git a/src/cmd/link/internal/ld/lib.go b/src/cmd/link/internal/ld/lib.go index 9fb85becec..5fe028d321 100644 --- a/src/cmd/link/internal/ld/lib.go +++ b/src/cmd/link/internal/ld/lib.go @@ -1329,9 +1329,6 @@ func (ctxt *Link) hostlink() { case BuildModeCShared: if ctxt.HeadType == objabi.Hdarwin { argv = append(argv, "-dynamiclib") - if ctxt.Arch.Family != sys.AMD64 { - argv = append(argv, "-Wl,-read_only_relocs,suppress") - } } else { // ELF. argv = append(argv, "-Wl,-Bsymbolic") From c19725016df2600a204c9f8447bfcb7dcbdb128a Mon Sep 17 00:00:00 2001 From: Ori Rawlings Date: Thu, 28 May 2020 22:41:38 -0500 Subject: [PATCH 138/281] internal/reflectlite: include Kind in ValueError message The implementation has been ported from reflect, but to avoid introducing a dependency on strconv, Kind.String() falls back to "invalid" if the Kind is unknown rather than "kind" + strconv.Itoa(int(k)) Fixes #39286 Change-Id: I82277242a6c41d0146dabd9d20339fe72d562500 Reviewed-on: https://go-review.googlesource.com/c/go/+/235522 Run-TryBot: Ian Lance Taylor TryBot-Result: Go Bot Reviewed-by: Ian Lance Taylor Reviewed-by: Dmitri Shuralyov Trust: Dmitri Shuralyov --- src/internal/reflectlite/type.go | 38 +++++++++++++++++++++++++++++++ src/internal/reflectlite/value.go | 5 +++- 2 files changed, 42 insertions(+), 1 deletion(-) diff --git a/src/internal/reflectlite/type.go b/src/internal/reflectlite/type.go index eb7f1a4b78..15ba30da36 100644 --- a/src/internal/reflectlite/type.go +++ b/src/internal/reflectlite/type.go @@ -384,6 +384,44 @@ const ( kindMask = (1 << 5) - 1 ) +// String returns the name of k. 
+func (k Kind) String() string { + if int(k) < len(kindNames) { + return kindNames[k] + } + return kindNames[0] +} + +var kindNames = []string{ + Invalid: "invalid", + Bool: "bool", + Int: "int", + Int8: "int8", + Int16: "int16", + Int32: "int32", + Int64: "int64", + Uint: "uint", + Uint8: "uint8", + Uint16: "uint16", + Uint32: "uint32", + Uint64: "uint64", + Uintptr: "uintptr", + Float32: "float32", + Float64: "float64", + Complex64: "complex64", + Complex128: "complex128", + Array: "array", + Chan: "chan", + Func: "func", + Interface: "interface", + Map: "map", + Ptr: "ptr", + Slice: "slice", + String: "string", + Struct: "struct", + UnsafePointer: "unsafe.Pointer", +} + func (t *uncommonType) methods() []method { if t.mcount == 0 { return nil diff --git a/src/internal/reflectlite/value.go b/src/internal/reflectlite/value.go index 85beea606c..0365eeeabf 100644 --- a/src/internal/reflectlite/value.go +++ b/src/internal/reflectlite/value.go @@ -160,7 +160,10 @@ type ValueError struct { } func (e *ValueError) Error() string { - return "reflect: call of " + e.Method + " on zero Value" + if e.Kind == 0 { + return "reflect: call of " + e.Method + " on zero Value" + } + return "reflect: call of " + e.Method + " on " + e.Kind.String() + " Value" } // methodName returns the name of the calling method, From 67edc0ed81947a55adbcd0c9d2317abb93ac9510 Mon Sep 17 00:00:00 2001 From: Cherry Zhang Date: Tue, 6 Oct 2020 22:07:15 -0400 Subject: [PATCH 139/281] runtime: restore SSE guard in asyncPreempt on 386 So we don't use SSE instructions under GO386=softfloat. 
Change-Id: I8ecc92340ee567f84a22501df2543ec041d25ef2 Reviewed-on: https://go-review.googlesource.com/c/go/+/260137 Trust: Cherry Zhang Run-TryBot: Cherry Zhang TryBot-Result: Go Bot Reviewed-by: Keith Randall --- src/runtime/mkpreempt.go | 28 ++++++++++++++++++---------- src/runtime/preempt_386.s | 6 ++++++ 2 files changed, 24 insertions(+), 10 deletions(-) diff --git a/src/runtime/mkpreempt.go b/src/runtime/mkpreempt.go index 40683bb9d9..76237bc31b 100644 --- a/src/runtime/mkpreempt.go +++ b/src/runtime/mkpreempt.go @@ -189,26 +189,34 @@ func (l *layout) restore() { func gen386() { p("PUSHFL") - - // Assign stack offsets. + // Save general purpose registers. var l = layout{sp: "SP"} for _, reg := range regNames386 { - if reg == "SP" { + if reg == "SP" || strings.HasPrefix(reg, "X") { continue } - if strings.HasPrefix(reg, "X") { - l.add("MOVUPS", reg, 16) - } else { - l.add("MOVL", reg, 4) - } + l.add("MOVL", reg, 4) } - p("ADJSP $%d", l.stack) + // Save SSE state only if supported. + lSSE := layout{stack: l.stack, sp: "SP"} + for i := 0; i < 8; i++ { + lSSE.add("MOVUPS", fmt.Sprintf("X%d", i), 16) + } + + p("ADJSP $%d", lSSE.stack) p("NOP SP") l.save() + p("CMPB internal∕cpu·X86+const_offsetX86HasSSE2(SB), $1\nJNE nosse") + lSSE.save() + label("nosse:") p("CALL ·asyncPreempt2(SB)") + p("CMPB internal∕cpu·X86+const_offsetX86HasSSE2(SB), $1\nJNE nosse2") + lSSE.restore() + label("nosse2:") l.restore() - p("ADJSP $%d", -l.stack) + p("ADJSP $%d", -lSSE.stack) + p("POPFL") p("RET") } diff --git a/src/runtime/preempt_386.s b/src/runtime/preempt_386.s index 5c9b8ea224..c3a5fa1f36 100644 --- a/src/runtime/preempt_386.s +++ b/src/runtime/preempt_386.s @@ -14,6 +14,8 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0 MOVL BP, 16(SP) MOVL SI, 20(SP) MOVL DI, 24(SP) + CMPB internal∕cpu·X86+const_offsetX86HasSSE2(SB), $1 + JNE nosse MOVUPS X0, 28(SP) MOVUPS X1, 44(SP) MOVUPS X2, 60(SP) @@ -22,7 +24,10 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0 MOVUPS X5, 108(SP) MOVUPS X6, 
124(SP) MOVUPS X7, 140(SP) +nosse: CALL ·asyncPreempt2(SB) + CMPB internal∕cpu·X86+const_offsetX86HasSSE2(SB), $1 + JNE nosse2 MOVUPS 140(SP), X7 MOVUPS 124(SP), X6 MOVUPS 108(SP), X5 @@ -31,6 +36,7 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0 MOVUPS 60(SP), X2 MOVUPS 44(SP), X1 MOVUPS 28(SP), X0 +nosse2: MOVL 24(SP), DI MOVL 20(SP), SI MOVL 16(SP), BP From 0941dc446e6b3028c77158728432086b5c06acf6 Mon Sep 17 00:00:00 2001 From: Eugene Kalinin Date: Tue, 25 Aug 2020 01:49:39 +0300 Subject: [PATCH 140/281] cmd/go: env -w validates GOTMPDIR value This change makes go env -w check if GOTMPDIR is an absolute path. If GOTMPDIR is not an absolute and not existing path there will be an error at every `work.Builder.Init()`. If `go env` has `-u/-w` as argument `work.Builder.Init()` is not called. `go env -w GOTMPDIR=` work in the same way as `go env -u GOTMPDIR`. Fixes #40932 Change-Id: I6b0662302eeace7f20460b6d26c6e59af1111da2 Reviewed-on: https://go-review.googlesource.com/c/go/+/250198 Run-TryBot: Jay Conrod TryBot-Result: Go Bot Reviewed-by: Jay Conrod Trust: Bryan C. Mills Trust: Jay Conrod --- src/cmd/go/internal/envcmd/env.go | 24 ++++++++++++++++++---- src/cmd/go/testdata/script/env_write.txt | 26 ++++++++++++++++++++++++ 2 files changed, 46 insertions(+), 4 deletions(-) diff --git a/src/cmd/go/internal/envcmd/env.go b/src/cmd/go/internal/envcmd/env.go index e1f2400f60..59d0ded658 100644 --- a/src/cmd/go/internal/envcmd/env.go +++ b/src/cmd/go/internal/envcmd/env.go @@ -203,10 +203,19 @@ func runEnv(ctx context.Context, cmd *base.Command, args []string) { } // Do we need to call ExtraEnvVarsCostly, which is a bit expensive? - // Only if we're listing all environment variables ("go env") - // or the variables being requested are in the extra list. - needCostly := true - if len(args) > 0 { + needCostly := false + if *envU || *envW { + // We're overwriting or removing default settings, + // so it doesn't really matter what the existing settings are. 
+ // + // Moreover, we haven't validated the new settings yet, so it is + // important that we NOT perform any actions based on them, + // such as initializing the builder to compute other variables. + } else if len(args) == 0 { + // We're listing all environment variables ("go env"), + // including the expensive ones. + needCostly = true + } else { needCostly = false for _, arg := range args { switch argKey(arg) { @@ -269,6 +278,13 @@ func runEnv(ctx context.Context, cmd *base.Command, args []string) { } } + gotmp, okGOTMP := add["GOTMPDIR"] + if okGOTMP { + if !filepath.IsAbs(gotmp) && gotmp != "" { + base.Fatalf("go env -w: GOTMPDIR must be an absolute path") + } + } + updateEnvFile(add, nil) return } diff --git a/src/cmd/go/testdata/script/env_write.txt b/src/cmd/go/testdata/script/env_write.txt index 2366c3f580..bdb9bc4077 100644 --- a/src/cmd/go/testdata/script/env_write.txt +++ b/src/cmd/go/testdata/script/env_write.txt @@ -24,6 +24,12 @@ stdout GOARCH= stdout GOOS= stdout GOROOT= +# checking errors +! go env -w +stderr 'go env -w: no KEY=VALUE arguments given' +! go env -u +stderr 'go env -u: no arguments given' + # go env -w changes default setting env root= [windows] env root=c: @@ -97,6 +103,26 @@ stderr 'GOPATH entry cannot start with shell metacharacter' ! go env -w GOPATH=./go stderr 'GOPATH entry is relative; must be absolute path' +# go env -w rejects invalid GOTMPDIR values +! go env -w GOTMPDIR=x +stderr 'go env -w: GOTMPDIR must be an absolute path' + +# go env -w should accept absolute GOTMPDIR value +# and should not create it +[windows] go env -w GOTMPDIR=$WORK\x\y\z +[!windows] go env -w GOTMPDIR=$WORK/x/y/z +! 
exists $WORK/x/y/z +# we should be able to clear an env +go env -u GOTMPDIR +go env GOTMPDIR +stdout ^$ + +[windows] go env -w GOTMPDIR=$WORK\x\y\z +[!windows] go env -w GOTMPDIR=$WORK/x/y/z +go env -w GOTMPDIR= +go env GOTMPDIR +stdout ^$ + # go env -w/-u checks validity of GOOS/ARCH combinations env GOOS= env GOARCH= From ccf89bef43f3580526019e0804e91352e62047d5 Mon Sep 17 00:00:00 2001 From: Cherry Zhang Date: Wed, 7 Oct 2020 11:32:43 -0400 Subject: [PATCH 141/281] cmd/compile: store call args in the call block We already do this for OpStore, but we didn't do this for OpMove. Do the same, to ensure that no two memories are live at the same time. Fixes #41846. Change-Id: Iad77ff031b3c4459d1217e0b04aeb0e692eb474d Reviewed-on: https://go-review.googlesource.com/c/go/+/260237 Trust: Cherry Zhang Run-TryBot: Cherry Zhang TryBot-Result: Go Bot Reviewed-by: David Chase --- src/cmd/compile/internal/ssa/expand_calls.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/cmd/compile/internal/ssa/expand_calls.go b/src/cmd/compile/internal/ssa/expand_calls.go index 992936b2d3..8c06040542 100644 --- a/src/cmd/compile/internal/ssa/expand_calls.go +++ b/src/cmd/compile/internal/ssa/expand_calls.go @@ -283,7 +283,7 @@ func expandCalls(f *Func) { // TODO this will be more complicated with registers in the picture. 
src := a.Args[0] dst := f.ConstOffPtrSP(src.Type, aux.OffsetOfArg(auxI), sp) - if a.Uses == 1 { + if a.Uses == 1 && a.Block == v.Block { a.reset(OpMove) a.Pos = pos a.Type = types.TypeMem @@ -292,7 +292,7 @@ func expandCalls(f *Func) { a.SetArgs3(dst, src, mem) mem = a } else { - mem = a.Block.NewValue3A(pos, OpMove, types.TypeMem, aux.TypeOfArg(auxI), dst, src, mem) + mem = v.Block.NewValue3A(pos, OpMove, types.TypeMem, aux.TypeOfArg(auxI), dst, src, mem) mem.AuxInt = aux.SizeOfArg(auxI) } } else { From 492258549717d4e73a22170c507fb26a731c4aba Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor Date: Tue, 6 Oct 2020 16:31:00 -0700 Subject: [PATCH 142/281] syscall: rewrite Windows makeCmdLine to use []byte It's faster to append to a []byte and only convert to string at the end then it is to build up a string by concatenating characters. Fixes #41825 Change-Id: I45ddf77dcc62726c919f0533c95d483cee8ba366 Reviewed-on: https://go-review.googlesource.com/c/go/+/259978 Trust: Ian Lance Taylor Trust: Alex Brainman Run-TryBot: Ian Lance Taylor TryBot-Result: Go Bot Reviewed-by: Alex Brainman --- src/syscall/exec_windows.go | 75 ++++++++++++++++++++++--------------- 1 file changed, 44 insertions(+), 31 deletions(-) diff --git a/src/syscall/exec_windows.go b/src/syscall/exec_windows.go index 8d6141c0ca..500321ef0d 100644 --- a/src/syscall/exec_windows.go +++ b/src/syscall/exec_windows.go @@ -24,74 +24,87 @@ var ForkLock sync.RWMutex // - finally, s is wrapped with double quotes (arg -> "arg"), // but only if there is space or tab inside s. func EscapeArg(s string) string { - if len(s) == 0 { - return "\"\"" + for i := 0; i < len(s); i++ { + switch s[i] { + case '"', '\\', ' ', '\t': + // Some escaping required. + b := make([]byte, 0, len(s)+2) + b = appendEscapeArg(b, s) + return string(b) + } } - n := len(s) + return s +} + +// appendEscapeArg escapes the string s, as per escapeArg, +// appends the result to b, and returns the updated slice. 
+func appendEscapeArg(b []byte, s string) []byte { + if len(s) == 0 { + return append(b, `""`...) + } + + needsBackslash := false hasSpace := false for i := 0; i < len(s); i++ { switch s[i] { case '"', '\\': - n++ + needsBackslash = true case ' ', '\t': hasSpace = true } } - if hasSpace { - n += 2 + + if !needsBackslash && !hasSpace { + // No special handling required; normal case. + return append(b, s...) } - if n == len(s) { - return s + if !needsBackslash { + // hasSpace is true, so we need to quote the string. + b = append(b, '"') + b = append(b, s...) + return append(b, '"') } - qs := make([]byte, n) - j := 0 if hasSpace { - qs[j] = '"' - j++ + b = append(b, '"') } slashes := 0 for i := 0; i < len(s); i++ { - switch s[i] { + c := s[i] + switch c { default: slashes = 0 - qs[j] = s[i] case '\\': slashes++ - qs[j] = s[i] case '"': for ; slashes > 0; slashes-- { - qs[j] = '\\' - j++ + b = append(b, '\\') } - qs[j] = '\\' - j++ - qs[j] = s[i] + b = append(b, '\\') } - j++ + b = append(b, c) } if hasSpace { for ; slashes > 0; slashes-- { - qs[j] = '\\' - j++ + b = append(b, '\\') } - qs[j] = '"' - j++ + b = append(b, '"') } - return string(qs[:j]) + + return b } // makeCmdLine builds a command line out of args by escaping "special" // characters and joining the arguments with spaces. 
func makeCmdLine(args []string) string { - var s string + var b []byte for _, v := range args { - if s != "" { - s += " " + if len(b) > 0 { + b = append(b, ' ') } - s += EscapeArg(v) + b = appendEscapeArg(b, v) } - return s + return string(b) } // createEnvBlock converts an array of environment strings into From 5c1567cdc064b68210aeeddc6bf76bf0a146a626 Mon Sep 17 00:00:00 2001 From: Ayan George Date: Tue, 6 Oct 2020 18:40:40 +0000 Subject: [PATCH 143/281] net/http/pprof: use Request.Context, not the deprecated CloseNotifier Prior to this commit, the profiling code had a sleep() function that waits and unblocks on either time.After() or a channel provided by an http.CloseNotifier derived from a supplied http.ResponseWriter. According to the documentation, http.CloseNotifier is deprecated: Deprecated: the CloseNotifier interface predates Go's context package. New code should use Request.Context instead. This patch does just that -- sleep() now takes an *http.Request and uses http.Request.Context() to signal when a request has been cancelled. Change-Id: I98702314addf494f5743a4f99172dc607389dbb8 GitHub-Last-Rev: c1e37a03ca28417ed5833618d3eeddb2eecccd09 GitHub-Pull-Request: golang/go#41756 Reviewed-on: https://go-review.googlesource.com/c/go/+/259157 Reviewed-by: Bryan C. Mills Reviewed-by: Hyang-Ah Hana Kim Reviewed-by: Emmanuel Odeke Trust: Bryan C. Mills Trust: Emmanuel Odeke Run-TryBot: Bryan C. 
Mills TryBot-Result: Go Bot --- src/net/http/pprof/pprof.go | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/src/net/http/pprof/pprof.go b/src/net/http/pprof/pprof.go index 81df0448e9..5ff7fdc3de 100644 --- a/src/net/http/pprof/pprof.go +++ b/src/net/http/pprof/pprof.go @@ -93,14 +93,10 @@ func Cmdline(w http.ResponseWriter, r *http.Request) { fmt.Fprintf(w, strings.Join(os.Args, "\x00")) } -func sleep(w http.ResponseWriter, d time.Duration) { - var clientGone <-chan bool - if cn, ok := w.(http.CloseNotifier); ok { - clientGone = cn.CloseNotify() - } +func sleep(r *http.Request, d time.Duration) { select { case <-time.After(d): - case <-clientGone: + case <-r.Context().Done(): } } @@ -142,7 +138,7 @@ func Profile(w http.ResponseWriter, r *http.Request) { fmt.Sprintf("Could not enable CPU profiling: %s", err)) return } - sleep(w, time.Duration(sec)*time.Second) + sleep(r, time.Duration(sec)*time.Second) pprof.StopCPUProfile() } @@ -171,7 +167,7 @@ func Trace(w http.ResponseWriter, r *http.Request) { fmt.Sprintf("Could not enable tracing: %s", err)) return } - sleep(w, time.Duration(sec*float64(time.Second))) + sleep(r, time.Duration(sec*float64(time.Second))) trace.Stop() } From 470829d474fb5ae1125c81bdfb10375373c881d5 Mon Sep 17 00:00:00 2001 From: Dmitri Shuralyov Date: Wed, 7 Oct 2020 13:42:49 -0400 Subject: [PATCH 144/281] doc/go1.16: document GO386=387 and GO386=softfloat Also add a few more TODOs as found by the relnote command. It's an incomplete list due to #41849. For #40700. Change-Id: Id17a9be86d3338e1fcb281d26e7298ff26e92864 Reviewed-on: https://go-review.googlesource.com/c/go/+/260337 Reviewed-by: Ian Lance Taylor Trust: Dmitri Shuralyov --- doc/go1.16.html | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/doc/go1.16.html b/doc/go1.16.html index 2fb7222482..2962448742 100644 --- a/doc/go1.16.html +++ b/doc/go1.16.html @@ -38,6 +38,17 @@ Do not send CLs removing the interior tags from such phrases. 
netbsd/arm64 port).

+

386

+ +

+ As announced in the Go 1.15 release notes, + Go 1.16 drops support for x87 mode compilation (GO386=387). + Support for non-SSE2 processors is now available using soft float + mode (GO386=softfloat). + Users running on non-SSE2 processors should replace GO386=387 + with GO386=softfloat. +

+

Tools

@@ -162,6 +173,8 @@ Do not send CLs removing the interior tags from such phrases. TODO: update with final numbers later in the release.

+ +

Core library

@@ -275,3 +288,11 @@ Do not send CLs removing the interior tags from such phrases.

+ +
runtime/debug
+
+

+ TODO: https://golang.org/cl/249677: provide Addr method for errors from SetPanicOnFault +

+
+
From 83dfc0d02d005f7de3b8a57ea29dacf8609edba8 Mon Sep 17 00:00:00 2001 From: Cherry Zhang Date: Tue, 6 Oct 2020 20:36:52 -0400 Subject: [PATCH 145/281] time: enable system zoneinfo on macOS/ARM64 Updates #38485. Change-Id: I4a8b509dc4ad03706235289fbe8c2a675453c871 Reviewed-on: https://go-review.googlesource.com/c/go/+/260339 Trust: Cherry Zhang Run-TryBot: Cherry Zhang TryBot-Result: Go Bot Reviewed-by: Ian Lance Taylor --- src/time/{zoneinfo_darwin_arm64.go => zoneinfo_ios.go} | 0 src/time/zoneinfo_unix.go | 2 +- src/time/zoneinfo_unix_test.go | 2 +- 3 files changed, 2 insertions(+), 2 deletions(-) rename src/time/{zoneinfo_darwin_arm64.go => zoneinfo_ios.go} (100%) diff --git a/src/time/zoneinfo_darwin_arm64.go b/src/time/zoneinfo_ios.go similarity index 100% rename from src/time/zoneinfo_darwin_arm64.go rename to src/time/zoneinfo_ios.go diff --git a/src/time/zoneinfo_unix.go b/src/time/zoneinfo_unix.go index 80724eb30a..d2465eef65 100644 --- a/src/time/zoneinfo_unix.go +++ b/src/time/zoneinfo_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build aix darwin,amd64 dragonfly freebsd linux,!android netbsd openbsd solaris +// +build aix darwin,!ios dragonfly freebsd linux,!android netbsd openbsd solaris // Parse "zoneinfo" time zone file. // This is a fairly standard file format used on OS X, Linux, BSD, Sun, and others. diff --git a/src/time/zoneinfo_unix_test.go b/src/time/zoneinfo_unix_test.go index 2d45b83d52..f290ae754f 100644 --- a/src/time/zoneinfo_unix_test.go +++ b/src/time/zoneinfo_unix_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build aix darwin,amd64 dragonfly freebsd linux,!android netbsd openbsd solaris +// +build aix darwin,!ios dragonfly freebsd linux,!android netbsd openbsd solaris package time_test From ade5161f51f2b7239705047875dc36c35139b253 Mon Sep 17 00:00:00 2001 From: Cherry Zhang Date: Tue, 6 Oct 2020 21:13:16 -0400 Subject: [PATCH 146/281] crypto/x509: use macOS/AMD64 implementation on macOS/ARM64 Updates #38485. Change-Id: I0582a53171ce803ca1b0237cfa9bc022fc1da6f9 Reviewed-on: https://go-review.googlesource.com/c/go/+/260340 Trust: Cherry Zhang Run-TryBot: Cherry Zhang TryBot-Result: Go Bot Reviewed-by: Ian Lance Taylor --- .../x509/internal/macos/corefoundation.go | 2 +- .../x509/internal/macos/corefoundation.s | 2 +- src/crypto/x509/internal/macos/security.go | 2 +- src/crypto/x509/internal/macos/security.s | 2 +- src/crypto/x509/root.go | 2 +- ...cgo_darwin_amd64.go => root_cgo_darwin.go} | 0 .../{root_darwin_amd64.go => root_darwin.go} | 0 .../x509/{root_darwin_iosx.go => root_ios.go} | 4 ++-- ...root_darwin_ios_gen.go => root_ios_gen.go} | 11 ++++------ src/runtime/sys_darwin_arm64.s | 20 +++++++++++++++++++ 10 files changed, 31 insertions(+), 14 deletions(-) rename src/crypto/x509/{root_cgo_darwin_amd64.go => root_cgo_darwin.go} (100%) rename src/crypto/x509/{root_darwin_amd64.go => root_darwin.go} (100%) rename src/crypto/x509/{root_darwin_iosx.go => root_ios.go} (99%) rename src/crypto/x509/{root_darwin_ios_gen.go => root_ios_gen.go} (90%) diff --git a/src/crypto/x509/internal/macos/corefoundation.go b/src/crypto/x509/internal/macos/corefoundation.go index 359694fabf..a248ee3292 100644 --- a/src/crypto/x509/internal/macos/corefoundation.go +++ b/src/crypto/x509/internal/macos/corefoundation.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin,amd64 +// +build darwin,!ios // Package macOS provides cgo-less wrappers for Core Foundation and // Security.framework, similarly to how package syscall provides access to diff --git a/src/crypto/x509/internal/macos/corefoundation.s b/src/crypto/x509/internal/macos/corefoundation.s index 8f6be47e4b..a4495d68dd 100644 --- a/src/crypto/x509/internal/macos/corefoundation.s +++ b/src/crypto/x509/internal/macos/corefoundation.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin,amd64 +// +build darwin,!ios #include "textflag.h" diff --git a/src/crypto/x509/internal/macos/security.go b/src/crypto/x509/internal/macos/security.go index 64fe206390..59cc19c587 100644 --- a/src/crypto/x509/internal/macos/security.go +++ b/src/crypto/x509/internal/macos/security.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin,amd64 +// +build darwin,!ios package macOS diff --git a/src/crypto/x509/internal/macos/security.s b/src/crypto/x509/internal/macos/security.s index 1630c55bab..bd446dbcbe 100644 --- a/src/crypto/x509/internal/macos/security.s +++ b/src/crypto/x509/internal/macos/security.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin,amd64 +// +build darwin,!ios #include "textflag.h" diff --git a/src/crypto/x509/root.go b/src/crypto/x509/root.go index da5e91b91c..eccb64121f 100644 --- a/src/crypto/x509/root.go +++ b/src/crypto/x509/root.go @@ -4,7 +4,7 @@ package x509 -//go:generate go run root_darwin_ios_gen.go -version 55161.80.1 +//go:generate go run root_ios_gen.go -version 55161.80.1 import "sync" diff --git a/src/crypto/x509/root_cgo_darwin_amd64.go b/src/crypto/x509/root_cgo_darwin.go similarity index 100% rename from src/crypto/x509/root_cgo_darwin_amd64.go rename to src/crypto/x509/root_cgo_darwin.go diff --git a/src/crypto/x509/root_darwin_amd64.go b/src/crypto/x509/root_darwin.go similarity index 100% rename from src/crypto/x509/root_darwin_amd64.go rename to src/crypto/x509/root_darwin.go diff --git a/src/crypto/x509/root_darwin_iosx.go b/src/crypto/x509/root_ios.go similarity index 99% rename from src/crypto/x509/root_darwin_iosx.go rename to src/crypto/x509/root_ios.go index 5ecc4911b3..98e747733a 100644 --- a/src/crypto/x509/root_darwin_iosx.go +++ b/src/crypto/x509/root_ios.go @@ -1,7 +1,7 @@ -// Code generated by root_darwin_ios_gen.go -version 55161.80.1; DO NOT EDIT. +// Code generated by root_ios_gen.go -version 55161.80.1; DO NOT EDIT. // Update the version in root.go and regenerate with "go generate". -// +build darwin,arm64 darwin,amd64,ios +// +build ios // +build !x509omitbundledroots package x509 diff --git a/src/crypto/x509/root_darwin_ios_gen.go b/src/crypto/x509/root_ios_gen.go similarity index 90% rename from src/crypto/x509/root_darwin_ios_gen.go rename to src/crypto/x509/root_ios_gen.go index 61152b4d11..34dd5d5b22 100644 --- a/src/crypto/x509/root_darwin_ios_gen.go +++ b/src/crypto/x509/root_ios_gen.go @@ -4,7 +4,7 @@ // +build ignore -// Generates root_darwin_iosx.go. +// Generates root_ios.go. // // As of iOS 13, there is no API for querying the system trusted X.509 root // certificates. 
@@ -37,10 +37,7 @@ import ( ) func main() { - // Temporarily name the file _iosx.go, to avoid restricting it to GOOS=ios, - // as this is also used for darwin/arm64 (macOS). - // TODO: maybe use darwin/amd64 implementation on macOS arm64? - var output = flag.String("output", "root_darwin_iosx.go", "file name to write") + var output = flag.String("output", "root_ios.go", "file name to write") var version = flag.String("version", "", "security_certificates version") flag.Parse() if *version == "" { @@ -159,10 +156,10 @@ func main() { } } -const header = `// Code generated by root_darwin_ios_gen.go -version %s; DO NOT EDIT. +const header = `// Code generated by root_ios_gen.go -version %s; DO NOT EDIT. // Update the version in root.go and regenerate with "go generate". -// +build darwin,arm64 darwin,amd64,ios +// +build ios // +build !x509omitbundledroots package x509 diff --git a/src/runtime/sys_darwin_arm64.s b/src/runtime/sys_darwin_arm64.s index 427cb17781..f8d6f28dc7 100644 --- a/src/runtime/sys_darwin_arm64.s +++ b/src/runtime/sys_darwin_arm64.s @@ -707,3 +707,23 @@ TEXT runtime·syscall6X(SB),NOSPLIT,$0 MOVD R0, 72(R2) // save err ok: RET + +// syscallNoErr is like syscall6 but does not check for errors, and +// only returns one value, for use with standard C ABI library functions. +TEXT runtime·syscallNoErr(SB),NOSPLIT,$0 + SUB $16, RSP // push structure pointer + MOVD R0, (RSP) + + MOVD 0(R0), R12 // fn + MOVD 16(R0), R1 // a2 + MOVD 24(R0), R2 // a3 + MOVD 32(R0), R3 // a4 + MOVD 40(R0), R4 // a5 + MOVD 48(R0), R5 // a6 + MOVD 8(R0), R0 // a1 + BL (R12) + + MOVD (RSP), R2 // pop structure pointer + ADD $16, RSP + MOVD R0, 56(R2) // save r1 + RET From 3f7b4d12075277f28427e6b57708258225841ecd Mon Sep 17 00:00:00 2001 From: Cherry Zhang Date: Wed, 7 Oct 2020 14:39:47 -0400 Subject: [PATCH 147/281] cmd/internal/obj/arm64: only emit R_CALLIND relocations on calls Don't emit it for jumps. In particular, not for the return instruction, which is JMP (LR). 
Reduce some binary size and linker resources. Change-Id: Idb3242b86c5a137597fb8accb8aadfe0244c14cf Reviewed-on: https://go-review.googlesource.com/c/go/+/260341 Trust: Cherry Zhang Run-TryBot: Cherry Zhang TryBot-Result: Go Bot Reviewed-by: Than McIntosh --- src/cmd/internal/obj/arm64/asm7.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/src/cmd/internal/obj/arm64/asm7.go b/src/cmd/internal/obj/arm64/asm7.go index ee4a33eef4..7c35fce106 100644 --- a/src/cmd/internal/obj/arm64/asm7.go +++ b/src/cmd/internal/obj/arm64/asm7.go @@ -3120,12 +3120,13 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { case 6: /* b ,O(R); bl ,O(R) */ o1 = c.opbrr(p, p.As) - o1 |= uint32(p.To.Reg&31) << 5 - rel := obj.Addrel(c.cursym) - rel.Off = int32(c.pc) - rel.Siz = 0 - rel.Type = objabi.R_CALLIND + if p.As == obj.ACALL { + rel := obj.Addrel(c.cursym) + rel.Off = int32(c.pc) + rel.Siz = 0 + rel.Type = objabi.R_CALLIND + } case 7: /* beq s */ o1 = c.opbra(p, p.As) From 5012e806b5e50a463609643d5ab04f509e55c3be Mon Sep 17 00:00:00 2001 From: "Hana (Hyang-Ah) Kim" Date: Fri, 14 Aug 2020 12:27:26 -0400 Subject: [PATCH 148/281] cmd/vendor,cmd/pprof: sync pprof@1a94d8640e99 Updated cmd/pprof.objTool.Disasm to accept an additional bool param introduced in https://github.com/google/pprof/pull/520 to support intel syntax in the assembly report. Returns an error if the intelSyntax param is set. We use src/cmd/internal/objfile to disassemble and print assembly so I am not sure if it is relevant, and if so, how. 
Fixes #38802 Updates #36905 Change-Id: Iae2b4322404f232196705f05210f00e2495588d9 Reviewed-on: https://go-review.googlesource.com/c/go/+/248499 Trust: Hyang-Ah Hana Kim Run-TryBot: Hyang-Ah Hana Kim Reviewed-by: Dmitri Shuralyov --- src/cmd/go.mod | 2 +- src/cmd/go.sum | 4 +- src/cmd/pprof/pprof.go | 5 +- .../github.com/google/pprof/driver/driver.go | 6 +- .../pprof/internal/binutils/binutils.go | 132 ++++++- .../google/pprof/internal/binutils/disasm.go | 14 +- .../google/pprof/internal/driver/cli.go | 129 +++--- .../google/pprof/internal/driver/commands.go | 281 ++++---------- .../google/pprof/internal/driver/config.go | 367 ++++++++++++++++++ .../google/pprof/internal/driver/driver.go | 110 +++--- .../pprof/internal/driver/driver_focus.go | 22 +- .../pprof/internal/driver/flamegraph.go | 7 +- .../pprof/internal/driver/interactive.go | 177 ++++----- .../google/pprof/internal/driver/settings.go | 157 ++++++++ .../google/pprof/internal/driver/webhtml.go | 238 ++++++++++++ .../google/pprof/internal/driver/webui.go | 143 ++++--- .../google/pprof/internal/plugin/plugin.go | 2 +- .../google/pprof/internal/report/report.go | 11 +- .../google/pprof/internal/report/source.go | 6 +- .../google/pprof/profile/profile.go | 10 +- src/cmd/vendor/modules.txt | 2 +- 21 files changed, 1300 insertions(+), 525 deletions(-) create mode 100644 src/cmd/vendor/github.com/google/pprof/internal/driver/config.go create mode 100644 src/cmd/vendor/github.com/google/pprof/internal/driver/settings.go diff --git a/src/cmd/go.mod b/src/cmd/go.mod index 59d6152e2a..56941b0541 100644 --- a/src/cmd/go.mod +++ b/src/cmd/go.mod @@ -3,7 +3,7 @@ module cmd go 1.16 require ( - github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3 + github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99 github.com/ianlancetaylor/demangle v0.0.0-20200414190113-039b1ae3a340 // indirect golang.org/x/arch v0.0.0-20200826200359-b19915210f00 golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a diff --git 
a/src/cmd/go.sum b/src/cmd/go.sum index 1b6d680d62..2b505c4354 100644 --- a/src/cmd/go.sum +++ b/src/cmd/go.sum @@ -1,8 +1,8 @@ github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3 h1:SRgJV+IoxM5MKyFdlSUeNy6/ycRUF2yBAKdAQswoHUk= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99 h1:Ak8CrdlwwXwAZxzS66vgPt4U8yUZX7JwLvVR58FN5jM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200414190113-039b1ae3a340 h1:S1+yTUaFPXuDZnPDbO+TrDFIjPzQraYH8/CwSlu9Fac= github.com/ianlancetaylor/demangle v0.0.0-20200414190113-039b1ae3a340/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= diff --git a/src/cmd/pprof/pprof.go b/src/cmd/pprof/pprof.go index 903f9cc1db..c1ddbe372f 100644 --- a/src/cmd/pprof/pprof.go +++ b/src/cmd/pprof/pprof.go @@ -171,7 +171,10 @@ func (*objTool) Demangle(names []string) (map[string]string, error) { return make(map[string]string), nil } -func (t *objTool) Disasm(file string, start, end uint64) ([]driver.Inst, error) { +func (t *objTool) Disasm(file string, start, end uint64, intelSyntax bool) ([]driver.Inst, error) { + if intelSyntax { + return nil, fmt.Errorf("printing assembly in Intel syntax is not supported") + } d, err := t.cachedDisasm(file) if err != nil { return nil, err diff --git a/src/cmd/vendor/github.com/google/pprof/driver/driver.go 
b/src/cmd/vendor/github.com/google/pprof/driver/driver.go index 9bcbc8295a..e65bc2f417 100644 --- a/src/cmd/vendor/github.com/google/pprof/driver/driver.go +++ b/src/cmd/vendor/github.com/google/pprof/driver/driver.go @@ -142,7 +142,7 @@ type ObjTool interface { // Disasm disassembles the named object file, starting at // the start address and stopping at (before) the end address. - Disasm(file string, start, end uint64) ([]Inst, error) + Disasm(file string, start, end uint64, intelSyntax bool) ([]Inst, error) } // An Inst is a single instruction in an assembly listing. @@ -269,8 +269,8 @@ func (f *internalObjFile) Symbols(r *regexp.Regexp, addr uint64) ([]*plugin.Sym, return pluginSyms, nil } -func (o *internalObjTool) Disasm(file string, start, end uint64) ([]plugin.Inst, error) { - insts, err := o.ObjTool.Disasm(file, start, end) +func (o *internalObjTool) Disasm(file string, start, end uint64, intelSyntax bool) ([]plugin.Inst, error) { + insts, err := o.ObjTool.Disasm(file, start, end, intelSyntax) if err != nil { return nil, err } diff --git a/src/cmd/vendor/github.com/google/pprof/internal/binutils/binutils.go b/src/cmd/vendor/github.com/google/pprof/internal/binutils/binutils.go index 967726d1fa..4b67cc4ab0 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/binutils/binutils.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/binutils/binutils.go @@ -19,6 +19,7 @@ import ( "debug/elf" "debug/macho" "encoding/binary" + "errors" "fmt" "io" "os" @@ -26,6 +27,7 @@ import ( "path/filepath" "regexp" "runtime" + "strconv" "strings" "sync" @@ -39,6 +41,8 @@ type Binutils struct { rep *binrep } +var objdumpLLVMVerRE = regexp.MustCompile(`LLVM version (?:(\d*)\.(\d*)\.(\d*)|.*(trunk).*)`) + // binrep is an immutable representation for Binutils. It is atomically // replaced on every mutation to provide thread-safe access. 
type binrep struct { @@ -51,6 +55,7 @@ type binrep struct { nmFound bool objdump string objdumpFound bool + isLLVMObjdump bool // if fast, perform symbolization using nm (symbol names only), // instead of file-line detail from the slower addr2line. @@ -132,15 +137,103 @@ func initTools(b *binrep, config string) { } defaultPath := paths[""] - b.llvmSymbolizer, b.llvmSymbolizerFound = findExe("llvm-symbolizer", append(paths["llvm-symbolizer"], defaultPath...)) - b.addr2line, b.addr2lineFound = findExe("addr2line", append(paths["addr2line"], defaultPath...)) - if !b.addr2lineFound { - // On MacOS, brew installs addr2line under gaddr2line name, so search for - // that if the tool is not found by its default name. - b.addr2line, b.addr2lineFound = findExe("gaddr2line", append(paths["addr2line"], defaultPath...)) + b.llvmSymbolizer, b.llvmSymbolizerFound = chooseExe([]string{"llvm-symbolizer"}, []string{}, append(paths["llvm-symbolizer"], defaultPath...)) + b.addr2line, b.addr2lineFound = chooseExe([]string{"addr2line"}, []string{"gaddr2line"}, append(paths["addr2line"], defaultPath...)) + // The "-n" option is supported by LLVM since 2011. The output of llvm-nm + // and GNU nm with "-n" option is interchangeable for our purposes, so we do + // not need to differrentiate them. + b.nm, b.nmFound = chooseExe([]string{"llvm-nm", "nm"}, []string{"gnm"}, append(paths["nm"], defaultPath...)) + b.objdump, b.objdumpFound, b.isLLVMObjdump = findObjdump(append(paths["objdump"], defaultPath...)) +} + +// findObjdump finds and returns path to preferred objdump binary. +// Order of preference is: llvm-objdump, objdump. +// On MacOS only, also looks for gobjdump with least preference. +// Accepts a list of paths and returns: +// a string with path to the preferred objdump binary if found, +// or an empty string if not found; +// a boolean if any acceptable objdump was found; +// a boolean indicating if it is an LLVM objdump. 
+func findObjdump(paths []string) (string, bool, bool) { + objdumpNames := []string{"llvm-objdump", "objdump"} + if runtime.GOOS == "darwin" { + objdumpNames = append(objdumpNames, "gobjdump") } - b.nm, b.nmFound = findExe("nm", append(paths["nm"], defaultPath...)) - b.objdump, b.objdumpFound = findExe("objdump", append(paths["objdump"], defaultPath...)) + + for _, objdumpName := range objdumpNames { + if objdump, objdumpFound := findExe(objdumpName, paths); objdumpFound { + cmdOut, err := exec.Command(objdump, "--version").Output() + if err != nil { + continue + } + if isLLVMObjdump(string(cmdOut)) { + return objdump, true, true + } + if isBuObjdump(string(cmdOut)) { + return objdump, true, false + } + } + } + return "", false, false +} + +// chooseExe finds and returns path to preferred binary. names is a list of +// names to search on both Linux and OSX. osxNames is a list of names specific +// to OSX. names always has a higher priority than osxNames. The order of +// the name within each list decides its priority (e.g. the first name has a +// higher priority than the second name in the list). +// +// It returns a string with path to the binary and a boolean indicating if any +// acceptable binary was found. +func chooseExe(names, osxNames []string, paths []string) (string, bool) { + if runtime.GOOS == "darwin" { + names = append(names, osxNames...) + } + for _, name := range names { + if binary, found := findExe(name, paths); found { + return binary, true + } + } + return "", false +} + +// isLLVMObjdump accepts a string with path to an objdump binary, +// and returns a boolean indicating if the given binary is an LLVM +// objdump binary of an acceptable version. 
+func isLLVMObjdump(output string) bool { + fields := objdumpLLVMVerRE.FindStringSubmatch(output) + if len(fields) != 5 { + return false + } + if fields[4] == "trunk" { + return true + } + verMajor, err := strconv.Atoi(fields[1]) + if err != nil { + return false + } + verPatch, err := strconv.Atoi(fields[3]) + if err != nil { + return false + } + if runtime.GOOS == "linux" && verMajor >= 8 { + // Ensure LLVM objdump is at least version 8.0 on Linux. + // Some flags, like --demangle, and double dashes for options are + // not supported by previous versions. + return true + } + if runtime.GOOS == "darwin" { + // Ensure LLVM objdump is at least version 10.0.1 on MacOS. + return verMajor > 10 || (verMajor == 10 && verPatch >= 1) + } + return false +} + +// isBuObjdump accepts a string with path to an objdump binary, +// and returns a boolean indicating if the given binary is a GNU +// binutils objdump binary. No version check is performed. +func isBuObjdump(output string) bool { + return strings.Contains(output, "GNU objdump") } // findExe looks for an executable command on a set of paths. @@ -157,12 +250,25 @@ func findExe(cmd string, paths []string) (string, bool) { // Disasm returns the assembly instructions for the specified address range // of a binary. 
-func (bu *Binutils) Disasm(file string, start, end uint64) ([]plugin.Inst, error) { +func (bu *Binutils) Disasm(file string, start, end uint64, intelSyntax bool) ([]plugin.Inst, error) { b := bu.get() - cmd := exec.Command(b.objdump, "-d", "-C", "--no-show-raw-insn", "-l", - fmt.Sprintf("--start-address=%#x", start), - fmt.Sprintf("--stop-address=%#x", end), - file) + if !b.objdumpFound { + return nil, errors.New("cannot disasm: no objdump tool available") + } + args := []string{"--disassemble-all", "--demangle", "--no-show-raw-insn", + "--line-numbers", fmt.Sprintf("--start-address=%#x", start), + fmt.Sprintf("--stop-address=%#x", end)} + + if intelSyntax { + if b.isLLVMObjdump { + args = append(args, "--x86-asm-syntax=intel") + } else { + args = append(args, "-M", "intel") + } + } + + args = append(args, file) + cmd := exec.Command(b.objdump, args...) out, err := cmd.Output() if err != nil { return nil, fmt.Errorf("%v: %v", cmd.Args, err) diff --git a/src/cmd/vendor/github.com/google/pprof/internal/binutils/disasm.go b/src/cmd/vendor/github.com/google/pprof/internal/binutils/disasm.go index 28c89aa163..d0be614bdc 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/binutils/disasm.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/binutils/disasm.go @@ -25,10 +25,11 @@ import ( ) var ( - nmOutputRE = regexp.MustCompile(`^\s*([[:xdigit:]]+)\s+(.)\s+(.*)`) - objdumpAsmOutputRE = regexp.MustCompile(`^\s*([[:xdigit:]]+):\s+(.*)`) - objdumpOutputFileLine = regexp.MustCompile(`^(.*):([0-9]+)`) - objdumpOutputFunction = regexp.MustCompile(`^(\S.*)\(\):`) + nmOutputRE = regexp.MustCompile(`^\s*([[:xdigit:]]+)\s+(.)\s+(.*)`) + objdumpAsmOutputRE = regexp.MustCompile(`^\s*([[:xdigit:]]+):\s+(.*)`) + objdumpOutputFileLine = regexp.MustCompile(`^;?\s?(.*):([0-9]+)`) + objdumpOutputFunction = regexp.MustCompile(`^;?\s?(\S.*)\(\):`) + objdumpOutputFunctionLLVM = regexp.MustCompile(`^([[:xdigit:]]+)?\s?(.*):`) ) func findSymbols(syms []byte, file string, r 
*regexp.Regexp, address uint64) ([]*plugin.Sym, error) { @@ -143,6 +144,11 @@ func disassemble(asm []byte) ([]plugin.Inst, error) { if fields := objdumpOutputFunction.FindStringSubmatch(input); len(fields) == 2 { function = fields[1] continue + } else { + if fields := objdumpOutputFunctionLLVM.FindStringSubmatch(input); len(fields) == 3 { + function = fields[2] + continue + } } // Reset on unrecognized lines. function, file, line = "", "", 0 diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/cli.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/cli.go index 9fc1eea1f0..492400c5f3 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/driver/cli.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/cli.go @@ -69,8 +69,9 @@ func parseFlags(o *plugin.Options) (*source, []string, error) { flagHTTP := flag.String("http", "", "Present interactive web UI at the specified http host:port") flagNoBrowser := flag.Bool("no_browser", false, "Skip opening a browswer for the interactive web UI") - // Flags used during command processing - installedFlags := installFlags(flag) + // Flags that set configuration properties. + cfg := currentConfig() + configFlagSetter := installConfigFlags(flag, &cfg) flagCommands := make(map[string]*bool) flagParamCommands := make(map[string]*string) @@ -107,8 +108,8 @@ func parseFlags(o *plugin.Options) (*source, []string, error) { } } - // Report conflicting options - if err := updateFlags(installedFlags); err != nil { + // Apply any specified flags to cfg. 
+ if err := configFlagSetter(); err != nil { return nil, nil, err } @@ -124,7 +125,7 @@ func parseFlags(o *plugin.Options) (*source, []string, error) { return nil, nil, errors.New("-no_browser only makes sense with -http") } - si := pprofVariables["sample_index"].value + si := cfg.SampleIndex si = sampleIndex(flagTotalDelay, si, "delay", "-total_delay", o.UI) si = sampleIndex(flagMeanDelay, si, "delay", "-mean_delay", o.UI) si = sampleIndex(flagContentions, si, "contentions", "-contentions", o.UI) @@ -132,10 +133,10 @@ func parseFlags(o *plugin.Options) (*source, []string, error) { si = sampleIndex(flagInUseObjects, si, "inuse_objects", "-inuse_objects", o.UI) si = sampleIndex(flagAllocSpace, si, "alloc_space", "-alloc_space", o.UI) si = sampleIndex(flagAllocObjects, si, "alloc_objects", "-alloc_objects", o.UI) - pprofVariables.set("sample_index", si) + cfg.SampleIndex = si if *flagMeanDelay { - pprofVariables.set("mean", "true") + cfg.Mean = true } source := &source{ @@ -154,7 +155,7 @@ func parseFlags(o *plugin.Options) (*source, []string, error) { return nil, nil, err } - normalize := pprofVariables["normalize"].boolValue() + normalize := cfg.Normalize if normalize && len(source.Base) == 0 { return nil, nil, errors.New("must have base profile to normalize by") } @@ -163,6 +164,8 @@ func parseFlags(o *plugin.Options) (*source, []string, error) { if bu, ok := o.Obj.(*binutils.Binutils); ok { bu.SetTools(*flagTools) } + + setCurrentConfig(cfg) return source, cmd, nil } @@ -194,66 +197,72 @@ func dropEmpty(list []*string) []string { return l } -// installFlags creates command line flags for pprof variables. 
-func installFlags(flag plugin.FlagSet) flagsInstalled { - f := flagsInstalled{ - ints: make(map[string]*int), - bools: make(map[string]*bool), - floats: make(map[string]*float64), - strings: make(map[string]*string), - } - for n, v := range pprofVariables { - switch v.kind { - case boolKind: - if v.group != "" { - // Set all radio variables to false to identify conflicts. - f.bools[n] = flag.Bool(n, false, v.help) +// installConfigFlags creates command line flags for configuration +// fields and returns a function which can be called after flags have +// been parsed to copy any flags specified on the command line to +// *cfg. +func installConfigFlags(flag plugin.FlagSet, cfg *config) func() error { + // List of functions for setting the different parts of a config. + var setters []func() + var err error // Holds any errors encountered while running setters. + + for _, field := range configFields { + n := field.name + help := configHelp[n] + var setter func() + switch ptr := cfg.fieldPtr(field).(type) { + case *bool: + f := flag.Bool(n, *ptr, help) + setter = func() { *ptr = *f } + case *int: + f := flag.Int(n, *ptr, help) + setter = func() { *ptr = *f } + case *float64: + f := flag.Float64(n, *ptr, help) + setter = func() { *ptr = *f } + case *string: + if len(field.choices) == 0 { + f := flag.String(n, *ptr, help) + setter = func() { *ptr = *f } } else { - f.bools[n] = flag.Bool(n, v.boolValue(), v.help) + // Make a separate flag per possible choice. + // Set all flags to initially false so we can + // identify conflicts. + bools := make(map[string]*bool) + for _, choice := range field.choices { + bools[choice] = flag.Bool(choice, false, configHelp[choice]) + } + setter = func() { + var set []string + for k, v := range bools { + if *v { + set = append(set, k) + } + } + switch len(set) { + case 0: + // Leave as default value. 
+ case 1: + *ptr = set[0] + default: + err = fmt.Errorf("conflicting options set: %v", set) + } + } } - case intKind: - f.ints[n] = flag.Int(n, v.intValue(), v.help) - case floatKind: - f.floats[n] = flag.Float64(n, v.floatValue(), v.help) - case stringKind: - f.strings[n] = flag.String(n, v.value, v.help) } + setters = append(setters, setter) } - return f -} -// updateFlags updates the pprof variables according to the flags -// parsed in the command line. -func updateFlags(f flagsInstalled) error { - vars := pprofVariables - groups := map[string]string{} - for n, v := range f.bools { - vars.set(n, fmt.Sprint(*v)) - if *v { - g := vars[n].group - if g != "" && groups[g] != "" { - return fmt.Errorf("conflicting options %q and %q set", n, groups[g]) + return func() error { + // Apply the setter for every flag. + for _, setter := range setters { + setter() + if err != nil { + return err } - groups[g] = n } + return nil } - for n, v := range f.ints { - vars.set(n, fmt.Sprint(*v)) - } - for n, v := range f.floats { - vars.set(n, fmt.Sprint(*v)) - } - for n, v := range f.strings { - vars.set(n, *v) - } - return nil -} - -type flagsInstalled struct { - ints map[string]*int - bools map[string]*bool - floats map[string]*float64 - strings map[string]*string } // isBuildID determines if the profile may contain a build ID, by diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/commands.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/commands.go index f52471490a..4397e253e0 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/driver/commands.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/commands.go @@ -22,7 +22,6 @@ import ( "os/exec" "runtime" "sort" - "strconv" "strings" "time" @@ -70,9 +69,7 @@ func AddCommand(cmd string, format int, post PostProcessor, desc, usage string) // SetVariableDefault sets the default value for a pprof // variable. This enables extensions to set their own defaults. 
func SetVariableDefault(variable, value string) { - if v := pprofVariables[variable]; v != nil { - v.value = value - } + configure(variable, value) } // PostProcessor is a function that applies post-processing to the report output @@ -124,130 +121,132 @@ var pprofCommands = commands{ "weblist": {report.WebList, nil, invokeVisualizer("html", browsers()), true, "Display annotated source in a web browser", listHelp("weblist", false)}, } -// pprofVariables are the configuration parameters that affect the -// reported generated by pprof. -var pprofVariables = variables{ +// configHelp contains help text per configuration parameter. +var configHelp = map[string]string{ // Filename for file-based output formats, stdout by default. - "output": &variable{stringKind, "", "", helpText("Output filename for file-based outputs")}, + "output": helpText("Output filename for file-based outputs"), // Comparisons. - "drop_negative": &variable{boolKind, "f", "", helpText( + "drop_negative": helpText( "Ignore negative differences", - "Do not show any locations with values <0.")}, + "Do not show any locations with values <0."), // Graph handling options. - "call_tree": &variable{boolKind, "f", "", helpText( + "call_tree": helpText( "Create a context-sensitive call tree", - "Treat locations reached through different paths as separate.")}, + "Treat locations reached through different paths as separate."), // Display options. 
- "relative_percentages": &variable{boolKind, "f", "", helpText( + "relative_percentages": helpText( "Show percentages relative to focused subgraph", "If unset, percentages are relative to full graph before focusing", - "to facilitate comparison with original graph.")}, - "unit": &variable{stringKind, "minimum", "", helpText( + "to facilitate comparison with original graph."), + "unit": helpText( "Measurement units to display", "Scale the sample values to this unit.", "For time-based profiles, use seconds, milliseconds, nanoseconds, etc.", "For memory profiles, use megabytes, kilobytes, bytes, etc.", - "Using auto will scale each value independently to the most natural unit.")}, - "compact_labels": &variable{boolKind, "f", "", "Show minimal headers"}, - "source_path": &variable{stringKind, "", "", "Search path for source files"}, - "trim_path": &variable{stringKind, "", "", "Path to trim from source paths before search"}, + "Using auto will scale each value independently to the most natural unit."), + "compact_labels": "Show minimal headers", + "source_path": "Search path for source files", + "trim_path": "Path to trim from source paths before search", + "intel_syntax": helpText( + "Show assembly in Intel syntax", + "Only applicable to commands `disasm` and `weblist`"), // Filtering options - "nodecount": &variable{intKind, "-1", "", helpText( + "nodecount": helpText( "Max number of nodes to show", "Uses heuristics to limit the number of locations to be displayed.", - "On graphs, dotted edges represent paths through nodes that have been removed.")}, - "nodefraction": &variable{floatKind, "0.005", "", "Hide nodes below *total"}, - "edgefraction": &variable{floatKind, "0.001", "", "Hide edges below *total"}, - "trim": &variable{boolKind, "t", "", helpText( + "On graphs, dotted edges represent paths through nodes that have been removed."), + "nodefraction": "Hide nodes below *total", + "edgefraction": "Hide edges below *total", + "trim": helpText( "Honor 
nodefraction/edgefraction/nodecount defaults", - "Set to false to get the full profile, without any trimming.")}, - "focus": &variable{stringKind, "", "", helpText( + "Set to false to get the full profile, without any trimming."), + "focus": helpText( "Restricts to samples going through a node matching regexp", "Discard samples that do not include a node matching this regexp.", - "Matching includes the function name, filename or object name.")}, - "ignore": &variable{stringKind, "", "", helpText( + "Matching includes the function name, filename or object name."), + "ignore": helpText( "Skips paths going through any nodes matching regexp", "If set, discard samples that include a node matching this regexp.", - "Matching includes the function name, filename or object name.")}, - "prune_from": &variable{stringKind, "", "", helpText( + "Matching includes the function name, filename or object name."), + "prune_from": helpText( "Drops any functions below the matched frame.", "If set, any frames matching the specified regexp and any frames", - "below it will be dropped from each sample.")}, - "hide": &variable{stringKind, "", "", helpText( + "below it will be dropped from each sample."), + "hide": helpText( "Skips nodes matching regexp", "Discard nodes that match this location.", "Other nodes from samples that include this location will be shown.", - "Matching includes the function name, filename or object name.")}, - "show": &variable{stringKind, "", "", helpText( + "Matching includes the function name, filename or object name."), + "show": helpText( "Only show nodes matching regexp", "If set, only show nodes that match this location.", - "Matching includes the function name, filename or object name.")}, - "show_from": &variable{stringKind, "", "", helpText( + "Matching includes the function name, filename or object name."), + "show_from": helpText( "Drops functions above the highest matched frame.", "If set, all frames above the highest match are dropped from every 
sample.", - "Matching includes the function name, filename or object name.")}, - "tagfocus": &variable{stringKind, "", "", helpText( + "Matching includes the function name, filename or object name."), + "tagfocus": helpText( "Restricts to samples with tags in range or matched by regexp", "Use name=value syntax to limit the matching to a specific tag.", "Numeric tag filter examples: 1kb, 1kb:10kb, memory=32mb:", - "String tag filter examples: foo, foo.*bar, mytag=foo.*bar")}, - "tagignore": &variable{stringKind, "", "", helpText( + "String tag filter examples: foo, foo.*bar, mytag=foo.*bar"), + "tagignore": helpText( "Discard samples with tags in range or matched by regexp", "Use name=value syntax to limit the matching to a specific tag.", "Numeric tag filter examples: 1kb, 1kb:10kb, memory=32mb:", - "String tag filter examples: foo, foo.*bar, mytag=foo.*bar")}, - "tagshow": &variable{stringKind, "", "", helpText( + "String tag filter examples: foo, foo.*bar, mytag=foo.*bar"), + "tagshow": helpText( "Only consider tags matching this regexp", - "Discard tags that do not match this regexp")}, - "taghide": &variable{stringKind, "", "", helpText( + "Discard tags that do not match this regexp"), + "taghide": helpText( "Skip tags matching this regexp", - "Discard tags that match this regexp")}, + "Discard tags that match this regexp"), // Heap profile options - "divide_by": &variable{floatKind, "1", "", helpText( + "divide_by": helpText( "Ratio to divide all samples before visualization", - "Divide all samples values by a constant, eg the number of processors or jobs.")}, - "mean": &variable{boolKind, "f", "", helpText( + "Divide all samples values by a constant, eg the number of processors or jobs."), + "mean": helpText( "Average sample value over first value (count)", "For memory profiles, report average memory per allocation.", - "For time-based profiles, report average time per event.")}, - "sample_index": &variable{stringKind, "", "", helpText( + "For time-based 
profiles, report average time per event."), + "sample_index": helpText( "Sample value to report (0-based index or name)", "Profiles contain multiple values per sample.", - "Use sample_index=i to select the ith value (starting at 0).")}, - "normalize": &variable{boolKind, "f", "", helpText( - "Scales profile based on the base profile.")}, + "Use sample_index=i to select the ith value (starting at 0)."), + "normalize": helpText( + "Scales profile based on the base profile."), // Data sorting criteria - "flat": &variable{boolKind, "t", "cumulative", helpText("Sort entries based on own weight")}, - "cum": &variable{boolKind, "f", "cumulative", helpText("Sort entries based on cumulative weight")}, + "flat": helpText("Sort entries based on own weight"), + "cum": helpText("Sort entries based on cumulative weight"), // Output granularity - "functions": &variable{boolKind, "t", "granularity", helpText( + "functions": helpText( "Aggregate at the function level.", - "Ignores the filename where the function was defined.")}, - "filefunctions": &variable{boolKind, "t", "granularity", helpText( + "Ignores the filename where the function was defined."), + "filefunctions": helpText( "Aggregate at the function level.", - "Takes into account the filename where the function was defined.")}, - "files": &variable{boolKind, "f", "granularity", "Aggregate at the file level."}, - "lines": &variable{boolKind, "f", "granularity", "Aggregate at the source code line level."}, - "addresses": &variable{boolKind, "f", "granularity", helpText( + "Takes into account the filename where the function was defined."), + "files": "Aggregate at the file level.", + "lines": "Aggregate at the source code line level.", + "addresses": helpText( "Aggregate at the address level.", - "Includes functions' addresses in the output.")}, - "noinlines": &variable{boolKind, "f", "", helpText( + "Includes functions' addresses in the output."), + "noinlines": helpText( "Ignore inlines.", - "Attributes inlined functions 
to their first out-of-line caller.")}, + "Attributes inlined functions to their first out-of-line caller."), } func helpText(s ...string) string { return strings.Join(s, "\n") + "\n" } -// usage returns a string describing the pprof commands and variables. -// if commandLine is set, the output reflect cli usage. +// usage returns a string describing the pprof commands and configuration +// options. if commandLine is set, the output reflect cli usage. func usage(commandLine bool) string { var prefix string if commandLine { @@ -269,40 +268,33 @@ func usage(commandLine bool) string { } else { help = " Commands:\n" commands = append(commands, fmtHelp("o/options", "List options and their current values")) - commands = append(commands, fmtHelp("quit/exit/^D", "Exit pprof")) + commands = append(commands, fmtHelp("q/quit/exit/^D", "Exit pprof")) } help = help + strings.Join(commands, "\n") + "\n\n" + " Options:\n" - // Print help for variables after sorting them. - // Collect radio variables by their group name to print them together. - radioOptions := make(map[string][]string) + // Print help for configuration options after sorting them. + // Collect choices for multi-choice options print them together. var variables []string - for name, vr := range pprofVariables { - if vr.group != "" { - radioOptions[vr.group] = append(radioOptions[vr.group], name) + var radioStrings []string + for _, f := range configFields { + if len(f.choices) == 0 { + variables = append(variables, fmtHelp(prefix+f.name, configHelp[f.name])) continue } - variables = append(variables, fmtHelp(prefix+name, vr.help)) - } - sort.Strings(variables) - - help = help + strings.Join(variables, "\n") + "\n\n" + - " Option groups (only set one per group):\n" - - var radioStrings []string - for radio, ops := range radioOptions { - sort.Strings(ops) - s := []string{fmtHelp(radio, "")} - for _, op := range ops { - s = append(s, " "+fmtHelp(prefix+op, pprofVariables[op].help)) + // Format help for for this group. 
+ s := []string{fmtHelp(f.name, "")} + for _, choice := range f.choices { + s = append(s, " "+fmtHelp(prefix+choice, configHelp[choice])) } - radioStrings = append(radioStrings, strings.Join(s, "\n")) } + sort.Strings(variables) sort.Strings(radioStrings) - return help + strings.Join(radioStrings, "\n") + return help + strings.Join(variables, "\n") + "\n\n" + + " Option groups (only set one per group):\n" + + strings.Join(radioStrings, "\n") } func reportHelp(c string, cum, redirect bool) string { @@ -445,105 +437,8 @@ func invokeVisualizer(suffix string, visualizers []string) PostProcessor { } } -// variables describe the configuration parameters recognized by pprof. -type variables map[string]*variable - -// variable is a single configuration parameter. -type variable struct { - kind int // How to interpret the value, must be one of the enums below. - value string // Effective value. Only values appropriate for the Kind should be set. - group string // boolKind variables with the same Group != "" cannot be set simultaneously. - help string // Text describing the variable, in multiple lines separated by newline. -} - -const ( - // variable.kind must be one of these variables. - boolKind = iota - intKind - floatKind - stringKind -) - -// set updates the value of a variable, checking that the value is -// suitable for the variable Kind. -func (vars variables) set(name, value string) error { - v := vars[name] - if v == nil { - return fmt.Errorf("no variable %s", name) - } - var err error - switch v.kind { - case boolKind: - var b bool - if b, err = stringToBool(value); err == nil { - if v.group != "" && !b { - err = fmt.Errorf("%q can only be set to true", name) - } - } - case intKind: - _, err = strconv.Atoi(value) - case floatKind: - _, err = strconv.ParseFloat(value, 64) - case stringKind: - // Remove quotes, particularly useful for empty values. 
- if len(value) > 1 && strings.HasPrefix(value, `"`) && strings.HasSuffix(value, `"`) { - value = value[1 : len(value)-1] - } - } - if err != nil { - return err - } - vars[name].value = value - if group := vars[name].group; group != "" { - for vname, vvar := range vars { - if vvar.group == group && vname != name { - vvar.value = "f" - } - } - } - return err -} - -// boolValue returns the value of a boolean variable. -func (v *variable) boolValue() bool { - b, err := stringToBool(v.value) - if err != nil { - panic("unexpected value " + v.value + " for bool ") - } - return b -} - -// intValue returns the value of an intKind variable. -func (v *variable) intValue() int { - i, err := strconv.Atoi(v.value) - if err != nil { - panic("unexpected value " + v.value + " for int ") - } - return i -} - -// floatValue returns the value of a Float variable. -func (v *variable) floatValue() float64 { - f, err := strconv.ParseFloat(v.value, 64) - if err != nil { - panic("unexpected value " + v.value + " for float ") - } - return f -} - -// stringValue returns a canonical representation for a variable. -func (v *variable) stringValue() string { - switch v.kind { - case boolKind: - return fmt.Sprint(v.boolValue()) - case intKind: - return fmt.Sprint(v.intValue()) - case floatKind: - return fmt.Sprint(v.floatValue()) - } - return v.value -} - +// stringToBool is a custom parser for bools. We avoid using strconv.ParseBool +// to remain compatible with old pprof behavior (e.g., treating "" as true). func stringToBool(s string) (bool, error) { switch strings.ToLower(s) { case "true", "t", "yes", "y", "1", "": @@ -554,13 +449,3 @@ func stringToBool(s string) (bool, error) { return false, fmt.Errorf(`illegal value "%s" for bool variable`, s) } } - -// makeCopy returns a duplicate of a set of shell variables. 
-func (vars variables) makeCopy() variables { - varscopy := make(variables, len(vars)) - for n, v := range vars { - vcopy := *v - varscopy[n] = &vcopy - } - return varscopy -} diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/config.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/config.go new file mode 100644 index 0000000000..b3f82f22c9 --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/config.go @@ -0,0 +1,367 @@ +package driver + +import ( + "fmt" + "net/url" + "reflect" + "strconv" + "strings" + "sync" +) + +// config holds settings for a single named config. +// The JSON tag name for a field is used both for JSON encoding and as +// a named variable. +type config struct { + // Filename for file-based output formats, stdout by default. + Output string `json:"-"` + + // Display options. + CallTree bool `json:"call_tree,omitempty"` + RelativePercentages bool `json:"relative_percentages,omitempty"` + Unit string `json:"unit,omitempty"` + CompactLabels bool `json:"compact_labels,omitempty"` + SourcePath string `json:"-"` + TrimPath string `json:"-"` + IntelSyntax bool `json:"intel_syntax,omitempty"` + Mean bool `json:"mean,omitempty"` + SampleIndex string `json:"-"` + DivideBy float64 `json:"-"` + Normalize bool `json:"normalize,omitempty"` + Sort string `json:"sort,omitempty"` + + // Filtering options + DropNegative bool `json:"drop_negative,omitempty"` + NodeCount int `json:"nodecount,omitempty"` + NodeFraction float64 `json:"nodefraction,omitempty"` + EdgeFraction float64 `json:"edgefraction,omitempty"` + Trim bool `json:"trim,omitempty"` + Focus string `json:"focus,omitempty"` + Ignore string `json:"ignore,omitempty"` + PruneFrom string `json:"prune_from,omitempty"` + Hide string `json:"hide,omitempty"` + Show string `json:"show,omitempty"` + ShowFrom string `json:"show_from,omitempty"` + TagFocus string `json:"tagfocus,omitempty"` + TagIgnore string `json:"tagignore,omitempty"` + TagShow string 
`json:"tagshow,omitempty"` + TagHide string `json:"taghide,omitempty"` + NoInlines bool `json:"noinlines,omitempty"` + + // Output granularity + Granularity string `json:"granularity,omitempty"` +} + +// defaultConfig returns the default configuration values; it is unaffected by +// flags and interactive assignments. +func defaultConfig() config { + return config{ + Unit: "minimum", + NodeCount: -1, + NodeFraction: 0.005, + EdgeFraction: 0.001, + Trim: true, + DivideBy: 1.0, + Sort: "flat", + Granularity: "functions", + } +} + +// currentConfig holds the current configuration values; it is affected by +// flags and interactive assignments. +var currentCfg = defaultConfig() +var currentMu sync.Mutex + +func currentConfig() config { + currentMu.Lock() + defer currentMu.Unlock() + return currentCfg +} + +func setCurrentConfig(cfg config) { + currentMu.Lock() + defer currentMu.Unlock() + currentCfg = cfg +} + +// configField contains metadata for a single configuration field. +type configField struct { + name string // JSON field name/key in variables + urlparam string // URL parameter name + saved bool // Is field saved in settings? + field reflect.StructField // Field in config + choices []string // Name Of variables in group + defaultValue string // Default value for this field. +} + +var ( + configFields []configField // Precomputed metadata per config field + + // configFieldMap holds an entry for every config field as well as an + // entry for every valid choice for a multi-choice field. + configFieldMap map[string]configField +) + +func init() { + // Config names for fields that are not saved in settings and therefore + // do not have a JSON name. + notSaved := map[string]string{ + // Not saved in settings, but present in URLs. + "SampleIndex": "sample_index", + + // Following fields are also not placed in URLs. 
+ "Output": "output", + "SourcePath": "source_path", + "TrimPath": "trim_path", + "DivideBy": "divide_by", + } + + // choices holds the list of allowed values for config fields that can + // take on one of a bounded set of values. + choices := map[string][]string{ + "sort": {"cum", "flat"}, + "granularity": {"functions", "filefunctions", "files", "lines", "addresses"}, + } + + // urlparam holds the mapping from a config field name to the URL + // parameter used to hold that config field. If no entry is present for + // a name, the corresponding field is not saved in URLs. + urlparam := map[string]string{ + "drop_negative": "dropneg", + "call_tree": "calltree", + "relative_percentages": "rel", + "unit": "unit", + "compact_labels": "compact", + "intel_syntax": "intel", + "nodecount": "n", + "nodefraction": "nf", + "edgefraction": "ef", + "trim": "trim", + "focus": "f", + "ignore": "i", + "prune_from": "prunefrom", + "hide": "h", + "show": "s", + "show_from": "sf", + "tagfocus": "tf", + "tagignore": "ti", + "tagshow": "ts", + "taghide": "th", + "mean": "mean", + "sample_index": "si", + "normalize": "norm", + "sort": "sort", + "granularity": "g", + "noinlines": "noinlines", + } + + def := defaultConfig() + configFieldMap = map[string]configField{} + t := reflect.TypeOf(config{}) + for i, n := 0, t.NumField(); i < n; i++ { + field := t.Field(i) + js := strings.Split(field.Tag.Get("json"), ",") + if len(js) == 0 { + continue + } + // Get the configuration name for this field. + name := js[0] + if name == "-" { + name = notSaved[field.Name] + if name == "" { + // Not a configurable field. 
+ continue + } + } + f := configField{ + name: name, + urlparam: urlparam[name], + saved: (name == js[0]), + field: field, + choices: choices[name], + } + f.defaultValue = def.get(f) + configFields = append(configFields, f) + configFieldMap[f.name] = f + for _, choice := range f.choices { + configFieldMap[choice] = f + } + } +} + +// fieldPtr returns a pointer to the field identified by f in *cfg. +func (cfg *config) fieldPtr(f configField) interface{} { + // reflect.ValueOf: converts to reflect.Value + // Elem: dereferences cfg to make *cfg + // FieldByIndex: fetches the field + // Addr: takes address of field + // Interface: converts back from reflect.Value to a regular value + return reflect.ValueOf(cfg).Elem().FieldByIndex(f.field.Index).Addr().Interface() +} + +// get returns the value of field f in cfg. +func (cfg *config) get(f configField) string { + switch ptr := cfg.fieldPtr(f).(type) { + case *string: + return *ptr + case *int: + return fmt.Sprint(*ptr) + case *float64: + return fmt.Sprint(*ptr) + case *bool: + return fmt.Sprint(*ptr) + } + panic(fmt.Sprintf("unsupported config field type %v", f.field.Type)) +} + +// set sets the value of field f in cfg to value. +func (cfg *config) set(f configField, value string) error { + switch ptr := cfg.fieldPtr(f).(type) { + case *string: + if len(f.choices) > 0 { + // Verify that value is one of the allowed choices. 
+ for _, choice := range f.choices { + if choice == value { + *ptr = value + return nil + } + } + return fmt.Errorf("invalid %q value %q", f.name, value) + } + *ptr = value + case *int: + v, err := strconv.Atoi(value) + if err != nil { + return err + } + *ptr = v + case *float64: + v, err := strconv.ParseFloat(value, 64) + if err != nil { + return err + } + *ptr = v + case *bool: + v, err := stringToBool(value) + if err != nil { + return err + } + *ptr = v + default: + panic(fmt.Sprintf("unsupported config field type %v", f.field.Type)) + } + return nil +} + +// isConfigurable returns true if name is either the name of a config field, or +// a valid value for a multi-choice config field. +func isConfigurable(name string) bool { + _, ok := configFieldMap[name] + return ok +} + +// isBoolConfig returns true if name is either name of a boolean config field, +// or a valid value for a multi-choice config field. +func isBoolConfig(name string) bool { + f, ok := configFieldMap[name] + if !ok { + return false + } + if name != f.name { + return true // name must be one possible value for the field + } + var cfg config + _, ok = cfg.fieldPtr(f).(*bool) + return ok +} + +// completeConfig returns the list of configurable names starting with prefix. +func completeConfig(prefix string) []string { + var result []string + for v := range configFieldMap { + if strings.HasPrefix(v, prefix) { + result = append(result, v) + } + } + return result +} + +// configure stores the name=value mapping into the current config, correctly +// handling the case when name identifies a particular choice in a field. +func configure(name, value string) error { + currentMu.Lock() + defer currentMu.Unlock() + f, ok := configFieldMap[name] + if !ok { + return fmt.Errorf("unknown config field %q", name) + } + if f.name == name { + return currentCfg.set(f, value) + } + // name must be one of the choices. If value is true, set field-value + // to name. 
+ if v, err := strconv.ParseBool(value); v && err == nil { + return currentCfg.set(f, name) + } + return fmt.Errorf("unknown config field %q", name) +} + +// resetTransient sets all transient fields in *cfg to their currently +// configured values. +func (cfg *config) resetTransient() { + current := currentConfig() + cfg.Output = current.Output + cfg.SourcePath = current.SourcePath + cfg.TrimPath = current.TrimPath + cfg.DivideBy = current.DivideBy + cfg.SampleIndex = current.SampleIndex +} + +// applyURL updates *cfg based on params. +func (cfg *config) applyURL(params url.Values) error { + for _, f := range configFields { + var value string + if f.urlparam != "" { + value = params.Get(f.urlparam) + } + if value == "" { + continue + } + if err := cfg.set(f, value); err != nil { + return fmt.Errorf("error setting config field %s: %v", f.name, err) + } + } + return nil +} + +// makeURL returns a URL based on initialURL that contains the config contents +// as parameters. The second result is true iff a parameter value was changed. +func (cfg *config) makeURL(initialURL url.URL) (url.URL, bool) { + q := initialURL.Query() + changed := false + for _, f := range configFields { + if f.urlparam == "" || !f.saved { + continue + } + v := cfg.get(f) + if v == f.defaultValue { + v = "" // URL form of default value is the empty string. 
+ } else if f.field.Type.Kind() == reflect.Bool { + // Shorten bool values to "f" or "t" + v = v[:1] + } + if q.Get(f.urlparam) == v { + continue + } + changed = true + if v == "" { + q.Del(f.urlparam) + } else { + q.Set(f.urlparam, v) + } + } + if changed { + initialURL.RawQuery = q.Encode() + } + return initialURL, changed +} diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/driver.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/driver.go index 1be749aa32..878f2e1ead 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/driver/driver.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/driver.go @@ -50,7 +50,7 @@ func PProf(eo *plugin.Options) error { } if cmd != nil { - return generateReport(p, cmd, pprofVariables, o) + return generateReport(p, cmd, currentConfig(), o) } if src.HTTPHostport != "" { @@ -59,7 +59,7 @@ func PProf(eo *plugin.Options) error { return interactive(p, o) } -func generateRawReport(p *profile.Profile, cmd []string, vars variables, o *plugin.Options) (*command, *report.Report, error) { +func generateRawReport(p *profile.Profile, cmd []string, cfg config, o *plugin.Options) (*command, *report.Report, error) { p = p.Copy() // Prevent modification to the incoming profile. // Identify units of numeric tags in profile. @@ -71,16 +71,16 @@ func generateRawReport(p *profile.Profile, cmd []string, vars variables, o *plug panic("unexpected nil command") } - vars = applyCommandOverrides(cmd[0], c.format, vars) + cfg = applyCommandOverrides(cmd[0], c.format, cfg) // Delay focus after configuring report to get percentages on all samples. 
- relative := vars["relative_percentages"].boolValue() + relative := cfg.RelativePercentages if relative { - if err := applyFocus(p, numLabelUnits, vars, o.UI); err != nil { + if err := applyFocus(p, numLabelUnits, cfg, o.UI); err != nil { return nil, nil, err } } - ropt, err := reportOptions(p, numLabelUnits, vars) + ropt, err := reportOptions(p, numLabelUnits, cfg) if err != nil { return nil, nil, err } @@ -95,19 +95,19 @@ func generateRawReport(p *profile.Profile, cmd []string, vars variables, o *plug rpt := report.New(p, ropt) if !relative { - if err := applyFocus(p, numLabelUnits, vars, o.UI); err != nil { + if err := applyFocus(p, numLabelUnits, cfg, o.UI); err != nil { return nil, nil, err } } - if err := aggregate(p, vars); err != nil { + if err := aggregate(p, cfg); err != nil { return nil, nil, err } return c, rpt, nil } -func generateReport(p *profile.Profile, cmd []string, vars variables, o *plugin.Options) error { - c, rpt, err := generateRawReport(p, cmd, vars, o) +func generateReport(p *profile.Profile, cmd []string, cfg config, o *plugin.Options) error { + c, rpt, err := generateRawReport(p, cmd, cfg, o) if err != nil { return err } @@ -129,7 +129,7 @@ func generateReport(p *profile.Profile, cmd []string, vars variables, o *plugin. } // If no output is specified, use default visualizer. - output := vars["output"].value + output := cfg.Output if output == "" { if c.visualizer != nil { return c.visualizer(src, os.Stdout, o.UI) @@ -151,7 +151,7 @@ func generateReport(p *profile.Profile, cmd []string, vars variables, o *plugin. return out.Close() } -func applyCommandOverrides(cmd string, outputFormat int, v variables) variables { +func applyCommandOverrides(cmd string, outputFormat int, cfg config) config { // Some report types override the trim flag to false below. This is to make // sure the default heuristics of excluding insignificant nodes and edges // from the call graph do not apply. 
One example where it is important is @@ -160,55 +160,55 @@ func applyCommandOverrides(cmd string, outputFormat int, v variables) variables // data is selected. So, with trimming enabled, the report could end up // showing no data if the specified function is "uninteresting" as far as the // trimming is concerned. - trim := v["trim"].boolValue() + trim := cfg.Trim switch cmd { case "disasm", "weblist": trim = false - v.set("addresses", "t") + cfg.Granularity = "addresses" // Force the 'noinlines' mode so that source locations for a given address // collapse and there is only one for the given address. Without this // cumulative metrics would be double-counted when annotating the assembly. // This is because the merge is done by address and in case of an inlined // stack each of the inlined entries is a separate callgraph node. - v.set("noinlines", "t") + cfg.NoInlines = true case "peek": trim = false case "list": trim = false - v.set("lines", "t") + cfg.Granularity = "lines" // Do not force 'noinlines' to be false so that specifying // "-list foo -noinlines" is supported and works as expected. 
case "text", "top", "topproto": - if v["nodecount"].intValue() == -1 { - v.set("nodecount", "0") + if cfg.NodeCount == -1 { + cfg.NodeCount = 0 } default: - if v["nodecount"].intValue() == -1 { - v.set("nodecount", "80") + if cfg.NodeCount == -1 { + cfg.NodeCount = 80 } } switch outputFormat { case report.Proto, report.Raw, report.Callgrind: trim = false - v.set("addresses", "t") - v.set("noinlines", "f") + cfg.Granularity = "addresses" + cfg.NoInlines = false } if !trim { - v.set("nodecount", "0") - v.set("nodefraction", "0") - v.set("edgefraction", "0") + cfg.NodeCount = 0 + cfg.NodeFraction = 0 + cfg.EdgeFraction = 0 } - return v + return cfg } -func aggregate(prof *profile.Profile, v variables) error { +func aggregate(prof *profile.Profile, cfg config) error { var function, filename, linenumber, address bool - inlines := !v["noinlines"].boolValue() - switch { - case v["addresses"].boolValue(): + inlines := !cfg.NoInlines + switch cfg.Granularity { + case "addresses": if inlines { return nil } @@ -216,15 +216,15 @@ func aggregate(prof *profile.Profile, v variables) error { filename = true linenumber = true address = true - case v["lines"].boolValue(): + case "lines": function = true filename = true linenumber = true - case v["files"].boolValue(): + case "files": filename = true - case v["functions"].boolValue(): + case "functions": function = true - case v["filefunctions"].boolValue(): + case "filefunctions": function = true filename = true default: @@ -233,8 +233,8 @@ func aggregate(prof *profile.Profile, v variables) error { return prof.Aggregate(inlines, function, filename, linenumber, address) } -func reportOptions(p *profile.Profile, numLabelUnits map[string]string, vars variables) (*report.Options, error) { - si, mean := vars["sample_index"].value, vars["mean"].boolValue() +func reportOptions(p *profile.Profile, numLabelUnits map[string]string, cfg config) (*report.Options, error) { + si, mean := cfg.SampleIndex, cfg.Mean value, meanDiv, sample, err := 
sampleFormat(p, si, mean) if err != nil { return nil, err @@ -245,29 +245,37 @@ func reportOptions(p *profile.Profile, numLabelUnits map[string]string, vars var stype = "mean_" + stype } - if vars["divide_by"].floatValue() == 0 { + if cfg.DivideBy == 0 { return nil, fmt.Errorf("zero divisor specified") } var filters []string - for _, k := range []string{"focus", "ignore", "hide", "show", "show_from", "tagfocus", "tagignore", "tagshow", "taghide"} { - v := vars[k].value + addFilter := func(k string, v string) { if v != "" { filters = append(filters, k+"="+v) } } + addFilter("focus", cfg.Focus) + addFilter("ignore", cfg.Ignore) + addFilter("hide", cfg.Hide) + addFilter("show", cfg.Show) + addFilter("show_from", cfg.ShowFrom) + addFilter("tagfocus", cfg.TagFocus) + addFilter("tagignore", cfg.TagIgnore) + addFilter("tagshow", cfg.TagShow) + addFilter("taghide", cfg.TagHide) ropt := &report.Options{ - CumSort: vars["cum"].boolValue(), - CallTree: vars["call_tree"].boolValue(), - DropNegative: vars["drop_negative"].boolValue(), + CumSort: cfg.Sort == "cum", + CallTree: cfg.CallTree, + DropNegative: cfg.DropNegative, - CompactLabels: vars["compact_labels"].boolValue(), - Ratio: 1 / vars["divide_by"].floatValue(), + CompactLabels: cfg.CompactLabels, + Ratio: 1 / cfg.DivideBy, - NodeCount: vars["nodecount"].intValue(), - NodeFraction: vars["nodefraction"].floatValue(), - EdgeFraction: vars["edgefraction"].floatValue(), + NodeCount: cfg.NodeCount, + NodeFraction: cfg.NodeFraction, + EdgeFraction: cfg.EdgeFraction, ActiveFilters: filters, NumLabelUnits: numLabelUnits, @@ -277,10 +285,12 @@ func reportOptions(p *profile.Profile, numLabelUnits map[string]string, vars var SampleType: stype, SampleUnit: sample.Unit, - OutputUnit: vars["unit"].value, + OutputUnit: cfg.Unit, - SourcePath: vars["source_path"].stringValue(), - TrimPath: vars["trim_path"].stringValue(), + SourcePath: cfg.SourcePath, + TrimPath: cfg.TrimPath, + + IntelSyntax: cfg.IntelSyntax, } if len(p.Mapping) > 0 && 
p.Mapping[0].File != "" { diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/driver_focus.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/driver_focus.go index af7b8d478a..048ba17cb0 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/driver/driver_focus.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/driver_focus.go @@ -28,15 +28,15 @@ import ( var tagFilterRangeRx = regexp.MustCompile("([+-]?[[:digit:]]+)([[:alpha:]]+)?") // applyFocus filters samples based on the focus/ignore options -func applyFocus(prof *profile.Profile, numLabelUnits map[string]string, v variables, ui plugin.UI) error { - focus, err := compileRegexOption("focus", v["focus"].value, nil) - ignore, err := compileRegexOption("ignore", v["ignore"].value, err) - hide, err := compileRegexOption("hide", v["hide"].value, err) - show, err := compileRegexOption("show", v["show"].value, err) - showfrom, err := compileRegexOption("show_from", v["show_from"].value, err) - tagfocus, err := compileTagFilter("tagfocus", v["tagfocus"].value, numLabelUnits, ui, err) - tagignore, err := compileTagFilter("tagignore", v["tagignore"].value, numLabelUnits, ui, err) - prunefrom, err := compileRegexOption("prune_from", v["prune_from"].value, err) +func applyFocus(prof *profile.Profile, numLabelUnits map[string]string, cfg config, ui plugin.UI) error { + focus, err := compileRegexOption("focus", cfg.Focus, nil) + ignore, err := compileRegexOption("ignore", cfg.Ignore, err) + hide, err := compileRegexOption("hide", cfg.Hide, err) + show, err := compileRegexOption("show", cfg.Show, err) + showfrom, err := compileRegexOption("show_from", cfg.ShowFrom, err) + tagfocus, err := compileTagFilter("tagfocus", cfg.TagFocus, numLabelUnits, ui, err) + tagignore, err := compileTagFilter("tagignore", cfg.TagIgnore, numLabelUnits, ui, err) + prunefrom, err := compileRegexOption("prune_from", cfg.PruneFrom, err) if err != nil { return err } @@ -54,8 +54,8 @@ func applyFocus(prof 
*profile.Profile, numLabelUnits map[string]string, v variab warnNoMatches(tagfocus == nil || tfm, "TagFocus", ui) warnNoMatches(tagignore == nil || tim, "TagIgnore", ui) - tagshow, err := compileRegexOption("tagshow", v["tagshow"].value, err) - taghide, err := compileRegexOption("taghide", v["taghide"].value, err) + tagshow, err := compileRegexOption("tagshow", cfg.TagShow, err) + taghide, err := compileRegexOption("taghide", cfg.TagHide, err) tns, tnh := prof.FilterTagsByName(tagshow, taghide) warnNoMatches(tagshow == nil || tns, "TagShow", ui) warnNoMatches(tagignore == nil || tnh, "TagHide", ui) diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/flamegraph.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/flamegraph.go index 13613cff86..fbeb765dbc 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/driver/flamegraph.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/flamegraph.go @@ -38,7 +38,10 @@ type treeNode struct { func (ui *webInterface) flamegraph(w http.ResponseWriter, req *http.Request) { // Force the call tree so that the graph is a tree. // Also do not trim the tree so that the flame graph contains all functions. 
- rpt, errList := ui.makeReport(w, req, []string{"svg"}, "call_tree", "true", "trim", "false") + rpt, errList := ui.makeReport(w, req, []string{"svg"}, func(cfg *config) { + cfg.CallTree = true + cfg.Trim = false + }) if rpt == nil { return // error already reported } @@ -96,7 +99,7 @@ func (ui *webInterface) flamegraph(w http.ResponseWriter, req *http.Request) { return } - ui.render(w, "flamegraph", rpt, errList, config.Labels, webArgs{ + ui.render(w, req, "flamegraph", rpt, errList, config.Labels, webArgs{ FlameGraph: template.JS(b), Nodes: nodeArr, }) diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/interactive.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/interactive.go index 3a458b0b77..777fb90bfb 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/driver/interactive.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/interactive.go @@ -34,17 +34,14 @@ var tailDigitsRE = regexp.MustCompile("[0-9]+$") func interactive(p *profile.Profile, o *plugin.Options) error { // Enter command processing loop. o.UI.SetAutoComplete(newCompleter(functionNames(p))) - pprofVariables.set("compact_labels", "true") - pprofVariables["sample_index"].help += fmt.Sprintf("Or use sample_index=name, with name in %v.\n", sampleTypes(p)) + configure("compact_labels", "true") + configHelp["sample_index"] += fmt.Sprintf("Or use sample_index=name, with name in %v.\n", sampleTypes(p)) // Do not wait for the visualizer to complete, to allow multiple // graphs to be visualized simultaneously. interactiveMode = true shortcuts := profileShortcuts(p) - // Get all groups in pprofVariables to allow for clearer error messages. 
- groups := groupOptions(pprofVariables) - greetings(p, o.UI) for { input, err := o.UI.ReadLine("(pprof) ") @@ -69,7 +66,12 @@ func interactive(p *profile.Profile, o *plugin.Options) error { } value = strings.TrimSpace(value) } - if v := pprofVariables[name]; v != nil { + if isConfigurable(name) { + // All non-bool options require inputs + if len(s) == 1 && !isBoolConfig(name) { + o.UI.PrintErr(fmt.Errorf("please specify a value, e.g. %s=", name)) + continue + } if name == "sample_index" { // Error check sample_index=xxx to ensure xxx is a valid sample type. index, err := p.SampleIndexByName(value) @@ -77,23 +79,17 @@ func interactive(p *profile.Profile, o *plugin.Options) error { o.UI.PrintErr(err) continue } + if index < 0 || index >= len(p.SampleType) { + o.UI.PrintErr(fmt.Errorf("invalid sample_index %q", value)) + continue + } value = p.SampleType[index].Type } - if err := pprofVariables.set(name, value); err != nil { + if err := configure(name, value); err != nil { o.UI.PrintErr(err) } continue } - // Allow group=variable syntax by converting into variable="". - if v := pprofVariables[value]; v != nil && v.group == name { - if err := pprofVariables.set(value, ""); err != nil { - o.UI.PrintErr(err) - } - continue - } else if okValues := groups[name]; okValues != nil { - o.UI.PrintErr(fmt.Errorf("unrecognized value for %s: %q. 
Use one of %s", name, value, strings.Join(okValues, ", "))) - continue - } } tokens := strings.Fields(input) @@ -105,16 +101,16 @@ func interactive(p *profile.Profile, o *plugin.Options) error { case "o", "options": printCurrentOptions(p, o.UI) continue - case "exit", "quit": + case "exit", "quit", "q": return nil case "help": commandHelp(strings.Join(tokens[1:], " "), o.UI) continue } - args, vars, err := parseCommandLine(tokens) + args, cfg, err := parseCommandLine(tokens) if err == nil { - err = generateReportWrapper(p, args, vars, o) + err = generateReportWrapper(p, args, cfg, o) } if err != nil { @@ -124,30 +120,13 @@ func interactive(p *profile.Profile, o *plugin.Options) error { } } -// groupOptions returns a map containing all non-empty groups -// mapped to an array of the option names in that group in -// sorted order. -func groupOptions(vars variables) map[string][]string { - groups := make(map[string][]string) - for name, option := range vars { - group := option.group - if group != "" { - groups[group] = append(groups[group], name) - } - } - for _, names := range groups { - sort.Strings(names) - } - return groups -} - var generateReportWrapper = generateReport // For testing purposes. // greetings prints a brief welcome and some overall profile // information before accepting interactive commands. 
func greetings(p *profile.Profile, ui plugin.UI) { numLabelUnits := identifyNumLabelUnits(p, ui) - ropt, err := reportOptions(p, numLabelUnits, pprofVariables) + ropt, err := reportOptions(p, numLabelUnits, currentConfig()) if err == nil { rpt := report.New(p, ropt) ui.Print(strings.Join(report.ProfileLabels(rpt), "\n")) @@ -200,27 +179,16 @@ func sampleTypes(p *profile.Profile) []string { func printCurrentOptions(p *profile.Profile, ui plugin.UI) { var args []string - type groupInfo struct { - set string - values []string - } - groups := make(map[string]*groupInfo) - for n, o := range pprofVariables { - v := o.stringValue() + current := currentConfig() + for _, f := range configFields { + n := f.name + v := current.get(f) comment := "" - if g := o.group; g != "" { - gi, ok := groups[g] - if !ok { - gi = &groupInfo{} - groups[g] = gi - } - if o.boolValue() { - gi.set = n - } - gi.values = append(gi.values, n) - continue - } switch { + case len(f.choices) > 0: + values := append([]string{}, f.choices...) + sort.Strings(values) + comment = "[" + strings.Join(values, " | ") + "]" case n == "sample_index": st := sampleTypes(p) if v == "" { @@ -242,18 +210,13 @@ func printCurrentOptions(p *profile.Profile, ui plugin.UI) { } args = append(args, fmt.Sprintf(" %-25s = %-20s %s", n, v, comment)) } - for g, vars := range groups { - sort.Strings(vars.values) - comment := commentStart + " [" + strings.Join(vars.values, " | ") + "]" - args = append(args, fmt.Sprintf(" %-25s = %-20s %s", g, vars.set, comment)) - } sort.Strings(args) ui.Print(strings.Join(args, "\n")) } // parseCommandLine parses a command and returns the pprof command to -// execute and a set of variables for the report. -func parseCommandLine(input []string) ([]string, variables, error) { +// execute and the configuration to use for the report. 
+func parseCommandLine(input []string) ([]string, config, error) { cmd, args := input[:1], input[1:] name := cmd[0] @@ -267,25 +230,32 @@ func parseCommandLine(input []string) ([]string, variables, error) { } } if c == nil { - return nil, nil, fmt.Errorf("unrecognized command: %q", name) + if _, ok := configHelp[name]; ok { + value := "" + if len(args) > 0 { + value = args[0] + } + return nil, config{}, fmt.Errorf("did you mean: %s=%s", name, value) + } + return nil, config{}, fmt.Errorf("unrecognized command: %q", name) } if c.hasParam { if len(args) == 0 { - return nil, nil, fmt.Errorf("command %s requires an argument", name) + return nil, config{}, fmt.Errorf("command %s requires an argument", name) } cmd = append(cmd, args[0]) args = args[1:] } - // Copy the variables as options set in the command line are not persistent. - vcopy := pprofVariables.makeCopy() + // Copy config since options set in the command line should not persist. + vcopy := currentConfig() var focus, ignore string for i := 0; i < len(args); i++ { t := args[i] - if _, err := strconv.ParseInt(t, 10, 32); err == nil { - vcopy.set("nodecount", t) + if n, err := strconv.ParseInt(t, 10, 32); err == nil { + vcopy.NodeCount = int(n) continue } switch t[0] { @@ -294,14 +264,14 @@ func parseCommandLine(input []string) ([]string, variables, error) { if outputFile == "" { i++ if i >= len(args) { - return nil, nil, fmt.Errorf("unexpected end of line after >") + return nil, config{}, fmt.Errorf("unexpected end of line after >") } outputFile = args[i] } - vcopy.set("output", outputFile) + vcopy.Output = outputFile case '-': if t == "--cum" || t == "-cum" { - vcopy.set("cum", "t") + vcopy.Sort = "cum" continue } ignore = catRegex(ignore, t[1:]) @@ -311,30 +281,27 @@ func parseCommandLine(input []string) ([]string, variables, error) { } if name == "tags" { - updateFocusIgnore(vcopy, "tag", focus, ignore) + if focus != "" { + vcopy.TagFocus = focus + } + if ignore != "" { + vcopy.TagIgnore = ignore + } } else 
{ - updateFocusIgnore(vcopy, "", focus, ignore) + if focus != "" { + vcopy.Focus = focus + } + if ignore != "" { + vcopy.Ignore = ignore + } } - - if vcopy["nodecount"].intValue() == -1 && (name == "text" || name == "top") { - vcopy.set("nodecount", "10") + if vcopy.NodeCount == -1 && (name == "text" || name == "top") { + vcopy.NodeCount = 10 } return cmd, vcopy, nil } -func updateFocusIgnore(v variables, prefix, f, i string) { - if f != "" { - focus := prefix + "focus" - v.set(focus, catRegex(v[focus].value, f)) - } - - if i != "" { - ignore := prefix + "ignore" - v.set(ignore, catRegex(v[ignore].value, i)) - } -} - func catRegex(a, b string) string { if a != "" && b != "" { return a + "|" + b @@ -362,8 +329,8 @@ func commandHelp(args string, ui plugin.UI) { return } - if v := pprofVariables[args]; v != nil { - ui.Print(v.help + "\n") + if help, ok := configHelp[args]; ok { + ui.Print(help + "\n") return } @@ -373,18 +340,17 @@ func commandHelp(args string, ui plugin.UI) { // newCompleter creates an autocompletion function for a set of commands. func newCompleter(fns []string) func(string) string { return func(line string) string { - v := pprofVariables switch tokens := strings.Fields(line); len(tokens) { case 0: // Nothing to complete case 1: // Single token -- complete command name - if match := matchVariableOrCommand(v, tokens[0]); match != "" { + if match := matchVariableOrCommand(tokens[0]); match != "" { return match } case 2: if tokens[0] == "help" { - if match := matchVariableOrCommand(v, tokens[1]); match != "" { + if match := matchVariableOrCommand(tokens[1]); match != "" { return tokens[0] + " " + match } return line @@ -408,26 +374,19 @@ func newCompleter(fns []string) func(string) string { } // matchVariableOrCommand attempts to match a string token to the prefix of a Command. 
-func matchVariableOrCommand(v variables, token string) string { +func matchVariableOrCommand(token string) string { token = strings.ToLower(token) - found := "" + var matches []string for cmd := range pprofCommands { if strings.HasPrefix(cmd, token) { - if found != "" { - return "" - } - found = cmd + matches = append(matches, cmd) } } - for variable := range v { - if strings.HasPrefix(variable, token) { - if found != "" { - return "" - } - found = variable - } + matches = append(matches, completeConfig(token)...) + if len(matches) == 1 { + return matches[0] } - return found + return "" } // functionCompleter replaces provided substring with a function diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/settings.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/settings.go new file mode 100644 index 0000000000..f72314b185 --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/settings.go @@ -0,0 +1,157 @@ +package driver + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/url" + "os" + "path/filepath" +) + +// settings holds pprof settings. +type settings struct { + // Configs holds a list of named UI configurations. + Configs []namedConfig `json:"configs"` +} + +// namedConfig associates a name with a config. +type namedConfig struct { + Name string `json:"name"` + config +} + +// settingsFileName returns the name of the file where settings should be saved. +func settingsFileName() (string, error) { + // Return "pprof/settings.json" under os.UserConfigDir(). + dir, err := os.UserConfigDir() + if err != nil { + return "", err + } + return filepath.Join(dir, "pprof", "settings.json"), nil +} + +// readSettings reads settings from fname. 
+func readSettings(fname string) (*settings, error) { + data, err := ioutil.ReadFile(fname) + if err != nil { + if os.IsNotExist(err) { + return &settings{}, nil + } + return nil, fmt.Errorf("could not read settings: %w", err) + } + settings := &settings{} + if err := json.Unmarshal(data, settings); err != nil { + return nil, fmt.Errorf("could not parse settings: %w", err) + } + for i := range settings.Configs { + settings.Configs[i].resetTransient() + } + return settings, nil +} + +// writeSettings saves settings to fname. +func writeSettings(fname string, settings *settings) error { + data, err := json.MarshalIndent(settings, "", " ") + if err != nil { + return fmt.Errorf("could not encode settings: %w", err) + } + + // create the settings directory if it does not exist + // XDG specifies permissions 0700 when creating settings dirs: + // https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html + if err := os.MkdirAll(filepath.Dir(fname), 0700); err != nil { + return fmt.Errorf("failed to create settings directory: %w", err) + } + + if err := ioutil.WriteFile(fname, data, 0644); err != nil { + return fmt.Errorf("failed to write settings: %w", err) + } + return nil +} + +// configMenuEntry holds information for a single config menu entry. +type configMenuEntry struct { + Name string + URL string + Current bool // Is this the currently selected config? + UserConfig bool // Is this a user-provided config? +} + +// configMenu returns a list of items to add to a menu in the web UI. +func configMenu(fname string, url url.URL) []configMenuEntry { + // Start with system configs. + configs := []namedConfig{{Name: "Default", config: defaultConfig()}} + if settings, err := readSettings(fname); err == nil { + // Add user configs. + configs = append(configs, settings.Configs...) + } + + // Convert to menu entries. 
+ result := make([]configMenuEntry, len(configs)) + lastMatch := -1 + for i, cfg := range configs { + dst, changed := cfg.config.makeURL(url) + if !changed { + lastMatch = i + } + result[i] = configMenuEntry{ + Name: cfg.Name, + URL: dst.String(), + UserConfig: (i != 0), + } + } + // Mark the last matching config as current + if lastMatch >= 0 { + result[lastMatch].Current = true + } + return result +} + +// editSettings edits settings by applying fn to them. +func editSettings(fname string, fn func(s *settings) error) error { + settings, err := readSettings(fname) + if err != nil { + return err + } + if err := fn(settings); err != nil { + return err + } + return writeSettings(fname, settings) +} + +// setConfig saves the config specified in request to fname. +func setConfig(fname string, request url.URL) error { + q := request.Query() + name := q.Get("config") + if name == "" { + return fmt.Errorf("invalid config name") + } + cfg := currentConfig() + if err := cfg.applyURL(q); err != nil { + return err + } + return editSettings(fname, func(s *settings) error { + for i, c := range s.Configs { + if c.Name == name { + s.Configs[i].config = cfg + return nil + } + } + s.Configs = append(s.Configs, namedConfig{Name: name, config: cfg}) + return nil + }) +} + +// removeConfig removes config from fname. +func removeConfig(fname, config string) error { + return editSettings(fname, func(s *settings) error { + for i, c := range s.Configs { + if c.Name == config { + s.Configs = append(s.Configs[:i], s.Configs[i+1:]...) 
+ return nil + } + } + return fmt.Errorf("config %s not found", config) + }) +} diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/webhtml.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/webhtml.go index 89b8882a6b..4f7610c7e5 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/driver/webhtml.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/webhtml.go @@ -166,6 +166,73 @@ a { color: gray; pointer-events: none; } +.menu-check-mark { + position: absolute; + left: 2px; +} +.menu-delete-btn { + position: absolute; + right: 2px; +} + +{{/* Used to disable events when a modal dialog is displayed */}} +#dialog-overlay { + display: none; + position: fixed; + left: 0px; + top: 0px; + width: 100%; + height: 100%; + background-color: rgba(1,1,1,0.1); +} + +.dialog { + {{/* Displayed centered horizontally near the top */}} + display: none; + position: fixed; + margin: 0px; + top: 60px; + left: 50%; + transform: translateX(-50%); + + z-index: 3; + font-size: 125%; + background-color: #ffffff; + box-shadow: 0 1px 5px rgba(0,0,0,.3); +} +.dialog-header { + font-size: 120%; + border-bottom: 1px solid #CCCCCC; + width: 100%; + text-align: center; + background: #EEEEEE; + user-select: none; +} +.dialog-footer { + border-top: 1px solid #CCCCCC; + width: 100%; + text-align: right; + padding: 10px; +} +.dialog-error { + margin: 10px; + color: red; +} +.dialog input { + margin: 10px; + font-size: inherit; +} +.dialog button { + margin-left: 10px; + font-size: inherit; +} +#save-dialog, #delete-dialog { + width: 50%; + max-width: 20em; +} +#delete-prompt { + padding: 10px; +} #content { overflow-y: scroll; @@ -200,6 +267,8 @@ table thead { font-family: 'Roboto Medium', -apple-system, BlinkMacSystemFont, 'Segoe UI', Helvetica, Arial, sans-serif, 'Apple Color Emoji', 'Segoe UI Emoji', 'Segoe UI Symbol'; } table tr th { + position: sticky; + top: 0; background-color: #ddd; text-align: right; padding: .3em .5em; @@ -282,6 +351,24 @@ table 
tr td { + +
@@ -294,6 +381,31 @@ table tr td { +
+ +
+
Save options as
+ + {{range .Configs}}{{if .UserConfig}} + + +
+ +
+
Delete config
+
+ +
+
{{range .Errors}}
{{.}}
{{end}}
{{end}} @@ -583,6 +695,131 @@ function initMenus() { }, { passive: true, capture: true }); } +function sendURL(method, url, done) { + fetch(url.toString(), {method: method}) + .then((response) => { done(response.ok); }) + .catch((error) => { done(false); }); +} + +// Initialize handlers for saving/loading configurations. +function initConfigManager() { + 'use strict'; + + // Initialize various elements. + function elem(id) { + const result = document.getElementById(id); + if (!result) console.warn('element ' + id + ' not found'); + return result; + } + const overlay = elem('dialog-overlay'); + const saveDialog = elem('save-dialog'); + const saveInput = elem('save-name'); + const saveError = elem('save-error'); + const delDialog = elem('delete-dialog'); + const delPrompt = elem('delete-prompt'); + const delError = elem('delete-error'); + + let currentDialog = null; + let currentDeleteTarget = null; + + function showDialog(dialog) { + if (currentDialog != null) { + overlay.style.display = 'none'; + currentDialog.style.display = 'none'; + } + currentDialog = dialog; + if (dialog != null) { + overlay.style.display = 'block'; + dialog.style.display = 'block'; + } + } + + function cancelDialog(e) { + showDialog(null); + } + + // Show dialog for saving the current config. + function showSaveDialog(e) { + saveError.innerText = ''; + showDialog(saveDialog); + saveInput.focus(); + } + + // Commit save config. + function commitSave(e) { + const name = saveInput.value; + const url = new URL(document.URL); + // Set path relative to existing path. 
+ url.pathname = new URL('./saveconfig', document.URL).pathname; + url.searchParams.set('config', name); + saveError.innerText = ''; + sendURL('POST', url, (ok) => { + if (!ok) { + saveError.innerText = 'Save failed'; + } else { + showDialog(null); + location.reload(); // Reload to show updated config menu + } + }); + } + + function handleSaveInputKey(e) { + if (e.key === 'Enter') commitSave(e); + } + + function deleteConfig(e, elem) { + e.preventDefault(); + const config = elem.dataset.config; + delPrompt.innerText = 'Delete ' + config + '?'; + currentDeleteTarget = elem; + showDialog(delDialog); + } + + function commitDelete(e, elem) { + if (!currentDeleteTarget) return; + const config = currentDeleteTarget.dataset.config; + const url = new URL('./deleteconfig', document.URL); + url.searchParams.set('config', config); + delError.innerText = ''; + sendURL('DELETE', url, (ok) => { + if (!ok) { + delError.innerText = 'Delete failed'; + return; + } + showDialog(null); + // Remove menu entry for this config. + if (currentDeleteTarget && currentDeleteTarget.parentElement) { + currentDeleteTarget.parentElement.remove(); + } + }); + } + + // Bind event on elem to fn. + function bind(event, elem, fn) { + if (elem == null) return; + elem.addEventListener(event, fn); + if (event == 'click') { + // Also enable via touch. + elem.addEventListener('touchstart', fn); + } + } + + bind('click', elem('save-config'), showSaveDialog); + bind('click', elem('save-cancel'), cancelDialog); + bind('click', elem('save-confirm'), commitSave); + bind('keydown', saveInput, handleSaveInputKey); + + bind('click', elem('delete-cancel'), cancelDialog); + bind('click', elem('delete-confirm'), commitDelete); + + // Activate deletion button for all config entries in menu. 
+ for (const del of Array.from(document.getElementsByClassName('menu-delete-btn'))) { + bind('click', del, (e) => { + deleteConfig(e, del); + }); + } +} + function viewer(baseUrl, nodes) { 'use strict'; @@ -875,6 +1112,7 @@ function viewer(baseUrl, nodes) { } addAction('details', handleDetails); + initConfigManager(); search.addEventListener('input', handleSearch); search.addEventListener('keydown', handleKey); diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/webui.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/webui.go index 4006085538..52dc68809c 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/driver/webui.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/webui.go @@ -35,22 +35,28 @@ import ( // webInterface holds the state needed for serving a browser based interface. type webInterface struct { - prof *profile.Profile - options *plugin.Options - help map[string]string - templates *template.Template + prof *profile.Profile + options *plugin.Options + help map[string]string + templates *template.Template + settingsFile string } -func makeWebInterface(p *profile.Profile, opt *plugin.Options) *webInterface { +func makeWebInterface(p *profile.Profile, opt *plugin.Options) (*webInterface, error) { + settingsFile, err := settingsFileName() + if err != nil { + return nil, err + } templates := template.New("templategroup") addTemplates(templates) report.AddSourceTemplates(templates) return &webInterface{ - prof: p, - options: opt, - help: make(map[string]string), - templates: templates, - } + prof: p, + options: opt, + help: make(map[string]string), + templates: templates, + settingsFile: settingsFile, + }, nil } // maxEntries is the maximum number of entries to print for text interfaces. 
@@ -80,6 +86,7 @@ type webArgs struct { TextBody string Top []report.TextItem FlameGraph template.JS + Configs []configMenuEntry } func serveWebInterface(hostport string, p *profile.Profile, o *plugin.Options, disableBrowser bool) error { @@ -88,16 +95,20 @@ func serveWebInterface(hostport string, p *profile.Profile, o *plugin.Options, d return err } interactiveMode = true - ui := makeWebInterface(p, o) + ui, err := makeWebInterface(p, o) + if err != nil { + return err + } for n, c := range pprofCommands { ui.help[n] = c.description } - for n, v := range pprofVariables { - ui.help[n] = v.help + for n, help := range configHelp { + ui.help[n] = help } ui.help["details"] = "Show information about the profile and this view" ui.help["graph"] = "Display profile as a directed graph" ui.help["reset"] = "Show the entire profile" + ui.help["save_config"] = "Save current settings" server := o.HTTPServer if server == nil { @@ -108,12 +119,14 @@ func serveWebInterface(hostport string, p *profile.Profile, o *plugin.Options, d Host: host, Port: port, Handlers: map[string]http.Handler{ - "/": http.HandlerFunc(ui.dot), - "/top": http.HandlerFunc(ui.top), - "/disasm": http.HandlerFunc(ui.disasm), - "/source": http.HandlerFunc(ui.source), - "/peek": http.HandlerFunc(ui.peek), - "/flamegraph": http.HandlerFunc(ui.flamegraph), + "/": http.HandlerFunc(ui.dot), + "/top": http.HandlerFunc(ui.top), + "/disasm": http.HandlerFunc(ui.disasm), + "/source": http.HandlerFunc(ui.source), + "/peek": http.HandlerFunc(ui.peek), + "/flamegraph": http.HandlerFunc(ui.flamegraph), + "/saveconfig": http.HandlerFunc(ui.saveConfig), + "/deleteconfig": http.HandlerFunc(ui.deleteConfig), }, } @@ -206,21 +219,9 @@ func isLocalhost(host string) bool { func openBrowser(url string, o *plugin.Options) { // Construct URL. 
- u, _ := gourl.Parse(url) - q := u.Query() - for _, p := range []struct{ param, key string }{ - {"f", "focus"}, - {"s", "show"}, - {"sf", "show_from"}, - {"i", "ignore"}, - {"h", "hide"}, - {"si", "sample_index"}, - } { - if v := pprofVariables[p.key].value; v != "" { - q.Set(p.param, v) - } - } - u.RawQuery = q.Encode() + baseURL, _ := gourl.Parse(url) + current := currentConfig() + u, _ := current.makeURL(*baseURL) // Give server a little time to get ready. time.Sleep(time.Millisecond * 500) @@ -240,28 +241,23 @@ func openBrowser(url string, o *plugin.Options) { o.UI.PrintErr(u.String()) } -func varsFromURL(u *gourl.URL) variables { - vars := pprofVariables.makeCopy() - vars["focus"].value = u.Query().Get("f") - vars["show"].value = u.Query().Get("s") - vars["show_from"].value = u.Query().Get("sf") - vars["ignore"].value = u.Query().Get("i") - vars["hide"].value = u.Query().Get("h") - vars["sample_index"].value = u.Query().Get("si") - return vars -} - // makeReport generates a report for the specified command. +// If configEditor is not null, it is used to edit the config used for the report. 
func (ui *webInterface) makeReport(w http.ResponseWriter, req *http.Request, - cmd []string, vars ...string) (*report.Report, []string) { - v := varsFromURL(req.URL) - for i := 0; i+1 < len(vars); i += 2 { - v[vars[i]].value = vars[i+1] + cmd []string, configEditor func(*config)) (*report.Report, []string) { + cfg := currentConfig() + if err := cfg.applyURL(req.URL.Query()); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + ui.options.UI.PrintErr(err) + return nil, nil + } + if configEditor != nil { + configEditor(&cfg) } catcher := &errorCatcher{UI: ui.options.UI} options := *ui.options options.UI = catcher - _, rpt, err := generateRawReport(ui.prof, cmd, v, &options) + _, rpt, err := generateRawReport(ui.prof, cmd, cfg, &options) if err != nil { http.Error(w, err.Error(), http.StatusBadRequest) ui.options.UI.PrintErr(err) @@ -271,7 +267,7 @@ func (ui *webInterface) makeReport(w http.ResponseWriter, req *http.Request, } // render generates html using the named template based on the contents of data. -func (ui *webInterface) render(w http.ResponseWriter, tmpl string, +func (ui *webInterface) render(w http.ResponseWriter, req *http.Request, tmpl string, rpt *report.Report, errList, legend []string, data webArgs) { file := getFromLegend(legend, "File: ", "unknown") profile := getFromLegend(legend, "Type: ", "unknown") @@ -281,6 +277,8 @@ func (ui *webInterface) render(w http.ResponseWriter, tmpl string, data.SampleTypes = sampleTypes(ui.prof) data.Legend = legend data.Help = ui.help + data.Configs = configMenu(ui.settingsFile, *req.URL) + html := &bytes.Buffer{} if err := ui.templates.ExecuteTemplate(html, tmpl, data); err != nil { http.Error(w, "internal template error", http.StatusInternalServerError) @@ -293,7 +291,7 @@ func (ui *webInterface) render(w http.ResponseWriter, tmpl string, // dot generates a web page containing an svg diagram. 
func (ui *webInterface) dot(w http.ResponseWriter, req *http.Request) { - rpt, errList := ui.makeReport(w, req, []string{"svg"}) + rpt, errList := ui.makeReport(w, req, []string{"svg"}, nil) if rpt == nil { return // error already reported } @@ -320,7 +318,7 @@ func (ui *webInterface) dot(w http.ResponseWriter, req *http.Request) { nodes = append(nodes, n.Info.Name) } - ui.render(w, "graph", rpt, errList, legend, webArgs{ + ui.render(w, req, "graph", rpt, errList, legend, webArgs{ HTMLBody: template.HTML(string(svg)), Nodes: nodes, }) @@ -345,7 +343,9 @@ func dotToSvg(dot []byte) ([]byte, error) { } func (ui *webInterface) top(w http.ResponseWriter, req *http.Request) { - rpt, errList := ui.makeReport(w, req, []string{"top"}, "nodecount", "500") + rpt, errList := ui.makeReport(w, req, []string{"top"}, func(cfg *config) { + cfg.NodeCount = 500 + }) if rpt == nil { return // error already reported } @@ -355,7 +355,7 @@ func (ui *webInterface) top(w http.ResponseWriter, req *http.Request) { nodes = append(nodes, item.Name) } - ui.render(w, "top", rpt, errList, legend, webArgs{ + ui.render(w, req, "top", rpt, errList, legend, webArgs{ Top: top, Nodes: nodes, }) @@ -364,7 +364,7 @@ func (ui *webInterface) top(w http.ResponseWriter, req *http.Request) { // disasm generates a web page containing disassembly. func (ui *webInterface) disasm(w http.ResponseWriter, req *http.Request) { args := []string{"disasm", req.URL.Query().Get("f")} - rpt, errList := ui.makeReport(w, req, args) + rpt, errList := ui.makeReport(w, req, args, nil) if rpt == nil { return // error already reported } @@ -377,7 +377,7 @@ func (ui *webInterface) disasm(w http.ResponseWriter, req *http.Request) { } legend := report.ProfileLabels(rpt) - ui.render(w, "plaintext", rpt, errList, legend, webArgs{ + ui.render(w, req, "plaintext", rpt, errList, legend, webArgs{ TextBody: out.String(), }) @@ -387,7 +387,7 @@ func (ui *webInterface) disasm(w http.ResponseWriter, req *http.Request) { // data. 
func (ui *webInterface) source(w http.ResponseWriter, req *http.Request) { args := []string{"weblist", req.URL.Query().Get("f")} - rpt, errList := ui.makeReport(w, req, args) + rpt, errList := ui.makeReport(w, req, args, nil) if rpt == nil { return // error already reported } @@ -401,7 +401,7 @@ func (ui *webInterface) source(w http.ResponseWriter, req *http.Request) { } legend := report.ProfileLabels(rpt) - ui.render(w, "sourcelisting", rpt, errList, legend, webArgs{ + ui.render(w, req, "sourcelisting", rpt, errList, legend, webArgs{ HTMLBody: template.HTML(body.String()), }) } @@ -409,7 +409,9 @@ func (ui *webInterface) source(w http.ResponseWriter, req *http.Request) { // peek generates a web page listing callers/callers. func (ui *webInterface) peek(w http.ResponseWriter, req *http.Request) { args := []string{"peek", req.URL.Query().Get("f")} - rpt, errList := ui.makeReport(w, req, args, "lines", "t") + rpt, errList := ui.makeReport(w, req, args, func(cfg *config) { + cfg.Granularity = "lines" + }) if rpt == nil { return // error already reported } @@ -422,11 +424,30 @@ func (ui *webInterface) peek(w http.ResponseWriter, req *http.Request) { } legend := report.ProfileLabels(rpt) - ui.render(w, "plaintext", rpt, errList, legend, webArgs{ + ui.render(w, req, "plaintext", rpt, errList, legend, webArgs{ TextBody: out.String(), }) } +// saveConfig saves URL configuration. +func (ui *webInterface) saveConfig(w http.ResponseWriter, req *http.Request) { + if err := setConfig(ui.settingsFile, *req.URL); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + ui.options.UI.PrintErr(err) + return + } +} + +// deleteConfig deletes a configuration. 
+func (ui *webInterface) deleteConfig(w http.ResponseWriter, req *http.Request) { + name := req.URL.Query().Get("config") + if err := removeConfig(ui.settingsFile, name); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + ui.options.UI.PrintErr(err) + return + } +} + // getFromLegend returns the suffix of an entry in legend that starts // with param. It returns def if no such entry is found. func getFromLegend(legend []string, param, def string) string { diff --git a/src/cmd/vendor/github.com/google/pprof/internal/plugin/plugin.go b/src/cmd/vendor/github.com/google/pprof/internal/plugin/plugin.go index 4c1db2331f..3a8d0af730 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/plugin/plugin.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/plugin/plugin.go @@ -114,7 +114,7 @@ type ObjTool interface { // Disasm disassembles the named object file, starting at // the start address and stopping at (before) the end address. - Disasm(file string, start, end uint64) ([]Inst, error) + Disasm(file string, start, end uint64, intelSyntax bool) ([]Inst, error) } // An Inst is a single instruction in an assembly listing. diff --git a/src/cmd/vendor/github.com/google/pprof/internal/report/report.go b/src/cmd/vendor/github.com/google/pprof/internal/report/report.go index 56083d8abf..a345208910 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/report/report.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/report/report.go @@ -79,6 +79,8 @@ type Options struct { Symbol *regexp.Regexp // Symbols to include on disassembly report. SourcePath string // Search path for source files. TrimPath string // Paths to trim from source file paths. + + IntelSyntax bool // Whether or not to print assembly in Intel syntax. } // Generate generates a report as directed by the Report. @@ -438,7 +440,7 @@ func PrintAssembly(w io.Writer, rpt *Report, obj plugin.ObjTool, maxFuncs int) e flatSum, cumSum := sns.Sum() // Get the function assembly. 
- insts, err := obj.Disasm(s.sym.File, s.sym.Start, s.sym.End) + insts, err := obj.Disasm(s.sym.File, s.sym.Start, s.sym.End, o.IntelSyntax) if err != nil { return err } @@ -1201,6 +1203,13 @@ func reportLabels(rpt *Report, g *graph.Graph, origCount, droppedNodes, droppedE nodeCount, origCount)) } } + + // Help new users understand the graph. + // A new line is intentionally added here to better show this message. + if fullHeaders { + label = append(label, "\\lSee https://git.io/JfYMW for how to read the graph") + } + return label } diff --git a/src/cmd/vendor/github.com/google/pprof/internal/report/source.go b/src/cmd/vendor/github.com/google/pprof/internal/report/source.go index ab8b64cbab..b480535439 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/report/source.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/report/source.go @@ -205,7 +205,7 @@ func PrintWebList(w io.Writer, rpt *Report, obj plugin.ObjTool, maxFiles int) er ff := fileFunction{n.Info.File, n.Info.Name} fns := fileNodes[ff] - asm := assemblyPerSourceLine(symbols, fns, ff.fileName, obj) + asm := assemblyPerSourceLine(symbols, fns, ff.fileName, obj, o.IntelSyntax) start, end := sourceCoordinates(asm) fnodes, path, err := getSourceFromFile(ff.fileName, reader, fns, start, end) @@ -239,7 +239,7 @@ func sourceCoordinates(asm map[int][]assemblyInstruction) (start, end int) { // assemblyPerSourceLine disassembles the binary containing a symbol // and classifies the assembly instructions according to its // corresponding source line, annotating them with a set of samples. -func assemblyPerSourceLine(objSyms []*objSymbol, rs graph.Nodes, src string, obj plugin.ObjTool) map[int][]assemblyInstruction { +func assemblyPerSourceLine(objSyms []*objSymbol, rs graph.Nodes, src string, obj plugin.ObjTool, intelSyntax bool) map[int][]assemblyInstruction { assembly := make(map[int][]assemblyInstruction) // Identify symbol to use for this collection of samples. 
o := findMatchingSymbol(objSyms, rs) @@ -248,7 +248,7 @@ func assemblyPerSourceLine(objSyms []*objSymbol, rs graph.Nodes, src string, obj } // Extract assembly for matched symbol - insts, err := obj.Disasm(o.sym.File, o.sym.Start, o.sym.End) + insts, err := obj.Disasm(o.sym.File, o.sym.Start, o.sym.End, intelSyntax) if err != nil { return assembly } diff --git a/src/cmd/vendor/github.com/google/pprof/profile/profile.go b/src/cmd/vendor/github.com/google/pprof/profile/profile.go index c950d8dc7f..d94d8b3d1c 100644 --- a/src/cmd/vendor/github.com/google/pprof/profile/profile.go +++ b/src/cmd/vendor/github.com/google/pprof/profile/profile.go @@ -398,10 +398,12 @@ func (p *Profile) CheckValid() error { } } for _, ln := range l.Line { - if f := ln.Function; f != nil { - if f.ID == 0 || functions[f.ID] != f { - return fmt.Errorf("inconsistent function %p: %d", f, f.ID) - } + f := ln.Function + if f == nil { + return fmt.Errorf("location id: %d has a line with nil function", l.ID) + } + if f.ID == 0 || functions[f.ID] != f { + return fmt.Errorf("inconsistent function %p: %d", f, f.ID) } } } diff --git a/src/cmd/vendor/modules.txt b/src/cmd/vendor/modules.txt index 21326f7521..e2078b1a7f 100644 --- a/src/cmd/vendor/modules.txt +++ b/src/cmd/vendor/modules.txt @@ -1,4 +1,4 @@ -# github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3 +# github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99 ## explicit github.com/google/pprof/driver github.com/google/pprof/internal/binutils From 5d1378143bc07791296abb420df35537ad80492f Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor Date: Wed, 7 Oct 2020 16:37:05 -0700 Subject: [PATCH 149/281] cmd/cgo: add more architectures to size maps This brings over the architectures that the gofrontend knows about. This permits using the main cgo tool for those architectures, as cgo can be used with -godefs without gc support. This will help add golang.org/x/sys/unix support for other architectures. 
For #37443 Change-Id: I63632b9c5139e71b9ccab8edcc7acdb464229b74 Reviewed-on: https://go-review.googlesource.com/c/go/+/260657 Trust: Ian Lance Taylor Run-TryBot: Ian Lance Taylor TryBot-Result: Go Bot Reviewed-by: Than McIntosh --- src/cmd/cgo/main.go | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/src/cmd/cgo/main.go b/src/cmd/cgo/main.go index 5c44fb72f4..7d02ac3c54 100644 --- a/src/cmd/cgo/main.go +++ b/src/cmd/cgo/main.go @@ -170,35 +170,51 @@ func usage() { var ptrSizeMap = map[string]int64{ "386": 4, + "alpha": 8, "amd64": 8, "arm": 4, "arm64": 8, + "m68k": 4, "mips": 4, "mipsle": 4, "mips64": 8, "mips64le": 8, + "nios2": 4, + "ppc": 4, "ppc64": 8, "ppc64le": 8, + "riscv": 4, "riscv64": 8, "s390": 4, "s390x": 8, + "sh": 4, + "shbe": 4, + "sparc": 4, "sparc64": 8, } var intSizeMap = map[string]int64{ "386": 4, + "alpha": 8, "amd64": 8, "arm": 4, "arm64": 8, + "m68k": 4, "mips": 4, "mipsle": 4, "mips64": 8, "mips64le": 8, + "nios2": 4, + "ppc": 4, "ppc64": 8, "ppc64le": 8, + "riscv": 4, "riscv64": 8, "s390": 4, "s390x": 8, + "sh": 4, + "shbe": 4, + "sparc": 4, "sparc64": 8, } From 542693e00529fbb4248fac614ece68b127a5ec4d Mon Sep 17 00:00:00 2001 From: Roberto Clapis Date: Tue, 22 Sep 2020 17:57:06 +0200 Subject: [PATCH 150/281] net/http: make SameSiteDefaultMode behavior match the specification The current specification does not foresee a SameSite attribute without a value. While the existing implementation would serialize SameSite in a way that would likely be ignored by well-impelemented clients, it is better to not rely on this kind of quirks. 
Specification: https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-05#section-4.1.1 Fixes #36990 Change-Id: Ie51152741d7e84bab64d3e4e4f780286932acbde Reviewed-on: https://go-review.googlesource.com/c/go/+/256498 Trust: Roberto Clapis Reviewed-by: Filippo Valsorda --- doc/go1.16.html | 5 +++++ src/net/http/cookie.go | 2 +- src/net/http/cookie_test.go | 11 ++++++++++- 3 files changed, 16 insertions(+), 2 deletions(-) diff --git a/doc/go1.16.html b/doc/go1.16.html index 2962448742..720acc757a 100644 --- a/doc/go1.16.html +++ b/doc/go1.16.html @@ -286,6 +286,11 @@ Do not send CLs removing the interior tags from such phrases. of the form "Range": "bytes=--N" where "-N" is a negative suffix length, for example "Range": "bytes=--2". It now replies with a 416 "Range Not Satisfiable" response.

+ +

+ Cookies set with SameSiteDefaultMode now behave according to the current + spec (no attribute is set) instead of generating a SameSite key without a value. +

diff --git a/src/net/http/cookie.go b/src/net/http/cookie.go index d7a8f5e94e..141bc947f6 100644 --- a/src/net/http/cookie.go +++ b/src/net/http/cookie.go @@ -220,7 +220,7 @@ func (c *Cookie) String() string { } switch c.SameSite { case SameSiteDefaultMode: - b.WriteString("; SameSite") + // Skip, default mode is obtained by not emitting the attribute. case SameSiteNoneMode: b.WriteString("; SameSite=None") case SameSiteLaxMode: diff --git a/src/net/http/cookie_test.go b/src/net/http/cookie_test.go index 9e8196ebce..959713a0dc 100644 --- a/src/net/http/cookie_test.go +++ b/src/net/http/cookie_test.go @@ -67,7 +67,7 @@ var writeSetCookiesTests = []struct { }, { &Cookie{Name: "cookie-12", Value: "samesite-default", SameSite: SameSiteDefaultMode}, - "cookie-12=samesite-default; SameSite", + "cookie-12=samesite-default", }, { &Cookie{Name: "cookie-13", Value: "samesite-lax", SameSite: SameSiteLaxMode}, @@ -282,6 +282,15 @@ var readSetCookiesTests = []struct { Raw: "samesitedefault=foo; SameSite", }}, }, + { + Header{"Set-Cookie": {"samesiteinvalidisdefault=foo; SameSite=invalid"}}, + []*Cookie{{ + Name: "samesiteinvalidisdefault", + Value: "foo", + SameSite: SameSiteDefaultMode, + Raw: "samesiteinvalidisdefault=foo; SameSite=invalid", + }}, + }, { Header{"Set-Cookie": {"samesitelax=foo; SameSite=Lax"}}, []*Cookie{{ From f3b58edd036e082c210b11bb3aee8a40aa8fbcf2 Mon Sep 17 00:00:00 2001 From: Cuong Manh Le Date: Thu, 8 Oct 2020 19:51:57 +0700 Subject: [PATCH 151/281] cmd/compile: use types.IdealFoo directly in predecl Instead of using untype(Ctype) to get corresponding untyped type. Passes toolstash-check. 
Change-Id: I311fe6c94b1f8eb2e1615101a379cd06dcab835b Reviewed-on: https://go-review.googlesource.com/c/go/+/260698 Trust: Cuong Manh Le Run-TryBot: Cuong Manh Le TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/bexport.go | 38 +++++--------------------- src/cmd/compile/internal/gc/iexport.go | 4 +-- 2 files changed, 9 insertions(+), 33 deletions(-) diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go index 5ced66c0da..f82925347c 100644 --- a/src/cmd/compile/internal/gc/bexport.go +++ b/src/cmd/compile/internal/gc/bexport.go @@ -126,30 +126,6 @@ const ( aliasTag ) -// untype returns the "pseudo" untyped type for a Ctype (import/export use only). -// (we can't use a pre-initialized array because we must be sure all types are -// set up) -func untype(ctype Ctype) *types.Type { - switch ctype { - case CTINT: - return types.Idealint - case CTRUNE: - return types.Idealrune - case CTFLT: - return types.Idealfloat - case CTCPLX: - return types.Idealcomplex - case CTSTR: - return types.Idealstring - case CTBOOL: - return types.Idealbool - case CTNIL: - return types.Types[TNIL] - } - Fatalf("exporter: unknown Ctype") - return nil -} - var predecl []*types.Type // initialized lazily func predeclared() []*types.Type { @@ -184,13 +160,13 @@ func predeclared() []*types.Type { types.Errortype, // untyped types - untype(CTBOOL), - untype(CTINT), - untype(CTRUNE), - untype(CTFLT), - untype(CTCPLX), - untype(CTSTR), - untype(CTNIL), + types.Idealbool, + types.Idealint, + types.Idealrune, + types.Idealfloat, + types.Idealcomplex, + types.Idealstring, + types.Types[TNIL], // package unsafe types.Types[TUNSAFEPTR], diff --git a/src/cmd/compile/internal/gc/iexport.go b/src/cmd/compile/internal/gc/iexport.go index 3be3b0a213..3ccaf60f40 100644 --- a/src/cmd/compile/internal/gc/iexport.go +++ b/src/cmd/compile/internal/gc/iexport.go @@ -780,8 +780,8 @@ func constTypeOf(typ *types.Type) Ctype { } func (w 
*exportWriter) value(typ *types.Type, v Val) { - if typ.IsUntyped() { - typ = untype(v.Ctype()) + if vt := idealType(v.Ctype()); typ.IsUntyped() && typ != vt { + Fatalf("exporter: untyped type mismatch, have: %v, want: %v", typ, vt) } w.typ(typ) From 46ab0c0c0474d38d9b924b2428f20c6da58c85fa Mon Sep 17 00:00:00 2001 From: Cuong Manh Le Date: Thu, 8 Oct 2020 20:33:36 +0700 Subject: [PATCH 152/281] cmd/compile: rename types.IdealFoo to types.UntypedFoo To be consistent with go/types. Passes toolstash-check. Change-Id: I5e02f529064a904310a164f8765082aa533cc799 Reviewed-on: https://go-review.googlesource.com/c/go/+/260699 Trust: Cuong Manh Le Run-TryBot: Cuong Manh Le TryBot-Result: Go Bot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/bexport.go | 12 ++++---- src/cmd/compile/internal/gc/const.go | 36 ++++++++++++------------ src/cmd/compile/internal/gc/fmt.go | 14 ++++----- src/cmd/compile/internal/gc/iexport.go | 6 ++-- src/cmd/compile/internal/gc/iimport.go | 2 +- src/cmd/compile/internal/gc/ssa.go | 4 +-- src/cmd/compile/internal/gc/subr.go | 2 +- src/cmd/compile/internal/gc/typecheck.go | 16 +++++------ src/cmd/compile/internal/gc/universe.go | 10 +++---- src/cmd/compile/internal/types/type.go | 14 ++++----- 10 files changed, 58 insertions(+), 58 deletions(-) diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go index f82925347c..0cb9fe9e62 100644 --- a/src/cmd/compile/internal/gc/bexport.go +++ b/src/cmd/compile/internal/gc/bexport.go @@ -160,12 +160,12 @@ func predeclared() []*types.Type { types.Errortype, // untyped types - types.Idealbool, - types.Idealint, - types.Idealrune, - types.Idealfloat, - types.Idealcomplex, - types.Idealstring, + types.UntypedBool, + types.UntypedInt, + types.UntypedRune, + types.UntypedFloat, + types.UntypedComplex, + types.UntypedString, types.Types[TNIL], // package unsafe diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go index 
d881be485e..b28c0fc8d0 100644 --- a/src/cmd/compile/internal/gc/const.go +++ b/src/cmd/compile/internal/gc/const.go @@ -1019,17 +1019,17 @@ func nodlit(v Val) *Node { func idealType(ct Ctype) *types.Type { switch ct { case CTSTR: - return types.Idealstring + return types.UntypedString case CTBOOL: - return types.Idealbool + return types.UntypedBool case CTINT: - return types.Idealint + return types.UntypedInt case CTRUNE: - return types.Idealrune + return types.UntypedRune case CTFLT: - return types.Idealfloat + return types.UntypedFloat case CTCPLX: - return types.Idealcomplex + return types.UntypedComplex case CTNIL: return types.Types[TNIL] } @@ -1080,17 +1080,17 @@ func defaultlit2(l *Node, r *Node, force bool) (*Node, *Node) { func ctype(t *types.Type) Ctype { switch t { - case types.Idealbool: + case types.UntypedBool: return CTBOOL - case types.Idealstring: + case types.UntypedString: return CTSTR - case types.Idealint: + case types.UntypedInt: return CTINT - case types.Idealrune: + case types.UntypedRune: return CTRUNE - case types.Idealfloat: + case types.UntypedFloat: return CTFLT - case types.Idealcomplex: + case types.UntypedComplex: return CTCPLX } Fatalf("bad type %v", t) @@ -1111,17 +1111,17 @@ func defaultType(t *types.Type) *types.Type { } switch t { - case types.Idealbool: + case types.UntypedBool: return types.Types[TBOOL] - case types.Idealstring: + case types.UntypedString: return types.Types[TSTRING] - case types.Idealint: + case types.UntypedInt: return types.Types[TINT] - case types.Idealrune: + case types.UntypedRune: return types.Runetype - case types.Idealfloat: + case types.UntypedFloat: return types.Types[TFLOAT64] - case types.Idealcomplex: + case types.UntypedComplex: return types.Types[TCOMPLEX128] } diff --git a/src/cmd/compile/internal/gc/fmt.go b/src/cmd/compile/internal/gc/fmt.go index d4af451506..36b596338f 100644 --- a/src/cmd/compile/internal/gc/fmt.go +++ b/src/cmd/compile/internal/gc/fmt.go @@ -773,17 +773,17 @@ func 
tconv2(b *bytes.Buffer, t *types.Type, flag FmtFlag, mode fmtMode, visited if int(t.Etype) < len(basicnames) && basicnames[t.Etype] != "" { var name string switch t { - case types.Idealbool: + case types.UntypedBool: name = "untyped bool" - case types.Idealstring: + case types.UntypedString: name = "untyped string" - case types.Idealint: + case types.UntypedInt: name = "untyped int" - case types.Idealrune: + case types.UntypedRune: name = "untyped rune" - case types.Idealfloat: + case types.UntypedFloat: name = "untyped float" - case types.Idealcomplex: + case types.UntypedComplex: name = "untyped complex" default: name = basicnames[t.Etype] @@ -1333,7 +1333,7 @@ func (n *Node) exprfmt(s fmt.State, prec int, mode fmtMode) { n.Orig.exprfmt(s, prec, mode) return } - if n.Type != nil && n.Type.Etype != TIDEAL && n.Type.Etype != TNIL && n.Type != types.Idealbool && n.Type != types.Idealstring { + if n.Type != nil && n.Type.Etype != TIDEAL && n.Type.Etype != TNIL && n.Type != types.UntypedBool && n.Type != types.UntypedString { // Need parens when type begins with what might // be misinterpreted as a unary operator: * or <-. 
if n.Type.IsPtr() || (n.Type.IsChan() && n.Type.ChanDir() == types.Crecv) { diff --git a/src/cmd/compile/internal/gc/iexport.go b/src/cmd/compile/internal/gc/iexport.go index 3ccaf60f40..df08a4a6c2 100644 --- a/src/cmd/compile/internal/gc/iexport.go +++ b/src/cmd/compile/internal/gc/iexport.go @@ -751,11 +751,11 @@ func (w *exportWriter) param(f *types.Field) { func constTypeOf(typ *types.Type) Ctype { switch typ { - case types.Idealint, types.Idealrune: + case types.UntypedInt, types.UntypedRune: return CTINT - case types.Idealfloat: + case types.UntypedFloat: return CTFLT - case types.Idealcomplex: + case types.UntypedComplex: return CTCPLX } diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go index 0c5e469c57..5f107eeec7 100644 --- a/src/cmd/compile/internal/gc/iimport.go +++ b/src/cmd/compile/internal/gc/iimport.go @@ -375,7 +375,7 @@ func (p *importReader) value() (typ *types.Type, v Val) { v.U = p.string() case CTINT: x := new(Mpint) - x.Rune = typ == types.Idealrune + x.Rune = typ == types.UntypedRune p.mpint(&x.Val, typ) v.U = x case CTFLT: diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 32394c4b1a..7bde7f7c65 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -50,12 +50,12 @@ func initssaconfig() { // Caching is disabled in the backend, so generating these here avoids allocations. 
_ = types.NewPtr(types.Types[TINTER]) // *interface{} _ = types.NewPtr(types.NewPtr(types.Types[TSTRING])) // **string - _ = types.NewPtr(types.NewPtr(types.Idealstring)) // **string + _ = types.NewPtr(types.NewPtr(types.UntypedString)) // **string _ = types.NewPtr(types.NewSlice(types.Types[TINTER])) // *[]interface{} _ = types.NewPtr(types.NewPtr(types.Bytetype)) // **byte _ = types.NewPtr(types.NewSlice(types.Bytetype)) // *[]byte _ = types.NewPtr(types.NewSlice(types.Types[TSTRING])) // *[]string - _ = types.NewPtr(types.NewSlice(types.Idealstring)) // *[]string + _ = types.NewPtr(types.NewSlice(types.UntypedString)) // *[]string _ = types.NewPtr(types.NewPtr(types.NewPtr(types.Types[TUINT8]))) // ***uint8 _ = types.NewPtr(types.Types[TINT16]) // *int16 _ = types.NewPtr(types.Types[TINT64]) // *int64 diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index 07547df36e..0242832322 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -825,7 +825,7 @@ func assignconvfn(n *Node, t *types.Type, context func() string) *Node { // Convert ideal bool from comparison to plain bool // if the next step is non-bool (like interface{}). 
- if n.Type == types.Idealbool && !t.IsBoolean() { + if n.Type == types.UntypedBool && !t.IsBoolean() { if n.Op == ONAME || n.Op == OLITERAL { r := nod(OCONVNOP, n, nil) r.Type = types.Types[TBOOL] diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 0eb0dae373..769341ee04 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -361,7 +361,7 @@ func typecheck1(n *Node, top int) (res *Node) { ok |= ctxExpr if n.Type == nil && n.Val().Ctype() == CTSTR { - n.Type = types.Idealstring + n.Type = types.UntypedString } case ONONAME: @@ -623,8 +623,8 @@ func typecheck1(n *Node, top int) (res *Node) { // no defaultlit for left // the outer context gives the type n.Type = l.Type - if (l.Type == types.Idealfloat || l.Type == types.Idealcomplex) && r.Op == OLITERAL { - n.Type = types.Idealint + if (l.Type == types.UntypedFloat || l.Type == types.UntypedComplex) && r.Op == OLITERAL { + n.Type = types.UntypedInt } break @@ -777,7 +777,7 @@ func typecheck1(n *Node, top int) (res *Node) { if iscmp[n.Op] { evconst(n) - t = types.Idealbool + t = types.UntypedBool if n.Op != OLITERAL { l, r = defaultlit2(l, r, true) n.Left = l @@ -1458,7 +1458,7 @@ func typecheck1(n *Node, top int) (res *Node) { // Determine result type. switch t.Etype { case TIDEAL: - n.Type = types.Idealfloat + n.Type = types.UntypedFloat case TCOMPLEX64: n.Type = types.Types[TFLOAT32] case TCOMPLEX128: @@ -1504,7 +1504,7 @@ func typecheck1(n *Node, top int) (res *Node) { return n case TIDEAL: - t = types.Idealcomplex + t = types.UntypedComplex case TFLOAT32: t = types.Types[TCOMPLEX64] @@ -2724,9 +2724,9 @@ func errorDetails(nl Nodes, tstruct *types.Type, isddd bool) string { // e.g in error messages about wrong arguments to return. 
func sigrepr(t *types.Type, isddd bool) string { switch t { - case types.Idealstring: + case types.UntypedString: return "string" - case types.Idealbool: + case types.UntypedBool: return "bool" } diff --git a/src/cmd/compile/internal/gc/universe.go b/src/cmd/compile/internal/gc/universe.go index 04861c8dd4..ff8cabd8e3 100644 --- a/src/cmd/compile/internal/gc/universe.go +++ b/src/cmd/compile/internal/gc/universe.go @@ -123,21 +123,21 @@ func lexinit() { asNode(s2.Def).SetSubOp(s.op) } - types.Idealstring = types.New(TSTRING) - types.Idealbool = types.New(TBOOL) + types.UntypedString = types.New(TSTRING) + types.UntypedBool = types.New(TBOOL) types.Types[TANY] = types.New(TANY) s := builtinpkg.Lookup("true") s.Def = asTypesNode(nodbool(true)) asNode(s.Def).Sym = lookup("true") asNode(s.Def).Name = new(Name) - asNode(s.Def).Type = types.Idealbool + asNode(s.Def).Type = types.UntypedBool s = builtinpkg.Lookup("false") s.Def = asTypesNode(nodbool(false)) asNode(s.Def).Sym = lookup("false") asNode(s.Def).Name = new(Name) - asNode(s.Def).Type = types.Idealbool + asNode(s.Def).Type = types.UntypedBool s = lookup("_") s.Block = -100 @@ -351,7 +351,7 @@ func typeinit() { sizeofString = Rnd(sliceLenOffset+int64(Widthptr), int64(Widthptr)) dowidth(types.Types[TSTRING]) - dowidth(types.Idealstring) + dowidth(types.UntypedString) } func makeErrorInterface() *types.Type { diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go index 5d1d5d4008..023ab9af88 100644 --- a/src/cmd/compile/internal/types/type.go +++ b/src/cmd/compile/internal/types/type.go @@ -105,14 +105,14 @@ var ( Errortype *Type // Types to represent untyped string and boolean constants. - Idealstring *Type - Idealbool *Type + UntypedString *Type + UntypedBool *Type // Types to represent untyped numeric constants. 
- Idealint = New(TIDEAL) - Idealrune = New(TIDEAL) - Idealfloat = New(TIDEAL) - Idealcomplex = New(TIDEAL) + UntypedInt = New(TIDEAL) + UntypedRune = New(TIDEAL) + UntypedFloat = New(TIDEAL) + UntypedComplex = New(TIDEAL) ) // A Type represents a Go type. @@ -1436,7 +1436,7 @@ func (t *Type) IsUntyped() bool { if t == nil { return false } - if t == Idealstring || t == Idealbool { + if t == UntypedString || t == UntypedBool { return true } switch t.Etype { From a4b95cd092aa10b40c6be82a3e0bf1052e27122d Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Thu, 8 Oct 2020 11:18:02 -0700 Subject: [PATCH 153/281] cmd/compile: fix incorrect comparison folding We lost a sign extension that was necessary. The nonnegative comparison didn't have the correct extension on it. If the larger constant is positive, but its shorter sign extension is negative, the rule breaks. Fixes #41872 Change-Id: I6592ef103f840fbb786bf8cb94fd8804c760c976 Reviewed-on: https://go-review.googlesource.com/c/go/+/260701 Trust: Keith Randall Run-TryBot: Keith Randall TryBot-Result: Go Bot Reviewed-by: Alberto Donizetti --- src/cmd/compile/internal/ssa/gen/AMD64.rules | 4 +-- src/cmd/compile/internal/ssa/rewriteAMD64.go | 8 +++--- test/fixedbugs/issue41872.go | 26 ++++++++++++++++++++ 3 files changed, 32 insertions(+), 6 deletions(-) create mode 100644 test/fixedbugs/issue41872.go diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules index 408678f054..8a253035e0 100644 --- a/src/cmd/compile/internal/ssa/gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules @@ -1274,8 +1274,8 @@ (CMPQconst (ANDQconst _ [m]) [n]) && 0 <= m && m < n => (FlagLT_ULT) (CMPQconst (ANDLconst _ [m]) [n]) && 0 <= m && m < n => (FlagLT_ULT) (CMPLconst (ANDLconst _ [m]) [n]) && 0 <= m && m < n => (FlagLT_ULT) -(CMPWconst (ANDLconst _ [m]) [n]) && 0 <= m && int16(m) < n => (FlagLT_ULT) -(CMPBconst (ANDLconst _ [m]) [n]) && 0 <= m && int8(m) < n => (FlagLT_ULT) +(CMPWconst 
(ANDLconst _ [m]) [n]) && 0 <= int16(m) && int16(m) < n => (FlagLT_ULT) +(CMPBconst (ANDLconst _ [m]) [n]) && 0 <= int8(m) && int8(m) < n => (FlagLT_ULT) // TESTQ c c sets flags like CMPQ c 0. (TESTQconst [c] (MOVQconst [d])) && int64(c) == d && c == 0 => (FlagEQ) diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 3d7eb8c9a4..32ef26f98d 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -6886,7 +6886,7 @@ func rewriteValueAMD64_OpAMD64CMPBconst(v *Value) bool { return true } // match: (CMPBconst (ANDLconst _ [m]) [n]) - // cond: 0 <= m && int8(m) < n + // cond: 0 <= int8(m) && int8(m) < n // result: (FlagLT_ULT) for { n := auxIntToInt8(v.AuxInt) @@ -6894,7 +6894,7 @@ func rewriteValueAMD64_OpAMD64CMPBconst(v *Value) bool { break } m := auxIntToInt32(v_0.AuxInt) - if !(0 <= m && int8(m) < n) { + if !(0 <= int8(m) && int8(m) < n) { break } v.reset(OpAMD64FlagLT_ULT) @@ -8243,7 +8243,7 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value) bool { return true } // match: (CMPWconst (ANDLconst _ [m]) [n]) - // cond: 0 <= m && int16(m) < n + // cond: 0 <= int16(m) && int16(m) < n // result: (FlagLT_ULT) for { n := auxIntToInt16(v.AuxInt) @@ -8251,7 +8251,7 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value) bool { break } m := auxIntToInt32(v_0.AuxInt) - if !(0 <= m && int16(m) < n) { + if !(0 <= int16(m) && int16(m) < n) { break } v.reset(OpAMD64FlagLT_ULT) diff --git a/test/fixedbugs/issue41872.go b/test/fixedbugs/issue41872.go new file mode 100644 index 0000000000..837d61ae0a --- /dev/null +++ b/test/fixedbugs/issue41872.go @@ -0,0 +1,26 @@ +// run + +// Copyright 2020 The Go Authors. All rights reserved. Use of this +// source code is governed by a BSD-style license that can be found in +// the LICENSE file. 
+ +package main + +//go:noinline +func f8(x int32) bool { + return byte(x&0xc0) == 64 +} + +//go:noinline +func f16(x int32) bool { + return uint16(x&0x8040) == 64 +} + +func main() { + if !f8(64) { + panic("wanted true, got false") + } + if !f16(64) { + panic("wanted true, got false") + } +} From 9e0837f2e9967b883bebf4b8928108d9199ae677 Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor Date: Wed, 7 Oct 2020 11:15:01 -0700 Subject: [PATCH 154/281] syscall: restore EscapeArg behavior for empty string Accidentally broken by CL 259978. For #41825 Change-Id: Id663514e6eefa325faccdb66493d0bb2b3281046 Reviewed-on: https://go-review.googlesource.com/c/go/+/260397 Trust: Ian Lance Taylor Trust: Alex Brainman Trust: Emmanuel Odeke Run-TryBot: Ian Lance Taylor TryBot-Result: Go Bot Reviewed-by: Alex Brainman Reviewed-by: Emmanuel Odeke Reviewed-by: Brad Fitzpatrick --- src/syscall/exec_windows.go | 3 +++ src/syscall/exec_windows_test.go | 43 ++++++++++++++++++++++++++++++++ 2 files changed, 46 insertions(+) create mode 100644 src/syscall/exec_windows_test.go diff --git a/src/syscall/exec_windows.go b/src/syscall/exec_windows.go index 500321ef0d..4a1d74ba3f 100644 --- a/src/syscall/exec_windows.go +++ b/src/syscall/exec_windows.go @@ -24,6 +24,9 @@ var ForkLock sync.RWMutex // - finally, s is wrapped with double quotes (arg -> "arg"), // but only if there is space or tab inside s. func EscapeArg(s string) string { + if len(s) == 0 { + return `""` + } for i := 0; i < len(s); i++ { switch s[i] { case '"', '\\', ' ', '\t': diff --git a/src/syscall/exec_windows_test.go b/src/syscall/exec_windows_test.go new file mode 100644 index 0000000000..eda1d36877 --- /dev/null +++ b/src/syscall/exec_windows_test.go @@ -0,0 +1,43 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package syscall_test + +import ( + "syscall" + "testing" +) + +func TestEscapeArg(t *testing.T) { + var tests = []struct { + input, output string + }{ + {``, `""`}, + {`a`, `a`}, + {` `, `" "`}, + {`\`, `\`}, + {`"`, `\"`}, + {`\"`, `\\\"`}, + {`\\"`, `\\\\\"`}, + {`\\ `, `"\\ "`}, + {` \\`, `" \\\\"`}, + {`a `, `"a "`}, + {`C:\`, `C:\`}, + {`C:\Program Files (x32)\Common\`, `"C:\Program Files (x32)\Common\\"`}, + {`C:\Users\Игорь\`, `C:\Users\Игорь\`}, + {`Андрей\file`, `Андрей\file`}, + {`C:\Windows\temp`, `C:\Windows\temp`}, + {`c:\temp\newfile`, `c:\temp\newfile`}, + {`\\?\C:\Windows`, `\\?\C:\Windows`}, + {`\\?\`, `\\?\`}, + {`\\.\C:\Windows\`, `\\.\C:\Windows\`}, + {`\\server\share\file`, `\\server\share\file`}, + {`\\newserver\tempshare\really.txt`, `\\newserver\tempshare\really.txt`}, + } + for _, test := range tests { + if got := syscall.EscapeArg(test.input); got != test.output { + t.Errorf("EscapeArg(%#q) = %#q, want %#q", test.input, got, test.output) + } + } +} From 23e9e0c7f09bb50a870cbd1a2543a33df49b37b6 Mon Sep 17 00:00:00 2001 From: Cherry Zhang Date: Wed, 7 Oct 2020 18:26:26 -0400 Subject: [PATCH 155/281] syscall: support ptrace on macOS/ARM64 Updates #38485. 
Change-Id: I853966d934a8ee05cf62c7321f3e6271811d47b1 Reviewed-on: https://go-review.googlesource.com/c/go/+/260718 Trust: Cherry Zhang Run-TryBot: Cherry Zhang TryBot-Result: Go Bot Reviewed-by: Ian Lance Taylor --- src/syscall/syscall_darwin_arm64.go | 14 ++++---------- src/syscall/zsyscall_darwin_arm64.go | 16 ++++++++++++++++ src/syscall/zsyscall_darwin_arm64.s | 2 ++ 3 files changed, 22 insertions(+), 10 deletions(-) diff --git a/src/syscall/syscall_darwin_arm64.go b/src/syscall/syscall_darwin_arm64.go index bd110f2e7f..c824f6d89d 100644 --- a/src/syscall/syscall_darwin_arm64.go +++ b/src/syscall/syscall_darwin_arm64.go @@ -7,11 +7,11 @@ package syscall import "unsafe" func setTimespec(sec, nsec int64) Timespec { - return Timespec{Sec: int64(sec), Nsec: int64(nsec)} + return Timespec{Sec: sec, Nsec: nsec} } func setTimeval(sec, usec int64) Timeval { - return Timeval{Sec: int64(sec), Usec: int32(usec)} + return Timeval{Sec: sec, Usec: int32(usec)} } //sys Fstat(fd int, stat *Stat_t) (err error) @@ -20,14 +20,8 @@ func setTimeval(sec, usec int64) Timeval { //sys Lstat(path string, stat *Stat_t) (err error) //sys Stat(path string, stat *Stat_t) (err error) //sys Statfs(path string, stat *Statfs_t) (err error) -//sys fstatat(fd int, path string, stat *Stat_t, flags int) (err error) - -// Marked nosplit because it is called from forkAndExecInChild where -// stack growth is forbidden. 
-//go:nosplit -func ptrace(request int, pid int, addr uintptr, data uintptr) error { - return ENOTSUP -} +//sys fstatat(fd int, path string, stat *Stat_t, flags int) (err error) +//sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) func SetKevent(k *Kevent_t, fd, mode, flags int) { k.Ident = uint64(fd) diff --git a/src/syscall/zsyscall_darwin_arm64.go b/src/syscall/zsyscall_darwin_arm64.go index 0ff642eb25..7698b2503e 100644 --- a/src/syscall/zsyscall_darwin_arm64.go +++ b/src/syscall/zsyscall_darwin_arm64.go @@ -2087,3 +2087,19 @@ func libc_fstatat_trampoline() //go:linkname libc_fstatat libc_fstatat //go:cgo_import_dynamic libc_fstatat fstatat "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +//go:nosplit +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_ptrace_trampoline() + +//go:linkname libc_ptrace libc_ptrace +//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" diff --git a/src/syscall/zsyscall_darwin_arm64.s b/src/syscall/zsyscall_darwin_arm64.s index 214851604a..0a8879d1c3 100644 --- a/src/syscall/zsyscall_darwin_arm64.s +++ b/src/syscall/zsyscall_darwin_arm64.s @@ -249,3 +249,5 @@ TEXT ·libc_statfs_trampoline(SB),NOSPLIT,$0-0 JMP libc_statfs(SB) TEXT ·libc_fstatat_trampoline(SB),NOSPLIT,$0-0 JMP libc_fstatat(SB) +TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 + JMP libc_ptrace(SB) From f8df205e74d5122c43f41923280451641e566ee2 Mon Sep 17 00:00:00 2001 From: Cherry Zhang Date: Wed, 7 Oct 2020 18:29:51 -0400 Subject: [PATCH 156/281] all: enable more tests on macOS/ARM64 On macOS, we can do "go build", can exec, and have the source tree available, so we can enable more tests. Skip ones that don't work. 
Most of them are due to that it requires external linking (for now) and some tests don't work with external linking (e.g. runtime deadlock detection). For them, helper functions CanInternalLink/MustInternalLink are introduced. I still want to have internal linking implemented, but it is still a good idea to identify which tests don't work with external linking. Updates #38485. Change-Id: I6b14697573cf3f371daf54b9ddd792acf232f2f2 Reviewed-on: https://go-review.googlesource.com/c/go/+/260719 Trust: Cherry Zhang Run-TryBot: Cherry Zhang TryBot-Result: Go Bot Reviewed-by: Brad Fitzpatrick Reviewed-by: Than McIntosh --- src/cmd/go/go_test.go | 9 +++--- src/cmd/internal/sys/supported.go | 1 + src/cmd/internal/sys/supported_test.go | 18 +++++++++++ src/cmd/link/internal/ld/dwarf_test.go | 17 ++++++++++ src/cmd/link/internal/ld/ld_test.go | 7 +++- src/cmd/link/link_test.go | 1 + src/cmd/nm/nm_cgo_test.go | 5 +++ src/cmd/nm/nm_test.go | 3 ++ src/internal/cpu/cpu_test.go | 9 ++++++ src/internal/testenv/testenv.go | 44 ++++++++++++++++++-------- src/os/exec/exec_test.go | 4 +++ src/runtime/crash_test.go | 21 ++++++++++++ src/runtime/time_test.go | 4 +++ 13 files changed, 123 insertions(+), 20 deletions(-) create mode 100644 src/cmd/internal/sys/supported_test.go diff --git a/src/cmd/go/go_test.go b/src/cmd/go/go_test.go index 66a52c86ad..093ea2ffa1 100644 --- a/src/cmd/go/go_test.go +++ b/src/cmd/go/go_test.go @@ -58,11 +58,10 @@ func init() { switch runtime.GOOS { case "android", "js": canRun = false - case "darwin", "ios": - switch runtime.GOARCH { - case "arm64": - canRun = false - } + case "darwin": + // nothing to do + case "ios": + canRun = false case "linux": switch runtime.GOARCH { case "arm": diff --git a/src/cmd/internal/sys/supported.go b/src/cmd/internal/sys/supported.go index 8d87e95655..41e5ec1432 100644 --- a/src/cmd/internal/sys/supported.go +++ b/src/cmd/internal/sys/supported.go @@ -32,6 +32,7 @@ func MSanSupported(goos, goarch string) bool { } // 
MustLinkExternal reports whether goos/goarch requires external linking. +// (This is the opposite of internal/testenv.CanInternalLink. Keep them in sync.) func MustLinkExternal(goos, goarch string) bool { switch goos { case "android": diff --git a/src/cmd/internal/sys/supported_test.go b/src/cmd/internal/sys/supported_test.go new file mode 100644 index 0000000000..1217814af5 --- /dev/null +++ b/src/cmd/internal/sys/supported_test.go @@ -0,0 +1,18 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sys + +import ( + "internal/testenv" + "runtime" + "testing" +) + +func TestMustLinkExternalMatchesTestenv(t *testing.T) { + // MustLinkExternal and testenv.CanInternalLink are the exact opposite. + if b := MustLinkExternal(runtime.GOOS, runtime.GOARCH); b != !testenv.CanInternalLink() { + t.Fatalf("MustLinkExternal() == %v, testenv.CanInternalLink() == %v, don't match", b, testenv.CanInternalLink()) + } +} diff --git a/src/cmd/link/internal/ld/dwarf_test.go b/src/cmd/link/internal/ld/dwarf_test.go index 22948521f5..a66506d392 100644 --- a/src/cmd/link/internal/ld/dwarf_test.go +++ b/src/cmd/link/internal/ld/dwarf_test.go @@ -238,6 +238,10 @@ func TestSizes(t *testing.T) { if runtime.GOOS == "plan9" { t.Skip("skipping on plan9; no DWARF symbol table in executables") } + + // External linking may bring in C symbols with unknown size. Skip. + testenv.MustInternalLink(t) + t.Parallel() // DWARF sizes should never be -1. @@ -919,6 +923,7 @@ func TestAbstractOriginSanityIssue26237(t *testing.T) { func TestRuntimeTypeAttrInternal(t *testing.T) { testenv.MustHaveGoBuild(t) + testenv.MustInternalLink(t) if runtime.GOOS == "plan9" { t.Skip("skipping on plan9; no DWARF symbol table in executables") @@ -1018,6 +1023,9 @@ func main() { t.Fatalf("*main.X DIE had no runtime type attr. 
DIE: %v", dies[0]) } + if runtime.GOOS == "darwin" && runtime.GOARCH == "arm64" { + return // everything is PIE on ARM64, addresses are relocated + } if rtAttr.(uint64)+types.Addr != addr { t.Errorf("DWARF type offset was %#x+%#x, but test program said %#x", rtAttr.(uint64), types.Addr, addr) } @@ -1203,6 +1211,15 @@ func main() { } } + // When external linking, we put all symbols in the symbol table (so the + // external linker can find them). Skip the symbol table check. + // TODO: maybe there is some way to tell the external linker not to put + // those symbols in the executable's symbol table? Prefix the symbol name + // with "." or "L" to pretend it is a label? + if !testenv.CanInternalLink() { + return + } + syms, err := f.Symbols() if err != nil { t.Fatalf("error reading symbols: %v", err) diff --git a/src/cmd/link/internal/ld/ld_test.go b/src/cmd/link/internal/ld/ld_test.go index 4367c1028e..cdfaadb17d 100644 --- a/src/cmd/link/internal/ld/ld_test.go +++ b/src/cmd/link/internal/ld/ld_test.go @@ -18,8 +18,13 @@ import ( ) func TestUndefinedRelocErrors(t *testing.T) { - t.Parallel() testenv.MustHaveGoBuild(t) + + // When external linking, symbols may be defined externally, so we allow + // undefined symbols and let external linker resolve. Skip the test. + testenv.MustInternalLink(t) + + t.Parallel() dir, err := ioutil.TempDir("", "go-build") if err != nil { t.Fatal(err) diff --git a/src/cmd/link/link_test.go b/src/cmd/link/link_test.go index b7611f207c..6729568766 100644 --- a/src/cmd/link/link_test.go +++ b/src/cmd/link/link_test.go @@ -181,6 +181,7 @@ main.x: relocation target main.zero not defined func TestIssue33979(t *testing.T) { testenv.MustHaveGoBuild(t) testenv.MustHaveCGO(t) + testenv.MustInternalLink(t) // Skip test on platforms that do not support cgo internal linking. 
switch runtime.GOARCH { diff --git a/src/cmd/nm/nm_cgo_test.go b/src/cmd/nm/nm_cgo_test.go index 9a257e0ed2..58f2c24908 100644 --- a/src/cmd/nm/nm_cgo_test.go +++ b/src/cmd/nm/nm_cgo_test.go @@ -15,6 +15,11 @@ func canInternalLink() bool { switch runtime.GOOS { case "aix": return false + case "darwin": + switch runtime.GOARCH { + case "arm64": + return false + } case "dragonfly": return false case "freebsd": diff --git a/src/cmd/nm/nm_test.go b/src/cmd/nm/nm_test.go index 413a4eb06f..382446e9fe 100644 --- a/src/cmd/nm/nm_test.go +++ b/src/cmd/nm/nm_test.go @@ -173,6 +173,9 @@ func testGoExec(t *testing.T, iscgo, isexternallinker bool) { if runtime.GOOS == "windows" { return true } + if runtime.GOOS == "darwin" && runtime.GOARCH == "arm64" { + return true // On darwin/arm64 everything is PIE + } return false } diff --git a/src/internal/cpu/cpu_test.go b/src/internal/cpu/cpu_test.go index e09bd2d8b9..919bbd5ed7 100644 --- a/src/internal/cpu/cpu_test.go +++ b/src/internal/cpu/cpu_test.go @@ -15,6 +15,7 @@ import ( ) func TestMinimalFeatures(t *testing.T) { + // TODO: maybe do MustSupportFeatureDectection(t) ? if runtime.GOARCH == "arm64" { switch runtime.GOOS { case "linux", "android": @@ -36,6 +37,13 @@ func MustHaveDebugOptionsSupport(t *testing.T) { } } +func MustSupportFeatureDectection(t *testing.T) { + if runtime.GOOS == "darwin" && runtime.GOARCH == "arm64" { + t.Skipf("CPU feature detection is not supported on %s/%s", runtime.GOOS, runtime.GOARCH) + } + // TODO: maybe there are other platforms? 
+} + func runDebugOptionsTest(t *testing.T, test string, options string) { MustHaveDebugOptionsSupport(t) @@ -58,6 +66,7 @@ func runDebugOptionsTest(t *testing.T, test string, options string) { } func TestDisableAllCapabilities(t *testing.T) { + MustSupportFeatureDectection(t) runDebugOptionsTest(t, "TestAllCapabilitiesDisabled", "cpu.all=off") } diff --git a/src/internal/testenv/testenv.go b/src/internal/testenv/testenv.go index cfb033b2a2..0ee6355ee3 100644 --- a/src/internal/testenv/testenv.go +++ b/src/internal/testenv/testenv.go @@ -43,12 +43,8 @@ func HasGoBuild() bool { return false } switch runtime.GOOS { - case "android", "js": + case "android", "js", "ios": return false - case "darwin", "ios": - if runtime.GOARCH == "arm64" { - return false - } } return true } @@ -122,12 +118,8 @@ func GoTool() (string, error) { // using os.StartProcess or (more commonly) exec.Command. func HasExec() bool { switch runtime.GOOS { - case "js": + case "js", "ios": return false - case "darwin", "ios": - if runtime.GOARCH == "arm64" { - return false - } } return true } @@ -135,10 +127,8 @@ func HasExec() bool { // HasSrc reports whether the entire source tree is available under GOROOT. func HasSrc() bool { switch runtime.GOOS { - case "darwin", "ios": - if runtime.GOARCH == "arm64" { - return false - } + case "ios": + return false } return true } @@ -202,6 +192,32 @@ func MustHaveCGO(t testing.TB) { } } +// CanInternalLink reports whether the current system can link programs with +// internal linking. +// (This is the opposite of cmd/internal/sys.MustLinkExternal. Keep them in sync.) +func CanInternalLink() bool { + switch runtime.GOOS { + case "android": + if runtime.GOARCH != "arm64" { + return false + } + case "darwin", "ios": + if runtime.GOARCH == "arm64" { + return false + } + } + return true +} + +// MustInternalLink checks that the current system can link programs with internal +// linking. +// If not, MustInternalLink calls t.Skip with an explanation. 
+func MustInternalLink(t testing.TB) { + if !CanInternalLink() { + t.Skipf("skipping test: internal linking on %s/%s is not supported", runtime.GOOS, runtime.GOARCH) + } +} + // HasSymlink reports whether the current system can use os.Symlink. func HasSymlink() bool { ok, _ := hasSymlink() diff --git a/src/os/exec/exec_test.go b/src/os/exec/exec_test.go index dafbc64a17..9746722980 100644 --- a/src/os/exec/exec_test.go +++ b/src/os/exec/exec_test.go @@ -605,6 +605,10 @@ func TestExtraFiles(t *testing.T) { testenv.MustHaveExec(t) testenv.MustHaveGoBuild(t) + // This test runs with cgo disabled. External linking needs cgo, so + // it doesn't work if external linking is required. + testenv.MustInternalLink(t) + if runtime.GOOS == "windows" { t.Skipf("skipping test on %q", runtime.GOOS) } diff --git a/src/runtime/crash_test.go b/src/runtime/crash_test.go index eae4f538c1..5e22b7593e 100644 --- a/src/runtime/crash_test.go +++ b/src/runtime/crash_test.go @@ -181,6 +181,9 @@ func TestCrashHandler(t *testing.T) { } func testDeadlock(t *testing.T, name string) { + // External linking brings in cgo, causing deadlock detection not working. + testenv.MustInternalLink(t) + output := runTestProg(t, "testprog", name) want := "fatal error: all goroutines are asleep - deadlock!\n" if !strings.HasPrefix(output, want) { @@ -205,6 +208,9 @@ func TestLockedDeadlock2(t *testing.T) { } func TestGoexitDeadlock(t *testing.T) { + // External linking brings in cgo, causing deadlock detection not working. + testenv.MustInternalLink(t) + output := runTestProg(t, "testprog", "GoexitDeadlock") want := "no goroutines (main called runtime.Goexit) - deadlock!" if !strings.Contains(output, want) { @@ -290,6 +296,9 @@ func TestRecursivePanic4(t *testing.T) { } func TestGoexitCrash(t *testing.T) { + // External linking brings in cgo, causing deadlock detection not working. 
+ testenv.MustInternalLink(t) + output := runTestProg(t, "testprog", "GoexitExit") want := "no goroutines (main called runtime.Goexit) - deadlock!" if !strings.Contains(output, want) { @@ -348,6 +357,9 @@ func TestBreakpoint(t *testing.T) { } func TestGoexitInPanic(t *testing.T) { + // External linking brings in cgo, causing deadlock detection not working. + testenv.MustInternalLink(t) + // see issue 8774: this code used to trigger an infinite recursion output := runTestProg(t, "testprog", "GoexitInPanic") want := "fatal error: no goroutines (main called runtime.Goexit) - deadlock!" @@ -412,6 +424,9 @@ func TestPanicAfterGoexit(t *testing.T) { } func TestRecoveredPanicAfterGoexit(t *testing.T) { + // External linking brings in cgo, causing deadlock detection not working. + testenv.MustInternalLink(t) + output := runTestProg(t, "testprog", "RecoveredPanicAfterGoexit") want := "fatal error: no goroutines (main called runtime.Goexit) - deadlock!" if !strings.HasPrefix(output, want) { @@ -420,6 +435,9 @@ func TestRecoveredPanicAfterGoexit(t *testing.T) { } func TestRecoverBeforePanicAfterGoexit(t *testing.T) { + // External linking brings in cgo, causing deadlock detection not working. + testenv.MustInternalLink(t) + t.Parallel() output := runTestProg(t, "testprog", "RecoverBeforePanicAfterGoexit") want := "fatal error: no goroutines (main called runtime.Goexit) - deadlock!" @@ -429,6 +447,9 @@ func TestRecoverBeforePanicAfterGoexit(t *testing.T) { } func TestRecoverBeforePanicAfterGoexit2(t *testing.T) { + // External linking brings in cgo, causing deadlock detection not working. + testenv.MustInternalLink(t) + t.Parallel() output := runTestProg(t, "testprog", "RecoverBeforePanicAfterGoexit2") want := "fatal error: no goroutines (main called runtime.Goexit) - deadlock!" 
diff --git a/src/runtime/time_test.go b/src/runtime/time_test.go index a8dab7db8e..afd9af2af4 100644 --- a/src/runtime/time_test.go +++ b/src/runtime/time_test.go @@ -20,6 +20,10 @@ func TestFakeTime(t *testing.T) { t.Skip("faketime not supported on windows") } + // Faketime is advanced in checkdead. External linking brings in cgo, + // causing checkdead not working. + testenv.MustInternalLink(t) + t.Parallel() exe, err := buildTestProg(t, "testfaketime", "-tags=faketime") From 8f26b57f9afc238bdecb9b7030bc2f4364093885 Mon Sep 17 00:00:00 2001 From: Cuong Manh Le Date: Sat, 3 Oct 2020 01:23:47 +0700 Subject: [PATCH 157/281] cmd/compile: split exported/non-exported methods for interface type Currently, mhdr/methods is emitted with the same len/cap. There's no way to distinguish between exported and non-exported methods statically. This CL splits mhdr/methods into two parts, use "len" for number of exported methods, and "cap" for all methods. This fixes the bug in issue #22075, which intends to return the number of exported methods but currently return all methods. Note that with this encoding, we still can access either all/exported-only/non-exported-only methods: mhdr[:cap(mhdr)] // all methods mhdr // exported methods mhdr[len(mhdr):cap(mhdr)] // non-exported methods Thank to Matthew Dempsky (@mdempsky) for suggesting this encoding. 
Fixes #22075 Change-Id: If662adb03ccff27407d55a5578a0ed05a15e7cdd Reviewed-on: https://go-review.googlesource.com/c/go/+/259237 Trust: Cuong Manh Le Run-TryBot: Cuong Manh Le TryBot-Result: Go Bot Reviewed-by: Cherry Zhang Reviewed-by: Matthew Dempsky --- doc/go1.16.html | 8 ++++ src/cmd/compile/internal/gc/reflect.go | 3 +- src/internal/reflectlite/type.go | 35 ++++++++++------ src/internal/reflectlite/value.go | 4 +- src/reflect/all_test.go | 18 ++++++--- src/reflect/type.go | 55 +++++++++++++++----------- src/reflect/value.go | 21 +++++----- src/runtime/alg.go | 2 +- src/runtime/iface.go | 12 +++--- src/runtime/mfinal.go | 4 +- src/runtime/type.go | 26 +++++++++--- 11 files changed, 124 insertions(+), 64 deletions(-) diff --git a/doc/go1.16.html b/doc/go1.16.html index 720acc757a..509956fbf2 100644 --- a/doc/go1.16.html +++ b/doc/go1.16.html @@ -213,6 +213,14 @@ Do not send CLs removing the interior tags from such phrases. with "use of closed network connection".

+

reflect

+ +

+ For interface types and values, Method, + MethodByName, and + NumMethod now + operate on the interface's exported method set, rather than its full method set. +

text/template/parse

diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go index 21429af782..229fcfeaee 100644 --- a/src/cmd/compile/internal/gc/reflect.go +++ b/src/cmd/compile/internal/gc/reflect.go @@ -1275,8 +1275,9 @@ func dtypesym(t *types.Type) *obj.LSym { } ot = dgopkgpath(lsym, ot, tpkg) + xcount := sort.Search(n, func(i int) bool { return !types.IsExported(m[i].name.Name) }) ot = dsymptr(lsym, ot, lsym, ot+3*Widthptr+uncommonSize(t)) - ot = duintptr(lsym, ot, uint64(n)) + ot = duintptr(lsym, ot, uint64(xcount)) ot = duintptr(lsym, ot, uint64(n)) dataAdd := imethodSize() * n ot = dextratype(lsym, ot, t, dataAdd) diff --git a/src/internal/reflectlite/type.go b/src/internal/reflectlite/type.go index 15ba30da36..37cf03594f 100644 --- a/src/internal/reflectlite/type.go +++ b/src/internal/reflectlite/type.go @@ -234,10 +234,13 @@ type imethod struct { // interfaceType represents an interface type. type interfaceType struct { rtype - pkgPath name // import path - methods []imethod // sorted by hash + pkgPath name // import path + expMethods []imethod // sorted by name, see runtime/type.go:interfacetype to see how it is encoded. } +func (t *interfaceType) methods() []imethod { return t.expMethods[:cap(t.expMethods)] } +func (t *interfaceType) isEmpty() bool { return cap(t.expMethods) == 0 } + // mapType represents a map type. type mapType struct { rtype @@ -695,7 +698,7 @@ func add(p unsafe.Pointer, x uintptr, whySafe string) unsafe.Pointer { } // NumMethod returns the number of interface methods in the type's method set. -func (t *interfaceType) NumMethod() int { return len(t.methods) } +func (t *interfaceType) NumMethod() int { return len(t.expMethods) } // TypeOf returns the reflection Type that represents the dynamic type of i. // If i is a nil interface value, TypeOf returns nil. 
@@ -732,9 +735,10 @@ func implements(T, V *rtype) bool { return false } t := (*interfaceType)(unsafe.Pointer(T)) - if len(t.methods) == 0 { + if t.isEmpty() { return true } + tmethods := t.methods() // The same algorithm applies in both cases, but the // method tables for an interface type and a concrete type @@ -751,10 +755,11 @@ func implements(T, V *rtype) bool { if V.Kind() == Interface { v := (*interfaceType)(unsafe.Pointer(V)) i := 0 - for j := 0; j < len(v.methods); j++ { - tm := &t.methods[i] + vmethods := v.methods() + for j := 0; j < len(vmethods); j++ { + tm := &tmethods[i] tmName := t.nameOff(tm.name) - vm := &v.methods[j] + vm := &vmethods[j] vmName := V.nameOff(vm.name) if vmName.name() == tmName.name() && V.typeOff(vm.typ) == t.typeOff(tm.typ) { if !tmName.isExported() { @@ -770,7 +775,7 @@ func implements(T, V *rtype) bool { continue } } - if i++; i >= len(t.methods) { + if i++; i >= len(tmethods) { return true } } @@ -785,7 +790,7 @@ func implements(T, V *rtype) bool { i := 0 vmethods := v.methods() for j := 0; j < int(v.mcount); j++ { - tm := &t.methods[i] + tm := &tmethods[i] tmName := t.nameOff(tm.name) vm := vmethods[j] vmName := V.nameOff(vm.name) @@ -803,7 +808,7 @@ func implements(T, V *rtype) bool { continue } } - if i++; i >= len(t.methods) { + if i++; i >= len(tmethods) { return true } } @@ -897,7 +902,7 @@ func haveIdenticalUnderlyingType(T, V *rtype, cmpTags bool) bool { case Interface: t := (*interfaceType)(unsafe.Pointer(T)) v := (*interfaceType)(unsafe.Pointer(V)) - if len(t.methods) == 0 && len(v.methods) == 0 { + if t.isEmpty() && v.isEmpty() { return true } // Might have the same methods but still @@ -962,3 +967,11 @@ func toType(t *rtype) Type { func ifaceIndir(t *rtype) bool { return t.kind&kindDirectIface == 0 } + +func isEmptyIface(t *rtype) bool { + if t.Kind() != Interface { + return false + } + tt := (*interfaceType)(unsafe.Pointer(t)) + return tt.isEmpty() +} diff --git a/src/internal/reflectlite/value.go 
b/src/internal/reflectlite/value.go index 0365eeeabf..fb0ec77b58 100644 --- a/src/internal/reflectlite/value.go +++ b/src/internal/reflectlite/value.go @@ -228,7 +228,7 @@ func (v Value) Elem() Value { switch k { case Interface: var eface interface{} - if v.typ.NumMethod() == 0 { + if isEmptyIface(v.typ) { eface = *(*interface{})(v.ptr) } else { eface = (interface{})(*(*interface { @@ -433,7 +433,7 @@ func (v Value) assignTo(context string, dst *rtype, target unsafe.Pointer) Value return Value{dst, nil, flag(Interface)} } x := valueInterface(v) - if dst.NumMethod() == 0 { + if isEmptyIface(dst) { *(*interface{})(target) = x } else { ifaceE2I(dst, x, target) diff --git a/src/reflect/all_test.go b/src/reflect/all_test.go index a12712d254..be15362aae 100644 --- a/src/reflect/all_test.go +++ b/src/reflect/all_test.go @@ -2995,6 +2995,14 @@ func TestUnexportedMethods(t *testing.T) { if got := typ.NumMethod(); got != 0 { t.Errorf("NumMethod=%d, want 0 satisfied methods", got) } + + var i unexpI + if got := TypeOf(&i).Elem().NumMethod(); got != 0 { + t.Errorf("NumMethod=%d, want 0 satisfied methods", got) + } + if got := ValueOf(&i).Elem().NumMethod(); got != 0 { + t.Errorf("NumMethod=%d, want 0 satisfied methods", got) + } } type InnerInt struct { @@ -3648,21 +3656,21 @@ func TestCallPanic(t *testing.T) { v := ValueOf(T{i, i, i, i, T2{i, i}, i, i, T2{i, i}}) badCall(func() { call(v.Field(0).Method(0)) }) // .t0.W badCall(func() { call(v.Field(0).Elem().Method(0)) }) // .t0.W - badCall(func() { call(v.Field(0).Method(1)) }) // .t0.w + badMethod(func() { call(v.Field(0).Method(1)) }) // .t0.w badMethod(func() { call(v.Field(0).Elem().Method(2)) }) // .t0.w ok(func() { call(v.Field(1).Method(0)) }) // .T1.Y ok(func() { call(v.Field(1).Elem().Method(0)) }) // .T1.Y - badCall(func() { call(v.Field(1).Method(1)) }) // .T1.y + badMethod(func() { call(v.Field(1).Method(1)) }) // .T1.y badMethod(func() { call(v.Field(1).Elem().Method(2)) }) // .T1.y ok(func() { 
call(v.Field(2).Method(0)) }) // .NamedT0.W ok(func() { call(v.Field(2).Elem().Method(0)) }) // .NamedT0.W - badCall(func() { call(v.Field(2).Method(1)) }) // .NamedT0.w + badMethod(func() { call(v.Field(2).Method(1)) }) // .NamedT0.w badMethod(func() { call(v.Field(2).Elem().Method(2)) }) // .NamedT0.w ok(func() { call(v.Field(3).Method(0)) }) // .NamedT1.Y ok(func() { call(v.Field(3).Elem().Method(0)) }) // .NamedT1.Y - badCall(func() { call(v.Field(3).Method(1)) }) // .NamedT1.y + badMethod(func() { call(v.Field(3).Method(1)) }) // .NamedT1.y badMethod(func() { call(v.Field(3).Elem().Method(3)) }) // .NamedT1.y ok(func() { call(v.Field(4).Field(0).Method(0)) }) // .NamedT2.T1.Y @@ -3672,7 +3680,7 @@ func TestCallPanic(t *testing.T) { badCall(func() { call(v.Field(5).Method(0)) }) // .namedT0.W badCall(func() { call(v.Field(5).Elem().Method(0)) }) // .namedT0.W - badCall(func() { call(v.Field(5).Method(1)) }) // .namedT0.w + badMethod(func() { call(v.Field(5).Method(1)) }) // .namedT0.w badMethod(func() { call(v.Field(5).Elem().Method(2)) }) // .namedT0.w badCall(func() { call(v.Field(6).Method(0)) }) // .namedT1.Y diff --git a/src/reflect/type.go b/src/reflect/type.go index a3a616701b..0b34ca0c94 100644 --- a/src/reflect/type.go +++ b/src/reflect/type.go @@ -386,10 +386,14 @@ type imethod struct { // interfaceType represents an interface type. type interfaceType struct { rtype - pkgPath name // import path - methods []imethod // sorted by hash + pkgPath name // import path + expMethods []imethod // sorted by name, see runtime/type.go:interfacetype to see how it is encoded. } +// methods returns t's full method set, both exported and non-exported. +func (t *interfaceType) methods() []imethod { return t.expMethods[:cap(t.expMethods)] } +func (t *interfaceType) isEmpty() bool { return cap(t.expMethods) == 0 } + // mapType represents a map type. 
type mapType struct { rtype @@ -1049,25 +1053,22 @@ func (d ChanDir) String() string { // Method returns the i'th method in the type's method set. func (t *interfaceType) Method(i int) (m Method) { - if i < 0 || i >= len(t.methods) { - return + if i < 0 || i >= len(t.expMethods) { + panic("reflect: Method index out of range") } - p := &t.methods[i] + p := &t.expMethods[i] pname := t.nameOff(p.name) m.Name = pname.name() if !pname.isExported() { - m.PkgPath = pname.pkgPath() - if m.PkgPath == "" { - m.PkgPath = t.pkgPath.name() - } + panic("reflect: unexported method: " + pname.name()) } m.Type = toType(t.typeOff(p.typ)) m.Index = i return } -// NumMethod returns the number of interface methods in the type's method set. -func (t *interfaceType) NumMethod() int { return len(t.methods) } +// NumMethod returns the number of exported interface methods in the type's method set. +func (t *interfaceType) NumMethod() int { return len(t.expMethods) } // MethodByName method with the given name in the type's method set. 
func (t *interfaceType) MethodByName(name string) (m Method, ok bool) { @@ -1075,8 +1076,8 @@ func (t *interfaceType) MethodByName(name string) (m Method, ok bool) { return } var p *imethod - for i := range t.methods { - p = &t.methods[i] + for i := range t.expMethods { + p = &t.expMethods[i] if t.nameOff(p.name).name() == name { return t.Method(i), true } @@ -1485,9 +1486,10 @@ func implements(T, V *rtype) bool { return false } t := (*interfaceType)(unsafe.Pointer(T)) - if len(t.methods) == 0 { + if t.isEmpty() { return true } + tmethods := t.methods() // The same algorithm applies in both cases, but the // method tables for an interface type and a concrete type @@ -1504,10 +1506,11 @@ func implements(T, V *rtype) bool { if V.Kind() == Interface { v := (*interfaceType)(unsafe.Pointer(V)) i := 0 - for j := 0; j < len(v.methods); j++ { - tm := &t.methods[i] + vmethods := v.methods() + for j := 0; j < len(vmethods); j++ { + tm := &tmethods[i] tmName := t.nameOff(tm.name) - vm := &v.methods[j] + vm := &vmethods[j] vmName := V.nameOff(vm.name) if vmName.name() == tmName.name() && V.typeOff(vm.typ) == t.typeOff(tm.typ) { if !tmName.isExported() { @@ -1523,7 +1526,7 @@ func implements(T, V *rtype) bool { continue } } - if i++; i >= len(t.methods) { + if i++; i >= len(tmethods) { return true } } @@ -1538,7 +1541,7 @@ func implements(T, V *rtype) bool { i := 0 vmethods := v.methods() for j := 0; j < int(v.mcount); j++ { - tm := &t.methods[i] + tm := &tmethods[i] tmName := t.nameOff(tm.name) vm := vmethods[j] vmName := V.nameOff(vm.name) @@ -1556,7 +1559,7 @@ func implements(T, V *rtype) bool { continue } } - if i++; i >= len(t.methods) { + if i++; i >= len(tmethods) { return true } } @@ -1658,7 +1661,7 @@ func haveIdenticalUnderlyingType(T, V *rtype, cmpTags bool) bool { case Interface: t := (*interfaceType)(unsafe.Pointer(T)) v := (*interfaceType)(unsafe.Pointer(V)) - if len(t.methods) == 0 && len(v.methods) == 0 { + if t.isEmpty() && v.isEmpty() { return true } // Might 
have the same methods but still @@ -2442,7 +2445,7 @@ func StructOf(fields []StructField) Type { switch f.typ.Kind() { case Interface: ift := (*interfaceType)(unsafe.Pointer(ft)) - for im, m := range ift.methods { + for im, m := range ift.methods() { if ift.nameOff(m.name).pkgPath() != "" { // TODO(sbinet). Issue 15924. panic("reflect: embedded interface with unexported method(s) not implemented") @@ -3149,3 +3152,11 @@ func addTypeBits(bv *bitVector, offset uintptr, t *rtype) { } } } + +func isEmptyIface(rt *rtype) bool { + if rt.Kind() != Interface { + return false + } + tt := (*interfaceType)(unsafe.Pointer(rt)) + return len(tt.methods()) == 0 +} diff --git a/src/reflect/value.go b/src/reflect/value.go index a14131e1f8..bb6371b867 100644 --- a/src/reflect/value.go +++ b/src/reflect/value.go @@ -635,10 +635,11 @@ func methodReceiver(op string, v Value, methodIndex int) (rcvrtype *rtype, t *fu i := methodIndex if v.typ.Kind() == Interface { tt := (*interfaceType)(unsafe.Pointer(v.typ)) - if uint(i) >= uint(len(tt.methods)) { + ttmethods := tt.methods() + if uint(i) >= uint(len(ttmethods)) { panic("reflect: internal error: invalid method index") } - m := &tt.methods[i] + m := &ttmethods[i] if !tt.nameOff(m.name).isExported() { panic("reflect: " + op + " of unexported method") } @@ -812,7 +813,7 @@ func (v Value) Elem() Value { switch k { case Interface: var eface interface{} - if v.typ.NumMethod() == 0 { + if isEmptyIface(v.typ) { eface = *(*interface{})(v.ptr) } else { eface = (interface{})(*(*interface { @@ -1033,7 +1034,7 @@ func valueInterface(v Value, safe bool) interface{} { // Special case: return the element inside the interface. // Empty interface has one layout, all interfaces with // methods have a second layout. - if v.NumMethod() == 0 { + if isEmptyIface(v.typ) { return *(*interface{})(v.ptr) } return *(*interface { @@ -1908,10 +1909,11 @@ func (v Value) Type() Type { if v.typ.Kind() == Interface { // Method on interface. 
tt := (*interfaceType)(unsafe.Pointer(v.typ)) - if uint(i) >= uint(len(tt.methods)) { + ttmethods := tt.methods() + if uint(i) >= uint(len(ttmethods)) { panic("reflect: internal error: invalid method index") } - m := &tt.methods[i] + m := &ttmethods[i] return v.typ.typeOff(m.typ) } // Method on concrete type. @@ -2429,7 +2431,7 @@ func (v Value) assignTo(context string, dst *rtype, target unsafe.Pointer) Value return Value{dst, nil, flag(Interface)} } x := valueInterface(v, false) - if dst.NumMethod() == 0 { + if isEmptyIface(dst) { *(*interface{})(target) = x } else { ifaceE2I(dst, x, target) @@ -2718,10 +2720,11 @@ func cvtDirect(v Value, typ Type) Value { func cvtT2I(v Value, typ Type) Value { target := unsafe_New(typ.common()) x := valueInterface(v, false) - if typ.NumMethod() == 0 { + rt := typ.(*rtype) + if isEmptyIface(rt) { *(*interface{})(target) = x } else { - ifaceE2I(typ.(*rtype), x, target) + ifaceE2I(rt, x, target) } return Value{typ.common(), target, v.flag.ro() | flagIndir | flag(Interface)} } diff --git a/src/runtime/alg.go b/src/runtime/alg.go index 0af48ab25c..4a98b84e4a 100644 --- a/src/runtime/alg.go +++ b/src/runtime/alg.go @@ -185,7 +185,7 @@ func typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr { return strhash(p, h) case kindInterface: i := (*interfacetype)(unsafe.Pointer(t)) - if len(i.mhdr) == 0 { + if i.isEmpty() { return nilinterhash(p, h) } return interhash(p, h) diff --git a/src/runtime/iface.go b/src/runtime/iface.go index 0504b89363..f8b7d429a3 100644 --- a/src/runtime/iface.go +++ b/src/runtime/iface.go @@ -31,16 +31,17 @@ func itabHashFunc(inter *interfacetype, typ *_type) uintptr { } func getitab(inter *interfacetype, typ *_type, canfail bool) *itab { - if len(inter.mhdr) == 0 { + if inter.isEmpty() { throw("internal error - misuse of itab") } + imethods := inter.methods() // easy case if typ.tflag&tflagUncommon == 0 { if canfail { return nil } - name := inter.typ.nameOff(inter.mhdr[0].name) + name := 
inter.typ.nameOff(imethods[0].name) panic(&TypeAssertionError{nil, typ, &inter.typ, name.name()}) } @@ -63,7 +64,7 @@ func getitab(inter *interfacetype, typ *_type, canfail bool) *itab { } // Entry doesn't exist yet. Make a new entry & add it. - m = (*itab)(persistentalloc(unsafe.Sizeof(itab{})+uintptr(len(inter.mhdr)-1)*sys.PtrSize, 0, &memstats.other_sys)) + m = (*itab)(persistentalloc(unsafe.Sizeof(itab{})+uintptr(len(imethods)-1)*sys.PtrSize, 0, &memstats.other_sys)) m.inter = inter m._type = typ // The hash is used in type switches. However, compiler statically generates itab's @@ -197,7 +198,8 @@ func (m *itab) init() string { // and interface names are unique, // so can iterate over both in lock step; // the loop is O(ni+nt) not O(ni*nt). - ni := len(inter.mhdr) + imethods := inter.methods() + ni := len(imethods) nt := int(x.mcount) xmhdr := (*[1 << 16]method)(add(unsafe.Pointer(x), uintptr(x.moff)))[:nt:nt] j := 0 @@ -205,7 +207,7 @@ func (m *itab) init() string { var fun0 unsafe.Pointer imethods: for k := 0; k < ni; k++ { - i := &inter.mhdr[k] + i := &imethods[k] itype := inter.typ.typeOff(i.ityp) name := inter.typ.nameOff(i.name) iname := name.name() diff --git a/src/runtime/mfinal.go b/src/runtime/mfinal.go index cd6196dcab..6676ae6736 100644 --- a/src/runtime/mfinal.go +++ b/src/runtime/mfinal.go @@ -210,7 +210,7 @@ func runfinq() { // set up with empty interface (*eface)(frame)._type = &f.ot.typ (*eface)(frame).data = f.arg - if len(ityp.mhdr) != 0 { + if !ityp.isEmpty() { // convert to interface with methods // this conversion is guaranteed to succeed - we checked in SetFinalizer *(*iface)(frame) = assertE2I(ityp, *(*eface)(frame)) @@ -394,7 +394,7 @@ func SetFinalizer(obj interface{}, finalizer interface{}) { } case fint.kind&kindMask == kindInterface: ityp := (*interfacetype)(unsafe.Pointer(fint)) - if len(ityp.mhdr) == 0 { + if ityp.isEmpty() { // ok - satisfies empty interface goto okarg } diff --git a/src/runtime/type.go b/src/runtime/type.go 
index 81455f3532..36492619e1 100644 --- a/src/runtime/type.go +++ b/src/runtime/type.go @@ -366,7 +366,19 @@ type imethod struct { type interfacetype struct { typ _type pkgpath name - mhdr []imethod + // expMethods contains all interface methods. + // + // - len(expMethods) returns number of exported methods. + // - cap(expMethods) returns all interface methods, including both exported/non-exported methods. + expMethods []imethod +} + +func (it *interfacetype) methods() []imethod { + return it.expMethods[:cap(it.expMethods)] +} + +func (it *interfacetype) isEmpty() bool { + return cap(it.expMethods) == 0 } type maptype struct { @@ -664,13 +676,15 @@ func typesEqual(t, v *_type, seen map[_typePair]struct{}) bool { if it.pkgpath.name() != iv.pkgpath.name() { return false } - if len(it.mhdr) != len(iv.mhdr) { + itmethods := it.methods() + ivmethods := iv.methods() + if len(itmethods) != len(ivmethods) { return false } - for i := range it.mhdr { - tm := &it.mhdr[i] - vm := &iv.mhdr[i] - // Note the mhdr array can be relocated from + for i := range itmethods { + tm := &itmethods[i] + vm := &ivmethods[i] + // Note the expMethods array can be relocated from // another module. See #17724. 
tname := resolveNameOff(unsafe.Pointer(tm), tm.name) vname := resolveNameOff(unsafe.Pointer(vm), vm.name) From 2be7788f8383c2330cd96db53273e2995d4468f8 Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Thu, 8 Oct 2020 08:42:11 -0700 Subject: [PATCH 158/281] doc: update install docs for 387->softfloat transition Fixes #41861 Change-Id: I7aa9370c7762986ee07ba6ff7f6ebda067559f06 Reviewed-on: https://go-review.googlesource.com/c/go/+/260757 Trust: Keith Randall Reviewed-by: Ian Lance Taylor --- doc/install-source.html | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/doc/install-source.html b/doc/install-source.html index 86a4644c0c..c6dc3aed43 100644 --- a/doc/install-source.html +++ b/doc/install-source.html @@ -666,16 +666,13 @@ For example, you should not set $GOHOSTARCH to arm on an x86 system.

-
  • $GO386 (for 386 only, default is auto-detected -if built on either 386 or amd64, 387 otherwise) +
  • $GO386 (for 386 only, defaults to sse2)

    -This controls the code generated by gc to use either the 387 floating-point unit -(set to 387) or SSE2 instructions (set to sse2) for -floating point computations. +This variable controls how gc implements floating point computations.

      -
    • GO386=387: use x87 for floating point operations; should support all x86 chips (Pentium MMX or later).
    • -
    • GO386=sse2: use SSE2 for floating point operations; has better performance than 387, but only available on Pentium 4/Opteron/Athlon 64 or later.
    • +
    • GO386=softfloat: use software floating point operations; should support all x86 chips (Pentium MMX or later).
    • +
    • GO386=sse2: use SSE2 for floating point operations; has better performance but only available on Pentium 4/Opteron/Athlon 64 or later.
  • From 33511fb959f8f0edd5e831a4b41523daf9d84e87 Mon Sep 17 00:00:00 2001 From: Egon Elbre Date: Wed, 23 Sep 2020 14:06:28 +0300 Subject: [PATCH 159/281] net/http/pprof: remove html/template dependency html/template indirectly uses reflect MethodByName, this causes linker to use conservative mode resulting in larger binaries. The template here is trivial and can be replaced by string manipulation. This reduces a binary using only net/http/pprof by ~2.5MB. Fixes #41569 Change-Id: I240e1daa6376182ff4961997ee3ec7b96cb07be8 Reviewed-on: https://go-review.googlesource.com/c/go/+/256900 Reviewed-by: Brad Fitzpatrick Reviewed-by: Hajime Hoshi Run-TryBot: Brad Fitzpatrick TryBot-Result: Go Bot Trust: Hajime Hoshi Trust: Brad Fitzpatrick --- src/go/build/deps_test.go | 2 +- src/net/http/pprof/pprof.go | 60 +++++++++++++++++++++---------------- 2 files changed, 36 insertions(+), 26 deletions(-) diff --git a/src/go/build/deps_test.go b/src/go/build/deps_test.go index fa8ecf10f4..42382d583c 100644 --- a/src/go/build/deps_test.go +++ b/src/go/build/deps_test.go @@ -449,7 +449,7 @@ var depsRules = ` OS, compress/gzip, regexp < internal/profile; - html/template, internal/profile, net/http, runtime/pprof, runtime/trace + html, internal/profile, net/http, runtime/pprof, runtime/trace < net/http/pprof; # RPC diff --git a/src/net/http/pprof/pprof.go b/src/net/http/pprof/pprof.go index 5ff7fdc3de..2bfcfb9545 100644 --- a/src/net/http/pprof/pprof.go +++ b/src/net/http/pprof/pprof.go @@ -61,11 +61,12 @@ import ( "bytes" "context" "fmt" - "html/template" + "html" "internal/profile" "io" "log" "net/http" + "net/url" "os" "runtime" "runtime/pprof" @@ -352,6 +353,13 @@ var profileDescriptions = map[string]string{ "trace": "A trace of execution of the current program. You can specify the duration in the seconds GET parameter. 
After you get the trace file, use the go tool trace command to investigate the trace.", } +type profileEntry struct { + Name string + Href string + Desc string + Count int +} + // Index responds with the pprof-formatted profile named by the request. // For example, "/debug/pprof/heap" serves the "heap" profile. // Index responds to a request for "/debug/pprof/" with an HTML page @@ -368,17 +376,11 @@ func Index(w http.ResponseWriter, r *http.Request) { w.Header().Set("X-Content-Type-Options", "nosniff") w.Header().Set("Content-Type", "text/html; charset=utf-8") - type profile struct { - Name string - Href string - Desc string - Count int - } - var profiles []profile + var profiles []profileEntry for _, p := range pprof.Profiles() { - profiles = append(profiles, profile{ + profiles = append(profiles, profileEntry{ Name: p.Name(), - Href: p.Name() + "?debug=1", + Href: p.Name(), Desc: profileDescriptions[p.Name()], Count: p.Count(), }) @@ -386,7 +388,7 @@ func Index(w http.ResponseWriter, r *http.Request) { // Adding other profiles exposed from within this package for _, p := range []string{"cmdline", "profile", "trace"} { - profiles = append(profiles, profile{ + profiles = append(profiles, profileEntry{ Name: p, Href: p, Desc: profileDescriptions[p], @@ -397,12 +399,14 @@ func Index(w http.ResponseWriter, r *http.Request) { return profiles[i].Name < profiles[j].Name }) - if err := indexTmpl.Execute(w, profiles); err != nil { + if err := indexTmplExecute(w, profiles); err != nil { log.Print(err) } } -var indexTmpl = template.Must(template.New("index").Parse(` +func indexTmplExecute(w io.Writer, profiles []profileEntry) error { + var b bytes.Buffer + b.WriteString(` /debug/pprof/