[dev.link] all: merge branch 'master' into dev.link

Clean merge.

Change-Id: Ib1c3217641f3f09260133b92f406b63f14a0c51e
This commit is contained in:
Cherry Zhang 2020-03-06 14:56:14 -05:00
commit 3d3db8beff
164 changed files with 15875 additions and 22972 deletions

View File

@ -379,6 +379,38 @@ func (w *Watchdog) Start() {
}
</pre>
<h3 id="Unsynchronized_send_and_close_operations">Unsynchronized send and close operations</h3>
<p>
As this example demonstrates, unsynchronized send and close operations
on the same channel can also be a race condition:
</p>
<pre>
c := make(chan struct{}) // or buffered channel
// The race detector cannot derive the happens-before relation
// for the following send and close operations. These two operations
// are unsynchronized and happen concurrently.
go func() { c <- struct{}{} }()
close(c)
</pre>
<p>
According to the Go memory model, a send on a channel happens before
the corresponding receive from that channel completes. To synchronize
send and close operations, use a receive operation that guarantees
the send is done before the close:
</p>
<pre>
c := make(chan struct{}) // or buffered channel
go func() { c <- struct{}{} }()
<-c
close(c)
</pre>
<h2 id="Supported_Systems">Supported Systems</h2>
<p>

View File

@ -759,6 +759,19 @@ Do not send CLs removing the interior tags from such phrases.
</dd>
</dl><!-- net/textproto -->
<dl id="net/url"><dt><a href="/pkg/net/url/">net/url</a></dt>
<dd>
<p><!-- CL 185117 -->
When parsing of a URL fails
(for example by <a href="/pkg/net/url/#Parse"><code>Parse</code></a>
or <a href="/pkg/net/url/#ParseRequestURI"><code>ParseRequestURI</code></a>),
the resulting <a href="/pkg/net/url/#Error.Error"><code>Error</code></a> message
will now quote the unparsable URL.
This provides clearer structure and consistency with other parsing errors.
</p>
</dd>
</dl><!-- net/url -->
<dl id="os/signal"><dt><a href="/pkg/os/signal/">os/signal</a></dt>
<dd>
<p><!-- CL 187739 -->

View File

@ -47,6 +47,14 @@ TODO
TODO
</p>
<h4 id="go-test"><code>go</code> <code>test</code></h4>
<p><!-- https://golang.org/issue/36134 -->
Changing the <code>-timeout</code> flag now invalidates cached test results. A
cached result for a test run with a long timeout will no longer count as
passing when <code>go</code> <code>test</code> is re-invoked with a short one.
</p>
<h4 id="go-flag-parsing">Flag parsing</h4>
<p><!-- https://golang.org/cl/211358 -->
@ -92,6 +100,18 @@ TODO
TODO
</p>
<dl id="pkg-runtime"><dt><a href="/pkg/runtime/">runtime</a></dt>
<dd>
<p><!-- CL 221779 -->
If <code>panic</code> is invoked with a value whose type is derived from any
of: <code>bool</code>, <code>complex64</code>, <code>complex128</code>, <code>float32</code>, <code>float64</code>,
<code>int</code>, <code>int8</code>, <code>int16</code>, <code>int32</code>, <code>int64</code>, <code>string</code>,
<code>uint</code>, <code>uint8</code>, <code>uint16</code>, <code>uint32</code>, <code>uint64</code>, <code>uintptr</code>,
then the value will be printed, instead of just its address.
</p>
</dd>
</dl>
<dl id="sync"><dt><a href="/pkg/sync/">sync</a></dt>
<dd>
<p><!-- golang.org/issue/33762 -->

View File

@ -273,9 +273,7 @@ func f() {
a = "hello, world"
&lt;-c
}
</pre>
<pre>
func main() {
go f()
c &lt;- 0

View File

@ -14,15 +14,22 @@ package cgotest
#include <stdlib.h>
#include <string.h>
#ifdef _AIX
// On AIX, SIGSTKSZ is too small to handle Go sighandler.
#define CSIGSTKSZ 0x4000
#else
#define CSIGSTKSZ SIGSTKSZ
#endif
static stack_t oss;
static char signalStack[SIGSTKSZ];
static char signalStack[CSIGSTKSZ];
static void changeSignalStack(void) {
stack_t ss;
memset(&ss, 0, sizeof ss);
ss.ss_sp = signalStack;
ss.ss_flags = 0;
ss.ss_size = SIGSTKSZ;
ss.ss_size = CSIGSTKSZ;
if (sigaltstack(&ss, &oss) < 0) {
perror("sigaltstack");
abort();

View File

@ -117,17 +117,17 @@ func LastIndex(s, sep []byte) int {
return -1
}
// Rabin-Karp search from the end of the string
hashss, pow := hashStrRev(sep)
hashss, pow := bytealg.HashStrRevBytes(sep)
last := len(s) - n
var h uint32
for i := len(s) - 1; i >= last; i-- {
h = h*primeRK + uint32(s[i])
h = h*bytealg.PrimeRK + uint32(s[i])
}
if h == hashss && Equal(s[last:], sep) {
return last
}
for i := last - 1; i >= 0; i-- {
h *= primeRK
h *= bytealg.PrimeRK
h += uint32(s[i])
h -= pow * uint32(s[i+n])
if h == hashss && Equal(s[i:i+n], sep) {
@ -1068,7 +1068,7 @@ func Index(s, sep []byte) int {
// we should cutover at even larger average skips,
// because Equal becomes that much more expensive.
// This code does not take that effect into account.
j := indexRabinKarp(s[i:], sep)
j := bytealg.IndexRabinKarpBytes(s[i:], sep)
if j < 0 {
return -1
}
@ -1077,63 +1077,3 @@ func Index(s, sep []byte) int {
}
return -1
}
// indexRabinKarp locates the first occurrence of sep in s using the
// Rabin-Karp rolling hash, returning its byte index or -1 if absent.
// Callers are expected to guarantee len(s) >= len(sep); a shorter s
// panics here just as it did in the original (out-of-range access).
func indexRabinKarp(s, sep []byte) int {
	target, pow := hashStr(sep)
	m := len(sep)

	// Hash the first m-byte window of s.
	var rolling uint32
	for _, c := range s[:m] {
		rolling = rolling*primeRK + uint32(c)
	}
	if rolling == target && Equal(s[:m], sep) {
		return 0
	}

	// Slide the window right one byte at a time. All arithmetic is
	// uint32, so the add/subtract rolling update wraps identically to
	// the split-statement form.
	for end := m + 1; end <= len(s); end++ {
		rolling = rolling*primeRK + uint32(s[end-1]) - pow*uint32(s[end-1-m])
		if rolling == target && Equal(s[end-m:end], sep) {
			return end - m
		}
	}
	return -1
}
// primeRK is the prime base used in the Rabin-Karp rolling hash.
const primeRK = 16777619

// hashStr returns the Rabin-Karp hash of sep together with
// primeRK**len(sep) (mod 2**32), the multiplier a caller needs to roll
// the leading byte out of a search window.
func hashStr(sep []byte) (uint32, uint32) {
	var h uint32
	for _, c := range sep {
		h = h*primeRK + uint32(c)
	}
	// primeRK**len(sep): a plain product gives the same uint32 result
	// as exponentiation by squaring, since * mod 2**32 is associative.
	pow := uint32(1)
	for range sep {
		pow *= primeRK
	}
	return h, pow
}

// hashStrRev is like hashStr but hashes sep back to front, for
// searches that proceed from the end of the haystack.
func hashStrRev(sep []byte) (uint32, uint32) {
	var h uint32
	for i := len(sep); i > 0; i-- {
		h = h*primeRK + uint32(sep[i-1])
	}
	pow := uint32(1)
	for range sep {
		pow *= primeRK
	}
	return h, pow
}

View File

@ -141,9 +141,10 @@ var indexTests = []BinOpTest{
{"barfoobarfooyyyzzzyyyzzzyyyzzzyyyxxxzzzyyy", "x", 33},
{"foofyfoobarfoobar", "y", 4},
{"oooooooooooooooooooooo", "r", -1},
// test fallback to Rabin-Karp.
{"oxoxoxoxoxoxoxoxoxoxoxoy", "oy", 22},
{"oxoxoxoxoxoxoxoxoxoxoxox", "oy", -1},
// test fallback to Rabin-Karp.
{"000000000000000000000000000000000000000000000000000000000000000000000001", "0000000000000000000000000000000000000000000000000000000000000000001", 5},
}
var lastIndexTests = []BinOpTest{
@ -209,6 +210,27 @@ func runIndexTests(t *testing.T, f func(s, sep []byte) int, funcName string, tes
t.Errorf("%s(%q,%q) = %v; want %v", funcName, a, b, actual, test.i)
}
}
var allocTests = []struct {
a []byte
b []byte
i int
}{
// case for function Index.
{[]byte("000000000000000000000000000000000000000000000000000000000000000000000001"), []byte("0000000000000000000000000000000000000000000000000000000000000000001"), 5},
// case for function LastIndex.
{[]byte("000000000000000000000000000000000000000000000000000000000000000010000"), []byte("00000000000000000000000000000000000000000000000000000000000001"), 3},
}
allocs := testing.AllocsPerRun(100, func() {
if i := Index(allocTests[1].a, allocTests[1].b); i != allocTests[1].i {
t.Errorf("Index([]byte(%q), []byte(%q)) = %v; want %v", allocTests[1].a, allocTests[1].b, i, allocTests[1].i)
}
if i := LastIndex(allocTests[0].a, allocTests[0].b); i != allocTests[0].i {
t.Errorf("LastIndex([]byte(%q), []byte(%q)) = %v; want %v", allocTests[0].a, allocTests[0].b, i, allocTests[0].i)
}
})
if allocs != 0 {
t.Errorf("expected no allocations, got %f", allocs)
}
}
func runIndexAnyTests(t *testing.T, f func(s []byte, chars string) int, funcName string, testCases []BinOpTest) {

View File

@ -484,6 +484,9 @@ func archMips64(linkArch *obj.LinkArch) *Arch {
for i := mips.REG_FCR0; i <= mips.REG_FCR31; i++ {
register[obj.Rconv(i)] = int16(i)
}
for i := mips.REG_W0; i <= mips.REG_W31; i++ {
register[obj.Rconv(i)] = int16(i)
}
register["HI"] = mips.REG_HI
register["LO"] = mips.REG_LO
// Pseudo-registers.
@ -501,6 +504,7 @@ func archMips64(linkArch *obj.LinkArch) *Arch {
"FCR": true,
"M": true,
"R": true,
"W": true,
}
instructions := make(map[string]obj.As)

View File

@ -63,6 +63,10 @@ func mipsRegisterNumber(name string, n int16) (int16, bool) {
if 0 <= n && n <= 31 {
return mips.REG_R0 + n, true
}
case "W":
if 0 <= n && n <= 31 {
return mips.REG_W0 + n, true
}
}
return 0, false
}

View File

@ -583,6 +583,39 @@ label4:
NEGV R1, R2 // 0001102f
RET
// MSA VMOVI
VMOVB $511, W0 // 7b0ff807
VMOVH $24, W23 // 7b20c5c7
VMOVW $-24, W15 // 7b5f43c7
VMOVD $-511, W31 // 7b700fc7
VMOVB (R0), W8 // 78000220
VMOVB 511(R3), W0 // 79ff1820
VMOVB -512(R12), W21 // 7a006560
VMOVH (R24), W12 // 7800c321
VMOVH 110(R19), W8 // 78379a21
VMOVH -70(R12), W3 // 7bdd60e1
VMOVW (R3), W31 // 78001fe2
VMOVW 64(R20), W16 // 7810a422
VMOVW -104(R17), W24 // 7be68e22
VMOVD (R3), W2 // 780018a3
VMOVD 128(R23), W19 // 7810bce3
VMOVD -256(R31), W0 // 7be0f823
VMOVB W8, (R0) // 78000224
VMOVB W0, 511(R3) // 79ff1824
VMOVB W21, -512(R12) // 7a006564
VMOVH W12, (R24) // 7800c325
VMOVH W8, 110(R19) // 78379a25
VMOVH W3, -70(R12) // 7bdd60e5
VMOVW W31, (R3) // 78001fe6
VMOVW W16, 64(R20) // 7810a426
VMOVW W24, -104(R17) // 7be68e26
VMOVD W2, (R3) // 780018a7
VMOVD W19, 128(R23) // 7810bce7
VMOVD W0, -256(R31) // 7be0f827
RET
// END
//
// LEND comma // asm doesn't support the trailing comma.

View File

@ -483,6 +483,64 @@ func TestFloat32StoreToLoadConstantFold(t *testing.T) {
}
}
// Signaling NaN values as constants.
const (
	snan32bits uint32 = 0x7f800001
	snan64bits uint64 = 0x7ff0000000000001
)

// Signaling NaNs as variables.
// Reading these at run time means the compiler cannot treat them as
// constants, so the tests below can compare the compiler's constant
// handling of signaling NaNs against the runtime path.
var snan32bitsVar uint32 = snan32bits
var snan64bitsVar uint64 = snan64bits

// TestFloatSignalingNaN checks that a signaling NaN materialized from a
// constant has exactly the same bit pattern as one materialized from a
// variable holding the same bits.
func TestFloatSignalingNaN(t *testing.T) {
	// Make sure we generate a signaling NaN from a constant properly.
	// See issue 36400.
	f32 := math.Float32frombits(snan32bits)    // constant path
	g32 := math.Float32frombits(snan32bitsVar) // variable (runtime) path
	x32 := math.Float32bits(f32)
	y32 := math.Float32bits(g32)
	if x32 != y32 {
		t.Errorf("got %x, want %x (diff=%x)", x32, y32, x32^y32)
	}

	f64 := math.Float64frombits(snan64bits)
	g64 := math.Float64frombits(snan64bitsVar)
	x64 := math.Float64bits(f64)
	y64 := math.Float64bits(g64)
	if x64 != y64 {
		t.Errorf("got %x, want %x (diff=%x)", x64, y64, x64^y64)
	}
}

// TestFloatSignalingNaNConversion checks that a signaling NaN loaded from
// a variable still compares as a NaN (x == x is false only for NaNs).
func TestFloatSignalingNaNConversion(t *testing.T) {
	// Test to make sure when we convert a signaling NaN, we get a NaN.
	// (Ideally we want a quiet NaN, but some platforms don't agree.)
	// See issue 36399.
	s32 := math.Float32frombits(snan32bitsVar)
	if s32 == s32 {
		t.Errorf("converting a NaN did not result in a NaN")
	}
	s64 := math.Float64frombits(snan64bitsVar)
	if s64 == s64 {
		t.Errorf("converting a NaN did not result in a NaN")
	}
}

// TestFloatSignalingNaNConversionConst is like
// TestFloatSignalingNaNConversion but feeds the conversion a constant,
// exercising the compiler's constant-handling path instead.
func TestFloatSignalingNaNConversionConst(t *testing.T) {
	// Test to make sure when we convert a signaling NaN, it converts to a NaN.
	// (Ideally we want a quiet NaN, but some platforms don't agree.)
	// See issue 36399 and 36400.
	s32 := math.Float32frombits(snan32bits)
	if s32 == s32 {
		t.Errorf("converting a NaN did not result in a NaN")
	}
	s64 := math.Float64frombits(snan64bits)
	if s64 == s64 {
		t.Errorf("converting a NaN did not result in a NaN")
	}
}
var sinkFloat float64
func BenchmarkMul2(b *testing.B) {

View File

@ -279,7 +279,7 @@ type Arch struct {
var thearch Arch
var (
staticbytes,
staticuint64s,
zerobase *Node
assertE2I,

View File

@ -1274,6 +1274,16 @@ func (s *state) stmt(n *Node) {
s.assign(n.Left, r, deref, skip)
case OIF:
if Isconst(n.Left, CTBOOL) {
s.stmtList(n.Left.Ninit)
if n.Left.Bool() {
s.stmtList(n.Nbody)
} else {
s.stmtList(n.Rlist)
}
break
}
bEnd := s.f.NewBlock(ssa.BlockPlain)
var likely int8
if n.Likely() {
@ -2203,7 +2213,7 @@ func (s *state) expr(n *Node) *ssa.Value {
conv = conv1
}
}
if thearch.LinkArch.Family == sys.ARM64 || thearch.LinkArch.Family == sys.Wasm || s.softFloat {
if thearch.LinkArch.Family == sys.ARM64 || thearch.LinkArch.Family == sys.Wasm || thearch.LinkArch.Family == sys.S390X || s.softFloat {
if conv1, ok1 := uint64fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
conv = conv1
}
@ -3269,7 +3279,7 @@ func init() {
}
return s.newValue2(ssa.OpMul64uover, types.NewTuple(types.Types[TUINT], types.Types[TUINT]), args[0], args[1])
},
sys.AMD64, sys.I386)
sys.AMD64, sys.I386, sys.MIPS64)
add("runtime", "KeepAlive",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
data := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, args[0])

View File

@ -565,7 +565,6 @@ opswitch:
n.Right = walkexpr(n.Right, &ll)
n.Right = addinit(n.Right, ll.Slice())
n = walkinrange(n, init)
case OPRINT, OPRINTN:
n = walkprint(n, init)
@ -838,10 +837,12 @@ opswitch:
break
}
if staticbytes == nil {
staticbytes = newname(Runtimepkg.Lookup("staticbytes"))
staticbytes.SetClass(PEXTERN)
staticbytes.Type = types.NewArray(types.Types[TUINT8], 256)
if staticuint64s == nil {
staticuint64s = newname(Runtimepkg.Lookup("staticuint64s"))
staticuint64s.SetClass(PEXTERN)
// The actual type is [256]uint64, but we use [256*8]uint8 so we can address
// individual bytes.
staticuint64s.Type = types.NewArray(types.Types[TUINT8], 256*8)
zerobase = newname(Runtimepkg.Lookup("zerobase"))
zerobase.SetClass(PEXTERN)
zerobase.Type = types.Types[TUINTPTR]
@ -857,9 +858,16 @@ opswitch:
cheapexpr(n.Left, init) // Evaluate n.Left for side-effects. See issue 19246.
value = zerobase
case fromType.IsBoolean() || (fromType.Size() == 1 && fromType.IsInteger()):
// n.Left is a bool/byte. Use staticbytes[n.Left].
// n.Left is a bool/byte. Use staticuint64s[n.Left * 8] on little-endian
// and staticuint64s[n.Left * 8 + 7] on big-endian.
n.Left = cheapexpr(n.Left, init)
value = nod(OINDEX, staticbytes, byteindex(n.Left))
// byteindex widens n.Left so that the multiplication doesn't overflow.
index := nod(OLSH, byteindex(n.Left), nodintconst(3))
index.SetBounded(true)
if thearch.LinkArch.ByteOrder == binary.BigEndian {
index = nod(OADD, index, nodintconst(7))
}
value = nod(OINDEX, staticuint64s, index)
value.SetBounded(true)
case n.Left.Class() == PEXTERN && n.Left.Name != nil && n.Left.Name.Readonly():
// n.Left is a readonly global; use it directly.
@ -2424,15 +2432,21 @@ func convnop(n *Node, t *types.Type) *Node {
return n
}
// byteindex converts n, which is byte-sized, to a uint8.
// We cannot use conv, because we allow converting bool to uint8 here,
// byteindex converts n, which is byte-sized, to an int used to index into an array.
// We cannot use conv, because we allow converting bool to int here,
// which is forbidden in user code.
func byteindex(n *Node) *Node {
if types.Identical(n.Type, types.Types[TUINT8]) {
return n
// We cannot convert from bool to int directly.
// While converting from int8 to int is possible, it would yield
// the wrong result for negative values.
// Reinterpreting the value as an unsigned byte solves both cases.
if !types.Identical(n.Type, types.Types[TUINT8]) {
n = nod(OCONV, n, nil)
n.Type = types.Types[TUINT8]
n.SetTypecheck(1)
}
n = nod(OCONV, n, nil)
n.Type = types.Types[TUINT8]
n.Type = types.Types[TINT]
n.SetTypecheck(1)
return n
}
@ -3523,133 +3537,6 @@ func (n *Node) isIntOrdering() bool {
return n.Left.Type.IsInteger() && n.Right.Type.IsInteger()
}
// walkinrange optimizes integer-in-range checks, such as 4 <= x && x < 10,
// into a single unsigned comparison, e.g. uint(x-4) < 6.
// n must be an OANDAND or OOROR node.
// init receives any statements that must run before the rewritten
// expression (threaded through to walkexpr below).
// When the pattern does not apply, n is returned unchanged.
// The result of walkinrange MUST be assigned back to n, e.g.
//	n.Left = walkinrange(n.Left)
func walkinrange(n *Node, init *Nodes) *Node {
	// We are looking for something equivalent to a opl b OP b opr c, where:
	// * a, b, and c have integer type
	// * b is side-effect-free
	// * opl and opr are each < or ≤
	// * OP is &&
	l := n.Left
	r := n.Right

	if !l.isIntOrdering() || !r.isIntOrdering() {
		return n
	}

	// Find b, if it exists, and rename appropriately.
	// Input is: l.Left l.Op l.Right ANDAND/OROR r.Left r.Op r.Right
	// Output is: a opl b(==x) ANDAND/OROR b(==x) opr c
	a, opl, b := l.Left, l.Op, l.Right
	x, opr, c := r.Left, r.Op, r.Right
	// Four flips cover the 2x2 choices of which operand of each
	// comparison is the shared expression b.
	for i := 0; ; i++ {
		if samesafeexpr(b, x) {
			break
		}
		if i == 3 {
			// Tried all permutations and couldn't find an appropriate b == x.
			return n
		}
		if i&1 == 0 {
			a, opl, b = b, brrev(opl), a
		} else {
			x, opr, c = c, brrev(opr), x
		}
	}

	// If n.Op is ||, apply de Morgan.
	// Negate the internal ops now; we'll negate the top level op at the end.
	// Henceforth assume &&.
	negateResult := n.Op == OOROR
	if negateResult {
		opl = brcom(opl)
		opr = brcom(opr)
	}

	// cmpdir maps a comparison op to its direction: -1 for </≤, +1 for >/≥.
	cmpdir := func(o Op) int {
		switch o {
		case OLE, OLT:
			return -1
		case OGE, OGT:
			return +1
		}
		Fatalf("walkinrange cmpdir %v", o)
		return 0
	}
	if cmpdir(opl) != cmpdir(opr) {
		// Not a range check; something like b < a && b < c.
		return n
	}

	switch opl {
	case OGE, OGT:
		// We have something like a > b && b ≥ c.
		// Switch and reverse ops and rename constants,
		// to make it look like a ≤ b && b < c.
		a, c = c, a
		opl, opr = brrev(opr), brrev(opl)
	}

	// We must ensure that c-a is non-negative.
	// For now, require a and c to be constants.
	// In the future, we could also support a == 0 and c == len/cap(...).
	// Unfortunately, by this point, most len/cap expressions have been
	// stored into temporary variables.
	if !Isconst(a, CTINT) || !Isconst(c, CTINT) {
		return n
	}

	// Ensure that Int64() does not overflow on a and c (it'll happen
	// for any const above 2**63; see issue #27143).
	if !a.CanInt64() || !c.CanInt64() {
		return n
	}

	if opl == OLT {
		// We have a < b && ...
		// We need a ≤ b && ... to safely use unsigned comparison tricks.
		// If a is not the maximum constant for b's type,
		// we can increment a and switch to ≤.
		if a.Int64() >= maxintval[b.Type.Etype].Int64() {
			return n
		}

		a = nodintconst(a.Int64() + 1)
		opl = OLE
	}

	bound := c.Int64() - a.Int64()
	if bound < 0 {
		// Bad news. Something like 5 <= x && x < 3.
		// Rare in practice, and we still need to generate side-effects,
		// so just leave it alone.
		return n
	}

	// We have a ≤ b && b < c (or a ≤ b && b ≤ c).
	// This is equivalent to (a-a) ≤ (b-a) && (b-a) < (c-a),
	// which is equivalent to 0 ≤ (b-a) && (b-a) < (c-a),
	// which is equivalent to uint(b-a) < uint(c-a).
	ut := b.Type.ToUnsigned()
	lhs := conv(nod(OSUB, b, a), ut)
	rhs := nodintconst(bound)
	if negateResult {
		// Negate top level.
		opr = brcom(opr)
	}
	cmp := nod(opr, lhs, rhs)
	cmp.Pos = n.Pos
	// Preserve any init statements attached to the original comparisons.
	cmp = addinit(cmp, l.Ninit.Slice())
	cmp = addinit(cmp, r.Ninit.Slice())

	// Typecheck the AST rooted at cmp...
	cmp = typecheck(cmp, ctxExpr)
	// ...but then reset cmp's type to match n's type.
	cmp.Type = n.Type
	cmp = walkexpr(cmp, init)
	return cmp
}
// return 1 if integer n must be in range [0, max), 0 otherwise
func bounded(n *Node, max int64) bool {
if n.Type == nil || !n.Type.IsInteger() {

View File

@ -1328,7 +1328,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
v.Fatalf("Pseudo-op should not make it to codegen: %s ###\n", v.LongString())
case ssa.OpPPC64InvertFlags:
v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
case ssa.OpPPC64FlagEQ, ssa.OpPPC64FlagLT, ssa.OpPPC64FlagGT:
case ssa.OpPPC64FlagEQ, ssa.OpPPC64FlagLT, ssa.OpPPC64FlagGT, ssa.OpPPC64FlagCarrySet, ssa.OpPPC64FlagCarryClear:
v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
case ssa.OpClobber:
// TODO: implement for clobberdead experiment. Nop is ok for now.

View File

@ -314,6 +314,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
case ssa.OpRISCV64MOVBstorezero, ssa.OpRISCV64MOVHstorezero, ssa.OpRISCV64MOVWstorezero, ssa.OpRISCV64MOVDstorezero:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = riscv.REG_ZERO
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
case ssa.OpRISCV64SEQZ, ssa.OpRISCV64SNEZ:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
@ -464,7 +471,7 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
case ssa.BlockRet:
s.Prog(obj.ARET)
case ssa.BlockRetJmp:
p := s.Prog(obj.AJMP)
p := s.Prog(obj.ARET)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = b.Aux.(*obj.LSym)

View File

@ -498,6 +498,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpS390XLDGR, ssa.OpS390XLGDR,
ssa.OpS390XCEFBRA, ssa.OpS390XCDFBRA, ssa.OpS390XCEGBRA, ssa.OpS390XCDGBRA,
ssa.OpS390XCFEBRA, ssa.OpS390XCFDBRA, ssa.OpS390XCGEBRA, ssa.OpS390XCGDBRA,
ssa.OpS390XCELFBR, ssa.OpS390XCDLFBR, ssa.OpS390XCELGBR, ssa.OpS390XCDLGBR,
ssa.OpS390XCLFEBR, ssa.OpS390XCLFDBR, ssa.OpS390XCLGEBR, ssa.OpS390XCLGDBR,
ssa.OpS390XLDEBR, ssa.OpS390XLEDBR,
ssa.OpS390XFNEG, ssa.OpS390XFNEGS,
ssa.OpS390XLPDFR, ssa.OpS390XLNDFR:

View File

@ -232,6 +232,34 @@ func (b *Block) Reset(kind BlockKind) {
b.AuxInt = 0
}
// resetWithControl resets b and adds control v.
// It is equivalent to b.Reset(kind); b.AddControl(v),
// except that it is one call instead of two and avoids a bounds check.
// It is intended for use by rewrite rules, where this matters.
func (b *Block) resetWithControl(kind BlockKind, v *Value) {
	b.Kind = kind
	b.ResetControls() // drop any existing control values before installing v
	b.Aux = nil
	b.AuxInt = 0
	b.Controls[0] = v // direct store into slot 0; skips AddControl's bounds check
	v.Uses++          // use-count bookkeeping that AddControl would normally do
}
// resetWithControl2 resets b and adds controls v and w.
// It is equivalent to b.Reset(kind); b.AddControl(v); b.AddControl(w),
// except that it is one call instead of three and avoids two bounds checks.
// It is intended for use by rewrite rules, where this matters.
func (b *Block) resetWithControl2(kind BlockKind, v, w *Value) {
	b.Kind = kind
	b.ResetControls() // drop any existing control values before installing v, w
	b.Aux = nil
	b.AuxInt = 0
	b.Controls[0] = v // direct stores; skip AddControl's bounds checks
	b.Controls[1] = w
	v.Uses++ // use-count bookkeeping that AddControl would normally do
	w.Uses++
}
// AddEdgeTo adds an edge from block b to block c. Used during building of the
// SSA graph; do not use on an already-completed SSA graph.
func (b *Block) AddEdgeTo(c *Block) {

View File

@ -148,7 +148,7 @@ func elimIf(f *Func, loadAddr *sparseSet, dom *Block) bool {
// the number of useless instructions executed.
const maxfuseinsts = 2
if len(simple.Values) > maxfuseinsts || !allTrivial(simple) {
if len(simple.Values) > maxfuseinsts || !canSpeculativelyExecute(simple) {
return false
}
@ -305,10 +305,10 @@ func elimIfElse(f *Func, loadAddr *sparseSet, b *Block) bool {
return false
}
yes, no := b.Succs[0].Block(), b.Succs[1].Block()
if !isLeafPlain(yes) || len(yes.Values) > 1 || !allTrivial(yes) {
if !isLeafPlain(yes) || len(yes.Values) > 1 || !canSpeculativelyExecute(yes) {
return false
}
if !isLeafPlain(no) || len(no.Values) > 1 || !allTrivial(no) {
if !isLeafPlain(no) || len(no.Values) > 1 || !canSpeculativelyExecute(no) {
return false
}
if b.Succs[0].Block().Succs[0].Block() != b.Succs[1].Block().Succs[0].Block() {
@ -415,7 +415,15 @@ func shouldElimIfElse(no, yes, post *Block, arch string) bool {
}
}
func allTrivial(b *Block) bool {
// canSpeculativelyExecute reports whether every value in the block can
// be evaluated without causing any observable side effects (memory
// accesses, panics and so on) except for execution time changes. It
// also ensures that the block does not contain any phis which we can't
// speculatively execute.
// Warning: this function cannot currently detect values that represent
// instructions the execution of which need to be guarded with CPU
// hardware feature checks. See issue #34950.
func canSpeculativelyExecute(b *Block) bool {
// don't fuse memory ops, Phi ops, divides (can panic),
// or anything else with side-effects
for _, v := range b.Values {

View File

@ -141,15 +141,23 @@ func checkFunc(f *Func) {
f.Fatalf("bad int32 AuxInt value for %v", v)
}
canHaveAuxInt = true
case auxInt64, auxFloat64, auxARM64BitField:
case auxInt64, auxARM64BitField:
canHaveAuxInt = true
case auxInt128:
// AuxInt must be zero, so leave canHaveAuxInt set to false.
case auxFloat32:
canHaveAuxInt = true
if math.IsNaN(v.AuxFloat()) {
f.Fatalf("value %v has an AuxInt that encodes a NaN", v)
}
if !isExactFloat32(v.AuxFloat()) {
f.Fatalf("value %v has an AuxInt value that is not an exact float32", v)
}
case auxFloat64:
canHaveAuxInt = true
if math.IsNaN(v.AuxFloat()) {
f.Fatalf("value %v has an AuxInt that encodes a NaN", v)
}
case auxString, auxSym, auxTyp, auxArchSpecific:
canHaveAux = true
case auxSymOff, auxSymValAndOff, auxTypSize:

View File

@ -35,7 +35,8 @@ func Compile(f *Func) {
var rnd *rand.Rand
if checkEnabled {
rnd = rand.New(rand.NewSource(int64(crc32.ChecksumIEEE(([]byte)(f.Name)))))
seed := int64(crc32.ChecksumIEEE(([]byte)(f.Name))) ^ int64(checkRandSeed)
rnd = rand.New(rand.NewSource(seed))
}
// hook to print function & phase if panic happens
@ -199,7 +200,10 @@ func (p *pass) addDump(s string) {
}
// Run consistency checker between each phase
var checkEnabled = false
var (
checkEnabled = false
checkRandSeed = 0
)
// Debug output
var IntrinsicsDebug int
@ -253,7 +257,7 @@ where:
` + phasenames + `
- <flag> is one of:
on, off, debug, mem, time, test, stats, dump
on, off, debug, mem, time, test, stats, dump, seed
- <value> defaults to 1
@ -271,6 +275,10 @@ Examples:
-d=ssa/check/on
enables checking after each phase
-d=ssa/check/seed=1234
enables checking after each phase, using 1234 to seed the PRNG
used for value order randomization
-d=ssa/all/time
enables time reporting for all phases
@ -294,6 +302,12 @@ commas. For example:
debugPoset = checkEnabled
return ""
}
if phase == "check" && flag == "seed" {
checkEnabled = true
checkRandSeed = val
debugPoset = checkEnabled
return ""
}
alltime := false
allmem := false
@ -414,7 +428,7 @@ var passes = [...]pass{
{name: "gcse deadcode", fn: deadcode, required: true}, // clean out after cse and phiopt
{name: "nilcheckelim", fn: nilcheckelim},
{name: "prove", fn: prove},
{name: "fuse plain", fn: fusePlain},
{name: "early fuse", fn: fuseEarly},
{name: "decompose builtin", fn: decomposeBuiltIn, required: true},
{name: "softfloat", fn: softfloat, required: true},
{name: "late opt", fn: opt, required: true}, // TODO: split required rules and optimizing rules
@ -422,7 +436,7 @@ var passes = [...]pass{
{name: "generic deadcode", fn: deadcode, required: true}, // remove dead stores, which otherwise mess up store chain
{name: "check bce", fn: checkbce},
{name: "branchelim", fn: branchelim},
{name: "fuse", fn: fuseAll},
{name: "late fuse", fn: fuseLate},
{name: "dse", fn: dse},
{name: "writebarrier", fn: writebarrier, required: true}, // expand write barrier ops
{name: "insert resched checks", fn: insertLoopReschedChecks,
@ -477,7 +491,7 @@ var passOrder = [...]constraint{
// allow deadcode to clean up after nilcheckelim
{"nilcheckelim", "generic deadcode"},
// nilcheckelim generates sequences of plain basic blocks
{"nilcheckelim", "fuse"},
{"nilcheckelim", "late fuse"},
// nilcheckelim relies on opt to rewrite user nil checks
{"opt", "nilcheckelim"},
// tighten will be most effective when as many values have been removed as possible

View File

@ -8,18 +8,18 @@ import (
"cmd/internal/src"
)
// fusePlain runs fuse(f, fuseTypePlain).
func fusePlain(f *Func) { fuse(f, fuseTypePlain) }
// fuseEarly runs fuse(f, fuseTypePlain|fuseTypeIntInRange).
func fuseEarly(f *Func) { fuse(f, fuseTypePlain|fuseTypeIntInRange) }
// fuseAll runs fuse(f, fuseTypeAll).
func fuseAll(f *Func) { fuse(f, fuseTypeAll) }
// fuseLate runs fuse(f, fuseTypePlain|fuseTypeIf).
func fuseLate(f *Func) { fuse(f, fuseTypePlain|fuseTypeIf) }
type fuseType uint8
const (
fuseTypePlain fuseType = 1 << iota
fuseTypeIf
fuseTypeAll = fuseTypePlain | fuseTypeIf
fuseTypeIntInRange
)
// fuse simplifies control flow by joining basic blocks.
@ -32,6 +32,9 @@ func fuse(f *Func, typ fuseType) {
if typ&fuseTypeIf != 0 {
changed = fuseBlockIf(b) || changed
}
if typ&fuseTypeIntInRange != 0 {
changed = fuseIntegerComparisons(b) || changed
}
if typ&fuseTypePlain != 0 {
changed = fuseBlockPlain(b) || changed
}

View File

@ -0,0 +1,157 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssa
// fuseIntegerComparisons optimizes inequalities such as '1 <= x && x < 5',
// which can be rewritten as 'unsigned(x-1) < 4'.
//
// Look for branch structure like:
//
//	p
//	|\
//	| b
//	|/ \
//	s0 s1
//
// In our example, p has control '1 <= x', b has control 'x < 5',
// and s0 and s1 are the if and else results of the comparison.
//
// This will be optimized into:
//
//	p
//	 \
//	  b
//	 / \
//	s0 s1
//
// where b has the combined control value 'unsigned(x-1) < 4'.
// Later passes will then fuse p and b.
func fuseIntegerComparisons(b *Block) bool {
	// b must have exactly one predecessor so that redirecting p
	// accounts for every path into b.
	if len(b.Preds) != 1 {
		return false
	}
	p := b.Preds[0].Block()
	if b.Kind != BlockIf || p.Kind != BlockIf {
		return false
	}

	// Don't merge control values if b is likely to be bypassed anyway.
	if p.Likely == BranchLikely && p.Succs[0].Block() != b {
		return false
	}
	if p.Likely == BranchUnlikely && p.Succs[1].Block() != b {
		return false
	}

	// Check if the control values combine to make an integer inequality that
	// can be further optimized later.
	bc := b.Controls[0]
	pc := p.Controls[0]
	if !areMergeableInequalities(bc, pc) {
		return false
	}

	// If the first (true) successors match then we have a disjunction (||).
	// If the second (false) successors match then we have a conjunction (&&).
	for i, op := range [2]Op{OpOrB, OpAndB} {
		if p.Succs[i].Block() != b.Succs[i].Block() {
			continue
		}

		// TODO(mundaym): should we also check the cost of executing b?
		// Currently we might speculatively execute b even if b contains
		// a lot of instructions. We could just check that len(b.Values)
		// is lower than a fixed amount. Bear in mind however that the
		// other optimization passes might yet reduce the cost of b
		// significantly so we shouldn't be overly conservative.
		if !canSpeculativelyExecute(b) {
			return false
		}

		// Logically combine the control values for p and b.
		v := b.NewValue0(bc.Pos, op, bc.Type)
		v.AddArg(pc)
		v.AddArg(bc)

		// Set the combined control value as the control value for b.
		b.SetControl(v)

		// Modify p so that it jumps directly to b.
		p.removeEdge(i)
		p.Kind = BlockPlain
		p.Likely = BranchUnknown
		p.ResetControls()

		return true
	}

	// TODO: could negate condition(s) to merge controls.
	return false
}
// getConstIntArgIndex returns the index of v's first constant-integer
// argument, or -1 if v has no such argument.
func getConstIntArgIndex(v *Value) int {
	for i := range v.Args {
		op := v.Args[i].Op
		if op == OpConst8 || op == OpConst16 || op == OpConst32 || op == OpConst64 {
			return i
		}
	}
	return -1
}
// isSignedInequality reports whether v computes a < or ≤ comparison
// in the signed domain.
func isSignedInequality(v *Value) bool {
	op := v.Op
	return op == OpLess64 || op == OpLess32 || op == OpLess16 || op == OpLess8 ||
		op == OpLeq64 || op == OpLeq32 || op == OpLeq16 || op == OpLeq8
}
// isUnsignedInequality reports whether v computes a < or ≤ comparison
// in the unsigned domain.
func isUnsignedInequality(v *Value) bool {
	op := v.Op
	return op == OpLess64U || op == OpLess32U || op == OpLess16U || op == OpLess8U ||
		op == OpLeq64U || op == OpLeq32U || op == OpLeq16U || op == OpLeq8U
}
// areMergeableInequalities reports whether x and y are constant
// inequalities on the same operand that can be combined into one
// range check.
// We need both inequalities to be either in the signed or unsigned domain.
// TODO(mundaym): it would also be good to merge when we have an Eq op that
// could be transformed into a Less/Leq. For example in the unsigned
// domain 'x == 0 || 3 < x' is equivalent to 'x <= 0 || 3 < x'
func areMergeableInequalities(x, y *Value) bool {
	for _, sameDomain := range [...]func(*Value) bool{
		isSignedInequality,
		isUnsignedInequality,
	} {
		if !sameDomain(x) || !sameDomain(y) {
			continue
		}
		// Both inequalities must compare against a constant...
		xi := getConstIntArgIndex(x)
		yi := getConstIntArgIndex(y)
		if xi < 0 || yi < 0 {
			return false
		}
		// ...and their non-constant operands must be the same value.
		return x.Args[xi^1] == y.Args[yi^1]
	}
	return false
}

View File

@ -26,7 +26,7 @@ func TestFuseEliminatesOneBranch(t *testing.T) {
Exit("mem")))
CheckFunc(fun.f)
fuseAll(fun.f)
fuseLate(fun.f)
for _, b := range fun.f.Blocks {
if b == fun.blocks["then"] && b.Kind != BlockInvalid {
@ -56,7 +56,7 @@ func TestFuseEliminatesBothBranches(t *testing.T) {
Exit("mem")))
CheckFunc(fun.f)
fuseAll(fun.f)
fuseLate(fun.f)
for _, b := range fun.f.Blocks {
if b == fun.blocks["then"] && b.Kind != BlockInvalid {
@ -90,7 +90,7 @@ func TestFuseHandlesPhis(t *testing.T) {
Exit("mem")))
CheckFunc(fun.f)
fuseAll(fun.f)
fuseLate(fun.f)
for _, b := range fun.f.Blocks {
if b == fun.blocks["then"] && b.Kind != BlockInvalid {
@ -122,7 +122,7 @@ func TestFuseEliminatesEmptyBlocks(t *testing.T) {
))
CheckFunc(fun.f)
fuseAll(fun.f)
fuseLate(fun.f)
for k, b := range fun.blocks {
if k[:1] == "z" && b.Kind != BlockInvalid {
@ -153,7 +153,7 @@ func TestFuseSideEffects(t *testing.T) {
Goto("loop")))
CheckFunc(fun.f)
fuseAll(fun.f)
fuseLate(fun.f)
for _, b := range fun.f.Blocks {
if b == fun.blocks["then"] && b.Kind == BlockInvalid {
@ -196,7 +196,7 @@ func BenchmarkFuse(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
fun := c.Fun("entry", blocks...)
fuseAll(fun.f)
fuseLate(fun.f)
}
})
}

View File

@ -92,6 +92,8 @@
(Round32F ...) -> (Copy ...)
(Round64F ...) -> (Copy ...)
(CvtBoolToUint8 ...) -> (Copy ...)
// Lowering shifts
// Unsigned shifts need to return 0 if shift amount is >= width of shifted value.
// result = (arg << shift) & (shift >= argbits ? 0 : 0xffffffffffffffff)

View File

@ -154,6 +154,8 @@
(Round(32|64)F ...) -> (Copy ...)
(CvtBoolToUint8 ...) -> (Copy ...)
// Lowering shifts
// Unsigned shifts need to return 0 if shift amount is >= width of shifted value.
// result = (arg << shift) & (shift >= argbits ? 0 : 0xffffffffffffffff)
@ -756,6 +758,7 @@
(MULQconst [c] (MULQconst [d] x)) && is32Bit(c*d) -> (MULQconst [c * d] x)
(ORQ x (MOVQconst [c])) && is32Bit(c) -> (ORQconst [c] x)
(ORQ x (MOVLconst [c])) -> (ORQconst [c] x)
(ORL x (MOVLconst [c])) -> (ORLconst [c] x)
(XORQ x (MOVQconst [c])) && is32Bit(c) -> (XORQconst [c] x)
@ -1305,6 +1308,15 @@
(CMPBconst (MOVLconst [x]) [y]) && int8(x)>int8(y) && uint8(x)<uint8(y) -> (FlagGT_ULT)
(CMPBconst (MOVLconst [x]) [y]) && int8(x)>int8(y) && uint8(x)>uint8(y) -> (FlagGT_UGT)
// CMPQconst requires a 32 bit const, but we can still constant-fold 64 bit consts.
// In theory this applies to any of the simplifications above,
// but CMPQ is the only one I've actually seen occur.
(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x==y -> (FlagEQ)
(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x<y && uint64(x)<uint64(y) -> (FlagLT_ULT)
(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x<y && uint64(x)>uint64(y) -> (FlagLT_UGT)
(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x>y && uint64(x)<uint64(y) -> (FlagGT_ULT)
(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x>y && uint64(x)>uint64(y) -> (FlagGT_UGT)
// Other known comparisons.
(CMPQconst (MOVBQZX _) [c]) && 0xFF < c -> (FlagLT_ULT)
(CMPQconst (MOVWQZX _) [c]) && 0xFFFF < c -> (FlagLT_ULT)
@ -1478,6 +1490,12 @@
(BTCQconst [c] (MOVQconst [d])) -> (MOVQconst [d^(1<<uint32(c))])
(BTCLconst [c] (MOVLconst [d])) -> (MOVLconst [d^(1<<uint32(c))])
// If c or d doesn't fit into 32 bits, then we can't construct ORQconst,
// but we can still constant-fold.
// In theory this applies to any of the simplifications above,
// but ORQ is the only one I've actually seen occur.
(ORQ (MOVQconst [c]) (MOVQconst [d])) -> (MOVQconst [c|d])
// generic simplifications
// TODO: more of this
(ADDQ x (NEGQ y)) -> (SUBQ x y)
@ -1493,6 +1511,7 @@
(SHLLconst [d] (MOVLconst [c])) -> (MOVLconst [int64(int32(c)) << uint64(d)])
(SHLQconst [d] (MOVQconst [c])) -> (MOVQconst [c << uint64(d)])
(SHLQconst [d] (MOVLconst [c])) -> (MOVQconst [int64(int32(c)) << uint64(d)])
// Fold NEG into ADDconst/MULconst. Take care to keep c in 32 bit range.
(NEGQ (ADDQconst [c] (NEGQ x))) && c != -(1<<31) -> (ADDQconst [-c] x)

View File

@ -207,6 +207,8 @@
(Round(32|64)F ...) -> (Copy ...)
(CvtBoolToUint8 ...) -> (Copy ...)
// fused-multiply-add
(FMA x y z) -> (FMULAD z x y)

View File

@ -244,6 +244,8 @@
(Cvt32Fto64F ...) -> (FCVTSD ...)
(Cvt64Fto32F ...) -> (FCVTDS ...)
(CvtBoolToUint8 ...) -> (Copy ...)
(Round32F ...) -> (LoweredRound32F ...)
(Round64F ...) -> (LoweredRound64F ...)

View File

@ -170,6 +170,8 @@
(Cvt32Fto64F ...) -> (MOVFD ...)
(Cvt64Fto32F ...) -> (MOVDF ...)
(CvtBoolToUint8 ...) -> (Copy ...)
(Round(32|64)F ...) -> (Copy ...)
// comparisons

View File

@ -11,6 +11,8 @@
(Mul(64|32|16|8) x y) -> (Select1 (MULVU x y))
(Mul(32|64)F ...) -> (MUL(F|D) ...)
(Mul64uhilo ...) -> (MULVU ...)
(Select0 (Mul64uover x y)) -> (Select1 <typ.UInt64> (MULVU x y))
(Select1 (Mul64uover x y)) -> (SGTU <typ.Bool> (Select0 <typ.UInt64> (MULVU x y)) (MOVVconst <typ.UInt64> [0]))
(Hmul64 x y) -> (Select0 (MULV x y))
(Hmul64u x y) -> (Select0 (MULVU x y))
@ -171,6 +173,8 @@
(Cvt32Fto64F ...) -> (MOVFD ...)
(Cvt64Fto32F ...) -> (MOVDF ...)
(CvtBoolToUint8 ...) -> (Copy ...)
(Round(32|64)F ...) -> (Copy ...)
// comparisons

View File

@ -59,6 +59,8 @@
(Cvt32Fto64F ...) -> (Copy ...) // Note v will have the wrong type for patterns dependent on Float32/Float64
(Cvt64Fto32F ...) -> (FRSP ...)
(CvtBoolToUint8 ...) -> (Copy ...)
(Round(32|64)F ...) -> (LoweredRound(32|64)F ...)
(Sqrt ...) -> (FSQRT ...)
@ -78,7 +80,7 @@
// Constant folding
(FABS (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Abs(auxTo64F(x)))])
(FSQRT (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Sqrt(auxTo64F(x)))])
(FSQRT (FMOVDconst [x])) && auxTo64F(x) >= 0 -> (FMOVDconst [auxFrom64F(math.Sqrt(auxTo64F(x)))])
(FFLOOR (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Floor(auxTo64F(x)))])
(FCEIL (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Ceil(auxTo64F(x)))])
(FTRUNC (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Trunc(auxTo64F(x)))])
@ -116,47 +118,22 @@
(ROTLW x (MOVDconst [c])) -> (ROTLWconst x [c&31])
(ROTL x (MOVDconst [c])) -> (ROTLconst x [c&63])
(Lsh64x64 x (Const64 [c])) && uint64(c) < 64 -> (SLDconst x [c])
(Rsh64x64 x (Const64 [c])) && uint64(c) < 64 -> (SRADconst x [c])
(Rsh64Ux64 x (Const64 [c])) && uint64(c) < 64 -> (SRDconst x [c])
(Lsh32x64 x (Const64 [c])) && uint64(c) < 32 -> (SLWconst x [c])
(Rsh32x64 x (Const64 [c])) && uint64(c) < 32 -> (SRAWconst x [c])
(Rsh32Ux64 x (Const64 [c])) && uint64(c) < 32 -> (SRWconst x [c])
(Lsh16x64 x (Const64 [c])) && uint64(c) < 16 -> (SLWconst x [c])
(Rsh16x64 x (Const64 [c])) && uint64(c) < 16 -> (SRAWconst (SignExt16to32 x) [c])
(Rsh16Ux64 x (Const64 [c])) && uint64(c) < 16 -> (SRWconst (ZeroExt16to32 x) [c])
(Lsh8x64 x (Const64 [c])) && uint64(c) < 8 -> (SLWconst x [c])
(Rsh8x64 x (Const64 [c])) && uint64(c) < 8 -> (SRAWconst (SignExt8to32 x) [c])
(Rsh8Ux64 x (Const64 [c])) && uint64(c) < 8 -> (SRWconst (ZeroExt8to32 x) [c])
(Lsh64x32 x (Const64 [c])) && uint32(c) < 64 -> (SLDconst x [c])
(Rsh64x32 x (Const64 [c])) && uint32(c) < 64 -> (SRADconst x [c])
(Rsh64Ux32 x (Const64 [c])) && uint32(c) < 64 -> (SRDconst x [c])
(Lsh32x32 x (Const64 [c])) && uint32(c) < 32 -> (SLWconst x [c])
(Rsh32x32 x (Const64 [c])) && uint32(c) < 32 -> (SRAWconst x [c])
(Rsh32Ux32 x (Const64 [c])) && uint32(c) < 32 -> (SRWconst x [c])
(Lsh16x32 x (Const64 [c])) && uint32(c) < 16 -> (SLWconst x [c])
(Rsh16x32 x (Const64 [c])) && uint32(c) < 16 -> (SRAWconst (SignExt16to32 x) [c])
(Rsh16Ux32 x (Const64 [c])) && uint32(c) < 16 -> (SRWconst (ZeroExt16to32 x) [c])
(Lsh8x32 x (Const64 [c])) && uint32(c) < 8 -> (SLWconst x [c])
(Rsh8x32 x (Const64 [c])) && uint32(c) < 8 -> (SRAWconst (SignExt8to32 x) [c])
(Rsh8Ux32 x (Const64 [c])) && uint32(c) < 8 -> (SRWconst (ZeroExt8to32 x) [c])
// large constant shifts
(Lsh64x64 _ (Const64 [c])) && uint64(c) >= 64 -> (MOVDconst [0])
(Rsh64Ux64 _ (Const64 [c])) && uint64(c) >= 64 -> (MOVDconst [0])
(Lsh32x64 _ (Const64 [c])) && uint64(c) >= 32 -> (MOVDconst [0])
(Rsh32Ux64 _ (Const64 [c])) && uint64(c) >= 32 -> (MOVDconst [0])
(Lsh16x64 _ (Const64 [c])) && uint64(c) >= 16 -> (MOVDconst [0])
(Rsh16Ux64 _ (Const64 [c])) && uint64(c) >= 16 -> (MOVDconst [0])
(Lsh8x64 _ (Const64 [c])) && uint64(c) >= 8 -> (MOVDconst [0])
(Rsh8Ux64 _ (Const64 [c])) && uint64(c) >= 8 -> (MOVDconst [0])
(Lsh64x64 _ (MOVDconst [c])) && uint64(c) >= 64 -> (MOVDconst [0])
(Rsh64Ux64 _ (MOVDconst [c])) && uint64(c) >= 64 -> (MOVDconst [0])
(Lsh32x64 _ (MOVDconst [c])) && uint64(c) >= 32 -> (MOVDconst [0])
(Rsh32Ux64 _ (MOVDconst [c])) && uint64(c) >= 32 -> (MOVDconst [0])
(Lsh16x64 _ (MOVDconst [c])) && uint64(c) >= 16 -> (MOVDconst [0])
(Rsh16Ux64 _ (MOVDconst [c])) && uint64(c) >= 16 -> (MOVDconst [0])
(Lsh8x64 _ (MOVDconst [c])) && uint64(c) >= 8 -> (MOVDconst [0])
(Rsh8Ux64 _ (MOVDconst [c])) && uint64(c) >= 8 -> (MOVDconst [0])
// large constant signed right shift, we leave the sign bit
(Rsh64x64 x (Const64 [c])) && uint64(c) >= 64 -> (SRADconst x [63])
(Rsh32x64 x (Const64 [c])) && uint64(c) >= 32 -> (SRAWconst x [63])
(Rsh16x64 x (Const64 [c])) && uint64(c) >= 16 -> (SRAWconst (SignExt16to32 x) [63])
(Rsh8x64 x (Const64 [c])) && uint64(c) >= 8 -> (SRAWconst (SignExt8to32 x) [63])
(Rsh64x64 x (MOVDconst [c])) && uint64(c) >= 64 -> (SRADconst x [63])
(Rsh32x64 x (MOVDconst [c])) && uint64(c) >= 32 -> (SRAWconst x [63])
(Rsh16x64 x (MOVDconst [c])) && uint64(c) >= 16 -> (SRAWconst (SignExt16to32 x) [63])
(Rsh8x64 x (MOVDconst [c])) && uint64(c) >= 8 -> (SRAWconst (SignExt8to32 x) [63])
// constant shifts
(Lsh64x64 x (MOVDconst [c])) && uint64(c) < 64 -> (SLDconst x [c])
@ -297,11 +274,13 @@
(MaskIfNotCarry (ADDconstForCarry [c] (ANDconst [d] _))) && c < 0 && d > 0 && c + d < 0 -> (MOVDconst [-1])
(ORN x (MOVDconst [-1])) -> x
// Potentially useful optimizing rewrites.
// (ADDconstForCarry [k] c), k < 0 && (c < 0 || k+c >= 0) -> CarrySet
// (ADDconstForCarry [k] c), K < 0 && (c >= 0 && k+c < 0) -> CarryClear
// (MaskIfNotCarry CarrySet) -> 0
// (MaskIfNotCarry CarrySet) -> -1
(ADDconstForCarry [c] (MOVDconst [d])) && int64(int16(c)) < 0 && (int64(int16(c)) < 0 || int64(int16(c)) + d >= 0) -> (FlagCarryClear)
(ADDconstForCarry [c] (MOVDconst [d])) && int64(int16(c)) < 0 && int64(int16(c)) >= 0 && int64(int16(c)) + d < 0 -> (FlagCarrySet)
(MaskIfNotCarry (FlagCarrySet)) -> (MOVDconst [0])
(MaskIfNotCarry (FlagCarryClear)) -> (MOVDconst [-1])
(S(RAD|RAW|RD|RW|LD|LW) x (MOVDconst [c])) -> (S(RAD|RAW|RD|RW|LD|LW)const [c] x)
(Addr ...) -> (MOVDaddr ...)
(LocalAddr {sym} base _) -> (MOVDaddr {sym} base)
@ -662,6 +641,9 @@
(AND (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c&d])
(OR (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c|d])
(XOR (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c^d])
(ORN (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c|^d])
(ANDN (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c&^d])
(NOR (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [^(c|d)])
// Discover consts
(AND x (MOVDconst [c])) && isU16Bit(c) -> (ANDconst [c] x)

View File

@ -205,7 +205,7 @@ func init() {
{name: "ROTLW", argLength: 2, reg: gp21, asm: "ROTLW"}, // uint32(arg0) rotate left by arg1 mod 32
{name: "LoweredAdd64Carry", argLength: 3, reg: gp32, resultNotInArgs: true}, // arg0 + arg1 + carry, returns (sum, carry)
{name: "ADDconstForCarry", argLength: 1, reg: regInfo{inputs: []regMask{gp | sp | sb}, clobbers: tmp}, aux: "Int16", asm: "ADDC", typ: "Flags"}, // _, carry := arg0 + aux
{name: "ADDconstForCarry", argLength: 1, reg: regInfo{inputs: []regMask{gp | sp | sb}, clobbers: tmp}, aux: "Int16", asm: "ADDC", typ: "Flags"}, // _, carry := arg0 + auxint
{name: "MaskIfNotCarry", argLength: 1, reg: crgp, asm: "ADDME", typ: "Int64"}, // carry - 1 (if carry then 0 else -1)
{name: "SRADconst", argLength: 1, reg: gp11, asm: "SRAD", aux: "Int64"}, // arg0 >>a aux, 64 bits
@ -588,10 +588,11 @@ func init() {
// These ops are for temporary use by rewrite rules. They
// cannot appear in the generated assembly.
{name: "FlagEQ"}, // equal
{name: "FlagLT"}, // signed < or unsigned <
{name: "FlagGT"}, // signed > or unsigned >
{name: "FlagEQ"}, // equal
{name: "FlagLT"}, // signed < or unsigned <
{name: "FlagGT"}, // signed > or unsigned >
{name: "FlagCarrySet"}, // carry flag set
{name: "FlagCarryClear"}, // carry flag clear
}
blocks := []blockData{

View File

@ -131,6 +131,8 @@
(Cvt32Fto64F ...) -> (FCVTDS ...)
(Cvt64Fto32F ...) -> (FCVTSD ...)
(CvtBoolToUint8 ...) -> (Copy ...)
(Round32F ...) -> (Copy ...)
(Round64F ...) -> (Copy ...)
@ -325,6 +327,14 @@
(MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVDstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVBstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
(MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
(MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
(MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVDstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
(MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVBUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(off1+off2) ->
(MOVBUload [off1+off2] {sym} base mem)
@ -349,6 +359,10 @@
(MOVWstore [off1+off2] {sym} base val mem)
(MOVDstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(off1+off2) ->
(MOVDstore [off1+off2] {sym} base val mem)
(MOVBstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVBstorezero [off1+off2] {sym} ptr mem)
(MOVHstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVHstorezero [off1+off2] {sym} ptr mem)
(MOVWstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVWstorezero [off1+off2] {sym} ptr mem)
(MOVDstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVDstorezero [off1+off2] {sym} ptr mem)
// Similarly, fold ADDI into MOVaddr to avoid confusing live variable analysis
// with OffPtr -> ADDI.
@ -436,9 +450,6 @@
(MOVDconst <t> [c]) && !is32Bit(c) && int32(c) < 0 -> (ADD (SLLI <t> [32] (MOVDconst [c>>32+1])) (MOVDconst [int64(int32(c))]))
(MOVDconst <t> [c]) && !is32Bit(c) && int32(c) >= 0 -> (ADD (SLLI <t> [32] (MOVDconst [c>>32+0])) (MOVDconst [int64(int32(c))]))
// Fold ADD+MOVDconst into ADDI where possible.
(ADD (MOVDconst [off]) ptr) && is32Bit(off) -> (ADDI [off] ptr)
(Addr ...) -> (MOVaddr ...)
(LocalAddr {sym} base _) -> (MOVaddr {sym} base)
@ -457,5 +468,34 @@
(ClosureCall ...) -> (CALLclosure ...)
(InterCall ...) -> (CALLinter ...)
// Optimizations
// Absorb SNEZ into branch.
(BNE (SNEZ x) yes no) -> (BNE x yes no)
// Store zero
(MOVBstore [off] {sym} ptr (MOVBconst [0]) mem) -> (MOVBstorezero [off] {sym} ptr mem)
(MOVHstore [off] {sym} ptr (MOVHconst [0]) mem) -> (MOVHstorezero [off] {sym} ptr mem)
(MOVWstore [off] {sym} ptr (MOVWconst [0]) mem) -> (MOVWstorezero [off] {sym} ptr mem)
(MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) -> (MOVDstorezero [off] {sym} ptr mem)
// Fold ADD+MOVDconst into ADDI where possible.
(ADD (MOVDconst [off]) ptr) && is32Bit(off) -> (ADDI [off] ptr)
// Convert subtraction of a const into ADDI with negative immediate, where possible.
(SUB x (MOVBconst [val])) && is32Bit(-val) -> (ADDI [-val] x)
(SUB x (MOVHconst [val])) && is32Bit(-val) -> (ADDI [-val] x)
(SUB x (MOVWconst [val])) && is32Bit(-val) -> (ADDI [-val] x)
(SUB x (MOVDconst [val])) && is32Bit(-val) -> (ADDI [-val] x)
// Subtraction of zero.
(SUB x (MOVBconst [0])) -> x
(SUB x (MOVHconst [0])) -> x
(SUB x (MOVWconst [0])) -> x
(SUB x (MOVDconst [0])) -> x
// Subtraction of zero with sign extension.
(SUBW x (MOVWconst [0])) -> (ADDIW [0] x)
// remove redundant *const ops
(ADDI [0] x) -> x

View File

@ -106,12 +106,13 @@ func init() {
callerSave := gpMask | fpMask | regNamed["g"]
var (
gpstore = regInfo{inputs: []regMask{gpspsbMask, gpspMask, 0}} // SB in first input so we can load from a global, but not in second to avoid using SB as a temporary register
gp01 = regInfo{outputs: []regMask{gpMask}}
gp11 = regInfo{inputs: []regMask{gpMask}, outputs: []regMask{gpMask}}
gp21 = regInfo{inputs: []regMask{gpMask, gpMask}, outputs: []regMask{gpMask}}
gpload = regInfo{inputs: []regMask{gpspsbMask, 0}, outputs: []regMask{gpMask}}
gp11sb = regInfo{inputs: []regMask{gpspsbMask}, outputs: []regMask{gpMask}}
gpstore = regInfo{inputs: []regMask{gpspsbMask, gpspMask, 0}} // SB in first input so we can load from a global, but not in second to avoid using SB as a temporary register
gpstore0 = regInfo{inputs: []regMask{gpspsbMask}}
gp01 = regInfo{outputs: []regMask{gpMask}}
gp11 = regInfo{inputs: []regMask{gpMask}, outputs: []regMask{gpMask}}
gp21 = regInfo{inputs: []regMask{gpMask, gpMask}, outputs: []regMask{gpMask}}
gpload = regInfo{inputs: []regMask{gpspsbMask, 0}, outputs: []regMask{gpMask}}
gp11sb = regInfo{inputs: []regMask{gpspsbMask}, outputs: []regMask{gpMask}}
fp11 = regInfo{inputs: []regMask{fpMask}, outputs: []regMask{fpMask}}
fp21 = regInfo{inputs: []regMask{fpMask, fpMask}, outputs: []regMask{fpMask}}
@ -171,6 +172,12 @@ func init() {
{name: "MOVWstore", argLength: 3, reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 32 bits
{name: "MOVDstore", argLength: 3, reg: gpstore, asm: "MOV", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 64 bits
// Stores: store <size> of zero in arg0+auxint+aux; arg1=mem
{name: "MOVBstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 8 bits
{name: "MOVHstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 16 bits
{name: "MOVWstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 32 bits
{name: "MOVDstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOV", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 64 bits
// Shift ops
{name: "SLL", argLength: 2, reg: gp21, asm: "SLL"}, // arg0 << aux1
{name: "SRA", argLength: 2, reg: gp21, asm: "SRA"}, // arg0 >> aux1, signed

View File

@ -227,9 +227,23 @@
(Cvt64Fto32 ...) -> (CFDBRA ...)
(Cvt64Fto64 ...) -> (CGDBRA ...)
// Lowering float <-> uint
(Cvt32Uto32F ...) -> (CELFBR ...)
(Cvt32Uto64F ...) -> (CDLFBR ...)
(Cvt64Uto32F ...) -> (CELGBR ...)
(Cvt64Uto64F ...) -> (CDLGBR ...)
(Cvt32Fto32U ...) -> (CLFEBR ...)
(Cvt32Fto64U ...) -> (CLGEBR ...)
(Cvt64Fto32U ...) -> (CLFDBR ...)
(Cvt64Fto64U ...) -> (CLGDBR ...)
// Lowering float32 <-> float64
(Cvt32Fto64F ...) -> (LDEBR ...)
(Cvt64Fto32F ...) -> (LEDBR ...)
(CvtBoolToUint8 ...) -> (Copy ...)
(Round(32|64)F ...) -> (LoweredRound(32|64)F ...)
// Lowering shifts

View File

@ -401,8 +401,17 @@ func init() {
{name: "CDFBRA", argLength: 1, reg: gpfp, asm: "CDFBRA"}, // convert int32 to float64
{name: "CEGBRA", argLength: 1, reg: gpfp, asm: "CEGBRA"}, // convert int64 to float32
{name: "CDGBRA", argLength: 1, reg: gpfp, asm: "CDGBRA"}, // convert int64 to float64
{name: "LEDBR", argLength: 1, reg: fp11, asm: "LEDBR"}, // convert float64 to float32
{name: "LDEBR", argLength: 1, reg: fp11, asm: "LDEBR"}, // convert float32 to float64
{name: "CLFEBR", argLength: 1, reg: fpgp, asm: "CLFEBR"}, // convert float32 to uint32
{name: "CLFDBR", argLength: 1, reg: fpgp, asm: "CLFDBR"}, // convert float64 to uint32
{name: "CLGEBR", argLength: 1, reg: fpgp, asm: "CLGEBR"}, // convert float32 to uint64
{name: "CLGDBR", argLength: 1, reg: fpgp, asm: "CLGDBR"}, // convert float64 to uint64
{name: "CELFBR", argLength: 1, reg: gpfp, asm: "CELFBR"}, // convert uint32 to float32
{name: "CDLFBR", argLength: 1, reg: gpfp, asm: "CDLFBR"}, // convert uint32 to float64
{name: "CELGBR", argLength: 1, reg: gpfp, asm: "CELGBR"}, // convert uint64 to float32
{name: "CDLGBR", argLength: 1, reg: gpfp, asm: "CDLGBR"}, // convert uint64 to float64
{name: "LEDBR", argLength: 1, reg: fp11, asm: "LEDBR"}, // convert float64 to float32
{name: "LDEBR", argLength: 1, reg: fp11, asm: "LDEBR"}, // convert float32 to float64
{name: "MOVDaddr", argLength: 1, reg: addr, aux: "SymOff", rematerializeable: true, symEffect: "Read"}, // arg0 + auxint + offset encoded in aux
{name: "MOVDaddridx", argLength: 2, reg: addridx, aux: "SymOff", symEffect: "Read"}, // arg0 + arg1 + auxint + aux

View File

@ -91,6 +91,8 @@
(Cvt32Fto64F ...) -> (F64PromoteF32 ...)
(Cvt64Fto32F ...) -> (F32DemoteF64 ...)
(CvtBoolToUint8 ...) -> (Copy ...)
(Round32F ...) -> (Copy ...)
(Round64F ...) -> (Copy ...)
@ -355,7 +357,7 @@
(I64Or (I64Const [x]) (I64Const [y])) -> (I64Const [x | y])
(I64Xor (I64Const [x]) (I64Const [y])) -> (I64Const [x ^ y])
(F64Add (F64Const [x]) (F64Const [y])) -> (F64Const [auxFrom64F(auxTo64F(x) + auxTo64F(y))])
(F64Mul (F64Const [x]) (F64Const [y])) -> (F64Const [auxFrom64F(auxTo64F(x) * auxTo64F(y))])
(F64Mul (F64Const [x]) (F64Const [y])) && !math.IsNaN(auxTo64F(x) * auxTo64F(y)) -> (F64Const [auxFrom64F(auxTo64F(x) * auxTo64F(y))])
(I64Eq (I64Const [x]) (I64Const [y])) && x == y -> (I64Const [1])
(I64Eq (I64Const [x]) (I64Const [y])) && x != y -> (I64Const [0])
(I64Ne (I64Const [x]) (I64Const [y])) && x == y -> (I64Const [0])
@ -365,15 +367,16 @@
(I64ShrU (I64Const [x]) (I64Const [y])) -> (I64Const [int64(uint64(x) >> uint64(y))])
(I64ShrS (I64Const [x]) (I64Const [y])) -> (I64Const [x >> uint64(y)])
(I64Add (I64Const [x]) y) -> (I64Add y (I64Const [x]))
(I64Mul (I64Const [x]) y) -> (I64Mul y (I64Const [x]))
(I64And (I64Const [x]) y) -> (I64And y (I64Const [x]))
(I64Or (I64Const [x]) y) -> (I64Or y (I64Const [x]))
(I64Xor (I64Const [x]) y) -> (I64Xor y (I64Const [x]))
(F64Add (F64Const [x]) y) -> (F64Add y (F64Const [x]))
(F64Mul (F64Const [x]) y) -> (F64Mul y (F64Const [x]))
(I64Eq (I64Const [x]) y) -> (I64Eq y (I64Const [x]))
(I64Ne (I64Const [x]) y) -> (I64Ne y (I64Const [x]))
// TODO: declare these operations as commutative and get rid of these rules?
(I64Add (I64Const [x]) y) && y.Op != OpWasmI64Const -> (I64Add y (I64Const [x]))
(I64Mul (I64Const [x]) y) && y.Op != OpWasmI64Const -> (I64Mul y (I64Const [x]))
(I64And (I64Const [x]) y) && y.Op != OpWasmI64Const -> (I64And y (I64Const [x]))
(I64Or (I64Const [x]) y) && y.Op != OpWasmI64Const -> (I64Or y (I64Const [x]))
(I64Xor (I64Const [x]) y) && y.Op != OpWasmI64Const -> (I64Xor y (I64Const [x]))
(F64Add (F64Const [x]) y) && y.Op != OpWasmF64Const -> (F64Add y (F64Const [x]))
(F64Mul (F64Const [x]) y) && y.Op != OpWasmF64Const -> (F64Mul y (F64Const [x]))
(I64Eq (I64Const [x]) y) && y.Op != OpWasmI64Const -> (I64Eq y (I64Const [x]))
(I64Ne (I64Const [x]) y) && y.Op != OpWasmI64Const -> (I64Ne y (I64Const [x]))
(I64Eq x (I64Const [0])) -> (I64Eqz x)
(I64Ne x (I64Const [0])) -> (I64Eqz (I64Eqz x))

View File

@ -56,6 +56,7 @@
(Cvt64Fto64 (Const64F [c])) -> (Const64 [int64(auxTo64F(c))])
(Round32F x:(Const32F)) -> x
(Round64F x:(Const64F)) -> x
(CvtBoolToUint8 (ConstBool [c])) -> (Const8 [c])
(Trunc16to8 (ZeroExt8to16 x)) -> x
(Trunc32to8 (ZeroExt8to32 x)) -> x
@ -118,8 +119,8 @@
(Mul16 (Const16 [c]) (Const16 [d])) -> (Const16 [int64(int16(c*d))])
(Mul32 (Const32 [c]) (Const32 [d])) -> (Const32 [int64(int32(c*d))])
(Mul64 (Const64 [c]) (Const64 [d])) -> (Const64 [c*d])
(Mul32F (Const32F [c]) (Const32F [d])) -> (Const32F [auxFrom32F(auxTo32F(c) * auxTo32F(d))])
(Mul64F (Const64F [c]) (Const64F [d])) -> (Const64F [auxFrom64F(auxTo64F(c) * auxTo64F(d))])
(Mul32F (Const32F [c]) (Const32F [d])) && !math.IsNaN(float64(auxTo32F(c) * auxTo32F(d))) -> (Const32F [auxFrom32F(auxTo32F(c) * auxTo32F(d))])
(Mul64F (Const64F [c]) (Const64F [d])) && !math.IsNaN(auxTo64F(c) * auxTo64F(d)) -> (Const64F [auxFrom64F(auxTo64F(c) * auxTo64F(d))])
(And8 (Const8 [c]) (Const8 [d])) -> (Const8 [int64(int8(c&d))])
(And16 (Const16 [c]) (Const16 [d])) -> (Const16 [int64(int16(c&d))])
@ -144,8 +145,8 @@
(Div16u (Const16 [c]) (Const16 [d])) && d != 0 -> (Const16 [int64(int16(uint16(c)/uint16(d)))])
(Div32u (Const32 [c]) (Const32 [d])) && d != 0 -> (Const32 [int64(int32(uint32(c)/uint32(d)))])
(Div64u (Const64 [c]) (Const64 [d])) && d != 0 -> (Const64 [int64(uint64(c)/uint64(d))])
(Div32F (Const32F [c]) (Const32F [d])) -> (Const32F [auxFrom32F(auxTo32F(c) / auxTo32F(d))])
(Div64F (Const64F [c]) (Const64F [d])) -> (Const64F [auxFrom64F(auxTo64F(c) / auxTo64F(d))])
(Div32F (Const32F [c]) (Const32F [d])) && !math.IsNaN(float64(auxTo32F(c) / auxTo32F(d))) -> (Const32F [auxFrom32F(auxTo32F(c) / auxTo32F(d))])
(Div64F (Const64F [c]) (Const64F [d])) && !math.IsNaN(auxTo64F(c) / auxTo64F(d)) -> (Const64F [auxFrom64F(auxTo64F(c) / auxTo64F(d))])
(Select0 (Div128u (Const64 [0]) lo y)) -> (Div64u lo y)
(Select1 (Div128u (Const64 [0]) lo y)) -> (Mod64u lo y)
@ -253,6 +254,54 @@
(Neq16 (Const16 <t> [c]) (Add16 (Const16 <t> [d]) x)) -> (Neq16 (Const16 <t> [int64(int16(c-d))]) x)
(Neq8 (Const8 <t> [c]) (Add8 (Const8 <t> [d]) x)) -> (Neq8 (Const8 <t> [int64(int8(c-d))]) x)
// signed integer range: ( c <= x && x (<|<=) d ) -> ( unsigned(x-c) (<|<=) unsigned(d-c) )
(AndB (Leq64 (Const64 [c]) x) ((Less|Leq)64 x (Const64 [d]))) && d >= c -> ((Less|Leq)64U (Sub64 <x.Type> x (Const64 <x.Type> [c])) (Const64 <x.Type> [d-c]))
(AndB (Leq32 (Const32 [c]) x) ((Less|Leq)32 x (Const32 [d]))) && d >= c -> ((Less|Leq)32U (Sub32 <x.Type> x (Const32 <x.Type> [c])) (Const32 <x.Type> [d-c]))
(AndB (Leq16 (Const16 [c]) x) ((Less|Leq)16 x (Const16 [d]))) && d >= c -> ((Less|Leq)16U (Sub16 <x.Type> x (Const16 <x.Type> [c])) (Const16 <x.Type> [d-c]))
(AndB (Leq8 (Const8 [c]) x) ((Less|Leq)8 x (Const8 [d]))) && d >= c -> ((Less|Leq)8U (Sub8 <x.Type> x (Const8 <x.Type> [c])) (Const8 <x.Type> [d-c]))
// signed integer range: ( c < x && x (<|<=) d ) -> ( unsigned(x-(c+1)) (<|<=) unsigned(d-(c+1)) )
(AndB (Less64 (Const64 [c]) x) ((Less|Leq)64 x (Const64 [d]))) && d >= c+1 && int64(c+1) > int64(c) -> ((Less|Leq)64U (Sub64 <x.Type> x (Const64 <x.Type> [c+1])) (Const64 <x.Type> [d-c-1]))
(AndB (Less32 (Const32 [c]) x) ((Less|Leq)32 x (Const32 [d]))) && d >= c+1 && int32(c+1) > int32(c) -> ((Less|Leq)32U (Sub32 <x.Type> x (Const32 <x.Type> [c+1])) (Const32 <x.Type> [d-c-1]))
(AndB (Less16 (Const16 [c]) x) ((Less|Leq)16 x (Const16 [d]))) && d >= c+1 && int16(c+1) > int16(c) -> ((Less|Leq)16U (Sub16 <x.Type> x (Const16 <x.Type> [c+1])) (Const16 <x.Type> [d-c-1]))
(AndB (Less8 (Const8 [c]) x) ((Less|Leq)8 x (Const8 [d]))) && d >= c+1 && int8(c+1) > int8(c) -> ((Less|Leq)8U (Sub8 <x.Type> x (Const8 <x.Type> [c+1])) (Const8 <x.Type> [d-c-1]))
// unsigned integer range: ( c <= x && x (<|<=) d ) -> ( x-c (<|<=) d-c )
(AndB (Leq64U (Const64 [c]) x) ((Less|Leq)64U x (Const64 [d]))) && uint64(d) >= uint64(c) -> ((Less|Leq)64U (Sub64 <x.Type> x (Const64 <x.Type> [c])) (Const64 <x.Type> [d-c]))
(AndB (Leq32U (Const32 [c]) x) ((Less|Leq)32U x (Const32 [d]))) && uint32(d) >= uint32(c) -> ((Less|Leq)32U (Sub32 <x.Type> x (Const32 <x.Type> [c])) (Const32 <x.Type> [int64(int32(d-c))]))
(AndB (Leq16U (Const16 [c]) x) ((Less|Leq)16U x (Const16 [d]))) && uint16(d) >= uint16(c) -> ((Less|Leq)16U (Sub16 <x.Type> x (Const16 <x.Type> [c])) (Const16 <x.Type> [int64(int16(d-c))]))
(AndB (Leq8U (Const8 [c]) x) ((Less|Leq)8U x (Const8 [d]))) && uint8(d) >= uint8(c) -> ((Less|Leq)8U (Sub8 <x.Type> x (Const8 <x.Type> [c])) (Const8 <x.Type> [int64(int8(d-c))]))
// unsigned integer range: ( c < x && x (<|<=) d ) -> ( x-(c+1) (<|<=) d-(c+1) )
(AndB (Less64U (Const64 [c]) x) ((Less|Leq)64U x (Const64 [d]))) && uint64(d) >= uint64(c+1) && uint64(c+1) > uint64(c) -> ((Less|Leq)64U (Sub64 <x.Type> x (Const64 <x.Type> [c+1])) (Const64 <x.Type> [d-c-1]))
(AndB (Less32U (Const32 [c]) x) ((Less|Leq)32U x (Const32 [d]))) && uint32(d) >= uint32(c+1) && uint32(c+1) > uint32(c) -> ((Less|Leq)32U (Sub32 <x.Type> x (Const32 <x.Type> [int64(int32(c+1))])) (Const32 <x.Type> [int64(int32(d-c-1))]))
(AndB (Less16U (Const16 [c]) x) ((Less|Leq)16U x (Const16 [d]))) && uint16(d) >= uint16(c+1) && uint16(c+1) > uint16(c) -> ((Less|Leq)16U (Sub16 <x.Type> x (Const16 <x.Type> [int64(int16(c+1))])) (Const16 <x.Type> [int64(int16(d-c-1))]))
(AndB (Less8U (Const8 [c]) x) ((Less|Leq)8U x (Const8 [d]))) && uint8(d) >= uint8(c+1) && uint8(c+1) > uint8(c) -> ((Less|Leq)8U (Sub8 <x.Type> x (Const8 <x.Type> [int64(int8(c+1))])) (Const8 <x.Type> [int64(int8(d-c-1))]))
// signed integer range: ( c (<|<=) x || x < d ) -> ( unsigned(c-d) (<|<=) unsigned(x-d) )
(OrB ((Less|Leq)64 (Const64 [c]) x) (Less64 x (Const64 [d]))) && c >= d -> ((Less|Leq)64U (Const64 <x.Type> [c-d]) (Sub64 <x.Type> x (Const64 <x.Type> [d])))
(OrB ((Less|Leq)32 (Const32 [c]) x) (Less32 x (Const32 [d]))) && c >= d -> ((Less|Leq)32U (Const32 <x.Type> [c-d]) (Sub32 <x.Type> x (Const32 <x.Type> [d])))
(OrB ((Less|Leq)16 (Const16 [c]) x) (Less16 x (Const16 [d]))) && c >= d -> ((Less|Leq)16U (Const16 <x.Type> [c-d]) (Sub16 <x.Type> x (Const16 <x.Type> [d])))
(OrB ((Less|Leq)8 (Const8 [c]) x) (Less8 x (Const8 [d]))) && c >= d -> ((Less|Leq)8U (Const8 <x.Type> [c-d]) (Sub8 <x.Type> x (Const8 <x.Type> [d])))
// signed integer range: ( c (<|<=) x || x <= d ) -> ( unsigned(c-(d+1)) (<|<=) unsigned(x-(d+1)) )
(OrB ((Less|Leq)64 (Const64 [c]) x) (Leq64 x (Const64 [d]))) && c >= d+1 && int64(d+1) > int64(d) -> ((Less|Leq)64U (Const64 <x.Type> [c-d-1]) (Sub64 <x.Type> x (Const64 <x.Type> [d+1])))
(OrB ((Less|Leq)32 (Const32 [c]) x) (Leq32 x (Const32 [d]))) && c >= d+1 && int32(d+1) > int32(d) -> ((Less|Leq)32U (Const32 <x.Type> [c-d-1]) (Sub32 <x.Type> x (Const32 <x.Type> [d+1])))
(OrB ((Less|Leq)16 (Const16 [c]) x) (Leq16 x (Const16 [d]))) && c >= d+1 && int16(d+1) > int16(d) -> ((Less|Leq)16U (Const16 <x.Type> [c-d-1]) (Sub16 <x.Type> x (Const16 <x.Type> [d+1])))
(OrB ((Less|Leq)8 (Const8 [c]) x) (Leq8 x (Const8 [d]))) && c >= d+1 && int8(d+1) > int8(d) -> ((Less|Leq)8U (Const8 <x.Type> [c-d-1]) (Sub8 <x.Type> x (Const8 <x.Type> [d+1])))
// unsigned integer range: ( c (<|<=) x || x < d ) -> ( c-d (<|<=) x-d )
(OrB ((Less|Leq)64U (Const64 [c]) x) (Less64U x (Const64 [d]))) && uint64(c) >= uint64(d) -> ((Less|Leq)64U (Const64 <x.Type> [c-d]) (Sub64 <x.Type> x (Const64 <x.Type> [d])))
(OrB ((Less|Leq)32U (Const32 [c]) x) (Less32U x (Const32 [d]))) && uint32(c) >= uint32(d) -> ((Less|Leq)32U (Const32 <x.Type> [int64(int32(c-d))]) (Sub32 <x.Type> x (Const32 <x.Type> [d])))
(OrB ((Less|Leq)16U (Const16 [c]) x) (Less16U x (Const16 [d]))) && uint16(c) >= uint16(d) -> ((Less|Leq)16U (Const16 <x.Type> [int64(int16(c-d))]) (Sub16 <x.Type> x (Const16 <x.Type> [d])))
(OrB ((Less|Leq)8U (Const8 [c]) x) (Less8U x (Const8 [d]))) && uint8(c) >= uint8(d) -> ((Less|Leq)8U (Const8 <x.Type> [int64( int8(c-d))]) (Sub8 <x.Type> x (Const8 <x.Type> [d])))
// unsigned integer range: ( c (<|<=) x || x <= d ) -> ( c-(d+1) (<|<=) x-(d+1) )
(OrB ((Less|Leq)64U (Const64 [c]) x) (Leq64U x (Const64 [d]))) && uint64(c) >= uint64(d+1) && uint64(d+1) > uint64(d) -> ((Less|Leq)64U (Const64 <x.Type> [c-d-1]) (Sub64 <x.Type> x (Const64 <x.Type> [d+1])))
(OrB ((Less|Leq)32U (Const32 [c]) x) (Leq32U x (Const32 [d]))) && uint32(c) >= uint32(d+1) && uint32(d+1) > uint32(d) -> ((Less|Leq)32U (Const32 <x.Type> [int64(int32(c-d-1))]) (Sub32 <x.Type> x (Const32 <x.Type> [int64(int32(d+1))])))
(OrB ((Less|Leq)16U (Const16 [c]) x) (Leq16U x (Const16 [d]))) && uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d) -> ((Less|Leq)16U (Const16 <x.Type> [int64(int16(c-d-1))]) (Sub16 <x.Type> x (Const16 <x.Type> [int64(int16(d+1))])))
(OrB ((Less|Leq)8U (Const8 [c]) x) (Leq8U x (Const8 [d]))) && uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d) -> ((Less|Leq)8U (Const8 <x.Type> [int64( int8(c-d-1))]) (Sub8 <x.Type> x (Const8 <x.Type> [int64( int8(d+1))])))
// Canonicalize x-const to x+(-const)
(Sub64 x (Const64 <t> [c])) && x.Op != OpConst64 -> (Add64 (Const64 <t> [-c]) x)
(Sub32 x (Const32 <t> [c])) && x.Op != OpConst32 -> (Add32 (Const32 <t> [int64(int32(-c))]) x)
@ -574,8 +623,8 @@
-> x
// Pass constants through math.Float{32,64}bits and math.Float{32,64}frombits
(Load <t1> p1 (Store {t2} p2 (Const64 [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitFloat(t1) -> (Const64F [x])
(Load <t1> p1 (Store {t2} p2 (Const32 [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitFloat(t1) -> (Const32F [auxFrom32F(math.Float32frombits(uint32(x)))])
(Load <t1> p1 (Store {t2} p2 (Const64 [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitFloat(t1) && !math.IsNaN(math.Float64frombits(uint64(x))) -> (Const64F [x])
(Load <t1> p1 (Store {t2} p2 (Const32 [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitFloat(t1) && !math.IsNaN(float64(math.Float32frombits(uint32(x)))) -> (Const32F [auxFrom32F(math.Float32frombits(uint32(x)))])
(Load <t1> p1 (Store {t2} p2 (Const64F [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitInt(t1) -> (Const64 [x])
(Load <t1> p1 (Store {t2} p2 (Const32F [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitInt(t1) -> (Const32 [int64(int32(math.Float32bits(auxTo32F(x))))])
@ -1844,7 +1893,7 @@
(Div32F x (Const32F <t> [c])) && reciprocalExact32(auxTo32F(c)) -> (Mul32F x (Const32F <t> [auxFrom32F(1/auxTo32F(c))]))
(Div64F x (Const64F <t> [c])) && reciprocalExact64(auxTo64F(c)) -> (Mul64F x (Const64F <t> [auxFrom64F(1/auxTo64F(c))]))
(Sqrt (Const64F [c])) -> (Const64F [auxFrom64F(math.Sqrt(auxTo64F(c)))])
(Sqrt (Const64F [c])) && !math.IsNaN(math.Sqrt(auxTo64F(c))) -> (Const64F [auxFrom64F(math.Sqrt(auxTo64F(c)))])
// recognize runtime.newobject and don't Zero/Nilcheck it
(Zero (Load (OffPtr [c] (SP)) mem) mem)

View File

@ -323,7 +323,12 @@ var genericOps = []opData{
{name: "Const32", aux: "Int32"}, // auxint is sign-extended 32 bits
// Note: ConstX are sign-extended even when the type of the value is unsigned.
// For instance, uint8(0xaa) is stored as auxint=0xffffffffffffffaa.
{name: "Const64", aux: "Int64"}, // value is auxint
{name: "Const64", aux: "Int64"}, // value is auxint
// Note: for both Const32F and Const64F, we disallow encoding NaNs.
// Signaling NaNs are tricky because if you do anything with them, they become quiet.
// Particularly, converting a 32 bit sNaN to 64 bit and back converts it to a qNaN.
// See issue 36399 and 36400.
// Encodings of +inf, -inf, and -0 are fine.
{name: "Const32F", aux: "Float32"}, // value is math.Float64frombits(uint64(auxint)) and is exactly representable as float 32
{name: "Const64F", aux: "Float64"}, // value is math.Float64frombits(uint64(auxint))
{name: "ConstInterface"}, // nil interface
@ -418,6 +423,7 @@ var genericOps = []opData{
{name: "Cvt64Fto64", argLength: 1},
{name: "Cvt32Fto64F", argLength: 1},
{name: "Cvt64Fto32F", argLength: 1},
{name: "CvtBoolToUint8", argLength: 1},
// Force rounding to precision of type.
{name: "Round32F", argLength: 1},

View File

@ -891,7 +891,7 @@ func genBlockRewrite(rule Rule, arch arch, data blockData) *RuleRewrite {
}
blockName, _ := getBlockInfo(outop, arch)
rr.add(stmtf("b.Reset(%s)", blockName))
var genControls [2]string
for i, control := range t[:outdata.controls] {
// Select a source position for any new control values.
// TODO: does it always make sense to use the source position
@ -904,9 +904,19 @@ func genBlockRewrite(rule Rule, arch arch, data blockData) *RuleRewrite {
}
// Generate a new control value (or copy an existing value).
v := genResult0(rr, arch, control, false, false, newpos)
rr.add(stmtf("b.AddControl(%s)", v))
genControls[i] = genResult0(rr, arch, control, false, false, newpos)
}
switch outdata.controls {
case 0:
rr.add(stmtf("b.Reset(%s)", blockName))
case 1:
rr.add(stmtf("b.resetWithControl(%s, %s)", blockName, genControls[0]))
case 2:
rr.add(stmtf("b.resetWithControl2(%s, %s, %s)", blockName, genControls[0], genControls[1]))
default:
log.Fatalf("too many controls: %d", outdata.controls)
}
if auxint != "" {
rr.add(stmtf("b.AuxInt = %s", auxint))
}
@ -991,16 +1001,21 @@ func genMatch0(rr *RuleRewrite, arch arch, match, v string, cnt map[string]int,
}
}
// Access last argument first to minimize bounds checks.
if n := len(args); n > 1 && !pregenTop {
a := args[n-1]
if a != "_" && !rr.declared(a) && token.IsIdentifier(a) && !(commutative && len(args) == 2) {
rr.add(declf(a, "%s.Args[%d]", v, n-1))
// delete the last argument so it is not reprocessed
args = args[:n-1]
} else {
rr.add(stmtf("_ = %s.Args[%d]", v, n-1))
if !pregenTop {
// Access last argument first to minimize bounds checks.
for n := len(args) - 1; n > 0; n-- {
a := args[n]
if a == "_" {
continue
}
if !rr.declared(a) && token.IsIdentifier(a) && !(commutative && len(args) == 2) {
rr.add(declf(a, "%s.Args[%d]", v, n))
// delete the last argument so it is not reprocessed
args = args[:n]
} else {
rr.add(stmtf("_ = %s.Args[%d]", v, n))
}
break
}
}
if commutative && !pregenTop {
@ -1093,9 +1108,7 @@ func genResult0(rr *RuleRewrite, arch arch, result string, top, move bool, pos s
// It in not safe in general to move a variable between blocks
// (and particularly not a phi node).
// Introduce a copy.
rr.add(stmtf("v.reset(OpCopy)"))
rr.add(stmtf("v.Type = %s.Type", result))
rr.add(stmtf("v.AddArg(%s)", result))
rr.add(stmtf("v.copyOf(%s)", result))
}
return result
}
@ -1123,8 +1136,7 @@ func genResult0(rr *RuleRewrite, arch arch, result string, top, move bool, pos s
rr.add(declf(v, "b.NewValue0(%s, Op%s%s, %s)", pos, oparch, op.name, typ))
if move && top {
// Rewrite original into a copy
rr.add(stmtf("v.reset(OpCopy)"))
rr.add(stmtf("v.AddArg(%s)", v))
rr.add(stmtf("v.copyOf(%s)", v))
}
}
@ -1134,11 +1146,21 @@ func genResult0(rr *RuleRewrite, arch arch, result string, top, move bool, pos s
if aux != "" {
rr.add(stmtf("%s.Aux = %s", v, aux))
}
for _, arg := range args {
all := new(strings.Builder)
for i, arg := range args {
x := genResult0(rr, arch, arg, false, move, pos)
rr.add(stmtf("%s.AddArg(%s)", v, x))
if i > 0 {
all.WriteString(", ")
}
all.WriteString(x)
}
switch len(args) {
case 0:
case 1:
rr.add(stmtf("%s.AddArg(%s)", v, all.String()))
default:
rr.add(stmtf("%s.AddArg%d(%s)", v, len(args), all.String()))
}
return v
}

View File

@ -93,7 +93,7 @@ td > h2 {
td.collapsed {
font-size: 12px;
width: 12px;
border: 0px;
border: 1px solid white;
padding: 0;
cursor: pointer;
background: #fafafa;
@ -247,18 +247,61 @@ svg {
outline: 1px solid #eee;
}
.highlight-aquamarine { background-color: aquamarine; }
.highlight-coral { background-color: coral; }
.highlight-lightpink { background-color: lightpink; }
.highlight-lightsteelblue { background-color: lightsteelblue; }
.highlight-palegreen { background-color: palegreen; }
.highlight-skyblue { background-color: skyblue; }
.highlight-lightgray { background-color: lightgray; }
.highlight-yellow { background-color: yellow; }
.highlight-lime { background-color: lime; }
.highlight-khaki { background-color: khaki; }
.highlight-aqua { background-color: aqua; }
.highlight-salmon { background-color: salmon; }
body.darkmode {
background-color: rgb(21, 21, 21);
color: rgb(230, 255, 255);
opacity: 100%;
}
td.darkmode {
background-color: rgb(21, 21, 21);
border: 1px solid gray;
}
body.darkmode table, th {
border: 1px solid gray;
}
.highlight-aquamarine { background-color: aquamarine; color: black; }
.highlight-coral { background-color: coral; color: black; }
.highlight-lightpink { background-color: lightpink; color: black; }
.highlight-lightsteelblue { background-color: lightsteelblue; color: black; }
.highlight-palegreen { background-color: palegreen; color: black; }
.highlight-skyblue { background-color: skyblue; color: black; }
.highlight-lightgray { background-color: lightgray; color: black; }
.highlight-yellow { background-color: yellow; color: black; }
.highlight-lime { background-color: lime; color: black; }
.highlight-khaki { background-color: khaki; color: black; }
.highlight-aqua { background-color: aqua; color: black; }
.highlight-salmon { background-color: salmon; color: black; }
/* Ensure all dead values/blocks continue to have gray font color in dark mode with highlights */
.dead-value span.highlight-aquamarine,
.dead-block.highlight-aquamarine,
.dead-value span.highlight-coral,
.dead-block.highlight-coral,
.dead-value span.highlight-lightpink,
.dead-block.highlight-lightpink,
.dead-value span.highlight-lightsteelblue,
.dead-block.highlight-lightsteelblue,
.dead-value span.highlight-palegreen,
.dead-block.highlight-palegreen,
.dead-value span.highlight-skyblue,
.dead-block.highlight-skyblue,
.dead-value span.highlight-lightgray,
.dead-block.highlight-lightgray,
.dead-value span.highlight-yellow,
.dead-block.highlight-yellow,
.dead-value span.highlight-lime,
.dead-block.highlight-lime,
.dead-value span.highlight-khaki,
.dead-block.highlight-khaki,
.dead-value span.highlight-aqua,
.dead-block.highlight-aqua,
.dead-value span.highlight-salmon,
.dead-block.highlight-salmon {
color: gray;
}
.outline-blue { outline: blue solid 2px; }
.outline-red { outline: red solid 2px; }
@ -284,6 +327,10 @@ ellipse.outline-teal { stroke-width: 2px; stroke: teal; }
ellipse.outline-maroon { stroke-width: 2px; stroke: maroon; }
ellipse.outline-black { stroke-width: 2px; stroke: black; }
/* Capture alternative for outline-black and ellipse.outline-black when in dark mode */
body.darkmode .outline-black { outline: gray solid 2px; }
body.darkmode ellipse.outline-black { outline: gray solid 2px; }
</style>
<script type="text/javascript">
@ -331,6 +378,11 @@ for (var i = 0; i < outlines.length; i++) {
}
window.onload = function() {
if (window.matchMedia && window.matchMedia("(prefers-color-scheme: dark)").matches) {
toggleDarkMode();
document.getElementById("dark-mode-button").checked = true;
}
var ssaElemClicked = function(elem, event, selections, selected) {
event.stopPropagation();
@ -584,7 +636,20 @@ function makeDraggable(event) {
function endDrag(event) {
isPointerDown = false;
}
}</script>
}
function toggleDarkMode() {
document.body.classList.toggle('darkmode');
const collapsedEls = document.getElementsByClassName('collapsed');
const len = collapsedEls.length;
for (let i = 0; i < len; i++) {
collapsedEls[i].classList.toggle('darkmode');
}
}
</script>
</head>`)
w.WriteString("<body>")
@ -616,6 +681,8 @@ Edge with a dot means that this edge follows the order in which blocks were laid
</p>
</div>
<label for="dark-mode-button" style="margin-left: 15px; cursor: pointer;">darkmode</label>
<input type="checkbox" onclick="toggleDarkMode();" id="dark-mode-button" style="cursor: pointer" />
`)
w.WriteString("<table>")
w.WriteString("<tr>")

View File

@ -87,7 +87,7 @@ func TestNilcheckSimple(t *testing.T) {
nilcheckelim(fun.f)
// clean up the removed nil check
fusePlain(fun.f)
fuse(fun.f, fuseTypePlain)
deadcode(fun.f)
CheckFunc(fun.f)
@ -124,7 +124,7 @@ func TestNilcheckDomOrder(t *testing.T) {
nilcheckelim(fun.f)
// clean up the removed nil check
fusePlain(fun.f)
fuse(fun.f, fuseTypePlain)
deadcode(fun.f)
CheckFunc(fun.f)
@ -157,7 +157,7 @@ func TestNilcheckAddr(t *testing.T) {
nilcheckelim(fun.f)
// clean up the removed nil check
fusePlain(fun.f)
fuse(fun.f, fuseTypePlain)
deadcode(fun.f)
CheckFunc(fun.f)
@ -191,7 +191,7 @@ func TestNilcheckAddPtr(t *testing.T) {
nilcheckelim(fun.f)
// clean up the removed nil check
fusePlain(fun.f)
fuse(fun.f, fuseTypePlain)
deadcode(fun.f)
CheckFunc(fun.f)
@ -235,7 +235,7 @@ func TestNilcheckPhi(t *testing.T) {
nilcheckelim(fun.f)
// clean up the removed nil check
fusePlain(fun.f)
fuse(fun.f, fuseTypePlain)
deadcode(fun.f)
CheckFunc(fun.f)
@ -276,7 +276,7 @@ func TestNilcheckKeepRemove(t *testing.T) {
nilcheckelim(fun.f)
// clean up the removed nil check
fusePlain(fun.f)
fuse(fun.f, fuseTypePlain)
deadcode(fun.f)
CheckFunc(fun.f)
@ -323,7 +323,7 @@ func TestNilcheckInFalseBranch(t *testing.T) {
nilcheckelim(fun.f)
// clean up the removed nil check
fusePlain(fun.f)
fuse(fun.f, fuseTypePlain)
deadcode(fun.f)
CheckFunc(fun.f)
@ -374,7 +374,7 @@ func TestNilcheckUser(t *testing.T) {
nilcheckelim(fun.f)
// clean up the removed nil check
fusePlain(fun.f)
fuse(fun.f, fuseTypePlain)
deadcode(fun.f)
CheckFunc(fun.f)
@ -418,7 +418,7 @@ func TestNilcheckBug(t *testing.T) {
nilcheckelim(fun.f)
// clean up the removed nil check
fusePlain(fun.f)
fuse(fun.f, fuseTypePlain)
deadcode(fun.f)
CheckFunc(fun.f)

View File

@ -66,12 +66,9 @@ func nextGoodStatementIndex(v *Value, i int, b *Block) int {
return i
}
// notStmtBoundary indicates which value opcodes can never be a statement
// boundary because they don't correspond to a user's understanding of a
// statement boundary. Called from *Value.reset(), and *Func.newValue(),
// located here to keep all the statement boundary heuristics in one place.
// Note: *Value.reset() filters out OpCopy because of how that is used in
// rewrite.
// notStmtBoundary reports whether a value with opcode op can never be a statement
// boundary. Such values don't correspond to a user's understanding of a
// statement boundary.
func notStmtBoundary(op Op) bool {
switch op {
case OpCopy, OpPhi, OpVarKill, OpVarDef, OpVarLive, OpUnknown, OpFwdRef, OpArg:

View File

@ -1881,6 +1881,8 @@ const (
OpPPC64FlagEQ
OpPPC64FlagLT
OpPPC64FlagGT
OpPPC64FlagCarrySet
OpPPC64FlagCarryClear
OpRISCV64ADD
OpRISCV64ADDI
@ -1915,6 +1917,10 @@ const (
OpRISCV64MOVHstore
OpRISCV64MOVWstore
OpRISCV64MOVDstore
OpRISCV64MOVBstorezero
OpRISCV64MOVHstorezero
OpRISCV64MOVWstorezero
OpRISCV64MOVDstorezero
OpRISCV64SLL
OpRISCV64SRA
OpRISCV64SRL
@ -2113,6 +2119,14 @@ const (
OpS390XCDFBRA
OpS390XCEGBRA
OpS390XCDGBRA
OpS390XCLFEBR
OpS390XCLFDBR
OpS390XCLGEBR
OpS390XCLGDBR
OpS390XCELFBR
OpS390XCDLFBR
OpS390XCELGBR
OpS390XCDLGBR
OpS390XLEDBR
OpS390XLDEBR
OpS390XMOVDaddr
@ -2584,6 +2598,7 @@ const (
OpCvt64Fto64
OpCvt32Fto64F
OpCvt64Fto32F
OpCvtBoolToUint8
OpRound32F
OpRound64F
OpIsNonNil
@ -24986,6 +25001,16 @@ var opcodeTable = [...]opInfo{
argLen: 0,
reg: regInfo{},
},
{
name: "FlagCarrySet",
argLen: 0,
reg: regInfo{},
},
{
name: "FlagCarryClear",
argLen: 0,
reg: regInfo{},
},
{
name: "ADD",
@ -25462,6 +25487,58 @@ var opcodeTable = [...]opInfo{
},
},
},
{
name: "MOVBstorezero",
auxType: auxSymOff,
argLen: 2,
faultOnNilArg0: true,
symEffect: SymWrite,
asm: riscv.AMOVB,
reg: regInfo{
inputs: []inputInfo{
{0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
},
},
},
{
name: "MOVHstorezero",
auxType: auxSymOff,
argLen: 2,
faultOnNilArg0: true,
symEffect: SymWrite,
asm: riscv.AMOVH,
reg: regInfo{
inputs: []inputInfo{
{0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
},
},
},
{
name: "MOVWstorezero",
auxType: auxSymOff,
argLen: 2,
faultOnNilArg0: true,
symEffect: SymWrite,
asm: riscv.AMOVW,
reg: regInfo{
inputs: []inputInfo{
{0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
},
},
},
{
name: "MOVDstorezero",
auxType: auxSymOff,
argLen: 2,
faultOnNilArg0: true,
symEffect: SymWrite,
asm: riscv.AMOV,
reg: regInfo{
inputs: []inputInfo{
{0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
},
},
},
{
name: "SLL",
argLen: 2,
@ -28313,6 +28390,110 @@ var opcodeTable = [...]opInfo{
},
},
},
{
name: "CLFEBR",
argLen: 1,
asm: s390x.ACLFEBR,
reg: regInfo{
inputs: []inputInfo{
{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
outputs: []outputInfo{
{0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
},
},
},
{
name: "CLFDBR",
argLen: 1,
asm: s390x.ACLFDBR,
reg: regInfo{
inputs: []inputInfo{
{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
outputs: []outputInfo{
{0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
},
},
},
{
name: "CLGEBR",
argLen: 1,
asm: s390x.ACLGEBR,
reg: regInfo{
inputs: []inputInfo{
{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
outputs: []outputInfo{
{0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
},
},
},
{
name: "CLGDBR",
argLen: 1,
asm: s390x.ACLGDBR,
reg: regInfo{
inputs: []inputInfo{
{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
outputs: []outputInfo{
{0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
},
},
},
{
name: "CELFBR",
argLen: 1,
asm: s390x.ACELFBR,
reg: regInfo{
inputs: []inputInfo{
{0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
},
outputs: []outputInfo{
{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
},
},
{
name: "CDLFBR",
argLen: 1,
asm: s390x.ACDLFBR,
reg: regInfo{
inputs: []inputInfo{
{0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
},
outputs: []outputInfo{
{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
},
},
{
name: "CELGBR",
argLen: 1,
asm: s390x.ACELGBR,
reg: regInfo{
inputs: []inputInfo{
{0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
},
outputs: []outputInfo{
{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
},
},
{
name: "CDLGBR",
argLen: 1,
asm: s390x.ACDLGBR,
reg: regInfo{
inputs: []inputInfo{
{0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
},
outputs: []outputInfo{
{0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
},
},
{
name: "LEDBR",
argLen: 1,
@ -32561,6 +32742,11 @@ var opcodeTable = [...]opInfo{
argLen: 1,
generic: true,
},
{
name: "CvtBoolToUint8",
argLen: 1,
generic: true,
},
{
name: "Round32F",
argLen: 1,

View File

@ -148,6 +148,13 @@ func phioptint(v *Value, b0 *Block, reverse int) {
negate = !negate
}
a := b0.Controls[0]
if negate {
a = v.Block.NewValue1(v.Pos, OpNot, a.Type, a)
}
v.AddArg(a)
cvt := v.Block.NewValue1(v.Pos, OpCvtBoolToUint8, a.Type, a)
switch v.Type.Size() {
case 1:
v.reset(OpCopy)
@ -160,12 +167,7 @@ func phioptint(v *Value, b0 *Block, reverse int) {
default:
v.Fatalf("bad int size %d", v.Type.Size())
}
a := b0.Controls[0]
if negate {
a = v.Block.NewValue1(v.Pos, OpNot, a.Type, a)
}
v.AddArg(a)
v.AddArg(cvt)
f := b0.Func
if f.pass.debug > 0 {

View File

@ -487,11 +487,17 @@ func DivisionNeedsFixUp(v *Value) bool {
// auxFrom64F encodes a float64 value so it can be stored in an AuxInt.
func auxFrom64F(f float64) int64 {
if f != f {
panic("can't encode a NaN in AuxInt field")
}
return int64(math.Float64bits(f))
}
// auxFrom32F encodes a float32 value so it can be stored in an AuxInt.
func auxFrom32F(f float32) int64 {
if f != f {
panic("can't encode a NaN in AuxInt field")
}
return int64(math.Float64bits(extend32Fto64F(f)))
}

File diff suppressed because it is too large Load Diff

View File

@ -37,8 +37,7 @@ func rewriteValue386splitload_Op386CMPBconstload(v *Value) bool {
v0 := b.NewValue0(v.Pos, Op386MOVBload, typ.UInt8)
v0.AuxInt = offOnly(vo)
v0.Aux = sym
v0.AddArg(ptr)
v0.AddArg(mem)
v0.AddArg2(ptr, mem)
v.AddArg(v0)
return true
}
@ -61,10 +60,8 @@ func rewriteValue386splitload_Op386CMPBload(v *Value) bool {
v0 := b.NewValue0(v.Pos, Op386MOVBload, typ.UInt8)
v0.AuxInt = off
v0.Aux = sym
v0.AddArg(ptr)
v0.AddArg(mem)
v.AddArg(v0)
v.AddArg(x)
v0.AddArg2(ptr, mem)
v.AddArg2(v0, x)
return true
}
}
@ -85,8 +82,7 @@ func rewriteValue386splitload_Op386CMPLconstload(v *Value) bool {
v0 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
v0.AuxInt = offOnly(vo)
v0.Aux = sym
v0.AddArg(ptr)
v0.AddArg(mem)
v0.AddArg2(ptr, mem)
v.AddArg(v0)
return true
}
@ -109,10 +105,8 @@ func rewriteValue386splitload_Op386CMPLload(v *Value) bool {
v0 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
v0.AuxInt = off
v0.Aux = sym
v0.AddArg(ptr)
v0.AddArg(mem)
v.AddArg(v0)
v.AddArg(x)
v0.AddArg2(ptr, mem)
v.AddArg2(v0, x)
return true
}
}
@ -133,8 +127,7 @@ func rewriteValue386splitload_Op386CMPWconstload(v *Value) bool {
v0 := b.NewValue0(v.Pos, Op386MOVWload, typ.UInt16)
v0.AuxInt = offOnly(vo)
v0.Aux = sym
v0.AddArg(ptr)
v0.AddArg(mem)
v0.AddArg2(ptr, mem)
v.AddArg(v0)
return true
}
@ -157,10 +150,8 @@ func rewriteValue386splitload_Op386CMPWload(v *Value) bool {
v0 := b.NewValue0(v.Pos, Op386MOVWload, typ.UInt16)
v0.AuxInt = off
v0.Aux = sym
v0.AddArg(ptr)
v0.AddArg(mem)
v.AddArg(v0)
v.AddArg(x)
v0.AddArg2(ptr, mem)
v.AddArg2(v0, x)
return true
}
}

File diff suppressed because it is too large Load Diff

View File

@ -41,8 +41,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPBconstload(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
v0.AuxInt = offOnly(vo)
v0.Aux = sym
v0.AddArg(ptr)
v0.AddArg(mem)
v0.AddArg2(ptr, mem)
v.AddArg(v0)
return true
}
@ -65,10 +64,8 @@ func rewriteValueAMD64splitload_OpAMD64CMPBload(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
v0.AuxInt = off
v0.Aux = sym
v0.AddArg(ptr)
v0.AddArg(mem)
v.AddArg(v0)
v.AddArg(x)
v0.AddArg2(ptr, mem)
v.AddArg2(v0, x)
return true
}
}
@ -89,8 +86,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPLconstload(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
v0.AuxInt = offOnly(vo)
v0.Aux = sym
v0.AddArg(ptr)
v0.AddArg(mem)
v0.AddArg2(ptr, mem)
v.AddArg(v0)
return true
}
@ -113,10 +109,8 @@ func rewriteValueAMD64splitload_OpAMD64CMPLload(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
v0.AuxInt = off
v0.Aux = sym
v0.AddArg(ptr)
v0.AddArg(mem)
v.AddArg(v0)
v.AddArg(x)
v0.AddArg2(ptr, mem)
v.AddArg2(v0, x)
return true
}
}
@ -137,8 +131,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPQconstload(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
v0.AuxInt = offOnly(vo)
v0.Aux = sym
v0.AddArg(ptr)
v0.AddArg(mem)
v0.AddArg2(ptr, mem)
v.AddArg(v0)
return true
}
@ -161,10 +154,8 @@ func rewriteValueAMD64splitload_OpAMD64CMPQload(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
v0.AuxInt = off
v0.Aux = sym
v0.AddArg(ptr)
v0.AddArg(mem)
v.AddArg(v0)
v.AddArg(x)
v0.AddArg2(ptr, mem)
v.AddArg2(v0, x)
return true
}
}
@ -185,8 +176,7 @@ func rewriteValueAMD64splitload_OpAMD64CMPWconstload(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
v0.AuxInt = offOnly(vo)
v0.Aux = sym
v0.AddArg(ptr)
v0.AddArg(mem)
v0.AddArg2(ptr, mem)
v.AddArg(v0)
return true
}
@ -209,10 +199,8 @@ func rewriteValueAMD64splitload_OpAMD64CMPWload(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
v0.AuxInt = off
v0.Aux = sym
v0.AddArg(ptr)
v0.AddArg(mem)
v.AddArg(v0)
v.AddArg(x)
v0.AddArg2(ptr, mem)
v.AddArg2(v0, x)
return true
}
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -41,9 +41,7 @@ func rewriteValuedec_OpComplexImag(v *Value) bool {
break
}
imag := v_0.Args[1]
v.reset(OpCopy)
v.Type = imag.Type
v.AddArg(imag)
v.copyOf(imag)
return true
}
return false
@ -56,11 +54,8 @@ func rewriteValuedec_OpComplexReal(v *Value) bool {
if v_0.Op != OpComplexMake {
break
}
_ = v_0.Args[1]
real := v_0.Args[0]
v.reset(OpCopy)
v.Type = real.Type
v.AddArg(real)
v.copyOf(real)
return true
}
return false
@ -74,9 +69,7 @@ func rewriteValuedec_OpIData(v *Value) bool {
break
}
data := v_0.Args[1]
v.reset(OpCopy)
v.Type = data.Type
v.AddArg(data)
v.copyOf(data)
return true
}
return false
@ -89,11 +82,8 @@ func rewriteValuedec_OpITab(v *Value) bool {
if v_0.Op != OpIMake {
break
}
_ = v_0.Args[1]
itab := v_0.Args[0]
v.reset(OpCopy)
v.Type = itab.Type
v.AddArg(itab)
v.copyOf(itab)
return true
}
return false
@ -116,16 +106,13 @@ func rewriteValuedec_OpLoad(v *Value) bool {
}
v.reset(OpComplexMake)
v0 := b.NewValue0(v.Pos, OpLoad, typ.Float32)
v0.AddArg(ptr)
v0.AddArg(mem)
v.AddArg(v0)
v0.AddArg2(ptr, mem)
v1 := b.NewValue0(v.Pos, OpLoad, typ.Float32)
v2 := b.NewValue0(v.Pos, OpOffPtr, typ.Float32Ptr)
v2.AuxInt = 4
v2.AddArg(ptr)
v1.AddArg(v2)
v1.AddArg(mem)
v.AddArg(v1)
v1.AddArg2(v2, mem)
v.AddArg2(v0, v1)
return true
}
// match: (Load <t> ptr mem)
@ -140,16 +127,13 @@ func rewriteValuedec_OpLoad(v *Value) bool {
}
v.reset(OpComplexMake)
v0 := b.NewValue0(v.Pos, OpLoad, typ.Float64)
v0.AddArg(ptr)
v0.AddArg(mem)
v.AddArg(v0)
v0.AddArg2(ptr, mem)
v1 := b.NewValue0(v.Pos, OpLoad, typ.Float64)
v2 := b.NewValue0(v.Pos, OpOffPtr, typ.Float64Ptr)
v2.AuxInt = 8
v2.AddArg(ptr)
v1.AddArg(v2)
v1.AddArg(mem)
v.AddArg(v1)
v1.AddArg2(v2, mem)
v.AddArg2(v0, v1)
return true
}
// match: (Load <t> ptr mem)
@ -164,16 +148,13 @@ func rewriteValuedec_OpLoad(v *Value) bool {
}
v.reset(OpStringMake)
v0 := b.NewValue0(v.Pos, OpLoad, typ.BytePtr)
v0.AddArg(ptr)
v0.AddArg(mem)
v.AddArg(v0)
v0.AddArg2(ptr, mem)
v1 := b.NewValue0(v.Pos, OpLoad, typ.Int)
v2 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr)
v2.AuxInt = config.PtrSize
v2.AddArg(ptr)
v1.AddArg(v2)
v1.AddArg(mem)
v.AddArg(v1)
v1.AddArg2(v2, mem)
v.AddArg2(v0, v1)
return true
}
// match: (Load <t> ptr mem)
@ -188,23 +169,18 @@ func rewriteValuedec_OpLoad(v *Value) bool {
}
v.reset(OpSliceMake)
v0 := b.NewValue0(v.Pos, OpLoad, t.Elem().PtrTo())
v0.AddArg(ptr)
v0.AddArg(mem)
v.AddArg(v0)
v0.AddArg2(ptr, mem)
v1 := b.NewValue0(v.Pos, OpLoad, typ.Int)
v2 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr)
v2.AuxInt = config.PtrSize
v2.AddArg(ptr)
v1.AddArg(v2)
v1.AddArg(mem)
v.AddArg(v1)
v1.AddArg2(v2, mem)
v3 := b.NewValue0(v.Pos, OpLoad, typ.Int)
v4 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr)
v4.AuxInt = 2 * config.PtrSize
v4.AddArg(ptr)
v3.AddArg(v4)
v3.AddArg(mem)
v.AddArg(v3)
v3.AddArg2(v4, mem)
v.AddArg3(v0, v1, v3)
return true
}
// match: (Load <t> ptr mem)
@ -219,16 +195,13 @@ func rewriteValuedec_OpLoad(v *Value) bool {
}
v.reset(OpIMake)
v0 := b.NewValue0(v.Pos, OpLoad, typ.Uintptr)
v0.AddArg(ptr)
v0.AddArg(mem)
v.AddArg(v0)
v0.AddArg2(ptr, mem)
v1 := b.NewValue0(v.Pos, OpLoad, typ.BytePtr)
v2 := b.NewValue0(v.Pos, OpOffPtr, typ.BytePtrPtr)
v2.AuxInt = config.PtrSize
v2.AddArg(ptr)
v1.AddArg(v2)
v1.AddArg(mem)
v.AddArg(v1)
v1.AddArg2(v2, mem)
v.AddArg2(v0, v1)
return true
}
return false
@ -242,9 +215,7 @@ func rewriteValuedec_OpSliceCap(v *Value) bool {
break
}
cap := v_0.Args[2]
v.reset(OpCopy)
v.Type = cap.Type
v.AddArg(cap)
v.copyOf(cap)
return true
}
return false
@ -257,11 +228,8 @@ func rewriteValuedec_OpSliceLen(v *Value) bool {
if v_0.Op != OpSliceMake {
break
}
_ = v_0.Args[2]
len := v_0.Args[1]
v.reset(OpCopy)
v.Type = len.Type
v.AddArg(len)
v.copyOf(len)
return true
}
return false
@ -274,11 +242,8 @@ func rewriteValuedec_OpSlicePtr(v *Value) bool {
if v_0.Op != OpSliceMake {
break
}
_ = v_0.Args[2]
ptr := v_0.Args[0]
v.reset(OpCopy)
v.Type = ptr.Type
v.AddArg(ptr)
v.copyOf(ptr)
return true
}
return false
@ -310,14 +275,10 @@ func rewriteValuedec_OpStore(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpOffPtr, typ.Float32Ptr)
v0.AuxInt = 4
v0.AddArg(dst)
v.AddArg(v0)
v.AddArg(imag)
v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
v1.Aux = typ.Float32
v1.AddArg(dst)
v1.AddArg(real)
v1.AddArg(mem)
v.AddArg(v1)
v1.AddArg3(dst, real, mem)
v.AddArg3(v0, imag, v1)
return true
}
// match: (Store {t} dst (ComplexMake real imag) mem)
@ -340,14 +301,10 @@ func rewriteValuedec_OpStore(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpOffPtr, typ.Float64Ptr)
v0.AuxInt = 8
v0.AddArg(dst)
v.AddArg(v0)
v.AddArg(imag)
v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
v1.Aux = typ.Float64
v1.AddArg(dst)
v1.AddArg(real)
v1.AddArg(mem)
v.AddArg(v1)
v1.AddArg3(dst, real, mem)
v.AddArg3(v0, imag, v1)
return true
}
// match: (Store dst (StringMake ptr len) mem)
@ -365,14 +322,10 @@ func rewriteValuedec_OpStore(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr)
v0.AuxInt = config.PtrSize
v0.AddArg(dst)
v.AddArg(v0)
v.AddArg(len)
v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
v1.Aux = typ.BytePtr
v1.AddArg(dst)
v1.AddArg(ptr)
v1.AddArg(mem)
v.AddArg(v1)
v1.AddArg3(dst, ptr, mem)
v.AddArg3(v0, len, v1)
return true
}
// match: (Store dst (SliceMake ptr len cap) mem)
@ -391,22 +344,16 @@ func rewriteValuedec_OpStore(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr)
v0.AuxInt = 2 * config.PtrSize
v0.AddArg(dst)
v.AddArg(v0)
v.AddArg(cap)
v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
v1.Aux = typ.Int
v2 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr)
v2.AuxInt = config.PtrSize
v2.AddArg(dst)
v1.AddArg(v2)
v1.AddArg(len)
v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
v3.Aux = typ.BytePtr
v3.AddArg(dst)
v3.AddArg(ptr)
v3.AddArg(mem)
v1.AddArg(v3)
v.AddArg(v1)
v3.AddArg3(dst, ptr, mem)
v1.AddArg3(v2, len, v3)
v.AddArg3(v0, cap, v1)
return true
}
// match: (Store dst (IMake itab data) mem)
@ -424,14 +371,10 @@ func rewriteValuedec_OpStore(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpOffPtr, typ.BytePtrPtr)
v0.AuxInt = config.PtrSize
v0.AddArg(dst)
v.AddArg(v0)
v.AddArg(data)
v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
v1.Aux = typ.Uintptr
v1.AddArg(dst)
v1.AddArg(itab)
v1.AddArg(mem)
v.AddArg(v1)
v1.AddArg3(dst, itab, mem)
v.AddArg3(v0, data, v1)
return true
}
return false
@ -445,9 +388,7 @@ func rewriteValuedec_OpStringLen(v *Value) bool {
break
}
len := v_0.Args[1]
v.reset(OpCopy)
v.Type = len.Type
v.AddArg(len)
v.copyOf(len)
return true
}
return false
@ -460,11 +401,8 @@ func rewriteValuedec_OpStringPtr(v *Value) bool {
if v_0.Op != OpStringMake {
break
}
_ = v_0.Args[1]
ptr := v_0.Args[0]
v.reset(OpCopy)
v.Type = ptr.Type
v.AddArg(ptr)
v.copyOf(ptr)
return true
}
return false

File diff suppressed because it is too large Load Diff

View File

@ -28,11 +28,10 @@ func rewriteValuedecArgs_OpArg(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpArg, typ.BytePtr)
v0.AuxInt = off
v0.Aux = n
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpArg, typ.Int)
v1.AuxInt = off + config.PtrSize
v1.Aux = n
v.AddArg(v1)
v.AddArg2(v0, v1)
return true
}
// match: (Arg {n} [off])
@ -48,15 +47,13 @@ func rewriteValuedecArgs_OpArg(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpArg, v.Type.Elem().PtrTo())
v0.AuxInt = off
v0.Aux = n
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpArg, typ.Int)
v1.AuxInt = off + config.PtrSize
v1.Aux = n
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpArg, typ.Int)
v2.AuxInt = off + 2*config.PtrSize
v2.Aux = n
v.AddArg(v2)
v.AddArg3(v0, v1, v2)
return true
}
// match: (Arg {n} [off])
@ -72,11 +69,10 @@ func rewriteValuedecArgs_OpArg(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpArg, typ.Uintptr)
v0.AuxInt = off
v0.Aux = n
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpArg, typ.BytePtr)
v1.AuxInt = off + config.PtrSize
v1.Aux = n
v.AddArg(v1)
v.AddArg2(v0, v1)
return true
}
// match: (Arg {n} [off])
@ -92,11 +88,10 @@ func rewriteValuedecArgs_OpArg(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpArg, typ.Float64)
v0.AuxInt = off
v0.Aux = n
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpArg, typ.Float64)
v1.AuxInt = off + 8
v1.Aux = n
v.AddArg(v1)
v.AddArg2(v0, v1)
return true
}
// match: (Arg {n} [off])
@ -112,11 +107,10 @@ func rewriteValuedecArgs_OpArg(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpArg, typ.Float32)
v0.AuxInt = off
v0.Aux = n
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpArg, typ.Float32)
v1.AuxInt = off + 4
v1.Aux = n
v.AddArg(v1)
v.AddArg2(v0, v1)
return true
}
// match: (Arg <t>)
@ -161,11 +155,10 @@ func rewriteValuedecArgs_OpArg(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpArg, t.FieldType(0))
v0.AuxInt = off + t.FieldOff(0)
v0.Aux = n
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpArg, t.FieldType(1))
v1.AuxInt = off + t.FieldOff(1)
v1.Aux = n
v.AddArg(v1)
v.AddArg2(v0, v1)
return true
}
// match: (Arg <t> {n} [off])
@ -182,15 +175,13 @@ func rewriteValuedecArgs_OpArg(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpArg, t.FieldType(0))
v0.AuxInt = off + t.FieldOff(0)
v0.Aux = n
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpArg, t.FieldType(1))
v1.AuxInt = off + t.FieldOff(1)
v1.Aux = n
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpArg, t.FieldType(2))
v2.AuxInt = off + t.FieldOff(2)
v2.Aux = n
v.AddArg(v2)
v.AddArg3(v0, v1, v2)
return true
}
// match: (Arg <t> {n} [off])
@ -207,19 +198,16 @@ func rewriteValuedecArgs_OpArg(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpArg, t.FieldType(0))
v0.AuxInt = off + t.FieldOff(0)
v0.Aux = n
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpArg, t.FieldType(1))
v1.AuxInt = off + t.FieldOff(1)
v1.Aux = n
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpArg, t.FieldType(2))
v2.AuxInt = off + t.FieldOff(2)
v2.Aux = n
v.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpArg, t.FieldType(3))
v3.AuxInt = off + t.FieldOff(3)
v3.Aux = n
v.AddArg(v3)
v.AddArg4(v0, v1, v2, v3)
return true
}
// match: (Arg <t>)

File diff suppressed because it is too large Load Diff

View File

@ -218,6 +218,58 @@ func (v *Value) AddArg(w *Value) {
v.Args = append(v.Args, w)
w.Uses++
}
// AddArg2 appends w1 and w2 to v's argument list and records one
// additional use of each argument. When no argument slice has been
// allocated yet, it first switches v over to its inline argument
// storage to avoid a heap allocation.
//go:noinline
func (v *Value) AddArg2(w1, w2 *Value) {
	if v.Args == nil {
		v.resetArgs() // start from the inline argstorage backing
	}
	w1.Uses++
	w2.Uses++
	v.Args = append(v.Args, w1, w2)
}
// AddArg3 appends w1, w2, and w3 to v's argument list and records one
// additional use of each argument. When no argument slice has been
// allocated yet, it first switches v over to its inline argument
// storage to avoid a heap allocation.
//go:noinline
func (v *Value) AddArg3(w1, w2, w3 *Value) {
	if v.Args == nil {
		v.resetArgs() // start from the inline argstorage backing
	}
	w1.Uses++
	w2.Uses++
	w3.Uses++
	v.Args = append(v.Args, w1, w2, w3)
}
// AddArg4 appends w1 through w4 to v's argument list and bumps each
// argument's use count. Unlike AddArg2/AddArg3 it does not reset to
// v's inline argument storage first: appending to a nil slice works,
// and — NOTE(review): presumably the inline argstorage cannot hold
// four arguments anyway; confirm against Value's declaration.
//go:noinline
func (v *Value) AddArg4(w1, w2, w3, w4 *Value) {
	v.Args = append(v.Args, w1, w2, w3, w4)
	w1.Uses++
	w2.Uses++
	w3.Uses++
	w4.Uses++
}
// AddArg5 appends w1 through w5 to v's argument list and bumps each
// argument's use count. Like AddArg4, it skips the inline-storage
// check done by AddArg2/AddArg3 (appending to a nil slice is valid).
//go:noinline
func (v *Value) AddArg5(w1, w2, w3, w4, w5 *Value) {
	v.Args = append(v.Args, w1, w2, w3, w4, w5)
	w1.Uses++
	w2.Uses++
	w3.Uses++
	w4.Uses++
	w5.Uses++
}
// AddArg6 appends w1 through w6 to v's argument list and bumps each
// argument's use count. Like AddArg4, it skips the inline-storage
// check done by AddArg2/AddArg3 (appending to a nil slice is valid).
//go:noinline
func (v *Value) AddArg6(w1, w2, w3, w4, w5, w6 *Value) {
	v.Args = append(v.Args, w1, w2, w3, w4, w5, w6)
	w1.Uses++
	w2.Uses++
	w3.Uses++
	w4.Uses++
	w5.Uses++
	w6.Uses++
}
func (v *Value) AddArgs(a ...*Value) {
if v.Args == nil {
v.resetArgs() // use argstorage
@ -258,17 +310,29 @@ func (v *Value) resetArgs() {
v.Args = v.argstorage[:0]
}
// reset is called from most rewrite rules.
// Allowing it to be inlined increases the size
// of cmd/compile by almost 10%, and slows it down.
//
// reset rewrites v in place into a fresh value of the given op with
// no arguments and cleared AuxInt/Aux; v's type is left untouched.
//go:noinline
func (v *Value) reset(op Op) {
	v.Op = op
	if op != OpCopy && notStmtBoundary(op) {
		// Special case for OpCopy because of how it is used in rewrite
		// NOTE(review): notStmtBoundary presumably reports ops that
		// may not carry a statement mark — confirm against its
		// definition; here the statement mark is stripped from v.Pos.
		v.Pos = v.Pos.WithNotStmt()
	}
	v.resetArgs()
	v.AuxInt = 0
	v.Aux = nil
}
// copyOf is called from rewrite rules.
// It modifies v to be (Copy a).
//go:noinline
func (v *Value) copyOf(a *Value) {
	v.Op = OpCopy
	v.resetArgs() // drop all existing arguments before adding the new one
	v.AddArg(a)   // a becomes the single argument of the copy
	v.AuxInt = 0
	v.Aux = nil
	v.Type = a.Type // the copy takes on the type of its source
}
// copyInto makes a new value identical to v and adds it to the end of b.
// unlike copyIntoWithXPos this does not check for v.Pos being a statement.
func (v *Value) copyInto(b *Block) *Value {

View File

@ -347,6 +347,7 @@ func writebarrier(f *Func) {
bEnd.Values = append(bEnd.Values, last)
last.Block = bEnd
last.reset(OpPhi)
last.Pos = last.Pos.WithNotStmt()
last.Type = types.TypeMem
last.AddArg(memThen)
last.AddArg(memElse)

View File

@ -419,7 +419,7 @@ func (p *parser) fileOrNil() *File {
}
// p.tok == _EOF
f.Lines = p.source.line
f.Lines = p.line
return f
}

View File

@ -10,6 +10,7 @@ import (
"fmt"
"io/ioutil"
"path/filepath"
"regexp"
"runtime"
"strings"
"sync"
@ -17,9 +18,12 @@ import (
"time"
)
var fast = flag.Bool("fast", false, "parse package files in parallel")
var src_ = flag.String("src", "parser.go", "source file to parse")
var verify = flag.Bool("verify", false, "verify idempotent printing")
var (
fast = flag.Bool("fast", false, "parse package files in parallel")
verify = flag.Bool("verify", false, "verify idempotent printing")
src_ = flag.String("src", "parser.go", "source file to parse")
skip = flag.String("skip", "", "files matching this regular expression are skipped by TestStdLib")
)
func TestParse(t *testing.T) {
ParseFile(*src_, func(err error) { t.Error(err) }, nil, 0)
@ -30,6 +34,15 @@ func TestStdLib(t *testing.T) {
t.Skip("skipping test in short mode")
}
var skipRx *regexp.Regexp
if *skip != "" {
var err error
skipRx, err = regexp.Compile(*skip)
if err != nil {
t.Fatalf("invalid argument for -skip (%v)", err)
}
}
var m1 runtime.MemStats
runtime.ReadMemStats(&m1)
start := time.Now()
@ -46,6 +59,12 @@ func TestStdLib(t *testing.T) {
runtime.GOROOT(),
} {
walkDirs(t, dir, func(filename string) {
if skipRx != nil && skipRx.MatchString(filename) {
// Always report skipped files since regexp
// typos can lead to surprising results.
fmt.Printf("skipping %s\n", filename)
return
}
if debug {
fmt.Printf("parsing %s\n", filename)
}

View File

@ -6,9 +6,9 @@
// Go source. After initialization, consecutive calls of
// next advance the scanner one token at a time.
//
// This file, source.go, and tokens.go are self-contained
// (go tool compile scanner.go source.go tokens.go compiles)
// and thus could be made into its own package.
// This file, source.go, tokens.go, and token_string.go are self-contained
// (`go tool compile scanner.go source.go tokens.go token_string.go` compiles)
// and thus could be made into their own package.
package syntax
@ -86,20 +86,21 @@ func (s *scanner) next() {
redo:
// skip white space
c := s.getr()
for c == ' ' || c == '\t' || c == '\n' && !nlsemi || c == '\r' {
c = s.getr()
s.stop()
for s.ch == ' ' || s.ch == '\t' || s.ch == '\n' && !nlsemi || s.ch == '\r' {
s.nextch()
}
// token start
s.line, s.col = s.source.line0, s.source.col0
if isLetter(c) || c >= utf8.RuneSelf && s.isIdentRune(c, true) {
s.line, s.col = s.pos()
s.start()
if isLetter(s.ch) || s.ch >= utf8.RuneSelf && s.atIdentChar(true) {
s.nextch()
s.ident()
return
}
switch c {
switch s.ch {
case -1:
if nlsemi {
s.lit = "EOF"
@ -109,11 +110,12 @@ redo:
s.tok = _EOF
case '\n':
s.nextch()
s.lit = "newline"
s.tok = _Semi
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
s.number(c)
s.number(false)
case '"':
s.stdString()
@ -125,97 +127,110 @@ redo:
s.rune()
case '(':
s.nextch()
s.tok = _Lparen
case '[':
s.nextch()
s.tok = _Lbrack
case '{':
s.nextch()
s.tok = _Lbrace
case ',':
s.nextch()
s.tok = _Comma
case ';':
s.nextch()
s.lit = "semicolon"
s.tok = _Semi
case ')':
s.nextch()
s.nlsemi = true
s.tok = _Rparen
case ']':
s.nextch()
s.nlsemi = true
s.tok = _Rbrack
case '}':
s.nextch()
s.nlsemi = true
s.tok = _Rbrace
case ':':
if s.getr() == '=' {
s.nextch()
if s.ch == '=' {
s.nextch()
s.tok = _Define
break
}
s.ungetr()
s.tok = _Colon
case '.':
c = s.getr()
if isDecimal(c) {
s.ungetr()
s.unread(1) // correct position of '.' (needed by startLit in number)
s.number('.')
s.nextch()
if isDecimal(s.ch) {
s.number(true)
break
}
if c == '.' {
c = s.getr()
if c == '.' {
if s.ch == '.' {
s.nextch()
if s.ch == '.' {
s.nextch()
s.tok = _DotDotDot
break
}
s.unread(1)
s.rewind() // now s.ch holds 1st '.'
s.nextch() // consume 1st '.' again
}
s.ungetr()
s.tok = _Dot
case '+':
s.nextch()
s.op, s.prec = Add, precAdd
c = s.getr()
if c != '+' {
if s.ch != '+' {
goto assignop
}
s.nextch()
s.nlsemi = true
s.tok = _IncOp
case '-':
s.nextch()
s.op, s.prec = Sub, precAdd
c = s.getr()
if c != '-' {
if s.ch != '-' {
goto assignop
}
s.nextch()
s.nlsemi = true
s.tok = _IncOp
case '*':
s.nextch()
s.op, s.prec = Mul, precMul
// don't goto assignop - want _Star token
if s.getr() == '=' {
if s.ch == '=' {
s.nextch()
s.tok = _AssignOp
break
}
s.ungetr()
s.tok = _Star
case '/':
c = s.getr()
if c == '/' {
s.nextch()
if s.ch == '/' {
s.nextch()
s.lineComment()
goto redo
}
if c == '*' {
if s.ch == '*' {
s.nextch()
s.fullComment()
if s.source.line > s.line && nlsemi {
if line, _ := s.pos(); line > s.line && nlsemi {
// A multi-line comment acts like a newline;
// it translates to a ';' if nlsemi is set.
s.lit = "newline"
@ -228,27 +243,29 @@ redo:
goto assignop
case '%':
s.nextch()
s.op, s.prec = Rem, precMul
c = s.getr()
goto assignop
case '&':
c = s.getr()
if c == '&' {
s.nextch()
if s.ch == '&' {
s.nextch()
s.op, s.prec = AndAnd, precAndAnd
s.tok = _Operator
break
}
s.op, s.prec = And, precMul
if c == '^' {
if s.ch == '^' {
s.nextch()
s.op = AndNot
c = s.getr()
}
goto assignop
case '|':
c = s.getr()
if c == '|' {
s.nextch()
if s.ch == '|' {
s.nextch()
s.op, s.prec = OrOr, precOrOr
s.tok = _Operator
break
@ -257,106 +274,100 @@ redo:
goto assignop
case '^':
s.nextch()
s.op, s.prec = Xor, precAdd
c = s.getr()
goto assignop
case '<':
c = s.getr()
if c == '=' {
s.nextch()
if s.ch == '=' {
s.nextch()
s.op, s.prec = Leq, precCmp
s.tok = _Operator
break
}
if c == '<' {
if s.ch == '<' {
s.nextch()
s.op, s.prec = Shl, precMul
c = s.getr()
goto assignop
}
if c == '-' {
if s.ch == '-' {
s.nextch()
s.tok = _Arrow
break
}
s.ungetr()
s.op, s.prec = Lss, precCmp
s.tok = _Operator
case '>':
c = s.getr()
if c == '=' {
s.nextch()
if s.ch == '=' {
s.nextch()
s.op, s.prec = Geq, precCmp
s.tok = _Operator
break
}
if c == '>' {
if s.ch == '>' {
s.nextch()
s.op, s.prec = Shr, precMul
c = s.getr()
goto assignop
}
s.ungetr()
s.op, s.prec = Gtr, precCmp
s.tok = _Operator
case '=':
if s.getr() == '=' {
s.nextch()
if s.ch == '=' {
s.nextch()
s.op, s.prec = Eql, precCmp
s.tok = _Operator
break
}
s.ungetr()
s.tok = _Assign
case '!':
if s.getr() == '=' {
s.nextch()
if s.ch == '=' {
s.nextch()
s.op, s.prec = Neq, precCmp
s.tok = _Operator
break
}
s.ungetr()
s.op, s.prec = Not, 0
s.tok = _Operator
default:
s.tok = 0
s.errorf("invalid character %#U", c)
s.errorf("invalid character %#U", s.ch)
s.nextch()
goto redo
}
return
assignop:
if c == '=' {
if s.ch == '=' {
s.nextch()
s.tok = _AssignOp
return
}
s.ungetr()
s.tok = _Operator
}
func isLetter(c rune) bool {
return 'a' <= lower(c) && lower(c) <= 'z' || c == '_'
}
func (s *scanner) ident() {
s.startLit()
// accelerate common case (7bit ASCII)
c := s.getr()
for isLetter(c) || isDecimal(c) {
c = s.getr()
for isLetter(s.ch) || isDecimal(s.ch) {
s.nextch()
}
// general case
if c >= utf8.RuneSelf {
for s.isIdentRune(c, false) {
c = s.getr()
if s.ch >= utf8.RuneSelf {
for s.atIdentChar(false) {
s.nextch()
}
}
s.ungetr()
lit := s.stopLit()
// possibly a keyword
lit := s.segment()
if len(lit) >= 2 {
if tok := keywordMap[hash(lit)]; tok != 0 && tokStrFast(tok) == string(lit) {
s.nlsemi = contains(1<<_Break|1<<_Continue|1<<_Fallthrough|1<<_Return, tok)
@ -376,16 +387,16 @@ func tokStrFast(tok token) string {
return _token_name[_token_index[tok-1]:_token_index[tok]]
}
func (s *scanner) isIdentRune(c rune, first bool) bool {
func (s *scanner) atIdentChar(first bool) bool {
switch {
case unicode.IsLetter(c) || c == '_':
case unicode.IsLetter(s.ch) || s.ch == '_':
// ok
case unicode.IsDigit(c):
case unicode.IsDigit(s.ch):
if first {
s.errorf("identifier cannot begin with digit %#U", c)
s.errorf("identifier cannot begin with digit %#U", s.ch)
}
case c >= utf8.RuneSelf:
s.errorf("invalid identifier character %#U", c)
case s.ch >= utf8.RuneSelf:
s.errorf("invalid character %#U in identifier", s.ch)
default:
return false
}
@ -411,46 +422,45 @@ func init() {
}
}
func lower(c rune) rune { return ('a' - 'A') | c } // returns lower-case c iff c is ASCII letter
func isDecimal(c rune) bool { return '0' <= c && c <= '9' }
func isHex(c rune) bool { return '0' <= c && c <= '9' || 'a' <= lower(c) && lower(c) <= 'f' }
// lower returns the lower-case version of ch when ch is an ASCII
// letter; for other runes the result is unspecified.
func lower(ch rune) rune { return ('a' - 'A') | ch }

// isLetter reports whether ch is an ASCII letter or underscore.
func isLetter(ch rune) bool { return ch == '_' || 'a' <= lower(ch) && lower(ch) <= 'z' }

// isDecimal reports whether ch is a decimal digit.
func isDecimal(ch rune) bool { return '0' <= ch && ch <= '9' }

// isHex reports whether ch is a hexadecimal digit (either case).
func isHex(ch rune) bool {
	if '0' <= ch && ch <= '9' {
		return true
	}
	return 'a' <= lower(ch) && lower(ch) <= 'f'
}
// digits accepts the sequence { digit | '_' } starting with c0.
// digits accepts the sequence { digit | '_' }.
// If base <= 10, digits accepts any decimal digit but records
// the index (relative to the literal start) of a digit >= base
// in *invalid, if *invalid < 0.
// digits returns the first rune that is not part of the sequence
// anymore, and a bitset describing whether the sequence contained
// digits returns a bitset describing whether the sequence contained
// digits (bit 0 is set), or separators '_' (bit 1 is set).
func (s *scanner) digits(c0 rune, base int, invalid *int) (c rune, digsep int) {
c = c0
func (s *scanner) digits(base int, invalid *int) (digsep int) {
if base <= 10 {
max := rune('0' + base)
for isDecimal(c) || c == '_' {
for isDecimal(s.ch) || s.ch == '_' {
ds := 1
if c == '_' {
if s.ch == '_' {
ds = 2
} else if c >= max && *invalid < 0 {
*invalid = int(s.col0 - s.col) // record invalid rune index
} else if s.ch >= max && *invalid < 0 {
_, col := s.pos()
*invalid = int(col - s.col) // record invalid rune index
}
digsep |= ds
c = s.getr()
s.nextch()
}
} else {
for isHex(c) || c == '_' {
for isHex(s.ch) || s.ch == '_' {
ds := 1
if c == '_' {
if s.ch == '_' {
ds = 2
}
digsep |= ds
c = s.getr()
s.nextch()
}
}
return
}
func (s *scanner) number(c rune) {
s.startLit()
func (s *scanner) number(seenPoint bool) {
s.bad = false
base := 10 // number base
@ -459,38 +469,39 @@ func (s *scanner) number(c rune) {
invalid := -1 // index of invalid digit in literal, or < 0
// integer part
var ds int
if c != '.' {
if !seenPoint {
s.kind = IntLit
if c == '0' {
c = s.getr()
switch lower(c) {
if s.ch == '0' {
s.nextch()
switch lower(s.ch) {
case 'x':
c = s.getr()
s.nextch()
base, prefix = 16, 'x'
case 'o':
c = s.getr()
s.nextch()
base, prefix = 8, 'o'
case 'b':
c = s.getr()
s.nextch()
base, prefix = 2, 'b'
default:
base, prefix = 8, '0'
digsep = 1 // leading 0
}
}
c, ds = s.digits(c, base, &invalid)
digsep |= ds
digsep |= s.digits(base, &invalid)
if s.ch == '.' {
if prefix == 'o' || prefix == 'b' {
s.errorf("invalid radix point in %s", litname(prefix))
}
s.nextch()
seenPoint = true
}
}
// fractional part
if c == '.' {
if seenPoint {
s.kind = FloatLit
if prefix == 'o' || prefix == 'b' {
s.errorf("invalid radix point in %s", litname(prefix))
}
c, ds = s.digits(s.getr(), base, &invalid)
digsep |= ds
digsep |= s.digits(base, &invalid)
}
if digsep&1 == 0 && !s.bad {
@ -498,23 +509,22 @@ func (s *scanner) number(c rune) {
}
// exponent
if e := lower(c); e == 'e' || e == 'p' {
if e := lower(s.ch); e == 'e' || e == 'p' {
if !s.bad {
switch {
case e == 'e' && prefix != 0 && prefix != '0':
s.errorf("%q exponent requires decimal mantissa", c)
s.errorf("%q exponent requires decimal mantissa", s.ch)
case e == 'p' && prefix != 'x':
s.errorf("%q exponent requires hexadecimal mantissa", c)
s.errorf("%q exponent requires hexadecimal mantissa", s.ch)
}
}
c = s.getr()
s.nextch()
s.kind = FloatLit
if c == '+' || c == '-' {
c = s.getr()
if s.ch == '+' || s.ch == '-' {
s.nextch()
}
c, ds = s.digits(c, 10, nil)
digsep |= ds
if ds&1 == 0 && !s.bad {
digsep = s.digits(10, nil) | digsep&2 // don't lose sep bit
if digsep&1 == 0 && !s.bad {
s.errorf("exponent has no digits")
}
} else if prefix == 'x' && s.kind == FloatLit && !s.bad {
@ -522,14 +532,13 @@ func (s *scanner) number(c rune) {
}
// suffix 'i'
if c == 'i' {
if s.ch == 'i' {
s.kind = ImagLit
c = s.getr()
s.nextch()
}
s.ungetr()
s.nlsemi = true
s.lit = string(s.stopLit())
s.lit = string(s.segment())
s.tok = _Literal
if s.kind == IntLit && invalid >= 0 && !s.bad {
@ -596,199 +605,195 @@ func invalidSep(x string) int {
}
func (s *scanner) rune() {
s.startLit()
s.bad = false
s.nextch()
n := 0
for ; ; n++ {
r := s.getr()
if r == '\'' {
if s.ch == '\'' {
if !s.bad {
if n == 0 {
s.errorf("empty rune literal or unescaped '")
} else if n != 1 {
s.errorAtf(0, "more than one character in rune literal")
}
}
s.nextch()
break
}
if r == '\\' {
if s.ch == '\\' {
s.nextch()
s.escape('\'')
continue
}
if r == '\n' {
s.ungetr() // assume newline is not part of literal
if s.ch == '\n' {
if !s.bad {
s.errorf("newline in character literal")
s.errorf("newline in rune literal")
}
break
}
if r < 0 {
if s.ch < 0 {
if !s.bad {
s.errorAtf(0, "invalid character literal (missing closing ')")
s.errorAtf(0, "rune literal not terminated")
}
break
}
}
if !s.bad {
if n == 0 {
s.errorf("empty character literal or unescaped ' in character literal")
} else if n != 1 {
s.errorAtf(0, "invalid character literal (more than one character)")
}
s.nextch()
}
s.nlsemi = true
s.lit = string(s.stopLit())
s.lit = string(s.segment())
s.kind = RuneLit
s.tok = _Literal
}
func (s *scanner) stdString() {
s.startLit()
s.bad = false
s.nextch()
for {
r := s.getr()
if r == '"' {
if s.ch == '"' {
s.nextch()
break
}
if r == '\\' {
if s.ch == '\\' {
s.nextch()
s.escape('"')
continue
}
if r == '\n' {
s.ungetr() // assume newline is not part of literal
if s.ch == '\n' {
s.errorf("newline in string")
break
}
if r < 0 {
if s.ch < 0 {
s.errorAtf(0, "string not terminated")
break
}
s.nextch()
}
s.nlsemi = true
s.lit = string(s.stopLit())
s.lit = string(s.segment())
s.kind = StringLit
s.tok = _Literal
}
func (s *scanner) rawString() {
s.startLit()
s.bad = false
s.nextch()
for {
r := s.getr()
if r == '`' {
if s.ch == '`' {
s.nextch()
break
}
if r < 0 {
if s.ch < 0 {
s.errorAtf(0, "string not terminated")
break
}
s.nextch()
}
// We leave CRs in the string since they are part of the
// literal (even though they are not part of the literal
// value).
s.nlsemi = true
s.lit = string(s.stopLit())
s.lit = string(s.segment())
s.kind = StringLit
s.tok = _Literal
}
func (s *scanner) comment(text string) {
s.errh(s.line, s.col, text)
s.errorAtf(0, "%s", text)
}
func (s *scanner) skipLine(r rune) {
for r >= 0 {
if r == '\n' {
s.ungetr() // don't consume '\n' - needed for nlsemi logic
break
}
r = s.getr()
// skipLine consumes characters up to, but not including, the next
// newline or EOF. The '\n' itself is deliberately left unread because
// the nlsemi (automatic semicolon) logic needs to see it.
func (s *scanner) skipLine() {
	for {
		if s.ch < 0 || s.ch == '\n' {
			return
		}
		s.nextch()
	}
}
func (s *scanner) lineComment() {
r := s.getr()
// opening has already been consumed
if s.mode&comments != 0 {
s.startLit()
s.skipLine(r)
s.comment("//" + string(s.stopLit()))
s.skipLine()
s.comment(string(s.segment()))
return
}
// directives must start at the beginning of the line (s.col == colbase)
if s.mode&directives == 0 || s.col != colbase || (r != 'g' && r != 'l') {
s.skipLine(r)
if s.mode&directives == 0 || s.col != colbase || (s.ch != 'g' && s.ch != 'l') {
s.stop()
s.skipLine()
return
}
// recognize go: or line directives
prefix := "go:"
if r == 'l' {
if s.ch == 'l' {
prefix = "line "
}
for _, m := range prefix {
if r != m {
s.skipLine(r)
if s.ch != m {
s.stop()
s.skipLine()
return
}
r = s.getr()
s.nextch()
}
// directive text
s.startLit()
s.skipLine(r)
s.comment("//" + prefix + string(s.stopLit()))
s.skipLine()
s.comment(string(s.segment()))
}
func (s *scanner) skipComment(r rune) bool {
for r >= 0 {
for r == '*' {
r = s.getr()
if r == '/' {
func (s *scanner) skipComment() bool {
for s.ch >= 0 {
for s.ch == '*' {
s.nextch()
if s.ch == '/' {
s.nextch()
return true
}
}
r = s.getr()
s.nextch()
}
s.errorAtf(0, "comment not terminated")
return false
}
func (s *scanner) fullComment() {
r := s.getr()
/* opening has already been consumed */
if s.mode&comments != 0 {
s.startLit()
if s.skipComment(r) {
s.comment("/*" + string(s.stopLit()))
} else {
s.killLit() // not a complete comment - ignore
if s.skipComment() {
s.comment(string(s.segment()))
}
return
}
if s.mode&directives == 0 || r != 'l' {
s.skipComment(r)
if s.mode&directives == 0 || s.ch != 'l' {
s.stop()
s.skipComment()
return
}
// recognize line directive
const prefix = "line "
for _, m := range prefix {
if r != m {
s.skipComment(r)
if s.ch != m {
s.stop()
s.skipComment()
return
}
r = s.getr()
s.nextch()
}
// directive text
s.startLit()
if s.skipComment(r) {
s.comment("/*" + prefix + string(s.stopLit()))
} else {
s.killLit() // not a complete comment - ignore
if s.skipComment() {
s.comment(string(s.segment()))
}
}
@ -796,62 +801,59 @@ func (s *scanner) escape(quote rune) {
var n int
var base, max uint32
c := s.getr()
switch c {
case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', quote:
switch s.ch {
case quote, 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\':
s.nextch()
return
case '0', '1', '2', '3', '4', '5', '6', '7':
n, base, max = 3, 8, 255
case 'x':
c = s.getr()
s.nextch()
n, base, max = 2, 16, 255
case 'u':
c = s.getr()
s.nextch()
n, base, max = 4, 16, unicode.MaxRune
case 'U':
c = s.getr()
s.nextch()
n, base, max = 8, 16, unicode.MaxRune
default:
if c < 0 {
if s.ch < 0 {
return // complain in caller about EOF
}
s.errorf("unknown escape sequence")
s.errorf("unknown escape")
return
}
var x uint32
for i := n; i > 0; i-- {
if s.ch < 0 {
return // complain in caller about EOF
}
d := base
switch {
case isDecimal(c):
d = uint32(c) - '0'
case 'a' <= lower(c) && lower(c) <= 'f':
d = uint32(lower(c)) - ('a' - 10)
if isDecimal(s.ch) {
d = uint32(s.ch) - '0'
} else if 'a' <= lower(s.ch) && lower(s.ch) <= 'f' {
d = uint32(lower(s.ch)) - 'a' + 10
}
if d >= base {
if c < 0 {
return // complain in caller about EOF
}
kind := "hex"
if base == 8 {
kind = "octal"
}
s.errorf("non-%s character in escape sequence: %c", kind, c)
s.ungetr()
s.errorf("invalid character %q in %s escape", s.ch, kind)
return
}
// d < base
x = x*base + d
c = s.getr()
s.nextch()
}
s.ungetr()
if x > max && base == 8 {
s.errorf("octal escape value > 255: %d", x)
s.errorf("octal escape value %d > 255", x)
return
}
if x > max || 0xD800 <= x && x < 0xE000 /* surrogate range */ {
s.errorf("escape sequence is invalid Unicode code point %#U", x)
s.errorf("escape is invalid Unicode code point %#U", x)
}
}

View File

@ -12,19 +12,59 @@ import (
"testing"
)
// errh is a default error handler for basic tests: it aborts by
// panicking with the position-prefixed message "line:col: msg".
func errh(line, col uint, msg string) {
	formatted := fmt.Sprintf("%d:%d: %s", line, col, msg)
	panic(formatted)
}
// Don't bother with other tests if TestSmoke doesn't pass.
// It scans a single line exercising a bit of everything (keywords,
// operators, literals, comments) and checks the token stream.
func TestSmoke(t *testing.T) {
	const src = "if (+foo\t+=..123/***/0.9_0e-0i'a'`raw`\"string\"..f;//$"
	tokens := []token{_If, _Lparen, _Operator, _Name, _AssignOp, _Dot, _Literal, _Literal, _Literal, _Literal, _Literal, _Dot, _Dot, _Name, _Semi, _EOF}

	var got scanner
	got.init(strings.NewReader(src), errh, 0)
	for _, want := range tokens {
		got.next()
		if have := got.tok; have != want {
			t.Errorf("%d:%d: got %s; want %s", got.line, got.col, have, want)
		}
	}
}
// Once TestSmoke passes, run TestTokens next.
// Each sample source is scanned on its own; the first token must match
// the expected one, and names/literals must round-trip their source.
func TestTokens(t *testing.T) {
	var got scanner
	for _, want := range sampleTokens {
		got.init(strings.NewReader(want.src), func(line, col uint, msg string) {
			t.Errorf("%s:%d:%d: %s", want.src, line, col, msg)
		}, 0)
		got.next()
		switch {
		case got.tok != want.tok:
			t.Errorf("%s: got %s; want %s", want.src, got.tok, want.tok)
		case (got.tok == _Name || got.tok == _Literal) && got.lit != want.src:
			t.Errorf("%s: got %q; want %q", want.src, got.lit, want.src)
		}
	}
}
func TestScanner(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
src, err := os.Open("parser.go")
filename := *src_ // can be changed via -src flag
src, err := os.Open(filename)
if err != nil {
t.Fatal(err)
}
defer src.Close()
var s scanner
s.init(src, nil, 0)
s.init(src, errh, 0)
for {
s.next()
if s.tok == _EOF {
@ -34,64 +74,66 @@ func TestScanner(t *testing.T) {
continue
}
switch s.tok {
case _Name:
fmt.Println(s.line, s.tok, "=>", s.lit)
case _Name, _Literal:
fmt.Printf("%s:%d:%d: %s => %s\n", filename, s.line, s.col, s.tok, s.lit)
case _Operator:
fmt.Println(s.line, s.tok, "=>", s.op, s.prec)
fmt.Printf("%s:%d:%d: %s => %s (prec = %d)\n", filename, s.line, s.col, s.tok, s.op, s.prec)
default:
fmt.Println(s.line, s.tok)
fmt.Printf("%s:%d:%d: %s\n", filename, s.line, s.col, s.tok)
}
}
}
func TestTokens(t *testing.T) {
func TestEmbeddedTokens(t *testing.T) {
// make source
var buf bytes.Buffer
for i, s := range sampleTokens {
buf.WriteString("\t\t\t\t"[:i&3]) // leading indentation
buf.WriteString(s.src) // token
buf.WriteString(" "[:i&7]) // trailing spaces
fmt.Fprintf(&buf, "/*line foo:%d */ // bar\n", i+linebase) // comments (don't crash w/o directive handler)
buf.WriteString("\t\t\t\t"[:i&3]) // leading indentation
buf.WriteString(s.src) // token
buf.WriteString(" "[:i&7]) // trailing spaces
buf.WriteString(fmt.Sprintf("/*line foo:%d */ // bar\n", i)) // comments + newline (don't crash w/o directive handler)
}
// scan source
var got scanner
var src string
got.init(&buf, func(line, col uint, msg string) {
t.Fatalf("%d:%d: %s", line, col, msg)
t.Fatalf("%s:%d:%d: %s", src, line, col, msg)
}, 0)
got.next()
for i, want := range sampleTokens {
src = want.src
nlsemi := false
if got.line != uint(i+linebase) {
t.Errorf("got line %d; want %d", got.line, i+linebase)
if got.line-linebase != uint(i) {
t.Errorf("%s: got line %d; want %d", src, got.line-linebase, i)
}
if got.tok != want.tok {
t.Errorf("got tok = %s; want %s", got.tok, want.tok)
t.Errorf("%s: got tok %s; want %s", src, got.tok, want.tok)
continue
}
switch want.tok {
case _Semi:
if got.lit != "semicolon" {
t.Errorf("got %s; want semicolon", got.lit)
t.Errorf("%s: got %s; want semicolon", src, got.lit)
}
case _Name, _Literal:
if got.lit != want.src {
t.Errorf("got lit = %q; want %q", got.lit, want.src)
t.Errorf("%s: got lit %q; want %q", src, got.lit, want.src)
continue
}
nlsemi = true
case _Operator, _AssignOp, _IncOp:
if got.op != want.op {
t.Errorf("got op = %s; want %s", got.op, want.op)
t.Errorf("%s: got op %s; want %s", src, got.op, want.op)
continue
}
if got.prec != want.prec {
t.Errorf("got prec = %d; want %d", got.prec, want.prec)
t.Errorf("%s: got prec %d; want %d", src, got.prec, want.prec)
continue
}
nlsemi = want.tok == _IncOp
@ -103,11 +145,11 @@ func TestTokens(t *testing.T) {
if nlsemi {
got.next()
if got.tok != _Semi {
t.Errorf("got tok = %s; want ;", got.tok)
t.Errorf("%s: got tok %s; want ;", src, got.tok)
continue
}
if got.lit != "newline" {
t.Errorf("got %s; want newline", got.lit)
t.Errorf("%s: got %s; want newline", src, got.lit)
}
}
@ -299,7 +341,7 @@ func TestComments(t *testing.T) {
{"//", comment{0, 0, "//"}},
/*-style comments */
{"/* regular comment */", comment{0, 0, "/* regular comment */"}},
{"123/* regular comment */", comment{0, 3, "/* regular comment */"}},
{"package p /* regular comment", comment{0, 0, ""}},
{"\n\n\n/*\n*//* want this one */", comment{4, 2, "/* want this one */"}},
{"\n\n/**/", comment{2, 0, "/**/"}},
@ -307,17 +349,16 @@ func TestComments(t *testing.T) {
} {
var s scanner
var got comment
s.init(strings.NewReader(test.src),
func(line, col uint, msg string) {
if msg[0] != '/' {
// error
if msg != "comment not terminated" {
t.Errorf("%q: %s", test.src, msg)
}
return
s.init(strings.NewReader(test.src), func(line, col uint, msg string) {
if msg[0] != '/' {
// error
if msg != "comment not terminated" {
t.Errorf("%q: %s", test.src, msg)
}
got = comment{line - linebase, col - colbase, msg} // keep last one
}, comments)
return
}
got = comment{line - linebase, col - colbase, msg} // keep last one
}, comments)
for {
s.next()
@ -542,7 +583,7 @@ func TestNumbers(t *testing.T) {
func TestScanErrors(t *testing.T) {
for _, test := range []struct {
src, msg string
src, err string
line, col uint // 0-based
}{
// Note: Positions for lexical errors are the earliest position
@ -555,10 +596,10 @@ func TestScanErrors(t *testing.T) {
{"foo\n\n\xff ", "invalid UTF-8 encoding", 2, 0},
// token-level errors
{"\u00BD" /* ½ */, "invalid identifier character U+00BD '½'", 0, 0},
{"\U0001d736\U0001d737\U0001d738_½" /* 𝜶𝜷𝜸_½ */, "invalid identifier character U+00BD '½'", 0, 13 /* byte offset */},
{"\u00BD" /* ½ */, "invalid character U+00BD '½' in identifier", 0, 0},
{"\U0001d736\U0001d737\U0001d738_½" /* 𝜶𝜷𝜸_½ */, "invalid character U+00BD '½' in identifier", 0, 13 /* byte offset */},
{"\U0001d7d8" /* 𝟘 */, "identifier cannot begin with digit U+1D7D8 '𝟘'", 0, 0},
{"foo\U0001d7d8_½" /* foo𝟘_½ */, "invalid identifier character U+00BD '½'", 0, 8 /* byte offset */},
{"foo\U0001d7d8_½" /* foo𝟘_½ */, "invalid character U+00BD '½' in identifier", 0, 8 /* byte offset */},
{"x + ~y", "invalid character U+007E '~'", 0, 4},
{"foo$bar = 0", "invalid character U+0024 '$'", 0, 3},
@ -567,22 +608,22 @@ func TestScanErrors(t *testing.T) {
{"0123456789e0 /*\nfoobar", "comment not terminated", 0, 13}, // valid float constant
{"var a, b = 09, 07\n", "invalid digit '9' in octal literal", 0, 12},
{`''`, "empty character literal or unescaped ' in character literal", 0, 1},
{"'\n", "newline in character literal", 0, 1},
{`'\`, "invalid character literal (missing closing ')", 0, 0},
{`'\'`, "invalid character literal (missing closing ')", 0, 0},
{`'\x`, "invalid character literal (missing closing ')", 0, 0},
{`'\x'`, "non-hex character in escape sequence: '", 0, 3},
{`'\y'`, "unknown escape sequence", 0, 2},
{`'\x0'`, "non-hex character in escape sequence: '", 0, 4},
{`'\00'`, "non-octal character in escape sequence: '", 0, 4},
{`''`, "empty rune literal or unescaped '", 0, 1},
{"'\n", "newline in rune literal", 0, 1},
{`'\`, "rune literal not terminated", 0, 0},
{`'\'`, "rune literal not terminated", 0, 0},
{`'\x`, "rune literal not terminated", 0, 0},
{`'\x'`, "invalid character '\\'' in hex escape", 0, 3},
{`'\y'`, "unknown escape", 0, 2},
{`'\x0'`, "invalid character '\\'' in hex escape", 0, 4},
{`'\00'`, "invalid character '\\'' in octal escape", 0, 4},
{`'\377' /*`, "comment not terminated", 0, 7}, // valid octal escape
{`'\378`, "non-octal character in escape sequence: 8", 0, 4},
{`'\400'`, "octal escape value > 255: 256", 0, 5},
{`'xx`, "invalid character literal (missing closing ')", 0, 0},
{`'xx'`, "invalid character literal (more than one character)", 0, 0},
{`'\378`, "invalid character '8' in octal escape", 0, 4},
{`'\400'`, "octal escape value 256 > 255", 0, 5},
{`'xx`, "rune literal not terminated", 0, 0},
{`'xx'`, "more than one character in rune literal", 0, 0},
{"\"\n", "newline in string", 0, 1},
{"\n \"foo\n", "newline in string", 1, 7},
{`"`, "string not terminated", 0, 0},
{`"foo`, "string not terminated", 0, 0},
{"`", "string not terminated", 0, 0},
@ -592,42 +633,34 @@ func TestScanErrors(t *testing.T) {
{`"\`, "string not terminated", 0, 0},
{`"\"`, "string not terminated", 0, 0},
{`"\x`, "string not terminated", 0, 0},
{`"\x"`, "non-hex character in escape sequence: \"", 0, 3},
{`"\y"`, "unknown escape sequence", 0, 2},
{`"\x0"`, "non-hex character in escape sequence: \"", 0, 4},
{`"\00"`, "non-octal character in escape sequence: \"", 0, 4},
{`"\x"`, "invalid character '\"' in hex escape", 0, 3},
{`"\y"`, "unknown escape", 0, 2},
{`"\x0"`, "invalid character '\"' in hex escape", 0, 4},
{`"\00"`, "invalid character '\"' in octal escape", 0, 4},
{`"\377" /*`, "comment not terminated", 0, 7}, // valid octal escape
{`"\378"`, "non-octal character in escape sequence: 8", 0, 4},
{`"\400"`, "octal escape value > 255: 256", 0, 5},
{`"\378"`, "invalid character '8' in octal escape", 0, 4},
{`"\400"`, "octal escape value 256 > 255", 0, 5},
{`s := "foo\z"`, "unknown escape sequence", 0, 10},
{`s := "foo\z00\nbar"`, "unknown escape sequence", 0, 10},
{`s := "foo\z"`, "unknown escape", 0, 10},
{`s := "foo\z00\nbar"`, "unknown escape", 0, 10},
{`"\x`, "string not terminated", 0, 0},
{`"\x"`, "non-hex character in escape sequence: \"", 0, 3},
{`var s string = "\x"`, "non-hex character in escape sequence: \"", 0, 18},
{`return "\Uffffffff"`, "escape sequence is invalid Unicode code point U+FFFFFFFF", 0, 18},
{`"\x"`, "invalid character '\"' in hex escape", 0, 3},
{`var s string = "\x"`, "invalid character '\"' in hex escape", 0, 18},
{`return "\Uffffffff"`, "escape is invalid Unicode code point U+FFFFFFFF", 0, 18},
{"0b.0", "invalid radix point in binary literal", 0, 2},
{"0x.p0\n", "hexadecimal literal has no digits", 0, 3},
// former problem cases
{"package p\n\n\xef", "invalid UTF-8 encoding", 2, 0},
} {
var s scanner
nerrors := 0
s.init(strings.NewReader(test.src), func(line, col uint, msg string) {
nerrors++
// only check the first error
if nerrors == 1 {
if msg != test.msg {
t.Errorf("%q: got msg = %q; want %q", test.src, msg, test.msg)
}
if line != test.line+linebase {
t.Errorf("%q: got line = %d; want %d", test.src, line, test.line+linebase)
}
if col != test.col+colbase {
t.Errorf("%q: got col = %d; want %d", test.src, col, test.col+colbase)
}
} else if nerrors > 1 {
// TODO(gri) make this use position info
t.Errorf("%q: got unexpected %q at line = %d", test.src, msg, line)
var line, col uint
var err string
s.init(strings.NewReader(test.src), func(l, c uint, msg string) {
if err == "" {
line, col = l-linebase, c-colbase
err = msg
}
}, 0)
@ -638,8 +671,18 @@ func TestScanErrors(t *testing.T) {
}
}
if nerrors == 0 {
t.Errorf("%q: got no error; want %q", test.src, test.msg)
if err != "" {
if err != test.err {
t.Errorf("%q: got err = %q; want %q", test.src, err, test.err)
}
if line != test.line {
t.Errorf("%q: got line = %d; want %d", test.src, line, test.line)
}
if col != test.col {
t.Errorf("%q: got col = %d; want %d", test.src, col, test.col)
}
} else {
t.Errorf("%q: got no error; want %q", test.src, test.err)
}
}
}
@ -648,7 +691,7 @@ func TestIssue21938(t *testing.T) {
s := "/*" + strings.Repeat(" ", 4089) + "*/ .5"
var got scanner
got.init(strings.NewReader(s), nil, 0)
got.init(strings.NewReader(s), errh, 0)
got.next()
if got.tok != _Literal || got.lit != ".5" {

View File

@ -3,11 +3,10 @@
// license that can be found in the LICENSE file.
// This file implements source, a buffered rune reader
// which is specialized for the needs of the Go scanner:
// Contiguous sequences of runes (literals) are extracted
// directly as []byte without the need to re-encode the
// runes in UTF-8 (as would be necessary with bufio.Reader).
//
// specialized for scanning Go code: Reading
// ASCII characters, maintaining current (line, col)
// position information, and recording of the most
// recently read source segment are highly optimized.
// This file is self-contained (go tool compile source.go
// compiles) and thus could be made into its own package.
@ -18,202 +17,202 @@ import (
"unicode/utf8"
)
// The source buffer is accessed using three indices b (begin),
// r (read), and e (end):
//
// - If b >= 0, it points to the beginning of a segment of most
// recently read characters (typically a Go literal).
//
// - r points to the byte immediately following the most recently
// read character ch, which starts at r-chw.
//
// - e points to the byte immediately following the last byte that
// was read into the buffer.
//
// The buffer content is terminated at buf[e] with the sentinel
// character utf8.RuneSelf. This makes it possible to test for
// the common case of ASCII characters with a single 'if' (see
// nextch method).
//
// +------ content in use -------+
// v v
// buf [...read...|...segment...|ch|...unread...|s|...free...]
// ^ ^ ^ ^
// | | | |
// b r-chw r e
//
// Invariant: -1 <= b < r <= e < len(buf) && buf[e] == sentinel
type source struct {
in io.Reader
errh func(line, col uint, msg string)
buf []byte // source buffer
ioerr error // pending I/O error, or nil
b, r, e int // buffer indices (see comment above)
line, col uint // source position of ch (0-based)
ch rune // most recently read character
chw int // width of ch
}
const sentinel = utf8.RuneSelf
func (s *source) init(in io.Reader, errh func(line, col uint, msg string)) {
s.in = in
s.errh = errh
if s.buf == nil {
s.buf = make([]byte, nextSize(0))
}
s.buf[0] = sentinel
s.ioerr = nil
s.b, s.r, s.e = -1, 0, 0
s.line, s.col = 0, 0
s.ch = ' '
s.chw = 0
}
// starting points for line and column numbers
const linebase = 1
const colbase = 1
// max. number of bytes to unread
const maxunread = 10
// buf [...read...|...|...unread...|s|...free...]
// ^ ^ ^ ^
// | | | |
// suf r0 r w
type source struct {
src io.Reader
errh func(line, pos uint, msg string)
// source buffer
buf [4 << 10]byte
r0, r, w int // previous/current read and write buf positions, excluding sentinel
line0, line uint // previous/current line
col0, col uint // previous/current column (byte offsets from line start)
ioerr error // pending io error
// literal buffer
lit []byte // literal prefix
suf int // literal suffix; suf >= 0 means we are scanning a literal
}
// init initializes source to read from src and to report errors via errh.
// errh must not be nil.
func (s *source) init(src io.Reader, errh func(line, pos uint, msg string)) {
s.src = src
s.errh = errh
s.buf[0] = utf8.RuneSelf // terminate with sentinel
s.r0, s.r, s.w = 0, 0, 0
s.line0, s.line = 0, linebase
s.col0, s.col = 0, colbase
s.ioerr = nil
s.lit = s.lit[:0]
s.suf = -1
}
// ungetr sets the reading position to a previous reading
// position, usually the one of the most recently read
// rune, but possibly earlier (see unread below).
func (s *source) ungetr() {
s.r, s.line, s.col = s.r0, s.line0, s.col0
}
// unread moves the previous reading position to a position
// that is n bytes earlier in the source. The next ungetr
// call will set the reading position to that moved position.
// The "unread" runes must be single byte and not contain any
// newlines; and 0 <= n <= maxunread must hold.
func (s *source) unread(n int) {
s.r0 -= n
s.col0 -= uint(n)
// pos returns the (line, col) source position of s.ch.
func (s *source) pos() (line, col uint) {
return linebase + s.line, colbase + s.col
}
// error reports the error msg at source position s.pos().
func (s *source) error(msg string) {
s.errh(s.line0, s.col0, msg)
line, col := s.pos()
s.errh(line, col, msg)
}
// getr reads and returns the next rune.
//
// If a read or source encoding error occurs, getr
// calls the error handler installed with init.
// The handler must exist.
//
// The (line, col) position passed to the error handler
// is always at the current source reading position.
func (s *source) getr() rune {
// start starts a new active source segment (including s.ch).
// As long as stop has not been called, the active segment's
// bytes (excluding s.ch) may be retrieved by calling segment.
func (s *source) start() { s.b = s.r - s.chw }
func (s *source) stop() { s.b = -1 }
func (s *source) segment() []byte { return s.buf[s.b : s.r-s.chw] }
// rewind rewinds the scanner's read position and character s.ch
// to the start of the currently active segment, which must not
// contain any newlines (otherwise position information will be
// incorrect). Currently, rewind is only needed for handling the
// source sequence ".."; it must not be called outside an active
// segment.
func (s *source) rewind() {
// ok to verify precondition - rewind is rarely called
if s.b < 0 {
panic("no active segment")
}
s.col -= uint(s.r - s.b)
s.r = s.b
s.nextch()
}
func (s *source) nextch() {
redo:
s.r0, s.line0, s.col0 = s.r, s.line, s.col
// We could avoid at least one test that is always taken in the
// for loop below by duplicating the common case code (ASCII)
// here since we always have at least the sentinel (utf8.RuneSelf)
// in the buffer. Measure and optimize if necessary.
// make sure we have at least one rune in buffer, or we are at EOF
for s.r+utf8.UTFMax > s.w && !utf8.FullRune(s.buf[s.r:s.w]) && s.ioerr == nil && s.w-s.r < len(s.buf) {
s.fill() // s.w-s.r < len(s.buf) => buffer is not full
s.col += uint(s.chw)
if s.ch == '\n' {
s.line++
s.col = 0
}
// common case: ASCII and enough bytes
// (invariant: s.buf[s.w] == utf8.RuneSelf)
if b := s.buf[s.r]; b < utf8.RuneSelf {
// fast common case: at least one ASCII character
if s.ch = rune(s.buf[s.r]); s.ch < sentinel {
s.r++
// TODO(gri) Optimization: Instead of adjusting s.col for each character,
// remember the line offset instead and then compute the offset as needed
// (which is less often).
s.col++
if b == 0 {
s.chw = 1
if s.ch == 0 {
s.error("invalid NUL character")
goto redo
}
if b == '\n' {
s.line++
s.col = colbase
}
return rune(b)
return
}
// slower general case: add more bytes to buffer if we don't have a full rune
for s.e-s.r < utf8.UTFMax && !utf8.FullRune(s.buf[s.r:s.e]) && s.ioerr == nil {
s.fill()
}
// EOF
if s.r == s.w {
if s.r == s.e {
if s.ioerr != io.EOF {
// ensure we never start with a '/' (e.g., rooted path) in the error message
s.error("I/O error: " + s.ioerr.Error())
s.ioerr = nil
}
return -1
s.ch = -1
s.chw = 0
return
}
// uncommon case: not ASCII
r, w := utf8.DecodeRune(s.buf[s.r:s.w])
s.r += w
s.col += uint(w)
s.ch, s.chw = utf8.DecodeRune(s.buf[s.r:s.e])
s.r += s.chw
if r == utf8.RuneError && w == 1 {
if s.ch == utf8.RuneError && s.chw == 1 {
s.error("invalid UTF-8 encoding")
goto redo
}
// BOM's are only allowed as the first character in a file
const BOM = 0xfeff
if r == BOM {
if s.r0 > 0 { // s.r0 is always > 0 after 1st character (fill will set it to maxunread)
if s.ch == BOM {
if s.line > 0 || s.col > 0 {
s.error("invalid BOM in the middle of the file")
}
goto redo
}
return r
}
// fill reads more source bytes into s.buf.
// It returns with at least one more byte in the buffer, or with s.ioerr != nil.
func (s *source) fill() {
// Slide unread bytes to beginning but preserve last read char
// (for one ungetr call) plus maxunread extra bytes (for one
// unread call).
if s.r0 > maxunread {
n := s.r0 - maxunread // number of bytes to slide down
// save literal prefix, if any
// (make sure we keep maxunread bytes and the last
// read char in the buffer)
if s.suf >= 0 {
// we have a literal
if s.suf < n {
// save literal prefix
s.lit = append(s.lit, s.buf[s.suf:n]...)
s.suf = 0
} else {
s.suf -= n
}
}
copy(s.buf[:], s.buf[n:s.w])
s.r0 = maxunread // eqv: s.r0 -= n
s.r -= n
s.w -= n
// determine content to preserve
b := s.r
if s.b >= 0 {
b = s.b
s.b = 0 // after buffer has grown or content has been moved down
}
content := s.buf[b:s.e]
// grow buffer or move content down
if len(content)*2 > len(s.buf) {
s.buf = make([]byte, nextSize(len(s.buf)))
copy(s.buf, content)
} else if b > 0 {
copy(s.buf, content)
}
s.r -= b
s.e -= b
// read more data: try a limited number of times
for i := 100; i > 0; i-- {
n, err := s.src.Read(s.buf[s.w : len(s.buf)-1]) // -1 to leave space for sentinel
for i := 0; i < 10; i++ {
var n int
n, s.ioerr = s.in.Read(s.buf[s.e : len(s.buf)-1]) // -1 to leave space for sentinel
if n < 0 {
panic("negative read") // incorrect underlying io.Reader implementation
}
s.w += n
if n > 0 || err != nil {
s.buf[s.w] = utf8.RuneSelf // sentinel
if err != nil {
s.ioerr = err
}
if n > 0 || s.ioerr != nil {
s.e += n
s.buf[s.e] = sentinel
return
}
// n == 0
}
s.buf[s.w] = utf8.RuneSelf // sentinel
s.buf[s.e] = sentinel
s.ioerr = io.ErrNoProgress
}
func (s *source) startLit() {
s.suf = s.r0
s.lit = s.lit[:0] // reuse lit
}
func (s *source) stopLit() []byte {
lit := s.buf[s.suf:s.r]
if len(s.lit) > 0 {
lit = append(s.lit, lit...)
// nextSize returns the next bigger size for a buffer of a given size.
func nextSize(size int) int {
const min = 4 << 10 // 4K: minimum buffer size
const max = 1 << 20 // 1M: maximum buffer size which is still doubled
if size < min {
return min
}
s.killLit()
return lit
}
func (s *source) killLit() {
s.suf = -1 // no pending literal
if size <= max {
return size << 1
}
return size + max
}

View File

@ -326,7 +326,7 @@ func push(s *gc.SSAGenState, v *ssa.Value) {
}
// popAndSave pops a value off of the floating-point stack and stores
// it in the reigster assigned to v.
// it in the register assigned to v.
func popAndSave(s *gc.SSAGenState, v *ssa.Value) {
r := v.Reg()
if _, ok := s.SSEto387[r]; ok {

View File

@ -941,6 +941,8 @@ func (t *tester) internalLinkPIE() bool {
case "linux-amd64", "linux-arm64",
"android-arm64":
return true
case "windows-amd64", "windows-386", "windows-arm":
return true
}
return false
}
@ -997,6 +999,8 @@ func (t *tester) supportedBuildmode(mode string) bool {
return true
case "darwin-amd64":
return true
case "windows-amd64", "windows-386", "windows-arm":
return true
}
return false

View File

@ -724,6 +724,40 @@ var tests = []test{
},
},
// Merging comments with -src.
{
"merge comments with -src A",
[]string{"-src", p + "/merge", `A`},
[]string{
`A doc`,
`func A`,
`A comment`,
},
[]string{
`Package A doc`,
`Package B doc`,
`B doc`,
`B comment`,
`B doc`,
},
},
{
"merge comments with -src B",
[]string{"-src", p + "/merge", `B`},
[]string{
`B doc`,
`func B`,
`B comment`,
},
[]string{
`Package A doc`,
`Package B doc`,
`A doc`,
`A comment`,
`A doc`,
},
},
// No dups with -u. Issue 21797.
{
"case matching on, no dups",

7
src/cmd/doc/testdata/merge/aa.go vendored Normal file
View File

@ -0,0 +1,7 @@
// Package comment A.
package merge
// A doc.
func A() {
// A comment.
}

7
src/cmd/doc/testdata/merge/bb.go vendored Normal file
View File

@ -0,0 +1,7 @@
// Package comment B.
package merge
// B doc.
func B() {
// B comment.
}

View File

@ -9,6 +9,7 @@ import (
"context"
"debug/elf"
"debug/macho"
"debug/pe"
"flag"
"fmt"
"go/format"
@ -2146,19 +2147,37 @@ func TestBuildmodePIE(t *testing.T) {
switch platform {
case "linux/386", "linux/amd64", "linux/arm", "linux/arm64", "linux/ppc64le", "linux/s390x",
"android/amd64", "android/arm", "android/arm64", "android/386",
"freebsd/amd64":
"freebsd/amd64",
"windows/386", "windows/amd64", "windows/arm":
case "darwin/amd64":
default:
t.Skipf("skipping test because buildmode=pie is not supported on %s", platform)
}
t.Run("non-cgo", func(t *testing.T) {
testBuildmodePIE(t, false)
})
if canCgo {
switch runtime.GOOS {
case "darwin", "freebsd", "linux", "windows":
t.Run("cgo", func(t *testing.T) {
testBuildmodePIE(t, true)
})
}
}
}
func testBuildmodePIE(t *testing.T, useCgo bool) {
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
tg.tempFile("main.go", `package main; func main() { print("hello") }`)
var s string
if useCgo {
s = `import "C";`
}
tg.tempFile("main.go", fmt.Sprintf(`package main;%s func main() { print("hello") }`, s))
src := tg.path("main.go")
obj := tg.path("main")
obj := tg.path("main.exe")
tg.run("build", "-buildmode=pie", "-o", obj, src)
switch runtime.GOOS {
@ -2183,6 +2202,38 @@ func TestBuildmodePIE(t *testing.T) {
if f.Flags&macho.FlagPIE == 0 {
t.Error("PIE must have PIE flag, but not")
}
case "windows":
f, err := pe.Open(obj)
if err != nil {
t.Fatal(err)
}
defer f.Close()
const (
IMAGE_FILE_RELOCS_STRIPPED = 0x0001
IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA = 0x0020
IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE = 0x0040
)
if f.Section(".reloc") == nil {
t.Error(".reloc section is not present")
}
if (f.FileHeader.Characteristics & IMAGE_FILE_RELOCS_STRIPPED) != 0 {
t.Error("IMAGE_FILE_RELOCS_STRIPPED flag is set")
}
var dc uint16
switch oh := f.OptionalHeader.(type) {
case *pe.OptionalHeader32:
dc = oh.DllCharacteristics
case *pe.OptionalHeader64:
dc = oh.DllCharacteristics
if (dc & IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA) == 0 {
t.Error("IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA flag is not set")
}
default:
t.Fatalf("unexpected optional header type of %T", f.OptionalHeader)
}
if (dc & IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE) == 0 {
t.Error("IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE flag is not set")
}
default:
panic("unreachable")
}
@ -2380,30 +2431,6 @@ func TestTestCache(t *testing.T) {
tg.setenv("GOPATH", tg.tempdir)
tg.setenv("GOCACHE", tg.path("cache"))
if runtime.Compiler != "gccgo" {
// timeout here should not affect result being cached
// or being retrieved later.
tg.run("test", "-x", "-timeout=10s", "errors")
tg.grepStderr(`[\\/]compile|gccgo`, "did not run compiler")
tg.grepStderr(`[\\/]link|gccgo`, "did not run linker")
tg.grepStderr(`errors\.test`, "did not run test")
tg.run("test", "-x", "errors")
tg.grepStdout(`ok \terrors\t\(cached\)`, "did not report cached result")
tg.grepStderrNot(`[\\/]compile|gccgo`, "incorrectly ran compiler")
tg.grepStderrNot(`[\\/]link|gccgo`, "incorrectly ran linker")
tg.grepStderrNot(`errors\.test`, "incorrectly ran test")
tg.grepStderrNot("DO NOT USE", "poisoned action status leaked")
// Even very low timeouts do not disqualify cached entries.
tg.run("test", "-timeout=1ns", "-x", "errors")
tg.grepStderrNot(`errors\.test`, "incorrectly ran test")
tg.run("clean", "-testcache")
tg.run("test", "-x", "errors")
tg.grepStderr(`errors\.test`, "did not run test")
}
// The -p=1 in the commands below just makes the -x output easier to read.
t.Log("\n\nINITIAL\n\n")

View File

@ -286,11 +286,12 @@ func download(arg string, parent *load.Package, stk *load.ImportStack, mode int)
if wildcardOkay && strings.Contains(arg, "...") {
match := search.NewMatch(arg)
if match.IsLocal() {
match.MatchPackagesInFS()
match.MatchDirs()
args = match.Dirs
} else {
match.MatchPackages()
args = match.Pkgs
}
args = match.Pkgs
for _, err := range match.Errs {
base.Errorf("%s", err)
}

View File

@ -65,24 +65,13 @@ func ImportPaths(patterns []string) []*search.Match {
// packages. The build tags should typically be imports.Tags() or
// imports.AnyTags(); a nil map has no special meaning.
func ImportPathsQuiet(patterns []string, tags map[string]bool) []*search.Match {
var fsDirs [][]string
updateMatches := func(matches []*search.Match, iterating bool) {
for i, m := range matches {
for _, m := range matches {
switch {
case m.IsLocal():
// Evaluate list of file system directories on first iteration.
if fsDirs == nil {
fsDirs = make([][]string, len(matches))
}
if fsDirs[i] == nil {
if m.IsLiteral() {
fsDirs[i] = []string{m.Pattern()}
} else {
m.MatchPackagesInFS()
// Pull out the matching directories: we are going to resolve them
// to package paths below.
fsDirs[i], m.Pkgs = m.Pkgs, nil
}
if m.Dirs == nil {
matchLocalDirs(m)
}
// Make a copy of the directory list and translate to import paths.
@ -91,10 +80,9 @@ func ImportPathsQuiet(patterns []string, tags map[string]bool) []*search.Match {
// from not being in the build list to being in it and back as
// the exact version of a particular module increases during
// the loader iterations.
pkgs := str.StringList(fsDirs[i])
m.Pkgs = pkgs[:0]
for _, pkg := range pkgs {
pkg, err := resolveLocalPackage(pkg)
m.Pkgs = m.Pkgs[:0]
for _, dir := range m.Dirs {
pkg, err := resolveLocalPackage(dir)
if err != nil {
if !m.IsLiteral() && (err == errPkgIsBuiltin || err == errPkgIsGorootSrc) {
continue // Don't include "builtin" or GOROOT/src in wildcard patterns.
@ -131,7 +119,7 @@ func ImportPathsQuiet(patterns []string, tags map[string]bool) []*search.Match {
}
case m.Pattern() == "std" || m.Pattern() == "cmd":
if len(m.Pkgs) == 0 {
if m.Pkgs == nil {
m.MatchPackages() // Locate the packages within GOROOT/src.
}
@ -186,6 +174,34 @@ func checkMultiplePaths() {
base.ExitIfErrors()
}
// matchLocalDirs is like m.MatchDirs, but tries to avoid scanning directories
// outside of the standard library and active modules.
func matchLocalDirs(m *search.Match) {
if !m.IsLocal() {
panic(fmt.Sprintf("internal error: resolveLocalDirs on non-local pattern %s", m.Pattern()))
}
if i := strings.Index(m.Pattern(), "..."); i >= 0 {
// The pattern is local, but it is a wildcard. Its packages will
// only resolve to paths if they are inside of the standard
// library, the main module, or some dependency of the main
// module. Verify that before we walk the filesystem: a filesystem
// walk in a directory like /var or /etc can be very expensive!
dir := filepath.Dir(filepath.Clean(m.Pattern()[:i+3]))
absDir := dir
if !filepath.IsAbs(dir) {
absDir = filepath.Join(base.Cwd, dir)
}
if search.InDir(absDir, cfg.GOROOTsrc) == "" && search.InDir(absDir, ModRoot()) == "" && pathInModuleCache(absDir) == "" {
m.Dirs = []string{}
m.AddError(fmt.Errorf("directory prefix %s outside available modules", base.ShortPath(absDir)))
return
}
}
m.MatchDirs()
}
// resolveLocalPackage resolves a filesystem path to a package path.
func resolveLocalPackage(dir string) (string, error) {
var absDir string
@ -269,7 +285,11 @@ func resolveLocalPackage(dir string) (string, error) {
}
if sub := search.InDir(absDir, cfg.GOROOTsrc); sub != "" && sub != "." && !strings.Contains(sub, "@") {
return filepath.ToSlash(sub), nil
pkg := filepath.ToSlash(sub)
if pkg == "builtin" {
return "", errPkgIsBuiltin
}
return pkg, nil
}
pkg := pathInModuleCache(absDir)

View File

@ -9,11 +9,13 @@ package renameio
import (
"encoding/binary"
"errors"
"internal/testenv"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"runtime"
"strings"
"sync"
"sync/atomic"
"syscall"
@ -24,6 +26,10 @@ import (
)
func TestConcurrentReadsAndWrites(t *testing.T) {
if runtime.GOOS == "darwin" && strings.HasSuffix(testenv.Builder(), "-10_14") {
testenv.SkipFlaky(t, 33041)
}
dir, err := ioutil.TempDir("", "renameio")
if err != nil {
t.Fatal(err)

View File

@ -19,7 +19,8 @@ import (
// A Match represents the result of matching a single package pattern.
type Match struct {
pattern string // the pattern itself
Pkgs []string // matching packages (dirs or import paths)
Dirs []string // if the pattern is local, directories that potentially contain matching packages
Pkgs []string // matching packages (import paths)
Errs []error // errors matching the patterns to packages, NOT errors loading those packages
// Errs may be non-empty even if len(Pkgs) > 0, indicating that some matching
@ -84,20 +85,25 @@ func (e *MatchError) Unwrap() error {
return e.Err
}
// MatchPackages sets m.Pkgs to contain all the packages that can be found
// under the $GOPATH directories and $GOROOT matching pattern.
// The pattern is either "all" (all packages), "std" (standard packages),
// "cmd" (standard commands), or a path including "...".
// MatchPackages sets m.Pkgs to a non-nil slice containing all the packages that
// can be found under the $GOPATH directories and $GOROOT that match the
// pattern. The pattern must be either "all" (all packages), "std" (standard
// packages), "cmd" (standard commands), or a path including "...".
//
// MatchPackages sets m.Errs to contain any errors encountered while processing
// the match.
// If any errors may have caused the set of packages to be incomplete,
// MatchPackages appends those errors to m.Errs.
func (m *Match) MatchPackages() {
m.Pkgs, m.Errs = nil, nil
m.Pkgs = []string{}
if m.IsLocal() {
m.AddError(fmt.Errorf("internal error: MatchPackages: %s is not a valid package pattern", m.pattern))
return
}
if m.IsLiteral() {
m.Pkgs = []string{m.pattern}
return
}
match := func(string) bool { return true }
treeCanMatch := func(string) bool { return true }
if !m.IsMeta() {
@ -197,16 +203,22 @@ func SetModRoot(dir string) {
modRoot = dir
}
// MatchPackagesInFS is like MatchPackages but is passed a pattern that
// begins with an absolute path or "./" or "../". On Windows, the pattern may
// use slash or backslash separators or a mix of both.
// MatchDirs sets m.Dirs to a non-nil slice containing all directories that
// potentially match a local pattern. The pattern must begin with an absolute
// path, or "./", or "../". On Windows, the pattern may use slash or backslash
// separators or a mix of both.
//
// MatchPackagesInFS scans the tree rooted at the directory that contains the
// first "..." wildcard.
func (m *Match) MatchPackagesInFS() {
m.Pkgs, m.Errs = nil, nil
// If any errors may have caused the set of directories to be incomplete,
// MatchDirs appends those errors to m.Errs.
func (m *Match) MatchDirs() {
m.Dirs = []string{}
if !m.IsLocal() {
m.AddError(fmt.Errorf("internal error: MatchPackagesInFS: %s is not a valid filesystem pattern", m.pattern))
m.AddError(fmt.Errorf("internal error: MatchDirs: %s is not a valid filesystem pattern", m.pattern))
return
}
if m.IsLiteral() {
m.Dirs = []string{m.pattern}
return
}
@ -301,7 +313,7 @@ func (m *Match) MatchPackagesInFS() {
// which is all that Match promises to do.
// Ignore the import error.
}
m.Pkgs = append(m.Pkgs, name)
m.Dirs = append(m.Dirs, name)
return nil
})
if err != nil {
@ -416,25 +428,23 @@ func ImportPathsQuiet(patterns []string) []*Match {
for _, a := range CleanPatterns(patterns) {
m := NewMatch(a)
if m.IsLocal() {
if m.IsLiteral() {
m.Pkgs = []string{a}
} else {
m.MatchPackagesInFS()
}
m.MatchDirs()
// Change the file import path to a regular import path if the package
// is in GOPATH or GOROOT. We don't report errors here; LoadImport
// (or something similar) will report them later.
for i, dir := range m.Pkgs {
m.Pkgs = make([]string, len(m.Dirs))
for i, dir := range m.Dirs {
absDir := dir
if !filepath.IsAbs(dir) {
dir = filepath.Join(base.Cwd, dir)
absDir = filepath.Join(base.Cwd, dir)
}
if bp, _ := cfg.BuildContext.ImportDir(dir, build.FindOnly); bp.ImportPath != "" && bp.ImportPath != "." {
if bp, _ := cfg.BuildContext.ImportDir(absDir, build.FindOnly); bp.ImportPath != "" && bp.ImportPath != "." {
m.Pkgs[i] = bp.ImportPath
} else {
m.Pkgs[i] = dir
}
}
} else if m.IsLiteral() {
m.Pkgs = []string{a}
} else {
m.MatchPackages()
}

View File

@ -1239,6 +1239,14 @@ func (c *runCache) builderRunTest(b *work.Builder, a *work.Action) error {
if len(out) == 0 {
fmt.Fprintf(cmd.Stdout, "%s\n", err)
}
// NOTE(golang.org/issue/37555): test2json reports that a test passes
// unless "FAIL" is printed at the beginning of a line. The test may not
// actually print that if it panics, exits, or terminates abnormally,
// so we print it here. We can't always check whether it was printed
// because some tests need stdout to be a terminal (golang.org/issue/34791),
// not a pipe.
// TODO(golang.org/issue/29062): tests that exit with status 0 without
// printing a final result should fail.
fmt.Fprintf(cmd.Stdout, "FAIL\t%s\t%s\n", a.Package.ImportPath, t)
}
@ -1291,16 +1299,13 @@ func (c *runCache) tryCacheWithID(b *work.Builder, a *work.Action, id string) bo
"-test.parallel",
"-test.run",
"-test.short",
"-test.timeout",
"-test.v":
// These are cacheable.
// Note that this list is documented above,
// so if you add to this list, update the docs too.
cacheArgs = append(cacheArgs, arg)
case "-test.timeout":
// Special case: this is cacheable but ignored during the hash.
// Do not add to cacheArgs.
default:
// nothing else is cacheable
if cache.DebugTest {

View File

@ -161,8 +161,12 @@ func buildModeInit() {
}
if gccgo {
codegenArg = "-fPIE"
} else if cfg.Goos != "aix" {
codegenArg = "-shared"
} else {
switch cfg.Goos {
case "aix", "windows":
default:
codegenArg = "-shared"
}
}
ldBuildmode = "pie"
case "shared":

View File

@ -14,6 +14,16 @@ go list cmd/...
stdout ^cmd/compile
! stdout ^cmd/vendor/golang\.org/x/arch/x86/x86asm
# GOROOT/src/... should list the packages in std as if it were a module
# dependency: omitting vendored dependencies and stopping at the 'cmd' module
# boundary.
go list $GOROOT/src/...
stdout ^bytes$
! stdout ^builtin$
! stdout ^cmd/
! stdout ^vendor/
# Within the std module, listing ./... should omit the 'std' prefix:
# the package paths should be the same via ./... or the 'std' meta-pattern.

View File

@ -29,6 +29,23 @@ go test testcache -run=TestLookupEnv
go test testcache -run=TestLookupEnv
stdout '\(cached\)'
# Changes in arguments forwarded to the test should invalidate cached test
# results.
go test testcache -run=TestOSArgs -v hello
! stdout '\(cached\)'
stdout 'hello'
go test testcache -run=TestOSArgs -v goodbye
! stdout '\(cached\)'
stdout 'goodbye'
# golang.org/issue/36134: that includes the `-timeout` argument.
go test testcache -run=TestOSArgs -timeout=20m -v
! stdout '\(cached\)'
stdout '-test\.timeout[= ]20m'
go test testcache -run=TestOSArgs -timeout=5s -v
! stdout '\(cached\)'
stdout '-test\.timeout[= ]5s'
# If the test stats a file, changes to the file should invalidate the cache.
go test testcache -run=FileSize
go test testcache -run=FileSize
@ -207,6 +224,10 @@ func TestExternalFile(t *testing.T) {
t.Fatal(err)
}
}
// TestOSArgs logs the test binary's full command line. The invoking
// script greps this output (run with -v) to verify which flags, such
// as -test.timeout, were forwarded to the test process.
func TestOSArgs(t *testing.T) {
	t.Log(os.Args)
}
-- mkold.go --
package main

View File

@ -0,0 +1,69 @@
# Verifies golang.org/issue/37555.
[short] skip
# 'go test -json' should say a test passes if it says it passes.
go test -json ./pass
stdout '"Action":"pass".*\n\z'
! stdout '"Test":.*\n\z'
# 'go test -json' should say a test passes if it exits 0 and prints nothing.
# TODO(golang.org/issue/29062): this should fail in the future.
go test -json ./exit0main
stdout '"Action":"pass".*\n\z'
! stdout '"Test":.*\n\z'
# 'go test -json' should say a test fails if it exits 1 and prints nothing.
! go test -json ./exit1main
stdout '"Action":"fail".*\n\z'
! stdout '"Test":.*\n\z'
# 'go test -json' should say a test fails if it panics.
! go test -json ./panic
stdout '"Action":"fail".*\n\z'
! stdout '"Test":.*\n\z'
-- go.mod --
module example.com/test
go 1.14
-- pass/pass_test.go --
package pass_test
import "testing"
func TestPass(t *testing.T) {}
-- exit0main/exit0main_test.go --
package exit0_test
import (
"os"
"testing"
)
func TestMain(m *testing.M) {
os.Exit(0)
}
-- exit1main/exit1main_test.go --
package exit1_test
import (
"os"
"testing"
)
func TestMain(m *testing.M) {
os.Exit(1)
}
-- panic/panic_test.go --
package panic_test
import "testing"
func TestPanic(t *testing.T) {
panic("oh no")
}

View File

@ -22,8 +22,6 @@ stdout '^\tpath\trsc.io/fortune'
stdout '^\tmod\trsc.io/fortune\tv1.0.0'
# Repeat the test with -buildmode=pie.
# TODO(golang.org/issue/27144): don't skip after -buildmode=pie is implemented
# on Windows.
[!buildmode:pie] stop
go build -buildmode=pie -o external.exe rsc.io/fortune
go version external.exe
@ -33,8 +31,8 @@ stdout '^\tpath\trsc.io/fortune'
stdout '^\tmod\trsc.io/fortune\tv1.0.0'
# Also test PIE with internal linking.
# currently only supported on linux/amd64 and linux/arm64.
[!linux] stop
# currently only supported on linux/amd64, linux/arm64 and windows/amd64.
[!linux] [!windows] stop
[!amd64] [!arm64] stop
go build -buildmode=pie -ldflags=-linkmode=internal -o internal.exe rsc.io/fortune
go version internal.exe

View File

@ -43,6 +43,7 @@ const (
NSYM = 50
NREG = 32 /* number of general registers */
NFREG = 32 /* number of floating point registers */
NWREG = 32 /* number of MSA registers */
)
const (
@ -180,6 +181,41 @@ const (
REG_FCR30
REG_FCR31
// MSA registers
// The lower bits of W registers are alias to F registers
REG_W0 // must be a multiple of 32
REG_W1
REG_W2
REG_W3
REG_W4
REG_W5
REG_W6
REG_W7
REG_W8
REG_W9
REG_W10
REG_W11
REG_W12
REG_W13
REG_W14
REG_W15
REG_W16
REG_W17
REG_W18
REG_W19
REG_W20
REG_W21
REG_W22
REG_W23
REG_W24
REG_W25
REG_W26
REG_W27
REG_W28
REG_W29
REG_W30
REG_W31
REG_HI
REG_LO
@ -217,6 +253,8 @@ func init() {
f(REG_F0, REG_F31, 32) // For 32-bit MIPS, compiler only uses even numbered registers -- see cmd/compile/internal/ssa/gen/MIPSOps.go
MIPSDWARFRegisters[REG_HI] = 64
MIPSDWARFRegisters[REG_LO] = 65
// The lower bits of W registers are alias to F registers
f(REG_W0, REG_W31, 32)
}
const (
@ -243,6 +281,7 @@ const (
C_FREG
C_FCREG
C_MREG /* special processor register */
C_WREG /* MSA registers */
C_HI
C_LO
C_ZCON
@ -405,6 +444,12 @@ const (
AMOVVF
AMOVVD
/* MSA */
AVMOVB
AVMOVH
AVMOVW
AVMOVD
ALAST
// aliases
@ -430,4 +475,7 @@ func init() {
if REG_FCR0%32 != 0 {
panic("REG_FCR0 is not a multiple of 32")
}
if REG_W0%32 != 0 {
panic("REG_W0 is not a multiple of 32")
}
}

View File

@ -127,5 +127,9 @@ var Anames = []string{
"MOVDV",
"MOVVF",
"MOVVD",
"VMOVB",
"VMOVH",
"VMOVW",
"VMOVD",
"LAST",
}

View File

@ -10,6 +10,7 @@ var cnames0 = []string{
"FREG",
"FCREG",
"MREG",
"WREG",
"HI",
"LO",
"ZCON",

View File

@ -377,6 +377,11 @@ var optab = []Optab{
{ATEQ, C_SCON, C_NONE, C_REG, 15, 4, 0, 0, 0},
{ACMOVT, C_REG, C_NONE, C_REG, 17, 4, 0, 0, 0},
{AVMOVB, C_SCON, C_NONE, C_WREG, 56, 4, 0, sys.MIPS64, 0},
{AVMOVB, C_ADDCON, C_NONE, C_WREG, 56, 4, 0, sys.MIPS64, 0},
{AVMOVB, C_SOREG, C_NONE, C_WREG, 57, 4, 0, sys.MIPS64, 0},
{AVMOVB, C_WREG, C_NONE, C_SOREG, 58, 4, 0, sys.MIPS64, 0},
{ABREAK, C_REG, C_NONE, C_SEXT, 7, 4, REGSB, sys.MIPS64, 0}, /* really CACHE instruction */
{ABREAK, C_REG, C_NONE, C_SAUTO, 7, 4, REGSP, sys.MIPS64, 0},
{ABREAK, C_REG, C_NONE, C_SOREG, 7, 4, REGZERO, sys.MIPS64, 0},
@ -556,6 +561,9 @@ func (c *ctxt0) aclass(a *obj.Addr) int {
if REG_FCR0 <= a.Reg && a.Reg <= REG_FCR31 {
return C_FCREG
}
if REG_W0 <= a.Reg && a.Reg <= REG_W31 {
return C_WREG
}
if a.Reg == REG_LO {
return C_LO
}
@ -1029,6 +1037,11 @@ func buildop(ctxt *obj.Link) {
case AMOVVL:
opset(AMOVVR, r0)
case AVMOVB:
opset(AVMOVH, r0)
opset(AVMOVW, r0)
opset(AVMOVD, r0)
case AMOVW,
AMOVD,
AMOVF,
@ -1121,6 +1134,14 @@ func OP_JMP(op uint32, i uint32) uint32 {
return op | i&0x3FFFFFF
}
// OP_VI10 packs the fields of an MSA immediate-operand instruction into
// a 32-bit word: the fixed MSA major opcode 0x1e in bits 31-26, a 3-bit
// op in bits 25-23, a 2-bit data format in bits 22-21, a 10-bit
// immediate in bits 20-11, a 5-bit register in bits 10-6, and a 6-bit
// minor opcode in bits 5-0. Out-of-range inputs are silently masked.
func OP_VI10(op uint32, df uint32, s10 int32, wd uint32, minor uint32) uint32 {
	const msa = 0x1e << 26
	imm := uint32(s10&0x3FF) << 11
	return msa | (op&7)<<23 | (df&3)<<21 | imm | (wd&31)<<6 | minor&0x3F
}
// OP_VMI10 packs the fields of an MSA memory-operand instruction into a
// 32-bit word: the fixed MSA major opcode 0x1e in bits 31-26, a 10-bit
// immediate in bits 25-16, a 5-bit base register in bits 15-11, a 5-bit
// vector register in bits 10-6, a 4-bit minor opcode in bits 5-2, and a
// 2-bit data format in bits 1-0. Out-of-range inputs are silently masked.
func OP_VMI10(s10 int32, rs uint32, wd uint32, minor uint32, df uint32) uint32 {
	const msa = 0x1e << 26
	imm := uint32(s10&0x3FF) << 16
	return msa | imm | (rs&31)<<11 | (wd&31)<<6 | (minor&15)<<2 | df&3
}
func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) {
o1 := uint32(0)
o2 := uint32(0)
@ -1629,6 +1650,19 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) {
rel.Sym = p.From.Sym
rel.Add = p.From.Offset
rel.Type = objabi.R_ADDRMIPSTLS
case 56: /* vmov{b,h,w,d} $scon, wr */
v := c.regoff(&p.From)
o1 = OP_VI10(110, c.twobitdf(p.As), v, uint32(p.To.Reg), 7)
case 57: /* vld $soreg, wr */
v := c.lsoffset(p.As, c.regoff(&p.From))
o1 = OP_VMI10(v, uint32(p.From.Reg), uint32(p.To.Reg), 8, c.twobitdf(p.As))
case 58: /* vst wr, $soreg */
v := c.lsoffset(p.As, c.regoff(&p.To))
o1 = OP_VMI10(v, uint32(p.To.Reg), uint32(p.From.Reg), 9, c.twobitdf(p.As))
}
out[0] = o1
@ -2009,3 +2043,43 @@ func vshift(a obj.As) bool {
}
return false
}
// twobitdf maps an MSA vector move mnemonic to its two-bit Data Format
// field encoding: B (byte) = 0, H (halfword) = 1, W (word) = 2,
// D (doubleword) = 3. Any other opcode is reported via Diag and
// encoded as 0.
func (c *ctxt0) twobitdf(a obj.As) uint32 {
	var df uint32
	switch a {
	case AVMOVB:
		df = 0
	case AVMOVH:
		df = 1
	case AVMOVW:
		df = 2
	case AVMOVD:
		df = 3
	default:
		c.ctxt.Diag("unsupported data format %v", a)
	}
	return df
}
// lsoffset validates and scales an MSA vector load/store offset.
// The byte offset o must be a multiple of the element size implied by
// opcode a (1 for AVMOVB, 2 for AVMOVH, 4 for AVMOVW, 8 for AVMOVD);
// the returned value is o divided by that size, suitable for the
// instruction's scaled offset field. Violations are reported via Diag.
func (c *ctxt0) lsoffset(a obj.As, o int32) int32 {
	var mod int32
	switch a {
	case AVMOVB:
		mod = 1
	case AVMOVH:
		mod = 2
	case AVMOVW:
		mod = 4
	case AVMOVD:
		mod = 8
	default:
		c.ctxt.Diag("unsupported instruction:%v", a)
		// Bail out now: Diag records the error but does not abort,
		// and with mod still zero the o%mod check below would panic
		// with an integer divide-by-zero.
		return 0
	}

	if o%mod != 0 {
		c.ctxt.Diag("invalid offset for %v: %d is not a multiple of %d", a, o, mod)
	}

	return o / mod
}

View File

@ -59,6 +59,9 @@ func rconv(r int) string {
if REG_FCR0 <= r && r <= REG_FCR31 {
return fmt.Sprintf("FCR%d", r-REG_FCR0)
}
if REG_W0 <= r && r <= REG_W31 {
return fmt.Sprintf("W%d", r-REG_W0)
}
if r == REG_HI {
return "HI"
}

View File

@ -87,7 +87,8 @@ func BuildModeSupported(compiler, buildmode, goos, goarch string) bool {
"android/amd64", "android/arm", "android/arm64", "android/386",
"freebsd/amd64",
"darwin/amd64",
"aix/ppc64":
"aix/ppc64",
"windows/386", "windows/amd64", "windows/arm":
return true
}
return false

View File

@ -128,9 +128,16 @@ func (c *converter) Write(b []byte) (int, error) {
}
var (
// printed by test on successful run.
bigPass = []byte("PASS\n")
// printed by test after a normal test failure.
bigFail = []byte("FAIL\n")
// printed by 'go test' along with an error if the test binary terminates
// with an error.
bigFailErrorPrefix = []byte("FAIL\t")
updates = [][]byte{
[]byte("=== RUN "),
[]byte("=== PAUSE "),
@ -155,7 +162,7 @@ var (
// before or after emitting other events.
func (c *converter) handleInputLine(line []byte) {
// Final PASS or FAIL.
if bytes.Equal(line, bigPass) || bytes.Equal(line, bigFail) {
if bytes.Equal(line, bigPass) || bytes.Equal(line, bigFail) || bytes.HasPrefix(line, bigFailErrorPrefix) {
c.flushReport(0)
c.output.write(line)
if bytes.Equal(line, bigPass) {

View File

@ -13,7 +13,7 @@
{"Action":"output","Test":"TestPanic","Output":"\tgo/src/testing/testing.go:909 +0xc9\n"}
{"Action":"output","Test":"TestPanic","Output":"created by testing.(*T).Run\n"}
{"Action":"output","Test":"TestPanic","Output":"\tgo/src/testing/testing.go:960 +0x350\n"}
{"Action":"output","Test":"TestPanic","Output":"FAIL\tcommand-line-arguments\t0.042s\n"}
{"Action":"fail","Test":"TestPanic"}
{"Action":"output","Output":"FAIL\tcommand-line-arguments\t0.042s\n"}
{"Action":"output","Output":"FAIL\n"}
{"Action":"fail"}

View File

@ -38,7 +38,7 @@ func (mode *BuildMode) Set(s string) error {
*mode = BuildModeExe
case "pie":
switch objabi.GOOS {
case "aix", "android", "linux":
case "aix", "android", "linux", "windows":
case "darwin", "freebsd":
switch objabi.GOARCH {
case "amd64":
@ -209,6 +209,7 @@ func mustLinkExternal(ctxt *Link) (res bool, reason string) {
case BuildModePIE:
switch objabi.GOOS + "/" + objabi.GOARCH {
case "linux/amd64", "linux/arm64", "android/arm64":
case "windows/386", "windows/amd64", "windows/arm":
default:
// Internal linking does not support TLS_IE.
return true, "buildmode=pie"

View File

@ -1287,8 +1287,20 @@ func (ctxt *Link) hostlink() {
}
}
case BuildModePIE:
// ELF.
if ctxt.HeadType != objabi.Hdarwin && ctxt.HeadType != objabi.Haix {
switch ctxt.HeadType {
case objabi.Hdarwin, objabi.Haix:
case objabi.Hwindows:
// Enable ASLR.
argv = append(argv, "-Wl,--dynamicbase")
// enable high-entropy ASLR on 64-bit.
if ctxt.Arch.PtrSize >= 8 {
argv = append(argv, "-Wl,--high-entropy-va")
}
// Work around binutils limitation that strips relocation table for dynamicbase.
// See https://sourceware.org/bugzilla/show_bug.cgi?id=19011
argv = append(argv, "-Wl,--export-all-symbols")
default:
// ELF.
if ctxt.UseRelro() {
argv = append(argv, "-Wl,-z,relro")
}

View File

@ -95,6 +95,7 @@ const (
IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR = 14
IMAGE_SUBSYSTEM_WINDOWS_GUI = 2
IMAGE_SUBSYSTEM_WINDOWS_CUI = 3
IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA = 0x0020
IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE = 0x0040
IMAGE_DLLCHARACTERISTICS_NX_COMPAT = 0x0100
IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE = 0x8000
@ -127,6 +128,7 @@ const (
IMAGE_REL_ARM_SECREL = 0x000F
IMAGE_REL_BASED_HIGHLOW = 3
IMAGE_REL_BASED_DIR64 = 10
)
const (
@ -753,12 +755,12 @@ func (f *peFile) writeSymbolTableAndStringTable(ctxt *Link) {
}
// writeFileHeader writes COFF file header for peFile f.
func (f *peFile) writeFileHeader(arch *sys.Arch, out *OutBuf, linkmode LinkMode) {
func (f *peFile) writeFileHeader(ctxt *Link) {
var fh pe.FileHeader
switch arch.Family {
switch ctxt.Arch.Family {
default:
Exitf("unknown PE architecture: %v", arch.Family)
Exitf("unknown PE architecture: %v", ctxt.Arch.Family)
case sys.AMD64:
fh.Machine = IMAGE_FILE_MACHINE_AMD64
case sys.I386:
@ -773,16 +775,15 @@ func (f *peFile) writeFileHeader(arch *sys.Arch, out *OutBuf, linkmode LinkMode)
// much more beneficial than having build timestamp in the header.
fh.TimeDateStamp = 0
if linkmode == LinkExternal {
if ctxt.LinkMode == LinkExternal {
fh.Characteristics = IMAGE_FILE_LINE_NUMS_STRIPPED
} else {
switch arch.Family {
default:
Exitf("write COFF(ext): unknown PE architecture: %v", arch.Family)
fh.Characteristics = IMAGE_FILE_EXECUTABLE_IMAGE | IMAGE_FILE_DEBUG_STRIPPED
switch ctxt.Arch.Family {
case sys.AMD64, sys.I386:
fh.Characteristics = IMAGE_FILE_RELOCS_STRIPPED | IMAGE_FILE_EXECUTABLE_IMAGE | IMAGE_FILE_DEBUG_STRIPPED
case sys.ARM:
fh.Characteristics = IMAGE_FILE_EXECUTABLE_IMAGE | IMAGE_FILE_DEBUG_STRIPPED
if ctxt.BuildMode != BuildModePIE {
fh.Characteristics |= IMAGE_FILE_RELOCS_STRIPPED
}
}
}
if pe64 != 0 {
@ -798,7 +799,7 @@ func (f *peFile) writeFileHeader(arch *sys.Arch, out *OutBuf, linkmode LinkMode)
fh.PointerToSymbolTable = uint32(f.symtabOffset)
fh.NumberOfSymbols = uint32(f.symbolCount)
binary.Write(out, binary.LittleEndian, &fh)
binary.Write(ctxt.Out, binary.LittleEndian, &fh)
}
// writeOptionalHeader writes COFF optional header for peFile f.
@ -860,12 +861,6 @@ func (f *peFile) writeOptionalHeader(ctxt *Link) {
oh.Subsystem = IMAGE_SUBSYSTEM_WINDOWS_CUI
}
switch ctxt.Arch.Family {
case sys.ARM:
oh64.DllCharacteristics = IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE
oh.DllCharacteristics = IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE
}
// Mark as having awareness of terminal services, to avoid ancient compatibility hacks.
oh64.DllCharacteristics |= IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE
oh.DllCharacteristics |= IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE
@ -874,6 +869,23 @@ func (f *peFile) writeOptionalHeader(ctxt *Link) {
oh64.DllCharacteristics |= IMAGE_DLLCHARACTERISTICS_NX_COMPAT
oh.DllCharacteristics |= IMAGE_DLLCHARACTERISTICS_NX_COMPAT
// The DLL can be relocated at load time.
switch ctxt.Arch.Family {
case sys.AMD64, sys.I386:
if ctxt.BuildMode == BuildModePIE {
oh64.DllCharacteristics |= IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE
oh.DllCharacteristics |= IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE
}
case sys.ARM:
oh64.DllCharacteristics |= IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE
oh.DllCharacteristics |= IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE
}
// Image can handle a high entropy 64-bit virtual address space.
if ctxt.BuildMode == BuildModePIE {
oh64.DllCharacteristics |= IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA
}
// Disable stack growth as we don't want Windows to
// fiddle with the thread stack limits, which we set
// ourselves to circumvent the stack checks in the
@ -1005,7 +1017,7 @@ func pewrite(ctxt *Link) {
ctxt.Out.WriteStringN("PE", 4)
}
pefile.writeFileHeader(ctxt.Arch, ctxt.Out, ctxt.LinkMode)
pefile.writeFileHeader(ctxt)
pefile.writeOptionalHeader(ctxt)
@ -1384,6 +1396,8 @@ func (rt *peBaseRelocTable) addentry(ctxt *Link, s *sym.Symbol, r *sym.Reloc) {
Exitf("unsupported relocation size %d\n", r.Siz)
case 4:
e.typeOff |= uint16(IMAGE_REL_BASED_HIGHLOW << 12)
case 8:
e.typeOff |= uint16(IMAGE_REL_BASED_DIR64 << 12)
}
b.entries = append(b.entries, e)
@ -1438,11 +1452,15 @@ func addPEBaseRelocSym(ctxt *Link, s *sym.Symbol, rt *peBaseRelocTable) {
}
func addPEBaseReloc(ctxt *Link) {
// We only generate base relocation table for ARM (and ... ARM64), x86, and AMD64 are marked as legacy
// archs and can use fixed base with no base relocation information
// Arm does not work without base relocation table.
// 386 and amd64 will only require the table for BuildModePIE.
switch ctxt.Arch.Family {
default:
return
case sys.I386, sys.AMD64:
if ctxt.BuildMode != BuildModePIE {
return
}
case sys.ARM:
}

View File

@ -267,7 +267,6 @@ func BenchmarkDecode(b *testing.B) {
func BenchmarkDump(b *testing.B) {
for _, size := range []int{256, 1024, 4096, 16384} {
src := bytes.Repeat([]byte{2, 3, 5, 7, 9, 11, 13, 17}, size/8)
sink = make([]byte, 2*size)
b.Run(fmt.Sprintf("%v", size), func(b *testing.B) {
b.SetBytes(int64(size))

View File

@ -9,9 +9,9 @@
Define flags using flag.String(), Bool(), Int(), etc.
This declares an integer flag, -flagname, stored in the pointer ip, with type *int.
This declares an integer flag, -n, stored in the pointer nFlag, with type *int:
import "flag"
var ip = flag.Int("flagname", 1234, "help message for flagname")
var nFlag = flag.Int("n", 1234, "help message for flag n")
If you like, you can bind the flag to a variable using the Var() functions.
var flagvar int
func init() {

View File

@ -133,13 +133,7 @@ func ParseFile(fset *token.FileSet, filename string, src interface{}, mode Mode)
// first error encountered are returned.
//
func ParseDir(fset *token.FileSet, path string, filter func(os.FileInfo) bool, mode Mode) (pkgs map[string]*ast.Package, first error) {
fd, err := os.Open(path)
if err != nil {
return nil, err
}
defer fd.Close()
list, err := fd.Readdir(-1)
list, err := ioutil.ReadDir(path)
if err != nil {
return nil, err
}

View File

@ -120,6 +120,9 @@ import "fmt"
type Celsius float64
func (c Celsius) String() string { return fmt.Sprintf("%g°C", c) }
func (c *Celsius) SetF(f float64) { *c = Celsius(f - 32 / 9 * 5) }
type S struct { I; m int }
type I interface { m() byte }
`
fset := token.NewFileSet()
f, err := parser.ParseFile(fset, "celsius.go", input, 0)
@ -147,6 +150,11 @@ func (c *Celsius) SetF(f float64) { *c = Celsius(f - 32 / 9 * 5) }
fmt.Println()
}
// Print the method set of S.
styp := pkg.Scope().Lookup("S").Type()
fmt.Printf("Method set of %s:\n", styp)
fmt.Println(types.NewMethodSet(styp))
// Output:
// Method set of temperature.Celsius:
// method (temperature.Celsius) String() string
@ -154,6 +162,9 @@ func (c *Celsius) SetF(f float64) { *c = Celsius(f - 32 / 9 * 5) }
// Method set of *temperature.Celsius:
// method (*temperature.Celsius) SetF(f float64)
// method (*temperature.Celsius) String() string
//
// Method set of temperature.S:
// MethodSet {}
}
// ExampleInfo prints various facts recorded by the type checker in a

Some files were not shown because too many files have changed in this diff Show More