cmd/compile: update rules to generate more prefixed instructions

This modifies some existing rules to allow more prefixed instructions
to be generated when using GOPPC64=power10. Some rules also check
if PCRel is available, which is currently supported on linux/ppc64le
(internal and external linking) and linux/ppc64 (internal linking only).

Prior to p10, DS-offset loads and stores had a 16-bit size limit for
the offset field. If the offset of the data for a load or store was
beyond this range then an indexed load or store would be selected by
the rules.

In p10 the assembler can generate prefixed instructions in this case,
but does not if an indexed instruction was selected during the lowering
pass.

This allows many more cases to use prefixed loads or stores, reducing
function sizes and improving performance in some cases where the code
change happens in key loops.

For example in strconv BenchmarkAppendQuoteRune before:

  12c5e4:       15 00 10 06     pla     r10,1425660
  12c5e8:       fc c0 40 39
  12c5ec:       00 00 6a e8     ld      r3,0(r10)
  12c5f0:       10 00 aa e8     ld      r5,16(r10)

After this change:

  12a828:       15 00 10 04     pld     r3,1433272
  12a82c:       b8 de 60 e4
  12a830:       15 00 10 04     pld     r5,1433280
  12a834:       c0 de a0 e4

Performs better in the second case.

A testcase was added to verify that the rules correctly select a load or
store based on the offset and on whether the target is power10 or earlier.

Change-Id: I4335fed0bd9b8aba8a4f84d69b89f819cc464846
Reviewed-on: https://go-review.googlesource.com/c/go/+/477398
Reviewed-by: Heschi Kreinick <heschi@google.com>
Reviewed-by: Archana Ravindar <aravind5@in.ibm.com>
Reviewed-by: Cherry Mui <cherryyz@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
Run-TryBot: Lynn Boger <laboger@linux.vnet.ibm.com>
Reviewed-by: Paul Murphy <murp@ibm.com>
This commit is contained in:
Lynn Boger 2023-03-17 15:22:31 -05:00
parent 268d2f7cf2
commit 4481042c43
4 changed files with 230 additions and 141 deletions

View File

@ -2,7 +2,10 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// Lowering arithmetic // GOPPC64 values indicate power8, power9, etc.
// That means the code is compiled for that target,
// and will not run on earlier targets.
//
(Add(Ptr|64|32|16|8) ...) => (ADD ...) (Add(Ptr|64|32|16|8) ...) => (ADD ...)
(Add64F ...) => (FADD ...) (Add64F ...) => (FADD ...)
(Add32F ...) => (FADDS ...) (Add32F ...) => (FADDS ...)
@ -705,71 +708,79 @@
(MTVSRD x:(MOVDload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (FMOVDload [off] {sym} ptr mem) (MTVSRD x:(MOVDload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (FMOVDload [off] {sym} ptr mem)
(MFVSRD x:(FMOVDload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVDload [off] {sym} ptr mem) (MFVSRD x:(FMOVDload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVDload [off] {sym} ptr mem)
// Rules for MOV* or FMOV* ops determine when indexed (MOV*loadidx or MOV*storeidx)
// or non-indexed (MOV*load or MOV*store) should be used. Indexed instructions
// require an extra instruction and register to load the index so non-indexed is preferred.
// Indexed ops generate indexed load or store instructions for all GOPPC64 values.
// Non-indexed ops generate DS-form loads and stores when the offset fits in 16 bits,
// and on power8 and power9, a multiple of 4 is required for MOVW and MOVD ops.
// On power10, prefixed loads and stores can be used for offsets > 16 bits and <= 32 bits.
// and support for PC relative addressing must be available if relocation is needed.
// On power10, the assembler will determine when to use DS-form or prefixed
// instructions for non-indexed ops depending on the value of the offset.
//
// Fold offsets for stores. // Fold offsets for stores.
(MOV(D|W|H|B)store [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(int64(off1)+off2) => (MOV(D|W|H|B)store [off1+int32(off2)] {sym} x val mem) (MOV(D|W|H|B)store [off1] {sym} (ADDconst [off2] x) val mem) && (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) => (MOV(D|W|H|B)store [off1+int32(off2)] {sym} x val mem)
(FMOV(S|D)store [off1] {sym} (ADDconst [off2] ptr) val mem) && is16Bit(int64(off1)+off2) => (FMOV(S|D)store [off1+int32(off2)] {sym} ptr val mem) (FMOV(S|D)store [off1] {sym} (ADDconst [off2] ptr) val mem) && (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) => (FMOV(S|D)store [off1+int32(off2)] {sym} ptr val mem)
// Fold address into load/store. // Fold address into load/store.
// The assembler needs to generate several instructions and use // If power10 with PCRel is not available, then
// the assembler needs to generate several instructions and use
// temp register for accessing global, and each time it will reload // temp register for accessing global, and each time it will reload
// the temp register. So don't fold address of global, unless there // the temp register. So don't fold address of global in that case if there is more than
// is only one use. // one use.
(MOV(B|H|W|D)store [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) (MOV(B|H|W|D)store [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
&& is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) => && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) =>
(MOV(B|H|W|D)store [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) (MOV(B|H|W|D)store [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(FMOV(S|D)store [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) (FMOV(S|D)store [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
&& is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) => && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) =>
(FMOV(S|D)store [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) (FMOV(S|D)store [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOV(B|H|W)Zload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) (MOV(B|H|W)Zload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
&& is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) => && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) =>
(MOV(B|H|W)Zload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) (MOV(B|H|W)Zload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOV(H|W|D)load [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) (MOV(H|W|D)load [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
&& is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) => && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) =>
(MOV(H|W|D)load [off1+off2] {mergeSym(sym1,sym2)} ptr mem) (MOV(H|W|D)load [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(FMOV(S|D)load [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) (FMOV(S|D)load [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
&& is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) => && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) =>
(FMOV(S|D)load [off1+off2] {mergeSym(sym1,sym2)} ptr mem) (FMOV(S|D)load [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
// Fold offsets for loads. // Fold offsets for loads.
(FMOV(S|D)load [off1] {sym} (ADDconst [off2] ptr) mem) && is16Bit(int64(off1)+off2) => (FMOV(S|D)load [off1+int32(off2)] {sym} ptr mem) (FMOV(S|D)load [off1] {sym} (ADDconst [off2] ptr) mem) && (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) => (FMOV(S|D)load [off1+int32(off2)] {sym} ptr mem)
(MOV(D|W|WZ|H|HZ|BZ)load [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) => (MOV(D|W|WZ|H|HZ|BZ)load [off1+int32(off2)] {sym} x mem) (MOV(D|W|WZ|H|HZ|BZ)load [off1] {sym} (ADDconst [off2] x) mem) && (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) => (MOV(D|W|WZ|H|HZ|BZ)load [off1+int32(off2)] {sym} x mem)
// Determine load + addressing that can be done as a register indexed load // Determine load + addressing that can be done as a register indexed load
(MOV(D|W|WZ|H|HZ|BZ)load [0] {sym} p:(ADD ptr idx) mem) && sym == nil && p.Uses == 1 => (MOV(D|W|WZ|H|HZ|BZ)loadidx ptr idx mem) (MOV(D|W|WZ|H|HZ|BZ)load [0] {sym} p:(ADD ptr idx) mem) && sym == nil && p.Uses == 1 => (MOV(D|W|WZ|H|HZ|BZ)loadidx ptr idx mem)
// Determine if there is benefit to using a non-indexed load, since that saves the load // See comments above concerning selection of indexed vs. non-indexed ops.
// of the index register. With MOVDload and MOVWload, there is no benefit if the offset // These cases don't have relocation.
// value is not a multiple of 4, since that results in an extra instruction in the base (MOV(D|W)loadidx ptr (MOVDconst [c]) mem) && ((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) => (MOV(D|W)load [int32(c)] ptr mem)
// register address computation. (MOV(WZ|H|HZ|BZ)loadidx ptr (MOVDconst [c]) mem) && (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) => (MOV(WZ|H|HZ|BZ)load [int32(c)] ptr mem)
(MOV(D|W)loadidx ptr (MOVDconst [c]) mem) && is16Bit(c) && c%4 == 0 => (MOV(D|W)load [int32(c)] ptr mem) (MOV(D|W)loadidx (MOVDconst [c]) ptr mem) && ((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) => (MOV(D|W)load [int32(c)] ptr mem)
(MOV(WZ|H|HZ|BZ)loadidx ptr (MOVDconst [c]) mem) && is16Bit(c) => (MOV(WZ|H|HZ|BZ)load [int32(c)] ptr mem) (MOV(WZ|H|HZ|BZ)loadidx (MOVDconst [c]) ptr mem) && (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) => (MOV(WZ|H|HZ|BZ)load [int32(c)] ptr mem)
(MOV(D|W)loadidx (MOVDconst [c]) ptr mem) && is16Bit(c) && c%4 == 0 => (MOV(D|W)load [int32(c)] ptr mem)
(MOV(WZ|H|HZ|BZ)loadidx (MOVDconst [c]) ptr mem) && is16Bit(c) => (MOV(WZ|H|HZ|BZ)load [int32(c)] ptr mem)
// Store of zero => storezero // Store of zero => storezero
(MOV(D|W|H|B)store [off] {sym} ptr (MOVDconst [0]) mem) => (MOV(D|W|H|B)storezero [off] {sym} ptr mem) (MOV(D|W|H|B)store [off] {sym} ptr (MOVDconst [0]) mem) => (MOV(D|W|H|B)storezero [off] {sym} ptr mem)
// Fold offsets for storezero // Fold offsets for storezero
(MOV(D|W|H|B)storezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) => (MOV(D|W|H|B)storezero [off1] {sym} (ADDconst [off2] x) mem) && ((supportsPPC64PCRel() && is32Bit(int64(off1)+off2)) || (is16Bit(int64(off1)+off2))) =>
(MOV(D|W|H|B)storezero [off1+int32(off2)] {sym} x mem) (MOV(D|W|H|B)storezero [off1+int32(off2)] {sym} x mem)
// Stores with addressing that can be done as indexed stores // Stores with addressing that can be done as indexed stores
(MOV(D|W|H|B)store [0] {sym} p:(ADD ptr idx) val mem) && sym == nil && p.Uses == 1 => (MOV(D|W|H|B)storeidx ptr idx val mem) (MOV(D|W|H|B)store [0] {sym} p:(ADD ptr idx) val mem) && sym == nil && p.Uses == 1 => (MOV(D|W|H|B)storeidx ptr idx val mem)
// Stores with constant index values can be done without indexed instructions (MOVDstoreidx ptr (MOVDconst [c]) val mem) && ((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) => (MOVDstore [int32(c)] ptr val mem)
// No need to lower the idx cases if c%4 is not 0 (MOV(W|H|B)storeidx ptr (MOVDconst [c]) val mem) && (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) => (MOV(W|H|B)store [int32(c)] ptr val mem)
(MOVDstoreidx ptr (MOVDconst [c]) val mem) && is16Bit(c) && c%4 == 0 => (MOVDstore [int32(c)] ptr val mem) (MOVDstoreidx (MOVDconst [c]) ptr val mem) && ((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) => (MOVDstore [int32(c)] ptr val mem)
(MOV(W|H|B)storeidx ptr (MOVDconst [c]) val mem) && is16Bit(c) => (MOV(W|H|B)store [int32(c)] ptr val mem) (MOV(W|H|B)storeidx (MOVDconst [c]) ptr val mem) && (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) => (MOV(W|H|B)store [int32(c)] ptr val mem)
(MOVDstoreidx (MOVDconst [c]) ptr val mem) && is16Bit(c) && c%4 == 0 => (MOVDstore [int32(c)] ptr val mem)
(MOV(W|H|B)storeidx (MOVDconst [c]) ptr val mem) && is16Bit(c) => (MOV(W|H|B)store [int32(c)] ptr val mem)
// Fold symbols into storezero // Fold symbols into storezero
(MOV(D|W|H|B)storezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2) (MOV(D|W|H|B)storezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2)
&& (x.Op != OpSB || p.Uses == 1) => && ((is16Bit(int64(off1+off2)) && (x.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) =>
(MOV(D|W|H|B)storezero [off1+off2] {mergeSym(sym1,sym2)} x mem) (MOV(D|W|H|B)storezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
// atomic intrinsics // atomic intrinsics

View File

@ -14,6 +14,7 @@ import (
"cmd/internal/src" "cmd/internal/src"
"encoding/binary" "encoding/binary"
"fmt" "fmt"
"internal/buildcfg"
"io" "io"
"math" "math"
"math/bits" "math/bits"
@ -1438,6 +1439,12 @@ func hasSmallRotate(c *Config) bool {
} }
} }
func supportsPPC64PCRel() bool {
// PCRel is currently supported for >= power10, linux only
// Internal and external linking supports this on ppc64le; internal linking on ppc64.
return buildcfg.GOPPC64 >= 10 && buildcfg.GOOS == "linux"
}
func newPPC64ShiftAuxInt(sh, mb, me, sz int64) int32 { func newPPC64ShiftAuxInt(sh, mb, me, sz int64) int32 {
if sh < 0 || sh >= sz { if sh < 0 || sh >= sz {
panic("PPC64 shift arg sh out of range") panic("PPC64 shift arg sh out of range")

View File

@ -5298,7 +5298,7 @@ func rewriteValuePPC64_OpPPC64FMOVDload(v *Value) bool {
return true return true
} }
// match: (FMOVDload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) // match: (FMOVDload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
// result: (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) // result: (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for { for {
off1 := auxIntToInt32(v.AuxInt) off1 := auxIntToInt32(v.AuxInt)
@ -5311,7 +5311,7 @@ func rewriteValuePPC64_OpPPC64FMOVDload(v *Value) bool {
sym2 := auxToSym(p.Aux) sym2 := auxToSym(p.Aux)
ptr := p.Args[0] ptr := p.Args[0]
mem := v_1 mem := v_1
if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) { if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
break break
} }
v.reset(OpPPC64FMOVDload) v.reset(OpPPC64FMOVDload)
@ -5321,7 +5321,7 @@ func rewriteValuePPC64_OpPPC64FMOVDload(v *Value) bool {
return true return true
} }
// match: (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) // match: (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
// cond: is16Bit(int64(off1)+off2) // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2)))
// result: (FMOVDload [off1+int32(off2)] {sym} ptr mem) // result: (FMOVDload [off1+int32(off2)] {sym} ptr mem)
for { for {
off1 := auxIntToInt32(v.AuxInt) off1 := auxIntToInt32(v.AuxInt)
@ -5332,7 +5332,7 @@ func rewriteValuePPC64_OpPPC64FMOVDload(v *Value) bool {
off2 := auxIntToInt64(v_0.AuxInt) off2 := auxIntToInt64(v_0.AuxInt)
ptr := v_0.Args[0] ptr := v_0.Args[0]
mem := v_1 mem := v_1
if !(is16Bit(int64(off1) + off2)) { if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) {
break break
} }
v.reset(OpPPC64FMOVDload) v.reset(OpPPC64FMOVDload)
@ -5365,7 +5365,7 @@ func rewriteValuePPC64_OpPPC64FMOVDstore(v *Value) bool {
return true return true
} }
// match: (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) // match: (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
// cond: is16Bit(int64(off1)+off2) // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2)))
// result: (FMOVDstore [off1+int32(off2)] {sym} ptr val mem) // result: (FMOVDstore [off1+int32(off2)] {sym} ptr val mem)
for { for {
off1 := auxIntToInt32(v.AuxInt) off1 := auxIntToInt32(v.AuxInt)
@ -5377,7 +5377,7 @@ func rewriteValuePPC64_OpPPC64FMOVDstore(v *Value) bool {
ptr := v_0.Args[0] ptr := v_0.Args[0]
val := v_1 val := v_1
mem := v_2 mem := v_2
if !(is16Bit(int64(off1) + off2)) { if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) {
break break
} }
v.reset(OpPPC64FMOVDstore) v.reset(OpPPC64FMOVDstore)
@ -5387,7 +5387,7 @@ func rewriteValuePPC64_OpPPC64FMOVDstore(v *Value) bool {
return true return true
} }
// match: (FMOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) // match: (FMOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem)
// cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
// result: (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) // result: (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for { for {
off1 := auxIntToInt32(v.AuxInt) off1 := auxIntToInt32(v.AuxInt)
@ -5401,7 +5401,7 @@ func rewriteValuePPC64_OpPPC64FMOVDstore(v *Value) bool {
ptr := p.Args[0] ptr := p.Args[0]
val := v_1 val := v_1
mem := v_2 mem := v_2
if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) { if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
break break
} }
v.reset(OpPPC64FMOVDstore) v.reset(OpPPC64FMOVDstore)
@ -5416,7 +5416,7 @@ func rewriteValuePPC64_OpPPC64FMOVSload(v *Value) bool {
v_1 := v.Args[1] v_1 := v.Args[1]
v_0 := v.Args[0] v_0 := v.Args[0]
// match: (FMOVSload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) // match: (FMOVSload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
// result: (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) // result: (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for { for {
off1 := auxIntToInt32(v.AuxInt) off1 := auxIntToInt32(v.AuxInt)
@ -5429,7 +5429,7 @@ func rewriteValuePPC64_OpPPC64FMOVSload(v *Value) bool {
sym2 := auxToSym(p.Aux) sym2 := auxToSym(p.Aux)
ptr := p.Args[0] ptr := p.Args[0]
mem := v_1 mem := v_1
if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) { if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
break break
} }
v.reset(OpPPC64FMOVSload) v.reset(OpPPC64FMOVSload)
@ -5439,7 +5439,7 @@ func rewriteValuePPC64_OpPPC64FMOVSload(v *Value) bool {
return true return true
} }
// match: (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) // match: (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem)
// cond: is16Bit(int64(off1)+off2) // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2)))
// result: (FMOVSload [off1+int32(off2)] {sym} ptr mem) // result: (FMOVSload [off1+int32(off2)] {sym} ptr mem)
for { for {
off1 := auxIntToInt32(v.AuxInt) off1 := auxIntToInt32(v.AuxInt)
@ -5450,7 +5450,7 @@ func rewriteValuePPC64_OpPPC64FMOVSload(v *Value) bool {
off2 := auxIntToInt64(v_0.AuxInt) off2 := auxIntToInt64(v_0.AuxInt)
ptr := v_0.Args[0] ptr := v_0.Args[0]
mem := v_1 mem := v_1
if !(is16Bit(int64(off1) + off2)) { if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) {
break break
} }
v.reset(OpPPC64FMOVSload) v.reset(OpPPC64FMOVSload)
@ -5466,7 +5466,7 @@ func rewriteValuePPC64_OpPPC64FMOVSstore(v *Value) bool {
v_1 := v.Args[1] v_1 := v.Args[1]
v_0 := v.Args[0] v_0 := v.Args[0]
// match: (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) // match: (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem)
// cond: is16Bit(int64(off1)+off2) // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2)))
// result: (FMOVSstore [off1+int32(off2)] {sym} ptr val mem) // result: (FMOVSstore [off1+int32(off2)] {sym} ptr val mem)
for { for {
off1 := auxIntToInt32(v.AuxInt) off1 := auxIntToInt32(v.AuxInt)
@ -5478,7 +5478,7 @@ func rewriteValuePPC64_OpPPC64FMOVSstore(v *Value) bool {
ptr := v_0.Args[0] ptr := v_0.Args[0]
val := v_1 val := v_1
mem := v_2 mem := v_2
if !(is16Bit(int64(off1) + off2)) { if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) {
break break
} }
v.reset(OpPPC64FMOVSstore) v.reset(OpPPC64FMOVSstore)
@ -5488,7 +5488,7 @@ func rewriteValuePPC64_OpPPC64FMOVSstore(v *Value) bool {
return true return true
} }
// match: (FMOVSstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) // match: (FMOVSstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem)
// cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
// result: (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) // result: (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for { for {
off1 := auxIntToInt32(v.AuxInt) off1 := auxIntToInt32(v.AuxInt)
@ -5502,7 +5502,7 @@ func rewriteValuePPC64_OpPPC64FMOVSstore(v *Value) bool {
ptr := p.Args[0] ptr := p.Args[0]
val := v_1 val := v_1
mem := v_2 mem := v_2
if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) { if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
break break
} }
v.reset(OpPPC64FMOVSstore) v.reset(OpPPC64FMOVSstore)
@ -6422,7 +6422,7 @@ func rewriteValuePPC64_OpPPC64MOVBZload(v *Value) bool {
v_1 := v.Args[1] v_1 := v.Args[1]
v_0 := v.Args[0] v_0 := v.Args[0]
// match: (MOVBZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) // match: (MOVBZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
// result: (MOVBZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) // result: (MOVBZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for { for {
off1 := auxIntToInt32(v.AuxInt) off1 := auxIntToInt32(v.AuxInt)
@ -6435,7 +6435,7 @@ func rewriteValuePPC64_OpPPC64MOVBZload(v *Value) bool {
sym2 := auxToSym(p.Aux) sym2 := auxToSym(p.Aux)
ptr := p.Args[0] ptr := p.Args[0]
mem := v_1 mem := v_1
if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) { if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
break break
} }
v.reset(OpPPC64MOVBZload) v.reset(OpPPC64MOVBZload)
@ -6445,7 +6445,7 @@ func rewriteValuePPC64_OpPPC64MOVBZload(v *Value) bool {
return true return true
} }
// match: (MOVBZload [off1] {sym} (ADDconst [off2] x) mem) // match: (MOVBZload [off1] {sym} (ADDconst [off2] x) mem)
// cond: is16Bit(int64(off1)+off2) // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2)))
// result: (MOVBZload [off1+int32(off2)] {sym} x mem) // result: (MOVBZload [off1+int32(off2)] {sym} x mem)
for { for {
off1 := auxIntToInt32(v.AuxInt) off1 := auxIntToInt32(v.AuxInt)
@ -6456,7 +6456,7 @@ func rewriteValuePPC64_OpPPC64MOVBZload(v *Value) bool {
off2 := auxIntToInt64(v_0.AuxInt) off2 := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0] x := v_0.Args[0]
mem := v_1 mem := v_1
if !(is16Bit(int64(off1) + off2)) { if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) {
break break
} }
v.reset(OpPPC64MOVBZload) v.reset(OpPPC64MOVBZload)
@ -6494,7 +6494,7 @@ func rewriteValuePPC64_OpPPC64MOVBZloadidx(v *Value) bool {
v_1 := v.Args[1] v_1 := v.Args[1]
v_0 := v.Args[0] v_0 := v.Args[0]
// match: (MOVBZloadidx ptr (MOVDconst [c]) mem) // match: (MOVBZloadidx ptr (MOVDconst [c]) mem)
// cond: is16Bit(c) // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
// result: (MOVBZload [int32(c)] ptr mem) // result: (MOVBZload [int32(c)] ptr mem)
for { for {
ptr := v_0 ptr := v_0
@ -6503,7 +6503,7 @@ func rewriteValuePPC64_OpPPC64MOVBZloadidx(v *Value) bool {
} }
c := auxIntToInt64(v_1.AuxInt) c := auxIntToInt64(v_1.AuxInt)
mem := v_2 mem := v_2
if !(is16Bit(c)) { if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
break break
} }
v.reset(OpPPC64MOVBZload) v.reset(OpPPC64MOVBZload)
@ -6512,7 +6512,7 @@ func rewriteValuePPC64_OpPPC64MOVBZloadidx(v *Value) bool {
return true return true
} }
// match: (MOVBZloadidx (MOVDconst [c]) ptr mem) // match: (MOVBZloadidx (MOVDconst [c]) ptr mem)
// cond: is16Bit(c) // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
// result: (MOVBZload [int32(c)] ptr mem) // result: (MOVBZload [int32(c)] ptr mem)
for { for {
if v_0.Op != OpPPC64MOVDconst { if v_0.Op != OpPPC64MOVDconst {
@ -6521,7 +6521,7 @@ func rewriteValuePPC64_OpPPC64MOVBZloadidx(v *Value) bool {
c := auxIntToInt64(v_0.AuxInt) c := auxIntToInt64(v_0.AuxInt)
ptr := v_1 ptr := v_1
mem := v_2 mem := v_2
if !(is16Bit(c)) { if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
break break
} }
v.reset(OpPPC64MOVBZload) v.reset(OpPPC64MOVBZload)
@ -7177,7 +7177,7 @@ func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool {
b := v.Block b := v.Block
typ := &b.Func.Config.Types typ := &b.Func.Config.Types
// match: (MOVBstore [off1] {sym} (ADDconst [off2] x) val mem) // match: (MOVBstore [off1] {sym} (ADDconst [off2] x) val mem)
// cond: is16Bit(int64(off1)+off2) // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2)))
// result: (MOVBstore [off1+int32(off2)] {sym} x val mem) // result: (MOVBstore [off1+int32(off2)] {sym} x val mem)
for { for {
off1 := auxIntToInt32(v.AuxInt) off1 := auxIntToInt32(v.AuxInt)
@ -7189,7 +7189,7 @@ func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool {
x := v_0.Args[0] x := v_0.Args[0]
val := v_1 val := v_1
mem := v_2 mem := v_2
if !(is16Bit(int64(off1) + off2)) { if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) {
break break
} }
v.reset(OpPPC64MOVBstore) v.reset(OpPPC64MOVBstore)
@ -7199,7 +7199,7 @@ func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool {
return true return true
} }
// match: (MOVBstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) // match: (MOVBstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem)
// cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
// result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for { for {
off1 := auxIntToInt32(v.AuxInt) off1 := auxIntToInt32(v.AuxInt)
@ -7213,7 +7213,7 @@ func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool {
ptr := p.Args[0] ptr := p.Args[0]
val := v_1 val := v_1
mem := v_2 mem := v_2
if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) { if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
break break
} }
v.reset(OpPPC64MOVBstore) v.reset(OpPPC64MOVBstore)
@ -7489,7 +7489,7 @@ func rewriteValuePPC64_OpPPC64MOVBstoreidx(v *Value) bool {
b := v.Block b := v.Block
typ := &b.Func.Config.Types typ := &b.Func.Config.Types
// match: (MOVBstoreidx ptr (MOVDconst [c]) val mem) // match: (MOVBstoreidx ptr (MOVDconst [c]) val mem)
// cond: is16Bit(c) // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
// result: (MOVBstore [int32(c)] ptr val mem) // result: (MOVBstore [int32(c)] ptr val mem)
for { for {
ptr := v_0 ptr := v_0
@ -7499,7 +7499,7 @@ func rewriteValuePPC64_OpPPC64MOVBstoreidx(v *Value) bool {
c := auxIntToInt64(v_1.AuxInt) c := auxIntToInt64(v_1.AuxInt)
val := v_2 val := v_2
mem := v_3 mem := v_3
if !(is16Bit(c)) { if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
break break
} }
v.reset(OpPPC64MOVBstore) v.reset(OpPPC64MOVBstore)
@ -7508,7 +7508,7 @@ func rewriteValuePPC64_OpPPC64MOVBstoreidx(v *Value) bool {
return true return true
} }
// match: (MOVBstoreidx (MOVDconst [c]) ptr val mem) // match: (MOVBstoreidx (MOVDconst [c]) ptr val mem)
// cond: is16Bit(c) // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
// result: (MOVBstore [int32(c)] ptr val mem) // result: (MOVBstore [int32(c)] ptr val mem)
for { for {
if v_0.Op != OpPPC64MOVDconst { if v_0.Op != OpPPC64MOVDconst {
@ -7518,7 +7518,7 @@ func rewriteValuePPC64_OpPPC64MOVBstoreidx(v *Value) bool {
ptr := v_1 ptr := v_1
val := v_2 val := v_2
mem := v_3 mem := v_3
if !(is16Bit(c)) { if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
break break
} }
v.reset(OpPPC64MOVBstore) v.reset(OpPPC64MOVBstore)
@ -7720,7 +7720,7 @@ func rewriteValuePPC64_OpPPC64MOVBstorezero(v *Value) bool {
v_1 := v.Args[1] v_1 := v.Args[1]
v_0 := v.Args[0] v_0 := v.Args[0]
// match: (MOVBstorezero [off1] {sym} (ADDconst [off2] x) mem) // match: (MOVBstorezero [off1] {sym} (ADDconst [off2] x) mem)
// cond: is16Bit(int64(off1)+off2) // cond: ((supportsPPC64PCRel() && is32Bit(int64(off1)+off2)) || (is16Bit(int64(off1)+off2)))
// result: (MOVBstorezero [off1+int32(off2)] {sym} x mem) // result: (MOVBstorezero [off1+int32(off2)] {sym} x mem)
for { for {
off1 := auxIntToInt32(v.AuxInt) off1 := auxIntToInt32(v.AuxInt)
@ -7731,7 +7731,7 @@ func rewriteValuePPC64_OpPPC64MOVBstorezero(v *Value) bool {
off2 := auxIntToInt64(v_0.AuxInt) off2 := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0] x := v_0.Args[0]
mem := v_1 mem := v_1
if !(is16Bit(int64(off1) + off2)) { if !((supportsPPC64PCRel() && is32Bit(int64(off1)+off2)) || (is16Bit(int64(off1) + off2))) {
break break
} }
v.reset(OpPPC64MOVBstorezero) v.reset(OpPPC64MOVBstorezero)
@ -7741,7 +7741,7 @@ func rewriteValuePPC64_OpPPC64MOVBstorezero(v *Value) bool {
return true return true
} }
// match: (MOVBstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) // match: (MOVBstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem)
// cond: canMergeSym(sym1,sym2) && (x.Op != OpSB || p.Uses == 1) // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (x.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
// result: (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem) // result: (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
for { for {
off1 := auxIntToInt32(v.AuxInt) off1 := auxIntToInt32(v.AuxInt)
@ -7754,7 +7754,7 @@ func rewriteValuePPC64_OpPPC64MOVBstorezero(v *Value) bool {
sym2 := auxToSym(p.Aux) sym2 := auxToSym(p.Aux)
x := p.Args[0] x := p.Args[0]
mem := v_1 mem := v_1
if !(canMergeSym(sym1, sym2) && (x.Op != OpSB || p.Uses == 1)) { if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (x.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
break break
} }
v.reset(OpPPC64MOVBstorezero) v.reset(OpPPC64MOVBstorezero)
@ -7786,7 +7786,7 @@ func rewriteValuePPC64_OpPPC64MOVDload(v *Value) bool {
return true return true
} }
// match: (MOVDload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) // match: (MOVDload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
// result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for { for {
off1 := auxIntToInt32(v.AuxInt) off1 := auxIntToInt32(v.AuxInt)
@ -7799,7 +7799,7 @@ func rewriteValuePPC64_OpPPC64MOVDload(v *Value) bool {
sym2 := auxToSym(p.Aux) sym2 := auxToSym(p.Aux)
ptr := p.Args[0] ptr := p.Args[0]
mem := v_1 mem := v_1
if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) { if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
break break
} }
v.reset(OpPPC64MOVDload) v.reset(OpPPC64MOVDload)
@ -7809,7 +7809,7 @@ func rewriteValuePPC64_OpPPC64MOVDload(v *Value) bool {
return true return true
} }
// match: (MOVDload [off1] {sym} (ADDconst [off2] x) mem) // match: (MOVDload [off1] {sym} (ADDconst [off2] x) mem)
// cond: is16Bit(int64(off1)+off2) // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2)))
// result: (MOVDload [off1+int32(off2)] {sym} x mem) // result: (MOVDload [off1+int32(off2)] {sym} x mem)
for { for {
off1 := auxIntToInt32(v.AuxInt) off1 := auxIntToInt32(v.AuxInt)
@ -7820,7 +7820,7 @@ func rewriteValuePPC64_OpPPC64MOVDload(v *Value) bool {
off2 := auxIntToInt64(v_0.AuxInt) off2 := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0] x := v_0.Args[0]
mem := v_1 mem := v_1
if !(is16Bit(int64(off1) + off2)) { if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) {
break break
} }
v.reset(OpPPC64MOVDload) v.reset(OpPPC64MOVDload)
@ -7858,7 +7858,7 @@ func rewriteValuePPC64_OpPPC64MOVDloadidx(v *Value) bool {
v_1 := v.Args[1] v_1 := v.Args[1]
v_0 := v.Args[0] v_0 := v.Args[0]
// match: (MOVDloadidx ptr (MOVDconst [c]) mem) // match: (MOVDloadidx ptr (MOVDconst [c]) mem)
// cond: is16Bit(c) && c%4 == 0 // cond: ((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
// result: (MOVDload [int32(c)] ptr mem) // result: (MOVDload [int32(c)] ptr mem)
for { for {
ptr := v_0 ptr := v_0
@ -7867,7 +7867,7 @@ func rewriteValuePPC64_OpPPC64MOVDloadidx(v *Value) bool {
} }
c := auxIntToInt64(v_1.AuxInt) c := auxIntToInt64(v_1.AuxInt)
mem := v_2 mem := v_2
if !(is16Bit(c) && c%4 == 0) { if !((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
break break
} }
v.reset(OpPPC64MOVDload) v.reset(OpPPC64MOVDload)
@ -7876,7 +7876,7 @@ func rewriteValuePPC64_OpPPC64MOVDloadidx(v *Value) bool {
return true return true
} }
// match: (MOVDloadidx (MOVDconst [c]) ptr mem) // match: (MOVDloadidx (MOVDconst [c]) ptr mem)
// cond: is16Bit(c) && c%4 == 0 // cond: ((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
// result: (MOVDload [int32(c)] ptr mem) // result: (MOVDload [int32(c)] ptr mem)
for { for {
if v_0.Op != OpPPC64MOVDconst { if v_0.Op != OpPPC64MOVDconst {
@ -7885,7 +7885,7 @@ func rewriteValuePPC64_OpPPC64MOVDloadidx(v *Value) bool {
c := auxIntToInt64(v_0.AuxInt) c := auxIntToInt64(v_0.AuxInt)
ptr := v_1 ptr := v_1
mem := v_2 mem := v_2
if !(is16Bit(c) && c%4 == 0) { if !((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
break break
} }
v.reset(OpPPC64MOVDload) v.reset(OpPPC64MOVDload)
@ -7918,7 +7918,7 @@ func rewriteValuePPC64_OpPPC64MOVDstore(v *Value) bool {
return true return true
} }
// match: (MOVDstore [off1] {sym} (ADDconst [off2] x) val mem) // match: (MOVDstore [off1] {sym} (ADDconst [off2] x) val mem)
// cond: is16Bit(int64(off1)+off2) // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2)))
// result: (MOVDstore [off1+int32(off2)] {sym} x val mem) // result: (MOVDstore [off1+int32(off2)] {sym} x val mem)
for { for {
off1 := auxIntToInt32(v.AuxInt) off1 := auxIntToInt32(v.AuxInt)
@ -7930,7 +7930,7 @@ func rewriteValuePPC64_OpPPC64MOVDstore(v *Value) bool {
x := v_0.Args[0] x := v_0.Args[0]
val := v_1 val := v_1
mem := v_2 mem := v_2
if !(is16Bit(int64(off1) + off2)) { if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) {
break break
} }
v.reset(OpPPC64MOVDstore) v.reset(OpPPC64MOVDstore)
@ -7940,7 +7940,7 @@ func rewriteValuePPC64_OpPPC64MOVDstore(v *Value) bool {
return true return true
} }
// match: (MOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) // match: (MOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem)
// cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
// result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for { for {
off1 := auxIntToInt32(v.AuxInt) off1 := auxIntToInt32(v.AuxInt)
@ -7954,7 +7954,7 @@ func rewriteValuePPC64_OpPPC64MOVDstore(v *Value) bool {
ptr := p.Args[0] ptr := p.Args[0]
val := v_1 val := v_1
mem := v_2 mem := v_2
if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) { if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
break break
} }
v.reset(OpPPC64MOVDstore) v.reset(OpPPC64MOVDstore)
@ -8053,7 +8053,7 @@ func rewriteValuePPC64_OpPPC64MOVDstoreidx(v *Value) bool {
v_1 := v.Args[1] v_1 := v.Args[1]
v_0 := v.Args[0] v_0 := v.Args[0]
// match: (MOVDstoreidx ptr (MOVDconst [c]) val mem) // match: (MOVDstoreidx ptr (MOVDconst [c]) val mem)
// cond: is16Bit(c) && c%4 == 0 // cond: ((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
// result: (MOVDstore [int32(c)] ptr val mem) // result: (MOVDstore [int32(c)] ptr val mem)
for { for {
ptr := v_0 ptr := v_0
@ -8063,7 +8063,7 @@ func rewriteValuePPC64_OpPPC64MOVDstoreidx(v *Value) bool {
c := auxIntToInt64(v_1.AuxInt) c := auxIntToInt64(v_1.AuxInt)
val := v_2 val := v_2
mem := v_3 mem := v_3
if !(is16Bit(c) && c%4 == 0) { if !((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
break break
} }
v.reset(OpPPC64MOVDstore) v.reset(OpPPC64MOVDstore)
@ -8072,7 +8072,7 @@ func rewriteValuePPC64_OpPPC64MOVDstoreidx(v *Value) bool {
return true return true
} }
// match: (MOVDstoreidx (MOVDconst [c]) ptr val mem) // match: (MOVDstoreidx (MOVDconst [c]) ptr val mem)
// cond: is16Bit(c) && c%4 == 0 // cond: ((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
// result: (MOVDstore [int32(c)] ptr val mem) // result: (MOVDstore [int32(c)] ptr val mem)
for { for {
if v_0.Op != OpPPC64MOVDconst { if v_0.Op != OpPPC64MOVDconst {
@ -8082,7 +8082,7 @@ func rewriteValuePPC64_OpPPC64MOVDstoreidx(v *Value) bool {
ptr := v_1 ptr := v_1
val := v_2 val := v_2
mem := v_3 mem := v_3
if !(is16Bit(c) && c%4 == 0) { if !((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
break break
} }
v.reset(OpPPC64MOVDstore) v.reset(OpPPC64MOVDstore)
@ -8129,7 +8129,7 @@ func rewriteValuePPC64_OpPPC64MOVDstorezero(v *Value) bool {
v_1 := v.Args[1] v_1 := v.Args[1]
v_0 := v.Args[0] v_0 := v.Args[0]
// match: (MOVDstorezero [off1] {sym} (ADDconst [off2] x) mem) // match: (MOVDstorezero [off1] {sym} (ADDconst [off2] x) mem)
// cond: is16Bit(int64(off1)+off2) // cond: ((supportsPPC64PCRel() && is32Bit(int64(off1)+off2)) || (is16Bit(int64(off1)+off2)))
// result: (MOVDstorezero [off1+int32(off2)] {sym} x mem) // result: (MOVDstorezero [off1+int32(off2)] {sym} x mem)
for { for {
off1 := auxIntToInt32(v.AuxInt) off1 := auxIntToInt32(v.AuxInt)
@ -8140,7 +8140,7 @@ func rewriteValuePPC64_OpPPC64MOVDstorezero(v *Value) bool {
off2 := auxIntToInt64(v_0.AuxInt) off2 := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0] x := v_0.Args[0]
mem := v_1 mem := v_1
if !(is16Bit(int64(off1) + off2)) { if !((supportsPPC64PCRel() && is32Bit(int64(off1)+off2)) || (is16Bit(int64(off1) + off2))) {
break break
} }
v.reset(OpPPC64MOVDstorezero) v.reset(OpPPC64MOVDstorezero)
@ -8150,7 +8150,7 @@ func rewriteValuePPC64_OpPPC64MOVDstorezero(v *Value) bool {
return true return true
} }
// match: (MOVDstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) // match: (MOVDstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem)
// cond: canMergeSym(sym1,sym2) && (x.Op != OpSB || p.Uses == 1) // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (x.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
// result: (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem) // result: (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
for { for {
off1 := auxIntToInt32(v.AuxInt) off1 := auxIntToInt32(v.AuxInt)
@ -8163,7 +8163,7 @@ func rewriteValuePPC64_OpPPC64MOVDstorezero(v *Value) bool {
sym2 := auxToSym(p.Aux) sym2 := auxToSym(p.Aux)
x := p.Args[0] x := p.Args[0]
mem := v_1 mem := v_1
if !(canMergeSym(sym1, sym2) && (x.Op != OpSB || p.Uses == 1)) { if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (x.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
break break
} }
v.reset(OpPPC64MOVDstorezero) v.reset(OpPPC64MOVDstorezero)
@ -8236,7 +8236,7 @@ func rewriteValuePPC64_OpPPC64MOVHZload(v *Value) bool {
v_1 := v.Args[1] v_1 := v.Args[1]
v_0 := v.Args[0] v_0 := v.Args[0]
// match: (MOVHZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) // match: (MOVHZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
// result: (MOVHZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) // result: (MOVHZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for { for {
off1 := auxIntToInt32(v.AuxInt) off1 := auxIntToInt32(v.AuxInt)
@ -8249,7 +8249,7 @@ func rewriteValuePPC64_OpPPC64MOVHZload(v *Value) bool {
sym2 := auxToSym(p.Aux) sym2 := auxToSym(p.Aux)
ptr := p.Args[0] ptr := p.Args[0]
mem := v_1 mem := v_1
if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) { if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
break break
} }
v.reset(OpPPC64MOVHZload) v.reset(OpPPC64MOVHZload)
@ -8259,7 +8259,7 @@ func rewriteValuePPC64_OpPPC64MOVHZload(v *Value) bool {
return true return true
} }
// match: (MOVHZload [off1] {sym} (ADDconst [off2] x) mem) // match: (MOVHZload [off1] {sym} (ADDconst [off2] x) mem)
// cond: is16Bit(int64(off1)+off2) // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2)))
// result: (MOVHZload [off1+int32(off2)] {sym} x mem) // result: (MOVHZload [off1+int32(off2)] {sym} x mem)
for { for {
off1 := auxIntToInt32(v.AuxInt) off1 := auxIntToInt32(v.AuxInt)
@ -8270,7 +8270,7 @@ func rewriteValuePPC64_OpPPC64MOVHZload(v *Value) bool {
off2 := auxIntToInt64(v_0.AuxInt) off2 := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0] x := v_0.Args[0]
mem := v_1 mem := v_1
if !(is16Bit(int64(off1) + off2)) { if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) {
break break
} }
v.reset(OpPPC64MOVHZload) v.reset(OpPPC64MOVHZload)
@ -8308,7 +8308,7 @@ func rewriteValuePPC64_OpPPC64MOVHZloadidx(v *Value) bool {
v_1 := v.Args[1] v_1 := v.Args[1]
v_0 := v.Args[0] v_0 := v.Args[0]
// match: (MOVHZloadidx ptr (MOVDconst [c]) mem) // match: (MOVHZloadidx ptr (MOVDconst [c]) mem)
// cond: is16Bit(c) // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
// result: (MOVHZload [int32(c)] ptr mem) // result: (MOVHZload [int32(c)] ptr mem)
for { for {
ptr := v_0 ptr := v_0
@ -8317,7 +8317,7 @@ func rewriteValuePPC64_OpPPC64MOVHZloadidx(v *Value) bool {
} }
c := auxIntToInt64(v_1.AuxInt) c := auxIntToInt64(v_1.AuxInt)
mem := v_2 mem := v_2
if !(is16Bit(c)) { if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
break break
} }
v.reset(OpPPC64MOVHZload) v.reset(OpPPC64MOVHZload)
@ -8326,7 +8326,7 @@ func rewriteValuePPC64_OpPPC64MOVHZloadidx(v *Value) bool {
return true return true
} }
// match: (MOVHZloadidx (MOVDconst [c]) ptr mem) // match: (MOVHZloadidx (MOVDconst [c]) ptr mem)
// cond: is16Bit(c) // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
// result: (MOVHZload [int32(c)] ptr mem) // result: (MOVHZload [int32(c)] ptr mem)
for { for {
if v_0.Op != OpPPC64MOVDconst { if v_0.Op != OpPPC64MOVDconst {
@ -8335,7 +8335,7 @@ func rewriteValuePPC64_OpPPC64MOVHZloadidx(v *Value) bool {
c := auxIntToInt64(v_0.AuxInt) c := auxIntToInt64(v_0.AuxInt)
ptr := v_1 ptr := v_1
mem := v_2 mem := v_2
if !(is16Bit(c)) { if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
break break
} }
v.reset(OpPPC64MOVHZload) v.reset(OpPPC64MOVHZload)
@ -8770,7 +8770,7 @@ func rewriteValuePPC64_OpPPC64MOVHload(v *Value) bool {
v_1 := v.Args[1] v_1 := v.Args[1]
v_0 := v.Args[0] v_0 := v.Args[0]
// match: (MOVHload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) // match: (MOVHload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
// result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for { for {
off1 := auxIntToInt32(v.AuxInt) off1 := auxIntToInt32(v.AuxInt)
@ -8783,7 +8783,7 @@ func rewriteValuePPC64_OpPPC64MOVHload(v *Value) bool {
sym2 := auxToSym(p.Aux) sym2 := auxToSym(p.Aux)
ptr := p.Args[0] ptr := p.Args[0]
mem := v_1 mem := v_1
if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) { if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
break break
} }
v.reset(OpPPC64MOVHload) v.reset(OpPPC64MOVHload)
@ -8793,7 +8793,7 @@ func rewriteValuePPC64_OpPPC64MOVHload(v *Value) bool {
return true return true
} }
// match: (MOVHload [off1] {sym} (ADDconst [off2] x) mem) // match: (MOVHload [off1] {sym} (ADDconst [off2] x) mem)
// cond: is16Bit(int64(off1)+off2) // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2)))
// result: (MOVHload [off1+int32(off2)] {sym} x mem) // result: (MOVHload [off1+int32(off2)] {sym} x mem)
for { for {
off1 := auxIntToInt32(v.AuxInt) off1 := auxIntToInt32(v.AuxInt)
@ -8804,7 +8804,7 @@ func rewriteValuePPC64_OpPPC64MOVHload(v *Value) bool {
off2 := auxIntToInt64(v_0.AuxInt) off2 := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0] x := v_0.Args[0]
mem := v_1 mem := v_1
if !(is16Bit(int64(off1) + off2)) { if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) {
break break
} }
v.reset(OpPPC64MOVHload) v.reset(OpPPC64MOVHload)
@ -8842,7 +8842,7 @@ func rewriteValuePPC64_OpPPC64MOVHloadidx(v *Value) bool {
v_1 := v.Args[1] v_1 := v.Args[1]
v_0 := v.Args[0] v_0 := v.Args[0]
// match: (MOVHloadidx ptr (MOVDconst [c]) mem) // match: (MOVHloadidx ptr (MOVDconst [c]) mem)
// cond: is16Bit(c) // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
// result: (MOVHload [int32(c)] ptr mem) // result: (MOVHload [int32(c)] ptr mem)
for { for {
ptr := v_0 ptr := v_0
@ -8851,7 +8851,7 @@ func rewriteValuePPC64_OpPPC64MOVHloadidx(v *Value) bool {
} }
c := auxIntToInt64(v_1.AuxInt) c := auxIntToInt64(v_1.AuxInt)
mem := v_2 mem := v_2
if !(is16Bit(c)) { if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
break break
} }
v.reset(OpPPC64MOVHload) v.reset(OpPPC64MOVHload)
@ -8860,7 +8860,7 @@ func rewriteValuePPC64_OpPPC64MOVHloadidx(v *Value) bool {
return true return true
} }
// match: (MOVHloadidx (MOVDconst [c]) ptr mem) // match: (MOVHloadidx (MOVDconst [c]) ptr mem)
// cond: is16Bit(c) // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
// result: (MOVHload [int32(c)] ptr mem) // result: (MOVHload [int32(c)] ptr mem)
for { for {
if v_0.Op != OpPPC64MOVDconst { if v_0.Op != OpPPC64MOVDconst {
@ -8869,7 +8869,7 @@ func rewriteValuePPC64_OpPPC64MOVHloadidx(v *Value) bool {
c := auxIntToInt64(v_0.AuxInt) c := auxIntToInt64(v_0.AuxInt)
ptr := v_1 ptr := v_1
mem := v_2 mem := v_2
if !(is16Bit(c)) { if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
break break
} }
v.reset(OpPPC64MOVHload) v.reset(OpPPC64MOVHload)
@ -9145,7 +9145,7 @@ func rewriteValuePPC64_OpPPC64MOVHstore(v *Value) bool {
v_0 := v.Args[0] v_0 := v.Args[0]
b := v.Block b := v.Block
// match: (MOVHstore [off1] {sym} (ADDconst [off2] x) val mem) // match: (MOVHstore [off1] {sym} (ADDconst [off2] x) val mem)
// cond: is16Bit(int64(off1)+off2) // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2)))
// result: (MOVHstore [off1+int32(off2)] {sym} x val mem) // result: (MOVHstore [off1+int32(off2)] {sym} x val mem)
for { for {
off1 := auxIntToInt32(v.AuxInt) off1 := auxIntToInt32(v.AuxInt)
@ -9157,7 +9157,7 @@ func rewriteValuePPC64_OpPPC64MOVHstore(v *Value) bool {
x := v_0.Args[0] x := v_0.Args[0]
val := v_1 val := v_1
mem := v_2 mem := v_2
if !(is16Bit(int64(off1) + off2)) { if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) {
break break
} }
v.reset(OpPPC64MOVHstore) v.reset(OpPPC64MOVHstore)
@ -9167,7 +9167,7 @@ func rewriteValuePPC64_OpPPC64MOVHstore(v *Value) bool {
return true return true
} }
// match: (MOVHstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) // match: (MOVHstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem)
// cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
// result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for { for {
off1 := auxIntToInt32(v.AuxInt) off1 := auxIntToInt32(v.AuxInt)
@ -9181,7 +9181,7 @@ func rewriteValuePPC64_OpPPC64MOVHstore(v *Value) bool {
ptr := p.Args[0] ptr := p.Args[0]
val := v_1 val := v_1
mem := v_2 mem := v_2
if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) { if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
break break
} }
v.reset(OpPPC64MOVHstore) v.reset(OpPPC64MOVHstore)
@ -9348,7 +9348,7 @@ func rewriteValuePPC64_OpPPC64MOVHstoreidx(v *Value) bool {
v_1 := v.Args[1] v_1 := v.Args[1]
v_0 := v.Args[0] v_0 := v.Args[0]
// match: (MOVHstoreidx ptr (MOVDconst [c]) val mem) // match: (MOVHstoreidx ptr (MOVDconst [c]) val mem)
// cond: is16Bit(c) // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
// result: (MOVHstore [int32(c)] ptr val mem) // result: (MOVHstore [int32(c)] ptr val mem)
for { for {
ptr := v_0 ptr := v_0
@ -9358,7 +9358,7 @@ func rewriteValuePPC64_OpPPC64MOVHstoreidx(v *Value) bool {
c := auxIntToInt64(v_1.AuxInt) c := auxIntToInt64(v_1.AuxInt)
val := v_2 val := v_2
mem := v_3 mem := v_3
if !(is16Bit(c)) { if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
break break
} }
v.reset(OpPPC64MOVHstore) v.reset(OpPPC64MOVHstore)
@ -9367,7 +9367,7 @@ func rewriteValuePPC64_OpPPC64MOVHstoreidx(v *Value) bool {
return true return true
} }
// match: (MOVHstoreidx (MOVDconst [c]) ptr val mem) // match: (MOVHstoreidx (MOVDconst [c]) ptr val mem)
// cond: is16Bit(c) // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
// result: (MOVHstore [int32(c)] ptr val mem) // result: (MOVHstore [int32(c)] ptr val mem)
for { for {
if v_0.Op != OpPPC64MOVDconst { if v_0.Op != OpPPC64MOVDconst {
@ -9377,7 +9377,7 @@ func rewriteValuePPC64_OpPPC64MOVHstoreidx(v *Value) bool {
ptr := v_1 ptr := v_1
val := v_2 val := v_2
mem := v_3 mem := v_3
if !(is16Bit(c)) { if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
break break
} }
v.reset(OpPPC64MOVHstore) v.reset(OpPPC64MOVHstore)
@ -9480,7 +9480,7 @@ func rewriteValuePPC64_OpPPC64MOVHstorezero(v *Value) bool {
v_1 := v.Args[1] v_1 := v.Args[1]
v_0 := v.Args[0] v_0 := v.Args[0]
// match: (MOVHstorezero [off1] {sym} (ADDconst [off2] x) mem) // match: (MOVHstorezero [off1] {sym} (ADDconst [off2] x) mem)
// cond: is16Bit(int64(off1)+off2) // cond: ((supportsPPC64PCRel() && is32Bit(int64(off1)+off2)) || (is16Bit(int64(off1)+off2)))
// result: (MOVHstorezero [off1+int32(off2)] {sym} x mem) // result: (MOVHstorezero [off1+int32(off2)] {sym} x mem)
for { for {
off1 := auxIntToInt32(v.AuxInt) off1 := auxIntToInt32(v.AuxInt)
@ -9491,7 +9491,7 @@ func rewriteValuePPC64_OpPPC64MOVHstorezero(v *Value) bool {
off2 := auxIntToInt64(v_0.AuxInt) off2 := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0] x := v_0.Args[0]
mem := v_1 mem := v_1
if !(is16Bit(int64(off1) + off2)) { if !((supportsPPC64PCRel() && is32Bit(int64(off1)+off2)) || (is16Bit(int64(off1) + off2))) {
break break
} }
v.reset(OpPPC64MOVHstorezero) v.reset(OpPPC64MOVHstorezero)
@ -9501,7 +9501,7 @@ func rewriteValuePPC64_OpPPC64MOVHstorezero(v *Value) bool {
return true return true
} }
// match: (MOVHstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) // match: (MOVHstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem)
// cond: canMergeSym(sym1,sym2) && (x.Op != OpSB || p.Uses == 1) // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (x.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
// result: (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem) // result: (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
for { for {
off1 := auxIntToInt32(v.AuxInt) off1 := auxIntToInt32(v.AuxInt)
@ -9514,7 +9514,7 @@ func rewriteValuePPC64_OpPPC64MOVHstorezero(v *Value) bool {
sym2 := auxToSym(p.Aux) sym2 := auxToSym(p.Aux)
x := p.Args[0] x := p.Args[0]
mem := v_1 mem := v_1
if !(canMergeSym(sym1, sym2) && (x.Op != OpSB || p.Uses == 1)) { if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (x.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
break break
} }
v.reset(OpPPC64MOVHstorezero) v.reset(OpPPC64MOVHstorezero)
@ -9561,7 +9561,7 @@ func rewriteValuePPC64_OpPPC64MOVWZload(v *Value) bool {
v_1 := v.Args[1] v_1 := v.Args[1]
v_0 := v.Args[0] v_0 := v.Args[0]
// match: (MOVWZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) // match: (MOVWZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
// result: (MOVWZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) // result: (MOVWZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for { for {
off1 := auxIntToInt32(v.AuxInt) off1 := auxIntToInt32(v.AuxInt)
@ -9574,7 +9574,7 @@ func rewriteValuePPC64_OpPPC64MOVWZload(v *Value) bool {
sym2 := auxToSym(p.Aux) sym2 := auxToSym(p.Aux)
ptr := p.Args[0] ptr := p.Args[0]
mem := v_1 mem := v_1
if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) { if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
break break
} }
v.reset(OpPPC64MOVWZload) v.reset(OpPPC64MOVWZload)
@ -9584,7 +9584,7 @@ func rewriteValuePPC64_OpPPC64MOVWZload(v *Value) bool {
return true return true
} }
// match: (MOVWZload [off1] {sym} (ADDconst [off2] x) mem) // match: (MOVWZload [off1] {sym} (ADDconst [off2] x) mem)
// cond: is16Bit(int64(off1)+off2) // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2)))
// result: (MOVWZload [off1+int32(off2)] {sym} x mem) // result: (MOVWZload [off1+int32(off2)] {sym} x mem)
for { for {
off1 := auxIntToInt32(v.AuxInt) off1 := auxIntToInt32(v.AuxInt)
@ -9595,7 +9595,7 @@ func rewriteValuePPC64_OpPPC64MOVWZload(v *Value) bool {
off2 := auxIntToInt64(v_0.AuxInt) off2 := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0] x := v_0.Args[0]
mem := v_1 mem := v_1
if !(is16Bit(int64(off1) + off2)) { if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) {
break break
} }
v.reset(OpPPC64MOVWZload) v.reset(OpPPC64MOVWZload)
@ -9633,7 +9633,7 @@ func rewriteValuePPC64_OpPPC64MOVWZloadidx(v *Value) bool {
v_1 := v.Args[1] v_1 := v.Args[1]
v_0 := v.Args[0] v_0 := v.Args[0]
// match: (MOVWZloadidx ptr (MOVDconst [c]) mem) // match: (MOVWZloadidx ptr (MOVDconst [c]) mem)
// cond: is16Bit(c) // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
// result: (MOVWZload [int32(c)] ptr mem) // result: (MOVWZload [int32(c)] ptr mem)
for { for {
ptr := v_0 ptr := v_0
@ -9642,7 +9642,7 @@ func rewriteValuePPC64_OpPPC64MOVWZloadidx(v *Value) bool {
} }
c := auxIntToInt64(v_1.AuxInt) c := auxIntToInt64(v_1.AuxInt)
mem := v_2 mem := v_2
if !(is16Bit(c)) { if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
break break
} }
v.reset(OpPPC64MOVWZload) v.reset(OpPPC64MOVWZload)
@ -9651,7 +9651,7 @@ func rewriteValuePPC64_OpPPC64MOVWZloadidx(v *Value) bool {
return true return true
} }
// match: (MOVWZloadidx (MOVDconst [c]) ptr mem) // match: (MOVWZloadidx (MOVDconst [c]) ptr mem)
// cond: is16Bit(c) // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
// result: (MOVWZload [int32(c)] ptr mem) // result: (MOVWZload [int32(c)] ptr mem)
for { for {
if v_0.Op != OpPPC64MOVDconst { if v_0.Op != OpPPC64MOVDconst {
@ -9660,7 +9660,7 @@ func rewriteValuePPC64_OpPPC64MOVWZloadidx(v *Value) bool {
c := auxIntToInt64(v_0.AuxInt) c := auxIntToInt64(v_0.AuxInt)
ptr := v_1 ptr := v_1
mem := v_2 mem := v_2
if !(is16Bit(c)) { if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
break break
} }
v.reset(OpPPC64MOVWZload) v.reset(OpPPC64MOVWZload)
@ -10120,7 +10120,7 @@ func rewriteValuePPC64_OpPPC64MOVWload(v *Value) bool {
v_1 := v.Args[1] v_1 := v.Args[1]
v_0 := v.Args[0] v_0 := v.Args[0]
// match: (MOVWload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) // match: (MOVWload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
// cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
// result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
for { for {
off1 := auxIntToInt32(v.AuxInt) off1 := auxIntToInt32(v.AuxInt)
@ -10133,7 +10133,7 @@ func rewriteValuePPC64_OpPPC64MOVWload(v *Value) bool {
sym2 := auxToSym(p.Aux) sym2 := auxToSym(p.Aux)
ptr := p.Args[0] ptr := p.Args[0]
mem := v_1 mem := v_1
if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) { if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
break break
} }
v.reset(OpPPC64MOVWload) v.reset(OpPPC64MOVWload)
@ -10143,7 +10143,7 @@ func rewriteValuePPC64_OpPPC64MOVWload(v *Value) bool {
return true return true
} }
// match: (MOVWload [off1] {sym} (ADDconst [off2] x) mem) // match: (MOVWload [off1] {sym} (ADDconst [off2] x) mem)
// cond: is16Bit(int64(off1)+off2) // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2)))
// result: (MOVWload [off1+int32(off2)] {sym} x mem) // result: (MOVWload [off1+int32(off2)] {sym} x mem)
for { for {
off1 := auxIntToInt32(v.AuxInt) off1 := auxIntToInt32(v.AuxInt)
@ -10154,7 +10154,7 @@ func rewriteValuePPC64_OpPPC64MOVWload(v *Value) bool {
off2 := auxIntToInt64(v_0.AuxInt) off2 := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0] x := v_0.Args[0]
mem := v_1 mem := v_1
if !(is16Bit(int64(off1) + off2)) { if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) {
break break
} }
v.reset(OpPPC64MOVWload) v.reset(OpPPC64MOVWload)
@ -10192,7 +10192,7 @@ func rewriteValuePPC64_OpPPC64MOVWloadidx(v *Value) bool {
v_1 := v.Args[1] v_1 := v.Args[1]
v_0 := v.Args[0] v_0 := v.Args[0]
// match: (MOVWloadidx ptr (MOVDconst [c]) mem) // match: (MOVWloadidx ptr (MOVDconst [c]) mem)
// cond: is16Bit(c) && c%4 == 0 // cond: ((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
// result: (MOVWload [int32(c)] ptr mem) // result: (MOVWload [int32(c)] ptr mem)
for { for {
ptr := v_0 ptr := v_0
@ -10201,7 +10201,7 @@ func rewriteValuePPC64_OpPPC64MOVWloadidx(v *Value) bool {
} }
c := auxIntToInt64(v_1.AuxInt) c := auxIntToInt64(v_1.AuxInt)
mem := v_2 mem := v_2
if !(is16Bit(c) && c%4 == 0) { if !((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
break break
} }
v.reset(OpPPC64MOVWload) v.reset(OpPPC64MOVWload)
@ -10210,7 +10210,7 @@ func rewriteValuePPC64_OpPPC64MOVWloadidx(v *Value) bool {
return true return true
} }
// match: (MOVWloadidx (MOVDconst [c]) ptr mem) // match: (MOVWloadidx (MOVDconst [c]) ptr mem)
// cond: is16Bit(c) && c%4 == 0 // cond: ((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
// result: (MOVWload [int32(c)] ptr mem) // result: (MOVWload [int32(c)] ptr mem)
for { for {
if v_0.Op != OpPPC64MOVDconst { if v_0.Op != OpPPC64MOVDconst {
@ -10219,7 +10219,7 @@ func rewriteValuePPC64_OpPPC64MOVWloadidx(v *Value) bool {
c := auxIntToInt64(v_0.AuxInt) c := auxIntToInt64(v_0.AuxInt)
ptr := v_1 ptr := v_1
mem := v_2 mem := v_2
if !(is16Bit(c) && c%4 == 0) { if !((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
break break
} }
v.reset(OpPPC64MOVWload) v.reset(OpPPC64MOVWload)
@ -10516,7 +10516,7 @@ func rewriteValuePPC64_OpPPC64MOVWstore(v *Value) bool {
v_0 := v.Args[0] v_0 := v.Args[0]
b := v.Block b := v.Block
// match: (MOVWstore [off1] {sym} (ADDconst [off2] x) val mem) // match: (MOVWstore [off1] {sym} (ADDconst [off2] x) val mem)
// cond: is16Bit(int64(off1)+off2) // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2)))
// result: (MOVWstore [off1+int32(off2)] {sym} x val mem) // result: (MOVWstore [off1+int32(off2)] {sym} x val mem)
for { for {
off1 := auxIntToInt32(v.AuxInt) off1 := auxIntToInt32(v.AuxInt)
@ -10528,7 +10528,7 @@ func rewriteValuePPC64_OpPPC64MOVWstore(v *Value) bool {
x := v_0.Args[0] x := v_0.Args[0]
val := v_1 val := v_1
mem := v_2 mem := v_2
if !(is16Bit(int64(off1) + off2)) { if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) {
break break
} }
v.reset(OpPPC64MOVWstore) v.reset(OpPPC64MOVWstore)
@ -10538,7 +10538,7 @@ func rewriteValuePPC64_OpPPC64MOVWstore(v *Value) bool {
return true return true
} }
// match: (MOVWstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) // match: (MOVWstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem)
// cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
// result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
for { for {
off1 := auxIntToInt32(v.AuxInt) off1 := auxIntToInt32(v.AuxInt)
@ -10552,7 +10552,7 @@ func rewriteValuePPC64_OpPPC64MOVWstore(v *Value) bool {
ptr := p.Args[0] ptr := p.Args[0]
val := v_1 val := v_1
mem := v_2 mem := v_2
if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) { if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
break break
} }
v.reset(OpPPC64MOVWstore) v.reset(OpPPC64MOVWstore)
@ -10685,7 +10685,7 @@ func rewriteValuePPC64_OpPPC64MOVWstoreidx(v *Value) bool {
v_1 := v.Args[1] v_1 := v.Args[1]
v_0 := v.Args[0] v_0 := v.Args[0]
// match: (MOVWstoreidx ptr (MOVDconst [c]) val mem) // match: (MOVWstoreidx ptr (MOVDconst [c]) val mem)
// cond: is16Bit(c) // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
// result: (MOVWstore [int32(c)] ptr val mem) // result: (MOVWstore [int32(c)] ptr val mem)
for { for {
ptr := v_0 ptr := v_0
@ -10695,7 +10695,7 @@ func rewriteValuePPC64_OpPPC64MOVWstoreidx(v *Value) bool {
c := auxIntToInt64(v_1.AuxInt) c := auxIntToInt64(v_1.AuxInt)
val := v_2 val := v_2
mem := v_3 mem := v_3
if !(is16Bit(c)) { if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
break break
} }
v.reset(OpPPC64MOVWstore) v.reset(OpPPC64MOVWstore)
@ -10704,7 +10704,7 @@ func rewriteValuePPC64_OpPPC64MOVWstoreidx(v *Value) bool {
return true return true
} }
// match: (MOVWstoreidx (MOVDconst [c]) ptr val mem) // match: (MOVWstoreidx (MOVDconst [c]) ptr val mem)
// cond: is16Bit(c) // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
// result: (MOVWstore [int32(c)] ptr val mem) // result: (MOVWstore [int32(c)] ptr val mem)
for { for {
if v_0.Op != OpPPC64MOVDconst { if v_0.Op != OpPPC64MOVDconst {
@ -10714,7 +10714,7 @@ func rewriteValuePPC64_OpPPC64MOVWstoreidx(v *Value) bool {
ptr := v_1 ptr := v_1
val := v_2 val := v_2
mem := v_3 mem := v_3
if !(is16Bit(c)) { if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
break break
} }
v.reset(OpPPC64MOVWstore) v.reset(OpPPC64MOVWstore)
@ -10789,7 +10789,7 @@ func rewriteValuePPC64_OpPPC64MOVWstorezero(v *Value) bool {
v_1 := v.Args[1] v_1 := v.Args[1]
v_0 := v.Args[0] v_0 := v.Args[0]
// match: (MOVWstorezero [off1] {sym} (ADDconst [off2] x) mem) // match: (MOVWstorezero [off1] {sym} (ADDconst [off2] x) mem)
// cond: is16Bit(int64(off1)+off2) // cond: ((supportsPPC64PCRel() && is32Bit(int64(off1)+off2)) || (is16Bit(int64(off1)+off2)))
// result: (MOVWstorezero [off1+int32(off2)] {sym} x mem) // result: (MOVWstorezero [off1+int32(off2)] {sym} x mem)
for { for {
off1 := auxIntToInt32(v.AuxInt) off1 := auxIntToInt32(v.AuxInt)
@ -10800,7 +10800,7 @@ func rewriteValuePPC64_OpPPC64MOVWstorezero(v *Value) bool {
off2 := auxIntToInt64(v_0.AuxInt) off2 := auxIntToInt64(v_0.AuxInt)
x := v_0.Args[0] x := v_0.Args[0]
mem := v_1 mem := v_1
if !(is16Bit(int64(off1) + off2)) { if !((supportsPPC64PCRel() && is32Bit(int64(off1)+off2)) || (is16Bit(int64(off1) + off2))) {
break break
} }
v.reset(OpPPC64MOVWstorezero) v.reset(OpPPC64MOVWstorezero)
@ -10810,7 +10810,7 @@ func rewriteValuePPC64_OpPPC64MOVWstorezero(v *Value) bool {
return true return true
} }
// match: (MOVWstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) // match: (MOVWstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem)
// cond: canMergeSym(sym1,sym2) && (x.Op != OpSB || p.Uses == 1) // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (x.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
// result: (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem) // result: (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
for { for {
off1 := auxIntToInt32(v.AuxInt) off1 := auxIntToInt32(v.AuxInt)
@ -10823,7 +10823,7 @@ func rewriteValuePPC64_OpPPC64MOVWstorezero(v *Value) bool {
sym2 := auxToSym(p.Aux) sym2 := auxToSym(p.Aux)
x := p.Args[0] x := p.Args[0]
mem := v_1 mem := v_1
if !(canMergeSym(sym1, sym2) && (x.Op != OpSB || p.Uses == 1)) { if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (x.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
break break
} }
v.reset(OpPPC64MOVWstorezero) v.reset(OpPPC64MOVWstorezero)

View File

@ -0,0 +1,71 @@
// asmcheck
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package codegen
// big1 is a fixture whose uint32 array is sized just under 4GiB so that
// element offsets used below span the full range of 16-bit and 32-bit
// displacement encodings (and one offset beyond 32-bit range).
type big1 struct {
w [1<<30 - 1]uint32
}
// big2 is a fixture whose uint64 array is sized just under 4GiB so that
// element offsets used below span the full range of 16-bit and 32-bit
// displacement encodings (and one offset beyond 32-bit range).
type big2 struct {
d [1<<29 - 1]uint64
}
// loadLargeOffset verifies instruction selection for loads with large
// constant offsets. Offsets that fit in 16 bits lower to a plain
// displacement-form load on all ppc64x targets; on power10, offsets up
// to 32 bits may also use displacement form (the assembler can emit a
// prefixed load), while power8/power9 must materialize the offset with
// an ADD and use an offset-zero or indexed load. Offsets beyond 32 bits
// always use the indexed form.
func loadLargeOffset(sw *big1, sd *big2) (uint32, uint64) {
// Offset 1<<12 bytes: fits in 16 bits, displacement form everywhere.
// ppc64x:`MOVWZ\s+[0-9]+\(R[0-9]+\)`,-`ADD`
a3 := sw.w[1<<10]
// Offset 1<<18 bytes: needs 32-bit displacement, power10 only.
// ppc64le/power10:`MOVWZ\s+[0-9]+\(R[0-9]+\),\sR[0-9]+`,-`ADD`
// ppc64x/power9:`ADD`,`MOVWZ\s+\(R[0-9]+\),\sR[0-9]+`
// ppc64x/power8:`ADD`,`MOVWZ\s+\(R[0-9]+\),\sR[0-9]+`
b3 := sw.w[1<<16]
// Offset 1<<30 bytes: still within 32-bit displacement on power10.
// ppc64le/power10:`MOVWZ\s+[0-9]+\(R[0-9]+\),\sR[0-9]+`,-`ADD`
// ppc64x/power9:`ADD`,`MOVWZ\s+\(R[0-9]+\),\sR[0-9]+`
// ppc64x/power8:`ADD`,`MOVWZ\s+\(R[0-9]+\),\sR[0-9]+`
c3 := sw.w[1<<28]
// Offset 1<<31 bytes: exceeds 32-bit displacement, indexed load required.
// ppc64x:`MOVWZ\s+\(R[0-9]+\)\(R[0-9]+\),\sR[0-9]+`
d3 := sw.w[1<<29]
// Offset 1<<13 bytes: fits in 16 bits, displacement form everywhere.
// ppc64x:`MOVD\s+[0-9]+\(R[0-9]+\)`,-`ADD`
a4 := sd.d[1<<10]
// Offset 1<<19 bytes: needs 32-bit displacement, power10 only.
// ppc64le/power10:`MOVD\s+[0-9]+\(R[0-9]+\)`,-`ADD`
// ppc64x/power9:`ADD`,`MOVD\s+\(R[0-9]+\),\sR[0-9]+`
// ppc64x/power8:`ADD`,`MOVD\s+\(R[0-9]+\),\sR[0-9]+`
b4 := sd.d[1<<16]
// Offset 1<<30 bytes: still within 32-bit displacement on power10.
// (The original directive had a stray backtick after "power10",
// which prevented the asmcheck from being applied; fixed here.)
// ppc64le/power10:`MOVD\s+[0-9]+\(R[0-9]+\)`,-`ADD`
// ppc64x/power9:`ADD`,`MOVD\s+\(R[0-9]+\),\sR[0-9]+`
// ppc64x/power8:`ADD`,`MOVD\s+\(R[0-9]+\),\sR[0-9]+`
c4 := sd.d[1<<27]
// Offset 1<<31 bytes: exceeds 32-bit displacement, indexed load required.
// ppc64x:`MOVD\s+\(R[0-9]+\)\(R[0-9]+\),\sR[0-9]+`
d4 := sd.d[1<<28]
return a3 + b3 + c3 + d3, a4 + b4 + c4 + d4
}
// storeLargeOffset verifies instruction selection for stores with large
// constant offsets, mirroring loadLargeOffset: 16-bit offsets use
// displacement-form stores on all ppc64x targets; on power10 the
// assembler can emit prefixed stores for 32-bit offsets, while
// power8/power9 must form the address with an ADD; offsets beyond
// 32 bits use the indexed store form.
func storeLargeOffset(sw *big1, sd *big2) {
// Offset 1<<12 bytes: fits in 16 bits, displacement form everywhere.
// ppc64x:`MOVW\s+R[0-9]+,\s[0-9]+\(R[0-9]+\)`,-`ADD`
sw.w[1<<10] = uint32(10)
// Offset 1<<18 bytes: needs 32-bit displacement, power10 only.
// ppc64le/power10:`MOVW\s+R[0-9]+,\s[0-9]+\(R[0-9]+\)`,-`ADD`
// ppc64x/power9:`MOVW\s+R[0-9]+\,\s\(R[0-9]+\)`,`ADD`
// ppc64x/power8:`MOVW\s+R[0-9]+\,\s\(R[0-9]+\)`,`ADD`
sw.w[1<<16] = uint32(20)
// Offset 1<<30 bytes: still within 32-bit displacement on power10.
// ppc64le/power10:`MOVW\s+R[0-9]+,\s[0-9]+\(R[0-9]+\)`,-`ADD`
// ppc64x/power9:`MOVW\s+R[0-9]+,\s\(R[0-9]+\)`,`ADD`
// ppc64x/power8:`MOVW\s+R[0-9]+,\s\(R[0-9]+\)`,`ADD`
sw.w[1<<28] = uint32(30)
// Offset 1<<31 bytes: exceeds 32-bit displacement range.
// ppc64x:`MOVW\s+R[0-9]+,\s\(R[0-9]+\)`
sw.w[1<<29] = uint32(40)
// Offset 1<<13 bytes: fits in 16 bits, displacement form everywhere.
// ppc64x:`MOVD\s+R[0-9]+,\s[0-9]+\(R[0-9]+\)`,-`ADD`
sd.d[1<<10] = uint64(40)
// Offset 1<<19 bytes: needs 32-bit displacement, power10 only.
// ppc64le/power10:`MOVD\s+R[0-9]+,\s[0-9]+\(R[0-9]+\)`,-`ADD`
// ppc64x/power9:`MOVD\s+R[0-9]+,\s\(R[0-9]+\)`,`ADD`
// ppc64x/power8:`MOVD\s+R[0-9]+,\s\(R[0-9]+\)`,`ADD`
sd.d[1<<16] = uint64(50)
// Offset 1<<30 bytes: still within 32-bit displacement on power10.
// (The original directive had a stray backtick after "power10",
// which prevented the asmcheck from being applied; fixed here.)
// ppc64le/power10:`MOVD\s+R[0-9]+,\s[0-9]+\(R[0-9]+\)`,-`ADD`
// ppc64x/power9:`MOVD\s+R[0-9]+,\s\(R[0-9]+\)`,`ADD`
// ppc64x/power8:`MOVD\s+R[0-9]+,\s\(R[0-9]+\)`,`ADD`
sd.d[1<<27] = uint64(60)
// Offset 1<<31 bytes: exceeds 32-bit displacement range.
// ppc64x:`MOVD\s+R[0-9]+,\s\(R[0-9]+\)`
sd.d[1<<28] = uint64(70)
}