diff --git a/src/runtime/asm_power64x.s b/src/runtime/asm_power64x.s
index b489f6accb..21220e5cb8 100644
--- a/src/runtime/asm_power64x.s
+++ b/src/runtime/asm_power64x.s
@@ -557,13 +557,27 @@ TEXT runtime·atomicstore64(SB), NOSPLIT, $0-16
 
 // void runtime·atomicor8(byte volatile*, byte);
 TEXT runtime·atomicor8(SB), NOSPLIT, $0-9
-	MOVD	0(FP), R3
-	MOVD	8(FP), R4
+	MOVD	ptr+0(FP), R3
+	MOVBZ	val+8(FP), R4
+	// Align ptr down to 4 bytes so we can use 32-bit load/store.
+	// R5 = (R3 << 0) & ~3
+	RLDCR	$0, R3, $~3, R5
+	// Compute val shift.
+#ifdef GOARCH_power64
+	// Big endian.  ptr = ptr ^ 3
+	XOR	$3, R3
+#endif
+	// R6 = ((ptr & 3) * 8) = (ptr << 3) & (3*8)
+	RLDC	$3, R3, $(3*8), R6
+	// Shift val for aligned ptr.  R4 = val << R6
+	SLD	R6, R4, R4
+
+atomicor8_again:
 	SYNC
-	LWAR	(R3), R5
-	OR	R4, R5
-	STWCCC	R5, (R3)
-	BNE	-3(PC)
+	LWAR	(R5), R6
+	OR	R4, R6
+	STWCCC	R6, (R5)
+	BNE	atomicor8_again
 	SYNC
 	ISYNC
 	RETURN
diff --git a/src/runtime/runtime.c b/src/runtime/runtime.c
index b3503fb909..d984983ce2 100644
--- a/src/runtime/runtime.c
+++ b/src/runtime/runtime.c
@@ -185,6 +185,7 @@ runtime·check(void)
 	float64 j, j1;
 	byte *k, *k1;
 	uint16* l;
+	byte m[4];
 	struct x1 {
 		byte x;
 	};
@@ -236,6 +237,11 @@ runtime·check(void)
 	if(k != k1)
 		runtime·throw("casp3");
 
+	m[0] = m[1] = m[2] = m[3] = 0x1;
+	runtime·atomicor8(&m[1], 0xf0);
+	if (m[0] != 0x1 || m[1] != 0xf1 || m[2] != 0x1 || m[3] != 0x1)
+		runtime·throw("atomicor8");
+
 	*(uint64*)&j = ~0ULL;
 	if(j == j)
 		runtime·throw("float64nan");
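
Note (not part of the patch): the assembly above implements a byte-wide atomic OR on top of 32-bit LWAR/STWCCC by rounding the pointer down to its enclosing 4-byte word, shifting the value to the byte's position within that word, and flipping the byte index with XOR 3 on the big-endian GOARCH_power64 target. The C sketch below illustrates the same trick using a generic compare-and-swap loop in place of the LL/SC pair; the function name atomicor8_sketch and the use of the GCC/Clang __sync builtin are illustrative assumptions, not how the runtime itself is built.

#include <stdint.h>

/*
 * Illustrative sketch only: emulate an 8-bit atomic OR with a 32-bit
 * compare-and-swap on the containing aligned word, mirroring the shift
 * computation done in the Power64 assembly above.
 */
static void
atomicor8_sketch(volatile uint8_t *p, uint8_t v)
{
	volatile uint32_t *word;
	uintptr_t idx;
	uint32_t mask, old;

	/* Align ptr down to 4 bytes so a 32-bit load/store can be used. */
	word = (volatile uint32_t*)((uintptr_t)p & ~(uintptr_t)3);

	/* Byte index within the word. */
	idx = (uintptr_t)p & 3;
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	/* Big endian: byte 0 sits in the high-order bits, so flip the index. */
	idx ^= 3;
#endif
	/* Shift val into position, matching R6 = ((ptr & 3) * 8). */
	mask = (uint32_t)v << (idx * 8);

	/* Retry loop, analogous to the LWAR/OR/STWCCC/BNE sequence. */
	do
		old = *word;
	while(!__sync_bool_compare_and_swap(word, old, old | mask));
}

The check added to runtime·check exercises exactly the property this construction must preserve: ORing 0xf0 into m[1] may not disturb the neighboring bytes m[0], m[2], and m[3] that share the same 32-bit word.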