mirror of https://github.com/golang/go.git
sync: convert RWMutex.{readerCount,readerWait} to atomic type
Change-Id: I7fd9c0636cd00891f5cdf36c0f68f897772042f2
Reviewed-on: https://go-review.googlesource.com/c/go/+/429767
TryBot-Result: Gopher Robot <gobot@golang.org>
Reviewed-by: Cherry Mui <cherryyz@google.com>
Reviewed-by: Bryan Mills <bcmills@google.com>
Auto-Submit: Cuong Manh Le <cuong.manhle.vn@gmail.com>
Run-TryBot: Cuong Manh Le <cuong.manhle.vn@gmail.com>
This commit is contained in:
parent 9dd9174fa5
commit e509452727
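
The change below is mechanical: fields declared as plain int32 and updated through sync/atomic's free functions become atomic.Int32 values with methods. A minimal, self-contained sketch of the two styles (illustration only, not code from this commit):

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    type oldStyle struct {
        readerCount int32 // plain int32; every access must go through the atomic.* free functions
    }

    type newStyle struct {
        readerCount atomic.Int32 // typed atomic; a plain read or write is no longer possible by accident
    }

    func main() {
        var o oldStyle
        atomic.AddInt32(&o.readerCount, 1)
        fmt.Println(atomic.LoadInt32(&o.readerCount)) // 1

        var n newStyle
        n.readerCount.Add(1)
        fmt.Println(n.readerCount.Load()) // 1
    }

Besides being harder to misuse, the typed form's 64-bit variants (atomic.Int64, atomic.Uint64) also guarantee 64-bit alignment, which the free functions leave to the caller on 32-bit platforms.
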
src/sync/rwmutex.go
@@ -34,11 +34,11 @@ import (
 // and the corresponding call to RUnlock “synchronizes before”
 // the n+1'th call to Lock.
 type RWMutex struct {
-    w           Mutex  // held if there are pending writers
-    writerSem   uint32 // semaphore for writers to wait for completing readers
-    readerSem   uint32 // semaphore for readers to wait for completing writers
-    readerCount int32  // number of pending readers
-    readerWait  int32  // number of departing readers
+    w           Mutex        // held if there are pending writers
+    writerSem   uint32       // semaphore for writers to wait for completing readers
+    readerSem   uint32       // semaphore for readers to wait for completing writers
+    readerCount atomic.Int32 // number of pending readers
+    readerWait  atomic.Int32 // number of departing readers
 }

 const rwmutexMaxReaders = 1 << 30
@@ -66,7 +66,7 @@ func (rw *RWMutex) RLock() {
         _ = rw.w.state
         race.Disable()
     }
-    if atomic.AddInt32(&rw.readerCount, 1) < 0 {
+    if rw.readerCount.Add(1) < 0 {
         // A writer is pending, wait for it.
         runtime_SemacquireMutex(&rw.readerSem, false, 0)
     }
@@ -87,14 +87,14 @@ func (rw *RWMutex) TryRLock() bool {
         race.Disable()
     }
     for {
-        c := atomic.LoadInt32(&rw.readerCount)
+        c := rw.readerCount.Load()
         if c < 0 {
             if race.Enabled {
                 race.Enable()
             }
             return false
         }
-        if atomic.CompareAndSwapInt32(&rw.readerCount, c, c+1) {
+        if rw.readerCount.CompareAndSwap(c, c+1) {
             if race.Enabled {
                 race.Enable()
                 race.Acquire(unsafe.Pointer(&rw.readerSem))
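
TryRLock above uses the standard optimistic pattern for conditional atomic updates: load the current value, decide, then CompareAndSwap, and retry if another goroutine changed the value in between. A hypothetical standalone sketch of the same pattern (not the sync package's code):

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    // tryAcquire increments the counter only while it stays below limit,
    // reporting false instead of blocking once the limit is reached.
    func tryAcquire(c *atomic.Int32, limit int32) bool {
        for {
            cur := c.Load()
            if cur >= limit {
                return false // at the limit; give up rather than wait
            }
            if c.CompareAndSwap(cur, cur+1) {
                return true // we won the race and took a slot
            }
            // Another goroutine changed the counter between Load and
            // CompareAndSwap; loop and re-evaluate the fresh value.
        }
    }

    func main() {
        var c atomic.Int32
        fmt.Println(tryAcquire(&c, 1)) // true
        fmt.Println(tryAcquire(&c, 1)) // false: already at the limit
    }
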
@@ -114,7 +114,7 @@ func (rw *RWMutex) RUnlock() {
         race.ReleaseMerge(unsafe.Pointer(&rw.writerSem))
         race.Disable()
     }
-    if r := atomic.AddInt32(&rw.readerCount, -1); r < 0 {
+    if r := rw.readerCount.Add(-1); r < 0 {
         // Outlined slow-path to allow the fast-path to be inlined
         rw.rUnlockSlow(r)
     }
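
The "Outlined slow-path" comment above names a pattern the sync package leans on: keep the hot path below the compiler's inlining budget and move the rare path into its own function. A hypothetical sketch of that shape (the gate type and its methods are made up for illustration, not the sync package's code):

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    type gate struct {
        n atomic.Int32
    }

    func (g *gate) Enter() { g.n.Add(1) }

    // Leave is the small fast path: one atomic decrement plus a branch,
    // cheap enough that calls to it can be inlined at the call site.
    func (g *gate) Leave() {
        if v := g.n.Add(-1); v < 0 {
            g.leaveSlow(v) // outlined; only reached on misuse
        }
    }

    // leaveSlow lives in a separate function so its cost does not count
    // against Leave's inlining budget.
    func (g *gate) leaveSlow(v int32) {
        panic(fmt.Sprintf("gate: Leave without matching Enter (count %d)", v))
    }

    func main() {
        var g gate
        g.Enter()
        g.Leave()
        fmt.Println("balanced")
    }
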
@@ -129,7 +129,7 @@ func (rw *RWMutex) rUnlockSlow(r int32) {
         fatal("sync: RUnlock of unlocked RWMutex")
     }
     // A writer is pending.
-    if atomic.AddInt32(&rw.readerWait, -1) == 0 {
+    if rw.readerWait.Add(-1) == 0 {
         // The last reader unblocks the writer.
         runtime_Semrelease(&rw.writerSem, false, 1)
     }
@@ -146,9 +146,9 @@ func (rw *RWMutex) Lock() {
     // First, resolve competition with other writers.
     rw.w.Lock()
     // Announce to readers there is a pending writer.
-    r := atomic.AddInt32(&rw.readerCount, -rwmutexMaxReaders) + rwmutexMaxReaders
+    r := rw.readerCount.Add(-rwmutexMaxReaders) + rwmutexMaxReaders
     // Wait for active readers.
-    if r != 0 && atomic.AddInt32(&rw.readerWait, r) != 0 {
+    if r != 0 && rw.readerWait.Add(r) != 0 {
         runtime_SemacquireMutex(&rw.writerSem, false, 0)
     }
     if race.Enabled {
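
The arithmetic in Lock above packs two facts into one counter: subtracting rwmutexMaxReaders drives readerCount negative, which is how RLock's sign check detects a pending writer, and adding rwmutexMaxReaders back recovers how many readers were active at the moment of the announcement. A toy, hypothetical sketch of that encoding (not the sync package's code):

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    const maxReaders = 1 << 30

    func main() {
        var readerCount atomic.Int32

        // Two readers arrive while no writer is pending: the count stays
        // positive, so they may proceed.
        fmt.Println(readerCount.Add(1) > 0) // true
        fmt.Println(readerCount.Add(1) > 0) // true

        // A writer announces itself; r recovers the number of readers that
        // were already active and must be waited for.
        r := readerCount.Add(-maxReaders) + maxReaders
        fmt.Println(r) // 2

        // A reader arriving after the announcement sees a negative count
        // and knows to block on the reader semaphore instead.
        fmt.Println(readerCount.Add(1) < 0) // true
    }
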
@@ -174,7 +174,7 @@ func (rw *RWMutex) TryLock() bool {
         }
         return false
     }
-    if !atomic.CompareAndSwapInt32(&rw.readerCount, 0, -rwmutexMaxReaders) {
+    if !rw.readerCount.CompareAndSwap(0, -rwmutexMaxReaders) {
         rw.w.Unlock()
         if race.Enabled {
             race.Enable()
@@ -203,7 +203,7 @@ func (rw *RWMutex) Unlock() {
     }

     // Announce to readers there is no active writer.
-    r := atomic.AddInt32(&rw.readerCount, rwmutexMaxReaders)
+    r := rw.readerCount.Add(rwmutexMaxReaders)
     if r >= rwmutexMaxReaders {
         race.Enable()
         fatal("sync: Unlock of unlocked RWMutex")
test/inline_sync.go
@@ -1,6 +1,7 @@
-// +build !nacl,!386,!wasm,!arm,!gcflags_noopt
 // errorcheck -0 -m

+//go:build !nacl && !386 && !wasm && !arm && !gcflags_noopt
+
 // Copyright 2019 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
@@ -35,19 +36,18 @@ func small6() { // ERROR "can inline small6"
 var once *sync.Once

 func small7() { // ERROR "can inline small7"
-    // the Do fast path should be inlined
-    once.Do(small5) // ERROR "inlining call to sync\.\(\*Once\)\.Do"
+    // the Do fast path should be inlined
+    once.Do(small5) // ERROR "inlining call to sync\.\(\*Once\)\.Do"
 }

 var rwmutex *sync.RWMutex

-func small8() { // ERROR "can inline small8"
-    // the RUnlock fast path should be inlined
-    rwmutex.RUnlock() // ERROR "inlining call to sync\.\(\*RWMutex\)\.RUnlock"
+func small8() {
+    // the RUnlock fast path should be inlined
+    rwmutex.RUnlock() // ERROR "inlining call to sync\.\(\*RWMutex\)\.RUnlock" "inlining call to atomic\.\(\*Int32\)\.Add"
 }

-func small9() { // ERROR "can inline small9"
-    // the RLock fast path should be inlined
-    rwmutex.RLock() // ERROR "inlining call to sync\.\(\*RWMutex\)\.RLock"
+func small9() {
+    // the RLock fast path should be inlined
+    rwmutex.RLock() // ERROR "inlining call to sync\.\(\*RWMutex\)\.RLock" "inlining call to atomic\.\(\*Int32\)\.Add"
 }
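
The ERROR comments in this test are matched against the compiler's own inlining diagnostics (the file's errorcheck -0 -m header runs the compiler with inlining reports enabled). A rough way to see similar output outside the test harness is to build a scratch file with go build -gcflags=-m; the exact wording varies between Go releases, so the comments below only indicate the kind of lines to expect:

    // scratch.go — illustration only; build with: go build -gcflags=-m scratch.go
    package main

    import "sync"

    var rw sync.RWMutex
    var n int

    func readLocked() int {
        rw.RLock() // -m typically reports: "inlining call to sync.(*RWMutex).RLock"
        v := n
        rw.RUnlock() // and: "inlining call to sync.(*RWMutex).RUnlock"
        return v
    }

    func main() {
        _ = readLocked()
    }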