diff --git a/src/runtime/cgocall.go b/src/runtime/cgocall.go
index 0e287d0b8e..1dc5b415fb 100644
--- a/src/runtime/cgocall.go
+++ b/src/runtime/cgocall.go
@@ -110,6 +110,8 @@ func syscall_cgocaller(fn unsafe.Pointer, args ...uintptr) uintptr {
 	return as.retval
 }
 
+var ncgocall uint64 // number of cgo calls in total
+
 // Call from Go to C.
 //
 // This must be nosplit because it's used for syscalls on some
@@ -130,8 +132,8 @@ func cgocall(fn, arg unsafe.Pointer) int32 {
 		racereleasemerge(unsafe.Pointer(&racecgosync))
 	}
 
+	atomic.Xadd64(&ncgocall, 1)
 	mp := getg().m
-	mp.ncgocall++
 	mp.ncgo++
 
 	// Reset traceback.
diff --git a/src/runtime/debug.go b/src/runtime/debug.go
index f411b22676..45cfa6ed08 100644
--- a/src/runtime/debug.go
+++ b/src/runtime/debug.go
@@ -6,7 +6,7 @@ package runtime
 
 import (
 	"runtime/internal/atomic"
-	"unsafe"
+	_ "unsafe"
 )
 
 // GOMAXPROCS sets the maximum number of CPUs that can be executing
@@ -45,11 +45,7 @@ func NumCPU() int {
 
 // NumCgoCall returns the number of cgo calls made by the current process.
 func NumCgoCall() int64 {
-	var n int64
-	for mp := (*m)(atomic.Loadp(unsafe.Pointer(&allm))); mp != nil; mp = mp.alllink {
-		n += int64(mp.ncgocall)
-	}
-	return n
+	return int64(atomic.Load64(&ncgocall))
 }
 
 // NumGoroutine returns the number of goroutines that currently exist.
diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go
index 0e0eb0b728..541a55b35d 100644
--- a/src/runtime/runtime2.go
+++ b/src/runtime/runtime2.go
@@ -541,7 +541,6 @@ type m struct {
 	fastrand      [2]uint32
 	needextram    bool
 	traceback     uint8
-	ncgocall      uint64      // number of cgo calls in total
 	ncgo          int32       // number of cgo calls currently in progress
 	cgoCallersUse uint32      // if non-zero, cgoCallers in use temporarily
 	cgoCallers    *cgoCallers // cgo traceback if crashing in cgo call