Source file src/runtime/rwmutex_test.go

// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// GOMAXPROCS=10 go test

// This is a copy of sync/rwmutex_test.go rewritten to test the
// runtime rwmutex.

package runtime_test

import (
	"fmt"
	. "runtime"
	"runtime/debug"
	"sync/atomic"
	"testing"
)

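// parallelReader takes the read lock, signals on clocked that the lock is
// held, spins until cunlock is set, then unlocks and signals completion on
// cdone.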
func parallelReader(m *RWMutex, clocked chan bool, cunlock *atomic.Bool, cdone chan bool) {
	m.RLock()
	clocked <- true
	for !cunlock.Load() {
	}
	m.RUnlock()
	cdone <- true
}

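// doTestParallelReaders verifies that numReaders goroutines can hold the
// read lock simultaneously: no reader releases the lock until every RLock
// call has succeeded.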
func doTestParallelReaders(numReaders int) {
	GOMAXPROCS(numReaders + 1)
	var m RWMutex
	m.Init()
	clocked := make(chan bool, numReaders)
	var cunlock atomic.Bool
	cdone := make(chan bool)
	for i := 0; i < numReaders; i++ {
		go parallelReader(&m, clocked, &cunlock, cdone)
	}
	// Wait for all parallel RLock()s to succeed.
	for i := 0; i < numReaders; i++ {
		<-clocked
	}
	cunlock.Store(true)
	// Wait for the goroutines to finish.
	for i := 0; i < numReaders; i++ {
		<-cdone
	}
}

func TestParallelRWMutexReaders(t *testing.T) {
	if GOARCH == "wasm" {
		t.Skip("wasm has no threads yet")
	}
	defer GOMAXPROCS(GOMAXPROCS(-1))
	// If the runtime triggers a forced GC during this test, it will deadlock,
	// since the goroutines can't be stopped/preempted.
	// Disable GC for this test (see issue #10958).
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	// SetGCPercent waits until the mark phase is over, but the runtime
	// also preempts at the start of the sweep phase, so make sure that's
	// done too.
	GC()

	doTestParallelReaders(1)
	doTestParallelReaders(3)
	doTestParallelReaders(4)
}

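// reader repeatedly acquires the read lock and uses the shared activity
// counter to check that no writer is inside the critical section at the
// same time.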
func reader(rwm *RWMutex, num_iterations int, activity *int32, cdone chan bool) {
	for i := 0; i < num_iterations; i++ {
		rwm.RLock()
		n := atomic.AddInt32(activity, 1)
		if n < 1 || n >= 10000 {
			panic(fmt.Sprintf("wlock(%d)\n", n))
		}
		for i := 0; i < 100; i++ {
		}
		atomic.AddInt32(activity, -1)
		rwm.RUnlock()
	}
	cdone <- true
}

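// writer repeatedly acquires the write lock and uses the shared activity
// counter to check that it has exclusive access (no readers or other
// writers are active).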
func writer(rwm *RWMutex, num_iterations int, activity *int32, cdone chan bool) {
	for i := 0; i < num_iterations; i++ {
		rwm.Lock()
		n := atomic.AddInt32(activity, 10000)
		if n != 10000 {
			panic(fmt.Sprintf("wlock(%d)\n", n))
		}
		for i := 0; i < 100; i++ {
		}
		atomic.AddInt32(activity, -10000)
		rwm.Unlock()
	}
	cdone <- true
}

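// HammerRWMutex stresses a single RWMutex with two writers and numReaders
// readers, each performing num_iterations lock/unlock cycles.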
func HammerRWMutex(gomaxprocs, numReaders, num_iterations int) {
	GOMAXPROCS(gomaxprocs)
	// Number of active readers + 10000 * number of active writers.
	var activity int32
	var rwm RWMutex
	rwm.Init()
	cdone := make(chan bool)
	go writer(&rwm, num_iterations, &activity, cdone)
	var i int
	for i = 0; i < numReaders/2; i++ {
		go reader(&rwm, num_iterations, &activity, cdone)
	}
	go writer(&rwm, num_iterations, &activity, cdone)
	for ; i < numReaders; i++ {
		go reader(&rwm, num_iterations, &activity, cdone)
	}
	// Wait for the 2 writers and all readers to finish.
	for i := 0; i < 2+numReaders; i++ {
		<-cdone
	}
}

func TestRWMutex(t *testing.T) {
	defer GOMAXPROCS(GOMAXPROCS(-1))
	n := 1000
	if testing.Short() {
		n = 5
	}
	HammerRWMutex(1, 1, n)
	HammerRWMutex(1, 3, n)
	HammerRWMutex(1, 10, n)
	HammerRWMutex(4, 1, n)
	HammerRWMutex(4, 3, n)
	HammerRWMutex(4, 10, n)
	HammerRWMutex(10, 1, n)
	HammerRWMutex(10, 3, n)
	HammerRWMutex(10, 10, n)
	HammerRWMutex(10, 5, n)
}

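// BenchmarkRWMutexUncontended measures uncontended lock/unlock throughput;
// each goroutine gets its own RWMutex, padded so that the mutexes land on
// separate cache lines and do not cause false sharing.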
func BenchmarkRWMutexUncontended(b *testing.B) {
	type PaddedRWMutex struct {
		RWMutex
		pad [32]uint32
	}
	b.RunParallel(func(pb *testing.PB) {
		var rwm PaddedRWMutex
		rwm.Init()
		for pb.Next() {
			rwm.RLock()
			rwm.RLock()
			rwm.RUnlock()
			rwm.RUnlock()
			rwm.Lock()
			rwm.Unlock()
		}
	})
}

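// benchmarkRWMutex simulates a mixed workload: roughly one write lock per
// writeRatio operations, with localWork iterations of busy work performed
// under each read lock.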
func benchmarkRWMutex(b *testing.B, localWork, writeRatio int) {
	var rwm RWMutex
	rwm.Init()
	b.RunParallel(func(pb *testing.PB) {
		foo := 0
		for pb.Next() {
			foo++
			if foo%writeRatio == 0 {
				rwm.Lock()
				rwm.Unlock()
			} else {
				rwm.RLock()
				for i := 0; i != localWork; i += 1 {
					foo *= 2
					foo /= 2
				}
				rwm.RUnlock()
			}
		}
		_ = foo
	})
}

func BenchmarkRWMutexWrite100(b *testing.B) {
	benchmarkRWMutex(b, 0, 100)
}

func BenchmarkRWMutexWrite10(b *testing.B) {
	benchmarkRWMutex(b, 0, 10)
}

func BenchmarkRWMutexWorkWrite100(b *testing.B) {
	benchmarkRWMutex(b, 100, 100)
}

func BenchmarkRWMutexWorkWrite10(b *testing.B) {
	benchmarkRWMutex(b, 100, 10)
}
