Source file src/runtime/mgcscavenge_test.go

     1  // Copyright 2019 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime_test
     6  
     7  import (
     8  	"fmt"
     9  	"internal/goos"
    10  	"math/rand"
    11  	. "runtime"
    12  	"runtime/internal/atomic"
    13  	"testing"
    14  	"time"
    15  )
    16  
    17  // makePallocData produces an initialized PallocData by setting
    18  // the ranges of described in alloc and scavenge.
    19  func makePallocData(alloc, scavenged []BitRange) *PallocData {
    20  	b := new(PallocData)
    21  	for _, v := range alloc {
    22  		if v.N == 0 {
    23  			// Skip N==0. It's harmless and allocRange doesn't
    24  			// handle this case.
    25  			continue
    26  		}
    27  		b.AllocRange(v.I, v.N)
    28  	}
    29  	for _, v := range scavenged {
    30  		if v.N == 0 {
    31  			// See the previous loop.
    32  			continue
    33  		}
    34  		b.ScavengedSetRange(v.I, v.N)
    35  	}
    36  	return b
    37  }
    38  
    39  func TestFillAligned(t *testing.T) {
    40  	fillAlignedSlow := func(x uint64, m uint) uint64 {
    41  		if m == 1 {
    42  			return x
    43  		}
    44  		out := uint64(0)
    45  		for i := uint(0); i < 64; i += m {
    46  			for j := uint(0); j < m; j++ {
    47  				if x&(uint64(1)<<(i+j)) != 0 {
    48  					out |= ((uint64(1) << m) - 1) << i
    49  					break
    50  				}
    51  			}
    52  		}
    53  		return out
    54  	}
    55  	check := func(x uint64, m uint) {
    56  		want := fillAlignedSlow(x, m)
    57  		if got := FillAligned(x, m); got != want {
    58  			t.Logf("got:  %064b", got)
    59  			t.Logf("want: %064b", want)
    60  			t.Errorf("bad fillAligned(%016x, %d)", x, m)
    61  		}
    62  	}
    63  	for m := uint(1); m <= 64; m *= 2 {
    64  		tests := []uint64{
    65  			0x0000000000000000,
    66  			0x00000000ffffffff,
    67  			0xffffffff00000000,
    68  			0x8000000000000001,
    69  			0xf00000000000000f,
    70  			0xf00000010050000f,
    71  			0xffffffffffffffff,
    72  			0x0000000000000001,
    73  			0x0000000000000002,
    74  			0x0000000000000008,
    75  			uint64(1) << (m - 1),
    76  			uint64(1) << m,
    77  			// Try a few fixed arbitrary examples.
    78  			0xb02b9effcf137016,
    79  			0x3975a076a9fbff18,
    80  			0x0f8c88ec3b81506e,
    81  			0x60f14d80ef2fa0e6,
    82  		}
    83  		for _, test := range tests {
    84  			check(test, m)
    85  		}
    86  		for i := 0; i < 1000; i++ {
    87  			// Try a pseudo-random numbers.
    88  			check(rand.Uint64(), m)
    89  
    90  			if m > 1 {
    91  				// For m != 1, let's construct a slightly more interesting
    92  				// random test. Generate a bitmap which is either 0 or
    93  				// randomly set bits for each m-aligned group of m bits.
    94  				val := uint64(0)
    95  				for n := uint(0); n < 64; n += m {
    96  					// For each group of m bits, flip a coin:
    97  					// * Leave them as zero.
    98  					// * Set them randomly.
    99  					if rand.Uint64()%2 == 0 {
   100  						val |= (rand.Uint64() & ((1 << m) - 1)) << n
   101  					}
   102  				}
   103  				check(val, m)
   104  			}
   105  		}
   106  	}
   107  }
   108  
   109  func TestPallocDataFindScavengeCandidate(t *testing.T) {
   110  	type test struct {
   111  		alloc, scavenged []BitRange
   112  		min, max         uintptr
   113  		want             BitRange
   114  	}
   115  	tests := map[string]test{
   116  		"MixedMin1": {
   117  			alloc:     []BitRange{{0, 40}, {42, PallocChunkPages - 42}},
   118  			scavenged: []BitRange{{0, 41}, {42, PallocChunkPages - 42}},
   119  			min:       1,
   120  			max:       PallocChunkPages,
   121  			want:      BitRange{41, 1},
   122  		},
   123  		"MultiMin1": {
   124  			alloc:     []BitRange{{0, 63}, {65, 20}, {87, PallocChunkPages - 87}},
   125  			scavenged: []BitRange{{86, 1}},
   126  			min:       1,
   127  			max:       PallocChunkPages,
   128  			want:      BitRange{85, 1},
   129  		},
   130  	}
   131  	// Try out different page minimums.
   132  	for m := uintptr(1); m <= 64; m *= 2 {
   133  		suffix := fmt.Sprintf("Min%d", m)
   134  		tests["AllFree"+suffix] = test{
   135  			min:  m,
   136  			max:  PallocChunkPages,
   137  			want: BitRange{0, PallocChunkPages},
   138  		}
   139  		tests["AllScavenged"+suffix] = test{
   140  			scavenged: []BitRange{{0, PallocChunkPages}},
   141  			min:       m,
   142  			max:       PallocChunkPages,
   143  			want:      BitRange{0, 0},
   144  		}
   145  		tests["NoneFree"+suffix] = test{
   146  			alloc:     []BitRange{{0, PallocChunkPages}},
   147  			scavenged: []BitRange{{PallocChunkPages / 2, PallocChunkPages / 2}},
   148  			min:       m,
   149  			max:       PallocChunkPages,
   150  			want:      BitRange{0, 0},
   151  		}
   152  		tests["StartFree"+suffix] = test{
   153  			alloc: []BitRange{{uint(m), PallocChunkPages - uint(m)}},
   154  			min:   m,
   155  			max:   PallocChunkPages,
   156  			want:  BitRange{0, uint(m)},
   157  		}
   158  		tests["EndFree"+suffix] = test{
   159  			alloc: []BitRange{{0, PallocChunkPages - uint(m)}},
   160  			min:   m,
   161  			max:   PallocChunkPages,
   162  			want:  BitRange{PallocChunkPages - uint(m), uint(m)},
   163  		}
   164  		tests["Straddle64"+suffix] = test{
   165  			alloc: []BitRange{{0, 64 - uint(m)}, {64 + uint(m), PallocChunkPages - (64 + uint(m))}},
   166  			min:   m,
   167  			max:   2 * m,
   168  			want:  BitRange{64 - uint(m), 2 * uint(m)},
   169  		}
   170  		tests["BottomEdge64WithFull"+suffix] = test{
   171  			alloc:     []BitRange{{64, 64}, {128 + 3*uint(m), PallocChunkPages - (128 + 3*uint(m))}},
   172  			scavenged: []BitRange{{1, 10}},
   173  			min:       m,
   174  			max:       3 * m,
   175  			want:      BitRange{128, 3 * uint(m)},
   176  		}
   177  		tests["BottomEdge64WithPocket"+suffix] = test{
   178  			alloc:     []BitRange{{64, 62}, {127, 1}, {128 + 3*uint(m), PallocChunkPages - (128 + 3*uint(m))}},
   179  			scavenged: []BitRange{{1, 10}},
   180  			min:       m,
   181  			max:       3 * m,
   182  			want:      BitRange{128, 3 * uint(m)},
   183  		}
   184  		tests["Max0"+suffix] = test{
   185  			scavenged: []BitRange{{0, PallocChunkPages - uint(m)}},
   186  			min:       m,
   187  			max:       0,
   188  			want:      BitRange{PallocChunkPages - uint(m), uint(m)},
   189  		}
   190  		if m <= 8 {
   191  			tests["OneFree"] = test{
   192  				alloc: []BitRange{{0, 40}, {40 + uint(m), PallocChunkPages - (40 + uint(m))}},
   193  				min:   m,
   194  				max:   PallocChunkPages,
   195  				want:  BitRange{40, uint(m)},
   196  			}
   197  			tests["OneScavenged"] = test{
   198  				alloc:     []BitRange{{0, 40}, {40 + uint(m), PallocChunkPages - (40 + uint(m))}},
   199  				scavenged: []BitRange{{40, 1}},
   200  				min:       m,
   201  				max:       PallocChunkPages,
   202  				want:      BitRange{0, 0},
   203  			}
   204  		}
   205  		if m > 1 {
   206  			tests["MaxUnaligned"+suffix] = test{
   207  				scavenged: []BitRange{{0, PallocChunkPages - uint(m*2-1)}},
   208  				min:       m,
   209  				max:       m - 2,
   210  				want:      BitRange{PallocChunkPages - uint(m), uint(m)},
   211  			}
   212  			tests["SkipSmall"+suffix] = test{
   213  				alloc: []BitRange{{0, 64 - uint(m)}, {64, 5}, {70, 11}, {82, PallocChunkPages - 82}},
   214  				min:   m,
   215  				max:   m,
   216  				want:  BitRange{64 - uint(m), uint(m)},
   217  			}
   218  			tests["SkipMisaligned"+suffix] = test{
   219  				alloc: []BitRange{{0, 64 - uint(m)}, {64, 63}, {127 + uint(m), PallocChunkPages - (127 + uint(m))}},
   220  				min:   m,
   221  				max:   m,
   222  				want:  BitRange{64 - uint(m), uint(m)},
   223  			}
   224  			tests["MaxLessThan"+suffix] = test{
   225  				scavenged: []BitRange{{0, PallocChunkPages - uint(m)}},
   226  				min:       m,
   227  				max:       1,
   228  				want:      BitRange{PallocChunkPages - uint(m), uint(m)},
   229  			}
   230  		}
   231  	}
   232  	if PhysHugePageSize > uintptr(PageSize) {
   233  		// Check hugepage preserving behavior.
   234  		bits := uint(PhysHugePageSize / uintptr(PageSize))
   235  		if bits < PallocChunkPages {
   236  			tests["PreserveHugePageBottom"] = test{
   237  				alloc: []BitRange{{bits + 2, PallocChunkPages - (bits + 2)}},
   238  				min:   1,
   239  				max:   3, // Make it so that max would have us try to break the huge page.
   240  				want:  BitRange{0, bits + 2},
   241  			}
   242  			if 3*bits < PallocChunkPages {
   243  				// We need at least 3 huge pages in a chunk for this test to make sense.
   244  				tests["PreserveHugePageMiddle"] = test{
   245  					alloc: []BitRange{{0, bits - 10}, {2*bits + 10, PallocChunkPages - (2*bits + 10)}},
   246  					min:   1,
   247  					max:   12, // Make it so that max would have us try to break the huge page.
   248  					want:  BitRange{bits, bits + 10},
   249  				}
   250  			}
   251  			tests["PreserveHugePageTop"] = test{
   252  				alloc: []BitRange{{0, PallocChunkPages - bits}},
   253  				min:   1,
   254  				max:   1, // Even one page would break a huge page in this case.
   255  				want:  BitRange{PallocChunkPages - bits, bits},
   256  			}
   257  		} else if bits == PallocChunkPages {
   258  			tests["PreserveHugePageAll"] = test{
   259  				min:  1,
   260  				max:  1, // Even one page would break a huge page in this case.
   261  				want: BitRange{0, PallocChunkPages},
   262  			}
   263  		} else {
   264  			// The huge page size is greater than pallocChunkPages, so it should
   265  			// be effectively disabled. There's no way we can possible scavenge
   266  			// a huge page out of this bitmap chunk.
   267  			tests["PreserveHugePageNone"] = test{
   268  				min:  1,
   269  				max:  1,
   270  				want: BitRange{PallocChunkPages - 1, 1},
   271  			}
   272  		}
   273  	}
   274  	for name, v := range tests {
   275  		v := v
   276  		t.Run(name, func(t *testing.T) {
   277  			b := makePallocData(v.alloc, v.scavenged)
   278  			start, size := b.FindScavengeCandidate(PallocChunkPages-1, v.min, v.max)
   279  			got := BitRange{start, size}
   280  			if !(got.N == 0 && v.want.N == 0) && got != v.want {
   281  				t.Fatalf("candidate mismatch: got %v, want %v", got, v.want)
   282  			}
   283  		})
   284  	}
   285  }
   286  
// Tests end-to-end scavenging on a pageAlloc.
func TestPageAllocScavenge(t *testing.T) {
	if GOOS == "openbsd" && testing.Short() {
		t.Skip("skipping because virtual memory is limited; see #36210")
	}
	// test describes one Scavenge call: the number of bytes requested
	// and the number of bytes we expect to actually be released.
	type test struct {
		request, expect uintptr
	}
	// minPages is the smallest number of runtime pages that can be
	// scavenged at once, derived from the physical page size.
	minPages := PhysPageSize / PageSize
	if minPages < 1 {
		minPages = 1
	}
	// setup describes a named test case: the initial allocated and
	// scavenged state of the page allocator, a sequence of scavenge
	// requests with expected results, and the expected scavenged
	// bitmap once all requests have run.
	type setup struct {
		beforeAlloc map[ChunkIdx][]BitRange
		beforeScav  map[ChunkIdx][]BitRange
		expect      []test
		afterScav   map[ChunkIdx][]BitRange
	}
	tests := map[string]setup{
		// Everything free and unscavenged; one unbounded request
		// should scavenge all three chunks.
		"AllFreeUnscavExhaust": {
			beforeAlloc: map[ChunkIdx][]BitRange{
				BaseChunkIdx:     {},
				BaseChunkIdx + 1: {},
				BaseChunkIdx + 2: {},
			},
			beforeScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx:     {},
				BaseChunkIdx + 1: {},
				BaseChunkIdx + 2: {},
			},
			expect: []test{
				{^uintptr(0), 3 * PallocChunkPages * PageSize},
			},
			afterScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx:     {{0, PallocChunkPages}},
				BaseChunkIdx + 1: {{0, PallocChunkPages}},
				BaseChunkIdx + 2: {{0, PallocChunkPages}},
			},
		},
		// Nothing is both free and unscavenged, so even an unbounded
		// request should release zero bytes.
		"NoneFreeUnscavExhaust": {
			beforeAlloc: map[ChunkIdx][]BitRange{
				BaseChunkIdx:     {{0, PallocChunkPages}},
				BaseChunkIdx + 1: {},
				BaseChunkIdx + 2: {{0, PallocChunkPages}},
			},
			beforeScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx:     {},
				BaseChunkIdx + 1: {{0, PallocChunkPages}},
				BaseChunkIdx + 2: {},
			},
			expect: []test{
				{^uintptr(0), 0},
			},
			afterScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx:     {},
				BaseChunkIdx + 1: {{0, PallocChunkPages}},
				BaseChunkIdx + 2: {},
			},
		},
		// A tiny request should scavenge the highest free run first.
		"ScavHighestPageFirst": {
			beforeAlloc: map[ChunkIdx][]BitRange{
				BaseChunkIdx: {},
			},
			beforeScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx: {{uint(minPages), PallocChunkPages - uint(2*minPages)}},
			},
			expect: []test{
				{1, minPages * PageSize},
			},
			afterScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx: {{uint(minPages), PallocChunkPages - uint(minPages)}},
			},
		},
		// Two successive bounded requests drain the remaining
		// unscavenged memory within a single chunk.
		"ScavMultiple": {
			beforeAlloc: map[ChunkIdx][]BitRange{
				BaseChunkIdx: {},
			},
			beforeScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx: {{uint(minPages), PallocChunkPages - uint(2*minPages)}},
			},
			expect: []test{
				{minPages * PageSize, minPages * PageSize},
				{minPages * PageSize, minPages * PageSize},
			},
			afterScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx: {{0, PallocChunkPages}},
			},
		},
		// Like ScavMultiple, but the unscavenged memory spans two
		// adjacent chunks.
		"ScavMultiple2": {
			beforeAlloc: map[ChunkIdx][]BitRange{
				BaseChunkIdx:     {},
				BaseChunkIdx + 1: {},
			},
			beforeScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx:     {{uint(minPages), PallocChunkPages - uint(2*minPages)}},
				BaseChunkIdx + 1: {{0, PallocChunkPages - uint(2*minPages)}},
			},
			expect: []test{
				{2 * minPages * PageSize, 2 * minPages * PageSize},
				{minPages * PageSize, minPages * PageSize},
				{minPages * PageSize, minPages * PageSize},
			},
			afterScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx:     {{0, PallocChunkPages}},
				BaseChunkIdx + 1: {{0, PallocChunkPages}},
			},
		},
		// Unscavenged memory in two chunks separated by a gap in the
		// address space; the final unbounded request confirms nothing
		// is left.
		"ScavDiscontiguous": {
			beforeAlloc: map[ChunkIdx][]BitRange{
				BaseChunkIdx:       {},
				BaseChunkIdx + 0xe: {},
			},
			beforeScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx:       {{uint(minPages), PallocChunkPages - uint(2*minPages)}},
				BaseChunkIdx + 0xe: {{uint(2 * minPages), PallocChunkPages - uint(2*minPages)}},
			},
			expect: []test{
				{2 * minPages * PageSize, 2 * minPages * PageSize},
				{^uintptr(0), 2 * minPages * PageSize},
				{^uintptr(0), 0},
			},
			afterScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx:       {{0, PallocChunkPages}},
				BaseChunkIdx + 0xe: {{0, PallocChunkPages}},
			},
		},
	}
	// Disable these tests on iOS since we have a small address space.
	// See #46860.
	if PageAlloc64Bit != 0 && goos.IsIos == 0 {
		// Two chunks very far apart in the address space; requires a
		// 64-bit address space to set up.
		tests["ScavAllVeryDiscontiguous"] = setup{
			beforeAlloc: map[ChunkIdx][]BitRange{
				BaseChunkIdx:          {},
				BaseChunkIdx + 0x1000: {},
			},
			beforeScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx:          {},
				BaseChunkIdx + 0x1000: {},
			},
			expect: []test{
				{^uintptr(0), 2 * PallocChunkPages * PageSize},
				{^uintptr(0), 0},
			},
			afterScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx:          {{0, PallocChunkPages}},
				BaseChunkIdx + 0x1000: {{0, PallocChunkPages}},
			},
		}
	}
	for name, v := range tests {
		v := v
		t.Run(name, func(t *testing.T) {
			b := NewPageAlloc(v.beforeAlloc, v.beforeScav)
			defer FreePageAlloc(b)

			// Run each scavenge request in order and check the amount
			// of memory released against expectations.
			for iter, h := range v.expect {
				if got := b.Scavenge(h.request); got != h.expect {
					t.Fatalf("bad scavenge #%d: want %d, got %d", iter+1, h.expect, got)
				}
			}
			// The resulting allocator state should match one built
			// directly from the expected post-scavenge bitmaps.
			want := NewPageAlloc(v.beforeAlloc, v.afterScav)
			defer FreePageAlloc(want)

			checkPageAlloc(t, want, b)
		})
	}
}
   454  
// TestScavenger drives a Scavenger through several wake/park cycles with
// simulated work and sleep callbacks, checking both the amount of work
// performed and that the scavenger stays on its CPU usage target.
func TestScavenger(t *testing.T) {
	// workedTime is a standard conversion of bytes of scavenge
	// work to time elapsed.
	workedTime := func(bytes uintptr) int64 {
		return int64((bytes+4095)/4096) * int64(10*time.Microsecond)
	}

	// Set up a bunch of state that we're going to track and verify
	// throughout the test.
	totalWork := uint64(64<<20 - 3*PhysPageSize)
	var totalSlept, totalWorked atomic.Int64
	var availableWork atomic.Uint64
	var stopAt atomic.Uint64 // How much available work to stop at.

	// Set up the scavenger.
	var s Scavenger
	// Sleep doesn't actually sleep; it just records how long the
	// scavenger intended to sleep so we can check its CPU fraction.
	s.Sleep = func(ns int64) int64 {
		totalSlept.Add(ns)
		return ns
	}
	// Scavenge simulates releasing memory by drawing down
	// availableWork, reporting a deterministic elapsed time.
	s.Scavenge = func(bytes uintptr) (uintptr, int64) {
		avail := availableWork.Load()
		if uint64(bytes) > avail {
			bytes = uintptr(avail)
		}
		t := workedTime(bytes)
		if bytes != 0 {
			availableWork.Add(-int64(bytes))
			totalWorked.Add(t)
		}
		return bytes, t
	}
	// ShouldStop tells the scavenger to park once the remaining work
	// drops to the stopAt threshold.
	s.ShouldStop = func() bool {
		if availableWork.Load() <= stopAt.Load() {
			return true
		}
		return false
	}
	s.GoMaxProcs = func() int32 {
		return 1
	}

	// Define a helper for verifying that various properties hold.
	verifyScavengerState := func(t *testing.T, expWork uint64) {
		t.Helper()

		// Check to make sure it did the amount of work we expected.
		if workDone := uint64(s.Released()); workDone != expWork {
			t.Errorf("want %d bytes of work done, got %d", expWork, workDone)
		}
		// Check to make sure the scavenger is meeting its CPU target.
		idealFraction := float64(ScavengePercent) / 100.0
		cpuFraction := float64(totalWorked.Load()) / float64(totalWorked.Load()+totalSlept.Load())
		if cpuFraction < idealFraction-0.005 || cpuFraction > idealFraction+0.005 {
			t.Errorf("want %f CPU fraction, got %f", idealFraction, cpuFraction)
		}
	}

	// Start the scavenger.
	s.Start()

	// Set up some work and let the scavenger run to completion.
	availableWork.Store(totalWork)
	s.Wake()
	if !s.BlockUntilParked(2e9 /* 2 seconds */) {
		t.Fatal("timed out waiting for scavenger to run to completion")
	}
	// Run a check.
	verifyScavengerState(t, totalWork)

	// Now let's do it again and see what happens when we have no work to do.
	// It should've gone right back to sleep.
	s.Wake()
	if !s.BlockUntilParked(2e9 /* 2 seconds */) {
		t.Fatal("timed out waiting for scavenger to run to completion")
	}
	// Run another check.
	verifyScavengerState(t, totalWork)

	// One more time, this time doing the same amount of work as the first time.
	// Let's see if we can get the scavenger to continue.
	availableWork.Store(totalWork)
	s.Wake()
	if !s.BlockUntilParked(2e9 /* 2 seconds */) {
		t.Fatal("timed out waiting for scavenger to run to completion")
	}
	// Run another check.
	verifyScavengerState(t, 2*totalWork)

	// This time, let's stop after a certain amount of work.
	//
	// Pick a stopping point such that when subtracted from totalWork
	// we get a multiple of a relatively large power of 2. verifyScavengerState
	// always makes an exact check, but the scavenger might go a little over,
	// which is OK. If this breaks often or gets annoying to maintain, modify
	// verifyScavengerState.
	availableWork.Store(totalWork)
	stoppingPoint := uint64(1<<20 - 3*PhysPageSize)
	stopAt.Store(stoppingPoint)
	s.Wake()
	if !s.BlockUntilParked(2e9 /* 2 seconds */) {
		t.Fatal("timed out waiting for scavenger to run to completion")
	}
	// Run another check.
	verifyScavengerState(t, 2*totalWork+(totalWork-stoppingPoint))

	// Clean up.
	s.Stop()
}
   564  
   565  func TestScavengeIndex(t *testing.T) {
   566  	setup := func(t *testing.T) (func(ChunkIdx, uint), func(uintptr, uintptr)) {
   567  		t.Helper()
   568  
   569  		// Pick some reasonable bounds. We don't need a huge range just to test.
   570  		si := NewScavengeIndex(BaseChunkIdx, BaseChunkIdx+64)
   571  		find := func(want ChunkIdx, wantOffset uint) {
   572  			t.Helper()
   573  
   574  			got, gotOffset := si.Find()
   575  			if want != got {
   576  				t.Errorf("find: wanted chunk index %d, got %d", want, got)
   577  			}
   578  			if want != got {
   579  				t.Errorf("find: wanted page offset %d, got %d", wantOffset, gotOffset)
   580  			}
   581  			if t.Failed() {
   582  				t.FailNow()
   583  			}
   584  			si.Clear(got)
   585  		}
   586  		mark := func(base, limit uintptr) {
   587  			t.Helper()
   588  
   589  			si.Mark(base, limit)
   590  		}
   591  		return find, mark
   592  	}
   593  	t.Run("Uninitialized", func(t *testing.T) {
   594  		find, _ := setup(t)
   595  		find(0, 0)
   596  	})
   597  	t.Run("OnePage", func(t *testing.T) {
   598  		find, mark := setup(t)
   599  		mark(PageBase(BaseChunkIdx, 3), PageBase(BaseChunkIdx, 4))
   600  		find(BaseChunkIdx, 3)
   601  		find(0, 0)
   602  	})
   603  	t.Run("FirstPage", func(t *testing.T) {
   604  		find, mark := setup(t)
   605  		mark(PageBase(BaseChunkIdx, 0), PageBase(BaseChunkIdx, 1))
   606  		find(BaseChunkIdx, 0)
   607  		find(0, 0)
   608  	})
   609  	t.Run("SeveralPages", func(t *testing.T) {
   610  		find, mark := setup(t)
   611  		mark(PageBase(BaseChunkIdx, 9), PageBase(BaseChunkIdx, 14))
   612  		find(BaseChunkIdx, 13)
   613  		find(0, 0)
   614  	})
   615  	t.Run("WholeChunk", func(t *testing.T) {
   616  		find, mark := setup(t)
   617  		mark(PageBase(BaseChunkIdx, 0), PageBase(BaseChunkIdx+1, 0))
   618  		find(BaseChunkIdx, PallocChunkPages-1)
   619  		find(0, 0)
   620  	})
   621  	t.Run("LastPage", func(t *testing.T) {
   622  		find, mark := setup(t)
   623  		mark(PageBase(BaseChunkIdx, PallocChunkPages-1), PageBase(BaseChunkIdx+1, 0))
   624  		find(BaseChunkIdx, PallocChunkPages-1)
   625  		find(0, 0)
   626  	})
   627  	t.Run("TwoChunks", func(t *testing.T) {
   628  		find, mark := setup(t)
   629  		mark(PageBase(BaseChunkIdx, 128), PageBase(BaseChunkIdx+1, 128))
   630  		find(BaseChunkIdx+1, 127)
   631  		find(BaseChunkIdx, PallocChunkPages-1)
   632  		find(0, 0)
   633  	})
   634  	t.Run("TwoChunksOffset", func(t *testing.T) {
   635  		find, mark := setup(t)
   636  		mark(PageBase(BaseChunkIdx+7, 128), PageBase(BaseChunkIdx+8, 129))
   637  		find(BaseChunkIdx+8, 128)
   638  		find(BaseChunkIdx+7, PallocChunkPages-1)
   639  		find(0, 0)
   640  	})
   641  	t.Run("SevenChunksOffset", func(t *testing.T) {
   642  		find, mark := setup(t)
   643  		mark(PageBase(BaseChunkIdx+6, 11), PageBase(BaseChunkIdx+13, 15))
   644  		find(BaseChunkIdx+13, 14)
   645  		for i := BaseChunkIdx + 12; i >= BaseChunkIdx+6; i-- {
   646  			find(i, PallocChunkPages-1)
   647  		}
   648  		find(0, 0)
   649  	})
   650  	t.Run("ThirtyTwoChunks", func(t *testing.T) {
   651  		find, mark := setup(t)
   652  		mark(PageBase(BaseChunkIdx, 0), PageBase(BaseChunkIdx+32, 0))
   653  		for i := BaseChunkIdx + 31; i >= BaseChunkIdx; i-- {
   654  			find(i, PallocChunkPages-1)
   655  		}
   656  		find(0, 0)
   657  	})
   658  	t.Run("ThirtyTwoChunksOffset", func(t *testing.T) {
   659  		find, mark := setup(t)
   660  		mark(PageBase(BaseChunkIdx+3, 0), PageBase(BaseChunkIdx+35, 0))
   661  		for i := BaseChunkIdx + 34; i >= BaseChunkIdx+3; i-- {
   662  			find(i, PallocChunkPages-1)
   663  		}
   664  		find(0, 0)
   665  	})
   666  	t.Run("Mark", func(t *testing.T) {
   667  		find, mark := setup(t)
   668  		for i := BaseChunkIdx; i < BaseChunkIdx+32; i++ {
   669  			mark(PageBase(i, 0), PageBase(i+1, 0))
   670  		}
   671  		for i := BaseChunkIdx + 31; i >= BaseChunkIdx; i-- {
   672  			find(i, PallocChunkPages-1)
   673  		}
   674  		find(0, 0)
   675  	})
   676  	t.Run("MarkInterleaved", func(t *testing.T) {
   677  		find, mark := setup(t)
   678  		for i := BaseChunkIdx; i < BaseChunkIdx+32; i++ {
   679  			mark(PageBase(i, 0), PageBase(i+1, 0))
   680  			find(i, PallocChunkPages-1)
   681  		}
   682  		find(0, 0)
   683  	})
   684  	t.Run("MarkIdempotentOneChunk", func(t *testing.T) {
   685  		find, mark := setup(t)
   686  		mark(PageBase(BaseChunkIdx, 0), PageBase(BaseChunkIdx+1, 0))
   687  		mark(PageBase(BaseChunkIdx, 0), PageBase(BaseChunkIdx+1, 0))
   688  		find(BaseChunkIdx, PallocChunkPages-1)
   689  		find(0, 0)
   690  	})
   691  	t.Run("MarkIdempotentThirtyTwoChunks", func(t *testing.T) {
   692  		find, mark := setup(t)
   693  		mark(PageBase(BaseChunkIdx, 0), PageBase(BaseChunkIdx+32, 0))
   694  		mark(PageBase(BaseChunkIdx, 0), PageBase(BaseChunkIdx+32, 0))
   695  		for i := BaseChunkIdx + 31; i >= BaseChunkIdx; i-- {
   696  			find(i, PallocChunkPages-1)
   697  		}
   698  		find(0, 0)
   699  	})
   700  	t.Run("MarkIdempotentThirtyTwoChunksOffset", func(t *testing.T) {
   701  		find, mark := setup(t)
   702  		mark(PageBase(BaseChunkIdx+4, 0), PageBase(BaseChunkIdx+31, 0))
   703  		mark(PageBase(BaseChunkIdx+5, 0), PageBase(BaseChunkIdx+36, 0))
   704  		for i := BaseChunkIdx + 35; i >= BaseChunkIdx+4; i-- {
   705  			find(i, PallocChunkPages-1)
   706  		}
   707  		find(0, 0)
   708  	})
   709  }
   710  

View as plain text