// Source file src/runtime/slice.go

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime
     6  
     7  import (
     8  	"internal/abi"
     9  	"internal/goarch"
    10  	"runtime/internal/math"
    11  	"runtime/internal/sys"
    12  	"unsafe"
    13  )
    14  
// slice is the runtime's representation of a slice header:
// a pointer to the backing array plus the current length and capacity.
// NOTE(review): presumably this layout must match the compiler's and
// reflect's view of a slice header — confirm before reordering fields.
type slice struct {
	array unsafe.Pointer // pointer to the first element of the backing array
	len   int            // number of elements currently in the slice
	cap   int            // number of elements the backing array can hold
}
    20  
// A notInHeapSlice is a slice backed by go:notinheap memory,
// i.e. storage that lives outside the garbage-collected heap.
type notInHeapSlice struct {
	array *notInHeap // backing storage outside the GC'd heap
	len   int        // current length
	cap   int        // capacity of the backing storage
}
    27  
// panicmakeslicelen panics for a make slice call whose length is
// negative or too large to allocate.
func panicmakeslicelen() {
	panic(errorString("makeslice: len out of range"))
}
    31  
// panicmakeslicecap panics for a make slice call whose capacity is
// negative or too large to allocate.
func panicmakeslicecap() {
	panic(errorString("makeslice: cap out of range"))
}
    35  
// makeslicecopy allocates a slice of "tolen" elements of type "et",
// then copies "fromlen" elements of type "et" into that new allocation from "from".
// It returns a pointer to the new backing array; the caller builds the
// slice header around it.
func makeslicecopy(et *_type, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer {
	var tomem, copymem uintptr
	if uintptr(tolen) > uintptr(fromlen) {
		// tolen exceeds the known-good fromlen (or is negative, which the
		// unsigned comparison also routes here), so its byte size must be
		// validated against overflow and the allocator's limit.
		var overflow bool
		tomem, overflow = math.MulUintptr(et.size, uintptr(tolen))
		if overflow || tomem > maxAlloc || tolen < 0 {
			panicmakeslicelen()
		}
		copymem = et.size * uintptr(fromlen)
	} else {
		// fromlen is a known good length equal to or greater than tolen,
		// thereby making tolen a good slice length too as from and to slices have the
		// same element width. No overflow checks are needed here.
		tomem = et.size * uintptr(tolen)
		copymem = tomem
	}

	var to unsafe.Pointer
	if et.ptrdata == 0 {
		// Element type holds no pointers: allocate unscanned memory and
		// only zero the tail that the copy below will not overwrite.
		to = mallocgc(tomem, nil, false)
		if copymem < tomem {
			memclrNoHeapPointers(add(to, copymem), tomem-copymem)
		}
	} else {
		// Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory.
		to = mallocgc(tomem, et, true)
		if copymem > 0 && writeBarrier.enabled {
			// Only shade the pointers in old.array since we know the destination slice
			// only contains nil pointers because it has been cleared during alloc.
			bulkBarrierPreWriteSrcOnly(uintptr(to), uintptr(from), copymem)
		}
	}

	// Report the source read to whichever sanitizers are active.
	if raceenabled {
		callerpc := getcallerpc()
		pc := abi.FuncPCABIInternal(makeslicecopy)
		racereadrangepc(from, copymem, callerpc, pc)
	}
	if msanenabled {
		msanread(from, copymem)
	}
	if asanenabled {
		asanread(from, copymem)
	}

	memmove(to, from, copymem)

	return to
}
    87  
// makeslice allocates and returns the backing array for make([]T, len, cap),
// panicking if the length or capacity is out of range.
func makeslice(et *_type, len, cap int) unsafe.Pointer {
	// Total allocation size is cap*et.size; MulUintptr reports overflow.
	mem, overflow := math.MulUintptr(et.size, uintptr(cap))
	if overflow || mem > maxAlloc || len < 0 || len > cap {
		// NOTE: Produce a 'len out of range' error instead of a
		// 'cap out of range' error when someone does make([]T, bignumber).
		// 'cap out of range' is true too, but since the cap is only being
		// supplied implicitly, saying len is clearer.
		// See golang.org/issue/4085.
		mem, overflow := math.MulUintptr(et.size, uintptr(len))
		if overflow || mem > maxAlloc || len < 0 {
			panicmakeslicelen()
		}
		panicmakeslicecap()
	}

	// Allocate zeroed memory, scannable by the GC if et contains pointers.
	return mallocgc(mem, et, true)
}
   105  
   106  func makeslice64(et *_type, len64, cap64 int64) unsafe.Pointer {
   107  	len := int(len64)
   108  	if int64(len) != len64 {
   109  		panicmakeslicelen()
   110  	}
   111  
   112  	cap := int(cap64)
   113  	if int64(cap) != cap64 {
   114  		panicmakeslicecap()
   115  	}
   116  
   117  	return makeslice(et, len, cap)
   118  }
   119  
// This is a wrapper over runtime/internal/math.MulUintptr,
// so the compiler can recognize and treat it as an intrinsic.
// It returns a*b and whether the multiplication overflowed.
// Do not change the body: the intrinsic substitution depends on it.
func mulUintptr(a, b uintptr) (uintptr, bool) {
	return math.MulUintptr(a, b)
}
   125  
// unsafeslice validates the arguments of unsafe.Slice(ptr, len) and
// panics when they do not describe an addressable region; the compiler
// builds the slice header itself after this check passes.
//
// Keep this code in sync with cmd/compile/internal/walk/builtin.go:walkUnsafeSlice
func unsafeslice(et *_type, ptr unsafe.Pointer, len int) {
	if len < 0 {
		panicunsafeslicelen()
	}

	mem, overflow := math.MulUintptr(et.size, uintptr(len))
	// -uintptr(ptr) is the number of bytes from ptr to the end of the
	// address space, so mem > -uintptr(ptr) means ptr+mem would wrap.
	// For ptr == nil this is -0 == 0, so any mem > 0 also lands here,
	// where the nil case is split out for a clearer panic message.
	if overflow || mem > -uintptr(ptr) {
		if ptr == nil {
			panicunsafeslicenilptr()
		}
		panicunsafeslicelen()
	}
}
   140  
// unsafeslice64 is the int64 entry point for unsafe.Slice validation:
// it rejects lengths that do not fit in an int on this platform, then
// defers to unsafeslice for the remaining checks.
//
// Keep this code in sync with cmd/compile/internal/walk/builtin.go:walkUnsafeSlice
func unsafeslice64(et *_type, ptr unsafe.Pointer, len64 int64) {
	len := int(len64)
	if int64(len) != len64 {
		// len64 does not round-trip through int: out of range.
		panicunsafeslicelen()
	}
	unsafeslice(et, ptr, len)
}
   149  
// unsafeslicecheckptr performs the normal unsafe.Slice validation and
// then, via checkptrStraddles, additionally verifies that the resulting
// slice lies within a single allocation. Used when checkptr
// instrumentation is enabled.
func unsafeslicecheckptr(et *_type, ptr unsafe.Pointer, len64 int64) {
	unsafeslice64(et, ptr, len64)

	// Check that underlying array doesn't straddle multiple heap objects.
	// unsafeslice64 has already checked for overflow.
	if checkptrStraddles(ptr, uintptr(len64)*et.size) {
		throw("checkptr: unsafe.Slice result straddles multiple allocations")
	}
}
   159  
// panicunsafeslicelen panics for an unsafe.Slice call whose length is
// negative or too large for the given pointer.
func panicunsafeslicelen() {
	panic(errorString("unsafe.Slice: len out of range"))
}
   163  
// panicunsafeslicenilptr panics for an unsafe.Slice call with a nil
// pointer and a non-zero length.
func panicunsafeslicenilptr() {
	panic(errorString("unsafe.Slice: ptr is nil and len is not zero"))
}
   167  
// growslice handles slice growth during append.
// It is passed the slice element type, the old slice, and the desired new minimum capacity,
// and it returns a new slice with at least that capacity, with the old data
// copied into it.
// The new slice's length is set to the old slice's length,
// NOT to the new requested capacity.
// This is for codegen convenience. The old slice's length is used immediately
// to calculate where to write new values during an append.
// TODO: When the old backend is gone, reconsider this decision.
// The SSA backend might prefer the new length or to return only ptr/cap and save stack space.
func growslice(et *_type, old slice, cap int) slice {
	// Report the read of the old backing store to the active sanitizers.
	if raceenabled {
		callerpc := getcallerpc()
		racereadrangepc(old.array, uintptr(old.len*int(et.size)), callerpc, abi.FuncPCABIInternal(growslice))
	}
	if msanenabled {
		msanread(old.array, uintptr(old.len*int(et.size)))
	}
	if asanenabled {
		asanread(old.array, uintptr(old.len*int(et.size)))
	}

	// Shrinking (or int overflow in the caller's cap computation) is a bug.
	if cap < old.cap {
		panic(errorString("growslice: cap out of range"))
	}

	if et.size == 0 {
		// append should not create a slice with nil pointer but non-zero len.
		// We assume that append doesn't need to preserve old.array in this case.
		return slice{unsafe.Pointer(&zerobase), old.len, cap}
	}

	// Pick a tentative new capacity before rounding to a size class.
	newcap := old.cap
	doublecap := newcap + newcap
	if cap > doublecap {
		// Requested capacity is more than double: jump straight to it.
		newcap = cap
	} else {
		const threshold = 256
		if old.cap < threshold {
			// Small slices grow by doubling.
			newcap = doublecap
		} else {
			// Check 0 < newcap to detect overflow
			// and prevent an infinite loop.
			for 0 < newcap && newcap < cap {
				// Transition from growing 2x for small slices
				// to growing 1.25x for large slices. This formula
				// gives a smooth-ish transition between the two.
				newcap += (newcap + 3*threshold) / 4
			}
			// Set newcap to the requested cap when
			// the newcap calculation overflowed.
			if newcap <= 0 {
				newcap = cap
			}
		}
	}

	var overflow bool
	var lenmem, newlenmem, capmem uintptr
	// Specialize for common values of et.size.
	// For 1 we don't need any division/multiplication.
	// For goarch.PtrSize, compiler will optimize division/multiplication into a shift by a constant.
	// For powers of 2, use a variable shift.
	// In every case capmem is rounded up to an allocator size class, and
	// newcap is recomputed from it so the extra space becomes usable capacity.
	switch {
	case et.size == 1:
		lenmem = uintptr(old.len)
		newlenmem = uintptr(cap)
		capmem = roundupsize(uintptr(newcap))
		overflow = uintptr(newcap) > maxAlloc
		newcap = int(capmem)
	case et.size == goarch.PtrSize:
		lenmem = uintptr(old.len) * goarch.PtrSize
		newlenmem = uintptr(cap) * goarch.PtrSize
		capmem = roundupsize(uintptr(newcap) * goarch.PtrSize)
		overflow = uintptr(newcap) > maxAlloc/goarch.PtrSize
		newcap = int(capmem / goarch.PtrSize)
	case isPowerOfTwo(et.size):
		var shift uintptr
		if goarch.PtrSize == 8 {
			// Mask shift for better code generation.
			shift = uintptr(sys.Ctz64(uint64(et.size))) & 63
		} else {
			shift = uintptr(sys.Ctz32(uint32(et.size))) & 31
		}
		lenmem = uintptr(old.len) << shift
		newlenmem = uintptr(cap) << shift
		capmem = roundupsize(uintptr(newcap) << shift)
		overflow = uintptr(newcap) > (maxAlloc >> shift)
		newcap = int(capmem >> shift)
	default:
		lenmem = uintptr(old.len) * et.size
		newlenmem = uintptr(cap) * et.size
		capmem, overflow = math.MulUintptr(et.size, uintptr(newcap))
		capmem = roundupsize(capmem)
		newcap = int(capmem / et.size)
	}

	// The check of overflow in addition to capmem > maxAlloc is needed
	// to prevent an overflow which can be used to trigger a segfault
	// on 32bit architectures with this example program:
	//
	// type T [1<<27 + 1]int64
	//
	// var d T
	// var s []T
	//
	// func main() {
	//   s = append(s, d, d, d, d)
	//   print(len(s), "\n")
	// }
	if overflow || capmem > maxAlloc {
		panic(errorString("growslice: cap out of range"))
	}

	var p unsafe.Pointer
	if et.ptrdata == 0 {
		// Element type holds no pointers: allocate unscanned memory.
		p = mallocgc(capmem, nil, false)
		// The append() that calls growslice is going to overwrite from old.len to cap (which will be the new length).
		// Only clear the part that will not be overwritten.
		memclrNoHeapPointers(add(p, newlenmem), capmem-newlenmem)
	} else {
		// Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory.
		p = mallocgc(capmem, et, true)
		if lenmem > 0 && writeBarrier.enabled {
			// Only shade the pointers in old.array since we know the destination slice p
			// only contains nil pointers because it has been cleared during alloc.
			// lenmem-et.size+et.ptrdata stops at the last pointer word of the last element.
			bulkBarrierPreWriteSrcOnly(uintptr(p), uintptr(old.array), lenmem-et.size+et.ptrdata)
		}
	}
	// Copy the existing elements into the new backing array.
	memmove(p, old.array, lenmem)

	return slice{p, old.len, newcap}
}
   301  
   302  func isPowerOfTwo(x uintptr) bool {
   303  	return x&(x-1) == 0
   304  }
   305  
// slicecopy is used to copy from a string or slice of pointerless elements into a slice.
// toPtr/toLen and fromPtr/fromLen describe the destination and source; width is
// the element size in bytes. It returns the number of elements copied,
// which is min(fromLen, toLen).
func slicecopy(toPtr unsafe.Pointer, toLen int, fromPtr unsafe.Pointer, fromLen int, width uintptr) int {
	if fromLen == 0 || toLen == 0 {
		return 0
	}

	// Copy the shorter of the two lengths.
	n := fromLen
	if toLen < n {
		n = toLen
	}

	// Zero-width elements: nothing to move, but the count still matters.
	if width == 0 {
		return n
	}

	size := uintptr(n) * width
	// Report the read and write ranges to the active sanitizers.
	if raceenabled {
		callerpc := getcallerpc()
		pc := abi.FuncPCABIInternal(slicecopy)
		racereadrangepc(fromPtr, size, callerpc, pc)
		racewriterangepc(toPtr, size, callerpc, pc)
	}
	if msanenabled {
		msanread(fromPtr, size)
		msanwrite(toPtr, size)
	}
	if asanenabled {
		asanread(fromPtr, size)
		asanwrite(toPtr, size)
	}

	if size == 1 { // common case worth about 2x to do here
		// TODO: is this still worth it with new memmove impl?
		*(*byte)(toPtr) = *(*byte)(fromPtr) // known to be a byte pointer
	} else {
		memmove(toPtr, fromPtr, size)
	}
	return n
}
   345  

// (page footer from the documentation viewer: "View as plain text")