Source file
src/runtime/mbitmap.go
56 package runtime
57
58 import (
59 "internal/abi"
60 "internal/goarch"
61 "internal/runtime/atomic"
62 "internal/runtime/sys"
63 "unsafe"
64 )
65
66 const (
// mallocHeaderSize is the size in bytes of the malloc header: a *_type
// describing the allocation, stored at the start of small-size-class
// objects that are too large to keep their pointer bitmap in the span
// (see typePointersOfUnchecked).
71 mallocHeaderSize = 8

// minSizeForMallocHeader is the largest allocation size whose pointer
// bitmap still fits in a single bitmap word stored at the end of the
// span (see heapBitsInSpan). Larger small-size-class allocations carry
// a malloc header; large (single-object) spans record the type in
// span.largeType instead.
101 minSizeForMallocHeader = goarch.PtrSize * ptrBits
102 )
103
104
105
106
107
108
109
110
111
112 func heapBitsInSpan(userSize uintptr) bool {
113
114
115 return userSize <= minSizeForMallocHeader
116 }
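
heapBitsInSpan is a pure size test: minSizeForMallocHeader is goarch.PtrSize*ptrBits bytes (512 on 64-bit), the largest object whose pointer bitmap fits in a single word stored at the end of its span. A minimal standalone sketch of the same test, assuming a 64-bit platform (ordinary Go, not runtime-internal code):

package main

import "fmt"

func main() {
	const ptrSize = 8                                // goarch.PtrSize on 64-bit
	const ptrBits = 8 * ptrSize                      // bits in one bitmap word
	const minSizeForMallocHeader = ptrSize * ptrBits // 512 bytes

	for _, size := range []uintptr{16, 512, 513, 4096} {
		inSpan := size <= minSizeForMallocHeader
		fmt.Printf("size %4d: heap bits stored in span? %v\n", size, inSpan)
	}
}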
117
118
119
120
121
122 type typePointers struct {
123
124
125
126 elem uintptr
127
128
129
130 addr uintptr
131
132
133
134
135
136 mask uintptr
137
138
139
140 typ *_type
141 }
142
143
144
145
146
147
148
149
150
151
152
153
154 func (span *mspan) typePointersOf(addr, size uintptr) typePointers {
155 base := span.objBase(addr)
156 tp := span.typePointersOfUnchecked(base)
157 if base == addr && size == span.elemsize {
158 return tp
159 }
160 return tp.fastForward(addr-tp.addr, addr+size)
161 }
162
163
164
165
166
167
168
169
170
171 func (span *mspan) typePointersOfUnchecked(addr uintptr) typePointers {
172 const doubleCheck = false
173 if doubleCheck && span.objBase(addr) != addr {
174 print("runtime: addr=", addr, " base=", span.objBase(addr), "\n")
175 throw("typePointersOfUnchecked consisting of non-base-address for object")
176 }
177
178 spc := span.spanclass
179 if spc.noscan() {
180 return typePointers{}
181 }
182 if heapBitsInSpan(span.elemsize) {
183
184 return typePointers{elem: addr, addr: addr, mask: span.heapBitsSmallForAddr(addr)}
185 }
186
187
188 var typ *_type
189 if spc.sizeclass() != 0 {
190
191 typ = *(**_type)(unsafe.Pointer(addr))
192 addr += mallocHeaderSize
193 } else {
194
195
196 typ = (*_type)(atomic.Loadp(unsafe.Pointer(&span.largeType)))
197 if typ == nil {
198
199 return typePointers{}
200 }
201 }
202 gcmask := getGCMask(typ)
203 return typePointers{elem: addr, addr: addr, mask: readUintptr(gcmask), typ: typ}
204 }
205
206
207
208
209
210
211
212
213
214
215 func (span *mspan) typePointersOfType(typ *abi.Type, addr uintptr) typePointers {
216 const doubleCheck = false
217 if doubleCheck && typ == nil {
218 throw("bad type passed to typePointersOfType")
219 }
220 if span.spanclass.noscan() {
221 return typePointers{}
222 }
223
224 gcmask := getGCMask(typ)
225 return typePointers{elem: addr, addr: addr, mask: readUintptr(gcmask), typ: typ}
226 }
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248 func (tp typePointers) nextFast() (typePointers, uintptr) {
249
250 if tp.mask == 0 {
251 return tp, 0
252 }
253
254 var i int
255 if goarch.PtrSize == 8 {
256 i = sys.TrailingZeros64(uint64(tp.mask))
257 } else {
258 i = sys.TrailingZeros32(uint32(tp.mask))
259 }
260
261 tp.mask ^= uintptr(1) << (i & (ptrBits - 1))
262
263 return tp, tp.addr + uintptr(i)*goarch.PtrSize
264 }
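
nextFast peels the lowest set bit off the cached mask word and turns its position into an address. A standalone sketch of the same trailing-zeros walk over a 64-bit mask, assuming 8-byte pointers; pointerOffsets is a hypothetical helper, not a runtime function:

package main

import (
	"fmt"
	"math/bits"
)

// pointerOffsets mirrors the nextFast loop: repeatedly take the lowest set
// bit of a word-sized pointer mask and turn it into a byte offset.
func pointerOffsets(mask uint64, ptrSize uintptr) []uintptr {
	var offs []uintptr
	for mask != 0 {
		i := bits.TrailingZeros64(mask)
		mask &= mask - 1 // clear the lowest set bit (nextFast XORs in 1<<i)
		offs = append(offs, uintptr(i)*ptrSize)
	}
	return offs
}

func main() {
	// bits 0, 3 and 9 set: pointers at byte offsets 0, 24 and 72 on 64-bit.
	fmt.Println(pointerOffsets(0b1000001001, 8))
}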
265
266
267
268
269
270
271
272
273
274 func (tp typePointers) next(limit uintptr) (typePointers, uintptr) {
275 for {
276 if tp.mask != 0 {
277 return tp.nextFast()
278 }
279
280
281 if tp.typ == nil {
282 return typePointers{}, 0
283 }
284
285
286 if tp.addr+goarch.PtrSize*ptrBits >= tp.elem+tp.typ.PtrBytes {
287 tp.elem += tp.typ.Size_
288 tp.addr = tp.elem
289 } else {
290 tp.addr += ptrBits * goarch.PtrSize
291 }
292
293
294 if tp.addr >= limit {
295 return typePointers{}, 0
296 }
297
298
299 tp.mask = readUintptr(addb(getGCMask(tp.typ), (tp.addr-tp.elem)/goarch.PtrSize/8))
300 if tp.addr+goarch.PtrSize*ptrBits > limit {
301 bits := (tp.addr + goarch.PtrSize*ptrBits - limit) / goarch.PtrSize
302 tp.mask &^= ((1 << (bits)) - 1) << (ptrBits - bits)
303 }
304 }
305 }
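
When next loads a fresh mask word that extends past the iteration limit, it clears the bits describing words at or beyond the limit. A standalone sketch of that clamp, with clampMask as a hypothetical helper:

package main

import "fmt"

// clampMask clears the mask bits that describe words at or past limit,
// mirroring the tail handling in typePointers.next and fastForward.
// addr is the address the lowest mask bit describes; values are in bytes.
func clampMask(mask uint64, addr, limit, ptrSize uintptr) uint64 {
	const ptrBits = 64
	if addr+ptrSize*ptrBits > limit {
		bits := (addr + ptrSize*ptrBits - limit) / ptrSize
		mask &^= ((1 << bits) - 1) << (ptrBits - bits)
	}
	return mask
}

func main() {
	// A full mask over 64 words starting at address 0, but the region ends at
	// byte 128 (16 words): only the low 16 bits survive.
	fmt.Printf("%#x\n", clampMask(^uint64(0), 0, 128, 8))
}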
306
307
308
309
310
311
312
313
314 func (tp typePointers) fastForward(n, limit uintptr) typePointers {
315
316 target := tp.addr + n
317 if target >= limit {
318 return typePointers{}
319 }
320 if tp.typ == nil {
321
322
323 tp.mask &^= (1 << ((target - tp.addr) / goarch.PtrSize)) - 1
324
325 if tp.addr+goarch.PtrSize*ptrBits > limit {
326 bits := (tp.addr + goarch.PtrSize*ptrBits - limit) / goarch.PtrSize
327 tp.mask &^= ((1 << (bits)) - 1) << (ptrBits - bits)
328 }
329 return tp
330 }
331
332
333
334 if n >= tp.typ.Size_ {
335
336
337 oldelem := tp.elem
338 tp.elem += (tp.addr - tp.elem + n) / tp.typ.Size_ * tp.typ.Size_
339 tp.addr = tp.elem + alignDown(n-(tp.elem-oldelem), ptrBits*goarch.PtrSize)
340 } else {
341 tp.addr += alignDown(n, ptrBits*goarch.PtrSize)
342 }
343
344 if tp.addr-tp.elem >= tp.typ.PtrBytes {
345
346
347 tp.elem += tp.typ.Size_
348 tp.addr = tp.elem
349 tp.mask = readUintptr(getGCMask(tp.typ))
350
351
352 if tp.addr >= limit {
353 return typePointers{}
354 }
355 } else {
356
357
358 tp.mask = readUintptr(addb(getGCMask(tp.typ), (tp.addr-tp.elem)/goarch.PtrSize/8))
359 tp.mask &^= (1 << ((target - tp.addr) / goarch.PtrSize)) - 1
360 }
361 if tp.addr+goarch.PtrSize*ptrBits > limit {
362 bits := (tp.addr + goarch.PtrSize*ptrBits - limit) / goarch.PtrSize
363 tp.mask &^= ((1 << (bits)) - 1) << (ptrBits - bits)
364 }
365 return tp
366 }
367
368
369
370
371
372
373 func (span *mspan) objBase(addr uintptr) uintptr {
374 return span.base() + span.objIndex(addr)*span.elemsize
375 }
376
// bulkBarrierPreWrite executes a write barrier for every pointer slot in the
// memory range [dst, dst+size), locating the slots with the provided type's
// pointer mask, the span's heap bits, or the module data/bss bitmaps. src is
// the corresponding source memory (the values about to be written), or zero
// when the destination is about to be cleared. dst, src, and size must be
// pointer-aligned.
419 func bulkBarrierPreWrite(dst, src, size uintptr, typ *abi.Type) {
420 if (dst|src|size)&(goarch.PtrSize-1) != 0 {
421 throw("bulkBarrierPreWrite: unaligned arguments")
422 }
423 if !writeBarrier.enabled {
424 return
425 }
426 s := spanOf(dst)
427 if s == nil {
428
429
430 for _, datap := range activeModules() {
431 if datap.data <= dst && dst < datap.edata {
432 bulkBarrierBitmap(dst, src, size, dst-datap.data, datap.gcdatamask.bytedata)
433 return
434 }
435 }
436 for _, datap := range activeModules() {
437 if datap.bss <= dst && dst < datap.ebss {
438 bulkBarrierBitmap(dst, src, size, dst-datap.bss, datap.gcbssmask.bytedata)
439 return
440 }
441 }
442 return
443 } else if s.state.get() != mSpanInUse || dst < s.base() || s.limit <= dst {
444
445
446
447
448
449
450 return
451 }
452 buf := &getg().m.p.ptr().wbBuf
453
454
455 const doubleCheck = false
456 if doubleCheck {
457 doubleCheckTypePointersOfType(s, typ, dst, size)
458 }
459
460 var tp typePointers
461 if typ != nil {
462 tp = s.typePointersOfType(typ, dst)
463 } else {
464 tp = s.typePointersOf(dst, size)
465 }
466 if src == 0 {
467 for {
468 var addr uintptr
469 if tp, addr = tp.next(dst + size); addr == 0 {
470 break
471 }
472 dstx := (*uintptr)(unsafe.Pointer(addr))
473 p := buf.get1()
474 p[0] = *dstx
475 }
476 } else {
477 for {
478 var addr uintptr
479 if tp, addr = tp.next(dst + size); addr == 0 {
480 break
481 }
482 dstx := (*uintptr)(unsafe.Pointer(addr))
483 srcx := (*uintptr)(unsafe.Pointer(src + (addr - dst)))
484 p := buf.get2()
485 p[0] = *dstx
486 p[1] = *srcx
487 }
488 }
489 }
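
For each pointer slot the iterator yields, the loop above records the old destination value (and, when src != 0, the incoming source value) in the P-local write barrier buffer. A standalone sketch of the same pairing driven by a mask word; barrierPairs is a hypothetical helper that returns the pairs instead of enqueueing them:

package main

import (
	"fmt"
	"math/bits"
)

// barrierPairs mimics the shape of the bulkBarrierPreWrite loop with src != 0:
// for every word the mask marks as a pointer, record the old destination value
// and the incoming source value.
func barrierPairs(dst, src []uintptr, mask uint64) [][2]uintptr {
	var pairs [][2]uintptr
	for m := mask; m != 0; m &= m - 1 {
		i := bits.TrailingZeros64(m)
		pairs = append(pairs, [2]uintptr{dst[i], src[i]})
	}
	return pairs
}

func main() {
	dst := []uintptr{0xa0, 0, 0xb0, 0}
	src := []uintptr{0xa1, 0, 0xb1, 0}
	fmt.Println(barrierPairs(dst, src, 0b0101)) // words 0 and 2 hold pointers
}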
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505 func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr, typ *abi.Type) {
506 if (dst|src|size)&(goarch.PtrSize-1) != 0 {
507 throw("bulkBarrierPreWrite: unaligned arguments")
508 }
509 if !writeBarrier.enabled {
510 return
511 }
512 buf := &getg().m.p.ptr().wbBuf
513 s := spanOf(dst)
514
515
516 const doubleCheck = false
517 if doubleCheck {
518 doubleCheckTypePointersOfType(s, typ, dst, size)
519 }
520
521 var tp typePointers
522 if typ != nil {
523 tp = s.typePointersOfType(typ, dst)
524 } else {
525 tp = s.typePointersOf(dst, size)
526 }
527 for {
528 var addr uintptr
529 if tp, addr = tp.next(dst + size); addr == 0 {
530 break
531 }
532 srcx := (*uintptr)(unsafe.Pointer(addr - dst + src))
533 p := buf.get1()
534 p[0] = *srcx
535 }
536 }
537
538
539 func (s *mspan) initHeapBits() {
540 if goarch.PtrSize == 8 && !s.spanclass.noscan() && s.spanclass.sizeclass() == 1 {
541 b := s.heapBits()
542 for i := range b {
543 b[i] = ^uintptr(0)
544 }
545 } else if (!s.spanclass.noscan() && heapBitsInSpan(s.elemsize)) || s.isUserArenaChunk {
546 b := s.heapBits()
547 clear(b)
548 }
549 }
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565 func (span *mspan) heapBits() []uintptr {
566 const doubleCheck = false
567
568 if doubleCheck && !span.isUserArenaChunk {
569 if span.spanclass.noscan() {
570 throw("heapBits called for noscan")
571 }
572 if span.elemsize > minSizeForMallocHeader {
573 throw("heapBits called for span class that should have a malloc header")
574 }
575 }
576
577
578
579 if span.npages == 1 {
580
581 return heapBitsSlice(span.base(), pageSize)
582 }
583 return heapBitsSlice(span.base(), span.npages*pageSize)
584 }
585
586
587
588
589 func heapBitsSlice(spanBase, spanSize uintptr) []uintptr {
590 bitmapSize := spanSize / goarch.PtrSize / 8
591 elems := int(bitmapSize / goarch.PtrSize)
592 var sl notInHeapSlice
593 sl = notInHeapSlice{(*notInHeap)(unsafe.Pointer(spanBase + spanSize - bitmapSize)), elems, elems}
594 return *(*[]uintptr)(unsafe.Pointer(&sl))
595 }
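
heapBitsSlice places the bitmap in the last spanSize/goarch.PtrSize/8 bytes of the span: one bit per pointer-sized word. A small sketch of that arithmetic, assuming an 8 KiB page and 8-byte pointers:

package main

import "fmt"

func main() {
	const pageSize = 8192
	const ptrSize = 8

	spanSize := uintptr(pageSize)
	bitmapSize := spanSize / ptrSize / 8 // one bit per word -> 128 bytes
	fmt.Printf("bitmap bytes: %d, bitmap words: %d, starts %d bytes before the span end\n",
		bitmapSize, bitmapSize/ptrSize, bitmapSize)
}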
596
597
598
599
600
601
602
603 func (span *mspan) heapBitsSmallForAddr(addr uintptr) uintptr {
604 spanSize := span.npages * pageSize
605 bitmapSize := spanSize / goarch.PtrSize / 8
606 hbits := (*byte)(unsafe.Pointer(span.base() + spanSize - bitmapSize))
607
608
609
610
611
612
613
614
615
616 i := (addr - span.base()) / goarch.PtrSize / ptrBits
617 j := (addr - span.base()) / goarch.PtrSize % ptrBits
618 bits := span.elemsize / goarch.PtrSize
619 word0 := (*uintptr)(unsafe.Pointer(addb(hbits, goarch.PtrSize*(i+0))))
620 word1 := (*uintptr)(unsafe.Pointer(addb(hbits, goarch.PtrSize*(i+1))))
621
622 var read uintptr
623 if j+bits > ptrBits {
624
625 bits0 := ptrBits - j
626 bits1 := bits - bits0
627 read = *word0 >> j
628 read |= (*word1 & ((1 << bits1) - 1)) << bits0
629 } else {
630
631 read = (*word0 >> j) & ((1 << bits) - 1)
632 }
633 return read
634 }
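
The read has two cases: the object's bits lie within one bitmap word, or they straddle two words and must be stitched together. A standalone sketch of the same extraction over a plain []uint64; readBits is a hypothetical helper:

package main

import "fmt"

// readBits extracts n consecutive bits starting at bit position pos from a
// little-endian sequence of 64-bit words, using the same split as
// heapBitsSmallForAddr when an object's bits straddle two bitmap words.
func readBits(words []uint64, pos, n uint) uint64 {
	const wordBits = 64
	i, j := pos/wordBits, pos%wordBits
	if j+n > wordBits {
		lo := wordBits - j // bits taken from words[i]
		hi := n - lo       // bits taken from words[i+1]
		return words[i]>>j | (words[i+1]&((1<<hi)-1))<<lo
	}
	return (words[i] >> j) & ((1 << n) - 1)
}

func main() {
	words := []uint64{0xff00_0000_0000_0000, 0x0000_0000_0000_000f}
	// 12 bits starting at bit 60: the top 4 bits of word 0, low 8 of word 1.
	fmt.Printf("%#b\n", readBits(words, 60, 12))
}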
635
636
637
638
639
640
641
642
643 func (span *mspan) writeHeapBitsSmall(x, dataSize uintptr, typ *_type) (scanSize uintptr) {
644
645 src0 := readUintptr(getGCMask(typ))
646
647
648 scanSize = typ.PtrBytes
649 src := src0
650 if typ.Size_ == goarch.PtrSize {
651 src = (1 << (dataSize / goarch.PtrSize)) - 1
652 } else {
653
654
655
656 if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
657 throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
658 }
659 for i := typ.Size_; i < dataSize; i += typ.Size_ {
660 src |= src0 << (i / goarch.PtrSize)
661 scanSize += typ.Size_
662 }
663 if asanenabled {
664
665
666 src &= (1 << (dataSize / goarch.PtrSize)) - 1
667 }
668 }
669
670
671
672 dst := unsafe.Pointer(span.base() + pageSize - pageSize/goarch.PtrSize/8)
673 o := (x - span.base()) / goarch.PtrSize
674 i := o / ptrBits
675 j := o % ptrBits
676 bits := span.elemsize / goarch.PtrSize
677 if j+bits > ptrBits {
678
679 bits0 := ptrBits - j
680 bits1 := bits - bits0
681 dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
682 dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
683 *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
684 *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
685 } else {
686
687 dst := (*uintptr)(add(dst, i*goarch.PtrSize))
688 *dst = (*dst)&^(((1<<bits)-1)<<j) | (src << j)
689 }
690
691 const doubleCheck = false
692 if doubleCheck {
693 srcRead := span.heapBitsSmallForAddr(x)
694 if srcRead != src {
695 print("runtime: x=", hex(x), " i=", i, " j=", j, " bits=", bits, "\n")
696 print("runtime: dataSize=", dataSize, " typ.Size_=", typ.Size_, " typ.PtrBytes=", typ.PtrBytes, "\n")
697 print("runtime: src0=", hex(src0), " src=", hex(src), " srcRead=", hex(srcRead), "\n")
698 throw("bad pointer bits written for small object")
699 }
700 }
701 return
702 }
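
For multi-element allocations, the function builds the full mask by OR-ing the element's mask shifted by the element size in words, once per element. A standalone sketch of that replication; replicate is a hypothetical helper:

package main

import "fmt"

// replicate builds the pointer mask for several back-to-back copies of an
// element, the same loop writeHeapBitsSmall uses: each copy's mask is the
// element mask shifted left by the element size in words.
func replicate(elemMask uint64, elemWords, totalWords uint) uint64 {
	mask := uint64(0)
	for w := uint(0); w < totalWords; w += elemWords {
		mask |= elemMask << w
	}
	return mask
}

func main() {
	// An element of 4 words whose first and third words are pointers (0b0101),
	// repeated 3 times in a 12-word allocation.
	fmt.Printf("%#b\n", replicate(0b0101, 4, 12))
}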
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721 const doubleCheckHeapSetType = doubleCheckMalloc
722
723 func heapSetTypeNoHeader(x, dataSize uintptr, typ *_type, span *mspan) uintptr {
724 if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(span.elemsize)) {
725 throw("tried to write heap bits, but no heap bits in span")
726 }
727 scanSize := span.writeHeapBitsSmall(x, dataSize, typ)
728 if doubleCheckHeapSetType {
729 doubleCheckHeapType(x, dataSize, typ, nil, span)
730 }
731 return scanSize
732 }
733
734 func heapSetTypeSmallHeader(x, dataSize uintptr, typ *_type, header **_type, span *mspan) uintptr {
735 *header = typ
736 if doubleCheckHeapSetType {
737 doubleCheckHeapType(x, dataSize, typ, header, span)
738 }
739 return span.elemsize
740 }
741
742 func heapSetTypeLarge(x, dataSize uintptr, typ *_type, span *mspan) uintptr {
743 gctyp := typ

// Write out the header atomically to synchronize with the garbage
// collector: this StorepNoWB pairs with the atomic load of
// span.largeType in typePointersOfUnchecked, so the collector only
// treats the object as scannable once its type (and therefore its
// pointer mask) has been published.
793 atomic.StorepNoWB(unsafe.Pointer(&span.largeType), unsafe.Pointer(gctyp))
794 if doubleCheckHeapSetType {
795 doubleCheckHeapType(x, dataSize, typ, &span.largeType, span)
796 }
797 return span.elemsize
798 }
799
800 func doubleCheckHeapType(x, dataSize uintptr, gctyp *_type, header **_type, span *mspan) {
801 doubleCheckHeapPointers(x, dataSize, gctyp, header, span)
802
803
804
805
806 maxIterBytes := span.elemsize
807 if header == nil {
808 maxIterBytes = dataSize
809 }
810 off := alignUp(uintptr(cheaprand())%dataSize, goarch.PtrSize)
811 size := dataSize - off
812 if size == 0 {
813 off -= goarch.PtrSize
814 size += goarch.PtrSize
815 }
816 interior := x + off
817 size -= alignDown(uintptr(cheaprand())%size, goarch.PtrSize)
818 if size == 0 {
819 size = goarch.PtrSize
820 }
821
822 size = (size + gctyp.Size_ - 1) / gctyp.Size_ * gctyp.Size_
823 if interior+size > x+maxIterBytes {
824 size = x + maxIterBytes - interior
825 }
826 doubleCheckHeapPointersInterior(x, interior, size, dataSize, gctyp, header, span)
827 }
828
829 func doubleCheckHeapPointers(x, dataSize uintptr, typ *_type, header **_type, span *mspan) {
830
831 tp := span.typePointersOfUnchecked(span.objBase(x))
832 maxIterBytes := span.elemsize
833 if header == nil {
834 maxIterBytes = dataSize
835 }
836 bad := false
837 for i := uintptr(0); i < maxIterBytes; i += goarch.PtrSize {
838
839 want := false
840 if i < span.elemsize {
841 off := i % typ.Size_
842 if off < typ.PtrBytes {
843 j := off / goarch.PtrSize
844 want = *addb(getGCMask(typ), j/8)>>(j%8)&1 != 0
845 }
846 }
847 if want {
848 var addr uintptr
849 tp, addr = tp.next(x + span.elemsize)
850 if addr == 0 {
851 println("runtime: found bad iterator")
852 }
853 if addr != x+i {
854 print("runtime: addr=", hex(addr), " x+i=", hex(x+i), "\n")
855 bad = true
856 }
857 }
858 }
859 if !bad {
860 var addr uintptr
861 tp, addr = tp.next(x + span.elemsize)
862 if addr == 0 {
863 return
864 }
865 println("runtime: extra pointer:", hex(addr))
866 }
867 print("runtime: hasHeader=", header != nil, " typ.Size_=", typ.Size_, " TFlagGCMaskOnDemaind=", typ.TFlag&abi.TFlagGCMaskOnDemand != 0, "\n")
868 print("runtime: x=", hex(x), " dataSize=", dataSize, " elemsize=", span.elemsize, "\n")
869 print("runtime: typ=", unsafe.Pointer(typ), " typ.PtrBytes=", typ.PtrBytes, "\n")
870 print("runtime: limit=", hex(x+span.elemsize), "\n")
871 tp = span.typePointersOfUnchecked(x)
872 dumpTypePointers(tp)
873 for {
874 var addr uintptr
875 if tp, addr = tp.next(x + span.elemsize); addr == 0 {
876 println("runtime: would've stopped here")
877 dumpTypePointers(tp)
878 break
879 }
880 print("runtime: addr=", hex(addr), "\n")
881 dumpTypePointers(tp)
882 }
883 throw("heapSetType: pointer entry not correct")
884 }
885
886 func doubleCheckHeapPointersInterior(x, interior, size, dataSize uintptr, typ *_type, header **_type, span *mspan) {
887 bad := false
888 if interior < x {
889 print("runtime: interior=", hex(interior), " x=", hex(x), "\n")
890 throw("found bad interior pointer")
891 }
892 off := interior - x
893 tp := span.typePointersOf(interior, size)
894 for i := off; i < off+size; i += goarch.PtrSize {
895
896 want := false
897 if i < span.elemsize {
898 off := i % typ.Size_
899 if off < typ.PtrBytes {
900 j := off / goarch.PtrSize
901 want = *addb(getGCMask(typ), j/8)>>(j%8)&1 != 0
902 }
903 }
904 if want {
905 var addr uintptr
906 tp, addr = tp.next(interior + size)
907 if addr == 0 {
908 println("runtime: found bad iterator")
909 bad = true
910 }
911 if addr != x+i {
912 print("runtime: addr=", hex(addr), " x+i=", hex(x+i), "\n")
913 bad = true
914 }
915 }
916 }
917 if !bad {
918 var addr uintptr
919 tp, addr = tp.next(interior + size)
920 if addr == 0 {
921 return
922 }
923 println("runtime: extra pointer:", hex(addr))
924 }
925 print("runtime: hasHeader=", header != nil, " typ.Size_=", typ.Size_, "\n")
926 print("runtime: x=", hex(x), " dataSize=", dataSize, " elemsize=", span.elemsize, " interior=", hex(interior), " size=", size, "\n")
927 print("runtime: limit=", hex(interior+size), "\n")
928 tp = span.typePointersOf(interior, size)
929 dumpTypePointers(tp)
930 for {
931 var addr uintptr
932 if tp, addr = tp.next(interior + size); addr == 0 {
933 println("runtime: would've stopped here")
934 dumpTypePointers(tp)
935 break
936 }
937 print("runtime: addr=", hex(addr), "\n")
938 dumpTypePointers(tp)
939 }
940
941 print("runtime: want: ")
942 for i := off; i < off+size; i += goarch.PtrSize {
943
944 want := false
945 if i < dataSize {
946 off := i % typ.Size_
947 if off < typ.PtrBytes {
948 j := off / goarch.PtrSize
949 want = *addb(getGCMask(typ), j/8)>>(j%8)&1 != 0
950 }
951 }
952 if want {
953 print("1")
954 } else {
955 print("0")
956 }
957 }
958 println()
959
960 throw("heapSetType: pointer entry not correct")
961 }
962
963
964 func doubleCheckTypePointersOfType(s *mspan, typ *_type, addr, size uintptr) {
965 if typ == nil {
966 return
967 }
968 if typ.Kind_&abi.KindMask == abi.Interface {
969
970
971
972 return
973 }
974 tp0 := s.typePointersOfType(typ, addr)
975 tp1 := s.typePointersOf(addr, size)
976 failed := false
977 for {
978 var addr0, addr1 uintptr
979 tp0, addr0 = tp0.next(addr + size)
980 tp1, addr1 = tp1.next(addr + size)
981 if addr0 != addr1 {
982 failed = true
983 break
984 }
985 if addr0 == 0 {
986 break
987 }
988 }
989 if failed {
990 tp0 := s.typePointersOfType(typ, addr)
991 tp1 := s.typePointersOf(addr, size)
992 print("runtime: addr=", hex(addr), " size=", size, "\n")
993 print("runtime: type=", toRType(typ).string(), "\n")
994 dumpTypePointers(tp0)
995 dumpTypePointers(tp1)
996 for {
997 var addr0, addr1 uintptr
998 tp0, addr0 = tp0.next(addr + size)
999 tp1, addr1 = tp1.next(addr + size)
1000 print("runtime: ", hex(addr0), " ", hex(addr1), "\n")
1001 if addr0 == 0 && addr1 == 0 {
1002 break
1003 }
1004 }
1005 throw("mismatch between typePointersOfType and typePointersOf")
1006 }
1007 }
1008
1009 func dumpTypePointers(tp typePointers) {
1010 print("runtime: tp.elem=", hex(tp.elem), " tp.typ=", unsafe.Pointer(tp.typ), "\n")
1011 print("runtime: tp.addr=", hex(tp.addr), " tp.mask=")
1012 for i := uintptr(0); i < ptrBits; i++ {
1013 if tp.mask&(uintptr(1)<<i) != 0 {
1014 print("1")
1015 } else {
1016 print("0")
1017 }
1018 }
1019 println()
1020 }
1021
1022
1023
1024
1025
1026 func addb(p *byte, n uintptr) *byte {
1027
1028
1029
1030 return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + n))
1031 }
1032
1033
1034
1035
1036
1037 func subtractb(p *byte, n uintptr) *byte {
1038
1039
1040
1041 return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - n))
1042 }
1043
1044
1045
1046
1047
1048 func add1(p *byte) *byte {
1049
1050
1051
1052 return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + 1))
1053 }
1054
1055
1056
1057
1058
1059
1060
1061 func subtract1(p *byte) *byte {
1062
1063
1064
1065 return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - 1))
1066 }
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077 type markBits struct {
1078 bytep *uint8
1079 mask uint8
1080 index uintptr
1081 }
1082
1083
1084 func (s *mspan) allocBitsForIndex(allocBitIndex uintptr) markBits {
1085 bytep, mask := s.allocBits.bitp(allocBitIndex)
1086 return markBits{bytep, mask, allocBitIndex}
1087 }
1088
1089
1090
1091
1092
1093 func (s *mspan) refillAllocCache(whichByte uint16) {
1094 bytes := (*[8]uint8)(unsafe.Pointer(s.allocBits.bytep(uintptr(whichByte))))
1095 aCache := uint64(0)
1096 aCache |= uint64(bytes[0])
1097 aCache |= uint64(bytes[1]) << (1 * 8)
1098 aCache |= uint64(bytes[2]) << (2 * 8)
1099 aCache |= uint64(bytes[3]) << (3 * 8)
1100 aCache |= uint64(bytes[4]) << (4 * 8)
1101 aCache |= uint64(bytes[5]) << (5 * 8)
1102 aCache |= uint64(bytes[6]) << (6 * 8)
1103 aCache |= uint64(bytes[7]) << (7 * 8)
1104 s.allocCache = ^aCache
1105 }
1106
1107
1108
1109
1110
1111 func (s *mspan) nextFreeIndex() uint16 {
1112 sfreeindex := s.freeindex
1113 snelems := s.nelems
1114 if sfreeindex == snelems {
1115 return sfreeindex
1116 }
1117 if sfreeindex > snelems {
1118 throw("s.freeindex > s.nelems")
1119 }
1120
1121 aCache := s.allocCache
1122
1123 bitIndex := sys.TrailingZeros64(aCache)
1124 for bitIndex == 64 {
1125
1126 sfreeindex = (sfreeindex + 64) &^ (64 - 1)
1127 if sfreeindex >= snelems {
1128 s.freeindex = snelems
1129 return snelems
1130 }
1131 whichByte := sfreeindex / 8
1132
1133 s.refillAllocCache(whichByte)
1134 aCache = s.allocCache
1135 bitIndex = sys.TrailingZeros64(aCache)
1136
1137
1138 }
1139 result := sfreeindex + uint16(bitIndex)
1140 if result >= snelems {
1141 s.freeindex = snelems
1142 return snelems
1143 }
1144
1145 s.allocCache >>= uint(bitIndex + 1)
1146 sfreeindex = result + 1
1147
1148 if sfreeindex%64 == 0 && sfreeindex != snelems {
1149
1150
1151
1152
1153
1154 whichByte := sfreeindex / 8
1155 s.refillAllocCache(whichByte)
1156 }
1157 s.freeindex = sfreeindex
1158 return result
1159 }
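
allocCache holds the complement of allocBits for the 64 slots starting at the cache's base index, so a trailing-zeros count finds the next free object. A standalone sketch of that scan over a single 64-object window:

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// allocBits has a 1 for every allocated object; the runtime caches the
	// complement so that a trailing-zeros count finds the next free slot,
	// much like nextFreeIndex does.
	var allocBits uint64 = 0b1011_0111 // objects 0,1,2,4,5,7 allocated

	cache := ^allocBits
	for i := 0; i < 4; i++ {
		free := bits.TrailingZeros64(cache)
		if free == 64 {
			fmt.Println("window full")
			break
		}
		fmt.Println("next free index:", free)
		cache &^= 1 << free // mark it allocated in the cache
	}
}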
1160
1161
1162
1163
1164
1165
1166 func (s *mspan) isFree(index uintptr) bool {
1167 if index < uintptr(s.freeIndexForScan) {
1168 return false
1169 }
1170 bytep, mask := s.allocBits.bitp(index)
1171 return *bytep&mask == 0
1172 }
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182 func (s *mspan) divideByElemSize(n uintptr) uintptr {
1183 const doubleCheck = false
1184
1185
1186 q := uintptr((uint64(n) * uint64(s.divMul)) >> 32)
1187
1188 if doubleCheck && q != n/s.elemsize {
1189 println(n, "/", s.elemsize, "should be", n/s.elemsize, "but got", q)
1190 throw("bad magic division")
1191 }
1192 return q
1193 }
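
divMul is a precomputed 32-bit reciprocal, so dividing by the element size becomes a multiply and a shift. A standalone, self-checking sketch using one possible magic value, d = ceil(2^32/size); the element size 48 is just an example:

package main

import "fmt"

func main() {
	const size = 48                        // a hypothetical element size
	const spanBytes = 8192                 // one page
	d := uint64((1<<32 + size - 1) / size) // reciprocal, rounded up

	for n := uint64(0); n <= spanBytes; n++ {
		if (n*d)>>32 != n/size {
			fmt.Println("mismatch at", n)
			return
		}
	}
	fmt.Println("magic multiply matches n/size for all offsets in the span")
}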
1194
1195
1196
1197
1198 func (s *mspan) objIndex(p uintptr) uintptr {
1199 return s.divideByElemSize(p - s.base())
1200 }
1201
1202 func markBitsForAddr(p uintptr) markBits {
1203 s := spanOf(p)
1204 objIndex := s.objIndex(p)
1205 return s.markBitsForIndex(objIndex)
1206 }
1207
1208 func (s *mspan) markBitsForIndex(objIndex uintptr) markBits {
1209 bytep, mask := s.gcmarkBits.bitp(objIndex)
1210 return markBits{bytep, mask, objIndex}
1211 }
1212
1213 func (s *mspan) markBitsForBase() markBits {
1214 return markBits{&s.gcmarkBits.x, uint8(1), 0}
1215 }
1216
1217
1218 func (m markBits) isMarked() bool {
1219 return *m.bytep&m.mask != 0
1220 }
1221
1222
1223 func (m markBits) setMarked() {
1224
1225
1226
1227 atomic.Or8(m.bytep, m.mask)
1228 }
1229
1230
1231 func (m markBits) setMarkedNonAtomic() {
1232 *m.bytep |= m.mask
1233 }
1234
1235
1236 func (m markBits) clearMarked() {
1237
1238
1239
1240 atomic.And8(m.bytep, ^m.mask)
1241 }
1242
1243
1244 func markBitsForSpan(base uintptr) (mbits markBits) {
1245 mbits = markBitsForAddr(base)
1246 if mbits.mask != 1 {
1247 throw("markBitsForSpan: unaligned start")
1248 }
1249 return mbits
1250 }
1251
1252
1253 func (m *markBits) advance() {
1254 if m.mask == 1<<7 {
1255 m.bytep = (*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(m.bytep)) + 1))
1256 m.mask = 1
1257 } else {
1258 m.mask = m.mask << 1
1259 }
1260 m.index++
1261 }
1262
1263
1264
1265 const clobberdeadPtr = uintptr(0xdeaddead | 0xdeaddead<<((^uintptr(0)>>63)*32))
1266
1267
1268 func badPointer(s *mspan, p, refBase, refOff uintptr) {
1269
1270
1271
1272
1273
1274
1275
1276
1277 printlock()
1278 print("runtime: pointer ", hex(p))
1279 if s != nil {
1280 state := s.state.get()
1281 if state != mSpanInUse {
1282 print(" to unallocated span")
1283 } else {
1284 print(" to unused region of span")
1285 }
1286 print(" span.base()=", hex(s.base()), " span.limit=", hex(s.limit), " span.state=", state)
1287 }
1288 print("\n")
1289 if refBase != 0 {
1290 print("runtime: found in object at *(", hex(refBase), "+", hex(refOff), ")\n")
1291 gcDumpObject("object", refBase, refOff)
1292 }
1293 getg().m.traceback = 2
1294 throw("found bad pointer in Go heap (incorrect use of unsafe or cgo?)")
1295 }
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321 func findObject(p, refBase, refOff uintptr) (base uintptr, s *mspan, objIndex uintptr) {
1322 s = spanOf(p)
1323
1324
1325 if s == nil {
1326 if (GOARCH == "amd64" || GOARCH == "arm64") && p == clobberdeadPtr && debug.invalidptr != 0 {
1327
1328
1329
1330 badPointer(s, p, refBase, refOff)
1331 }
1332 return
1333 }
1334
1335
1336
1337
1338 if state := s.state.get(); state != mSpanInUse || p < s.base() || p >= s.limit {
1339
1340 if state == mSpanManual {
1341 return
1342 }
1343
1344
1345 if debug.invalidptr != 0 {
1346 badPointer(s, p, refBase, refOff)
1347 }
1348 return
1349 }
1350
1351 objIndex = s.objIndex(p)
1352 base = s.base() + objIndex*s.elemsize
1353 return
1354 }
1355
1356
1357
1358
1359 func reflect_verifyNotInHeapPtr(p uintptr) bool {
1360
1361
1362
1363 return spanOf(p) == nil && p != clobberdeadPtr
1364 }
1365
1366 const ptrBits = 8 * goarch.PtrSize
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376 func bulkBarrierBitmap(dst, src, size, maskOffset uintptr, bits *uint8) {
1377 word := maskOffset / goarch.PtrSize
1378 bits = addb(bits, word/8)
1379 mask := uint8(1) << (word % 8)
1380
1381 buf := &getg().m.p.ptr().wbBuf
1382 for i := uintptr(0); i < size; i += goarch.PtrSize {
1383 if mask == 0 {
1384 bits = addb(bits, 1)
1385 if *bits == 0 {
1386
1387 i += 7 * goarch.PtrSize
1388 continue
1389 }
1390 mask = 1
1391 }
1392 if *bits&mask != 0 {
1393 dstx := (*uintptr)(unsafe.Pointer(dst + i))
1394 if src == 0 {
1395 p := buf.get1()
1396 p[0] = *dstx
1397 } else {
1398 srcx := (*uintptr)(unsafe.Pointer(src + i))
1399 p := buf.get2()
1400 p[0] = *dstx
1401 p[1] = *srcx
1402 }
1403 }
1404 mask <<= 1
1405 }
1406 }
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422 func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {
1423 if typ == nil {
1424 throw("runtime: typeBitsBulkBarrier without type")
1425 }
1426 if typ.Size_ != size {
1427 println("runtime: typeBitsBulkBarrier with type ", toRType(typ).string(), " of size ", typ.Size_, " but memory size", size)
1428 throw("runtime: invalid typeBitsBulkBarrier")
1429 }
1430 if !writeBarrier.enabled {
1431 return
1432 }
1433 ptrmask := getGCMask(typ)
1434 buf := &getg().m.p.ptr().wbBuf
1435 var bits uint32
1436 for i := uintptr(0); i < typ.PtrBytes; i += goarch.PtrSize {
1437 if i&(goarch.PtrSize*8-1) == 0 {
1438 bits = uint32(*ptrmask)
1439 ptrmask = addb(ptrmask, 1)
1440 } else {
1441 bits = bits >> 1
1442 }
1443 if bits&1 != 0 {
1444 dstx := (*uintptr)(unsafe.Pointer(dst + i))
1445 srcx := (*uintptr)(unsafe.Pointer(src + i))
1446 p := buf.get2()
1447 p[0] = *dstx
1448 p[1] = *srcx
1449 }
1450 }
1451 }
1452
1453
1454
1455 func (s *mspan) countAlloc() int {
1456 count := 0
1457 bytes := divRoundUp(uintptr(s.nelems), 8)
1458
1459
1460
1461
1462 for i := uintptr(0); i < bytes; i += 8 {
1463
1464
1465
1466
1467 mrkBits := *(*uint64)(unsafe.Pointer(s.gcmarkBits.bytep(i)))
1468 count += sys.OnesCount64(mrkBits)
1469 }
1470 return count
1471 }
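
countAlloc counts mark bits 64 at a time with a population count. A standalone sketch over a plain byte slice, assuming the slice is padded to a multiple of 8 bytes as gcmarkBits is; the popcount result is endianness-independent even though the runtime loads native-endian words:

package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

func main() {
	markBytes := []byte{0xff, 0x01, 0x80, 0, 0, 0, 0, 0}

	count := 0
	for i := 0; i < len(markBytes); i += 8 {
		word := binary.LittleEndian.Uint64(markBytes[i:])
		count += bits.OnesCount64(word)
	}
	fmt.Println("marked objects:", count)
}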
1472
1473
1474
1475 func readUintptr(p *byte) uintptr {
1476 x := *(*uintptr)(unsafe.Pointer(p))
1477 if goarch.BigEndian {
1478 if goarch.PtrSize == 8 {
1479 return uintptr(sys.Bswap64(uint64(x)))
1480 }
1481 return uintptr(sys.Bswap32(uint32(x)))
1482 }
1483 return x
1484 }
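
readUintptr makes bitmap loads behave identically on little- and big-endian targets: bit 0 of the result always describes the first word. A standalone sketch of the equivalence using encoding/binary (illustrative only):

package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

func main() {
	raw := []byte{0b0000_0101, 0, 0, 0, 0, 0, 0, 0} // words 0 and 2 are pointers

	le := binary.LittleEndian.Uint64(raw)
	be := bits.ReverseBytes64(binary.BigEndian.Uint64(raw))
	fmt.Printf("little-endian load: %#x, byte-swapped big-endian load: %#x\n", le, be)
}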
1485
1486 var debugPtrmask struct {
1487 lock mutex
1488 data *byte
1489 }
1490
1491
1492
1493
1494 func progToPointerMask(prog *byte, size uintptr) bitvector {
1495 n := (size/goarch.PtrSize + 7) / 8
1496 x := (*[1 << 30]byte)(persistentalloc(n+1, 1, &memstats.buckhash_sys))[:n+1]
1497 x[len(x)-1] = 0xa1
1498 n = runGCProg(prog, &x[0])
1499 if x[len(x)-1] != 0xa1 {
1500 throw("progToPointerMask: overflow")
1501 }
1502 return bitvector{int32(n), &x[0]}
1503 }
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523 func runGCProg(prog, dst *byte) uintptr {
1524 dstStart := dst
1525
1526
1527 var bits uintptr
1528 var nbits uintptr
1529
1530 p := prog
1531 Run:
1532 for {
1533
1534
1535 for ; nbits >= 8; nbits -= 8 {
1536 *dst = uint8(bits)
1537 dst = add1(dst)
1538 bits >>= 8
1539 }
1540
1541
1542 inst := uintptr(*p)
1543 p = add1(p)
1544 n := inst & 0x7F
1545 if inst&0x80 == 0 {
1546
1547 if n == 0 {
1548
1549 break Run
1550 }
1551 nbyte := n / 8
1552 for i := uintptr(0); i < nbyte; i++ {
1553 bits |= uintptr(*p) << nbits
1554 p = add1(p)
1555 *dst = uint8(bits)
1556 dst = add1(dst)
1557 bits >>= 8
1558 }
1559 if n %= 8; n > 0 {
1560 bits |= uintptr(*p) << nbits
1561 p = add1(p)
1562 nbits += n
1563 }
1564 continue Run
1565 }
1566
1567
1568 if n == 0 {
1569 for off := uint(0); ; off += 7 {
1570 x := uintptr(*p)
1571 p = add1(p)
1572 n |= (x & 0x7F) << off
1573 if x&0x80 == 0 {
1574 break
1575 }
1576 }
1577 }
1578
1579
1580 c := uintptr(0)
1581 for off := uint(0); ; off += 7 {
1582 x := uintptr(*p)
1583 p = add1(p)
1584 c |= (x & 0x7F) << off
1585 if x&0x80 == 0 {
1586 break
1587 }
1588 }
1589 c *= n
1590
1591
1592
1593
1594
1595
1596
1597
1598 src := dst
1599 const maxBits = goarch.PtrSize*8 - 7
1600 if n <= maxBits {
1601
1602 pattern := bits
1603 npattern := nbits
1604
1605
1606 src = subtract1(src)
1607 for npattern < n {
1608 pattern <<= 8
1609 pattern |= uintptr(*src)
1610 src = subtract1(src)
1611 npattern += 8
1612 }
1613
1614
1615
1616
1617
1618 if npattern > n {
1619 pattern >>= npattern - n
1620 npattern = n
1621 }
1622
1623
1624 if npattern == 1 {
1625
1626
1627
1628
1629
1630
1631 if pattern == 1 {
1632 pattern = 1<<maxBits - 1
1633 npattern = maxBits
1634 } else {
1635 npattern = c
1636 }
1637 } else {
1638 b := pattern
1639 nb := npattern
1640 if nb+nb <= maxBits {
1641
1642 for nb <= goarch.PtrSize*8 {
1643 b |= b << nb
1644 nb += nb
1645 }
1646
1647
1648 nb = maxBits / npattern * npattern
1649 b &= 1<<nb - 1
1650 pattern = b
1651 npattern = nb
1652 }
1653 }
1654
1655
1656
1657
1658 for ; c >= npattern; c -= npattern {
1659 bits |= pattern << nbits
1660 nbits += npattern
1661 for nbits >= 8 {
1662 *dst = uint8(bits)
1663 dst = add1(dst)
1664 bits >>= 8
1665 nbits -= 8
1666 }
1667 }
1668
1669
1670 if c > 0 {
1671 pattern &= 1<<c - 1
1672 bits |= pattern << nbits
1673 nbits += c
1674 }
1675 continue Run
1676 }
1677
1678
1679
1680
1681 off := n - nbits
1682
1683 src = subtractb(src, (off+7)/8)
1684 if frag := off & 7; frag != 0 {
1685 bits |= uintptr(*src) >> (8 - frag) << nbits
1686 src = add1(src)
1687 nbits += frag
1688 c -= frag
1689 }
1690
1691
1692 for i := c / 8; i > 0; i-- {
1693 bits |= uintptr(*src) << nbits
1694 src = add1(src)
1695 *dst = uint8(bits)
1696 dst = add1(dst)
1697 bits >>= 8
1698 }
1699
1700 if c %= 8; c > 0 {
1701 bits |= (uintptr(*src) & (1<<c - 1)) << nbits
1702 nbits += c
1703 }
1704 }
1705
1706
1707 totalBits := (uintptr(unsafe.Pointer(dst))-uintptr(unsafe.Pointer(dstStart)))*8 + nbits
1708 nbits += -nbits & 7
1709 for ; nbits > 0; nbits -= 8 {
1710 *dst = uint8(bits)
1711 dst = add1(dst)
1712 bits >>= 8
1713 }
1714 return totalBits
1715 }
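
Both the literal-length and repeat-count operands in a GC program are encoded as little-endian base-128 varints (7 bits per byte, high bit means continue), decoded by the small loops above. A standalone sketch of that decoding; readVarint is a hypothetical helper:

package main

import "fmt"

// readVarint decodes the 7-bits-per-byte, high-bit-continuation integers that
// GC programs use for large literal lengths and repeat counts (the same loop
// shape as in runGCProg and dumpGCProg).
func readVarint(p []byte) (val uint64, n int) {
	for shift := uint(0); ; shift += 7 {
		b := p[n]
		n++
		val |= uint64(b&0x7f) << shift
		if b&0x80 == 0 {
			return val, n
		}
	}
}

func main() {
	// 0xE5 0x8E 0x26 encodes 624485 (0x98765) in little-endian base-128.
	v, n := readVarint([]byte{0xe5, 0x8e, 0x26})
	fmt.Println(v, "decoded from", n, "bytes")
}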
1716
1717 func dumpGCProg(p *byte) {
1718 nptr := 0
1719 for {
1720 x := *p
1721 p = add1(p)
1722 if x == 0 {
1723 print("\t", nptr, " end\n")
1724 break
1725 }
1726 if x&0x80 == 0 {
1727 print("\t", nptr, " lit ", x, ":")
1728 n := int(x+7) / 8
1729 for i := 0; i < n; i++ {
1730 print(" ", hex(*p))
1731 p = add1(p)
1732 }
1733 print("\n")
1734 nptr += int(x)
1735 } else {
1736 nbit := int(x &^ 0x80)
1737 if nbit == 0 {
1738 for nb := uint(0); ; nb += 7 {
1739 x := *p
1740 p = add1(p)
1741 nbit |= int(x&0x7f) << nb
1742 if x&0x80 == 0 {
1743 break
1744 }
1745 }
1746 }
1747 count := 0
1748 for nb := uint(0); ; nb += 7 {
1749 x := *p
1750 p = add1(p)
1751 count |= int(x&0x7f) << nb
1752 if x&0x80 == 0 {
1753 break
1754 }
1755 }
1756 print("\t", nptr, " repeat ", nbit, " × ", count, "\n")
1757 nptr += nbit * count
1758 }
1759 }
1760 }
1761
1762
1763
1764
1765
1766
1767
1768 func reflect_gcbits(x any) []byte {
1769 return pointerMask(x)
1770 }
1771
1772
1773
1774
1775 func pointerMask(ep any) (mask []byte) {
1776 e := *efaceOf(&ep)
1777 p := e.data
1778 t := e._type
1779
1780 var et *_type
1781 if t.Kind_&abi.KindMask != abi.Pointer {
1782 throw("bad argument to getgcmask: expected type to be a pointer to the value type whose mask is being queried")
1783 }
1784 et = (*ptrtype)(unsafe.Pointer(t)).Elem
1785
1786
1787 for _, datap := range activeModules() {
1788
1789 if datap.data <= uintptr(p) && uintptr(p) < datap.edata {
1790 bitmap := datap.gcdatamask.bytedata
1791 n := et.Size_
1792 mask = make([]byte, n/goarch.PtrSize)
1793 for i := uintptr(0); i < n; i += goarch.PtrSize {
1794 off := (uintptr(p) + i - datap.data) / goarch.PtrSize
1795 mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
1796 }
1797 return
1798 }
1799
1800
1801 if datap.bss <= uintptr(p) && uintptr(p) < datap.ebss {
1802 bitmap := datap.gcbssmask.bytedata
1803 n := et.Size_
1804 mask = make([]byte, n/goarch.PtrSize)
1805 for i := uintptr(0); i < n; i += goarch.PtrSize {
1806 off := (uintptr(p) + i - datap.bss) / goarch.PtrSize
1807 mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
1808 }
1809 return
1810 }
1811 }
1812
1813
1814 if base, s, _ := findObject(uintptr(p), 0, 0); base != 0 {
1815 if s.spanclass.noscan() {
1816 return nil
1817 }
1818 limit := base + s.elemsize
1819
1820
1821
1822
1823 tp := s.typePointersOfUnchecked(base)
1824 base = tp.addr
1825
1826
1827 maskFromHeap := make([]byte, (limit-base)/goarch.PtrSize)
1828 for {
1829 var addr uintptr
1830 if tp, addr = tp.next(limit); addr == 0 {
1831 break
1832 }
1833 maskFromHeap[(addr-base)/goarch.PtrSize] = 1
1834 }
1835
1836
1837
1838
1839 for i := limit; i < s.elemsize; i++ {
1840 if *(*byte)(unsafe.Pointer(i)) != 0 {
1841 throw("found non-zeroed tail of allocation")
1842 }
1843 }
1844
1845
1846
1847 for len(maskFromHeap) > 0 && maskFromHeap[len(maskFromHeap)-1] == 0 {
1848 maskFromHeap = maskFromHeap[:len(maskFromHeap)-1]
1849 }
1850
1851
1852 maskFromType := make([]byte, (limit-base)/goarch.PtrSize)
1853 tp = s.typePointersOfType(et, base)
1854 for {
1855 var addr uintptr
1856 if tp, addr = tp.next(limit); addr == 0 {
1857 break
1858 }
1859 maskFromType[(addr-base)/goarch.PtrSize] = 1
1860 }
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872 differs := false
1873 for i := range maskFromHeap {
1874 if maskFromHeap[i] != maskFromType[i] {
1875 differs = true
1876 break
1877 }
1878 }
1879
1880 if differs {
1881 print("runtime: heap mask=")
1882 for _, b := range maskFromHeap {
1883 print(b)
1884 }
1885 println()
1886 print("runtime: type mask=")
1887 for _, b := range maskFromType {
1888 print(b)
1889 }
1890 println()
1891 print("runtime: type=", toRType(et).string(), "\n")
1892 throw("found two different masks from two different methods")
1893 }
1894
1895
1896 mask = maskFromHeap
1897
1898
1899
1900
1901 KeepAlive(ep)
1902 return
1903 }
1904
1905
1906 if gp := getg(); gp.m.curg.stack.lo <= uintptr(p) && uintptr(p) < gp.m.curg.stack.hi {
1907 found := false
1908 var u unwinder
1909 for u.initAt(gp.m.curg.sched.pc, gp.m.curg.sched.sp, 0, gp.m.curg, 0); u.valid(); u.next() {
1910 if u.frame.sp <= uintptr(p) && uintptr(p) < u.frame.varp {
1911 found = true
1912 break
1913 }
1914 }
1915 if found {
1916 locals, _, _ := u.frame.getStackMap(false)
1917 if locals.n == 0 {
1918 return
1919 }
1920 size := uintptr(locals.n) * goarch.PtrSize
1921 n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_
1922 mask = make([]byte, n/goarch.PtrSize)
1923 for i := uintptr(0); i < n; i += goarch.PtrSize {
1924 off := (uintptr(p) + i - u.frame.varp + size) / goarch.PtrSize
1925 mask[i/goarch.PtrSize] = locals.ptrbit(off)
1926 }
1927 }
1928 return
1929 }
1930
1931
1932
1933
1934 return
1935 }
1936