Source file: src/runtime/mbitmap.go
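// Garbage collector: type and heap bitmaps.
//
// In outline, this file provides the GC's pointer-location metadata:
//
//   - Per-span allocation and mark bitmaps (allocBits, gcmarkBits), one bit
//     per object slot, accessed through markBits.
//   - The heap bitmap, stored in each heapArena, one bit per pointer-sized
//     word of the heap: 1 means the word may hold a pointer, 0 means scalar.
//     heapBits reads it; writeHeapBits and heapBitsSetType fill it in when
//     objects are allocated.
//   - GC programs, a run-length encoding of pointer masks for types whose
//     masks would be too large to store directly; see runGCProg.
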
package runtime

import (
	"internal/goarch"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// addb returns the byte pointer p+n.
func addb(p *byte, n uintptr) *byte {
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + n))
}

// subtractb returns the byte pointer p-n.
func subtractb(p *byte, n uintptr) *byte {
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - n))
}

// add1 returns the byte pointer p+1.
func add1(p *byte) *byte {
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + 1))
}

// subtract1 returns the byte pointer p-1.
func subtract1(p *byte) *byte {
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - 1))
}

// markBits provides access to a single mark or alloc bit: the bit for the
// object with the given index is *bytep & mask.
type markBits struct {
	bytep *uint8
	mask  uint8
	index uintptr
}

// allocBitsForIndex returns a markBits addressing the alloc bit for
// the object at allocBitIndex in span s.
func (s *mspan) allocBitsForIndex(allocBitIndex uintptr) markBits {
	bytep, mask := s.allocBits.bitp(allocBitIndex)
	return markBits{bytep, mask, allocBitIndex}
}

// refillAllocCache loads the 8 bytes of s.allocBits starting at whichByte
// into s.allocCache, inverted so that count-trailing-zeros can be used to
// find the next free object (a 1 bit in allocCache means a free slot).
func (s *mspan) refillAllocCache(whichByte uintptr) {
	bytes := (*[8]uint8)(unsafe.Pointer(s.allocBits.bytep(whichByte)))
	aCache := uint64(0)
	aCache |= uint64(bytes[0])
	aCache |= uint64(bytes[1]) << (1 * 8)
	aCache |= uint64(bytes[2]) << (2 * 8)
	aCache |= uint64(bytes[3]) << (3 * 8)
	aCache |= uint64(bytes[4]) << (4 * 8)
	aCache |= uint64(bytes[5]) << (5 * 8)
	aCache |= uint64(bytes[6]) << (6 * 8)
	aCache |= uint64(bytes[7]) << (7 * 8)
	s.allocCache = ^aCache
}
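
// s.allocCache therefore always holds a 64-slot window of allocBits,
// complemented so that a 1 bit means "free". For example (illustrative
// values only):
//
//	allocBits byte 0 = 0b0000_0111   // objects 0..2 allocated
//	allocCache       = ^0b..._0111   // ...1111_1000
//	TrailingZeros64(allocCache) == 3 // object 3 is the next free slot
//
// nextFreeIndex below relies on this to find a free object with a single
// count-trailing-zeros per 64 slots.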

// nextFreeIndex returns the index of the next free object in the span,
// or s.nelems if the span is full, advancing s.freeindex and refilling
// s.allocCache as needed.
func (s *mspan) nextFreeIndex() uintptr {
	sfreeindex := s.freeindex
	snelems := s.nelems
	if sfreeindex == snelems {
		return sfreeindex
	}
	if sfreeindex > snelems {
		throw("s.freeindex > s.nelems")
	}

	aCache := s.allocCache

	bitIndex := sys.TrailingZeros64(aCache)
	for bitIndex == 64 {
		// Nothing free in the cached bits; move to the next 64-slot window.
		sfreeindex = (sfreeindex + 64) &^ (64 - 1)
		if sfreeindex >= snelems {
			s.freeindex = snelems
			return snelems
		}
		whichByte := sfreeindex / 8
		// Refill s.allocCache with the next 64 alloc bits.
		s.refillAllocCache(whichByte)
		aCache = s.allocCache
		bitIndex = sys.TrailingZeros64(aCache)
	}
	result := sfreeindex + uintptr(bitIndex)
	if result >= snelems {
		s.freeindex = snelems
		return snelems
	}

	s.allocCache >>= uint(bitIndex + 1)
	sfreeindex = result + 1

	if sfreeindex%64 == 0 && sfreeindex != snelems {
		// We just used the last bit of the current cache window.
		// Refill s.allocCache so it corresponds to the alloc bits
		// starting at the new s.freeindex.
		whichByte := sfreeindex / 8
		s.refillAllocCache(whichByte)
	}
	s.freeindex = sfreeindex
	return result
}

// isFree reports whether the object at the given index in span s is
// unallocated. Indexes below freeIndexForScan are known to be in use.
func (s *mspan) isFree(index uintptr) bool {
	if index < s.freeIndexForScan {
		return false
	}
	bytep, mask := s.allocBits.bitp(index)
	return *bytep&mask == 0
}

// divideByElemSize returns n/s.elemsize, using the precomputed reciprocal
// s.divMul so that no hardware divide is needed on the hot path.
func (s *mspan) divideByElemSize(n uintptr) uintptr {
	const doubleCheck = false

	// Multiply-and-shift form of division by elemsize; divMul is
	// generated per size class.
	q := uintptr((uint64(n) * uint64(s.divMul)) >> 32)

	if doubleCheck && q != n/s.elemsize {
		println(n, "/", s.elemsize, "should be", n/s.elemsize, "but got", q)
		throw("bad magic division")
	}
	return q
}
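
// The divMul trick above replaces a divide with a multiply and shift:
// divMul is chosen per size class so that (n*divMul)>>32 == n/elemsize
// for every byte offset n that can occur within a span of that class.
// Illustrative example (the actual constants are generated elsewhere, in
// mksizeclasses.go): with elemsize = 48 and divMul = 89478486
// (= ceil(2^32/48)), an offset n = 96 gives (96*89478486)>>32 = 2,
// i.e. object index 2.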

// objIndex returns the index within span s of the object containing
// the address p.
func (s *mspan) objIndex(p uintptr) uintptr {
	return s.divideByElemSize(p - s.base())
}

func markBitsForAddr(p uintptr) markBits {
	s := spanOf(p)
	objIndex := s.objIndex(p)
	return s.markBitsForIndex(objIndex)
}

func (s *mspan) markBitsForIndex(objIndex uintptr) markBits {
	bytep, mask := s.gcmarkBits.bitp(objIndex)
	return markBits{bytep, mask, objIndex}
}

func (s *mspan) markBitsForBase() markBits {
	return markBits{&s.gcmarkBits.x, uint8(1), 0}
}

// isMarked reports whether mark bit m is set.
func (m markBits) isMarked() bool {
	return *m.bytep&m.mask != 0
}

// setMarked sets the marked bit in the markbits, atomically.
func (m markBits) setMarked() {
	// Neighboring bits may be updated concurrently, so the update
	// must be atomic.
	atomic.Or8(m.bytep, m.mask)
}

// setMarkedNonAtomic sets the marked bit in the markbits, non-atomically.
func (m markBits) setMarkedNonAtomic() {
	*m.bytep |= m.mask
}

// clearMarked clears the marked bit in the markbits, atomically.
func (m markBits) clearMarked() {
	// Neighboring bits may be updated concurrently, so the update
	// must be atomic.
	atomic.And8(m.bytep, ^m.mask)
}

// markBitsForSpan returns the markBits for the span base address base.
func markBitsForSpan(base uintptr) (mbits markBits) {
	mbits = markBitsForAddr(base)
	if mbits.mask != 1 {
		throw("markBitsForSpan: unaligned start")
	}
	return mbits
}

// advance advances the markBits to the next object in the span.
func (m *markBits) advance() {
	if m.mask == 1<<7 {
		m.bytep = (*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(m.bytep)) + 1))
		m.mask = 1
	} else {
		m.mask = m.mask << 1
	}
	m.index++
}

// clobberdeadPtr is a pointer-sized sentinel (0xdeaddead, doubled on 64-bit
// systems) that the clobberdead debug mode writes over dead slots; findObject
// crashes if it ever sees this value used as a pointer.
const clobberdeadPtr = uintptr(0xdeaddead | 0xdeaddead<<((^uintptr(0)>>63)*32))

// badPointer reports a pointer that does not point to a valid heap object
// and throws. This typically indicates incorrect use of unsafe or cgo, a
// runtime bug, or garbage memory being treated as a pointer.
func badPointer(s *mspan, p, refBase, refOff uintptr) {
	printlock()
	print("runtime: pointer ", hex(p))
	if s != nil {
		state := s.state.get()
		if state != mSpanInUse {
			print(" to unallocated span")
		} else {
			print(" to unused region of span")
		}
		print(" span.base()=", hex(s.base()), " span.limit=", hex(s.limit), " span.state=", state)
	}
	print("\n")
	if refBase != 0 {
		print("runtime: found in object at *(", hex(refBase), "+", hex(refOff), ")\n")
		gcDumpObject("object", refBase, refOff)
	}
	getg().m.traceback = 2
	throw("found bad pointer in Go heap (incorrect use of unsafe or cgo?)")
}

// findObject returns the base address of the heap object containing
// the address p, the object's span, and the index of the object within
// the span. If p does not point into a heap object, base is 0.
//
// refBase and refOff optionally identify the object and offset where the
// pointer p was found; they are only used when reporting a bad pointer.
func findObject(p, refBase, refOff uintptr) (base uintptr, s *mspan, objIndex uintptr) {
	s = spanOf(p)
	// If s is nil, the virtual address has never been part of the heap.
	// This pointer may be to some mmap'd region, so we allow it.
	if s == nil {
		if (GOARCH == "amd64" || GOARCH == "arm64") && p == clobberdeadPtr && debug.invalidptr != 0 {
			// Crash if clobberdeadPtr is seen: on these platforms it can
			// never be a valid address.
			badPointer(s, p, refBase, refOff)
		}
		return
	}
	// If p is a bad pointer, it may not be in s's bounds.
	//
	// Check s.state to synchronize with span initialization
	// before checking other fields.
	if state := s.state.get(); state != mSpanInUse || p < s.base() || p >= s.limit {
		// Pointers into stacks are ok; the runtime manages these explicitly.
		if state == mSpanManual {
			return
		}
		// The following ensures that we are rigorous about what data
		// structures hold valid pointers.
		if debug.invalidptr != 0 {
			badPointer(s, p, refBase, refOff)
		}
		return
	}

	objIndex = s.objIndex(p)
	base = s.base() + objIndex*s.elemsize
	return
}

// reflect_verifyNotInHeapPtr reports whether converting the not-in-heap
// pointer p into an unsafe.Pointer is ok; it is used by package reflect.
// Heap pointers and the clobberdead sentinel are rejected.
func reflect_verifyNotInHeapPtr(p uintptr) bool {
	return spanOf(p) == nil && p != clobberdeadPtr
}

const ptrBits = 8 * goarch.PtrSize

// heapBits provides read access to the pointer/scalar bitmap for the heap
// words in the range [addr, addr+size).
type heapBits struct {
	// addr is the address of the word the low bit of mask describes;
	// size is the number of bytes of the object left to scan.
	addr, size uintptr

	// mask holds the next few pointer bits, starting at addr.
	// Bits already returned by next() are cleared.
	mask uintptr

	// valid is the number of bits of mask that are valid (mask < 1<<valid).
	valid uintptr
}

// heapBitsForAddr returns the heapBits describing the pointer bits for the
// object of the given size starting at addr. addr and size must be
// pointer-aligned.
func heapBitsForAddr(addr, size uintptr) heapBits {
	// Find the arena.
	ai := arenaIndex(addr)
	ha := mheap_.arenas[ai.l1()][ai.l2()]

	// Word index in arena.
	word := addr / goarch.PtrSize % heapArenaWords

	// Word index and bit offset in the bitmap array.
	idx := word / ptrBits
	off := word % ptrBits

	// Grab the relevant bits of the bitmap.
	mask := ha.bitmap[idx] >> off
	valid := ptrBits - off

	// Process depending on where the end of the object falls.
	nptr := size / goarch.PtrSize
	if nptr < valid {
		// Bits for this object end before the end of this bitmap word.
		// Squash bits for the following objects.
		mask &= 1<<(nptr&(ptrBits-1)) - 1
		valid = nptr
	} else if nptr == valid {
		// Bits for this object end exactly at the end of this bitmap word.
	} else {
		// Bits for this object extend into the next bitmap word. See if there
		// may be any pointers recorded there.
		if uintptr(ha.noMorePtrs[idx/8])>>(idx%8)&1 != 0 {
			// No more pointers in this object after this bitmap word.
			// Truncate size so we know not to look there.
			size = valid * goarch.PtrSize
		}
	}

	return heapBits{addr: addr, size: size, mask: mask, valid: valid}
}

// next returns the address of the next pointer-holding word in the region
// and the heapBits describing the remainder, or an address of 0 when there
// are no more pointers.
func (h heapBits) next() (heapBits, uintptr) {
	for {
		if h.mask != 0 {
			var i int
			if goarch.PtrSize == 8 {
				i = sys.TrailingZeros64(uint64(h.mask))
			} else {
				i = sys.TrailingZeros32(uint32(h.mask))
			}
			h.mask ^= uintptr(1) << (i & (ptrBits - 1))
			return h, h.addr + uintptr(i)*goarch.PtrSize
		}

		// Skip words that we've already processed.
		h.addr += h.valid * goarch.PtrSize
		h.size -= h.valid * goarch.PtrSize
		if h.size == 0 {
			return h, 0 // no more pointers
		}

		// Grab more bits and try again.
		h = heapBitsForAddr(h.addr, h.size)
	}
}
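
// A scan loop over an object's pointer words therefore looks roughly like
// the sketch below (the real scan loops, e.g. in mgcmark.go, also use
// nextFast as an inlined fast path):
//
//	h := heapBitsForAddr(base, size)
//	for {
//		var addr uintptr
//		if h, addr = h.next(); addr == 0 {
//			break
//		}
//		// *(*uintptr)(unsafe.Pointer(addr)) is a word that may hold a pointer.
//	}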

// nextFast is a fast path for next: it returns the next pointer recorded in
// the cached mask, or 0 when the cached mask is exhausted. A 0 result does
// not mean the object has no more pointers; callers must fall back to next.
// nextFast is small enough to be inlined.
func (h heapBits) nextFast() (heapBits, uintptr) {
	if h.mask == 0 {
		return h, 0
	}
	var i int
	if goarch.PtrSize == 8 {
		i = sys.TrailingZeros64(uint64(h.mask))
	} else {
		i = sys.TrailingZeros32(uint32(h.mask))
	}
	h.mask ^= uintptr(1) << (i & (ptrBits - 1))
	return h, h.addr + uintptr(i)*goarch.PtrSize
}

// bulkBarrierPreWrite executes a write barrier for every pointer slot in
// the memory range [dst, dst+size), using the pointer/scalar layout of dst
// to find the slots. It is called before a memmove from src to dst (it does
// not do the writes themselves). dst, src, and size must be pointer-aligned,
// and [dst, dst+size) must lie within a single object.
//
// As a special case, src == 0 indicates that this is being used for a
// memclr; only the old destination values are queued.
func bulkBarrierPreWrite(dst, src, size uintptr) {
	if (dst|src|size)&(goarch.PtrSize-1) != 0 {
		throw("bulkBarrierPreWrite: unaligned arguments")
	}
	if !writeBarrier.needed {
		return
	}
	if s := spanOf(dst); s == nil {
		// If dst is a global, use the data or BSS bitmaps to
		// execute write barriers.
		for _, datap := range activeModules() {
			if datap.data <= dst && dst < datap.edata {
				bulkBarrierBitmap(dst, src, size, dst-datap.data, datap.gcdatamask.bytedata)
				return
			}
		}
		for _, datap := range activeModules() {
			if datap.bss <= dst && dst < datap.ebss {
				bulkBarrierBitmap(dst, src, size, dst-datap.bss, datap.gcbssmask.bytedata)
				return
			}
		}
		return
	} else if s.state.get() != mSpanInUse || dst < s.base() || s.limit <= dst {
		// dst was heap memory at some point, but isn't now (for example,
		// it may be stack memory). No new heap pointers become visible to
		// the GC through this write, so no barriers are needed.
		return
	}

	buf := &getg().m.p.ptr().wbBuf
	h := heapBitsForAddr(dst, size)
	if src == 0 {
		for {
			var addr uintptr
			if h, addr = h.next(); addr == 0 {
				break
			}
			dstx := (*uintptr)(unsafe.Pointer(addr))
			p := buf.get1()
			p[0] = *dstx
		}
	} else {
		for {
			var addr uintptr
			if h, addr = h.next(); addr == 0 {
				break
			}
			dstx := (*uintptr)(unsafe.Pointer(addr))
			srcx := (*uintptr)(unsafe.Pointer(src + (addr - dst)))
			p := buf.get2()
			p[0] = *dstx
			p[1] = *srcx
		}
	}
}
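
// Note the shape of the records queued above: for each pointer slot the
// write barrier buffer receives the value currently stored at dst and, for
// a copy, the value about to be written from src. In broad terms this is the
// buffered form of Go's hybrid write barrier, which needs to observe both
// the overwritten and the newly installed pointers before the memmove runs.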

// bulkBarrierPreWriteSrcOnly is like bulkBarrierPreWrite but only queues
// the source pointers. In addition to the requirements of
// bulkBarrierPreWrite, callers must ensure that [dst, dst+size) is zeroed.
func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr) {
	if (dst|src|size)&(goarch.PtrSize-1) != 0 {
		throw("bulkBarrierPreWrite: unaligned arguments")
	}
	if !writeBarrier.needed {
		return
	}
	buf := &getg().m.p.ptr().wbBuf
	h := heapBitsForAddr(dst, size)
	for {
		var addr uintptr
		if h, addr = h.next(); addr == 0 {
			break
		}
		srcx := (*uintptr)(unsafe.Pointer(addr - dst + src))
		p := buf.get1()
		p[0] = *srcx
	}
}

// bulkBarrierBitmap executes write barriers for copying from [src, src+size)
// to [dst, dst+size) using a 1-bit pointer bitmap. dst is assumed to start
// maskOffset bytes into the data covered by the bitmap in bits (which need
// not be a multiple of 8). It is used by bulkBarrierPreWrite for writes to
// the data and BSS sections.
func bulkBarrierBitmap(dst, src, size, maskOffset uintptr, bits *uint8) {
	word := maskOffset / goarch.PtrSize
	bits = addb(bits, word/8)
	mask := uint8(1) << (word % 8)

	buf := &getg().m.p.ptr().wbBuf
	for i := uintptr(0); i < size; i += goarch.PtrSize {
		if mask == 0 {
			bits = addb(bits, 1)
			if *bits == 0 {
				// Skip 8 words.
				i += 7 * goarch.PtrSize
				continue
			}
			mask = 1
		}
		if *bits&mask != 0 {
			dstx := (*uintptr)(unsafe.Pointer(dst + i))
			if src == 0 {
				p := buf.get1()
				p[0] = *dstx
			} else {
				srcx := (*uintptr)(unsafe.Pointer(src + i))
				p := buf.get2()
				p[0] = *dstx
				p[1] = *srcx
			}
		}
		mask <<= 1
	}
}

// typeBitsBulkBarrier executes a write barrier for every pointer that would
// be copied from [src, src+size) to [dst, dst+size) by a memmove, using
// typ's pointer bitmap to locate those pointer slots. typ must describe the
// memory exactly (typ.Size_ == size), must not use a GC program, and dst,
// src, and size must be pointer-aligned.
func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {
	if typ == nil {
		throw("runtime: typeBitsBulkBarrier without type")
	}
	if typ.Size_ != size {
		println("runtime: typeBitsBulkBarrier with type ", toRType(typ).string(), " of size ", typ.Size_, " but memory size", size)
		throw("runtime: invalid typeBitsBulkBarrier")
	}
	if typ.Kind_&kindGCProg != 0 {
		println("runtime: typeBitsBulkBarrier with type ", toRType(typ).string(), " with GC prog")
		throw("runtime: invalid typeBitsBulkBarrier")
	}
	if !writeBarrier.needed {
		return
	}
	ptrmask := typ.GCData
	buf := &getg().m.p.ptr().wbBuf
	var bits uint32
	for i := uintptr(0); i < typ.PtrBytes; i += goarch.PtrSize {
		if i&(goarch.PtrSize*8-1) == 0 {
			bits = uint32(*ptrmask)
			ptrmask = addb(ptrmask, 1)
		} else {
			bits = bits >> 1
		}
		if bits&1 != 0 {
			dstx := (*uintptr)(unsafe.Pointer(dst + i))
			srcx := (*uintptr)(unsafe.Pointer(src + i))
			p := buf.get2()
			p[0] = *dstx
			p[1] = *srcx
		}
	}
}
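
// Unlike bulkBarrierPreWrite, which consults the heap bitmap for dst,
// typeBitsBulkBarrier walks the type's own one-bit pointer mask
// (typ.GCData) directly, one byte per eight words. That is also why it
// rejects types with a GC program above: there is no expanded bitmap
// to walk for them.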

// initHeapBits initializes the heap bitmap for a span. If forceClear is true
// or the span holds no pointers, the span's portion of the bitmap is cleared.
// On 64-bit systems, if the span holds single pointer-sized objects, every
// word is marked as a pointer.
func (s *mspan) initHeapBits(forceClear bool) {
	if forceClear || s.spanclass.noscan() {
		// Clear this span's portion of the bitmap.
		base := s.base()
		size := s.npages * pageSize
		h := writeHeapBitsForAddr(base)
		h.flush(base, size)
		return
	}
	isPtrs := goarch.PtrSize == 8 && s.elemsize == goarch.PtrSize
	if !isPtrs {
		return // nothing to do
	}
	// A span of pointer-sized objects: every word is a pointer, so set
	// every bit now rather than at each allocation.
	h := writeHeapBitsForAddr(s.base())
	size := s.npages * pageSize
	nptrs := size / goarch.PtrSize
	for i := uintptr(0); i < nptrs; i += ptrBits {
		h = h.write(^uintptr(0), ptrBits)
	}
	h.flush(s.base(), size)
}

// countAlloc returns the number of objects allocated in span s by
// counting bits in the mark bitmap.
func (s *mspan) countAlloc() int {
	count := 0
	bytes := divRoundUp(s.nelems, 8)
	// Count each 8-byte chunk with an intrinsic. gcmarkBits is 8-byte
	// aligned, and bits beyond nelems are zero, so the extra bits in the
	// final chunk do not affect the count.
	for i := uintptr(0); i < bytes; i += 8 {
		mrkBits := *(*uint64)(unsafe.Pointer(s.gcmarkBits.bytep(i)))
		count += sys.OnesCount64(mrkBits)
	}
	return count
}

// writeHeapBits accumulates heap bitmap bits and flushes them to the arena
// bitmap in ptrBits-sized chunks.
type writeHeapBits struct {
	addr  uintptr // heap address corresponding to the low bit of the current bitmap word
	mask  uintptr // pointer bits buffered for the bitmap word covering addr
	valid uintptr // number of bits of mask filled in so far (counting the low placeholder bits)
	low   uintptr // number of low bits belonging to earlier objects that must not be overwritten
}

// writeHeapBitsForAddr starts a heap bitmap write sequence for the address addr.
func writeHeapBitsForAddr(addr uintptr) (h writeHeapBits) {
	// We may start writing bits in the middle of a heap bitmap word.
	// Remember how many bits into the word we started, so we can be sure
	// not to overwrite the previous bits.
	h.low = addr / goarch.PtrSize % ptrBits

	// Round addr down to the heap word that starts the bitmap word.
	h.addr = addr - h.low*goarch.PtrSize

	// We don't have any bits yet.
	h.mask = 0
	h.valid = h.low

	return
}

// write appends the pointer bits for the next valid words to h.
// The low valid bits of bits encode the words' pointerness: 1 = pointer,
// 0 = scalar.
func (h writeHeapBits) write(bits, valid uintptr) writeHeapBits {
	if h.valid+valid <= ptrBits {
		// Fast path - just accumulate the bits.
		h.mask |= bits << h.valid
		h.valid += valid
		return h
	}
	// Too many bits to fit in this word. Write the current word
	// out, but keep the overflow in the buffer.

	data := h.mask | bits<<h.valid       // bits for the current bitmap word
	h.mask = bits >> (ptrBits - h.valid) // leftover for the next word
	h.valid += valid - ptrBits           // have h.valid+valid bits, writing ptrBits of them

	// Flush the completed word to the arena bitmap, preserving the low
	// bits that belong to earlier objects.
	ai := arenaIndex(h.addr)
	ha := mheap_.arenas[ai.l1()][ai.l2()]
	idx := h.addr / (ptrBits * goarch.PtrSize) % heapArenaBitmapWords
	m := uintptr(1)<<h.low - 1
	ha.bitmap[idx] = ha.bitmap[idx]&m | data

	// Clear the noMorePtrs bit, since we're going to be writing bits
	// into the following word.
	ha.noMorePtrs[idx/8] &^= uint8(1) << (idx % 8)

	// Move to the next bitmap word.
	h.addr += ptrBits * goarch.PtrSize
	h.low = 0
	return h
}
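
// For illustration (illustrative numbers only): with ptrBits = 64, suppose
// h has accumulated valid = 63 bits. A call h.write(0b11, 2) no longer fits:
// the low bit of the new pattern fills bit 63, that completed word is stored
// into ha.bitmap, and the remaining high bit is carried over in h.mask with
// h.valid left at 1.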

// pad appends size bytes worth of zero (scalar) bits to h.
func (h writeHeapBits) pad(size uintptr) writeHeapBits {
	if size == 0 {
		return h
	}
	words := size / goarch.PtrSize
	for words > ptrBits {
		h = h.write(0, ptrBits)
		words -= ptrBits
	}
	return h.write(0, words)
}

// flush writes out the buffered bits and fills the rest of the bitmap for
// the object [addr, addr+size) with zeros, setting noMorePtrs marks so that
// readers can stop early.
func (h writeHeapBits) flush(addr, size uintptr) {
	// zeros is the number of bits needed to represent the object minus the
	// number of bits we've already written. This is the number of 0 bits
	// that still need to be added.
	zeros := (addr+size-h.addr)/goarch.PtrSize - h.valid

	// Add zero bits up to the bitmap word boundary.
	if zeros > 0 {
		z := ptrBits - h.valid
		if z > zeros {
			z = zeros
		}
		h.valid += z
		zeros -= z
	}

	// Find the bitmap word we're going to write.
	ai := arenaIndex(h.addr)
	ha := mheap_.arenas[ai.l1()][ai.l2()]
	idx := h.addr / (ptrBits * goarch.PtrSize) % heapArenaBitmapWords

	// Write the remaining bits.
	if h.valid != h.low {
		m := uintptr(1)<<h.low - 1      // don't clear existing bits below "low"
		m |= ^(uintptr(1)<<h.valid - 1) // don't clear existing bits above "valid"
		ha.bitmap[idx] = ha.bitmap[idx]&m | h.mask
	}
	if zeros == 0 {
		return
	}

	// Record in noMorePtrs that there are no more 1 bits for this object,
	// so readers can stop early.
	ha.noMorePtrs[idx/8] |= uint8(1) << (idx % 8)

	// Advance to the next bitmap word.
	h.addr += ptrBits * goarch.PtrSize

	// Continue writing zeros for the rest of the object. Readers that start
	// mid-object (for example during a bulk write barrier) may consult these
	// words, so they must be cleared.
	for {
		// Write zero bits.
		ai := arenaIndex(h.addr)
		ha := mheap_.arenas[ai.l1()][ai.l2()]
		idx := h.addr / (ptrBits * goarch.PtrSize) % heapArenaBitmapWords
		if zeros < ptrBits {
			ha.bitmap[idx] &^= uintptr(1)<<zeros - 1
			break
		} else if zeros == ptrBits {
			ha.bitmap[idx] = 0
			break
		} else {
			ha.bitmap[idx] = 0
			zeros -= ptrBits
		}
		ha.noMorePtrs[idx/8] |= uint8(1) << (idx % 8)
		h.addr += ptrBits * goarch.PtrSize
	}
}

// readUintptr reads the bytes starting at the pointer-aligned address p into
// a uintptr, byte-swapping on big-endian systems so that the low byte of the
// result is always the first byte in memory.
func readUintptr(p *byte) uintptr {
	x := *(*uintptr)(unsafe.Pointer(p))
	if goarch.BigEndian {
		if goarch.PtrSize == 8 {
			return uintptr(sys.Bswap64(uint64(x)))
		}
		return uintptr(sys.Bswap32(uint32(x)))
	}
	return x
}

// heapBitsSetType records in the heap bitmap the pointer layout of an object
// of type typ allocated at address x. The object occupies size bytes, of
// which the first dataSize bytes hold one or more values of typ (dataSize is
// a multiple of typ.Size_); the remainder of the allocation is recorded as
// pointer-free.
func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
	const doubleCheck = false // slow but helpful; enable to test changes to this code

	if doubleCheck && dataSize%typ.Size_ != 0 {
		throw("heapBitsSetType: dataSize not a multiple of typ.Size")
	}

	if goarch.PtrSize == 8 && size == goarch.PtrSize {
		// A one-word object that needs pointer bits is a single pointer.
		// Spans of one-word objects have all their bits set by initHeapBits,
		// so there is nothing to write here.
		if doubleCheck {
			h, addr := heapBitsForAddr(x, size).next()
			if addr != x {
				throw("heapBitsSetType: pointer bit missing")
			}
			_, addr = h.next()
			if addr != 0 {
				throw("heapBitsSetType: second pointer bit found")
			}
		}
		return
	}

	h := writeHeapBitsForAddr(x)

	// Handle GC program.
	if typ.Kind_&kindGCProg != 0 {
		// Expand the gc program into the storage we're going to use for the actual object.
		obj := (*uint8)(unsafe.Pointer(x))
		n := runGCProg(addb(typ.GCData, 4), obj)
		// Use the expanded program to set the heap bitmap.
		for i := uintptr(0); true; i += typ.Size_ {
			// Copy the expanded program to the heap bitmap.
			p := obj
			j := n
			for j > 8 {
				h = h.write(uintptr(*p), 8)
				p = add1(p)
				j -= 8
			}
			h = h.write(uintptr(*p), j)

			if i+typ.Size_ == dataSize {
				break // no padding after the last element
			}

			// Pad with zeros to the start of the next element.
			h = h.pad(typ.Size_ - n*goarch.PtrSize)
		}

		h.flush(x, size)

		// Erase the expanded GC program.
		memclrNoHeapPointers(unsafe.Pointer(obj), (n+7)/8)
		return
	}

	// The object is one or more repetitions of typ's plain pointer mask.
	// Only the first typ.PtrBytes bytes of each element can contain pointers;
	// the tail of each element is recorded as padding.
	ptrs := typ.PtrBytes / goarch.PtrSize
	if typ.Size_ == dataSize { // Single element
		if ptrs <= ptrBits { // Single small element
			m := readUintptr(typ.GCData)
			h = h.write(m, ptrs)
		} else { // Single large element
			p := typ.GCData
			for {
				h = h.write(readUintptr(p), ptrBits)
				p = addb(p, ptrBits/8)
				ptrs -= ptrBits
				if ptrs <= ptrBits {
					break
				}
			}
			m := readUintptr(p)
			h = h.write(m, ptrs)
		}
	} else { // Repeated element
		words := typ.Size_ / goarch.PtrSize // total words per element, including the scalar tail
		if words <= ptrBits { // Repeated small element
			n := dataSize / typ.Size_
			m := readUintptr(typ.GCData)
			// Make a larger unit to repeat by doubling the pattern.
			for words <= ptrBits/2 {
				if n&1 != 0 {
					h = h.write(m, words)
				}
				n /= 2
				m |= m << words
				ptrs += words
				words *= 2
				if n == 1 {
					break
				}
			}
			for n > 1 {
				h = h.write(m, words)
				n--
			}
			h = h.write(m, ptrs)
		} else { // Repeated large element
			for i := uintptr(0); true; i += typ.Size_ {
				p := typ.GCData
				j := ptrs
				for j > ptrBits {
					h = h.write(readUintptr(p), ptrBits)
					p = addb(p, ptrBits/8)
					j -= ptrBits
				}
				m := readUintptr(p)
				h = h.write(m, j)
				if i+typ.Size_ == dataSize {
					break // don't need the trailing nonptr bits on the last element
				}
				// Pad with zeros to the start of the next element.
				h = h.pad(typ.Size_ - typ.PtrBytes)
			}
		}
	}
	h.flush(x, size)

	if doubleCheck {
		h := heapBitsForAddr(x, size)
		for i := uintptr(0); i < size; i += goarch.PtrSize {
			// Compute the pointer bit we want at offset i.
			want := false
			if i < dataSize {
				off := i % typ.Size_
				if off < typ.PtrBytes {
					j := off / goarch.PtrSize
					want = *addb(typ.GCData, j/8)>>(j%8)&1 != 0
				}
			}
			if want {
				var addr uintptr
				h, addr = h.next()
				if addr != x+i {
					throw("heapBitsSetType: pointer entry not correct")
				}
			}
		}
		if _, addr := h.next(); addr != 0 {
			throw("heapBitsSetType: extra pointer")
		}
	}
}
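
// As a concrete illustration (hypothetical type, but the control flow above
// is real): allocating one value of
//
//	type T struct {
//		p *int    // word 0: pointer
//		n uintptr // word 1: scalar
//		q *int    // word 2: pointer
//	}
//
// has typ.Size_ == dataSize, typ.PtrBytes == 3*goarch.PtrSize, and a GCData
// mask whose low three bits are 0b101. heapBitsSetType takes the "single
// small element" path: it reads that mask with readUintptr, writes 3 bits
// with h.write, and h.flush records the rest of the allocated size class as
// pointer-free.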

var debugPtrmask struct {
	lock mutex
	data *byte
}

// progToPointerMask returns the 1-bit pointer mask output by the GC program
// prog for a region of the given size in bytes. The resulting bitvector has
// at most size/goarch.PtrSize bits.
func progToPointerMask(prog *byte, size uintptr) bitvector {
	n := (size/goarch.PtrSize + 7) / 8
	x := (*[1 << 30]byte)(persistentalloc(n+1, 1, &memstats.buckhash_sys))[:n+1]
	x[len(x)-1] = 0xa1 // overflow check sentinel
	n = runGCProg(prog, &x[0])
	if x[len(x)-1] != 0xa1 {
		throw("progToPointerMask: overflow")
	}
	return bitvector{int32(n), &x[0]}
}
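
// GC programs ("gcprogs") are a compact, run-length-encoded form of a 1-bit
// pointer mask, used for types whose masks would be too large to store
// directly. As the decoder below (and dumpGCProg) shows, the bytecode is:
//
//	0x00            terminate the program
//	0x01..0x7F (n)  literal: emit n bits taken from the next ceil(n/8) bytes
//	0x80            repeat: both the bit count n and the repeat count follow as varints
//	0x80|n (n > 0)  repeat the previous n bits; the repeat count follows as a varint
//
// Each emitted bit describes one pointer-sized word: 1 = pointer, 0 = scalar.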
// runGCProg expands the GC program prog into dst, packed eight bits per
// byte, and returns the number of bits written.
func runGCProg(prog, dst *byte) uintptr {
	dstStart := dst

	// Bits waiting to be written to memory.
	var bits uintptr
	var nbits uintptr

	p := prog
Run:
	for {
		// Flush accumulated full bytes.
		// The rest of the loop assumes that nbits <= 7.
		for ; nbits >= 8; nbits -= 8 {
			*dst = uint8(bits)
			dst = add1(dst)
			bits >>= 8
		}

		// Process one instruction.
		inst := uintptr(*p)
		p = add1(p)
		n := inst & 0x7F
		if inst&0x80 == 0 {
			// Literal bits; n == 0 means end of program.
			if n == 0 {
				// Program is over.
				break Run
			}
			nbyte := n / 8
			for i := uintptr(0); i < nbyte; i++ {
				bits |= uintptr(*p) << nbits
				p = add1(p)
				*dst = uint8(bits)
				dst = add1(dst)
				bits >>= 8
			}
			if n %= 8; n > 0 {
				bits |= uintptr(*p) << nbits
				p = add1(p)
				nbits += n
			}
			continue Run
		}

		// Repeat. If n == 0, it is encoded as a varint in the next bytes.
		if n == 0 {
			for off := uint(0); ; off += 7 {
				x := uintptr(*p)
				p = add1(p)
				n |= (x & 0x7F) << off
				if x&0x80 == 0 {
					break
				}
			}
		}

		// The count is encoded as a varint in the next bytes.
		c := uintptr(0)
		for off := uint(0); ; off += 7 {
			x := uintptr(*p)
			p = add1(p)
			c |= (x & 0x7F) << off
			if x&0x80 == 0 {
				break
			}
		}
		c *= n // now total number of bits to copy

		// If the number of bits being repeated is small, load them
		// into a register and use that register for the entire loop
		// instead of repeatedly reading from memory. The cutoff is
		// chosen so that adding the pattern to the bit buffer (which
		// holds at most 7 bits) cannot overflow a uintptr.
		src := dst
		const maxBits = goarch.PtrSize*8 - 7
		if n <= maxBits {
			// Start with bits in the output buffer.
			pattern := bits
			npattern := nbits

			// If we need more bits, fetch them from memory.
			src = subtract1(src)
			for npattern < n {
				pattern <<= 8
				pattern |= uintptr(*src)
				src = subtract1(src)
				npattern += 8
			}

			// We may now have more bits than we need; discard the extra.
			if npattern > n {
				pattern >>= npattern - n
				npattern = n
			}

			// Replicate the pattern to fill at most maxBits.
			if npattern == 1 {
				// A single repeated bit. If it is 1, make the pattern all 1s.
				// If it is 0, the pattern is all 0s already, so claim it is
				// as long as we need; no replication required.
				if pattern == 1 {
					pattern = 1<<maxBits - 1
					npattern = maxBits
				} else {
					npattern = c
				}
			} else {
				b := pattern
				nb := npattern
				if nb+nb <= maxBits {
					// Double the pattern until it fills the register.
					for nb <= goarch.PtrSize*8 {
						b |= b << nb
						nb += nb
					}
					// Trim away the incomplete copy of the pattern in the high bits.
					nb = maxBits / npattern * npattern
					b &= 1<<nb - 1
					pattern = b
					npattern = nb
				}
			}

			// Add the pattern to the bit buffer and flush full bytes,
			// c/npattern times.
			for ; c >= npattern; c -= npattern {
				bits |= pattern << nbits
				nbits += npattern
				for nbits >= 8 {
					*dst = uint8(bits)
					dst = add1(dst)
					bits >>= 8
					nbits -= 8
				}
			}

			// Add the final fragment to the bit buffer.
			if c > 0 {
				pattern &= 1<<c - 1
				bits |= pattern << nbits
				nbits += c
			}
			continue Run
		}

		// Repeat, but n is too large to fit in a register.
		// Since nbits <= 7, the repeated data starts in bytes that have
		// already been written to memory; copy it byte by byte.
		off := n - nbits // n > nbits because n > maxBits and nbits <= 7
		// Leading src fragment.
		src = subtractb(src, (off+7)/8)
		if frag := off & 7; frag != 0 {
			bits |= uintptr(*src) >> (8 - frag) << nbits
			src = add1(src)
			nbits += frag
			c -= frag
		}
		// Main loop: load one byte, write another.
		// The bits rotate through the bit buffer.
		for i := c / 8; i > 0; i-- {
			bits |= uintptr(*src) << nbits
			src = add1(src)
			*dst = uint8(bits)
			dst = add1(dst)
			bits >>= 8
		}
		// Final src fragment.
		if c %= 8; c > 0 {
			bits |= (uintptr(*src) & (1<<c - 1)) << nbits
			nbits += c
		}
	}

	// Write any final bits out, using full-byte writes, even for the final byte.
	totalBits := (uintptr(unsafe.Pointer(dst))-uintptr(unsafe.Pointer(dstStart)))*8 + nbits
	nbits += -nbits & 7
	for ; nbits > 0; nbits -= 8 {
		*dst = uint8(bits)
		dst = add1(dst)
		bits >>= 8
	}
	return totalBits
}
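
// A tiny worked example: the program {0x03, 0x05, 0x00} means "literal, 3
// bits", followed by the data byte 0b0000_0101 and the stop opcode. Running
// it writes the single byte 0x05 to dst (pointer, scalar, pointer, then zero
// padding up to a byte boundary) and returns 3.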

// materializeGCProg allocates space for the 1-bit pointer mask of an object
// with ptrdata bytes of pointer data, then expands the GC program prog into
// that space. The mask starts at s.startAddr. The result must be freed with
// dematerializeGCProg.
func materializeGCProg(ptrdata uintptr, prog *byte) *mspan {
	// Each word of ptrdata needs one bit in the bitmap.
	bitmapBytes := divRoundUp(ptrdata, 8*goarch.PtrSize)
	// Compute the number of pages needed for bitmapBytes.
	pages := divRoundUp(bitmapBytes, pageSize)
	s := mheap_.allocManual(pages, spanAllocPtrScalarBits)
	runGCProg(addb(prog, 4), (*byte)(unsafe.Pointer(s.startAddr)))
	return s
}

func dematerializeGCProg(s *mspan) {
	mheap_.freeManual(s, spanAllocPtrScalarBits)
}

// dumpGCProg prints a textual decoding of the GC program at p, for debugging.
func dumpGCProg(p *byte) {
	nptr := 0
	for {
		x := *p
		p = add1(p)
		if x == 0 {
			print("\t", nptr, " end\n")
			break
		}
		if x&0x80 == 0 {
			print("\t", nptr, " lit ", x, ":")
			n := int(x+7) / 8
			for i := 0; i < n; i++ {
				print(" ", hex(*p))
				p = add1(p)
			}
			print("\n")
			nptr += int(x)
		} else {
			nbit := int(x &^ 0x80)
			if nbit == 0 {
				for nb := uint(0); ; nb += 7 {
					x := *p
					p = add1(p)
					nbit |= int(x&0x7f) << nb
					if x&0x80 == 0 {
						break
					}
				}
			}
			count := 0
			for nb := uint(0); ; nb += 7 {
				x := *p
				p = add1(p)
				count |= int(x&0x7f) << nb
				if x&0x80 == 0 {
					break
				}
			}
			print("\t", nptr, " repeat ", nbit, " × ", count, "\n")
			nptr += nbit * count
		}
	}
}

// reflect_gcbits returns the GC type info for x, for testing; it simply
// forwards to getgcmask.
func reflect_gcbits(x any) []byte {
	return getgcmask(x)
}

// getgcmask returns the pointer mask for the data addressable by *ep:
// one byte per pointer-sized word, 1 for pointer, 0 for scalar.
func getgcmask(ep any) (mask []byte) {
	e := *efaceOf(&ep)
	p := e.data
	t := e._type
	// data or bss
	for _, datap := range activeModules() {
		// data
		if datap.data <= uintptr(p) && uintptr(p) < datap.edata {
			bitmap := datap.gcdatamask.bytedata
			n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_
			mask = make([]byte, n/goarch.PtrSize)
			for i := uintptr(0); i < n; i += goarch.PtrSize {
				off := (uintptr(p) + i - datap.data) / goarch.PtrSize
				mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
			}
			return
		}

		// bss
		if datap.bss <= uintptr(p) && uintptr(p) < datap.ebss {
			bitmap := datap.gcbssmask.bytedata
			n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_
			mask = make([]byte, n/goarch.PtrSize)
			for i := uintptr(0); i < n; i += goarch.PtrSize {
				off := (uintptr(p) + i - datap.bss) / goarch.PtrSize
				mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
			}
			return
		}
	}

	// heap
	if base, s, _ := findObject(uintptr(p), 0, 0); base != 0 {
		if s.spanclass.noscan() {
			return nil
		}
		n := s.elemsize
		hbits := heapBitsForAddr(base, n)
		mask = make([]byte, n/goarch.PtrSize)
		for {
			var addr uintptr
			if hbits, addr = hbits.next(); addr == 0 {
				break
			}
			mask[(addr-base)/goarch.PtrSize] = 1
		}
		// Callers expect this mask to end at the last pointer.
		for len(mask) > 0 && mask[len(mask)-1] == 0 {
			mask = mask[:len(mask)-1]
		}
		return
	}

	// stack
	if gp := getg(); gp.m.curg.stack.lo <= uintptr(p) && uintptr(p) < gp.m.curg.stack.hi {
		found := false
		var u unwinder
		for u.initAt(gp.m.curg.sched.pc, gp.m.curg.sched.sp, 0, gp.m.curg, 0); u.valid(); u.next() {
			if u.frame.sp <= uintptr(p) && uintptr(p) < u.frame.varp {
				found = true
				break
			}
		}
		if found {
			locals, _, _ := u.frame.getStackMap(nil, false)
			if locals.n == 0 {
				return
			}
			size := uintptr(locals.n) * goarch.PtrSize
			n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_
			mask = make([]byte, n/goarch.PtrSize)
			for i := uintptr(0); i < n; i += goarch.PtrSize {
				off := (uintptr(p) + i - u.frame.varp + size) / goarch.PtrSize
				mask[i/goarch.PtrSize] = locals.ptrbit(off)
			}
		}
		return
	}

	// Otherwise, not something the GC knows about (possibly read-only data);
	// such memory must not contain pointers.
	return
}