package runtime
// This file contains the implementation of Go's map type.
//
// A map is just a hash table. The data is arranged into an array of
// buckets. Each bucket contains up to 8 key/elem pairs. The low-order
// bits of the hash are used to select a bucket. Each bucket contains a
// few high-order bits of each hash to distinguish the entries within a
// single bucket.
//
// If more than 8 keys hash to a bucket, we chain on extra buckets
// (overflow buckets).
//
// When the hashtable grows, we allocate a new array of buckets twice as
// big. Buckets are incrementally copied from the old bucket array to the
// new bucket array as the map is written to.
//
// Map iterators walk through the array of buckets and return the keys in
// walk order (bucket #, then overflow chain order, then bucket index).
// To maintain iteration semantics, we never move keys within their bucket.
// When growing the table, iterators keep iterating through the old table
// and must check the new table if the bucket they are iterating through
// has been moved ("evacuated") to the new table.
import (
	"internal/abi"
	"internal/goarch"
	"runtime/internal/atomic"
	"runtime/internal/math"
	"unsafe"
)
const (
	// Maximum number of key/elem pairs a bucket can hold.
	bucketCntBits = abi.MapBucketCountBits
	bucketCnt     = abi.MapBucketCount

	// Maximum average load of a bucket that triggers growth is bucketCnt*13/16 (about 80% full).
	// Represented as loadFactorNum/loadFactorDen to allow integer math.
	loadFactorDen = 2
	loadFactorNum = (bucketCnt * 13 / 16) * loadFactorDen

	// Maximum key or elem size to keep inline (instead of mallocing per element).
	// Must fit in a uint8.
	maxKeySize  = abi.MapMaxKeyBytes
	maxElemSize = abi.MapMaxElemBytes

	// dataOffset should be the size of the bmap struct, but needs to be
	// aligned correctly. For amd64p32 this means 64-bit alignment
	// even though pointers are 32 bit.
	dataOffset = unsafe.Offsetof(struct {
		b bmap
		v int64
	}{}.v)

	// Possible tophash values. We reserve a few possibilities for special marks.
	// Each bucket (including its overflow buckets, if any) will have either all or none of its
	// entries in the evacuated* states (except during the evacuate() method, which only happens
	// during map writes and thus no one else can observe the map during that time).
	emptyRest      = 0 // this cell is empty, and there are no more non-empty cells at higher indexes or overflows
	emptyOne       = 1 // this cell is empty
	evacuatedX     = 2 // key/elem is valid. Entry has been evacuated to the first half of the larger table
	evacuatedY     = 3 // same as above, but evacuated to the second half of the larger table
	evacuatedEmpty = 4 // cell is empty, bucket is evacuated
	minTopHash     = 5 // minimum tophash for a normal filled cell

	// flags
	iterator     = 1 // there may be an iterator using buckets
	oldIterator  = 2 // there may be an iterator using oldbuckets
	hashWriting  = 4 // a goroutine is writing to the map
	sameSizeGrow = 8 // the current map growth is to a new map of the same size

	// sentinel bucket ID for iterator checks
	noCheck = 1<<(8*goarch.PtrSize) - 1
)
// isEmpty reports whether the given tophash array entry represents an empty bucket entry.
func isEmpty(x uint8) bool {
	return x <= emptyOne
}
// A header for a Go map.
type hmap struct {
	// Note: the format of the hmap is also encoded in cmd/compile/internal/reflectdata/reflect.go.
	// Make sure this stays in sync with the compiler's definition.
	count     int // # live cells == size of map. Must be first (used by len() builtin)
	flags     uint8
	B         uint8  // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
	noverflow uint16 // approximate number of overflow buckets; see incrnoverflow for details
	hash0     uint32 // hash seed

	buckets    unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
	oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
	nevacuate  uintptr        // progress counter for evacuation (buckets less than this have been evacuated)

	extra *mapextra // optional fields
}
// mapextra holds fields that are not present on all maps.
type mapextra struct {
	// If both key and elem do not contain pointers and are inline, then we mark the bucket
	// type as containing no pointers. This avoids scanning such maps.
	// However, bmap.overflow is a pointer. In order to keep overflow buckets
	// alive, we store pointers to all overflow buckets in hmap.extra.overflow and
	// hmap.extra.oldoverflow.
	// overflow and oldoverflow are only used if key and elem do not contain pointers.
	// overflow contains overflow buckets for hmap.buckets.
	// oldoverflow contains overflow buckets for hmap.oldbuckets.
	// The indirection allows storing a pointer to the slice in hiter.
	overflow    *[]*bmap
	oldoverflow *[]*bmap

	// nextOverflow holds a pointer to a free overflow bucket.
	nextOverflow *bmap
}
// A bucket for a Go map.
type bmap struct {
	// tophash generally contains the top byte of the hash value
	// for each key in this bucket. If tophash[0] < minTopHash,
	// tophash[0] is a bucket evacuation state instead.
	tophash [bucketCnt]uint8
	// Followed by bucketCnt keys and then bucketCnt elems.
	// NOTE: packing all the keys together and then all the elems together makes the
	// code a bit more complicated than alternating key/elem/key/elem/... but it allows
	// us to eliminate padding which would be needed for, e.g., map[int64]int8.
	// Followed by an overflow pointer.
}
// A hash iteration structure.
// If you modify hiter, also change cmd/compile/internal/reflectdata/reflect.go
// and reflect/value.go to match the layout of this structure.
type hiter struct {
	key         unsafe.Pointer // Must be in first position. Write nil to indicate iteration end.
	elem        unsafe.Pointer // Must be in second position.
	t           *maptype
	h           *hmap
	buckets     unsafe.Pointer // bucket ptr at hash_iter initialization time
	bptr        *bmap          // current bucket
	overflow    *[]*bmap       // keeps overflow buckets of hmap.buckets alive
	oldoverflow *[]*bmap       // keeps overflow buckets of hmap.oldbuckets alive
	startBucket uintptr        // bucket iteration started at
	offset      uint8          // intra-bucket offset to start from during iteration
	wrapped     bool           // already wrapped around from end of bucket array to beginning
	B           uint8
	i           uint8
	bucket      uintptr
	checkBucket uintptr
}
// bucketShift returns 1<<b, optimized for code generation.
func bucketShift(b uint8) uintptr {
	// Masking the shift amount allows overflow checks to be elided.
	return uintptr(1) << (b & (goarch.PtrSize*8 - 1))
}

// bucketMask returns 1<<b - 1, optimized for code generation.
func bucketMask(b uint8) uintptr {
	return bucketShift(b) - 1
}

// tophash calculates the tophash value for hash.
func tophash(hash uintptr) uint8 {
	top := uint8(hash >> (goarch.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	return top
}

// evacuated reports whether bucket b has already been evacuated to the new bucket array.
func evacuated(b *bmap) bool {
	h := b.tophash[0]
	return h > emptyOne && h < minTopHash
}

// overflow returns the overflow bucket pointer stored at the end of b.
func (b *bmap) overflow(t *maptype) *bmap {
	return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize))
}

func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
	*(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize)) = ovf
}

func (b *bmap) keys() unsafe.Pointer {
	return add(unsafe.Pointer(b), dataOffset)
}
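// incrnoverflow increments h.noverflow.
// noverflow counts the number of overflow buckets and is used to trigger
// same-size map growth; see tooManyOverflowBuckets.
// To keep hmap small, noverflow is a uint16: for maps with few buckets it is
// an exact count, for larger maps it is an approximate count.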
func (h *hmap) incrnoverflow() {
	// We trigger same-size map growth if there are roughly as many overflow
	// buckets as buckets, so we need to be able to count up to 1<<h.B.
	if h.B < 16 {
		h.noverflow++
		return
	}
	// Increment with probability 1/(1<<(h.B-15)).
	// When we reach 1<<15 - 1, we will have approximately
	// as many overflow buckets as buckets.
	mask := uint32(1)<<(h.B-15) - 1
	// Example: if h.B == 18, then mask == 7, and fastrand&7 == 0 with probability 1/8.
	if fastrand()&mask == 0 {
		h.noverflow++
	}
}
func (h *hmap) newoverflow(t *maptype, b *bmap) *bmap {
	var ovf *bmap
	if h.extra != nil && h.extra.nextOverflow != nil {
		// We have preallocated overflow buckets available.
		// See makeBucketArray for more details.
		ovf = h.extra.nextOverflow
		if ovf.overflow(t) == nil {
			// We're not at the end of the preallocated overflow buckets. Bump the pointer.
			h.extra.nextOverflow = (*bmap)(add(unsafe.Pointer(ovf), uintptr(t.BucketSize)))
		} else {
			// This is the last preallocated overflow bucket.
			// Reset the overflow pointer on this bucket,
			// which was set to a non-nil sentinel value.
			ovf.setoverflow(t, nil)
			h.extra.nextOverflow = nil
		}
	} else {
		ovf = (*bmap)(newobject(t.Bucket))
	}
	h.incrnoverflow()
	if t.Bucket.PtrBytes == 0 {
		h.createOverflow()
		*h.extra.overflow = append(*h.extra.overflow, ovf)
	}
	b.setoverflow(t, ovf)
	return ovf
}

func (h *hmap) createOverflow() {
	if h.extra == nil {
		h.extra = new(mapextra)
	}
	if h.extra.overflow == nil {
		h.extra.overflow = new([]*bmap)
	}
}
func makemap64(t *maptype, hint int64, h *hmap) *hmap {
	if int64(int(hint)) != hint {
		hint = 0
	}
	return makemap(t, int(hint), h)
}
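// makemap_small implements Go map creation for make(map[k]v) and
// make(map[k]v, hint) when hint is known to be at most bucketCnt
// at compile time and the map needs to be allocated on the heap.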
func makemap_small() *hmap {
	h := new(hmap)
	h.hash0 = fastrand()
	return h
}
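// makemap implements Go map creation for make(map[k]v, hint).
// If the compiler has determined that the map can be created on the stack,
// h may be non-nil. If h != nil, the map can be created directly in h.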
func makemap(t *maptype, hint int, h *hmap) *hmap {
	mem, overflow := math.MulUintptr(uintptr(hint), t.Bucket.Size_)
	if overflow || mem > maxAlloc {
		hint = 0
	}

	// initialize Hmap
	if h == nil {
		h = new(hmap)
	}
	h.hash0 = fastrand()

	// Find the size parameter B which will hold the requested # of elements.
	// For hint < 0 overLoadFactor returns false since hint < bucketCnt.
	B := uint8(0)
	for overLoadFactor(hint, B) {
		B++
	}
	h.B = B

	// allocate initial hash table
	// if B == 0, the buckets field is allocated lazily later (in mapassign)
	// If hint is large zeroing this memory could take a while.
	if h.B != 0 {
		var nextOverflow *bmap
		h.buckets, nextOverflow = makeBucketArray(t, h.B, nil)
		if nextOverflow != nil {
			h.extra = new(mapextra)
			h.extra.nextOverflow = nextOverflow
		}
	}

	return h
}
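// makeBucketArray initializes a backing array for map buckets.
// 1<<b is the minimum number of buckets to allocate.
// dirtyalloc should either be nil or a bucket array previously
// allocated by makeBucketArray with the same t and b parameters.
// If dirtyalloc is nil a new backing array will be allocated and
// otherwise dirtyalloc will be cleared and reused as the backing array.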
func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets unsafe.Pointer, nextOverflow *bmap) {
	base := bucketShift(b)
	nbuckets := base
	// For small b, overflow buckets are unlikely.
	// Avoid the overhead of the calculation.
	if b >= 4 {
		// Add on the estimated number of overflow buckets
		// required to insert the median number of elements
		// used with this value of b.
		nbuckets += bucketShift(b - 4)
		sz := t.Bucket.Size_ * nbuckets
		up := roundupsize(sz)
		if up != sz {
			nbuckets = up / t.Bucket.Size_
		}
	}

	if dirtyalloc == nil {
		buckets = newarray(t.Bucket, int(nbuckets))
	} else {
		// dirtyalloc was previously generated by
		// the above newarray(t.Bucket, int(nbuckets))
		// but may not be empty.
		buckets = dirtyalloc
		size := t.Bucket.Size_ * nbuckets
		if t.Bucket.PtrBytes != 0 {
			memclrHasPointers(buckets, size)
		} else {
			memclrNoHeapPointers(buckets, size)
		}
	}

	if base != nbuckets {
		// We preallocated some overflow buckets.
		// To keep the overhead of tracking these overflow buckets to a minimum,
		// we use the convention that if a preallocated overflow bucket's overflow
		// pointer is nil, then there are more available by bumping the pointer.
		// We need a safe non-nil pointer for the last overflow bucket; just use buckets.
		nextOverflow = (*bmap)(add(buckets, base*uintptr(t.BucketSize)))
		last := (*bmap)(add(buckets, (nbuckets-1)*uintptr(t.BucketSize)))
		last.setoverflow(t, (*bmap)(buckets))
	}
	return buckets, nextOverflow
}
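// mapaccess1 returns a pointer to h[key]. Never returns nil, instead
// it will return a reference to the zero object for the elem type if
// the key is not in the map.
// NOTE: The returned pointer may keep the whole map live, so don't
// hold onto it for very long.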
func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		pc := abi.FuncPCABIInternal(mapaccess1)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.Key, key, callerpc, pc)
	}
	if msanenabled && h != nil {
		msanread(key, t.Key.Size_)
	}
	if asanenabled && h != nil {
		asanread(key, t.Key.Size_)
	}
	if h == nil || h.count == 0 {
		if t.HashMightPanic() {
			t.Hasher(key, 0) // see issue 23734
		}
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	hash := t.Hasher(key, uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
bucketloop:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
			if t.IndirectKey() {
				k = *((*unsafe.Pointer)(k))
			}
			if t.Key.Equal(key, k) {
				e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
				if t.IndirectElem() {
					e = *((*unsafe.Pointer)(e))
				}
				return e
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}
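// mapaccess2 is like mapaccess1, but also reports whether the key is present in the map.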
func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		pc := abi.FuncPCABIInternal(mapaccess2)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.Key, key, callerpc, pc)
	}
	if msanenabled && h != nil {
		msanread(key, t.Key.Size_)
	}
	if asanenabled && h != nil {
		asanread(key, t.Key.Size_)
	}
	if h == nil || h.count == 0 {
		if t.HashMightPanic() {
			t.Hasher(key, 0) // see issue 23734
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	hash := t.Hasher(key, uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
bucketloop:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
			if t.IndirectKey() {
				k = *((*unsafe.Pointer)(k))
			}
			if t.Key.Equal(key, k) {
				e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
				if t.IndirectElem() {
					e = *((*unsafe.Pointer)(e))
				}
				return e, true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}
// mapaccessK returns both key and elem. Used by the map iterator.
func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
	if h == nil || h.count == 0 {
		return nil, nil
	}
	hash := t.Hasher(key, uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
bucketloop:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
			if t.IndirectKey() {
				k = *((*unsafe.Pointer)(k))
			}
			if t.Key.Equal(key, k) {
				e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
				if t.IndirectElem() {
					e = *((*unsafe.Pointer)(e))
				}
				return k, e
			}
		}
	}
	return nil, nil
}

func mapaccess1_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) unsafe.Pointer {
	e := mapaccess1(t, h, key)
	if e == unsafe.Pointer(&zeroVal[0]) {
		return zero
	}
	return e
}

func mapaccess2_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) (unsafe.Pointer, bool) {
	e := mapaccess1(t, h, key)
	if e == unsafe.Pointer(&zeroVal[0]) {
		return zero, false
	}
	return e, true
}
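// mapassign is like mapaccess, but allocates a slot for the key if it is not present in the map.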
func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		pc := abi.FuncPCABIInternal(mapassign)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.Key, key, callerpc, pc)
	}
	if msanenabled {
		msanread(key, t.Key.Size_)
	}
	if asanenabled {
		asanread(key, t.Key.Size_)
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}
	hash := t.Hasher(key, uintptr(h.hash0))

	// Set hashWriting after calling t.Hasher, since t.Hasher may panic,
	// in which case we have not actually done a write.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.Bucket) // newarray(t.Bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
	top := tophash(hash)

	var inserti *uint8
	var insertk unsafe.Pointer
	var elem unsafe.Pointer
bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if isEmpty(b.tophash[i]) && inserti == nil {
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
					elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
			if t.IndirectKey() {
				k = *((*unsafe.Pointer)(k))
			}
			if !t.Key.Equal(key, k) {
				continue
			}
			// already have a mapping for key. Update it.
			if t.NeedKeyUpdate() {
				typedmemmove(t.Key, k, key)
			}
			elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if inserti == nil {
		// The current bucket and all the overflow buckets connected to it are full; allocate a new one.
		newb := h.newoverflow(t, b)
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		elem = add(insertk, bucketCnt*uintptr(t.KeySize))
	}

	// store new key/elem at insert position
	if t.IndirectKey() {
		kmem := newobject(t.Key)
		*(*unsafe.Pointer)(insertk) = kmem
		insertk = kmem
	}
	if t.IndirectElem() {
		vmem := newobject(t.Elem)
		*(*unsafe.Pointer)(elem) = vmem
	}
	typedmemmove(t.Key, insertk, key)
	*inserti = top
	h.count++

done:
	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
	if t.IndirectElem() {
		elem = *((*unsafe.Pointer)(elem))
	}
	return elem
}
// mapdelete removes the entry for key from the map, if present.
func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		pc := abi.FuncPCABIInternal(mapdelete)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.Key, key, callerpc, pc)
	}
	if msanenabled && h != nil {
		msanread(key, t.Key.Size_)
	}
	if asanenabled && h != nil {
		asanread(key, t.Key.Size_)
	}
	if h == nil || h.count == 0 {
		if t.HashMightPanic() {
			t.Hasher(key, 0) // see issue 23734
		}
		return
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}

	hash := t.Hasher(key, uintptr(h.hash0))

	// Set hashWriting after calling t.Hasher, since t.Hasher may panic,
	// in which case we have not actually done a write (delete).
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
	bOrig := b
	top := tophash(hash)
search:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == emptyRest {
					break search
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
			k2 := k
			if t.IndirectKey() {
				k2 = *((*unsafe.Pointer)(k2))
			}
			if !t.Key.Equal(key, k2) {
				continue
			}
			// Only clear key if there are pointers in it.
			if t.IndirectKey() {
				*(*unsafe.Pointer)(k) = nil
			} else if t.Key.PtrBytes != 0 {
				memclrHasPointers(k, t.Key.Size_)
			}
			e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
			if t.IndirectElem() {
				*(*unsafe.Pointer)(e) = nil
			} else if t.Elem.PtrBytes != 0 {
				memclrHasPointers(e, t.Elem.Size_)
			} else {
				memclrNoHeapPointers(e, t.Elem.Size_)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states.
			if i == bucketCnt-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = bucketCnt - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			// Reset the hash seed to make it more difficult for attackers to
			// repeatedly trigger hash collisions. See issue 25237.
			if h.count == 0 {
				h.hash0 = fastrand()
			}
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
}
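// mapiterinit initializes the hiter struct used for ranging over maps.
// The hiter struct pointed to by 'it' is allocated on the stack by the
// compiler's order pass or on the heap by reflect_mapiterinit.
// Both need to have a zeroed hiter since the struct contains pointers.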
func mapiterinit(t *maptype, h *hmap, it *hiter) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiterinit))
	}

	it.t = t
	if h == nil || h.count == 0 {
		return
	}

	if unsafe.Sizeof(hiter{})/goarch.PtrSize != 12 {
		throw("hash_iter size incorrect") // see cmd/compile/internal/reflectdata/reflect.go
	}
	it.h = h

	// grab snapshot of bucket state
	it.B = h.B
	it.buckets = h.buckets
	if t.Bucket.PtrBytes == 0 {
		// Allocate the current slice and remember pointers to both current and old.
		// This preserves all relevant overflow buckets alive even if
		// the table grows and/or overflow buckets are added to the table
		// while we are iterating.
		h.createOverflow()
		it.overflow = h.extra.overflow
		it.oldoverflow = h.extra.oldoverflow
	}

	// decide where to start
	var r uintptr
	if h.B > 31-bucketCntBits {
		r = uintptr(fastrand64())
	} else {
		r = uintptr(fastrand())
	}
	it.startBucket = r & bucketMask(h.B)
	it.offset = uint8(r >> h.B & (bucketCnt - 1))

	// iterator state
	it.bucket = it.startBucket

	// Remember we have an iterator.
	// Can run concurrently with another mapiterinit().
	if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator {
		atomic.Or8(&h.flags, iterator|oldIterator)
	}

	mapiternext(it)
}
// mapiternext advances the iterator to the next map entry.
func mapiternext(it *hiter) {
	h := it.h
	if raceenabled {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiternext))
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map iteration and map write")
	}
	t := it.t
	bucket := it.bucket
	b := it.bptr
	i := it.i
	checkBucket := it.checkBucket

next:
	if b == nil {
		if bucket == it.startBucket && it.wrapped {
			// end of iteration
			it.key = nil
			it.elem = nil
			return
		}
		if h.growing() && it.B == h.B {
			// Iterator was started in the middle of a grow, and the grow isn't done yet.
			// If the bucket we're looking at hasn't been filled in yet (i.e. the old
			// bucket hasn't been evacuated) then we need to iterate through the old
			// bucket and only return the ones that will be migrated to this bucket.
			oldbucket := bucket & it.h.oldbucketmask()
			b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
			if !evacuated(b) {
				checkBucket = bucket
			} else {
				b = (*bmap)(add(it.buckets, bucket*uintptr(t.BucketSize)))
				checkBucket = noCheck
			}
		} else {
			b = (*bmap)(add(it.buckets, bucket*uintptr(t.BucketSize)))
			checkBucket = noCheck
		}
		bucket++
		if bucket == bucketShift(it.B) {
			bucket = 0
			it.wrapped = true
		}
		i = 0
	}
	for ; i < bucketCnt; i++ {
		offi := (i + it.offset) & (bucketCnt - 1)
		if isEmpty(b.tophash[offi]) || b.tophash[offi] == evacuatedEmpty {
			// Skip empty or evacuated-empty cells.
			continue
		}
		k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.KeySize))
		if t.IndirectKey() {
			k = *((*unsafe.Pointer)(k))
		}
		e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+uintptr(offi)*uintptr(t.ValueSize))
		if checkBucket != noCheck && !h.sameSizeGrow() {
			// Special case: iterator was started during a grow to a larger size
			// and the grow is not done yet. We're working on a bucket whose
			// oldbucket has not been evacuated yet, so we're iterating through
			// the oldbucket, skipping any keys that will go to the other new
			// bucket (each oldbucket expands to two buckets during a grow).
			if t.ReflexiveKey() || t.Key.Equal(k, k) {
				// If the item in the oldbucket is not destined for
				// the current new bucket in the iteration, skip it.
				hash := t.Hasher(k, uintptr(h.hash0))
				if hash&bucketMask(it.B) != checkBucket {
					continue
				}
			} else {
				// Hash isn't repeatable if k != k (NaNs). We need a repeatable
				// and randomish choice of which direction to send NaNs during
				// evacuation. We use the low bit of tophash to decide which way
				// NaNs go. NOTE: this case is why we need two evacuate tophash
				// values, evacuatedX and evacuatedY, that differ in their low bit.
				if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
					continue
				}
			}
		}
		if (b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) ||
			!(t.ReflexiveKey() || t.Key.Equal(k, k)) {
			// This is the golden data, we can return it.
			// OR
			// key!=key, so the entry can't be deleted or updated, so we can just
			// return it. That's lucky for us because when key!=key we can't look
			// it up successfully.
			it.key = k
			if t.IndirectElem() {
				e = *((*unsafe.Pointer)(e))
			}
			it.elem = e
		} else {
			// The hash table has grown since the iterator was started.
			// The golden data for this key is now somewhere else.
			// Check the current hash table for the data.
			// This code handles the case where the key has been deleted,
			// updated, or deleted and reinserted.
			// NOTE: we need to regrab the key as it has potentially been
			// updated to an equal() but not identical key (e.g. +0.0 vs -0.0).
			rk, re := mapaccessK(t, h, k)
			if rk == nil {
				continue // key has been deleted
			}
			it.key = rk
			it.elem = re
		}
		it.bucket = bucket
		if it.bptr != b { // avoid unnecessary write barrier; see issue 14921
			it.bptr = b
		}
		it.i = i + 1
		it.checkBucket = checkBucket
		return
	}
	b = b.overflow(t)
	i = 0
	goto next
}
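// mapclear deletes all keys from a map.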
func mapclear(t *maptype, h *hmap) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		pc := abi.FuncPCABIInternal(mapclear)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
	}

	if h == nil || h.count == 0 {
		return
	}

	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}

	h.flags ^= hashWriting

	// Mark buckets empty, so existing iterators can be terminated.
	markBucketsEmpty := func(bucket unsafe.Pointer, mask uintptr) {
		for i := uintptr(0); i <= mask; i++ {
			b := (*bmap)(add(bucket, i*uintptr(t.BucketSize)))
			for ; b != nil; b = b.overflow(t) {
				for i := uintptr(0); i < bucketCnt; i++ {
					b.tophash[i] = emptyRest
				}
			}
		}
	}
	markBucketsEmpty(h.buckets, bucketMask(h.B))
	if oldBuckets := h.oldbuckets; oldBuckets != nil {
		markBucketsEmpty(oldBuckets, h.oldbucketmask())
	}

	h.flags &^= sameSizeGrow
	h.oldbuckets = nil
	h.nevacuate = 0
	h.noverflow = 0
	h.count = 0

	// Reset the hash seed to make it more difficult for attackers to
	// repeatedly trigger hash collisions. See issue 25237.
	h.hash0 = fastrand()

	// Keep the mapextra allocation but clear any extra information.
	if h.extra != nil {
		*h.extra = mapextra{}
	}

	// makeBucketArray clears the memory pointed to by h.buckets
	// and recovers any overflow buckets by generating them
	// as if h.buckets was newly alloced.
	_, nextOverflow := makeBucketArray(t, h.B, h.buckets)
	if nextOverflow != nil {
		// If overflow buckets are created then h.extra
		// will have been allocated during initial bucket creation.
		h.extra.nextOverflow = nextOverflow
	}

	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
}
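// hashGrow starts a (possibly same-size) grow: it allocates a new bucket
// array, hangs the old one off h.oldbuckets, and resets the evacuation
// counters. The actual copying of entries is done incrementally by
// growWork() and evacuate().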
func hashGrow(t *maptype, h *hmap) {
	// If we've hit the load factor, get bigger.
	// Otherwise, there are too many overflow buckets,
	// so keep the same number of buckets and "grow" laterally.
	bigger := uint8(1)
	if !overLoadFactor(h.count+1, h.B) {
		bigger = 0
		h.flags |= sameSizeGrow
	}
	oldbuckets := h.buckets
	newbuckets, nextOverflow := makeBucketArray(t, h.B+bigger, nil)

	flags := h.flags &^ (iterator | oldIterator)
	if h.flags&iterator != 0 {
		flags |= oldIterator
	}
	// commit the grow
	h.B += bigger
	h.flags = flags
	h.oldbuckets = oldbuckets
	h.buckets = newbuckets
	h.nevacuate = 0
	h.noverflow = 0

	if h.extra != nil && h.extra.overflow != nil {
		// Promote current overflow buckets to the old generation.
		if h.extra.oldoverflow != nil {
			throw("oldoverflow is not nil")
		}
		h.extra.oldoverflow = h.extra.overflow
		h.extra.overflow = nil
	}
	if nextOverflow != nil {
		if h.extra == nil {
			h.extra = new(mapextra)
		}
		h.extra.nextOverflow = nextOverflow
	}

	// the actual copying of the hash table data is done incrementally
	// by growWork() and evacuate().
}
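// overLoadFactor reports whether count items placed in 1<<B buckets is over loadFactor.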
func overLoadFactor(count int, B uint8) bool {
	return count > bucketCnt && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
}
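// tooManyOverflowBuckets reports whether noverflow buckets is too many for a map with 1<<B buckets.
// Note that most of these overflow buckets must be in sparse use;
// if use were dense, then we'd have already triggered regular map growth.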
func tooManyOverflowBuckets(noverflow uint16, B uint8) bool {
	// If the threshold is too low, we do extraneous work.
	// If the threshold is too high, maps that grow and shrink can hold on to lots of unused memory.
	// "too many" means (approximately) as many overflow buckets as regular buckets.
	// See incrnoverflow for more details.
	if B > 15 {
		B = 15
	}
	// The compiler doesn't see here that B < 16; mask B to generate shorter shift code.
	return noverflow >= uint16(1)<<(B&15)
}

// growing reports whether h is growing. The growth may be to the same size or bigger.
func (h *hmap) growing() bool {
	return h.oldbuckets != nil
}

// sameSizeGrow reports whether the current growth is to a map of the same size.
func (h *hmap) sameSizeGrow() bool {
	return h.flags&sameSizeGrow != 0
}

// noldbuckets calculates the number of buckets prior to the current map growth.
func (h *hmap) noldbuckets() uintptr {
	oldB := h.B
	if !h.sameSizeGrow() {
		oldB--
	}
	return bucketShift(oldB)
}

// oldbucketmask provides a mask that can be applied to calculate n % noldbuckets().
func (h *hmap) oldbucketmask() uintptr {
	return h.noldbuckets() - 1
}
func growWork(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate(t, h, h.nevacuate)
	}
}

func bucketEvacuated(t *maptype, h *hmap, bucket uintptr) bool {
	b := (*bmap)(add(h.oldbuckets, bucket*uintptr(t.BucketSize)))
	return evacuated(b)
}

// evacDst is an evacuation destination.
type evacDst struct {
	b *bmap          // current destination bucket
	i int            // key/elem index into b
	k unsafe.Pointer // pointer to current key storage
	e unsafe.Pointer // pointer to current elem storage
}
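// evacuate copies the entries of old bucket oldbucket (and its overflow chain)
// into the new bucket array, splitting them between the X and Y destinations
// when the map is growing to a larger size.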
func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, bucketCnt*uintptr(t.KeySize))

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, bucketCnt*uintptr(t.KeySize))
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, bucketCnt*uintptr(t.KeySize))
			for i := 0; i < bucketCnt; i, k, e = i+1, add(k, uintptr(t.KeySize)), add(e, uintptr(t.ValueSize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				k2 := k
				if t.IndirectKey() {
					k2 = *((*unsafe.Pointer)(k2))
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.Hasher(k2, uintptr(h.hash0))
					if h.flags&iterator != 0 && !t.ReflexiveKey() && !t.Key.Equal(k2, k2) {
						// If key != key (NaNs), then the hash could be (and probably
						// will be) entirely different from the old hash. Moreover, it
						// isn't reproducible. Reproducibility is required in the
						// presence of iterators, as our evacuation decision must match
						// whatever decision the iterator made. Fortunately, we have the
						// freedom to send these keys either way. Also, tophash is
						// meaningless for these kinds of keys. We let the low bit of
						// tophash drive the evacuation decision and recompute a new
						// random tophash for the next level so these keys get evenly
						// distributed across all buckets after multiple grows.
						useY = top & 1
						top = tophash(hash)
					} else {
						if hash&newbit != 0 {
							useY = 1
						}
					}
				}

				if evacuatedX+1 != evacuatedY || evacuatedX^1 != evacuatedY {
					throw("bad evacuatedN")
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, bucketCnt*uintptr(t.KeySize))
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
				if t.IndirectKey() {
					*(*unsafe.Pointer)(dst.k) = k2 // copy pointer
				} else {
					typedmemmove(t.Key, dst.k, k) // copy key
				}
				if t.IndirectElem() {
					*(*unsafe.Pointer)(dst.e) = *(*unsafe.Pointer)(e)
				} else {
					typedmemmove(t.Elem, dst.e, e)
				}
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays. That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, uintptr(t.KeySize))
				dst.e = add(dst.e, uintptr(t.ValueSize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.Bucket.PtrBytes != 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.BucketSize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}
func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr) {
	h.nevacuate++
	// Experiments suggest that 1024 is overkill by at least an order of magnitude.
	// Put it in there as a safeguard anyway, to ensure O(1) behavior.
	stop := h.nevacuate + 1024
	if stop > newbit {
		stop = newbit
	}
	for h.nevacuate != stop && bucketEvacuated(t, h, h.nevacuate) {
		h.nevacuate++
	}
	if h.nevacuate == newbit { // newbit == # of oldbuckets
		// Growing is all done. Free old main bucket array.
		h.oldbuckets = nil
		// Can discard old overflow buckets as well.
		// If they are still referenced by an iterator,
		// then the iterator holds a pointer to the slice.
		if h.extra != nil {
			h.extra.oldoverflow = nil
		}
		h.flags &^= sameSizeGrow
	}
}
// reflect_makemap is the entry point used by package reflect to create a map.
func reflect_makemap(t *maptype, cap int) *hmap {
	// Check invariants and reflect's math.
	if t.Key.Equal == nil {
		throw("runtime.reflect_makemap: unsupported map key type")
	}
	if t.Key.Size_ > maxKeySize && (!t.IndirectKey() || t.KeySize != uint8(goarch.PtrSize)) ||
		t.Key.Size_ <= maxKeySize && (t.IndirectKey() || t.KeySize != uint8(t.Key.Size_)) {
		throw("key size wrong")
	}
	if t.Elem.Size_ > maxElemSize && (!t.IndirectElem() || t.ValueSize != uint8(goarch.PtrSize)) ||
		t.Elem.Size_ <= maxElemSize && (t.IndirectElem() || t.ValueSize != uint8(t.Elem.Size_)) {
		throw("elem size wrong")
	}
	if t.Key.Align_ > bucketCnt {
		throw("key align too big")
	}
	if t.Elem.Align_ > bucketCnt {
		throw("elem align too big")
	}
	if t.Key.Size_%uintptr(t.Key.Align_) != 0 {
		throw("key size not a multiple of key align")
	}
	if t.Elem.Size_%uintptr(t.Elem.Align_) != 0 {
		throw("elem size not a multiple of elem align")
	}
	if bucketCnt < 8 {
		throw("bucketsize too small for proper alignment")
	}
	if dataOffset%uintptr(t.Key.Align_) != 0 {
		throw("need padding in bucket (key)")
	}
	if dataOffset%uintptr(t.Elem.Align_) != 0 {
		throw("need padding in bucket (elem)")
	}

	return makemap(t, cap, nil)
}
// The reflect_* and reflectlite_* functions below are thin wrappers used by
// package reflect and internal/reflectlite.

func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	elem, ok := mapaccess2(t, h, key)
	if !ok {
		// reflect wants nil for a missing element
		elem = nil
	}
	return elem
}

func reflect_mapaccess_faststr(t *maptype, h *hmap, key string) unsafe.Pointer {
	elem, ok := mapaccess2_faststr(t, h, key)
	if !ok {
		// reflect wants nil for a missing element
		elem = nil
	}
	return elem
}

func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, elem unsafe.Pointer) {
	p := mapassign(t, h, key)
	typedmemmove(t.Elem, p, elem)
}

func reflect_mapassign_faststr(t *maptype, h *hmap, key string, elem unsafe.Pointer) {
	p := mapassign_faststr(t, h, key)
	typedmemmove(t.Elem, p, elem)
}

func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	mapdelete(t, h, key)
}

func reflect_mapdelete_faststr(t *maptype, h *hmap, key string) {
	mapdelete_faststr(t, h, key)
}

func reflect_mapiterinit(t *maptype, h *hmap, it *hiter) {
	mapiterinit(t, h, it)
}

func reflect_mapiternext(it *hiter) {
	mapiternext(it)
}

func reflect_mapiterkey(it *hiter) unsafe.Pointer {
	return it.key
}

func reflect_mapiterelem(it *hiter) unsafe.Pointer {
	return it.elem
}

func reflect_maplen(h *hmap) int {
	if h == nil {
		return 0
	}
	if raceenabled {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
	}
	return h.count
}

func reflect_mapclear(t *maptype, h *hmap) {
	mapclear(t, h)
}

func reflectlite_maplen(h *hmap) int {
	if h == nil {
		return 0
	}
	if raceenabled {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
	}
	return h.count
}
// maxZero is the size of the shared zero-value buffer returned for missing
// elements; it must stay in sync with the value assumed by the compiler and reflect.
const maxZero = 1024

var zeroVal [maxZero]byte

// mapinitnoop is a no-op function known to the Go linker; if a given global
// map (of the right size) is determined to be dead, the linker rewrites the
// relevant initialization code to a call to this function, skipping map
// initialization.
//
// The purpose of this function is to be referenced in generated code;
// it is never actually executed.
func mapinitnoop()
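// mapclone is the runtime support for cloning a map: it returns a copy of
// the map passed in as an empty interface.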
func mapclone(m any) any {
	e := efaceOf(&m)
	e.data = unsafe.Pointer(mapclone2((*maptype)(unsafe.Pointer(e._type)), (*hmap)(e.data)))
	return m
}
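// moveToBmap moves the entries of bucket src into dst, starting at slot pos.
// It returns the (possibly new overflow) destination bucket and the position
// at which the next key/elem should be written.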
func moveToBmap(t *maptype, h *hmap, dst *bmap, pos int, src *bmap) (*bmap, int) {
	for i := 0; i < bucketCnt; i++ {
		if isEmpty(src.tophash[i]) {
			continue
		}

		for ; pos < bucketCnt; pos++ {
			if isEmpty(dst.tophash[pos]) {
				break
			}
		}

		if pos == bucketCnt {
			dst = h.newoverflow(t, dst)
			pos = 0
		}

		srcK := add(unsafe.Pointer(src), dataOffset+uintptr(i)*uintptr(t.KeySize))
		srcEle := add(unsafe.Pointer(src), dataOffset+bucketCnt*uintptr(t.KeySize)+uintptr(i)*uintptr(t.ValueSize))
		dstK := add(unsafe.Pointer(dst), dataOffset+uintptr(pos)*uintptr(t.KeySize))
		dstEle := add(unsafe.Pointer(dst), dataOffset+bucketCnt*uintptr(t.KeySize)+uintptr(pos)*uintptr(t.ValueSize))

		dst.tophash[pos] = src.tophash[i]
		if t.IndirectKey() {
			*(*unsafe.Pointer)(dstK) = *(*unsafe.Pointer)(srcK)
		} else {
			typedmemmove(t.Key, dstK, srcK)
		}
		if t.IndirectElem() {
			*(*unsafe.Pointer)(dstEle) = *(*unsafe.Pointer)(srcEle)
		} else {
			typedmemmove(t.Elem, dstEle, srcEle)
		}
		pos++
		h.count++
	}
	return dst, pos
}
func mapclone2(t *maptype, src *hmap) *hmap {
	dst := makemap(t, src.count, nil)
	dst.hash0 = src.hash0
	dst.nevacuate = 0
	// flags do not need to be copied here, just like a new map has no flags.

	if src.count == 0 {
		return dst
	}

	if src.flags&hashWriting != 0 {
		fatal("concurrent map clone and map write")
	}

	if src.B == 0 {
		dst.buckets = newobject(t.Bucket)
		dst.count = src.count
		typedmemmove(t.Bucket, dst.buckets, src.buckets)
		return dst
	}

	// src.B != 0
	if dst.B == 0 {
		dst.buckets = newobject(t.Bucket)
	}
	dstArraySize := int(bucketShift(dst.B))
	srcArraySize := int(bucketShift(src.B))
	for i := 0; i < dstArraySize; i++ {
		dstBmap := (*bmap)(add(dst.buckets, uintptr(i*int(t.BucketSize))))
		pos := 0
		for j := 0; j < srcArraySize; j += dstArraySize {
			srcBmap := (*bmap)(add(src.buckets, uintptr((i+j)*int(t.BucketSize))))
			for srcBmap != nil {
				dstBmap, pos = moveToBmap(t, dst, dstBmap, pos, srcBmap)
				srcBmap = srcBmap.overflow(t)
			}
		}
	}

	if src.oldbuckets == nil {
		return dst
	}

	oldB := src.B
	srcOldbuckets := src.oldbuckets
	if !src.sameSizeGrow() {
		oldB--
	}
	oldSrcArraySize := int(bucketShift(oldB))

	for i := 0; i < oldSrcArraySize; i++ {
		srcBmap := (*bmap)(add(srcOldbuckets, uintptr(i*int(t.BucketSize))))
		if evacuated(srcBmap) {
			continue
		}

		if oldB >= dst.B { // main bucket bits in dst are fewer than oldB bits in src
			dstBmap := (*bmap)(add(dst.buckets, (uintptr(i)&bucketMask(dst.B))*uintptr(t.BucketSize)))
			for dstBmap.overflow(t) != nil {
				dstBmap = dstBmap.overflow(t)
			}
			pos := 0
			for srcBmap != nil {
				dstBmap, pos = moveToBmap(t, dst, dstBmap, pos, srcBmap)
				srcBmap = srcBmap.overflow(t)
			}
			continue
		}

		// oldB < dst.B, so an old bucket's entries may be split across several
		// destination buckets; move them one entry at a time via mapassign.
		for srcBmap != nil {
			for i := uintptr(0); i < bucketCnt; i++ {
				if isEmpty(srcBmap.tophash[i]) {
					continue
				}

				if src.flags&hashWriting != 0 {
					fatal("concurrent map clone and map write")
				}

				srcK := add(unsafe.Pointer(srcBmap), dataOffset+i*uintptr(t.KeySize))
				if t.IndirectKey() {
					srcK = *((*unsafe.Pointer)(srcK))
				}

				srcEle := add(unsafe.Pointer(srcBmap), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
				if t.IndirectElem() {
					srcEle = *((*unsafe.Pointer)(srcEle))
				}
				dstEle := mapassign(t, dst, srcK)
				typedmemmove(t.Elem, dstEle, srcEle)
			}
			srcBmap = srcBmap.overflow(t)
		}
	}
	return dst
}
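// keys copies the keys of map m into the slice pointed to by p, growing its
// length, visiting buckets and cells in a randomized order.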
func keys(m any, p unsafe.Pointer) {
	e := efaceOf(&m)
	t := (*maptype)(unsafe.Pointer(e._type))
	h := (*hmap)(e.data)

	if h == nil || h.count == 0 {
		return
	}
	s := (*slice)(p)
	r := int(fastrand())
	offset := uint8(r >> h.B & (bucketCnt - 1))
	if h.B == 0 {
		copyKeys(t, h, (*bmap)(h.buckets), s, offset)
		return
	}
	arraySize := int(bucketShift(h.B))
	buckets := h.buckets
	for i := 0; i < arraySize; i++ {
		bucket := (i + r) & (arraySize - 1)
		b := (*bmap)(add(buckets, uintptr(bucket)*uintptr(t.BucketSize)))
		copyKeys(t, h, b, s, offset)
	}

	if h.growing() {
		oldArraySize := int(h.noldbuckets())
		for i := 0; i < oldArraySize; i++ {
			bucket := (i + r) & (oldArraySize - 1)
			b := (*bmap)(add(h.oldbuckets, uintptr(bucket)*uintptr(t.BucketSize)))
			if evacuated(b) {
				continue
			}
			copyKeys(t, h, b, s, offset)
		}
	}
	return
}
func copyKeys(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) {
	for b != nil {
		for i := uintptr(0); i < bucketCnt; i++ {
			offi := (i + uintptr(offset)) & (bucketCnt - 1)
			if isEmpty(b.tophash[offi]) {
				continue
			}
			if h.flags&hashWriting != 0 {
				fatal("concurrent map read and map write")
			}
			k := add(unsafe.Pointer(b), dataOffset+offi*uintptr(t.KeySize))
			if t.IndirectKey() {
				k = *((*unsafe.Pointer)(k))
			}
			if s.len >= s.cap {
				fatal("concurrent map read and map write")
			}
			typedmemmove(t.Key, add(s.array, uintptr(s.len)*uintptr(t.KeySize)), k)
			s.len++
		}
		b = b.overflow(t)
	}
}
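// values copies the values of map m into the slice pointed to by p, growing
// its length, visiting buckets and cells in a randomized order.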
func values(m any, p unsafe.Pointer) {
	e := efaceOf(&m)
	t := (*maptype)(unsafe.Pointer(e._type))
	h := (*hmap)(e.data)
	if h == nil || h.count == 0 {
		return
	}
	s := (*slice)(p)
	r := int(fastrand())
	offset := uint8(r >> h.B & (bucketCnt - 1))
	if h.B == 0 {
		copyValues(t, h, (*bmap)(h.buckets), s, offset)
		return
	}
	arraySize := int(bucketShift(h.B))
	buckets := h.buckets
	for i := 0; i < arraySize; i++ {
		bucket := (i + r) & (arraySize - 1)
		b := (*bmap)(add(buckets, uintptr(bucket)*uintptr(t.BucketSize)))
		copyValues(t, h, b, s, offset)
	}

	if h.growing() {
		oldArraySize := int(h.noldbuckets())
		for i := 0; i < oldArraySize; i++ {
			bucket := (i + r) & (oldArraySize - 1)
			b := (*bmap)(add(h.oldbuckets, uintptr(bucket)*uintptr(t.BucketSize)))
			if evacuated(b) {
				continue
			}
			copyValues(t, h, b, s, offset)
		}
	}
	return
}
func copyValues(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) {
	for b != nil {
		for i := uintptr(0); i < bucketCnt; i++ {
			offi := (i + uintptr(offset)) & (bucketCnt - 1)
			if isEmpty(b.tophash[offi]) {
				continue
			}

			if h.flags&hashWriting != 0 {
				fatal("concurrent map read and map write")
			}

			ele := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+offi*uintptr(t.ValueSize))
			if t.IndirectElem() {
				ele = *((*unsafe.Pointer)(ele))
			}
			if s.len >= s.cap {
				fatal("concurrent map read and map write")
			}
			typedmemmove(t.Elem, add(s.array, uintptr(s.len)*uintptr(t.ValueSize)), ele)
			s.len++
		}
		b = b.overflow(t)
	}
}