Source file
src/runtime/runtime1.go
1
2
3
4
5 package runtime
6
7 import (
8 "internal/bytealg"
9 "internal/goarch"
10 "runtime/internal/atomic"
11 "unsafe"
12 )
13
14
15
16
17
18
// Keep a cached value of the GOTRACEBACK setting so gotraceback is
// cheap. The cached uint32 packs the "crash" and "all" flags into the
// low bits and the numeric traceback level into the bits above
// tracebackShift.
const (
	tracebackCrash = 1 << iota
	tracebackAll
	tracebackShift = iota
)

// traceback_cache holds the packed setting; it starts at level 2 and
// is replaced by setTraceback during startup.
var traceback_cache uint32 = 2 << tracebackShift

// traceback_env records the bits that came from the GOTRACEBACK
// environment variable, so setTraceback can re-apply them.
var traceback_env uint32
27
28
29
30
31
32
33
34
35
36
// gotraceback returns the current traceback settings.
//
// level is the traceback verbosity (0 = none, 1 = user frames,
// 2 = include runtime frames, as the throwTypeRuntime branch shows).
// all reports whether to dump all goroutines rather than just the
// current one. crash reports whether to raise a crash after the dump.
func gotraceback() (level int32, all, crash bool) {
	gp := getg()
	t := atomic.Load(&traceback_cache)
	crash = t&tracebackCrash != 0
	// User-level throws always dump all goroutines.
	all = gp.m.throwing >= throwTypeUser || t&tracebackAll != 0
	if gp.m.traceback != 0 {
		// A per-M override takes precedence over the cached setting.
		level = int32(gp.m.traceback)
	} else if gp.m.throwing >= throwTypeRuntime {
		// Runtime-internal throws always include runtime frames.
		level = 2
	} else {
		level = int32(t >> tracebackShift)
	}
	return
}
53
// argc and argv hold the raw OS-supplied argument count and vector,
// recorded by args and consumed by goargs and goenvs_unix.
var (
	argc int32
	argv **byte
)
58
59
60
61
// argv_index returns the i'th entry of the C-style argv vector
// (an array of *byte laid out contiguously in memory).
func argv_index(argv **byte, i int32) *byte {
	return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*goarch.PtrSize))
}
65
// args records the argument count and vector passed in by the OS at
// startup, then gives the OS-specific sysargs hook a chance to
// examine them.
func args(c int32, v **byte) {
	argc = c
	argv = v
	sysargs(c, v)
}
71
72 func goargs() {
73 if GOOS == "windows" {
74 return
75 }
76 argslice = make([]string, argc)
77 for i := int32(0); i < argc; i++ {
78 argslice[i] = gostringnocopy(argv_index(argv, i))
79 }
80 }
81
82 func goenvs_unix() {
83
84
85
86 n := int32(0)
87 for argv_index(argv, argc+1+n) != nil {
88 n++
89 }
90
91 envs = make([]string, n)
92 for i := int32(0); i < n; i++ {
93 envs[i] = gostring(argv_index(argv, argc+1+i))
94 }
95 }
96
// environ returns the runtime's copy of the process environment,
// as populated by goenvs_unix (or the OS-specific equivalent).
func environ() []string {
	return envs
}
100
101
102
103 var test_z64, test_x64 uint64
104
// testAtomic64 sanity-checks the 64-bit atomic primitives (Cas64,
// Load64, Store64, Xadd64, Xchg64), using values above 1<<32 to catch
// truncation to 32 bits. Any failure is fatal.
func testAtomic64() {
	test_z64 = 42
	test_x64 = 0
	// CAS with a non-matching old value must fail and change nothing.
	if atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 0 {
		throw("cas64 failed")
	}
	test_x64 = 42
	// CAS with the matching old value must succeed and swap in 1.
	if !atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 42 || test_z64 != 1 {
		throw("cas64 failed")
	}
	if atomic.Load64(&test_z64) != 1 {
		throw("load64 failed")
	}
	// From here on, operate on values wider than 32 bits.
	atomic.Store64(&test_z64, (1<<40)+1)
	if atomic.Load64(&test_z64) != (1<<40)+1 {
		throw("store64 failed")
	}
	if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Load64(&test_z64) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	// Xchg64 must return the old value and install the new one.
	if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
		throw("xchg64 failed")
	}
	if atomic.Load64(&test_z64) != (3<<40)+3 {
		throw("xchg64 failed")
	}
}
141
142 func check() {
143 var (
144 a int8
145 b uint8
146 c int16
147 d uint16
148 e int32
149 f uint32
150 g int64
151 h uint64
152 i, i1 float32
153 j, j1 float64
154 k unsafe.Pointer
155 l *uint16
156 m [4]byte
157 )
158 type x1t struct {
159 x uint8
160 }
161 type y1t struct {
162 x1 x1t
163 y uint8
164 }
165 var x1 x1t
166 var y1 y1t
167
168 if unsafe.Sizeof(a) != 1 {
169 throw("bad a")
170 }
171 if unsafe.Sizeof(b) != 1 {
172 throw("bad b")
173 }
174 if unsafe.Sizeof(c) != 2 {
175 throw("bad c")
176 }
177 if unsafe.Sizeof(d) != 2 {
178 throw("bad d")
179 }
180 if unsafe.Sizeof(e) != 4 {
181 throw("bad e")
182 }
183 if unsafe.Sizeof(f) != 4 {
184 throw("bad f")
185 }
186 if unsafe.Sizeof(g) != 8 {
187 throw("bad g")
188 }
189 if unsafe.Sizeof(h) != 8 {
190 throw("bad h")
191 }
192 if unsafe.Sizeof(i) != 4 {
193 throw("bad i")
194 }
195 if unsafe.Sizeof(j) != 8 {
196 throw("bad j")
197 }
198 if unsafe.Sizeof(k) != goarch.PtrSize {
199 throw("bad k")
200 }
201 if unsafe.Sizeof(l) != goarch.PtrSize {
202 throw("bad l")
203 }
204 if unsafe.Sizeof(x1) != 1 {
205 throw("bad unsafe.Sizeof x1")
206 }
207 if unsafe.Offsetof(y1.y) != 1 {
208 throw("bad offsetof y1.y")
209 }
210 if unsafe.Sizeof(y1) != 2 {
211 throw("bad unsafe.Sizeof y1")
212 }
213
214 if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 {
215 throw("bad timediv")
216 }
217
218 var z uint32
219 z = 1
220 if !atomic.Cas(&z, 1, 2) {
221 throw("cas1")
222 }
223 if z != 2 {
224 throw("cas2")
225 }
226
227 z = 4
228 if atomic.Cas(&z, 5, 6) {
229 throw("cas3")
230 }
231 if z != 4 {
232 throw("cas4")
233 }
234
235 z = 0xffffffff
236 if !atomic.Cas(&z, 0xffffffff, 0xfffffffe) {
237 throw("cas5")
238 }
239 if z != 0xfffffffe {
240 throw("cas6")
241 }
242
243 m = [4]byte{1, 1, 1, 1}
244 atomic.Or8(&m[1], 0xf0)
245 if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
246 throw("atomicor8")
247 }
248
249 m = [4]byte{0xff, 0xff, 0xff, 0xff}
250 atomic.And8(&m[1], 0x1)
251 if m[0] != 0xff || m[1] != 0x1 || m[2] != 0xff || m[3] != 0xff {
252 throw("atomicand8")
253 }
254
255 *(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
256 if j == j {
257 throw("float64nan")
258 }
259 if !(j != j) {
260 throw("float64nan1")
261 }
262
263 *(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
264 if j == j1 {
265 throw("float64nan2")
266 }
267 if !(j != j1) {
268 throw("float64nan3")
269 }
270
271 *(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
272 if i == i {
273 throw("float32nan")
274 }
275 if i == i {
276 throw("float32nan1")
277 }
278
279 *(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
280 if i == i1 {
281 throw("float32nan2")
282 }
283 if i == i1 {
284 throw("float32nan3")
285 }
286
287 testAtomic64()
288
289 if fixedStack != round2(fixedStack) {
290 throw("FixedStack is not power-of-2")
291 }
292
293 if !checkASM() {
294 throw("assembly checks failed")
295 }
296 }
297
// dbgVar describes a single GODEBUG setting. Exactly one of value and
// atomic is non-nil: value for settings applied only at startup,
// atomic for settings that may also change while the program runs
// (see parsegodebug and reparsedebugvars). def is the default applied
// before parsing.
type dbgVar struct {
	name   string
	value  *int32        // for startup-only settings
	atomic *atomic.Int32 // for settings changeable during execution
	def    int32         // default value
}
304
305
306
307
308
// debug holds the parsed GODEBUG settings. Plain int32 fields are
// written once at startup by parsedebugvars; atomic fields may also be
// updated later via reparsedebugvars.
var debug struct {
	cgocheck           int32
	clobberfree        int32
	dontfreezetheworld int32
	efence             int32
	gccheckmark        int32
	gcpacertrace       int32
	gcshrinkstackoff   int32
	gcstoptheworld     int32
	gctrace            int32
	invalidptr         int32
	madvdontneed       int32
	scavtrace          int32
	scheddetail        int32
	schedtrace         int32
	tracebackancestors int32
	asyncpreemptoff    int32
	harddecommit       int32
	adaptivestackstart int32
	tracefpunwindoff   int32

	// malloc is a combined flag derived in parsedebugvars from
	// allocfreetrace, inittrace, and sbrk: true if any of them is
	// nonzero.
	malloc         bool
	allocfreetrace int32
	inittrace      int32
	sbrk           int32

	panicnil atomic.Int32
}
340
// dbgvars maps each GODEBUG key to its destination field in debug.
// Entries with value are startup-only; entries with atomic (currently
// panicnil) can be changed while the program runs.
var dbgvars = []*dbgVar{
	{name: "allocfreetrace", value: &debug.allocfreetrace},
	{name: "clobberfree", value: &debug.clobberfree},
	{name: "cgocheck", value: &debug.cgocheck},
	{name: "dontfreezetheworld", value: &debug.dontfreezetheworld},
	{name: "efence", value: &debug.efence},
	{name: "gccheckmark", value: &debug.gccheckmark},
	{name: "gcpacertrace", value: &debug.gcpacertrace},
	{name: "gcshrinkstackoff", value: &debug.gcshrinkstackoff},
	{name: "gcstoptheworld", value: &debug.gcstoptheworld},
	{name: "gctrace", value: &debug.gctrace},
	{name: "invalidptr", value: &debug.invalidptr},
	{name: "madvdontneed", value: &debug.madvdontneed},
	{name: "sbrk", value: &debug.sbrk},
	{name: "scavtrace", value: &debug.scavtrace},
	{name: "scheddetail", value: &debug.scheddetail},
	{name: "schedtrace", value: &debug.schedtrace},
	{name: "tracebackancestors", value: &debug.tracebackancestors},
	{name: "asyncpreemptoff", value: &debug.asyncpreemptoff},
	{name: "inittrace", value: &debug.inittrace},
	{name: "harddecommit", value: &debug.harddecommit},
	{name: "adaptivestackstart", value: &debug.adaptivestackstart},
	{name: "tracefpunwindoff", value: &debug.tracefpunwindoff},
	{name: "panicnil", atomic: &debug.panicnil},
}
366
// parsedebugvars parses the GODEBUG and GOTRACEBACK environment
// variables at startup, after applying built-in defaults.
func parsedebugvars() {
	// Built-in defaults.
	debug.cgocheck = 1
	debug.invalidptr = 1
	debug.adaptivestackstart = 1
	if GOOS == "linux" {
		// Default to madvdontneed=1 on Linux.
		// NOTE(review): the rationale is not visible here — presumably
		// so released memory shows up in RSS promptly; confirm against
		// the upstream comment before relying on this.
		debug.madvdontneed = 1
	}

	godebug := gogetenv("GODEBUG")

	// Publish the raw GODEBUG string for readers of godebugEnv.
	p := new(string)
	*p = godebug
	godebugEnv.Store(p)

	// Apply the non-zero per-variable defaults declared in dbgvars.
	for _, v := range dbgvars {
		if v.def != 0 {
			// Every dbgVar has either a value or an atomic destination.
			if v.value != nil {
				*v.value = v.def
			} else if v.atomic != nil {
				v.atomic.Store(v.def)
			}
		}
	}

	// Apply the compiled-in defaults first, then the environment, so
	// environment settings override the defaults.
	parsegodebug(godebugDefault, nil)

	parsegodebug(godebug, nil)

	debug.malloc = (debug.allocfreetrace | debug.inittrace | debug.sbrk) != 0

	setTraceback(gogetenv("GOTRACEBACK"))
	traceback_env = traceback_cache
}
413
414
415
// reparsedebugvars reapplies debug settings from env, a GODEBUG value
// that changed during execution. Only atomic variables can change
// after startup: env wins over the compiled-in defaults, and any
// atomic variable mentioned in neither is reset to 0.
func reparsedebugvars(env string) {
	seen := make(map[string]bool)
	// Apply environment settings first (right-to-left; see parsegodebug).
	parsegodebug(env, seen)
	// Then compiled-in defaults for variables not yet seen.
	parsegodebug(godebugDefault, seen)
	// Finally, zero any atomic variable that neither source mentioned.
	for _, v := range dbgvars {
		if v.atomic != nil && !seen[v.name] {
			v.atomic.Store(0)
		}
	}
}
429
430
431
432
433
434
435
436
437
438
439
// parsegodebug parses a comma-separated GODEBUG string of key=value
// fields, updating the variables listed in dbgvars.
//
// If seen == nil, this is startup: fields are processed left to right
// so later settings overwrite earlier ones, and startup-only (value)
// variables may be written.
//
// If seen != nil, this is a runtime update: fields are processed
// right to left, seen records keys already handled so earlier
// duplicates are skipped, and only atomic variables are changed.
func parsegodebug(godebug string, seen map[string]bool) {
	for p := godebug; p != ""; {
		var field string
		if seen == nil {
			// Startup: split off the leftmost field.
			i := bytealg.IndexByteString(p, ',')
			if i < 0 {
				field, p = p, ""
			} else {
				field, p = p[:i], p[i+1:]
			}
		} else {
			// Runtime update: split off the rightmost field, scanning
			// backwards for the last comma.
			i := len(p) - 1
			for i >= 0 && p[i] != ',' {
				i--
			}
			if i < 0 {
				p, field = "", p
			} else {
				p, field = p[:i], p[i+1:]
			}
		}
		i := bytealg.IndexByteString(field, '=')
		if i < 0 {
			// Malformed field (no '='); ignore it.
			continue
		}
		key, value := field[:i], field[i+1:]
		if seen[key] {
			// Already handled by a later (rightmost-wins) occurrence.
			continue
		}
		if seen != nil {
			seen[key] = true
		}

		// memprofilerate is special-cased at startup because
		// MemProfileRate is an int, not one of the int32 dbgvars.
		if seen == nil && key == "memprofilerate" {
			if n, ok := atoi(value); ok {
				MemProfileRate = n
			}
		} else {
			for _, v := range dbgvars {
				if v.name == key {
					if n, ok := atoi32(value); ok {
						if seen == nil && v.value != nil {
							*v.value = n
						} else if v.atomic != nil {
							v.atomic.Store(n)
						}
					}
				}
			}
		}
	}

	if debug.cgocheck > 1 {
		throw("cgocheck > 1 mode is no longer supported at runtime. Use GOEXPERIMENT=cgocheck2 at build time instead.")
	}
}
501
502
// setTraceback parses a GOTRACEBACK setting ("none", "single", "all",
// "system", "crash", "wer" on Windows, or a numeric level) and stores
// the packed result in traceback_cache, preserving any bits recorded
// from the environment in traceback_env.
func setTraceback(level string) {
	var t uint32
	switch level {
	case "none":
		t = 0
	case "single", "":
		// Default: level 1, current goroutine only.
		t = 1 << tracebackShift
	case "all":
		t = 1<<tracebackShift | tracebackAll
	case "system":
		t = 2<<tracebackShift | tracebackAll
	case "crash":
		t = 2<<tracebackShift | tracebackAll | tracebackCrash
	case "wer":
		if GOOS == "windows" {
			// Like "crash", plus Windows Error Reporting.
			t = 2<<tracebackShift | tracebackAll | tracebackCrash
			enableWER()
			break
		}
		// "wer" on non-Windows falls through to numeric parsing.
		fallthrough
	default:
		t = tracebackAll
		// Accept a bare integer level if it fits in uint32.
		if n, ok := atoi(level); ok && n == int(uint32(n)) {
			t |= uint32(n) << tracebackShift
		}
	}
	// In c-shared/c-archive build modes, force the crash bit.
	// NOTE(review): rationale not visible here — presumably so a fatal
	// Go error is visible to the embedding C process; confirm upstream.
	if islibrary || isarchive {
		t |= tracebackCrash
	}

	t |= traceback_env

	atomic.Store(&traceback_cache, t)
}
539
540
541
542
543
544
545
546
// timediv divides v by div, returning the 31-bit quotient and, if rem
// is non-nil, storing the remainder in *rem. It uses shift-and-
// subtract over the 31 quotient bits instead of hardware 64-bit
// division. If the quotient does not fit in 31 bits, the result
// saturates to 0x7fffffff and *rem is set to 0.
func timediv(v int64, div int32, rem *int32) int32 {
	var q int32
	for bit := int32(30); bit >= 0; bit-- {
		if chunk := int64(div) << uint(bit); v >= chunk {
			v -= chunk
			q |= 1 << uint(bit)
		}
	}
	// If div still fits into what's left, the true quotient needed
	// more than 31 bits: saturate.
	if v >= int64(div) {
		if rem != nil {
			*rem = 0
		}
		return 0x7fffffff
	}
	if rem != nil {
		*rem = int32(v)
	}
	return q
}
568
569
570
571
// acquirem pins the current goroutine to its m by incrementing the
// m's lock count, and returns the m. Pair with releasem.
func acquirem() *m {
	gp := getg()
	gp.m.locks++
	return gp.m
}
577
578
// releasem undoes acquirem. When the lock count drops to zero and a
// preemption request is pending, it restores the request by setting
// the stack-preempt sentinel in stackguard0.
func releasem(mp *m) {
	gp := getg()
	mp.locks--
	if mp.locks == 0 && gp.preempt {
		// Restore the preemption request that was deferred while locked.
		gp.stackguard0 = stackPreempt
	}
}
587
588
// reflect_typelinks returns, for every active module, the base pointer
// of its types section and its typelink offset list. Exposed to
// package reflect — presumably via a go:linkname directive not visible
// in this view.
func reflect_typelinks() ([]unsafe.Pointer, [][]int32) {
	modules := activeModules()
	sections := []unsafe.Pointer{unsafe.Pointer(modules[0].types)}
	ret := [][]int32{modules[0].typelinks}
	for _, md := range modules[1:] {
		sections = append(sections, unsafe.Pointer(md.types))
		ret = append(ret, md.typelinks)
	}
	return sections, ret
}
599
600
601
602
// reflect_resolveNameOff resolves a name offset relative to a module
// pointer and returns a pointer to the name's bytes. Exposed to
// package reflect (linkname directive not visible in this view).
func reflect_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).Bytes)
}
606
607
608
609
// reflect_resolveTypeOff resolves a type offset relative to rtype and
// returns the referenced type descriptor. Exposed to package reflect
// (linkname directive not visible in this view).
func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(toRType((*_type)(rtype)).typeOff(typeOff(off)))
}
613
614
615
616
// reflect_resolveTextOff resolves a text (code) offset relative to
// rtype and returns the function pointer. Exposed to package reflect
// (linkname directive not visible in this view).
func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return toRType((*_type)(rtype)).textOff(textOff(off))
}
621
622
623
624
// reflectlite_resolveNameOff is the internal/reflectlite counterpart
// of reflect_resolveNameOff: it resolves a name offset relative to a
// module pointer and returns a pointer to the name's bytes (linkname
// directive not visible in this view).
func reflectlite_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).Bytes)
}
628
629
630
631
// reflectlite_resolveTypeOff is the internal/reflectlite counterpart
// of reflect_resolveTypeOff: it resolves a type offset relative to
// rtype and returns the referenced type descriptor (linkname directive
// not visible in this view).
func reflectlite_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(toRType((*_type)(rtype)).typeOff(typeOff(off)))
}
635
636
637
638
// reflect_addReflectOff registers ptr in the reflection offset tables
// and returns its ID, allocating a new one on first sight. IDs count
// downward from -1 — presumably so they cannot collide with
// linker-generated (non-negative) offsets; confirm upstream. Exposed
// to package reflect (linkname directive not visible in this view).
func reflect_addReflectOff(ptr unsafe.Pointer) int32 {
	reflectOffsLock()
	if reflectOffs.m == nil {
		// Lazily initialize the forward (id -> ptr) and reverse
		// (ptr -> id) maps.
		reflectOffs.m = make(map[int32]unsafe.Pointer)
		reflectOffs.minv = make(map[unsafe.Pointer]int32)
		reflectOffs.next = -1
	}
	id, found := reflectOffs.minv[ptr]
	if !found {
		// First time seeing this pointer: assign the next negative ID.
		id = reflectOffs.next
		reflectOffs.next--
		reflectOffs.m[id] = ptr
		reflectOffs.minv[ptr] = id
	}
	reflectOffsUnlock()
	return id
}
656
View as plain text