src/runtime/race_ppc64le.s
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build race

#include "go_asm.h"
#include "go_tls.h"
#include "funcdata.h"
#include "textflag.h"
#include "asm_ppc64x.h"

// The following functions allow calling the clang-compiled race runtime directly
// from Go code without going all the way through cgo.
// First, it's much faster (up to 50% speedup for real Go programs).
// Second, it eliminates race-related special cases from cgocall and the scheduler.
// Third, in the long term it will allow removing the cyclic runtime/race dependency on cmd/go.

// A brief recap of the ppc64le calling convention.
// Arguments are passed in R3, R4, R5 ...
// SP must be 16-byte aligned.

// Note that for ppc64x, LLVM follows the standard ABI and
// expects arguments in registers, so these functions move
// the arguments from storage to the registers expected
// by the ABI.

// When calling from Go to Clang tsan code:
// R3 is the 1st argument and is usually the ThreadState*
// R4-? are the 2nd, 3rd, 4th, etc. arguments

// When calling racecalladdr:
// R8 is the call target address

// The race ctx is passed in R3 and loaded in
// racecalladdr.
//
// The sequence used to get the race ctx:
//	MOVD runtime·tls_g(SB), R10 // Address of TLS variable
//	MOVD 0(R10), g              // g = R30
//	MOVD g_racectx(g), R3       // racectx == ThreadState

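// For orientation only (an illustrative sketch, not code from this file): with
// -race the compiler instruments memory accesses, so Go code such as
//
//	func f(p *int) int { return *p }
//
// is compiled roughly as if it were
//
//	func f(p *int) int {
//		runtime.raceread(uintptr(unsafe.Pointer(p)))
//		return *p
//	}
//
// which is why the ABIInternal entry points below receive addr in R3 and can
// use LR as the caller's pc.
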
// func runtime·raceread(addr uintptr)
// Called from instrumented Go code
TEXT runtime·raceread<ABIInternal>(SB), NOSPLIT, $0-8
	MOVD R3, R4 // addr
	MOVD LR, R5 // caller pc, set by the BL that called us
	// void __tsan_read(ThreadState *thr, void *addr, void *pc);
	MOVD $__tsan_read(SB), R8
	BR racecalladdr<>(SB)

TEXT runtime·RaceRead(SB), NOSPLIT, $0-8
	BR runtime·raceread(SB)

// void runtime·racereadpc(void *addr, void *callpc, void *pc)
TEXT runtime·racereadpc(SB), NOSPLIT, $0-24
	MOVD addr+0(FP), R4
	MOVD callpc+8(FP), R5
	MOVD pc+16(FP), R6
	// void __tsan_read_pc(ThreadState *thr, void *addr, void *callpc, void *pc);
	MOVD $__tsan_read_pc(SB), R8
	BR racecalladdr<>(SB)

// func runtime·racewrite(addr uintptr)
// Called from instrumented Go code
TEXT runtime·racewrite<ABIInternal>(SB), NOSPLIT, $0-8
	MOVD R3, R4 // addr
	MOVD LR, R5 // caller has set LR via BL inst
	// void __tsan_write(ThreadState *thr, void *addr, void *pc);
	MOVD $__tsan_write(SB), R8
	BR racecalladdr<>(SB)

TEXT runtime·RaceWrite(SB), NOSPLIT, $0-8
	JMP runtime·racewrite(SB)

// void runtime·racewritepc(void *addr, void *callpc, void *pc)
TEXT runtime·racewritepc(SB), NOSPLIT, $0-24
	MOVD addr+0(FP), R4
	MOVD callpc+8(FP), R5
	MOVD pc+16(FP), R6
	// void __tsan_write_pc(ThreadState *thr, void *addr, void *callpc, void *pc);
	MOVD $__tsan_write_pc(SB), R8
	BR racecalladdr<>(SB)

// func runtime·racereadrange(addr, size uintptr)
// Called from instrumented Go code.
TEXT runtime·racereadrange<ABIInternal>(SB), NOSPLIT, $0-16
	MOVD R4, R5 // size
	MOVD R3, R4 // addr
	MOVD LR, R6
	// void __tsan_read_range(ThreadState *thr, void *addr, uintptr size, void *pc);
	MOVD $__tsan_read_range(SB), R8
	BR racecalladdr<>(SB)

// void runtime·racereadrangepc1(void *addr, uintptr sz, void *pc)
TEXT runtime·racereadrangepc1(SB), NOSPLIT, $0-24
	MOVD addr+0(FP), R4
	MOVD size+8(FP), R5
	MOVD pc+16(FP), R6
	ADD $4, R6 // tsan wants return addr
	// void __tsan_read_range(ThreadState *thr, void *addr, uintptr size, void *pc);
	MOVD $__tsan_read_range(SB), R8
	BR racecalladdr<>(SB)

TEXT runtime·RaceReadRange(SB), NOSPLIT, $0-16
	BR runtime·racereadrange(SB)

// func runtime·racewriterange(addr, size uintptr)
// Called from instrumented Go code.
TEXT runtime·racewriterange<ABIInternal>(SB), NOSPLIT, $0-16
	MOVD R4, R5 // size
	MOVD R3, R4 // addr
	MOVD LR, R6
	// void __tsan_write_range(ThreadState *thr, void *addr, uintptr size, void *pc);
	MOVD $__tsan_write_range(SB), R8
	BR racecalladdr<>(SB)

TEXT runtime·RaceWriteRange(SB), NOSPLIT, $0-16
	BR runtime·racewriterange(SB)

// void runtime·racewriterangepc1(void *addr, uintptr sz, void *pc)
// Called from instrumented Go code
TEXT runtime·racewriterangepc1(SB), NOSPLIT, $0-24
	MOVD addr+0(FP), R4
	MOVD size+8(FP), R5
	MOVD pc+16(FP), R6
	ADD $4, R6 // tsan wants return addr
	// void __tsan_write_range(ThreadState *thr, void *addr, uintptr size, void *pc);
	MOVD $__tsan_write_range(SB), R8
	BR racecalladdr<>(SB)

// Call a __tsan function from Go code.
// R8 = tsan function address
// R3 = *ThreadState a.k.a. g_racectx from g (loaded below)
// R4 = addr passed to __tsan function
//
// Loads the goroutine context and invokes racecall if addr is in a
// race-instrumented range; otherwise returns without calling tsan.
// The remaining arguments are already set by the caller.
TEXT racecalladdr<>(SB), NOSPLIT, $0-0
	MOVD runtime·tls_g(SB), R10
	MOVD 0(R10), g
	MOVD g_racectx(g), R3 // goroutine context
	// Check that addr is within [arenastart, arenaend) or within [racedatastart, racedataend).
	MOVD runtime·racearenastart(SB), R9
	CMP R4, R9
	BLT data
	MOVD runtime·racearenaend(SB), R9
	CMP R4, R9
	BLT call
data:
	MOVD runtime·racedatastart(SB), R9
	CMP R4, R9
	BLT ret
	MOVD runtime·racedataend(SB), R9
	CMP R4, R9
	BGE ret // addr >= racedataend: outside the data range
call:
	// Careful!! racecall will save LR on its
	// stack, which is OK as long as racecalladdr
	// doesn't change in a way that needs a stack frame.
	// racecall should return to the caller of
	// racecalladdr.
	BR racecall<>(SB)
ret:
	RET
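
// The address filter above, as Go-like pseudocode (illustrative only; it is a
// restatement of the checks in racecalladdr, not code that exists elsewhere):
//
//	if (addr >= racearenastart && addr < racearenaend) ||
//		(addr >= racedatastart && addr < racedataend) {
//		racecall(fn, racectx, addr, ...) // tail-branch to racecall
//	}
//	// otherwise: return without notifying tsan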

// func runtime·racefuncenter(pc uintptr)
// Called from instrumented Go code.
TEXT runtime·racefuncenter(SB), NOSPLIT, $0-8
	MOVD callpc+0(FP), R8
	BR racefuncenter<>(SB)

// Common code for racefuncenter
// R8 = caller's return address
TEXT racefuncenter<>(SB), NOSPLIT, $0-0
	MOVD runtime·tls_g(SB), R10
	MOVD 0(R10), g
	MOVD g_racectx(g), R3 // goroutine racectx aka *ThreadState
	MOVD R8, R4 // caller pc set by caller in R8
	// void __tsan_func_enter(ThreadState *thr, void *pc);
	MOVD $__tsan_func_enter(SB), R8
	BR racecall<>(SB)
	RET

// func runtime·racefuncexit()
// Called from instrumented Go code.
TEXT runtime·racefuncexit(SB), NOSPLIT, $0-0
	MOVD runtime·tls_g(SB), R10
	MOVD 0(R10), g
	MOVD g_racectx(g), R3 // goroutine racectx aka *ThreadState
	// void __tsan_func_exit(ThreadState *thr);
	MOVD $__tsan_func_exit(SB), R8
	BR racecall<>(SB)
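
// For orientation (illustrative; this is what the compiler arranges under
// -race, not literal Go source): every instrumented function is bracketed by
// enter/exit events, roughly
//
//	func f() {
//		racefuncenter(getcallerpc())
//		// ... function body, with raceread/racewrite calls inserted ...
//		racefuncexit()
//	}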

// Atomic operations for the sync/atomic package.
// Each forwards to the corresponding __tsan_go_atomic* function via
// racecallatomic; the unsigned and uintptr variants branch to the signed ones.
// R6 = addr of arguments passed to this function
// R3, R4, R5 set in racecallatomic
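
// Sketch of the shared shape of these calls (an assumption consistent with the
// prototypes quoted below, not a definition from this file): every
// __tsan_go_atomic* entry point takes (thr, cpc, pc, a), where a points at the
// Go caller's argument frame, e.g. for LoadInt32
//
//	a+0: addr (read by tsan)
//	a+8: val  (result written back into the frame by tsan)
//
// so only the address of the argument area needs to be passed in R6.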

// Load atomic in tsan
TEXT sync∕atomic·LoadInt32(SB), NOSPLIT, $0-12
	GO_ARGS
	// void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
	MOVD $__tsan_go_atomic32_load(SB), R8
	ADD $32, R1, R6 // addr of caller's 1st arg
	BR racecallatomic<>(SB)
	RET

TEXT sync∕atomic·LoadInt64(SB), NOSPLIT, $0-16
	GO_ARGS
	// void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
	MOVD $__tsan_go_atomic64_load(SB), R8
	ADD $32, R1, R6 // addr of caller's 1st arg
	BR racecallatomic<>(SB)
	RET

TEXT sync∕atomic·LoadUint32(SB), NOSPLIT, $0-12
	GO_ARGS
	BR sync∕atomic·LoadInt32(SB)

TEXT sync∕atomic·LoadUint64(SB), NOSPLIT, $0-16
	GO_ARGS
	BR sync∕atomic·LoadInt64(SB)

TEXT sync∕atomic·LoadUintptr(SB), NOSPLIT, $0-16
	GO_ARGS
	BR sync∕atomic·LoadInt64(SB)

TEXT sync∕atomic·LoadPointer(SB), NOSPLIT, $0-16
	GO_ARGS
	BR sync∕atomic·LoadInt64(SB)

// Store atomic in tsan
TEXT sync∕atomic·StoreInt32(SB), NOSPLIT, $0-12
	GO_ARGS
	// void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
	MOVD $__tsan_go_atomic32_store(SB), R8
	ADD $32, R1, R6 // addr of caller's 1st arg
	BR racecallatomic<>(SB)

TEXT sync∕atomic·StoreInt64(SB), NOSPLIT, $0-16
	GO_ARGS
	// void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
	MOVD $__tsan_go_atomic64_store(SB), R8
	ADD $32, R1, R6 // addr of caller's 1st arg
	BR racecallatomic<>(SB)

TEXT sync∕atomic·StoreUint32(SB), NOSPLIT, $0-12
	GO_ARGS
	BR sync∕atomic·StoreInt32(SB)

TEXT sync∕atomic·StoreUint64(SB), NOSPLIT, $0-16
	GO_ARGS
	BR sync∕atomic·StoreInt64(SB)

TEXT sync∕atomic·StoreUintptr(SB), NOSPLIT, $0-16
	GO_ARGS
	BR sync∕atomic·StoreInt64(SB)

// Swap in tsan
TEXT sync∕atomic·SwapInt32(SB), NOSPLIT, $0-20
	GO_ARGS
	// void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
	MOVD $__tsan_go_atomic32_exchange(SB), R8
	ADD $32, R1, R6 // addr of caller's 1st arg
	BR racecallatomic<>(SB)

TEXT sync∕atomic·SwapInt64(SB), NOSPLIT, $0-24
	GO_ARGS
	// void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
	MOVD $__tsan_go_atomic64_exchange(SB), R8
	ADD $32, R1, R6 // addr of caller's 1st arg
	BR racecallatomic<>(SB)

TEXT sync∕atomic·SwapUint32(SB), NOSPLIT, $0-20
	GO_ARGS
	BR sync∕atomic·SwapInt32(SB)

TEXT sync∕atomic·SwapUint64(SB), NOSPLIT, $0-24
	GO_ARGS
	BR sync∕atomic·SwapInt64(SB)

TEXT sync∕atomic·SwapUintptr(SB), NOSPLIT, $0-24
	GO_ARGS
	BR sync∕atomic·SwapInt64(SB)

// Add atomic in tsan
TEXT sync∕atomic·AddInt32(SB), NOSPLIT, $0-20
	GO_ARGS
	// void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
	MOVD $__tsan_go_atomic32_fetch_add(SB), R8
	ADD $64, R1, R6 // addr of caller's 1st arg
	BL racecallatomic<>(SB)
	// The tsan fetch_add result is not what Go expects (Add* returns the new
	// value, fetch_add stores the old one), so add 'add' to the result.
	MOVW add+8(FP), R3
	MOVW ret+16(FP), R4
	ADD R3, R4, R3
	MOVW R3, ret+16(FP)
	RET

TEXT sync∕atomic·AddInt64(SB), NOSPLIT, $0-24
	GO_ARGS
	// void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
	MOVD $__tsan_go_atomic64_fetch_add(SB), R8
	ADD $64, R1, R6 // addr of caller's 1st arg
	BL racecallatomic<>(SB)
	// The tsan fetch_add result is not as expected by Go,
	// so the 'add' must be added to the result.
	MOVD add+8(FP), R3
	MOVD ret+16(FP), R4
	ADD R3, R4, R3
	MOVD R3, ret+16(FP)
	RET
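
// In Go terms the fixup above is (illustrative only; tsanFetchAdd is a
// hypothetical stand-in for the __tsan_go_atomic*_fetch_add call):
//
//	old := tsanFetchAdd(addr, delta) // tsan writes the pre-add value into ret
//	ret = old + delta                // sync/atomic.Add* must return the new value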

TEXT sync∕atomic·AddUint32(SB), NOSPLIT, $0-20
	GO_ARGS
	BR sync∕atomic·AddInt32(SB)

TEXT sync∕atomic·AddUint64(SB), NOSPLIT, $0-24
	GO_ARGS
	BR sync∕atomic·AddInt64(SB)

TEXT sync∕atomic·AddUintptr(SB), NOSPLIT, $0-24
	GO_ARGS
	BR sync∕atomic·AddInt64(SB)

// CompareAndSwap in tsan
TEXT sync∕atomic·CompareAndSwapInt32(SB), NOSPLIT, $0-17
	GO_ARGS
	// void __tsan_go_atomic32_compare_exchange(
	//	ThreadState *thr, uptr cpc, uptr pc, u8 *a)
	MOVD $__tsan_go_atomic32_compare_exchange(SB), R8
	ADD $32, R1, R6 // addr of caller's 1st arg
	BR racecallatomic<>(SB)

TEXT sync∕atomic·CompareAndSwapInt64(SB), NOSPLIT, $0-25
	GO_ARGS
	// void __tsan_go_atomic64_compare_exchange(
	//	ThreadState *thr, uptr cpc, uptr pc, u8 *a)
	MOVD $__tsan_go_atomic64_compare_exchange(SB), R8
	ADD $32, R1, R6 // addr of caller's 1st arg
	BR racecallatomic<>(SB)

TEXT sync∕atomic·CompareAndSwapUint32(SB), NOSPLIT, $0-17
	GO_ARGS
	BR sync∕atomic·CompareAndSwapInt32(SB)

TEXT sync∕atomic·CompareAndSwapUint64(SB), NOSPLIT, $0-25
	GO_ARGS
	BR sync∕atomic·CompareAndSwapInt64(SB)

TEXT sync∕atomic·CompareAndSwapUintptr(SB), NOSPLIT, $0-25
	GO_ARGS
	BR sync∕atomic·CompareAndSwapInt64(SB)
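
// Note (an assumption consistent with the wrappers above, which move nothing
// back after the call): the tsan compare_exchange writes the CompareAndSwap*
// result bool directly into the caller's argument frame through the arg-list
// pointer (e.g. ret+16(FP) for the 32-bit variants, ret+24(FP) for the 64-bit
// ones), so no fixup is needed here.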

// Common function used to call tsan's atomic functions
// R3 = *ThreadState
// R4 = caller pc (cpc) read from the stack below
// R5 = pc of the __tsan function being called
// R6 = addr of incoming arg list
// R8 contains addr of target function.
TEXT racecallatomic<>(SB), NOSPLIT, $0-0
	// Trigger SIGSEGV early if the address passed to the atomic function is bad.
	MOVD (R6), R7 // 1st arg is addr
	MOVD (R7), R9 // segv here if addr is bad
	// Check that addr is within [arenastart, arenaend) or within [racedatastart, racedataend).
	MOVD runtime·racearenastart(SB), R9
	CMP R7, R9
	BLT racecallatomic_data
	MOVD runtime·racearenaend(SB), R9
	CMP R7, R9
	BLT racecallatomic_ok
racecallatomic_data:
	MOVD runtime·racedatastart(SB), R9
	CMP R7, R9
	BLT racecallatomic_ignore
	MOVD runtime·racedataend(SB), R9
	CMP R7, R9
	BGE racecallatomic_ignore
racecallatomic_ok:
	// Addr is within the good range, call the atomic function.
	MOVD runtime·tls_g(SB), R10
	MOVD 0(R10), g
	MOVD g_racectx(g), R3 // goroutine racectx aka *ThreadState
	MOVD R8, R5 // pc is the function called
	MOVD (R1), R4 // caller pc from stack
	BL racecall<>(SB) // BL needed to maintain stack consistency
	RET
racecallatomic_ignore:
	// Addr is outside the good range.
	// Call __tsan_go_ignore_sync_begin to ignore synchronization during the atomic op.
	// An attempt to synchronize on the address would cause a crash.
	MOVD R8, R15 // save the original function
	MOVD R6, R17 // save the original arg list addr
	MOVD $__tsan_go_ignore_sync_begin(SB), R8 // func addr to call
	MOVD runtime·tls_g(SB), R10
	MOVD 0(R10), g
	MOVD g_racectx(g), R3 // goroutine context
	BL racecall<>(SB)
	MOVD R15, R8 // restore the original function
	MOVD R17, R6 // restore arg list addr
	// Call the atomic function.
	// racecall will call LLVM race code which might clobber r30 (g)
	MOVD runtime·tls_g(SB), R10
	MOVD 0(R10), g

	MOVD g_racectx(g), R3
	MOVD R8, R4 // pc of the tsan function being called
	MOVD (R1), R5 // caller pc from latest LR
	BL racecall<>(SB)
	// Call __tsan_go_ignore_sync_end.
	MOVD $__tsan_go_ignore_sync_end(SB), R8
	MOVD g_racectx(g), R3 // goroutine context; racecall reloads g before returning
	BL racecall<>(SB)
	RET
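
// Control flow of the ignore path above, restated as pseudocode (illustrative
// only, derived from the instructions it summarizes):
//
//	__tsan_go_ignore_sync_begin(racectx)
//	__tsan_go_atomic*_op(racectx, cpc, pc, args) // the op still runs, sync is ignored
//	__tsan_go_ignore_sync_end(racectx)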

// void runtime·racecall(void(*f)(...), ...)
// Calls C function f from race runtime and passes up to 4 arguments to it.
// The arguments are never heap-object-preserving pointers, so we pretend there are no arguments.
TEXT runtime·racecall(SB), NOSPLIT, $0-0
	MOVD fn+0(FP), R8
	MOVD arg0+8(FP), R3
	MOVD arg1+16(FP), R4
	MOVD arg2+24(FP), R5
	MOVD arg3+32(FP), R6
	JMP racecall<>(SB)
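
// On the Go side this entry point corresponds to a declaration roughly of the
// form (see runtime/race.go; shown here only for orientation):
//
//	//go:noescape
//	func racecall(fn *byte, arg0, arg1, arg2, arg3 uintptr)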

// Switches to the g0 stack if necessary and calls the C function whose
// address is in R8. Arguments were already loaded into the C ABI registers.
TEXT racecall<>(SB), NOSPLIT, $0-0
	// Set the LR slot for the ppc64 ABI
	MOVD LR, R10
	MOVD R10, 0(R1) // Go expectation
	MOVD R10, 16(R1) // C ABI
	// Get info from the current goroutine
	MOVD runtime·tls_g(SB), R10 // g offset in TLS
	MOVD 0(R10), g
	MOVD g_m(g), R7 // m for g
	MOVD R1, R16 // callee-saved, preserved across C call
	MOVD m_g0(R7), R10 // g0 for m
	CMP R10, g // already on g0?
	BEQ call // yes, no stack switch needed
	MOVD (g_sched+gobuf_sp)(R10), R1 // switch R1 to the g0 stack
call:
	// prepare frame for C ABI
	SUB $32, R1 // create frame for callee saving LR, CR, R2 etc.
	RLDCR $0, R1, $~15, R1 // align SP to 16 bytes
	MOVD R8, CTR // R8 = target function addr
	MOVD R8, R12 // expected by PPC64 ABI
	BL (CTR)
	XOR R0, R0 // clear R0 on return from Clang
	MOVD R16, R1 // restore R1; R16 nonvol in Clang
	MOVD runtime·tls_g(SB), R10 // find correct g
	MOVD 0(R10), g
	MOVD 16(R1), R10 // LR was saved away, restore for return
	MOVD R10, LR
	RET
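
// The stack handling above, as pseudocode (illustrative restatement of the
// instructions, not separate code):
//
//	save LR at 0(R1) and 16(R1)
//	if g != g.m.g0 {
//		R1 = g.m.g0.sched.sp // run the C code on the g0 stack
//	}
//	make a small 16-byte-aligned C frame, R12 = CTR = fn, call fn
//	restore R1 from R16, reload g from TLS, restore LR, return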

// C->Go callback thunk that allows calling runtime·racecallback from C code.
// The direct Go->C race call has only switched SP; finish the g->g0 switch by setting the correct g.
// The overall effect of the Go->C->Go call chain is similar to that of mcall.
// RARG0 contains the command code. RARG1 contains the command-specific context.
// See racecallback for command codes.
TEXT runtime·racecallbackthunk(SB), NOSPLIT, $-8
	// Handle command raceGetProcCmd (0) here.
	// First, the code below assumes that we are on curg, while raceGetProcCmd
	// can be executed on g0. Second, it is called frequently, so it will
	// benefit from this fast path.
	XOR R0, R0 // clear R0 since we came from C code
	CMP R3, $0
	BNE rest
	// Fast path: save g (R30, nonvolatile in the C ABI), fetch p.raceprocctx
	// through TLS, then restore g before returning to C.
	MOVD g, R9
	MOVD runtime·tls_g(SB), R10
	MOVD 0(R10), g
	MOVD g_m(g), R3
	MOVD m_p(R3), R3
	MOVD p_raceprocctx(R3), R3
	MOVD R3, (R4)
	MOVD R9, g // restore g (R30)
	RET

// This is all similar to what cgo does
// Save registers according to the ppc64 ABI
rest:
	MOVD LR, R10 // save link register
	MOVD R10, 16(R1)
	MOVW CR, R10
	MOVW R10, 8(R1)
	MOVDU R1, -336(R1) // Allocate frame needed for outargs and register save area

	MOVD R14, 328(R1)
	MOVD R15, 48(R1)
	MOVD R16, 56(R1)
	MOVD R17, 64(R1)
	MOVD R18, 72(R1)
	MOVD R19, 80(R1)
	MOVD R20, 88(R1)
	MOVD R21, 96(R1)
	MOVD R22, 104(R1)
	MOVD R23, 112(R1)
	MOVD R24, 120(R1)
	MOVD R25, 128(R1)
	MOVD R26, 136(R1)
	MOVD R27, 144(R1)
	MOVD R28, 152(R1)
	MOVD R29, 160(R1)
	MOVD g, 168(R1) // R30
	MOVD R31, 176(R1)
	FMOVD F14, 184(R1)
	FMOVD F15, 192(R1)
	FMOVD F16, 200(R1)
	FMOVD F17, 208(R1)
	FMOVD F18, 216(R1)
	FMOVD F19, 224(R1)
	FMOVD F20, 232(R1)
	FMOVD F21, 240(R1)
	FMOVD F22, 248(R1)
	FMOVD F23, 256(R1)
	FMOVD F24, 264(R1)
	FMOVD F25, 272(R1)
	FMOVD F26, 280(R1)
	FMOVD F27, 288(R1)
	FMOVD F28, 296(R1)
	FMOVD F29, 304(R1)
	FMOVD F30, 312(R1)
	FMOVD F31, 320(R1)

	MOVD R3, FIXED_FRAME+0(R1)
	MOVD R4, FIXED_FRAME+8(R1)

	MOVD runtime·tls_g(SB), R10
	MOVD 0(R10), g

	MOVD g_m(g), R7
	MOVD m_g0(R7), R8
	CMP g, R8
	BEQ noswitch

	MOVD R8, g // set g = m->g0

	BL runtime·racecallback(SB)

	// All registers are clobbered after Go code, reload.
	MOVD runtime·tls_g(SB), R10
	MOVD 0(R10), g

	MOVD g_m(g), R7
	MOVD m_curg(R7), g // restore g = m->curg

ret:
	MOVD 328(R1), R14
	MOVD 48(R1), R15
	MOVD 56(R1), R16
	MOVD 64(R1), R17
	MOVD 72(R1), R18
	MOVD 80(R1), R19
	MOVD 88(R1), R20
	MOVD 96(R1), R21
	MOVD 104(R1), R22
	MOVD 112(R1), R23
	MOVD 120(R1), R24
	MOVD 128(R1), R25
	MOVD 136(R1), R26
	MOVD 144(R1), R27
	MOVD 152(R1), R28
	MOVD 160(R1), R29
	MOVD 168(R1), g // R30
	MOVD 176(R1), R31
	FMOVD 184(R1), F14
	FMOVD 192(R1), F15
	FMOVD 200(R1), F16
	FMOVD 208(R1), F17
	FMOVD 216(R1), F18
	FMOVD 224(R1), F19
	FMOVD 232(R1), F20
	FMOVD 240(R1), F21
	FMOVD 248(R1), F22
	FMOVD 256(R1), F23
	FMOVD 264(R1), F24
	FMOVD 272(R1), F25
	FMOVD 280(R1), F26
	FMOVD 288(R1), F27
	FMOVD 296(R1), F28
	FMOVD 304(R1), F29
	FMOVD 312(R1), F30
	FMOVD 320(R1), F31

	ADD $336, R1
	MOVD 8(R1), R10
	MOVFL R10, $0xff // restore CR
	MOVD 16(R1), R10 // restore LR saved at entry so we return to the C caller
	MOVD R10, LR
	RET

noswitch:
	BL runtime·racecallback(SB)
	JMP ret
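
// The Go-side handler reached via BL runtime·racecallback above takes the
// command code and a command-specific context pointer; its Go declaration is
// roughly (see runtime/race.go; shown here only for orientation):
//
//	func racecallback(cmd uintptr, ctx unsafe.Pointer)
//
// raceGetProcCmd never reaches it on this path because it is handled by the
// fast path at the top of racecallbackthunk.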

// tls_g, g value for each thread in TLS
GLOBL runtime·tls_g+0(SB), TLSBSS+DUPOK, $8