Text file src/runtime/libfuzzer_amd64.s

// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build libfuzzer

#include "go_asm.h"
#include "go_tls.h"
#include "textflag.h"

// Based on race_amd64.s; see commentary there.

#ifdef GOOS_windows
#define RARG0 CX
#define RARG1 DX
#define RARG2 R8
#define RARG3 R9
#else
#define RARG0 DI
#define RARG1 SI
#define RARG2 DX
#define RARG3 CX
#endif

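// For reference, RARG0 through RARG3 name the registers that receive the
// first four integer arguments of a C function under the target's C calling
// convention. A minimal C sketch (`hook` is a hypothetical function, used
// only to illustrate the mapping):
//
//	void hook(uintptr_t a0, uintptr_t a1, uintptr_t a2, uintptr_t a3);
//	// System V AMD64 (non-Windows): a0..a3 arrive in DI, SI, DX, CX.
//	// Windows x64:                  a0..a3 arrive in CX, DX, R8, R9.
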
// void runtime·libfuzzerCall4(fn, hookId int, s1, s2 unsafe.Pointer, result uintptr)
// Calls C function fn from libFuzzer and passes 4 arguments to it.
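// For illustration, one libFuzzer hook with a matching four-argument shape is
// the weak strcmp hook from compiler-rt's sanitizer interface (an assumption
// about the callee, not something this file defines):
//
//	void __sanitizer_weak_hook_strcmp(void *called_pc, const char *s1,
//	                                  const char *s2, int result);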
TEXT	runtime·libfuzzerCall4(SB), NOSPLIT, $0-40
	MOVQ	fn+0(FP), AX
	MOVQ	hookId+8(FP), RARG0
	MOVQ	s1+16(FP), RARG1
	MOVQ	s2+24(FP), RARG2
	MOVQ	result+32(FP), RARG3

	get_tls(R12)
	MOVQ	g(R12), R14
	MOVQ	g_m(R14), R13

	// Switch to g0 stack.
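	// C code may need more stack than a goroutine stack guarantees, so fn
	// has to run on the system stack.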
	MOVQ	SP, R12		// callee-saved, preserved across the CALL
	MOVQ	m_g0(R13), R10
	CMPQ	R10, R14
	JE	call	// already on g0
	MOVQ	(g_sched+gobuf_sp)(R10), SP
call:
	ANDQ	$~15, SP	// alignment for gcc ABI
	CALL	AX
	MOVQ	R12, SP
	RET

// void runtime·libfuzzerCallTraceIntCmp(fn, arg0, arg1, fakePC uintptr)
// Calls C function fn from libFuzzer and passes 2 arguments to it after
// manipulating the return address so that libFuzzer's integer compare hooks
// work.
// libFuzzer's compare hooks obtain the caller's address from the compiler
// builtin __builtin_return_address. Since we always invoke the hooks from
// the same native function, this builtin would always return the same value.
// Internally, the libFuzzer hooks call through to the always inlined
// HandleCmp and thus can't be mimicked without patching libFuzzer.
//
// We solve this problem via an inline assembly trampoline construction that
// translates a runtime argument `fake_pc` in the range [0, 512) into a call to
// a hook with a fake return address whose lower 9 bits are `fake_pc` up to a
// constant shift. This is achieved by pushing a return address pointing into
// 512 ret instructions at offset `fake_pc` onto the stack and then jumping
// directly to the address of the hook.
//
// Note: We only set the lowest 9 bits of the return address since only these
// bits are used by the libFuzzer value profiling mode for integer compares, see
// https://github.com/llvm/llvm-project/blob/704d92607d26e696daba596b72cb70effe79a872/compiler-rt/lib/fuzzer/FuzzerTracePC.cpp#L390
// as well as
// https://github.com/llvm/llvm-project/blob/704d92607d26e696daba596b72cb70effe79a872/compiler-rt/lib/fuzzer/FuzzerValueBitMap.h#L34
// ValueProfileMap.AddValue() truncates its argument to 16 bits and shifts the
// PC to the left by log_2(128)=7, which means that only the lowest 16 - 7 bits
// of the return address matter. String compare hooks use the lowest 12 bits,
// but take the return address as an argument and thus don't require the
// indirection through a trampoline.
//
// TODO: Remove the inline assembly trampoline once a PC argument has been
// added to libFuzzer's int compare hooks.
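//
// A hedged C sketch of the arithmetic described above (simplified from the
// linked libFuzzer sources; value_profile_index is a hypothetical name, not
// a libFuzzer symbol):
//
//	#include <stdint.h>
//
//	// The value-profile feature for an integer compare keeps only
//	// pc*128 + distance modulo the 2^16-entry map. Since 128 == 1 << 7,
//	// bits 9 and up of pc are shifted out of the 16-bit index, so a sled
//	// of 512 == 2^9 RETs can reproduce every distinct feature.
//	static uint32_t value_profile_index(uintptr_t pc, uint32_t distance) {
//		return (uint32_t)((pc * 128 + distance) % (1u << 16));
//	}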
TEXT	runtime·libfuzzerCallTraceIntCmp(SB), NOSPLIT, $0-32
	MOVQ	fn+0(FP), AX
	MOVQ	arg0+8(FP), RARG0
	MOVQ	arg1+16(FP), RARG1
	MOVQ	fakePC+24(FP), R8

	get_tls(R12)
	MOVQ	g(R12), R14
	MOVQ	g_m(R14), R13

	// Switch to g0 stack.
	MOVQ	SP, R12		// callee-saved, preserved across the CALL
	MOVQ	m_g0(R13), R10
	CMPQ	R10, R14
	JE	call	// already on g0
	MOVQ	(g_sched+gobuf_sp)(R10), SP
call:
	ANDQ	$~15, SP	// alignment for gcc ABI
	SUBQ	$8, SP
	// Load the address of the end of the function and push it onto the stack.
	// This address will be jumped to after executing the return instruction
	// from the return sled. There we reset the stack pointer and return.
	MOVQ    $end_of_function<>(SB), BX
	PUSHQ   BX
	// Load the starting address of the return sled into BX.
	MOVQ    $ret_sled<>(SB), BX
	// Load the address of the i'th return instruction from the return sled.
	// The index is given in the fakePC argument.
	ADDQ    R8, BX
	PUSHQ   BX
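	// Stack layout at this point (top of stack first):
	//	0(SP)	ret_sled<> + fakePC	fake return address seen by the hook
	//	8(SP)	end_of_function<>	target of the sled's RET
	// The hook returns into the sled at offset fakePC; that RET then pops
	// end_of_function<>, which restores SP and returns to the Go caller.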
	// Call the original function with the fakePC return address on the stack.
	// Function arguments arg0 and arg1 are passed in the registers specified
	// by the x64 calling convention.
	JMP     AX
// This code will not be executed and is only there to satisfy the assembler's
// check for a balanced stack.
not_reachable:
	POPQ    BX
	POPQ    BX
	RET

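// end_of_function<> undoes the g0 stack switch performed above and returns
// to the Go caller of libfuzzerCallTraceIntCmp.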
TEXT end_of_function<>(SB), NOSPLIT, $0-0
	MOVQ	R12, SP
	RET

#define REPEAT_8(a) a \
  a \
  a \
  a \
  a \
  a \
  a \
  a

#define REPEAT_512(a) REPEAT_8(REPEAT_8(REPEAT_8(a)))

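// ret_sled<> consists of 512 = 8*8*8 one-byte RET instructions: REPEAT_8
// emits eight copies of its argument and REPEAT_512 nests it three times.
// The i'th RET therefore lives at ret_sled<>+i and serves as the fake return
// address for fakePC == i.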
TEXT ret_sled<>(SB), NOSPLIT, $0-0
	REPEAT_512(RET)

// void runtime·libfuzzerCallWithTwoByteBuffers(fn, start, end *byte)
// Calls C function fn from libFuzzer and passes 2 arguments of type *byte to it.
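// For illustration, one libFuzzer entry point with a matching two-pointer
// shape is the 8-bit counter registration from compiler-rt's SanitizerCoverage
// interface (an assumption about the callee, not something this file defines):
//
//	void __sanitizer_cov_8bit_counters_init(char *start, char *stop);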
TEXT	runtime·libfuzzerCallWithTwoByteBuffers(SB), NOSPLIT, $0-24
	MOVQ	fn+0(FP), AX
	MOVQ	start+8(FP), RARG0
	MOVQ	end+16(FP), RARG1

	get_tls(R12)
	MOVQ	g(R12), R14
	MOVQ	g_m(R14), R13

	// Switch to g0 stack.
	MOVQ	SP, R12		// callee-saved, preserved across the CALL
	MOVQ	m_g0(R13), R10
	CMPQ	R10, R14
	JE	call	// already on g0
	MOVQ	(g_sched+gobuf_sp)(R10), SP
call:
	ANDQ	$~15, SP	// alignment for gcc ABI
	CALL	AX
	MOVQ	R12, SP
	RET
