Text file src/runtime/libfuzzer_amd64.s

     1  // Copyright 2019 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  //go:build libfuzzer
     6  
     7  #include "go_asm.h"
     8  #include "go_tls.h"
     9  #include "textflag.h"
    10  
    11  // Based on race_amd64.s; see commentary there.
    12  
    13  #ifdef GOOS_windows
    14  #define RARG0 CX
    15  #define RARG1 DX
    16  #define RARG2 R8
    17  #define RARG3 R9
    18  #else
    19  #define RARG0 DI
    20  #define RARG1 SI
    21  #define RARG2 DX
    22  #define RARG3 CX
    23  #endif
    24  
    25  // void runtime·libfuzzerCall4(fn, hookId int, s1, s2 unsafe.Pointer, result uintptr)
    26  // Calls C function fn from libFuzzer and passes 4 arguments to it.
        // The arguments are placed in the platform's C ABI argument registers
        // (RARG0-RARG3, defined above per GOOS), and the call is made on the
        // g0 system stack, since fn is foreign C code that may need more
        // stack than a goroutine stack guarantees.
    27  TEXT	runtime·libfuzzerCall4(SB), NOSPLIT, $0-40
    28  	MOVQ	fn+0(FP), AX
    29  	MOVQ	hookId+8(FP), RARG0
    30  	MOVQ	s1+16(FP), RARG1
    31  	MOVQ	s2+24(FP), RARG2
    32  	MOVQ	result+32(FP), RARG3
    33  
        	// Load the current g from TLS, then its m, to locate m's g0.
    34  	get_tls(R12)
    35  	MOVQ	g(R12), R14
    36  	MOVQ	g_m(R14), R13
    37  
    38  	// Switch to g0 stack.
    39  	MOVQ	SP, R12		// callee-saved, preserved across the CALL
    40  	MOVQ	m_g0(R13), R10
    41  	CMPQ	R10, R14
    42  	JE	call	// already on g0
    43  	MOVQ	(g_sched+gobuf_sp)(R10), SP
    44  call:
    45  	ANDQ	$~15, SP	// alignment for gcc ABI
    46  	CALL	AX
    47  	MOVQ	R12, SP		// restore the original stack pointer saved above
    48  	RET
    49  
    50  // void runtime·libfuzzerCallTraceIntCmp(fn, arg0, arg1, fakePC uintptr)
    51  // Calls C function fn from libFuzzer and passes 2 arguments to it after
    52  // manipulating the return address so that libfuzzer's integer compare hooks
    53  // work.
    54  // libFuzzer's compare hooks obtain the caller's address from the compiler
    55  // builtin __builtin_return_address. Since we invoke the hooks always
    56  // from the same native function, this builtin would always return the same
    57  // value. Internally, the libFuzzer hooks call through to the always inlined
    58  // HandleCmp and thus can't be mimicked without patching libFuzzer.
    59  //
    60  // We solve this problem via an inline assembly trampoline construction that
    61  // translates a runtime argument `fake_pc` in the range [0, 512) into a call to
    62  // a hook with a fake return address whose lower 9 bits are `fake_pc` up to a
    63  // constant shift. This is achieved by pushing a return address pointing into
    64  // 512 ret instructions at offset `fake_pc` onto the stack and then jumping
    65  // directly to the address of the hook.
    66  //
    67  // Note: We only set the lowest 9 bits of the return address since only these
    68  // bits are used by the libFuzzer value profiling mode for integer compares, see
    69  // https://github.com/llvm/llvm-project/blob/704d92607d26e696daba596b72cb70effe79a872/compiler-rt/lib/fuzzer/FuzzerTracePC.cpp#L390
    70  // as well as
    71  // https://github.com/llvm/llvm-project/blob/704d92607d26e696daba596b72cb70effe79a872/compiler-rt/lib/fuzzer/FuzzerValueBitMap.h#L34
    72  // ValueProfileMap.AddValue() truncates its argument to 16 bits and shifts the
    73  // PC to the left by log_2(128)=7, which means that only the lowest 16 - 7 bits
    74  // of the return address matter. String compare hooks use the lowest 12 bits,
    75  // but take the return address as an argument and thus don't require the
    76  // indirection through a trampoline.
    77  // TODO: Remove the inline assembly trampoline once a PC argument has been added to libfuzzer's int compare hooks.
    78  TEXT	runtime·libfuzzerCallTraceIntCmp(SB), NOSPLIT, $0-32
    79  	MOVQ	fn+0(FP), AX
    80  	MOVQ	arg0+8(FP), RARG0
    81  	MOVQ	arg1+16(FP), RARG1
    82  	MOVQ	fakePC+24(FP), R8
    83  
        	// Load the current g from TLS, then its m, to locate m's g0.
    84  	get_tls(R12)
    85  	MOVQ	g(R12), R14
    86  	MOVQ	g_m(R14), R13
    87  
    88  	// Switch to g0 stack.
    89  	MOVQ	SP, R12		// callee-saved, preserved across the CALL
    90  	MOVQ	m_g0(R13), R10
    91  	CMPQ	R10, R14
    92  	JE	call	// already on g0
    93  	MOVQ	(g_sched+gobuf_sp)(R10), SP
    94  call:
    95  	ANDQ	$~15, SP	// alignment for gcc ABI
    96  	// Load the address of the end of the function and push it into the stack.
    97  	// This address will be jumped to after executing the return instruction
    98  	// from the return sled. There we reset the stack pointer and return.
    99  	MOVQ    $end_of_function<>(SB), BX
   100  	PUSHQ   BX
   101  	// Load the starting address of the return sled into BX.
   102  	MOVQ    $ret_sled<>(SB), BX
   103  	// Load the address of the i'th return instruction from the return sled.
   104  	// The index is given in the fakePC argument.
   105  	ADDQ    R8, BX
   106  	PUSHQ   BX
   107  	// Call the original function with the fakePC return address on the stack.
   108  	// Function arguments arg0 and arg1 are passed in the registers specified
   109  	// by the x64 calling convention.
   110  	JMP     AX
   111  // This code will not be executed and is only there to satisfy assembler
   112  // check of a balanced stack.
   113  not_reachable:
   114  	POPQ    BX
   115  	POPQ    BX
   116  	RET
   117  
        // end_of_function is the continuation pushed onto the stack by
        // libfuzzerCallTraceIntCmp: the hook's ret lands in ret_sled, and the
        // sled's ret lands here. Restore the goroutine stack pointer that was
        // saved in R12 (callee-saved, so preserved across the C call) and
        // return to the Go caller.
   118  TEXT end_of_function<>(SB), NOSPLIT, $0-0
   119  	MOVQ	R12, SP
   120  	RET
   121  
        // REPEAT_8 expands its argument 8 times; REPEAT_512 nests it three
        // deep for 8*8*8 = 512 expansions, used below to build the return
        // sled. (No comments may appear inside the backslash-continued
        // macro body itself.)
   122  #define REPEAT_8(a) a \
   123    a \
   124    a \
   125    a \
   126    a \
   127    a \
   128    a \
   129    a
   130  
   131  #define REPEAT_512(a) REPEAT_8(REPEAT_8(REPEAT_8(a)))
   132  
        // ret_sled is the sled of 512 RET instructions indexed by fakePC in
        // libfuzzerCallTraceIntCmp. Each RET pops the previously pushed
        // end_of_function address and jumps there, so the libFuzzer hook
        // observes a distinct return address per fakePC value.
   133  TEXT ret_sled<>(SB), NOSPLIT, $0-0
   134  	REPEAT_512(RET)
   135  
   136  // void runtime·libfuzzerCallWithTwoByteBuffers(fn, start, end *byte)
   137  // Calls C function fn from libFuzzer and passes 2 arguments of type *byte to it.
        // The two pointers are passed in the platform's first two C ABI argument
        // registers (RARG0/RARG1) and the call runs on the g0 system stack,
        // mirroring libfuzzerCall4 above.
   138  TEXT	runtime·libfuzzerCallWithTwoByteBuffers(SB), NOSPLIT, $0-24
   139  	MOVQ	fn+0(FP), AX
   140  	MOVQ	start+8(FP), RARG0
   141  	MOVQ	end+16(FP), RARG1
   142  
        	// Load the current g from TLS, then its m, to locate m's g0.
   143  	get_tls(R12)
   144  	MOVQ	g(R12), R14
   145  	MOVQ	g_m(R14), R13
   146  
   147  	// Switch to g0 stack.
   148  	MOVQ	SP, R12		// callee-saved, preserved across the CALL
   149  	MOVQ	m_g0(R13), R10
   150  	CMPQ	R10, R14
   151  	JE	call	// already on g0
   152  	MOVQ	(g_sched+gobuf_sp)(R10), SP
   153  call:
   154  	ANDQ	$~15, SP	// alignment for gcc ABI
   155  	CALL	AX
   156  	MOVQ	R12, SP		// restore the original stack pointer saved above
   157  	RET
   158  

View as plain text