// Source file src/runtime/syscall_windows.go

     1  // Copyright 2014 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime
     6  
     7  import (
     8  	"internal/abi"
     9  	"internal/goarch"
    10  	"unsafe"
    11  )
    12  
// cbs stores all registered Go callbacks.
// It is protected by cbs.lock; all access goes through cbsLock/cbsUnlock.
var cbs struct {
	lock  mutex // use cbsLock / cbsUnlock for race instrumentation.
	ctxt  [cb_max]winCallback // registered callbacks, indexed by registration order.
	index map[winCallbackKey]int // maps (fn, cdecl) to its slot in ctxt; lazily allocated.
	n     int // number of ctxt slots in use.
}
    20  
// cbsLock acquires cbs.lock, adding a race-detector acquire annotation
// only once the scheduler is running (see comment below).
func cbsLock() {
	lock(&cbs.lock)
	// compileCallback is used by goenvs prior to completion of schedinit.
	// raceacquire involves a racecallback to get the proc, which is not
	// safe prior to scheduler initialization. Thus avoid instrumentation
	// until then.
	if raceenabled && mainStarted {
		raceacquire(unsafe.Pointer(&cbs.lock))
	}
}
    31  
// cbsUnlock releases cbs.lock, mirroring cbsLock: the race-detector
// release annotation is skipped before the scheduler is running.
func cbsUnlock() {
	if raceenabled && mainStarted {
		racerelease(unsafe.Pointer(&cbs.lock))
	}
	unlock(&cbs.lock)
}
    38  
// winCallback records information about a registered Go callback.
type winCallback struct {
	fn     *funcval // Go function
	retPop uintptr  // For 386 cdecl, how many bytes to pop on return
	abiMap abiDesc  // how to translate the C arguments into a Go call frame.
}
    45  
// abiPartKind is the action an abiPart should take.
type abiPartKind int

const (
	abiPartBad   abiPartKind = iota // zero value; indicates an uninitialized part.
	abiPartStack             // Move a value from memory to the stack.
	abiPartReg               // Move a value from memory to a register.
)
    54  
// abiPart encodes a step in translating between calling ABIs.
type abiPart struct {
	kind           abiPartKind
	srcStackOffset uintptr // offset of the value in the C argument block.
	dstStackOffset uintptr // used if kind == abiPartStack
	dstRegister    int     // used if kind == abiPartReg
	len            uintptr // number of bytes to move.
}
    63  
    64  func (a *abiPart) tryMerge(b abiPart) bool {
    65  	if a.kind != abiPartStack || b.kind != abiPartStack {
    66  		return false
    67  	}
    68  	if a.srcStackOffset+a.len == b.srcStackOffset && a.dstStackOffset+a.len == b.dstStackOffset {
    69  		a.len += b.len
    70  		return true
    71  	}
    72  	return false
    73  }
    74  
// abiDesc specifies how to translate from a C frame to a Go
// frame. This does not specify how to translate back because
// the result is always a uintptr. If the C ABI is fastcall,
// this assumes the four fastcall registers were first spilled
// to the shadow space.
type abiDesc struct {
	parts []abiPart // ordered copy steps, built up by assignArg.

	srcStackSize uintptr // stdcall/fastcall stack space tracking
	dstStackSize uintptr // Go stack space used
	dstSpill     uintptr // Extra stack space for argument spill slots
	dstRegisters int     // Go ABI int argument registers used

	// retOffset is the offset of the uintptr-sized result in the Go
	// frame.
	retOffset uintptr
}
    92  
// assignArg appends to p the translation step(s) needed to move one
// argument of type t from the C frame to the Go frame, updating the
// stack and register accounting as it goes. It panics for argument
// shapes that are not supported (wider than a word, or floating
// point on non-386).
func (p *abiDesc) assignArg(t *_type) {
	if t.size > goarch.PtrSize {
		// We don't support this right now. In
		// stdcall/cdecl, 64-bit ints and doubles are
		// passed as two words (little endian); and
		// structs are pushed on the stack. In
		// fastcall, arguments larger than the word
		// size are passed by reference. On arm,
		// 8-byte aligned arguments round up to the
		// next even register and can be split across
		// registers and the stack.
		panic("compileCallback: argument size is larger than uintptr")
	}
	if k := t.kind & kindMask; GOARCH != "386" && (k == kindFloat32 || k == kindFloat64) {
		// In fastcall, floating-point arguments in
		// the first four positions are passed in
		// floating-point registers, which we don't
		// currently spill. arm passes floating-point
		// arguments in VFP registers, which we also
		// don't support.
		// So basically we only support 386.
		panic("compileCallback: float arguments not supported")
	}

	if t.size == 0 {
		// The Go ABI aligns for zero-sized types.
		p.dstStackSize = alignUp(p.dstStackSize, uintptr(t.align))
		return
	}

	// In the C ABI, we're already on a word boundary.
	// Also, sub-word-sized fastcall register arguments
	// are stored to the least-significant bytes of the
	// argument word and all supported Windows
	// architectures are little endian, so srcStackOffset
	// is already pointing to the right place for smaller
	// arguments. The same is true on arm.

	// Remember parts so we can roll back if register assignment fails.
	oldParts := p.parts
	if p.tryRegAssignArg(t, 0) {
		// Account for spill space.
		//
		// TODO(mknyszek): Remove this when we no longer have
		// caller reserved spill space.
		p.dstSpill = alignUp(p.dstSpill, uintptr(t.align))
		p.dstSpill += t.size
	} else {
		// Register assignment failed.
		// Undo the work and stack assign.
		p.parts = oldParts

		// The Go ABI aligns arguments.
		p.dstStackSize = alignUp(p.dstStackSize, uintptr(t.align))

		// Copy just the size of the argument. Note that this
		// could be a small by-value struct, but C and Go
		// struct layouts are compatible, so we can copy these
		// directly, too.
		part := abiPart{
			kind:           abiPartStack,
			srcStackOffset: p.srcStackSize,
			dstStackOffset: p.dstStackSize,
			len:            t.size,
		}
		// Add this step to the adapter.
		if len(p.parts) == 0 || !p.parts[len(p.parts)-1].tryMerge(part) {
			p.parts = append(p.parts, part)
		}
		// The Go ABI packs arguments.
		p.dstStackSize += t.size
	}

	// cdecl, stdcall, fastcall, and arm pad arguments to word size.
	// TODO(rsc): On arm and arm64 do we need to skip the caller's saved LR?
	p.srcStackSize += goarch.PtrSize
}
   169  
   170  // tryRegAssignArg tries to register-assign a value of type t.
   171  // If this type is nested in an aggregate type, then offset is the
   172  // offset of this type within its parent type.
   173  // Assumes t.size <= goarch.PtrSize and t.size != 0.
   174  //
   175  // Returns whether the assignment succeeded.
   176  func (p *abiDesc) tryRegAssignArg(t *_type, offset uintptr) bool {
   177  	switch k := t.kind & kindMask; k {
   178  	case kindBool, kindInt, kindInt8, kindInt16, kindInt32, kindUint, kindUint8, kindUint16, kindUint32, kindUintptr, kindPtr, kindUnsafePointer:
   179  		// Assign a register for all these types.
   180  		return p.assignReg(t.size, offset)
   181  	case kindInt64, kindUint64:
   182  		// Only register-assign if the registers are big enough.
   183  		if goarch.PtrSize == 8 {
   184  			return p.assignReg(t.size, offset)
   185  		}
   186  	case kindArray:
   187  		at := (*arraytype)(unsafe.Pointer(t))
   188  		if at.len == 1 {
   189  			return p.tryRegAssignArg(at.elem, offset)
   190  		}
   191  	case kindStruct:
   192  		st := (*structtype)(unsafe.Pointer(t))
   193  		for i := range st.fields {
   194  			f := &st.fields[i]
   195  			if !p.tryRegAssignArg(f.typ, offset+f.offset) {
   196  				return false
   197  			}
   198  		}
   199  		return true
   200  	}
   201  	// Pointer-sized types such as maps and channels are currently
   202  	// not supported.
   203  	panic("compileCallabck: type " + t.string() + " is currently not supported for use in system callbacks")
   204  }
   205  
   206  // assignReg attempts to assign a single register for an
   207  // argument with the given size, at the given offset into the
   208  // value in the C ABI space.
   209  //
   210  // Returns whether the assignment was successful.
   211  func (p *abiDesc) assignReg(size, offset uintptr) bool {
   212  	if p.dstRegisters >= intArgRegs {
   213  		return false
   214  	}
   215  	p.parts = append(p.parts, abiPart{
   216  		kind:           abiPartReg,
   217  		srcStackOffset: p.srcStackSize + offset,
   218  		dstRegister:    p.dstRegisters,
   219  		len:            size,
   220  	})
   221  	p.dstRegisters++
   222  	return true
   223  }
   224  
// winCallbackKey identifies a registered callback: the Go function
// plus the requested calling convention (cdecl matters only on 386).
type winCallbackKey struct {
	fn    *funcval
	cdecl bool
}
   229  
   230  func callbackasm()
   231  
   232  // callbackasmAddr returns address of runtime.callbackasm
   233  // function adjusted by i.
   234  // On x86 and amd64, runtime.callbackasm is a series of CALL instructions,
   235  // and we want callback to arrive at
   236  // correspondent call instruction instead of start of
   237  // runtime.callbackasm.
   238  // On ARM, runtime.callbackasm is a series of mov and branch instructions.
   239  // R12 is loaded with the callback index. Each entry is two instructions,
   240  // hence 8 bytes.
   241  func callbackasmAddr(i int) uintptr {
   242  	var entrySize int
   243  	switch GOARCH {
   244  	default:
   245  		panic("unsupported architecture")
   246  	case "386", "amd64":
   247  		entrySize = 5
   248  	case "arm", "arm64":
   249  		// On ARM and ARM64, each entry is a MOV instruction
   250  		// followed by a branch instruction
   251  		entrySize = 8
   252  	}
   253  	return abi.FuncPCABI0(callbackasm) + uintptr(i*entrySize)
   254  }
   255  
   256  const callbackMaxFrame = 64 * goarch.PtrSize
   257  
// compileCallback converts a Go function fn into a C function pointer
// that can be passed to Windows APIs.
//
// On 386, if cdecl is true, the returned C function will use the
// cdecl calling convention; otherwise, it will use stdcall. On amd64,
// it always uses fastcall. On arm, it always uses the ARM convention.
//
// fn must take only uintptr-sized (or smaller) arguments and return
// exactly one uintptr-sized, non-float result; otherwise this panics.
// Registration is idempotent: the same (fn, cdecl) pair always yields
// the same address.
//
//go:linkname compileCallback syscall.compileCallback
func compileCallback(fn eface, cdecl bool) (code uintptr) {
	if GOARCH != "386" {
		// cdecl is only meaningful on 386.
		cdecl = false
	}

	if fn._type == nil || (fn._type.kind&kindMask) != kindFunc {
		panic("compileCallback: expected function with one uintptr-sized result")
	}
	ft := (*functype)(unsafe.Pointer(fn._type))

	// Check arguments and construct ABI translation.
	var abiMap abiDesc
	for _, t := range ft.in() {
		abiMap.assignArg(t)
	}
	// The Go ABI aligns the result to the word size. src is
	// already aligned.
	abiMap.dstStackSize = alignUp(abiMap.dstStackSize, goarch.PtrSize)
	abiMap.retOffset = abiMap.dstStackSize

	if len(ft.out()) != 1 {
		panic("compileCallback: expected function with one uintptr-sized result")
	}
	if ft.out()[0].size != goarch.PtrSize {
		panic("compileCallback: expected function with one uintptr-sized result")
	}
	if k := ft.out()[0].kind & kindMask; k == kindFloat32 || k == kindFloat64 {
		// In cdecl and stdcall, float results are returned in
		// ST(0). In fastcall, they're returned in XMM0.
		// Either way, it's not AX.
		panic("compileCallback: float results not supported")
	}
	if intArgRegs == 0 {
		// Make room for the uintptr-sized result.
		// If there are argument registers, the return value will
		// be passed in the first register.
		abiMap.dstStackSize += goarch.PtrSize
	}

	// TODO(mknyszek): Remove dstSpill from this calculation when we no longer have
	// caller reserved spill space.
	frameSize := alignUp(abiMap.dstStackSize, goarch.PtrSize)
	frameSize += abiMap.dstSpill
	if frameSize > callbackMaxFrame {
		panic("compileCallback: function argument frame too large")
	}

	// For cdecl, the callee is responsible for popping its
	// arguments from the C stack.
	var retPop uintptr
	if cdecl {
		retPop = abiMap.srcStackSize
	}

	key := winCallbackKey{(*funcval)(fn.data), cdecl}

	cbsLock()

	// Check if this callback is already registered.
	if n, ok := cbs.index[key]; ok {
		cbsUnlock()
		return callbackasmAddr(n)
	}

	// Register the callback.
	if cbs.index == nil {
		// Lazily allocate the index map on first registration.
		cbs.index = make(map[winCallbackKey]int)
	}
	n := cbs.n
	if n >= len(cbs.ctxt) {
		cbsUnlock()
		throw("too many callback functions")
	}
	c := winCallback{key.fn, retPop, abiMap}
	cbs.ctxt[n] = c
	cbs.index[key] = n
	cbs.n++

	cbsUnlock()
	return callbackasmAddr(n)
}
   348  
// callbackArgs is the communication block between callbackasm and
// callbackWrap: the trampoline fills in index and args, and
// callbackWrap fills in result and retPop.
type callbackArgs struct {
	index uintptr // which registered callback to invoke (slot in cbs.ctxt).
	// args points to the argument block.
	//
	// For cdecl and stdcall, all arguments are on the stack.
	//
	// For fastcall, the trampoline spills register arguments to
	// the reserved spill slots below the stack arguments,
	// resulting in a layout equivalent to stdcall.
	//
	// For arm, the trampoline stores the register arguments just
	// below the stack arguments, so again we can treat it as one
	// big stack arguments frame.
	args unsafe.Pointer
	// Below are out-args from callbackWrap
	result uintptr
	retPop uintptr // For 386 cdecl, how many bytes to pop on return
}
   367  
// callbackWrap is called by callbackasm to invoke a registered Go callback.
// It copies the C arguments into a Go call frame and registers using the
// callback's precomputed abiMap, invokes the Go function via reflectcall,
// and stores the uintptr result back into a.
func callbackWrap(a *callbackArgs) {
	c := cbs.ctxt[a.index]
	// Tell the assembly trampoline how many bytes to pop on return (386 cdecl).
	a.retPop = c.retPop

	// Convert from C to Go ABI.
	var regs abi.RegArgs
	var frame [callbackMaxFrame]byte
	goArgs := unsafe.Pointer(&frame)
	for _, part := range c.abiMap.parts {
		switch part.kind {
		case abiPartStack:
			memmove(add(goArgs, part.dstStackOffset), add(a.args, part.srcStackOffset), part.len)
		case abiPartReg:
			goReg := unsafe.Pointer(&regs.Ints[part.dstRegister])
			memmove(goReg, add(a.args, part.srcStackOffset), part.len)
		default:
			panic("bad ABI description")
		}
	}

	// TODO(mknyszek): Remove this when we no longer have
	// caller reserved spill space.
	frameSize := alignUp(c.abiMap.dstStackSize, goarch.PtrSize)
	frameSize += c.abiMap.dstSpill

	// Even though this is copying back results, we can pass a nil
	// type because those results must not require write barriers.
	reflectcall(nil, unsafe.Pointer(c.fn), noescape(goArgs), uint32(c.abiMap.dstStackSize), uint32(c.abiMap.retOffset), uint32(frameSize), &regs)

	// Extract the result.
	//
	// There's always exactly one return value, one pointer in size.
	// If it's on the stack, then we will have reserved space for it
	// at the end of the frame, otherwise it was passed in a register.
	if c.abiMap.dstStackSize != c.abiMap.retOffset {
		a.result = *(*uintptr)(unsafe.Pointer(&frame[c.abiMap.retOffset]))
	} else {
		var zero int
		// On architectures with no registers, Ints[0] would be a compile error,
		// so we use a dynamic index. These architectures will never take this
		// branch, so this won't cause a runtime panic.
		a.result = regs.Ints[zero]
	}
}
   413  
   414  const _LOAD_LIBRARY_SEARCH_SYSTEM32 = 0x00000800
   415  
// syscall_loadsystemlibrary loads a DLL restricted to the system
// directory. When available, this function will use LoadLibraryEx with
// the filename parameter and the important SEARCH_SYSTEM32 argument.
// But on systems that do not have that option, absoluteFilepath should
// contain a fallback to the full path inside of system32 for use with
// vanilla LoadLibrary.
//
// Returns the module handle in handle; on failure (handle == 0), err
// holds the Windows error code.
//
//go:linkname syscall_loadsystemlibrary syscall.loadsystemlibrary
//go:nosplit
//go:cgo_unsafe_args
func syscall_loadsystemlibrary(filename *uint16, absoluteFilepath *uint16) (handle, err uintptr) {
	lockOSThread()
	c := &getg().m.syscall

	if useLoadLibraryEx {
		c.fn = getLoadLibraryEx()
		c.n = 3
		args := struct {
			lpFileName *uint16
			hFile      uintptr // always 0
			flags      uint32
		}{filename, 0, _LOAD_LIBRARY_SEARCH_SYSTEM32}
		c.args = uintptr(noescape(unsafe.Pointer(&args)))
	} else {
		c.fn = getLoadLibrary()
		c.n = 1
		c.args = uintptr(noescape(unsafe.Pointer(&absoluteFilepath)))
	}

	cgocall(asmstdcallAddr, unsafe.Pointer(c))
	// Keep the UTF-16 buffers alive until the C call has returned.
	KeepAlive(filename)
	KeepAlive(absoluteFilepath)
	handle = c.r1
	if handle == 0 {
		err = c.err
	}
	unlockOSThread() // not defer'd after the lockOSThread above to save stack frame size.
	return
}
   453  
// syscall_loadlibrary calls LoadLibrary with the given UTF-16 filename.
// Returns the module handle; on failure (handle == 0), err holds the
// Windows error code.
//
//go:linkname syscall_loadlibrary syscall.loadlibrary
//go:nosplit
//go:cgo_unsafe_args
func syscall_loadlibrary(filename *uint16) (handle, err uintptr) {
	lockOSThread()
	defer unlockOSThread()
	c := &getg().m.syscall
	c.fn = getLoadLibrary()
	c.n = 1
	c.args = uintptr(noescape(unsafe.Pointer(&filename)))
	cgocall(asmstdcallAddr, unsafe.Pointer(c))
	// Keep the UTF-16 buffer alive until the C call has returned.
	KeepAlive(filename)
	handle = c.r1
	if handle == 0 {
		err = c.err
	}
	return
}
   472  
// syscall_getprocaddress calls GetProcAddress(handle, procname).
// Returns the procedure address; on failure (outhandle == 0), err
// holds the Windows error code.
//
//go:linkname syscall_getprocaddress syscall.getprocaddress
//go:nosplit
//go:cgo_unsafe_args
func syscall_getprocaddress(handle uintptr, procname *byte) (outhandle, err uintptr) {
	lockOSThread()
	defer unlockOSThread()
	c := &getg().m.syscall
	c.fn = getGetProcAddress()
	c.n = 2
	// handle and procname are adjacent arguments; pass their base address.
	c.args = uintptr(noescape(unsafe.Pointer(&handle)))
	cgocall(asmstdcallAddr, unsafe.Pointer(c))
	// Keep the name buffer alive until the C call has returned.
	KeepAlive(procname)
	outhandle = c.r1
	if outhandle == 0 {
		err = c.err
	}
	return
}
   491  
// syscall_Syscall is a compatibility shim for syscall.Syscall:
// nargs is ignored and the call is forwarded to syscall_SyscallN.
//
//go:linkname syscall_Syscall syscall.Syscall
//go:nosplit
func syscall_Syscall(fn, nargs, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
	return syscall_SyscallN(fn, a1, a2, a3)
}
   497  
// syscall_Syscall6 is a compatibility shim for syscall.Syscall6:
// nargs is ignored and the call is forwarded to syscall_SyscallN.
//
//go:linkname syscall_Syscall6 syscall.Syscall6
//go:nosplit
func syscall_Syscall6(fn, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
	return syscall_SyscallN(fn, a1, a2, a3, a4, a5, a6)
}
   503  
// syscall_Syscall9 is a compatibility shim for syscall.Syscall9:
// nargs is ignored and the call is forwarded to syscall_SyscallN.
//
//go:linkname syscall_Syscall9 syscall.Syscall9
//go:nosplit
func syscall_Syscall9(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2, err uintptr) {
	return syscall_SyscallN(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9)
}
   509  
// syscall_Syscall12 is a compatibility shim for syscall.Syscall12:
// nargs is ignored and the call is forwarded to syscall_SyscallN.
//
//go:linkname syscall_Syscall12 syscall.Syscall12
//go:nosplit
func syscall_Syscall12(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12 uintptr) (r1, r2, err uintptr) {
	return syscall_SyscallN(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12)
}
   515  
// syscall_Syscall15 is a compatibility shim for syscall.Syscall15:
// nargs is ignored and the call is forwarded to syscall_SyscallN.
//
//go:linkname syscall_Syscall15 syscall.Syscall15
//go:nosplit
func syscall_Syscall15(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr) (r1, r2, err uintptr) {
	return syscall_SyscallN(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15)
}
   521  
// syscall_Syscall18 is a compatibility shim for syscall.Syscall18:
// nargs is ignored and the call is forwarded to syscall_SyscallN.
//
//go:linkname syscall_Syscall18 syscall.Syscall18
//go:nosplit
func syscall_Syscall18(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18 uintptr) (r1, r2, err uintptr) {
	return syscall_SyscallN(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18)
}
   527  
// maxArgs is the upper bound on arguments accepted by syscall_SyscallN.
//
// maxArgs should be divisible by 2, as Windows stack
// must be kept 16-byte aligned on syscall entry.
//
// Although it only permits maximum 42 parameters, it
// is arguably large enough.
const maxArgs = 42
   534  
// syscall_SyscallN invokes the stdcall function at address trap with
// the given arguments on the current OS thread, returning the two
// result words and the Windows error code. It panics if more than
// maxArgs arguments are supplied.
//
//go:linkname syscall_SyscallN syscall.SyscallN
//go:nosplit
func syscall_SyscallN(trap uintptr, args ...uintptr) (r1, r2, err uintptr) {
	nargs := len(args)

	// asmstdcall expects it can access the first 4 arguments
	// to load them into registers.
	var tmp [4]uintptr
	switch {
	case nargs < 4:
		// Pad short argument lists out to 4 slots (also covers nargs == 0,
		// so &args[0] below is always valid).
		copy(tmp[:], args)
		args = tmp[:]
	case nargs > maxArgs:
		panic("runtime: SyscallN has too many arguments")
	}

	lockOSThread()
	defer unlockOSThread()
	c := &getg().m.syscall
	c.fn = trap
	c.n = uintptr(nargs)
	// noescape: the pointer is only read during the cgocall below.
	c.args = uintptr(noescape(unsafe.Pointer(&args[0])))
	cgocall(asmstdcallAddr, unsafe.Pointer(c))
	return c.r1, c.r2, c.err
}
   560  
