From be9044e79fa22ac025bd576ea7c6842ee0ade134 Mon Sep 17 00:00:00 2001
From: Curle
Date: Sun, 21 Jul 2019 01:08:57 +0100
Subject: [PATCH] Forgot x86_64 GNU-EFI. Oops.

---
 .gitignore                    |   3 -
 gnu-efi/lib/x86_64/callwrap.c |  40 +++++++
 gnu-efi/lib/x86_64/efi_stub.S | 189 ++++++++++++++++++++++++++++++++++
 gnu-efi/lib/x86_64/initplat.c |  27 +++++
 gnu-efi/lib/x86_64/math.c     | 181 ++++++++++++++++++++++++++++++++
 gnu-efi/lib/x86_64/setjmp.S   |  41 ++++++++
 6 files changed, 478 insertions(+), 3 deletions(-)
 create mode 100644 gnu-efi/lib/x86_64/callwrap.c
 create mode 100644 gnu-efi/lib/x86_64/efi_stub.S
 create mode 100644 gnu-efi/lib/x86_64/initplat.c
 create mode 100644 gnu-efi/lib/x86_64/math.c
 create mode 100644 gnu-efi/lib/x86_64/setjmp.S

diff --git a/.gitignore b/.gitignore
index c92ae86..aaf2ef1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -8,9 +8,6 @@
 *.fd
 *.db*
 *.*db*
-arm
-aa64
-x86_64
 x86_32
 image
 x64
diff --git a/gnu-efi/lib/x86_64/callwrap.c b/gnu-efi/lib/x86_64/callwrap.c
new file mode 100644
index 0000000..30a5322
--- /dev/null
+++ b/gnu-efi/lib/x86_64/callwrap.c
@@ -0,0 +1,40 @@
+/*
+ * Convert SysV calling convention to EFI x86_64 calling convention
+ *
+ * Copyright (C) 2007-2010 Intel Corp
+ *	Bibo Mao
+ *	Chandramouli Narayanan
+ *	Huang Ying
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer in the documentation and/or other materials
+ *   provided with the distribution.
+ * - Neither the name of Hewlett-Packard Co. nor the names of its
+ *   contributors may be used to endorse or promote products derived
+ *   from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
+ * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+ * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* uefi_call_wrapper() is a macro in efibind.h */
diff --git a/gnu-efi/lib/x86_64/efi_stub.S b/gnu-efi/lib/x86_64/efi_stub.S
new file mode 100644
index 0000000..b431255
--- /dev/null
+++ b/gnu-efi/lib/x86_64/efi_stub.S
@@ -0,0 +1,189 @@
+/*
+ * Function calling ABI conversion from Linux to EFI for x86_64
+ *
+ * Copyright (C) 2007 Intel Corp
+ *	Bibo Mao
+ *	Huang Ying
+ * Copyright (C) 2012 Felipe Contreras
+ */
+
+#if !defined(HAVE_USE_MS_ABI)
+/*
+ * EFI calling conventions are documented at:
+ *   http://msdn.microsoft.com/en-us/library/ms235286%28v=vs.80%29.aspx
+ * ELF calling conventions are documented at:
+ *   http://www.x86-64.org/documentation/abi.pdf
+ *
+ * Basically here are the conversion rules:
+ * a) our function pointer is in %rdi
+ * b) rsi through r8 (elf) aka rcx through r9 (ms) require stack space
+ *    on the MS side even though it is not used at all
+ * c) 8(%rsp) is always aligned to 16 in ELF, so %rsp is shifted 8 bytes extra
+ * d) arguments are as follows: (elf -> ms)
+ *    1) rdi -> rcx (32 saved)
+ *    2) rsi -> rdx (32 saved)
+ *    3) rdx -> r8 (32 saved)
+ *    4) rcx -> r9 (32 saved)
+ *    5) r8 -> 32(%rsp) (32 saved)
+ *    6) r9 -> 40(%rsp) (48 saved)
+ *    7) 8(%rsp) -> 48(%rsp) (48 saved)
+ *    8) 16(%rsp) -> 56(%rsp) (64 saved)
+ *    9) 24(%rsp) -> 64(%rsp) (64 saved)
+ *    10) 32(%rsp) -> 72(%rsp) (80 saved)
+ * e) because the first argument we receive in a thunker is actually the
+ *    function to be called, arguments are offset as such:
+ *    0) rdi -> caller
+ *    1) rsi -> rcx (32 saved)
+ *    2) rdx -> rdx (32 saved)
+ *    3) rcx -> r8 (32 saved)
+ *    4) r8 -> r9 (32 saved)
+ *    5) r9 -> 32(%rsp) (32 saved)
+ *    6) 8(%rsp) -> 40(%rsp) (48 saved)
+ *    7) 16(%rsp) -> 48(%rsp) (48 saved)
+ *    8) 24(%rsp) -> 56(%rsp) (64 saved)
+ *    9) 32(%rsp) -> 64(%rsp) (64 saved)
+ *    10) 40(%rsp) -> 72(%rsp) (80 saved)
+ * f) arguments need to be moved in opposite order to avoid clobbering
+ */
+
+#define ENTRY(name)	\
+	.globl name;	\
+	name:
+
+ENTRY(efi_call0)
+	subq $40, %rsp
+	call *%rdi
+	addq $40, %rsp
+	ret
+
+ENTRY(efi_call1)
+	subq $40, %rsp
+	mov %rsi, %rcx
+	call *%rdi
+	addq $40, %rsp
+	ret
+
+ENTRY(efi_call2)
+	subq $40, %rsp
+	/* mov %rdx, %rdx */
+	mov %rsi, %rcx
+	call *%rdi
+	addq $40, %rsp
+	ret
+
+ENTRY(efi_call3)
+	subq $40, %rsp
+	mov %rcx, %r8
+	/* mov %rdx, %rdx */
+	mov %rsi, %rcx
+	call *%rdi
+	addq $40, %rsp
+	ret
+
+ENTRY(efi_call4)
+	subq $40, %rsp
+	mov %r8, %r9
+	mov %rcx, %r8
+	/* mov %rdx, %rdx */
+	mov %rsi, %rcx
+	call *%rdi
+	addq $40, %rsp
+	ret
+
+ENTRY(efi_call5)
+	subq $40, %rsp
+	mov %r9, 32(%rsp)
+	mov %r8, %r9
+	mov %rcx, %r8
+	/* mov %rdx, %rdx */
+	mov %rsi, %rcx
+	call *%rdi
+	addq $40, %rsp
+	ret
+
+ENTRY(efi_call6)
+	subq $56, %rsp
+	mov 56+8(%rsp), %rax
+	mov %rax, 40(%rsp)
+	mov %r9, 32(%rsp)
+	mov %r8, %r9
+	mov %rcx, %r8
+	/* mov %rdx, %rdx */
+	mov %rsi, %rcx
+	call *%rdi
+	addq $56, %rsp
+	ret
+
+ENTRY(efi_call7)
+	subq $56, %rsp
+	mov 56+16(%rsp), %rax
+	mov %rax, 48(%rsp)
+	mov 56+8(%rsp), %rax
+	mov %rax, 40(%rsp)
+	mov %r9, 32(%rsp)
+	mov %r8, %r9
+	mov %rcx, %r8
+	/* mov %rdx, %rdx */
+	mov %rsi, %rcx
+	call *%rdi
+	addq $56, %rsp
+	ret
+
+ENTRY(efi_call8)
+	subq $72, %rsp
+	mov 72+24(%rsp), %rax
+	mov %rax, 56(%rsp)
+	mov 72+16(%rsp), %rax
+	mov %rax, 48(%rsp)
+	mov 72+8(%rsp), %rax
+	mov %rax, 40(%rsp)
+	mov %r9, 32(%rsp)
+	mov %r8, %r9
+	mov %rcx, %r8
+	/* mov %rdx, %rdx */
+	mov %rsi, %rcx
+	call *%rdi
+	addq $72, %rsp
+	ret
+
+ENTRY(efi_call9)
+	subq $72, %rsp
+	mov 72+32(%rsp), %rax
+	mov %rax, 64(%rsp)
+	mov 72+24(%rsp), %rax
+	mov %rax, 56(%rsp)
+	mov 72+16(%rsp), %rax
+	mov %rax, 48(%rsp)
+	mov 72+8(%rsp), %rax
+	mov %rax, 40(%rsp)
+	mov %r9, 32(%rsp)
+	mov %r8, %r9
+	mov %rcx, %r8
+	/* mov %rdx, %rdx */
+	mov %rsi, %rcx
+	call *%rdi
+	addq $72, %rsp
+	ret
+
+ENTRY(efi_call10)
+	subq $88, %rsp
+	mov 88+40(%rsp), %rax
+	mov %rax, 72(%rsp)
+	mov 88+32(%rsp), %rax
+	mov %rax, 64(%rsp)
+	mov 88+24(%rsp), %rax
+	mov %rax, 56(%rsp)
+	mov 88+16(%rsp), %rax
+	mov %rax, 48(%rsp)
+	mov 88+8(%rsp), %rax
+	mov %rax, 40(%rsp)
+	mov %r9, 32(%rsp)
+	mov %r8, %r9
+	mov %rcx, %r8
+	/* mov %rdx, %rdx */
+	mov %rsi, %rcx
+	call *%rdi
+	addq $88, %rsp
+	ret
+
+#endif
diff --git a/gnu-efi/lib/x86_64/initplat.c b/gnu-efi/lib/x86_64/initplat.c
new file mode 100644
index 0000000..7c887a6
--- /dev/null
+++ b/gnu-efi/lib/x86_64/initplat.c
@@ -0,0 +1,27 @@
+/*++
+
+Copyright (c) 1998 Intel Corporation
+
+Module Name:
+
+    initplat.c
+
+Abstract:
+
+
+
+
+Revision History
+
+--*/
+
+#include "lib.h"
+
+VOID
+InitializeLibPlatform (
+    IN EFI_HANDLE       ImageHandle EFI_UNUSED,
+    IN EFI_SYSTEM_TABLE *SystemTable EFI_UNUSED
+    )
+{
+}
+
diff --git a/gnu-efi/lib/x86_64/math.c b/gnu-efi/lib/x86_64/math.c
new file mode 100644
index 0000000..aa02431
--- /dev/null
+++ b/gnu-efi/lib/x86_64/math.c
@@ -0,0 +1,181 @@
+/*++
+
+Copyright (c) 1998 Intel Corporation
+
+Module Name:
+
+    math.c
+
+Abstract:
+
+
+
+
+Revision History
+
+--*/
+
+#include "lib.h"
+
+
+//
+// Declare runtime functions
+//
+
+#ifdef RUNTIME_CODE
+#ifndef __GNUC__
+#pragma RUNTIME_CODE(LShiftU64)
+#pragma RUNTIME_CODE(RShiftU64)
+#pragma RUNTIME_CODE(MultU64x32)
+#pragma RUNTIME_CODE(DivU64x32)
+#endif
+#endif
+
+//
+//
+//
+
+UINT64
+LShiftU64 (
+    IN UINT64   Operand,
+    IN UINTN    Count
+    )
+// Left-shift a 64-bit operand by Count bits and return the 64-bit result
+{
+#if defined(__GNUC__) || defined(_MSC_EXTENSIONS)
+    return Operand << Count;
+#else
+    UINT64      Result;
+    _asm {
+        mov     eax, dword ptr Operand[0]
+        mov     edx, dword ptr Operand[4]
+        mov     ecx, Count
+        and     ecx, 63
+
+        shld    edx, eax, cl
+        shl     eax, cl
+
+        cmp     ecx, 32
+        jc      short ls10
+
+        mov     edx, eax
+        xor     eax, eax
+
+ls10:
+        mov     dword ptr Result[0], eax
+        mov     dword ptr Result[4], edx
+    }
+
+    return Result;
+#endif
+}
+
+UINT64
+RShiftU64 (
+    IN UINT64   Operand,
+    IN UINTN    Count
+    )
+// Right-shift a 64-bit operand by Count bits and return the 64-bit result
+{
+#if defined(__GNUC__) || defined(_MSC_EXTENSIONS)
+    return Operand >> Count;
+#else
+    UINT64      Result;
+    _asm {
+        mov     eax, dword ptr Operand[0]
+        mov     edx, dword ptr Operand[4]
+        mov     ecx, Count
+        and     ecx, 63
+
+        shrd    eax, edx, cl
+        shr     edx, cl
+
+        cmp     ecx, 32
+        jc      short rs10
+
+        mov     eax, edx
+        xor     edx, edx
+
+rs10:
+        mov     dword ptr Result[0], eax
+        mov     dword ptr Result[4], edx
+    }
+
+    return Result;
+#endif
+}
+
+
+UINT64
+MultU64x32 (
+    IN UINT64   Multiplicand,
+    IN UINTN    Multiplier
+    )
+// Multiply a 64-bit multiplicand by a 32-bit multiplier and return the 64-bit result
+{
+#if defined(__GNUC__) || defined(_MSC_EXTENSIONS)
+    return Multiplicand * Multiplier;
+#else
+    UINT64      Result;
+    _asm {
+        mov     eax, dword ptr Multiplicand[0]
+        mul     Multiplier
+        mov     dword ptr Result[0], eax
+        mov     dword ptr Result[4], edx
+        mov     eax, dword ptr Multiplicand[4]
+        mul     Multiplier
+        add     dword ptr Result[4], eax
+    }
+
+    return Result;
+#endif
+}
+
+UINT64
+DivU64x32 (
+    IN UINT64   Dividend,
+    IN UINTN    Divisor,
+    OUT UINTN   *Remainder OPTIONAL
+    )
+// Divide a 64-bit dividend by a 32-bit divisor and return the 64-bit result
+// N.B. only works for 31-bit divisors!!
+{
+#if defined(__GNUC__) || defined(_MSC_EXTENSIONS)
+    if (Remainder)
+        *Remainder = Dividend % Divisor;
+    return Dividend / Divisor;
+#else
+    UINT32      Rem;
+    UINT32      bit;
+
+    ASSERT (Divisor != 0);
+    ASSERT ((Divisor >> 31) == 0);
+
+    //
+    // For each bit in the dividend
+    //
+
+    Rem = 0;
+    for (bit=0; bit < 64; bit++) {
+        _asm {
+            shl     dword ptr Dividend[0], 1    ; shift rem:dividend left one
+            rcl     dword ptr Dividend[4], 1
+            rcl     dword ptr Rem, 1
+
+            mov     eax, Rem
+            cmp     eax, Divisor                ; Is Rem >= Divisor?
+            cmc                                 ; No - do nothing
+            sbb     eax, eax                    ; Else,
+            sub     dword ptr Dividend[0], eax  ;   set low bit in dividend
+            and     eax, Divisor                ;   and
+            sub     Rem, eax                    ;   subtract divisor
+        }
+    }
+
+    if (Remainder) {
+        *Remainder = Rem;
+    }
+
+    return Dividend;
+#endif
+}
diff --git a/gnu-efi/lib/x86_64/setjmp.S b/gnu-efi/lib/x86_64/setjmp.S
new file mode 100644
index 0000000..e870aef
--- /dev/null
+++ b/gnu-efi/lib/x86_64/setjmp.S
@@ -0,0 +1,41 @@
+	.text
+	.globl	setjmp
+#ifndef __MINGW32__
+	.type	setjmp, @function
+#else
+	.def	setjmp; .scl 2; .type 32; .endef
+#endif
+setjmp:
+	pop	%rsi
+	movq	%rbx,0x00(%rdi)
+	movq	%rsp,0x08(%rdi)
+	push	%rsi
+	movq	%rbp,0x10(%rdi)
+	movq	%r12,0x18(%rdi)
+	movq	%r13,0x20(%rdi)
+	movq	%r14,0x28(%rdi)
+	movq	%r15,0x30(%rdi)
+	movq	%rsi,0x38(%rdi)
+	xor	%rax,%rax
+	ret
+
+	.globl	longjmp
+#ifndef __MINGW32__
+	.type	longjmp, @function
+#else
+	.def	longjmp; .scl 2; .type 32; .endef
+#endif
+longjmp:
+	movl	%esi, %eax
+	movq	0x00(%rdi), %rbx
+	movq	0x08(%rdi), %rsp
+	movq	0x10(%rdi), %rbp
+	movq	0x18(%rdi), %r12
+	movq	0x20(%rdi), %r13
+	movq	0x28(%rdi), %r14
+	movq	0x30(%rdi), %r15
+	xor	%rdx,%rdx
+	mov	$1,%rcx
+	cmp	%rax,%rdx
+	cmove	%rcx,%rax
+	jmp	*0x38(%rdi)
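
Usage note: the efi_callN thunks in efi_stub.S are what gnu-efi's uefi_call_wrapper() macro dispatches to when the compiler cannot emit MS-ABI calls directly (i.e. HAVE_USE_MS_ABI is not defined; when it is, the macro collapses to a plain call). A minimal sketch of a caller, assuming the standard gnu-efi headers and efi_main entry point; the string literal is illustrative:

#include <efi.h>
#include <efilib.h>

EFI_STATUS
efi_main (EFI_HANDLE ImageHandle, EFI_SYSTEM_TABLE *SystemTable)
{
    InitializeLib(ImageHandle, SystemTable);

    /* Two arguments, so efibind.h expands this to efi_call2(): the
     * thunk moves %rsi -> %rcx, leaves arg 2 in %rdx (hence the
     * commented-out "mov %rdx, %rdx"), reserves the 32-byte MS-ABI
     * shadow space plus 8 bytes of alignment padding, then calls the
     * firmware function pointer that arrived in %rdi. */
    uefi_call_wrapper(SystemTable->ConOut->OutputString, 2,
                      SystemTable->ConOut, L"x86_64 thunks wired up\r\n");

    return EFI_SUCCESS;
}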
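The MSVC-only branch of DivU64x32 is easier to follow in portable C. The helper below is a hypothetical rewrite of the same restoring-division loop, not part of the patch; it also shows why the divisor must fit in 31 bits: Rem is shifted left before the compare, so a divisor with bit 31 set could overflow the 32-bit remainder.

#include <stdint.h>

/* Hypothetical portable equivalent of the _asm loop in DivU64x32:
 * 64 iterations of "shift rem:dividend left one", subtract the
 * divisor when it fits, and accumulate quotient bits in Dividend. */
static uint64_t
div_u64_x32_sketch (uint64_t Dividend, uint32_t Divisor, uint32_t *Remainder)
{
    uint32_t Rem = 0;
    int      bit;

    for (bit = 0; bit < 64; bit++) {
        /* shl/rcl/rcl: the top bit of Dividend shifts into Rem */
        Rem = (Rem << 1) | (uint32_t)(Dividend >> 63);
        Dividend <<= 1;

        if (Rem >= Divisor) {       /* the cmp/cmc/sbb/sub sequence */
            Rem -= Divisor;
            Dividend |= 1;          /* record a quotient bit */
        }
    }

    if (Remainder)
        *Remainder = Rem;
    return Dividend;                /* quotient built up in place */
}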
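On setjmp.S: setjmp saves exactly eight 64-bit slots at offsets 0x00-0x38 (rbx, rsp, rbp, r12-r15, and the return address popped into %rsi), and the xor/mov/cmp/cmove tail of longjmp enforces the usual C rule that longjmp(env, 0) makes setjmp return 1. A sketch of a caller follows; the sketch_jmp_buf typedef and demo function are illustrative only (gnu-efi ships its own jmp_buf type and prototypes):

/* Illustrative: eight 8-byte slots matching the offsets used in
 * setjmp.S above (0x00 %rbx ... 0x38 saved return address). */
typedef unsigned long long sketch_jmp_buf[8];

extern int  setjmp  (sketch_jmp_buf env);
extern void longjmp (sketch_jmp_buf env, int value);

static sketch_jmp_buf recover;

void demo (void)
{
    if (setjmp(recover) == 0) {
        /* first return: take the risky path */
        longjmp(recover, 0);    /* 0 is promoted to 1 by the cmove */
    }
    /* execution resumes here with setjmp() returning nonzero */
}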