aboutsummaryrefslogtreecommitdiff
path: root/gnu-efi/gnu-efi-3.0/lib/x86_64
diff options
context:
space:
mode:
Diffstat (limited to 'gnu-efi/gnu-efi-3.0/lib/x86_64')
-rw-r--r--gnu-efi/gnu-efi-3.0/lib/x86_64/callwrap.c40
-rw-r--r--gnu-efi/gnu-efi-3.0/lib/x86_64/efi_stub.S189
-rw-r--r--gnu-efi/gnu-efi-3.0/lib/x86_64/initplat.c28
-rw-r--r--gnu-efi/gnu-efi-3.0/lib/x86_64/math.c181
4 files changed, 438 insertions, 0 deletions
diff --git a/gnu-efi/gnu-efi-3.0/lib/x86_64/callwrap.c b/gnu-efi/gnu-efi-3.0/lib/x86_64/callwrap.c
new file mode 100644
index 0000000..30a5322
--- /dev/null
+++ b/gnu-efi/gnu-efi-3.0/lib/x86_64/callwrap.c
@@ -0,0 +1,40 @@
+/*
+ * Convert SysV calling convention to EFI x86_64 calling convention
+ *
+ * Copyright (C) 2007-2010 Intel Corp
+ * Bibo Mao <bibo.mao@intel.com>
+ * Chandramouli Narayanan <mouli@linux.intel.com>
+ * Huang Ying <ying.huang@intel.com>
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ * - Neither the name of Hewlett-Packard Co. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
+ * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+ * THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* uefi_call_wrapper() is a macro in efibind.h */
diff --git a/gnu-efi/gnu-efi-3.0/lib/x86_64/efi_stub.S b/gnu-efi/gnu-efi-3.0/lib/x86_64/efi_stub.S
new file mode 100644
index 0000000..b431255
--- /dev/null
+++ b/gnu-efi/gnu-efi-3.0/lib/x86_64/efi_stub.S
@@ -0,0 +1,189 @@
+/*
+ * Function calling ABI conversion from Linux to EFI for x86_64
+ *
+ * Copyright (C) 2007 Intel Corp
+ * Bibo Mao <bibo.mao@intel.com>
+ * Huang Ying <ying.huang@intel.com>
+ * Copyright (C) 2012 Felipe Contreras <felipe.contreras@gmail.com>
+ */
+
+#if !defined(HAVE_USE_MS_ABI)
+/*
+ * EFI calling conventions are documented at:
+ * http://msdn.microsoft.com/en-us/library/ms235286%28v=vs.80%29.aspx
+ * ELF calling conventions are documented at:
+ * http://www.x86-64.org/documentation/abi.pdf
+ *
+ * Basically here are the conversion rules:
+ * a) our function pointer is in %rdi
+ * b) rsi through r8 (elf) aka rcx through r9 (ms) require stack space
+ * on the MS side even though it's not getting used at all.
+ * c) 8(%rsp) is always aligned to 16 in ELF, so %rsp is shifted 8 bytes extra
+ * d) arguments are as follows: (elf -> ms)
+ * 1) rdi -> rcx (32 saved)
+ * 2) rsi -> rdx (32 saved)
+ * 3) rdx -> r8 (32 saved)
+ * 4) rcx -> r9 (32 saved)
+ * 5) r8 -> 32(%rsp) (32 saved)
+ * 6) r9 -> 40(%rsp) (48 saved)
+ * 7) 8(%rsp) -> 48(%rsp) (48 saved)
+ * 8) 16(%rsp) -> 56(%rsp) (64 saved)
+ * 9) 24(%rsp) -> 64(%rsp) (64 saved)
+ * 10) 32(%rsp) -> 72(%rsp) (80 saved)
+ * e) because the first argument we receive in a thunker is actually the
+ * function to be called, arguments are offset as such:
+ * 0) rdi -> caller
+ * 1) rsi -> rcx (32 saved)
+ * 2) rdx -> rdx (32 saved)
+ * 3) rcx -> r8 (32 saved)
+ * 4) r8 -> r9 (32 saved)
+ * 5) r9 -> 32(%rsp) (32 saved)
+ * 6) 8(%rsp) -> 40(%rsp) (48 saved)
+ * 7) 16(%rsp) -> 48(%rsp) (48 saved)
+ * 8) 24(%rsp) -> 56(%rsp) (64 saved)
+ * 9) 32(%rsp) -> 64(%rsp) (64 saved)
+ * 10) 40(%rsp) -> 72(%rsp) (80 saved)
+ * f) arguments need to be moved in opposite order to avoid clobbering
+ */
+
+#define ENTRY(name) \
+	.globl name; \
+	name:	/* declare a global function entry point */
+
+ENTRY(efi_call0)
+	subq $40, %rsp			/* 32-byte MS shadow area + 8 so %rsp is 16-aligned at the call */
+	call *%rdi			/* %rdi holds the EFI function pointer (rule (e) above) */
+	addq $40, %rsp
+	ret
+
+ENTRY(efi_call1)
+	subq $40, %rsp			/* 32-byte MS shadow area + 8 so %rsp is 16-aligned at the call */
+	mov %rsi, %rcx			/* arg 1: rsi -> rcx */
+	call *%rdi			/* %rdi holds the EFI function pointer */
+	addq $40, %rsp
+	ret
+
+ENTRY(efi_call2)
+	subq $40, %rsp			/* 32-byte MS shadow area + 8 so %rsp is 16-aligned at the call */
+	/* mov %rdx, %rdx */		/* arg 2 is already in the right register */
+	mov %rsi, %rcx			/* arg 1: rsi -> rcx */
+	call *%rdi			/* %rdi holds the EFI function pointer */
+	addq $40, %rsp
+	ret
+
+ENTRY(efi_call3)
+	subq $40, %rsp			/* 32-byte MS shadow area + 8 so %rsp is 16-aligned at the call */
+	mov %rcx, %r8			/* arg 3: rcx -> r8 (moved first to avoid clobbering, rule (f)) */
+	/* mov %rdx, %rdx */		/* arg 2 is already in the right register */
+	mov %rsi, %rcx			/* arg 1: rsi -> rcx */
+	call *%rdi			/* %rdi holds the EFI function pointer */
+	addq $40, %rsp
+	ret
+
+ENTRY(efi_call4)
+ subq $40, %rsp
+ mov %r8, %r9
+ mov %rcx, %r8
+ /* mov %rdx, %rdx */
+ mov %rsi, %rcx
+ call *%rdi
+ addq $40, %rsp
+ ret
+
+ENTRY(efi_call5)
+ subq $40, %rsp
+ mov %r9, 32(%rsp)
+ mov %r8, %r9
+ mov %rcx, %r8
+ /* mov %rdx, %rdx */
+ mov %rsi, %rcx
+ call *%rdi
+ addq $40, %rsp
+ ret
+
+ENTRY(efi_call6)
+ subq $56, %rsp
+ mov 56+8(%rsp), %rax
+ mov %rax, 40(%rsp)
+ mov %r9, 32(%rsp)
+ mov %r8, %r9
+ mov %rcx, %r8
+ /* mov %rdx, %rdx */
+ mov %rsi, %rcx
+ call *%rdi
+ addq $56, %rsp
+ ret
+
+ENTRY(efi_call7)
+ subq $56, %rsp
+ mov 56+16(%rsp), %rax
+ mov %rax, 48(%rsp)
+ mov 56+8(%rsp), %rax
+ mov %rax, 40(%rsp)
+ mov %r9, 32(%rsp)
+ mov %r8, %r9
+ mov %rcx, %r8
+ /* mov %rdx, %rdx */
+ mov %rsi, %rcx
+ call *%rdi
+ addq $56, %rsp
+ ret
+
+ENTRY(efi_call8)
+ subq $72, %rsp
+ mov 72+24(%rsp), %rax
+ mov %rax, 56(%rsp)
+ mov 72+16(%rsp), %rax
+ mov %rax, 48(%rsp)
+ mov 72+8(%rsp), %rax
+ mov %rax, 40(%rsp)
+ mov %r9, 32(%rsp)
+ mov %r8, %r9
+ mov %rcx, %r8
+ /* mov %rdx, %rdx */
+ mov %rsi, %rcx
+ call *%rdi
+ addq $72, %rsp
+ ret
+
+ENTRY(efi_call9)
+ subq $72, %rsp
+ mov 72+32(%rsp), %rax
+ mov %rax, 64(%rsp)
+ mov 72+24(%rsp), %rax
+ mov %rax, 56(%rsp)
+ mov 72+16(%rsp), %rax
+ mov %rax, 48(%rsp)
+ mov 72+8(%rsp), %rax
+ mov %rax, 40(%rsp)
+ mov %r9, 32(%rsp)
+ mov %r8, %r9
+ mov %rcx, %r8
+ /* mov %rdx, %rdx */
+ mov %rsi, %rcx
+ call *%rdi
+ addq $72, %rsp
+ ret
+
+ENTRY(efi_call10)
+ subq $88, %rsp
+ mov 88+40(%rsp), %rax
+ mov %rax, 72(%rsp)
+ mov 88+32(%rsp), %rax
+ mov %rax, 64(%rsp)
+ mov 88+24(%rsp), %rax
+ mov %rax, 56(%rsp)
+ mov 88+16(%rsp), %rax
+ mov %rax, 48(%rsp)
+ mov 88+8(%rsp), %rax
+ mov %rax, 40(%rsp)
+ mov %r9, 32(%rsp)
+ mov %r8, %r9
+ mov %rcx, %r8
+ /* mov %rdx, %rdx */
+ mov %rsi, %rcx
+ call *%rdi
+ addq $88, %rsp
+ ret
+
+#endif
diff --git a/gnu-efi/gnu-efi-3.0/lib/x86_64/initplat.c b/gnu-efi/gnu-efi-3.0/lib/x86_64/initplat.c
new file mode 100644
index 0000000..1e6ea82
--- /dev/null
+++ b/gnu-efi/gnu-efi-3.0/lib/x86_64/initplat.c
@@ -0,0 +1,28 @@
+/*++
+
+Copyright (c) 1998 Intel Corporation
+
+Module Name:
+
+ initplat.c
+
+Abstract:
+
+
+
+
+Revision History
+
+--*/
+
+#include "lib.h"
+
+VOID
+InitializeLibPlatform (
+ IN EFI_HANDLE ImageHandle,
+ IN EFI_SYSTEM_TABLE *SystemTable
+ )
+
+{
+}
+
diff --git a/gnu-efi/gnu-efi-3.0/lib/x86_64/math.c b/gnu-efi/gnu-efi-3.0/lib/x86_64/math.c
new file mode 100644
index 0000000..4f40388
--- /dev/null
+++ b/gnu-efi/gnu-efi-3.0/lib/x86_64/math.c
@@ -0,0 +1,181 @@
+/*++
+
+Copyright (c) 1998 Intel Corporation
+
+Module Name:
+
+ math.c
+
+Abstract:
+
+
+
+
+Revision History
+
+--*/
+
+#include "lib.h"
+
+
+//
+// Declare runtime functions
+//
+
+#ifdef RUNTIME_CODE
+#ifndef __GNUC__
+#pragma RUNTIME_CODE(LShiftU64)
+#pragma RUNTIME_CODE(RShiftU64)
+#pragma RUNTIME_CODE(MultU64x32)
+#pragma RUNTIME_CODE(DivU64x32)
+#endif
+#endif
+
+//
+//
+//
+
+UINT64
+LShiftU64 (
+ IN UINT64 Operand,
+ IN UINTN Count
+ )
+// Left shift 64bit by 32bit and get a 64bit result
+{
+#ifdef __GNUC__
+ return Operand << Count;
+#else
+ UINT64 Result;
+ _asm {
+ mov eax, dword ptr Operand[0]
+ mov edx, dword ptr Operand[4]
+ mov ecx, Count
+ and ecx, 63
+
+ shld edx, eax, cl
+ shl eax, cl
+
+ cmp ecx, 32
+ jc short ls10
+
+ mov edx, eax
+ xor eax, eax
+
+ls10:
+ mov dword ptr Result[0], eax
+ mov dword ptr Result[4], edx
+ }
+
+ return Result;
+#endif
+}
+
+UINT64
+RShiftU64 (
+ IN UINT64 Operand,
+ IN UINTN Count
+ )
+// Right shift 64bit by 32bit and get a 64bit result
+{
+#ifdef __GNUC__
+ return Operand >> Count;
+#else
+ UINT64 Result;
+ _asm {
+ mov eax, dword ptr Operand[0]
+ mov edx, dword ptr Operand[4]
+ mov ecx, Count
+ and ecx, 63
+
+ shrd eax, edx, cl
+ shr edx, cl
+
+ cmp ecx, 32
+ jc short rs10
+
+ mov eax, edx
+ xor edx, edx
+
+rs10:
+ mov dword ptr Result[0], eax
+ mov dword ptr Result[4], edx
+ }
+
+ return Result;
+#endif
+}
+
+
+UINT64
+MultU64x32 (
+ IN UINT64 Multiplicand,
+ IN UINTN Multiplier
+ )
+// Multiple 64bit by 32bit and get a 64bit result
+{
+#ifdef __GNUC__
+ return Multiplicand * Multiplier;
+#else
+ UINT64 Result;
+ _asm {
+ mov eax, dword ptr Multiplicand[0]
+ mul Multiplier
+ mov dword ptr Result[0], eax
+ mov dword ptr Result[4], edx
+ mov eax, dword ptr Multiplicand[4]
+ mul Multiplier
+ add dword ptr Result[4], eax
+ }
+
+ return Result;
+#endif
+}
+
+UINT64
+DivU64x32 (
+ IN UINT64 Dividend,
+ IN UINTN Divisor,
+ OUT UINTN *Remainder OPTIONAL
+ )
+// divide 64bit by 32bit and get a 64bit result
+// N.B. only works for 31bit divisors!!
+{
+#ifdef __GNUC__
+ if (Remainder)
+ *Remainder = Dividend % Divisor;
+ return Dividend / Divisor;
+#else
+ UINT32 Rem;
+ UINT32 bit;
+
+ ASSERT (Divisor != 0);
+ ASSERT ((Divisor >> 31) == 0);
+
+ //
+ // For each bit in the dividend
+ //
+
+ Rem = 0;
+ for (bit=0; bit < 64; bit++) {
+ _asm {
+ shl dword ptr Dividend[0], 1 ; shift rem:dividend left one
+ rcl dword ptr Dividend[4], 1
+ rcl dword ptr Rem, 1
+
+ mov eax, Rem
+ cmp eax, Divisor ; Is Rem >= Divisor?
+ cmc ; No - do nothing
+ sbb eax, eax ; Else,
+ sub dword ptr Dividend[0], eax ; set low bit in dividen
+ and eax, Divisor ; and
+ sub Rem, eax ; subtract divisor
+ }
+ }
+
+ if (Remainder) {
+ *Remainder = Rem;
+ }
+
+ return Dividend;
+#endif
+}