Initial build system for the STM using cmake and gnu

This commit is contained in:
Eugene D. Myers 2019-08-09 12:50:32 -04:00
parent 757a1c7aa7
commit 5b1df1b92b
20 changed files with 411 additions and 224 deletions

32
Readme.Newbuild Normal file
View File

@ -0,0 +1,32 @@
This is the initial release of the linux based build system for the STM.
For any issues or suggestions, please contact me via GitHub.
Steps for building:
(1) Create a build directory in the Stm directory.
(2) Inside the build directory issue "cmake .. " then make
--> For coreboot do "cmake .. -DBIOS=coreboot" then make
(3) The StmPkg/Core directory will contain the results of the build
--> for coreboot move stm.bin to 3rdparty/blobs/cpu/intel/stm
then configure the stm in the coreboot config menus
rebuild by doing "make clean && make"
Other files in the StmPkg directory:
(1) Stm - an ELF-based load module; this is passed through
	objcopy to produce stm.bin
(2) stm.map - loader map
Current issues:
(1) When the coreboot smi handler is handling an SMI, the STM traps an
attempted illegal access for address 0x177 when the smm_stub.S executes
the fxsave instruction. This will be resolved in a future release.
(2) The STM data areas need to be reorganized to better support D-RTM (or TXT)
and STM teardown. This will be fixed in a future release.
(3) Since this is an initial build of the STM via a different build system, the user is
	cautioned against using this on a production system.

View File

@ -3,7 +3,12 @@ cmake_minimum_required(VERSION 3.5)
project(stm C ASM)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Os -Xlinker -Map=stm.map -s -pie --entry _ModuleEntryPoint -u _ModuleEntryPoint -nostdlib -n -z common-page-size=0x40 -fno-asynchronous-unwind-tables -fno-jump-tables -fPIC -fno-stack-protector -fno-stack-check -include PcdData.h -T ${PROJECT_SOURCE_DIR}/StmPkg/Core/Stm.lds")
if("${BIOS}" STREQUAL "coreboot")
add_definitions( -DCOREBOOT32 )
message("Building for Coreboot")
endif()
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Xlinker -Map=stm.map -Os -falign-functions -ffreestanding -s -pie --entry _ModuleEntryPoint -u _ModuleEntryPoint -nostdlib -n -z common-page-size=0x40 -fno-asynchronous-unwind-tables -fno-jump-tables -fPIC -fno-stack-protector -fno-stack-check -include PcdData.h -T ${PROJECT_SOURCE_DIR}/StmPkg/Core/Stm.lds")
set(CMAKE_ASM_FLAGS "-include BaseAsm.h -fPIC")
set(CMAKE_ASM_CREATE_SHARED_LIBRARY "gcc ${CFLAGS} -o *.o")

View File

@ -14,7 +14,7 @@ add_custom_command(
OUTPUT stm.tmp
DEPENDS Stm
# POST_BUILD
COMMAND ${CMAKE_OBJCOPY} Stm stm.tmp -O binary -S
COMMAND ${CMAKE_OBJCOPY} Stm stm.tmp -O binary --strip-unneeded
COMMAND ${CMAKE_COMMAND} -E copy stm.tmp stm.bin
COMMENT "objcopy Stm to stm.bin"
)

View File

@ -15,7 +15,7 @@
#include "StmInit.h"
#include "PeStm.h"
void HeapList(void);
void HeapList(int id);
//#define HEAPCHECK
/**
@ -83,7 +83,7 @@ AllocatePages (
DEBUG((EFI_D_ERROR, "AllocatePages(0x%x) fail - no freeblock of the correct size\n", Pages));
#ifdef HEAPCHECK
// ReleaseSpinLock (&mHostContextCommon.MemoryLock);
HeapList();
HeapList(1);
#endif
ReleaseSpinLock (&mHostContextCommon.MemoryLock);
return NULL;
@ -130,10 +130,9 @@ AllocatePages (
//Address = mHostContextCommon.HeapTop - STM_PAGES_TO_SIZE(Pages);
//mHostContextCommon.HeapTop = Address;
#ifdef HEAPCHECK
HeapList();
HeapList(2);
#endif
ReleaseSpinLock (&mHostContextCommon.MemoryLock);
ZeroMem ((VOID *)(UINTN)Address, STM_PAGES_TO_SIZE (Pages));
#ifdef HEAPCHECK
DEBUG((EFI_D_ERROR, "****Allocating 0x%x pages at 0x%016llx - %d Cleared\n", Pages, Address, STM_PAGES_TO_SIZE (Pages)));
@ -251,17 +250,17 @@ FreePages (
// mHostContextCommon.HeapTop += STM_PAGES_TO_SIZE(Pages);
// }
#ifdef HEAPCHECK
HeapList();
HeapList(3);
#endif
ReleaseSpinLock (&mHostContextCommon.MemoryLock);
return ;
}
void HeapList(void)
void HeapList(int id)
{
HEAP_HEADER * CurrentBlock = (HEAP_HEADER *)(UINTN) mHostContextCommon.HeapFree;
DEBUG((EFI_D_ERROR, " ***HeapList Start***\n"));
DEBUG((EFI_D_ERROR, " ***HeapList %d Start***\n", id));
while(CurrentBlock != 0L)
{
@ -269,5 +268,5 @@ void HeapList(void)
CurrentBlock = CurrentBlock->NextBlock;
}
DEBUG((EFI_D_ERROR, " ***HeapList Done***\n"));
DEBUG((EFI_D_ERROR, " ***HeapList %d Done***\n", id));
}

View File

@ -15,6 +15,8 @@
#include "StmInit.h"
#include <IndustryStandard/PeImage.h>
#include <elf.h>
/**
This function relocate image at ImageBase.
@ -188,6 +190,53 @@ PeCoffRelocateImageOnTheSpot (
}
}
// elf_process_reloc_table - a very simple relocation processor
//
// it does only X64 relative relocations -- others are flagged
//
// Parameters:
// UINT64 BaseLocation - location of module im memory, in this case start of MSEG
// UINT64 RelativeLocation - for setup - location of module in memory
// for teardown - 0 - to reset values to make sinit happy
//
extern UINT64 _ElfRelocTablesEnd, _ElfRelocTablesStart;
static int elf_process_reloc_table(UINT64 BaseLocation, UINT64 RelativeLocation ) {
int size;
int idx;
Elf64_Rela * reloc_table = (Elf64_Rela *) ((UINT64)&_ElfRelocTablesStart + BaseLocation);
DEBUG((EFI_D_INFO, "ELF Relocation in progress\n"));
size = (UINT64)((UINT64)&_ElfRelocTablesEnd - (UINT64)&_ElfRelocTablesStart)/ sizeof(Elf64_Rela);
DEBUG((EFI_D_INFO, "%d locations to be relocated\n", size));
for(idx = 0; idx < size; idx++)
{
if(ELF64_R_TYPE(reloc_table->r_info) != R_X86_64_RELATIVE)
{
DEBUG((EFI_D_INFO, "WARNING only X86_64 relative relocations done\n"));
}
else
{
UINT64 * OFFSET = (UINT64*) (reloc_table[idx].r_offset + BaseLocation);
*OFFSET = reloc_table[idx].r_addend + RelativeLocation;
#ifdef PRINTRELOC
DEBUG((EFI_D_INFO, "Relocation r_offset %x r_addend %x OFFSET %x *OFFSET %x\n",
reloc_table[idx].r_offset,
reloc_table[idx].r_addend,
OFFSET,
*OFFSET));
#endif
}
}
DEBUG((EFI_D_INFO, "ELF Relocation done\n"));
return 0;
}
/**
This function relocate this STM image.
@ -233,6 +282,7 @@ RelocateStmImage (
//
// Not a valid PE image so Exit
//
elf_process_reloc_table(StmImage, StmImage );
return ;
}

View File

@ -472,9 +472,13 @@ GetMinMsegSize (
{
UINTN MinMsegSize;
MinMsegSize = (STM_PAGES_TO_SIZE (STM_SIZE_TO_PAGES (StmHeader->SwStmHdr.StaticImageSize)) +
//MinMsegSize = (STM_PAGES_TO_SIZE (STM_SIZE_TO_PAGES (StmHeader->SwStmHdr.StaticImageSize)) +
/* we use the page table offset in this calculation because the static memory size does
not account for the data and bss locations which come before the page tables and
are cleared by sinit */
MinMsegSize = StmHeader->HwStmHdr.Cr3Offset +
StmHeader->SwStmHdr.AdditionalDynamicMemorySize +
(StmHeader->SwStmHdr.PerProcDynamicMemorySize + GetVmcsSize () * 2) * mHostContextCommon.CpuNum);
((StmHeader->SwStmHdr.PerProcDynamicMemorySize + GetVmcsSize () * 2) * mHostContextCommon.CpuNum);
return MinMsegSize;
}
@ -594,6 +598,10 @@ InitHeap (
CpuDeadLoop ();
}
DEBUG ((EFI_D_INFO, "Cr30Offset - %08x\n", StmHeader->HwStmHdr.Cr3Offset));
DEBUG ((EFI_D_INFO, "Page Table Start - %08x\n", MsegBase + StmHeader->HwStmHdr.Cr3Offset));
// make sure that the tseg size was set correctly
// right now we will assume a max size of 3mb for mseg, bug, bug
@ -601,12 +609,9 @@ InitHeap (
StmHeader->HwStmHdr.Cr3Offset +
STM_PAGES_TO_SIZE(6)); // reserve 6 page for page table
mHostContextCommon.HeapTop = MsegBase + MsegLength;
#ifdef SizeFromLoad
mHostContextCommon.HeapTop = (UINT64)((UINTN)StmHeader +
STM_PAGES_TO_SIZE (STM_SIZE_TO_PAGES (StmHeader->SwStmHdr.StaticImageSize)) +
StmHeader->SwStmHdr.AdditionalDynamicMemorySize);
#endif
mHostContextCommon.HeapFree = mHostContextCommon.HeapBottom;
HeaderPointer = (HEAP_HEADER *)((UINTN) mHostContextCommon.HeapFree);
@ -631,6 +636,8 @@ InitBasicContext (
mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu = AllocatePages (STM_SIZE_TO_PAGES(sizeof(STM_GUEST_CONTEXT_PER_CPU)) * mHostContextCommon.CpuNum);
}
extern void GetMtrr(); // found in eptinit.c...
/**
This function initialize BSP.
@ -656,17 +663,23 @@ BspInit (
UINT32 BiosStmVer = 100; // initially assume that the BIOS supports v1.0 of the Intel ref
IA32_DESCRIPTOR IdtrLoad;
GetMtrr(); //Needed in various inits
AsmWbinvd(); // make sure it gets out
StmHeader = (STM_HEADER *)(UINTN)((UINT32)AsmReadMsr64(IA32_SMM_MONITOR_CTL_MSR_INDEX) & 0xFFFFF000);
// on a platform that does not start with TXT, cannot assume the data space has been set to zero
ZeroMem(&mHostContextCommon, sizeof(STM_HOST_CONTEXT_COMMON));
ZeroMem(&mGuestContextCommonSmi, sizeof(STM_HOST_CONTEXT_COMMON));
ZeroMem(&mGuestContextCommonSmm, sizeof(STM_HOST_CONTEXT_COMMON) * NUM_PE_TYPE);
InitHeap (StmHeader);
// after that we can use mHostContextCommon
InitializeSpinLock (&mHostContextCommon.DebugLock);
// after that we can use DEBUG
DEBUG ((EFI_D_ERROR, " ********************** STM/PE *********************\n"));
DEBUG ((EFI_D_INFO, " ********************** STM/PE *********************\n"));
DEBUG ((EFI_D_INFO, "!!!STM build time - %a %a!!!\n", (CHAR8 *)__DATE__, (CHAR8 *)__TIME__));
DEBUG ((EFI_D_INFO, "!!!STM Relocation DONE!!!\n"));
DEBUG ((EFI_D_INFO, "!!!Enter StmInit (BSP)!!! - %d (%x)\n", (UINTN)0, (UINTN)ReadUnaligned32 ((UINT32 *)&Register->Rax)));
@ -781,7 +794,7 @@ BspInit (
DEBUG ((EFI_D_INFO, "TXT Descriptor Signature ERROR - %016lx!\n", TxtProcessorSmmDescriptor->Signature));
CpuDeadLoop ();
}
if(TxtProcessorSmmDescriptor->Size != sizeof(TXT_PROCESSOR_SMM_DESCRIPTOR) + 9) // are we dealing with a .99 Bios
if(TxtProcessorSmmDescriptor->Size == sizeof(TXT_PROCESSOR_SMM_DESCRIPTOR) - 9) // are we dealing with a .99 Bios
{
BiosStmVer = 99; // version .99 has nine less bytes, etc
DEBUG((EFI_D_INFO, "Version .99 Bios detected Found Size: %08x SizeOf %08x\n", TxtProcessorSmmDescriptor->Size, sizeof(TXT_PROCESSOR_SMM_DESCRIPTOR)));
@ -830,7 +843,22 @@ BspInit (
// Check MSEG BASE/SIZE in TXT region
//
mHostContextCommon.StmSize = GetMinMsegSize (StmHeader);
DEBUG ((EFI_D_INFO, "MinMsegSize - %08x!\n", (UINTN)mHostContextCommon.StmSize));
{
UINT64 MsegBase, MsegLength;
INT32 AvailMseg;
if (IsSentryEnabled()) {
GetMsegInfoFromTxt (&MsegBase, &MsegLength);
} else {
GetMsegInfoFromMsr (&MsegBase, &MsegLength);
}
AvailMseg = MsegLength - mHostContextCommon.StmSize;
DEBUG ((EFI_D_INFO, "MinMsegSize - 0x%08x MsegLength 0x%08x AvailMseg 0x%08x \n",
(UINTN)mHostContextCommon.StmSize,
MsegLength,
AvailMseg));
}
if(BiosStmVer == 99)
{
@ -974,6 +1002,7 @@ BspInit (
//
// Initialization done
//
mIsBspInitialized = TRUE;
AsmWbinvd (); // let everyone else know
return ;
@ -995,6 +1024,7 @@ ApInit (
{
X86_REGISTER *Reg;
IA32_DESCRIPTOR IdtrLoad;
while (!mIsBspInitialized) {
//
// Wait here
@ -1102,8 +1132,10 @@ VmcsInit (
UINTN Rflags;
StmHeader = mHostContextCommon.StmHeader;
/* have to use Cr3Offset because StaticImageSize ignores BSS and Data sections */
VmcsBase = (UINTN)StmHeader +
STM_PAGES_TO_SIZE (STM_SIZE_TO_PAGES (StmHeader->SwStmHdr.StaticImageSize)) +
//STM_PAGES_TO_SIZE (STM_SIZE_TO_PAGES (StmHeader->SwStmHdr.StaticImageSize)) +
StmHeader->HwStmHdr.Cr3Offset +
StmHeader->SwStmHdr.AdditionalDynamicMemorySize +
StmHeader->SwStmHdr.PerProcDynamicMemorySize * mHostContextCommon.CpuNum;
VmcsSize = GetVmcsSize();
@ -1115,10 +1147,11 @@ VmcsInit (
DEBUG ((EFI_D_INFO, "%d SmmVmcsPtr - %016lx\n", (UINTN)Index, mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[Index].Vmcs));
AsmVmPtrStore (&CurrentVmcs);
DEBUG ((EFI_D_INFO, "%d CurrentVmcs - %016lx\n", (UINTN)Index, CurrentVmcs));
DEBUG ((EFI_D_INFO, "%d CurrentVmcs - %016lx VmcsSize %x\n", (UINTN)Index, CurrentVmcs, VmcsSize));
if (IsOverlap (CurrentVmcs, VmcsSize, mHostContextCommon.TsegBase, mHostContextCommon.TsegLength)) {
// Overlap TSEG
DEBUG ((EFI_D_ERROR, "%d CurrentVmcs violation - %016lx\n", (UINTN)Index, CurrentVmcs));
DumpVmcsAllField();
CpuDeadLoop() ;
}
Rflags = AsmVmClear (&CurrentVmcs);
@ -1236,7 +1269,6 @@ LaunchBack (
**/
extern void GetMtrr(); // found in eptinit.c...
extern void PrintSmiEnRegister(UINT32 Index); // found in PcPciHandler.c
VOID
InitializeSmmMonitor (
@ -1245,22 +1277,19 @@ InitializeSmmMonitor (
{
UINT32 Index;
GetMtrr(); //Needed in various inits
AsmWbinvd(); // make sure it gets out
Index = GetIndexFromStack (Register);
if (Index == 0) {
// The build process should make sure "virtual address" is same as "file pointer to raw data",
// in final PE/COFF image, so that we can let StmLoad load binrary to memory directly.
// If no, GenStm tool will "load image". So here, we just need "relocate image"
RelocateStmImage (FALSE);
BspInit (Register);
} else {
Index = GetIndexFromStack (Register);
ApInit (Index, Register);
}
//PrintSmiEnRegister(Index); /* debug*/
CommonInit (Index);
VmcsInit (Index);

View File

@ -189,7 +189,8 @@ InitializeSmmVmcs (
Data64 = AsmReadMsr64 (IA32_VMX_ENTRY_CTLS_MSR_INDEX);
VmEntryCtrls.Uint32 = (UINT32)Data64 & (UINT32)RShiftU64 (Data64, 32);
VmEntryCtrls.Bits.Ia32eGuest = mHostContextCommon.HostContextPerCpu[Index].TxtProcessorSmmDescriptor->SmmEntryState.Intel64Mode;
#ifdef COREBOOT32
#if defined(COREBOOT32)
DEBUG((EFI_D_INFO, "Setting up SMI Handler for 32 bit mode\n"));
mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[Index].Efer = 0;//mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[Index].Efer & (~((1 << 9) & (1 << 10))); // turn off IA32 bits // need to find the proper defines
#endif
VmEntryCtrls.Bits.EntryToSmm = 1;
@ -295,7 +296,7 @@ InitializeSmmVmcs (
VmWriteN (VMCS_N_GUEST_CR4_INDEX, VmReadN(VMCS_N_GUEST_CR4_INDEX) | CR4_PSE);
}
}
#ifdef COREBOOT32 // coreboot support
#if defined(COREBOOT32) // coreboot support
#define CR0_CLEAR_FLAGS CR0_CD|CR0_NW|CR0_PG|CR0_WP|CR0_NE|CR0_TS
VmWriteN (VMCS_N_GUEST_CR0_INDEX, ((VmReadN(VMCS_N_GUEST_CR0_INDEX) & ~(CR0_PG)) | CR0_PE ));
VmWriteN (VMCS_N_GUEST_CR4_INDEX, (VmReadN(VMCS_N_GUEST_CR4_INDEX) | CR4_PAE)); // must be set because host address size vmexit control is 1

View File

@ -21,8 +21,9 @@ ASM_GLOBAL ASM_PFX(_ModuleEntryPoint)
.equ STM_API_START, 0x00010001
.equ STM_API_INITIALIZE_PROTECTION, 0x00010007
.equ STM_STACK_SIZE, 0x020000
.equ STM_STACK_SIZE, 0x8000
.align 16
#------------------------------------------------------------------------------
# VOID
# AsmInitializeSmmMonitor (
@ -40,12 +41,16 @@ GoBsp:
# ESP is pointer to stack bottom, NOT top
movl $STM_STACK_SIZE, %eax # eax = STM_STACK_SIZE,
lock xaddl %eax, (%esp) # eax = ThisOffset, ThisOffset += STM_STACK_SIZE (LOCK instruction)
addl $STM_STACK_SIZE, %eax # eax = ThisOffset + STM_STACK_SIZE
addl %eax, %esp # esp += ThisOffset + STM_STACK_SIZE
#
# Jump to C code
#
subq $512, %rsp
fxsave (%rsp)
push %r15
push %r14
push %r13
@ -63,10 +68,11 @@ GoBsp:
push %rcx
movl $STM_API_INITIALIZE_PROTECTION, %eax
push %rax
movq %rsp, %rcx # parameter
subq $0x20, %rsp
movq %rsp, %rcx # parameter 1 for MS
movq %rsp, %rdi # parameter 1 for GNU
subq $0x30, %rsp
call ASM_PFX(InitializeSmmMonitor)
addq $0x20, %rsp
addq $0x30, %rsp
# should never get here
jmp DeadLoop
@ -87,12 +93,17 @@ GoAp:
# ESP is pointer to stack bottom, NOT top
movl $STM_STACK_SIZE, %eax # eax = STM_STACK_SIZE,
lock xaddl %eax, (%esp) # eax = ThisOffset, ThisOffset += STM_STACK_SIZE (LOCK instruction)
addl $STM_STACK_SIZE, %eax # eax = ThisOffset + STM_STACK_SIZE
addl %eax, %esp # esp += ThisOffset + STM_STACK_SIZE
#
# Jump to C code
#
subq $512, %rsp
fxsave (%rsp)
push %r15
push %r14
push %r13
@ -110,10 +121,11 @@ GoAp:
push %rcx
movl $STM_API_START, %eax
push %rax
movq %rsp, %rcx # parameter
subq $0x20, %rsp
movq %rsp, %rcx # parameter #1 for MS
movq %rsp, %rdi # parameter #1 for GNU
subq $0x30, %rsp
call ASM_PFX(InitializeSmmMonitor)
addq $0x20, %rsp
addq $0x30, %rsp
# should never get here
DeadLoop:
jmp .

View File

@ -42,6 +42,7 @@ GoBsp:
; ESP is pointer to stack bottom, NOT top
mov eax, STM_STACK_SIZE ; eax = STM_STACK_SIZE,
lock xadd [esp], eax ; eax = ThisOffset, ThisOffset += STM_STACK_SIZE (LOCK instruction)
add eax, STM_STACK_SIZE ; eax = ThisOffset + STM_STACK_SIZE
add esp, eax ; esp += ThisOffset + STM_STACK_SIZE

View File

@ -21,7 +21,7 @@ ASM_GLOBAL ASM_PFX(_ModuleEntryPoint)
.equ STM_API_START, 0x00010001
.equ STM_API_INITIALIZE_PROTECTION, 0x00010007
.equ STM_STACK_SIZE, 0x020000
.equ STM_STACK_SIZE, 0x8000
#------------------------------------------------------------------------------
# VOID

View File

@ -123,23 +123,26 @@ VOID
Register->Rsp = VmReadN (VMCS_N_GUEST_RSP_INDEX);
CopyMem (Reg, Register, sizeof(X86_REGISTER));
#if 0
DEBUG ((EFI_D_INFO, "%ld - !!!StmHandlerSmi\n", (UINTN)Index));
DEBUG ((EFI_D_INFO, "%ld - !!!StmHandlerSmi InfoBasic %x reason %x \n",
(UINTN)Index,
InfoBasic.Uint32,
InfoBasic.Bits.Reason));
#endif
//
// Dispatch
//
if (InfoBasic.Bits.Reason >= VmExitReasonMax) {
DEBUG ((EFI_D_ERROR, "!!!UnknownReason!!!\n"));
DEBUG ((EFI_D_ERROR, "%ld !!!UnknownReason: %d!!!\n", Index, InfoBasic.Bits.Reason));
DumpVmcsAllField ();
CpuDeadLoop ();
}
mGuestContextCommonSmi.GuestContextPerCpu[Index].InfoBasic.Uint32 = InfoBasic.Uint32;
//
// Call dispatch handler
//
mStmHandlerSmi[InfoBasic.Bits.Reason] (Index);
VmWriteN (VMCS_N_GUEST_RSP_INDEX, Reg->Rsp); // sync RSP

View File

@ -285,7 +285,6 @@ SmiVmcallGetBiosResourcesHandler (
UINTN BiosResourceSize;
UINT32 PageNum;
X86_REGISTER *Reg;
Reg = &mGuestContextCommonSmi.GuestContextPerCpu[Index].Register;
// ECX:EBX - STM_RESOURCE_LIST
@ -985,9 +984,9 @@ SmiVmcallHandler (
STM_STATUS Status;
STM_VMCALL_HANDLER StmVmcallHandler;
UINT64 AddressParameter;
//DEBUG((EFI_D_ERROR, "%ld SmiVmcallHandler - entereda\n", Index));
Reg = &mGuestContextCommonSmi.GuestContextPerCpu[Index].Register;
//DEBUG((EFI_D_ERROR, "%ld SmiVmcallHandler - entered\n", Index));
StmVmcallHandler = GetSmiVmcallHandlerByIndex (ReadUnaligned32 ((UINT32 *)&Reg->Rax));
if (StmVmcallHandler == NULL) {
DEBUG ((EFI_D_INFO, "%ld SmiVmcallHandler - GetSmiVmcallHandlerByIndex- Invalid API entry - %x!\n", Index, (UINTN)ReadUnaligned32 ((UINT32 *)&Reg->Rax)));
@ -999,8 +998,8 @@ SmiVmcallHandler (
Status = ERROR_INVALID_API;
} else {
AddressParameter = ReadUnaligned32 ((UINT32 *)&Reg->Rbx) + LShiftU64 (ReadUnaligned32 ((UINT32 *)&Reg->Rcx), 32);
Status = StmVmcallHandler (Index, AddressParameter);
DEBUG((EFI_D_ERROR, "%ld SmiVmcallHandler done, Status: %x\n", Index, Status));
}
if (Status == STM_SUCCESS) {

View File

@ -316,6 +316,7 @@ L1:
## call into exception handler
movq 8(%rbp), %rcx
movq 8(%rbp), %rdi ##for GCC
movq ASM_PFX(mExternalVectorTablePtr)(%rip), %rax
movq (%rax, %rcx, 8), %rax
orq %rax, %rax
@ -324,6 +325,7 @@ L1:
## Prepare parameter and call
movq %rsp, %rdx
movq %rsp, %rsi # second parameter for GCC
#
# Per X64 calling convention, allocate maximum parameter stack space
# and make sure RSP is 16-byte aligned

View File

@ -37,9 +37,10 @@ ASM_PFX(AsmHostEntrypointSmmPe):
push %rdx
push %rcx
push %rax
movq %rsp, %rcx # parameter
subq $0x20, %rsp
movq %rsp, %rcx # parameter for MS
movq %rdi, %rdi # parameter for GCC
subq $0x30, %rsp
call ASM_PFX(PeStmHandlerSmm)
addq $0x20, %rsp
addq $0x30, %rsp
jmp .

View File

@ -21,6 +21,10 @@ ASM_GLOBAL ASM_PFX(AsmHostEntrypointSmi)
ASM_GLOBAL ASM_PFX(AsmHostEntrypointSmm)
ASM_PFX(AsmHostEntrypointSmi):
subq $512, %rsp
fxsave (%rsp)
push %r15
push %r14
push %r13
@ -37,13 +41,17 @@ ASM_PFX(AsmHostEntrypointSmi):
push %rdx
push %rcx
push %rax
movq %rsp, %rcx # parameter
subq $0x20, %rsp
movq %rsp, %rcx # parameter for MS
movq %rsp, %rdi # parameter for GCC
subq $0x30, %rsp
call ASM_PFX(StmHandlerSmi)
addq $0x20, %rsp
addq $0x30, %rsp
jmp .
ASM_PFX(AsmHostEntrypointSmm):
subq $512, %rsp
fxsave (%rsp)
push %r15
push %r14
push %r13
@ -60,9 +68,10 @@ ASM_PFX(AsmHostEntrypointSmm):
push %rdx
push %rcx
push %rax
movq %rsp, %rcx # parameter
subq $0x20, %rsp
movq %rsp, %rcx # parameter for MS
movq %rsp, %rdi # parameter for GCC
subq $0x30, %rsp
call ASM_PFX(StmHandlerSmm)
addq $0x20, %rsp
addq $0x30, %rsp
jmp .

View File

@ -15,6 +15,11 @@
**/
SizeOfPerProcessorStack = 0x8000;
HeapSize = 0x246000;
PageTableSize = 0x6000;
STM_SMM_REV_ID = 0x80010100;
SECTIONS
{
/* HARDWARE_STM_HEADER */
@ -22,26 +27,27 @@ SECTIONS
.StmHeaderRevision : {LONG(0)}
.MonitorFeatures : {LONG(1)}
.GdtrLimit : {LONG(0x47)}
.GdtrBaseOffset : {LONG(0x1000)}
.GdtrBaseOffset : {LONG(_StmGdtr)}
.CsSelector : {LONG(0x38)}
.EipOffset : {LONG(_ModuleEntryPoint)}
.EspOffset : {LONG(0)}
.Cr3Offset : {LONG(0)}
.EspOffset : {LONG(_StmPageTables + HeapSize + PageTableSize)}
.Cr3Offset : {LONG(_StmPageTables)}
.fill1 : { FILL(0x0); . = ALIGN(2048); }
/* SOFTWARE_STM_HEADER */
.StmSpecVerMajor ALIGN(2048) : {BYTE(1)}
.StmSpecVerMinor : {BYTE(0)}
.Reserved : {SHORT(0)}
.StaticImageSize : {LONG(0)}
.PerProcDynamicMemorySize : {LONG(0x8000)}
.AdditionalDynamicMemorySize : {LONG(0x246000)}
.StmFeatures : {LONG(0)}
.NumberOfRevIDs : {LONG(0)}
.StmSmmRevIDs0 : {LONG(0)}
. = ALIGN(0x1000);
/* page tables are at the end of the static section and the data section */
.StaticImageSize : {LONG(_StmImageEnd)}
.PerProcDynamicMemorySize : {LONG(SizeOfPerProcessorStack)}
.AdditionalDynamicMemorySize : {LONG(HeapSize + PageTableSize)}
.StmFeatures : {LONG(0x3)} /* EPT support and Intel mode 64 support */
.NumberOfRevIDs : {LONG(1)}
.StmSmmRevIDs0 : {LONG(STM_SMM_REV_ID)}
.fill2 : { FILL(0x0); . = ALIGN(0x1000);}
_StmGdtr = .;
/* GDT Entriess */
.LimitLow0 : {SHORT(0)}
.BaseLow0 : {SHORT(0)}
@ -107,21 +113,30 @@ SECTIONS
.BaseHi8 : {BYTE(0)}
. = ALIGN(0x1000);
.fill3 : { FILL(0x0); . = ALIGN(0x1000);}
.text : {
*(.text)
*(.text.*)
}
. = ALIGN(0x20);
.rodata : {
*(.rodata.*)
} =0x0
_StmImageEnd = .;
. = ALIGN(0X1000);
.data : {
*(.data)
*(.data.*)
}
. = ALIGN(0x20);
.rdata : {
*(.rdata)
*(.rdata.*)
}
. = ALIGN(0x20);
.bss : {
*(.bss)
*(.bss.*)
*(COMMON)
/* dummy */
/* LONG (0x12345678) */
@ -130,6 +145,14 @@ SECTIONS
.edata : {
}
. = ALIGN(0x20);
_ElfRelocTablesStart = .;
.reloc : {
*(.rela.*)
}
_ElfRelocTablesEnd = .;
. = ALIGN(0x1000);
_StmPageTables = .;
. = . + 24K; /* 6 pages for STM page tables, filled by BIOS and/or SINIT */
}

View File

@ -78,6 +78,12 @@ VERIFY_SIZE_OF (CHAR16, 2);
#define GLOBAL_REMOVE_IF_UNREFERENCED
#endif
#ifdef __GNUC__
#define MSABI __attribute__((ms_abi))
#else
#define MSABI
#endif
//
// For symbol name in GNU assembly code, an extra "_" is necessary
//

View File

@ -17,6 +17,14 @@ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#ifndef __BASE_LIB__
#define __BASE_LIB__
// GCC
#ifdef __GNUC__
#define MSABI __attribute__((ms_abi))
#else
#define MSABI
#endif
//
// Definitions for architecture-specific types
//
@ -3455,7 +3463,7 @@ EFIAPI
AsmFlushCacheRange (
IN VOID *Address,
IN UINTN Length
);
) MSABI;
/**
@ -3473,7 +3481,7 @@ UINT64
EFIAPI
AsmFc (
IN UINT64 Address
);
) MSABI;
/**
@ -3491,7 +3499,7 @@ UINT64
EFIAPI
AsmFci (
IN UINT64 Address
);
) MSABI;
/**
@ -3514,7 +3522,7 @@ UINT64
EFIAPI
AsmReadCpuid (
IN UINT8 Index
);
) MSABI;
/**
@ -3528,7 +3536,7 @@ UINT64
EFIAPI
AsmReadPsr (
VOID
);
) MSABI;
/**
@ -3548,7 +3556,7 @@ UINT64
EFIAPI
AsmWritePsr (
IN UINT64 Value
);
) MSABI;
/**
@ -3564,7 +3572,7 @@ UINT64
EFIAPI
AsmReadKr0 (
VOID
);
) MSABI;
/**
@ -3580,7 +3588,7 @@ UINT64
EFIAPI
AsmReadKr1 (
VOID
);
) MSABI;
/**
@ -3596,7 +3604,7 @@ UINT64
EFIAPI
AsmReadKr2 (
VOID
);
) MSABI;
/**
@ -3612,7 +3620,7 @@ UINT64
EFIAPI
AsmReadKr3 (
VOID
);
) MSABI;
/**
@ -3628,7 +3636,7 @@ UINT64
EFIAPI
AsmReadKr4 (
VOID
);
) MSABI;
/**
@ -3644,7 +3652,7 @@ UINT64
EFIAPI
AsmReadKr5 (
VOID
);
) MSABI;
/**
@ -3660,7 +3668,7 @@ UINT64
EFIAPI
AsmReadKr6 (
VOID
);
) MSABI;
/**
@ -3676,7 +3684,7 @@ UINT64
EFIAPI
AsmReadKr7 (
VOID
);
) MSABI;
/**
@ -3694,7 +3702,7 @@ UINT64
EFIAPI
AsmWriteKr0 (
IN UINT64 Value
);
) MSABI;
/**
@ -3712,7 +3720,7 @@ UINT64
EFIAPI
AsmWriteKr1 (
IN UINT64 Value
);
) MSABI;
/**
@ -3730,7 +3738,7 @@ UINT64
EFIAPI
AsmWriteKr2 (
IN UINT64 Value
);
) MSABI;
/**
@ -3748,7 +3756,7 @@ UINT64
EFIAPI
AsmWriteKr3 (
IN UINT64 Value
);
) MSABI;
/**
@ -3766,7 +3774,7 @@ UINT64
EFIAPI
AsmWriteKr4 (
IN UINT64 Value
);
) MSABI;
/**
@ -3784,7 +3792,7 @@ UINT64
EFIAPI
AsmWriteKr5 (
IN UINT64 Value
);
) MSABI;
/**
@ -3802,7 +3810,7 @@ UINT64
EFIAPI
AsmWriteKr6 (
IN UINT64 Value
);
) MSABI;
/**
@ -3820,7 +3828,7 @@ UINT64
EFIAPI
AsmWriteKr7 (
IN UINT64 Value
);
) MSABI;
/**
@ -5105,7 +5113,7 @@ AsmCpuid (
OUT UINT32 *Ebx, OPTIONAL
OUT UINT32 *Ecx, OPTIONAL
OUT UINT32 *Edx OPTIONAL
);
) MSABI;
/**
@ -5149,7 +5157,7 @@ AsmCpuidEx (
OUT UINT32 *Ebx, OPTIONAL
OUT UINT32 *Ecx, OPTIONAL
OUT UINT32 *Edx OPTIONAL
);
) MSABI;
/**
@ -5163,7 +5171,7 @@ VOID
EFIAPI
AsmDisableCache (
VOID
);
) MSABI;
/**
@ -5177,7 +5185,7 @@ VOID
EFIAPI
AsmEnableCache (
VOID
);
) MSABI;
/**
@ -6532,7 +6540,6 @@ AsmReadMm2 (
VOID
) ;
/**
Reads the current value of 64-bit MMX Register #3 (MM3).
@ -6916,7 +6923,7 @@ AsmEnablePaging32 (
IN VOID *Context1, OPTIONAL
IN VOID *Context2, OPTIONAL
IN VOID *NewStack
);
) MSABI;
/**
@ -6960,7 +6967,7 @@ AsmDisablePaging32 (
IN VOID *Context1, OPTIONAL
IN VOID *Context2, OPTIONAL
IN VOID *NewStack
);
) MSABI;
/**
@ -7003,7 +7010,7 @@ AsmEnablePaging64 (
IN UINT64 Context1, OPTIONAL
IN UINT64 Context2, OPTIONAL
IN UINT64 NewStack
);
) MSABI;
/**
@ -7044,7 +7051,7 @@ AsmDisablePaging64 (
IN UINT32 Context1, OPTIONAL
IN UINT32 Context2, OPTIONAL
IN UINT32 NewStack
);
) MSABI;
//
@ -7077,7 +7084,7 @@ EFIAPI
AsmGetThunk16Properties (
OUT UINT32 *RealModeBufferSize,
OUT UINT32 *ExtraStackSize
);
) MSABI;
/**
@ -7098,7 +7105,7 @@ VOID
EFIAPI
AsmPrepareThunk16 (
OUT THUNK_CONTEXT *ThunkContext
);
) MSABI;
/**
@ -7158,7 +7165,7 @@ VOID
EFIAPI
AsmThunk16 (
IN OUT THUNK_CONTEXT *ThunkContext
);
) MSABI;
/**
@ -7185,7 +7192,7 @@ VOID
EFIAPI
AsmPrepareAndThunk16 (
IN OUT THUNK_CONTEXT *ThunkContext
);
) MSABI;
#endif
#endif

View File

@ -14,6 +14,7 @@
#include <Base.h>
#include <Library/BaseLib.h>
#include <Library/DebugLib.h>
/**
Executes an infinite loop.
@ -31,5 +32,6 @@ CpuDeadLoop (
)
{
volatile UINTN Index;
DEBUG((EFI_D_INFO, "In CpuDeadLoop\n"));
for (Index = 0; Index == 0;);
}

View File

@ -15,6 +15,12 @@
#ifndef _VMX_H_
#define _VMX_H_
#ifdef __GNUC__
#define MSABI __attribute__((ms_abi))
#else
#define MSABI
#endif
#include "CpuArchSpecific.h"
#define IA32_SMM_MONITOR_CTL_MSR_INDEX 0x9B
@ -842,7 +848,7 @@ AsmVmxOff (
UINTN
AsmVmClear (
IN UINT64 *Vmcs
);
) MSABI;
/**
@ -856,7 +862,7 @@ AsmVmClear (
UINTN
AsmVmPtrStore (
IN UINT64 *Vmcs
);
) MSABI;
/**
@ -870,7 +876,7 @@ AsmVmPtrStore (
UINTN
AsmVmPtrLoad (
IN UINT64 *Vmcs
);
) MSABI;
/**
@ -884,7 +890,7 @@ AsmVmPtrLoad (
UINTN
AsmVmLaunch (
IN X86_REGISTER *Register
);
) MSABI;
/**
@ -898,7 +904,7 @@ AsmVmLaunch (
UINTN
AsmVmResume (
IN X86_REGISTER *Register
);
) MSABI;
/**
@ -914,7 +920,7 @@ UINTN
AsmVmRead (
IN UINT32 Index,
OUT UINTN *Data
);
) MSABI;
/**
@ -930,7 +936,7 @@ UINTN
AsmVmWrite (
IN UINT32 Index,
IN UINTN Data
);
) MSABI;
typedef struct {
UINT64 Lo;
@ -954,7 +960,7 @@ UINTN
AsmInvEpt (
IN UINTN Type,
IN UINT_128 *Addr
);
) MSABI;
#define INVVPID_TYPE_INDIVIDUAL_ADDRESS_INVALIDATION 1
#define INVVPID_TYPE_SINGLE_CONTEXT_INVALIDATION 2
@ -975,7 +981,7 @@ UINTN
AsmInvVpid (
IN UINTN Type,
IN UINT_128 *Addr
);
) MSABI;
/**
@ -995,6 +1001,6 @@ AsmVmCall (
IN UINT32 Ebx,
IN UINT32 Ecx,
IN UINT32 Edx
);
) MSABI;
#endif