Initial distro

This commit is contained in:
Eugene Myers 2018-12-17 15:16:59 -05:00
parent 2696e3dc80
commit 853f07f483
68 changed files with 9136 additions and 1574 deletions

199
Readme.STMPE Normal file
View File

@ -0,0 +1,199 @@
STMPE Additions to the STM API
AddPEVMtempVMCALL:
AddPEVMpermVMCALL:
Create a protected execution (PE) virtual machine (VM) and load a
module to be executed in that VM.
AddPEVMtempVMCALL:
The module is temporarily loaded in a VM,
executed, and the VM torn down afterwards. i.e. protected execution.
Input and Return register values: same as AddPEVMpermVMCALL.
AddPEVMpermVMCALL:
The module is permanently loaded in a VM and
persists. It is expected that this will be allowed only when the MLE
is initially brought up to allow for a measurement engine to be
loaded. This module is executed by the RunPeVmVMCALL.
Input registers:
EAX = STM_API_ADD_TEMP_PE or STM_API_ADD_PERM_PE_VM
EBX = low 32 bits of physical address of a caller created structure
containing module load information (struct module_info)
ECX = high 32 bits of physical address of a caller created structure
containing module load information (struct module_info)
Return register values:
CF = 0: No error, EAX set to STM_SUCCESS (0)
CF = 1: An error occurred, EAX holds relevant error value
EAX: Error/success return
RunPeVmVMCALL
This call runs the VM that was created via the AddSTMVmVMCALL vmcall.
Entrypoint into the module will be that defined during the
AddSTMVmVMCALL.
Input registers:
EAX = RunPeVmVMCALL
Output registers:
CF = 0: No error, EAX set to STM_SUCCESS (0)
CF = 1: An error occurred, EAX holds relevant error value EAX = -1 (actual
error values TBD)
EndAddPeVmVMCALL
Turns off the ability to add a permanent PE VM. This function is normally
run at the end of the MLE's processing to ensure that no permanent
untrusted entity resides in the STM.
Bit definitions for the additions to the STM API
STM_API_ADD_TEMP_PE_VM 0x00010009
STM_API_ADD_PERM_PE_VM 0x0001000a
STM_API_END_PERM_PE_VM 0x0001000c
STM_API_RUN_PERM_PE_VM 0x0001000b
STM_API_ADD_PERM_PE_VM_NORUN 0x0001000d
Information block used to pass information about the PE module to the STM:
struct module_info {
u64 module_address; - physical address of VM/PE (SMM) module to load
u64 module_load_address; - guest-physical load address of module (must be
- within the range of address_space_start and
- address_space_start+addresss_space_size)
u32 module_size; - size of module in bytes
u32 module_entry_point; - relative offset from start of module
u64 address_space_start; - guest-physical address space start of module
u32 address_space_size; - module size in bytes
u32 vmconfig; - flags specifying how VM supporting module should
- be configured (see below for definitions)
u64 cr3_load; - guest-physical address to initialize cr3 to if
- paging enabled
u64 shared_page; - guest-physical address of a shared page (must not
- be in SMRAM space). segment will have R/W
- permission. value placed in RBX register of STM
- module.
struct region *segment; - guest-physical address of an array of R/O
- segments terminated with a null element.
- value placed in RCX register of STM module.
u32 shared_page_size; - size of shared page
u32 DoNotClearSize; - block at beginning of data not to be cleared
u64 ModuleDataSection; - start address of the module data section
} __attribute__((__packed__));
vmconfig settings:
SET_CS_L (1<<13) - cs.l set 64bit mode for cs (req. SET_IA32E is set)
SET_CS_D (1<<14) - cs.d default mode (0: 16bit seg, 1: 32-bit)
must be set if SET_CS_L is set
SET_IA32E (1<<15) - sets IA32e mode; when set, cr0.pg, cr0.pe, and
cr0.pae will be set as well
SET_CR0_PG (1<<31) - set cr0.pg
SET_CR0_PE (1<<0) - set cr0.pe
SET_CR4_PAE (1<<3) - set cr4.pae
SET_PERM_VM (1<<2) - VM can be re-executed using
STM_API_RUN_VM(RunPeVmVMCALL)
SET_PERM_VM_RUN_ONCE (1<<20) - run perm VM only once, then breakdown
SET_PERM_VM_CRASH_BREAKDOWN (1<<21) - if Perm VM crashes, then breakdown
SET_PERM_VM_RUN_SMI (1<<22) - Run VM/PE via SMI timer
SET_VM_CLEAR_MEMORY (1<<23) - Clear heap before each run
SET_VM_TEXT_RW (1<<24) - set VM/PE text space as RW
SET_VM_EXEC_HEAP (1<<25) - allow execution in Heap Space
SET_PERM_VM_HANDLE_INT (1<<26) - allow perm VM/PE to handle internal interrupts
error returns - placed in eax upon return to caller:
PE_SUCCESS 0 - PE setup/ran successfully
PE_FAIL -1 - catchall PE ERROR
PE_SPACE_TOO_LARGE 0x80040001 - requested memory space too large
PE_MODULE_ADDRESS_TOO_LOW 0x80040002 - PE module start below address space start
PE_MODULE_TOO_LARGE 0x80040003 - PE module too large for address space
(or located such that it overflows
the end of the address space)
PE_NO_PT_SPACE 0x80040004 - not enough space left for PE VM
page tables
PE_NO_RL_SPACE 0x80040005 - not enough space left for resource
list
PE_MEMORY_AC_SETUP_FAILURE 0x80040006 - could not setup accesses to PE space
(internal error)
PE_SHARED_MEMORY_SETUP_ERROR 0x80040007 - could not setup shared memory
PE_MODULE_MAP_FAILURE 0x80040008 - could not map in the address space
PE_SHARED_MAP_FAILURE 0x80040009 - could not map in the shared page
PE_VMCS_ALLOC_FAIL 0x8004000A - could not allocate VMCS memory
PE_VMLAUNCH_ERROR 0x8004000B - attempted to launch PE VM with bad
- guest VM state
PE_VM_BAD_ACCESS 0x8004000C - PE VM attempted to access protected
memory out of bounds
PE_VM_SETUP_ERROR_D_L 0x8004000D - CS_D and CS_L cannot be set to one at
the same time
PE_VM_SETUP_ERROR_IA32E_D 0x8004000E - SET_IA32E must be set when CS_L is
set
PE_VM_TRIPLE_FAULT 0x8004000F - PE VM crashed with a triple fault
PE_VM_PAGE_FAULT 0x80040010 - PE VM crashed with a page fault
Region list structure
struct region {
u64 address; - page aligned physical address
u32 size; - size of segment
u32 padding; - align structure to 64-bits
} __attribute__((__packed__));
Notes:
Interrupts and faults:
If the PE module encounters a fault, the VM/PE will be terminated. A future
STM/PE version will allow for faults to be handled by the PE module. In this
instance, the PE module will need to properly setup an IDT along with the
necessary handlers.
The VM/PE will not receive external interrupts.
Memory allocation errors:
Allocation errors for the PE module most likely mean that there is not enough
contiguous memory to fit the PE module in. The memory allocation could be
modified to allocate memory in smaller blocks from the STM heap, but as more
pages are allocated, the higher the overhead necessary for page tables.
Memory allocation errors for the overhead really mean that you should consider
reducing the size of the PE module. As at this point heap memory may
be getting so limited as to affect the operation of the STM itself.
To prevent this, the size of the PE module should be limited to
ensure that there is enough heap space for the STM to function.
MSR and I/O access:
Read and write access to the IA32_EFER_MSR is allowed, write access
to other MSRs are ignored and read attempts to other MSRs will return 0.
IO ports:
Access attempts to I/O ports are generally ignored except for 0x3D8 and 0x3F8.
0x3D8 and 0x3F8 (aka COM2 and COM1 respectively) can be used to send character strings through the STM console port.
No formatting, etc. is done (any byte combinations that can pose a security issue will be dealt with as necessary).
for debugging a VM/PE debugging output can be sent through:
RDX: port - 0x3F8 or 0x3D8
RCX: number of bytes (maximum length is 200. Strings longer than that will be truncated)
DS:ESI location in PE/VM where output is located
Use either instruction OUTSB/OUTSW/OUTSD (0x6E or 0x6F)
Note: do not use a loop with a rep statement (which is what is normally done)

0
Stm/Readme.STMPE.txt Executable file
View File

View File

@ -19,6 +19,13 @@ typedef struct {
CHAR8 *Str;
} DATA_STR;
extern UINTN
TranslateEPTGuestToHost (
IN UINT64 EptPointer,
IN UINTN Addr,
OUT EPT_ENTRY **EntryPtr OPTIONAL
);
GLOBAL_REMOVE_IF_UNREFERENCED
DATA_STR mVmxCapabilityMsrStr[] = {
{IA32_VMX_BASIC_MSR_INDEX, "VMX_BASIC_MSR "},
@ -503,4 +510,59 @@ DumpRegContext (
for (Index = 0; Index < sizeof(mX86RegisterStr)/sizeof(mX86RegisterStr[0]) / (sizeof(UINT64)/sizeof(UINTN)); Index++) {
DumpRegFiled (Reg, &mX86RegisterStr[Index]);
}
}
/**
This function dumps the guest stack.
@param Index - CPU Index
Assumes 1-1 mapping in the guest page tables between the
guest virtual address and the guest physical address
**/
VOID DumpGuestStack(IN UINT32 Index)
{
UINT32 VmType = mHostContextCommon.HostContextPerCpu[Index].GuestVmType;
UINT32 i;
UINT64 Location;
UINT64 RelLoc;
UINTN StackTopBase = (UINTN)VmReadN (VMCS_N_GUEST_RSP_INDEX);
UINTN StackTop;
UINTN StackLen;
UINT64 Stack[20]; // will limit output to at most the first 20 stack elements (64bit)
StackTop = TranslateEPTGuestToHost(mGuestContextCommonSmm[VmType].EptPointer.Uint64, StackTopBase, 0L);
// make sure that stack exists within the EPT tables
// otherwise print an error message
if(StackTop == 0)
{
DEBUG ((EFI_D_ERROR, "%ld DumpGuestStack - Stack registers have bad addresses\n", Index));
return;
}
StackLen = (((UINT64)StackTop + 0x1000) & (~0xFFF)) - (UINT64)StackTop;
if(StackLen > 160)
StackLen = 160; // max stackdump of 20 64-bit words
CopyMem (Stack, (VOID *)(UINTN)StackTop, StackLen);
DEBUG((EFI_D_ERROR, "%ld Stacktrace\n", Index));
Location = StackTopBase;
RelLoc = 0;
for(i = 0; RelLoc < StackLen; i++)
{
DEBUG ((EFI_D_INFO, "%ld: %016lx %016lx\n", Index, Location, Stack[i] ));
RelLoc =+ 8;
}
DEBUG((EFI_D_ERROR, "%ld End Stacktrace\n", Index));
}

View File

@ -14,6 +14,18 @@
#include "Stm.h"
/**
This function dump event log entry.
@param LogEntry Log entry
**/
VOID
DumpEventLogEntry (
IN STM_LOG_ENTRY *LogEntry
);
/**
This function clear event log.
@ -160,6 +172,7 @@ AddEventLog (
LogEntry = GetNextEmpty (EventLog);
if (LogEntry == NULL) {
ReleaseSpinLock (&mHostContextCommon.EventLog.EventLogLock);
DEBUG((EFI_D_ERROR, "No Log entries available\n"));
return ;
}
if ((LogEntry->Hdr.Valid == 1) && (LogEntry->Hdr.ReadByMle == 0)) {
@ -176,6 +189,7 @@ AddEventLog (
AsmTestAndReset (16, &LogEntry->Hdr.Type);
ReleaseSpinLock (&mHostContextCommon.EventLog.EventLogLock);
DumpEventLogEntry(LogEntry); /*DEBUG*/
return ;
}
@ -372,7 +386,7 @@ DumpEventLog (
if (LogEntry->Hdr.Valid == 0) {
continue ;
}
DumpEventLogEntry (LogEntry);
DumpEventLogEntry(LogEntry);
}
}
}

View File

@ -13,6 +13,17 @@
**/
#include "StmInit.h"
#include "PeStm.h"
#define PAGING_PAE_INDEX_MASK 0x1FF
#define PAGING_4K_ADDRESS_MASK_64 0x000FFFFFFFFFF000ull
#define PAGING_2M_ADDRESS_MASK_64 0x000FFFFFFFE00000ull
#define PAGING_1G_ADDRESS_MASK_64 0x000FFFFFC0000000ull
#define PAGING_4K_MASK 0xFFF
#define PAGING_2M_MASK 0x1FFFFF
#define PAGING_1G_MASK 0x3FFFFFFF
MRTT_INFO mMtrrInfo;
@ -47,6 +58,22 @@ EptDumpPageTable (
IN EPT_POINTER *EptPointer
);
/**
This function creates an individual EPT page table entry.
Accounts for 1GB and 2MB pages
@param EptPointer EPT pointer
@param BaseAddress address within the PTE to be created
**/
UINT64
EptAllocatePte(
IN UINT64 EptPointer,
IN UINT64 BaseAddress,
IN UINT64 PhysAddress,
IN UINT64 PhysSize);
/**
This function return TSEG information in TXT heap region.
@ -288,170 +315,169 @@ GetMemoryType (
@param Xa Execute access
**/
static UINT32 SmrrBase;
static UINT32 SmrrLength;
VOID
EptCreatePageTable (
OUT EPT_POINTER *EptPointer,
IN UINT32 Xa
)
{
EPT_ENTRY *L1PageTable;
EPT_ENTRY *L2PageTable;
EPT_ENTRY *L3PageTable;
EPT_ENTRY *L4PageTable;
UINTN Index1;
UINTN Index2;
UINTN Index3;
UINTN Index4;
UINT64 BaseAddress;
UINT8 MemoryType;
UINT32 SmrrBase;
UINT32 SmrrLength;
UINTN NumberOfPml4EntriesNeeded;
UINTN NumberOfPdpEntriesNeeded;
if (mHostContextCommon.PhysicalAddressBits <= 39) {
NumberOfPml4EntriesNeeded = 1;
NumberOfPdpEntriesNeeded = (UINTN)LShiftU64 (1, mHostContextCommon.PhysicalAddressBits - 30);
} else {
NumberOfPml4EntriesNeeded = (UINTN)LShiftU64 (1, mHostContextCommon.PhysicalAddressBits - 39);
NumberOfPdpEntriesNeeded = 512;
}
SmrrBase = (UINT32)mMtrrInfo.SmrrBase & (UINT32)mMtrrInfo.SmrrMask & 0xFFFFF000;
SmrrLength = (UINT32)mMtrrInfo.SmrrMask & 0xFFFFF000;
SmrrLength = ~SmrrLength + 1;
//
// Setup below 4G
//
L4PageTable = (EPT_ENTRY *)AllocatePages (6);
ZeroMem (L4PageTable, STM_PAGES_TO_SIZE(6));
L4PageTable = (EPT_ENTRY *)AllocatePages (1);
ZeroMem (L4PageTable, STM_PAGES_TO_SIZE(1));
EptPointer->Uint64 = (UINT64)(UINTN)L4PageTable;
EptPointer->Bits32.Gaw = EPT_GAW_48BIT;
EptPointer->Bits32.Etmt = MEMORY_TYPE_WB;
}
L3PageTable = (EPT_ENTRY *)((UINTN)L4PageTable + SIZE_4KB);
L2PageTable = (EPT_ENTRY *)((UINTN)L3PageTable + SIZE_4KB);
/**
This function creates an individual EPT page table entry for BaseAddress.
Accounts for 1GB and 2MB pages.
@param EptPointer EPT pointer
@param BaseAddress address within the PTE to be created
@param PhysAddress physical address to be mapped to
@return pte, 0 if Fail
**/
BaseAddress = 0;
for (Index4 = 0; Index4 < 1; Index4 ++) {
L4PageTable->Uint64 = (UINT64)(UINTN)L3PageTable;
L4PageTable->Bits32.Ra = 1;
L4PageTable->Bits32.Wa = 1;
L4PageTable->Bits32.Xa = 1;
L4PageTable ++;
for (Index3 = 0; Index3 < 4; Index3 ++) {
L3PageTable->Uint64 = (UINT64)(UINTN)L2PageTable;
L3PageTable->Bits32.Ra = 1;
L3PageTable->Bits32.Wa = 1;
L3PageTable->Bits32.Xa = 1;
L3PageTable ++;
for (Index2 = 0; Index2 < 512; Index2 ++) {
UINT64
EptAllocatePte(
IN UINT64 EptPointer,
IN UINT64 BaseAddress,
IN UINT64 PhysAddress,
IN UINT64 PhysSize
)
{
EPT_ENTRY *L1PageTable;
EPT_ENTRY *L2PageTable;
EPT_ENTRY *L3PageTable;
EPT_ENTRY *L4PageTable;
UINTN Index1;
UINTN Index2;
UINTN Index3;
UINTN Index4;
if (BaseAddress >= BASE_2MB) {
if (TRUE) {
// Use super page
L2PageTable->Uint64 = BaseAddress;
L2PageTable->Bits32.Ra = 1;
L2PageTable->Bits32.Wa = 1;
L2PageTable->Bits32.Xa = Xa;
L2PageTable->Bits32.Sp = 1;
UINT8 MemoryType;
Index4 = ((UINTN)RShiftU64 (BaseAddress, 39)) & PAGING_PAE_INDEX_MASK;
Index3 = ((UINTN)BaseAddress >> 30) & PAGING_PAE_INDEX_MASK;
Index2 = ((UINTN)BaseAddress >> 21) & PAGING_PAE_INDEX_MASK;
Index1 = ((UINTN)BaseAddress >> 12) & PAGING_PAE_INDEX_MASK;
// BUGBUG: Do we need set UC for STM region???
MemoryType = GetMemoryType (BaseAddress);
L2PageTable->Bits32.Emt = MemoryType;
if ((BaseAddress >= SmrrBase) && (BaseAddress < SmrrBase + SmrrLength)) {
DEBUG ((EFI_D_INFO, "EPT init: %x - %x\n", (UINTN)BaseAddress, (UINTN)L2PageTable->Bits32.Emt));
L2PageTable->Bits32.Xa = 1;
}
L2PageTable ++;
BaseAddress += SIZE_2MB;
continue;
}
}
//
// [0, 2MB)
//
L1PageTable = (EPT_ENTRY *)AllocatePages (1);
L2PageTable->Uint64 = (UINT64)(UINTN)L1PageTable;
L2PageTable->Bits32.Ra = 1;
L2PageTable->Bits32.Wa = 1;
L2PageTable->Bits32.Xa = 1;
L2PageTable ++;
for (Index1 = 0; Index1 < 512; Index1 ++) {
L1PageTable->Uint64 = BaseAddress;
L1PageTable->Bits32.Ra = 1;
L1PageTable->Bits32.Wa = 1;
L1PageTable->Bits32.Xa = Xa;
MemoryType = GetMemoryType (BaseAddress);
L1PageTable->Bits32.Emt = MemoryType;
L1PageTable ++;
BaseAddress += SIZE_4KB;
}
}
}
}
//
// Setup above 4G
//
if (sizeof(UINTN) == sizeof(UINT64)) {
ASSERT(BaseAddress == BASE_4GB);
L4PageTable = (EPT_ENTRY *)(UINTN)(EptPointer->Uint64 & ~(SIZE_4KB - 1));
for (Index4 = 0; Index4 < NumberOfPml4EntriesNeeded; Index4 ++) {
if (Index4 > 0) {
//DEBUG((EFI_D_INFO, "EptAllocatePte - BaseAddress 0x%llx PhysAddress 0x%llx Physize 0x%x\n", BaseAddress, PhysAddress, PhysSize));
L4PageTable = (EPT_ENTRY *) (UINTN)EptPointer;
if (L4PageTable[Index4].Uint64 == 0)
{
L3PageTable = (EPT_ENTRY *)(UINTN)AllocatePages (1);
if(L3PageTable == NULL)
{
return 0;
}
ZeroMem (L3PageTable, STM_PAGES_TO_SIZE(1));
L4PageTable[Index4].Uint64 = (UINT64)(UINTN)L3PageTable;
L4PageTable[Index4].Bits32.Ra = 1;
L4PageTable[Index4].Bits32.Wa = 1;
L4PageTable[Index4].Bits32.Xa = 1;
Index3 = 0;
} else {
// Start from 4G - L4PageTable[0] already allocated.
L3PageTable = (EPT_ENTRY *)(UINTN)(L4PageTable[0].Uint64 & ~(SIZE_4KB - 1));
Index3 = 4;
}
if (Is1GPageSupport()) {
for (; Index3 < NumberOfPdpEntriesNeeded; Index3 ++) {
L3PageTable[Index3].Uint64 = BaseAddress;
L3PageTable[Index3].Bits32.Ra = 1;
L3PageTable[Index3].Bits32.Wa = 1;
L3PageTable[Index3].Bits32.Xa = Xa;
L3PageTable[Index3].Bits32.Sp = 1;
MemoryType = GetMemoryType (BaseAddress);
L3PageTable[Index3].Bits32.Emt = MemoryType;
BaseAddress += SIZE_1GB;
}
} else {
for (; Index3 < NumberOfPdpEntriesNeeded; Index3 ++) {
L2PageTable = (EPT_ENTRY *)(UINTN)AllocatePages (1);
L3PageTable[Index3].Uint64 = (UINT64)(UINTN)L2PageTable;
L3PageTable[Index3].Bits32.Ra = 1;
L3PageTable[Index3].Bits32.Wa = 1;
L3PageTable[Index3].Bits32.Xa = 1;
for (Index2 = 0; Index2 < 512; Index2 ++) {
L2PageTable[Index2].Uint64 = BaseAddress;
L2PageTable[Index2].Bits32.Ra = 1;
L2PageTable[Index2].Bits32.Wa = 1;
L2PageTable[Index2].Bits32.Xa = Xa;
L2PageTable[Index2].Bits32.Sp = 1;
MemoryType = GetMemoryType (BaseAddress);
L2PageTable[Index2].Bits32.Emt = MemoryType;
BaseAddress += SIZE_2MB;
}
}
}
L4PageTable[Index4].Bits32.Ra = 1;
L4PageTable[Index4].Bits32.Wa = 1;
L4PageTable[Index4].Bits32.Xa = 1;
}
}
return ;
L3PageTable = (EPT_ENTRY *)(UINTN)(L4PageTable[Index4].Uint64 & PAGING_4K_ADDRESS_MASK_64);
if(L3PageTable[Index3].Uint64 == 0)
{
if((PhysAddress >= BASE_4GB) &&
Is1GPageSupport() &&
((PhysAddress & PAGING_1G_MASK) == 0) &&
(PhysSize > PAGING_1G_MASK )) // mask happens to be the size
{
L3PageTable[Index3].Uint64 = PhysAddress & PAGING_1G_ADDRESS_MASK_64;
L3PageTable[Index3].Bits32.Sp = 1;
MemoryType = GetMemoryType (PhysAddress);
L3PageTable[Index3].Bits32.Emt = MemoryType;
return L3PageTable[Index3].Uint64;
}
else
{
L2PageTable = (EPT_ENTRY *)(UINTN)AllocatePages (1);
if(L2PageTable == NULL)
{
return 0;
}
ZeroMem (L2PageTable, STM_PAGES_TO_SIZE(1));
L3PageTable[Index3].Uint64 = (UINT64)(UINTN)L2PageTable;
L3PageTable[Index3].Bits32.Ra = 1;
L3PageTable[Index3].Bits32.Wa = 1;
L3PageTable[Index3].Bits32.Xa = 1; // was Xa
}
}
else
{
L2PageTable = (EPT_ENTRY *)(UINTN)(L3PageTable[Index3].Uint64 & PAGING_4K_ADDRESS_MASK_64);
}
if(L2PageTable[Index2].Uint64 == 0)
{
if ((BaseAddress >= BASE_2MB) &&
((BaseAddress & PAGING_2M_MASK) == 0) &&
((PhysAddress & PAGING_2M_MASK) == 0) &&
(PhysSize > PAGING_2M_MASK))
{
if (TRUE) {
// Use super page
L2PageTable[Index2].Uint64 = PhysAddress & PAGING_2M_ADDRESS_MASK_64;
L2PageTable[Index2].Bits32.Sp = 1;
// BUGBUG: Do we need set UC for STM region???
MemoryType = GetMemoryType (PhysAddress);
L2PageTable[Index2].Bits32.Emt = MemoryType;
return L2PageTable[Index2].Uint64;
}
}
else
{
//
// [0, 2MB)
//
L1PageTable = (EPT_ENTRY *)AllocatePages (1);
if(L1PageTable == NULL)
{
return 0;
}
ZeroMem (L1PageTable, STM_PAGES_TO_SIZE(1));
L2PageTable[Index2].Uint64 = (UINT64)(UINTN)L1PageTable;
L2PageTable[Index2].Bits32.Ra = 1;
L2PageTable[Index2].Bits32.Wa = 1;
L2PageTable[Index2].Bits32.Xa = 1;
}
}
else
{
L1PageTable = (EPT_ENTRY *)(UINTN)(L2PageTable[Index2].Uint64 & PAGING_4K_ADDRESS_MASK_64);
}
if(L1PageTable[Index1].Uint64 == 0)
{
L1PageTable[Index1].Uint64 = PhysAddress & PAGING_4K_ADDRESS_MASK_64;
MemoryType = GetMemoryType (PhysAddress);
L1PageTable[Index1].Bits32.Emt = MemoryType;
}
return L1PageTable[Index1].Uint64;
}
/**
@ -561,7 +587,7 @@ EptInit (
Xa = 1;
}
EptCreatePageTable (&mGuestContextCommonSmm.EptPointer, Xa);
EptCreatePageTable (&mGuestContextCommonSmm[SMI_HANDLER].EptPointer, Xa);
SmrrBase = (UINT32)mMtrrInfo.SmrrBase & (UINT32)mMtrrInfo.SmrrMask & 0xFFFFF000;
SmrrLength = (UINT32)mMtrrInfo.SmrrMask & 0xFFFFF000;
@ -572,7 +598,7 @@ EptInit (
DEBUG ((EFI_D_INFO, "SmrrLength - %08x\n", (UINTN)SmrrLength));
DEBUG ((EFI_D_INFO, "StmHeader - %08x\n", (UINTN)mHostContextCommon.StmHeader));
DEBUG ((EFI_D_INFO, "StmSize - %08x\n", (UINTN)mHostContextCommon.StmSize));
DEBUG ((EFI_D_INFO, "GuestCr3(0) - %08x\n", (UINTN)mGuestContextCommonSmm.GuestContextPerCpu[0].Cr3));
DEBUG ((EFI_D_INFO, "GuestCr3(0) - %08x\n", (UINTN)mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[0].Cr3));
if (IsSentryEnabled()) {
GetTsegInfoFromTxt (&TsegBase, &TsegLength);
@ -601,29 +627,33 @@ EptInit (
//
// Need mark SMRAM executable again
//
if (ExecutionDisableOutsideSmrr) {
//if (ExecutionDisableOutsideSmrr) {
EPTSetPageAttributeRange (
mGuestContextCommonSmm[SMI_HANDLER].EptPointer.Uint64,
TsegBase,
TsegLength,
TsegBase,
1,
1,
1,
EptPageAttributeSet
);
}
//}
//
// Protect MSEG
//
DEBUG ((EFI_D_INFO, "Protect MSEG\n"));
EPTSetPageAttributeRange (
mGuestContextCommonSmm[SMI_HANDLER].EptPointer.Uint64,
(UINT64)(UINTN)mHostContextCommon.StmHeader,
(UINT64)(UINTN)mHostContextCommon.StmSize,
(UINT64)(UINTN)mHostContextCommon.StmHeader,
0,
0,
0,
EptPageAttributeSet
);
//EptDumpPageTable (&mGuestContextCommonSmm.EptPointer);
// EptDumpPageTable (&mGuestContextCommonSmm[SMI_HANDLER].EptPointer);
return ;
}

View File

@ -13,6 +13,7 @@
**/
#include "StmInit.h"
#include "PeStm.h"
/**
@ -31,12 +32,13 @@ SetIoBitmapEx (
UINT8 *IoBitmap;
UINTN Index;
UINTN Offset;
UINT32 VmType = SMI_HANDLER;
if (Port >= 0x8000) {
IoBitmap = (UINT8 *)(UINTN)mGuestContextCommonSmm.IoBitmapB;
IoBitmap = (UINT8 *)(UINTN)mGuestContextCommonSmm[VmType].IoBitmapB;
Port -= 0x8000;
} else {
IoBitmap = (UINT8 *)(UINTN)mGuestContextCommonSmm.IoBitmapA;
IoBitmap = (UINT8 *)(UINTN)mGuestContextCommonSmm[VmType].IoBitmapA;
}
Index = Port / 8;
@ -101,6 +103,7 @@ IoInit (
VOID
)
{
mGuestContextCommonSmm.IoBitmapA = (UINT64)(UINTN)AllocatePages (1);
mGuestContextCommonSmm.IoBitmapB = (UINT64)(UINTN)AllocatePages (1);
UINT32 VmType = SMI_HANDLER;
mGuestContextCommonSmm[VmType].IoBitmapA = (UINT64)(UINTN)AllocatePages (1);
mGuestContextCommonSmm[VmType].IoBitmapB = (UINT64)(UINTN)AllocatePages (1);
}

View File

@ -13,6 +13,10 @@
**/
#include "StmInit.h"
#include "PeStm.h"
void HeapList(void);
//#define HEAPCHECK
/**
@ -23,48 +27,247 @@
@return pages address
**/
VOID *
AllocatePages (
IN UINTN Pages
)
IN UINTN Pages
)
{
UINT64 Address;
UINT64 Address;
HEAP_HEADER * BlockHeader;
HEAP_HEADER * PrevBlock;
HEAP_HEADER * NewBlock;
BOOLEAN foundBlock;
BOOLEAN endList;
AcquireSpinLock (&mHostContextCommon.MemoryLock);
if (STM_PAGES_TO_SIZE(Pages) >= mHostContextCommon.HeapTop) {
DEBUG((EFI_D_ERROR, "AllocatePages(%x) overflow\n", Pages));
ReleaseSpinLock(&mHostContextCommon.MemoryLock);
CpuDeadLoop();
}
if (mHostContextCommon.HeapBottom > mHostContextCommon.HeapTop - STM_PAGES_TO_SIZE(Pages)) {
DEBUG ((EFI_D_ERROR, "AllocatePages(%x) fail\n", Pages));
// implements a first fit algorithm
// find the first block that fits
AcquireSpinLock (&mHostContextCommon.MemoryLock);
Address = mHostContextCommon.HeapFree; // get begining of freelist
PrevBlock = (HEAP_HEADER *) NULL;
BlockHeader = (HEAP_HEADER *)(UINTN)Address;
foundBlock = FALSE;
if(BlockHeader == 0)
{
endList = TRUE; // we are totally out of space
}
else
{
endList = FALSE;
}
while(!endList)
{
if(BlockHeader->BlockLength >= Pages)
{
foundBlock = TRUE;
Address = (UINT64) (UINTN) BlockHeader; // begining of block to return
break;
}
if(BlockHeader->NextBlock == 0)
{
endList = TRUE;
break;
}
PrevBlock = BlockHeader; // remember the previous block;
BlockHeader = BlockHeader->NextBlock;
}
if(endList)
{
DEBUG((EFI_D_ERROR, "AllocatePages(0x%x) fail - no freeblock of the correct size\n", Pages));
#ifdef HEAPCHECK
// ReleaseSpinLock (&mHostContextCommon.MemoryLock);
HeapList();
#endif
ReleaseSpinLock (&mHostContextCommon.MemoryLock);
return NULL;
}
// found a block that fits - now need to make adjustments
// cases 1 - first in list == change HeapFree
// 2 - middle of list == change previous pointer
// 3 - released block at end of list == change previous pointer
// subcases - block is completely consumed - need to change pointer in previous block
// - block has leftover space
//if (mHostContextCommon.HeapBottom + STM_PAGES_TO_SIZE(Pages) > mHostContextCommon.HeapTop) {
// DEBUG ((EFI_D_ERROR, "AllocatePages(%x) fail\n", Pages));
// ReleaseSpinLock (&mHostContextCommon.MemoryLock);
// //CpuDeadLoop ();
// return NULL;
//}
// (1) breakup block (if necessary)
// (2) point HeapFree to the new block (or to the next block in the event the current block is consumed
if(BlockHeader->BlockLength == Pages)
{
NewBlock = (BlockHeader->NextBlock); // get next block in the list
}
else // need to break the block up
{
NewBlock = (HEAP_HEADER *)(UINTN)(Address + ((UINT64)(UINTN)STM_PAGES_TO_SIZE(Pages))); // second half of block
NewBlock->NextBlock = BlockHeader->NextBlock;
NewBlock->BlockLength = BlockHeader->BlockLength - Pages;
}
if(BlockHeader == (HEAP_HEADER *)(UINTN) mHostContextCommon.HeapFree)
{
mHostContextCommon.HeapFree = (UINT64) NewBlock;
}
else
{
PrevBlock->NextBlock = NewBlock;
}
//Address = mHostContextCommon.HeapTop - STM_PAGES_TO_SIZE(Pages);
//mHostContextCommon.HeapTop = Address;
#ifdef HEAPCHECK
HeapList();
#endif
ReleaseSpinLock (&mHostContextCommon.MemoryLock);
CpuDeadLoop ();
}
Address = mHostContextCommon.HeapTop - STM_PAGES_TO_SIZE(Pages);
mHostContextCommon.HeapTop = Address;
ZeroMem ((VOID *)(UINTN)Address, STM_PAGES_TO_SIZE (Pages));
ReleaseSpinLock (&mHostContextCommon.MemoryLock);
return (VOID *)(UINTN)Address;
ZeroMem ((VOID *)(UINTN)Address, STM_PAGES_TO_SIZE (Pages));
#ifdef HEAPCHECK
DEBUG((EFI_D_ERROR, "****Allocating 0x%x pages at 0x%016llx - %d Cleared\n", Pages, Address, STM_PAGES_TO_SIZE (Pages)));
#endif
return (VOID *)(UINTN)Address;
}
/**
This function free pages in MSEG.
This function free pages in MSEG.
@param Address pages address
@param Pages pages number
@param Address pages address
@param Pages pages number
**/
VOID
FreePages (
IN VOID *Address,
IN UINTN Pages
)
IN VOID *Address,
IN UINTN Pages
)
{
if ((UINT64)(UINTN)Address == mHostContextCommon.HeapTop) {
mHostContextCommon.HeapTop += STM_PAGES_TO_SIZE(Pages);
}
return ;
HEAP_HEADER * CurrentBlock;
HEAP_HEADER * PreviousBlock;
#ifdef HEAPCHECK
DEBUG((EFI_D_ERROR, "****Freeing 0x%x pages at 0x%016llx\n", Pages, (UINTN) Address));
#endif
AcquireSpinLock (&mHostContextCommon.MemoryLock);
// (1) Set header
// (2) find place in buffer chain
// (3) coalese(sp?)
Address = (void *)((UINTN) Address & ~0xfff); // mask out the lower 12 bits
((HEAP_HEADER *)Address)->NextBlock = 0L;
((HEAP_HEADER *)Address)->BlockLength = Pages;
#ifdef HEAPCHECK
DEBUG((EFI_D_ERROR, "Address->NextBlock: 0x%016llx Address->BlockLength: 0x%016llx\n",
((HEAP_HEADER *)Address)->NextBlock,
((HEAP_HEADER *)Address)->BlockLength));
#endif
PreviousBlock = 0L;
CurrentBlock = (HEAP_HEADER *)(UINTN) mHostContextCommon.HeapFree;
// find where it belongs
while( CurrentBlock != 0L)
{
if((UINTN)CurrentBlock > (UINTN)Address)
{
break;
}
PreviousBlock = CurrentBlock;
CurrentBlock = CurrentBlock->NextBlock;
}
//link it in
if(PreviousBlock == 0L)
{
// at beginning of list
((HEAP_HEADER *)Address)->NextBlock = CurrentBlock;
mHostContextCommon.HeapFree = (UINT64)Address;
}
else
{
// somewhere in list
((HEAP_HEADER *)Address)->NextBlock = CurrentBlock;
PreviousBlock->NextBlock = (HEAP_HEADER *)Address;
}
#ifdef HEAPCHECK
DEBUG((EFI_D_ERROR, "Address->NextBlock: 0x%016llx Address->BlockLength: 0x%016llx\n",
((HEAP_HEADER *)Address)->NextBlock,
((HEAP_HEADER *)Address)->BlockLength));
#endif
// coalesce
// First check the block after
if(CurrentBlock != 0L)
{
if(((UINT64)Address + STM_PAGES_TO_SIZE(Pages)) == (UINT64)(UINTN)CurrentBlock)
{
#ifdef HEAPCHECK
DEBUG((EFI_D_ERROR, "Combined with block after\n"));
#endif
((HEAP_HEADER *)Address)->NextBlock = CurrentBlock->NextBlock;
((HEAP_HEADER *)Address)->BlockLength = ((HEAP_HEADER *)Address)->BlockLength + CurrentBlock->BlockLength;
#ifdef HEAPCHECK
DEBUG((EFI_D_ERROR, "Address->NextBlock: 0x%016llx Address->BlockLength: 0x%016llx\n",
((HEAP_HEADER *)Address)->NextBlock,
((HEAP_HEADER *)Address)->BlockLength));
#endif
}
}
// then then block before
if(PreviousBlock != 0L)
{
if(((UINT64)PreviousBlock + STM_PAGES_TO_SIZE((UINT64)PreviousBlock->BlockLength)) == (UINT64)Address)
{
#ifdef HEAPCHECK
DEBUG((EFI_D_ERROR, "Combined with block before\n"));
DEBUG((EFI_D_ERROR, "PreviousBlock: 0x%016llx BlockLength: 0x%016llx Add: 0x%016llx\n",
PreviousBlock,
STM_PAGES_TO_SIZE((UINT64)PreviousBlock->BlockLength),
((HEAP_HEADER*) Address)));
#endif
PreviousBlock->NextBlock = ((HEAP_HEADER *)Address)->NextBlock;
PreviousBlock->BlockLength += ((HEAP_HEADER *) Address)->BlockLength;
}
}
// if ((UINT64)(UINTN)Address == mHostContextCommon.HeapTop) {
// mHostContextCommon.HeapTop += STM_PAGES_TO_SIZE(Pages);
// }
#ifdef HEAPCHECK
HeapList();
#endif
ReleaseSpinLock (&mHostContextCommon.MemoryLock);
return ;
}
/**
  Debug helper: walks the heap free list starting at
  mHostContextCommon.HeapFree and prints the address, length, and
  next-pointer of every free block. Read-only; used by HEAPCHECK builds.
**/
void HeapList(void)
{
  HEAP_HEADER * Block;

  DEBUG((EFI_D_ERROR, " ***HeapList Start***\n"));
  for (Block = (HEAP_HEADER *)(UINTN) mHostContextCommon.HeapFree;
       Block != 0L;
       Block = Block->NextBlock)
  {
    DEBUG((EFI_D_ERROR, " Block: 0x%llx BlockLength: 0x%x NextBlock: 0x%llx\n", Block, Block->BlockLength, Block->NextBlock));
  }
  DEBUG((EFI_D_ERROR, " ***HeapList Done***\n"));
}

View File

@ -13,6 +13,7 @@
**/
#include "StmInit.h"
#include "PeStm.h"
/**
@ -33,21 +34,22 @@ SetMsrBitmapEx (
UINT8 *MsrBitmap;
UINTN Index;
UINTN Offset;
UINT32 VmType = SMI_HANDLER;
if (!MsrWrite) {
if (MsrIndex < 0x2000) {
MsrBitmap = (UINT8 *)(UINTN)mGuestContextCommonSmm.MsrBitmap;
MsrBitmap = (UINT8 *)(UINTN)mGuestContextCommonSmm[VmType].MsrBitmap;
} else if (MsrIndex >= 0xC0000000 && MsrIndex < 0xC0002000){
MsrBitmap = (UINT8 *)(UINTN)(mGuestContextCommonSmm.MsrBitmap + 0x400);
MsrBitmap = (UINT8 *)(UINTN)(mGuestContextCommonSmm[VmType].MsrBitmap + 0x400);
MsrIndex -= 0xC0000000;
} else {
return ;
}
} else {
if (MsrIndex < 0x2000) {
MsrBitmap = (UINT8 *)(UINTN)(mGuestContextCommonSmm.MsrBitmap + 0x800);
MsrBitmap = (UINT8 *)(UINTN)(mGuestContextCommonSmm[VmType].MsrBitmap + 0x800);
} else if (MsrIndex >= 0xC0000000 && MsrIndex < 0xC0002000){
MsrBitmap = (UINT8 *)(UINTN)(mGuestContextCommonSmm.MsrBitmap + 0xC00);
MsrBitmap = (UINT8 *)(UINTN)(mGuestContextCommonSmm[VmType].MsrBitmap + 0xC00);
MsrIndex -= 0xC0000000;
} else {
return ;
@ -198,7 +200,7 @@ MsrInit (
VOID
)
{
mGuestContextCommonSmm.MsrBitmap = (UINT64)(UINTN)AllocatePages (1);
mGuestContextCommonSmm[SMI_HANDLER].MsrBitmap = (UINT64)(UINTN)AllocatePages (1);
SetAllMsrBitmaps ();

View File

@ -1,219 +1,293 @@
/** @file
STM paging
STM paging
Copyright (c) 2015, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php.
Copyright (c) 2015, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php.
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
**/
#include "StmInit.h"
extern RETURN_STATUS RegisterExceptionHandler(IN EFI_EXCEPTION_TYPE ExceptionType,
IN EFI_EXCEPTION_CALLBACK ExceptionCallback);
/**
Check if 1-GByte pages is supported by processor or not.
Check if 1-GByte pages is supported by processor or not.
@retval TRUE 1-GByte pages is supported.
@retval FALSE 1-GByte pages is not supported.
@retval TRUE 1-GByte pages is supported.
@retval FALSE 1-GByte pages is not supported.
**/
BOOLEAN
Is1GPageSupport (
VOID
)
Is1GPageSupport (
VOID
)
{
UINT32 RegEax;
UINT32 RegEdx;
UINT32 RegEax;
UINT32 RegEdx;
AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
if (RegEax >= 0x80000001) {
AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
if ((RegEdx & BIT26) != 0) {
return TRUE;
}
}
return FALSE;
AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
if (RegEax >= 0x80000001) {
AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
if ((RegEdx & BIT26) != 0) {
return TRUE;
}
}
return FALSE;
}
/**
This function create Ia32e page table for SMM guest.
This function create Ia32e page table for SMM guest.
@return pages table address
@return pages table address
**/
UINTN
CreateIa32ePageTable (
VOID
)
CreateIa32ePageTable (
VOID
)
{
UINTN PageTable;
UINTN Index;
UINTN SubIndex;
UINT64 *Pde;
UINT64 *Pte;
UINT64 *Pml4;
UINTN PageTable;
UINTN Index;
UINTN SubIndex;
UINT64 *Pde;
UINT64 *Pte;
UINT64 *Pml4;
PageTable = (UINTN)AllocatePages (6);
PageTable = (UINTN)AllocatePages (6);
Pml4 = (UINT64*)(UINTN)PageTable;
PageTable += SIZE_4KB;
*Pml4 = PageTable | IA32_PG_P;
Pml4 = (UINT64*)(UINTN)PageTable;
PageTable += SIZE_4KB;
*Pml4 = PageTable | IA32_PG_P;
Pde = (UINT64*)(UINTN)PageTable;
Pte = Pde + SIZE_4KB / sizeof (*Pde);
Pde = (UINT64*)(UINTN)PageTable;
Pte = Pde + SIZE_4KB / sizeof (*Pde);
for (Index = 0; Index < 4; Index++) {
*Pde = (UINTN)Pte | IA32_PG_P;
Pde++;
for (Index = 0; Index < 4; Index++) {
*Pde = (UINTN)Pte | IA32_PG_P;
Pde++;
for (SubIndex = 0; SubIndex < SIZE_4KB / sizeof (*Pte); SubIndex++) {
*Pte = (((Index << 9) + SubIndex) << 21) |
IA32_PG_PS | IA32_PG_RW | IA32_PG_P;
Pte++;
}
}
for (SubIndex = 0; SubIndex < SIZE_4KB / sizeof (*Pte); SubIndex++) {
*Pte = (((Index << 9) + SubIndex) << 21) |
IA32_PG_PS | IA32_PG_RW | IA32_PG_P;
Pte++;
}
}
PageTable = (UINTN)Pml4;
PageTable = (UINTN)Pml4;
return PageTable;
return PageTable;
}
/**
This function create compatible page table for SMM guest.
This function create compatible page table for SMM guest.
@return pages table address
@return pages table address
**/
UINTN
CreateCompatiblePageTable (
VOID
)
CreateCompatiblePageTable (
VOID
)
{
UINTN PageTable;
UINTN Index;
UINT32 *Pte;
UINT32 Address;
UINTN PageTable;
UINTN Index;
UINT32 *Pte;
UINT32 Address;
PageTable = (UINTN)AllocatePages (1);
PageTable = (UINTN)AllocatePages (1);
Pte = (UINT32*)(UINTN)PageTable;
Pte = (UINT32*)(UINTN)PageTable;
Address = 0;
for (Index = 0; Index < SIZE_4KB / sizeof (*Pte); Index++) {
*Pte = Address | IA32_PG_PS | IA32_PG_RW | IA32_PG_P;
Pte++;
Address += SIZE_4MB;
}
Address = 0;
for (Index = 0; Index < SIZE_4KB / sizeof (*Pte); Index++) {
*Pte = Address | IA32_PG_PS | IA32_PG_RW | IA32_PG_P;
Pte++;
Address += SIZE_4MB;
}
return PageTable;
return PageTable;
}
/**
This function create compatible PAE page table for SMM guest.
This function create compatible PAE page table for SMM guest.
@return pages table address
@return pages table address
**/
UINTN
CreateCompatiblePaePageTable (
VOID
)
CreateCompatiblePaePageTable (
VOID
)
{
UINTN PageTable;
UINTN Index;
UINTN SubIndex;
UINT64 *Pde;
UINT64 *Pte;
UINTN PageTable;
UINTN Index;
UINTN SubIndex;
UINT64 *Pde;
UINT64 *Pte;
PageTable = (UINTN)AllocatePages (5);
PageTable = (UINTN)AllocatePages (5);
Pde = (UINT64*)(UINTN)PageTable;
Pte = Pde + SIZE_4KB / sizeof (*Pde);
Pde = (UINT64*)(UINTN)PageTable;
Pte = Pde + SIZE_4KB / sizeof (*Pde);
for (Index = 0; Index < 4; Index++) {
*Pde = (UINTN)Pte | IA32_PG_P;
Pde++;
for (Index = 0; Index < 4; Index++) {
*Pde = (UINTN)Pte | IA32_PG_P;
Pde++;
for (SubIndex = 0; SubIndex < SIZE_4KB / sizeof (*Pte); SubIndex++) {
*Pte = (((Index << 9) + SubIndex) << 21) |
IA32_PG_PS | IA32_PG_RW | IA32_PG_P;
Pte++;
}
}
for (SubIndex = 0; SubIndex < SIZE_4KB / sizeof (*Pte); SubIndex++) {
*Pte = (((Index << 9) + SubIndex) << 21) |
IA32_PG_PS | IA32_PG_RW | IA32_PG_P;
Pte++;
}
}
return PageTable;
return PageTable;
}
/**
This function create page table for STM host.
The SINIT/StmLoader should already configured 4G paging, so here
we just create >4G paging for X64 mode.
This function create page table for STM host.
The SINIT/StmLoader should already configured 4G paging, so here
we just create >4G paging for X64 mode.
**/
VOID
CreateHostPaging (
VOID
)
CreateHostPaging (
VOID
)
{
UINTN PageTable;
UINTN Index;
UINTN SubIndex;
UINTN Pml4Index;
UINT64 *Pde;
UINT64 *Pte;
UINT64 *Pml4;
UINT64 BaseAddress;
UINTN NumberOfPml4EntriesNeeded;
UINTN NumberOfPdpEntriesNeeded;
UINTN PageTable;
UINTN Index;
UINTN SubIndex;
UINTN Pml4Index;
UINT64 *Pde;
UINT64 *Pte;
UINT64 *Pml4;
UINT64 BaseAddress;
UINTN NumberOfPml4EntriesNeeded;
UINTN NumberOfPdpEntriesNeeded;
if (sizeof(UINTN) == sizeof(UINT64)) {
PageTable = AsmReadCr3 ();
Pml4 = (UINT64 *)PageTable;
if (sizeof(UINTN) == sizeof(UINT64)) {
PageTable = AsmReadCr3 ();
Pml4 = (UINT64 *)PageTable;
if (mHostContextCommon.PhysicalAddressBits <= 39) {
NumberOfPml4EntriesNeeded = 1;
NumberOfPdpEntriesNeeded = (UINTN)LShiftU64 (1, mHostContextCommon.PhysicalAddressBits - 30);
} else {
NumberOfPml4EntriesNeeded = (UINTN)LShiftU64 (1, mHostContextCommon.PhysicalAddressBits - 39);
NumberOfPdpEntriesNeeded = 512;
}
if (mHostContextCommon.PhysicalAddressBits <= 39) {
NumberOfPml4EntriesNeeded = 1;
NumberOfPdpEntriesNeeded = (UINTN)LShiftU64 (1, mHostContextCommon.PhysicalAddressBits - 30);
} else {
NumberOfPml4EntriesNeeded = (UINTN)LShiftU64 (1, mHostContextCommon.PhysicalAddressBits - 39);
NumberOfPdpEntriesNeeded = 512;
}
BaseAddress = BASE_4GB;
for (Pml4Index = 0; Pml4Index < NumberOfPml4EntriesNeeded; Pml4Index++) {
if (Pml4Index > 0) {
Pde = (UINT64 *)(UINTN)AllocatePages (1);
Pml4[Pml4Index] = (UINT64)(UINTN)Pde | IA32_PG_P;
Index = 0;
} else {
// Start from 4G - Pml4[0] already allocated.
Pde = (UINT64 *)(UINTN)(Pml4[0] & 0xFFFFF000);
Index = 4;
}
BaseAddress = BASE_4GB;
for (Pml4Index = 0; Pml4Index < NumberOfPml4EntriesNeeded; Pml4Index++) {
if (Pml4Index > 0) {
Pde = (UINT64 *)(UINTN)AllocatePages (1);
Pml4[Pml4Index] = (UINT64)(UINTN)Pde | IA32_PG_P;
Index = 0;
} else {
// Start from 4G - Pml4[0] already allocated.
Pde = (UINT64 *)(UINTN)(Pml4[0] & 0xFFFFF000);
Index = 4;
}
if (Is1GPageSupport()) {
for (; Index < NumberOfPdpEntriesNeeded; Index++) {
Pde[Index] = (UINT64)(UINTN)BaseAddress | IA32_PG_PS | IA32_PG_RW | IA32_PG_P;
BaseAddress += SIZE_1GB;
}
} else {
for (; Index < NumberOfPdpEntriesNeeded; Index++) {
Pte = (UINT64 *)AllocatePages (1);
Pde[Index] = (UINT64)(UINTN)Pte | IA32_PG_P;
if (Is1GPageSupport()) {
for (; Index < NumberOfPdpEntriesNeeded; Index++) {
Pde[Index] = (UINT64)(UINTN)BaseAddress | IA32_PG_PS | IA32_PG_RW | IA32_PG_P;
BaseAddress += SIZE_1GB;
}
} else {
for (; Index < NumberOfPdpEntriesNeeded; Index++) {
Pte = (UINT64 *)AllocatePages (1);
Pde[Index] = (UINT64)(UINTN)Pte | IA32_PG_P;
for (SubIndex = 0; SubIndex < SIZE_4KB / sizeof(*Pte); SubIndex++) {
Pte[SubIndex] = BaseAddress | IA32_PG_PS | IA32_PG_RW | IA32_PG_P;
BaseAddress += SIZE_2MB;
}
}
}
}
}
for (SubIndex = 0; SubIndex < SIZE_4KB / sizeof(*Pte); SubIndex++) {
Pte[SubIndex] = BaseAddress | IA32_PG_PS | IA32_PG_RW | IA32_PG_P;
BaseAddress += SIZE_2MB;
}
}
}
}
}
}
// Masks selecting the PML4, PDPT, and PD index fields of a 64-bit linear
// address (bits 47:39, 38:30, and 29:21 respectively).
#define Level4 0x0000FF8000000000ULL
#define Level3 0x0000007FC0000000ULL
#define Level2 0x000000003FE00000ULL

/**
  Map one 2-MByte identity-mapped superpage covering 'address' into the
  current host page table (CR3), allocating intermediate PDPT/PD pages
  as needed. Used by StmPageFault to demand-map host pages.

  @param address  linear (== physical, identity-mapped) address to map
**/
VOID AddSuperPage(UINTN address)
{
  UINTN PageTable;
  UINTN Pml4Index;
  UINTN PdpteIndex;
  UINTN PdeIndex;
  UINT64 *Pde;
  UINT64 *Pdpte;
  UINT64 *Pml4;

  // Only meaningful for a 64-bit (4-level paging) build.
  if (sizeof(UINTN) == sizeof(UINT64)) {
    PageTable = AsmReadCr3 ();
    Pml4 = (UINT64 *)PageTable;

    Pml4Index = (address & Level4) >> 39;
    PdpteIndex = (address & Level3) >> 30;
    PdeIndex = (address & Level2) >> 21;

    // We assume that the PageTable has already been created
    // NOTE(review): masking with ~0xFFF strips the low flag bits but keeps
    // high bits (e.g. bit 63 / XD) if ever set, and a non-zero entry is
    // taken as present without checking IA32_PG_P - confirm entries here
    // are always either 0 or P-only + address.
    Pdpte = (UINT64*) ((UINTN)Pml4[Pml4Index] & ~0xFFF);
    if(Pdpte == NULL)
    {
      // No PDPT yet for this PML4 slot - allocate and link one.
      Pdpte = (UINT64 *) AllocatePages(1);
      Pml4[Pml4Index] = (UINT64)((UINTN)Pdpte | IA32_PG_P);
    }
    Pde = (UINT64 *)((UINTN)Pdpte[PdpteIndex] & ~0xFFF);
    if(Pde == NULL)
    {
      // No page directory yet for this PDPT slot - allocate and link one.
      Pde = (UINT64 *) AllocatePages(1);
      Pdpte[PdpteIndex] = (UINT64)((UINTN)Pde | IA32_PG_P);
    }
    // Install the 2-MByte page: align the address down to 2M, set PS/RW/P.
    Pde[PdeIndex] = (address & ~0x1FFFFF) | IA32_PG_PS | IA32_PG_RW | IA32_PG_P;
  }
  else
  {
    DEBUG((EFI_D_ERROR, "***Warning*** AddSuperPage - only 64-bit is supported\n"));
  }
}
/**
  STM host page-fault (#PF) handler: demand-maps the faulting address by
  installing a 2-MByte superpage over it, then returns to retry the access.

  @param InterruptType  exception vector that fired (page fault)
  @param SystemContext  saved processor context at the fault
**/
VOID
EFIAPI
StmPageFault (
  IN EFI_EXCEPTION_TYPE InterruptType,
  IN EFI_SYSTEM_CONTEXT SystemContext
  )
{
  // CR2 holds the linear address that caused the fault.
  UINTN MissingAddress = AsmReadCr2();

  DEBUG((EFI_D_ERROR,"STM Page Fault at 0x%016lx, Rip 0x%016lx Cr2 0x%016lx\n",
        MissingAddress,
        SystemContext.SystemContextX64->Rip, SystemContext.SystemContextX64->Cr2));

  AddSuperPage(MissingAddress);
}
/**
  Install StmPageFault as the handler for exception vector 14 (#PF),
  enabling demand paging of the STM host address space.
**/
void SetupStmPageFault(VOID)
{
  // Vector 14 is the IA-32 page-fault exception.
  RegisterExceptionHandler(14, StmPageFault);
}

254
Stm/StmPkg/Core/Init/PeVmcsInit.c Executable file
View File

@ -0,0 +1,254 @@
/** @file
VM/PE VMCS initialization
Copyright (c) 2010, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php.
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
**/
/// Intel Copyright left in as this is a modification of their code
#include "StmInit.h"
#include "PeStm.h"
/**
  This function initializes the VMCS for a Protected Execution (PE) VM.

  Programs pin-based, processor-based, and secondary execution controls
  (EPT + unrestricted guest), the host state (taken from the current STM
  context), and the guest state (taken from the caller-supplied Vmcs
  structure), plus the PERF_GLOBAL_CTRL save/restore MSR lists.

  @param CpuIndex  CPU index
  @param VmType    PE VM type - index into mGuestContextCommonSmm[]
  @param Vmcs      pointer to the per-CPU PE guest state used to seed the VMCS
**/
VOID
InitPeGuestVmcs (
  IN UINT32 CpuIndex,
  IN UINT32 VmType,
  IN PE_GUEST_CONTEXT_PER_CPU *Vmcs
  )
{
  UINT64 Data64;
  //VM_EXIT_CONTROLS VmExitCtrls;
  //VM_ENTRY_CONTROLS VmEntryCtrls;
  VM_EXEC_PIN_BASES_VMEXIT_CONTROLS PinBasedCtls;
  VM_EXEC_PROCESSOR_BASES_VMEXIT_CONTROLS ProcessorBasedCtrls;
  VM_EXEC_2ND_PROCESSOR_BASES_VMEXIT_CONTROLS ProcessorBasedCtrls2nd;
  GUEST_INTERRUPTIBILITY_STATE GuestInterruptibilityState;
  VM_EXIT_MSR_ENTRY *VmExitMsrEntry;
  // UINT32 ExceptionBitmap;
  // UINT32 PageFaultErrorCodeMask;
  // UINT32 PageFaultErrorCodeMatch;

  // Pin-based controls: start from the allowed-0 settings (low 32 bits of
  // the capability MSR), then adjust.
  Data64 = AsmReadMsr64 (IA32_VMX_PINBASED_CTLS_MSR_INDEX);
  PinBasedCtls.Uint32 = (UINT32)(Data64 & 0xFFFFFFFF);
  PinBasedCtls.Bits.ExternalInterrupt = 0; // external interrupt
  PinBasedCtls.Bits.Nmi = 1; // NMI is used to allow for when an SMI occurs when one of the processors
                             // is running a VM/PE to allow the other processors to interrupt the VM/PE
                             // so that the SMI handler can process the SMI
  PinBasedCtls.Bits.VmxPreemptionTimer = 1; // Timer (was zero)

  // Processor based controls
  Data64 = AsmReadMsr64 (IA32_VMX_PROCBASED_CTLS_MSR_INDEX);
  ProcessorBasedCtrls.Uint32 = (UINT32)Data64;
  ProcessorBasedCtrls.Bits.Hlt = 0;
  ProcessorBasedCtrls.Bits.InterruptWindow = 0; // interrupt window
  ProcessorBasedCtrls.Bits.NmiWindow = 0;
  ProcessorBasedCtrls.Bits.UnconditionalIo = 1; // unconditional I/O exiting
  ProcessorBasedCtrls.Bits.IoBitmap = 0; // was 1
  ProcessorBasedCtrls.Bits.MsrBitmap = 0; // was 1
  ProcessorBasedCtrls.Bits.SecondaryControl = 1;
  ProcessorBasedCtrls.Bits.Cr3Load = 0;
  ProcessorBasedCtrls.Bits.Cr3Store = 0;
  // Clamp to the allowed-1 settings (high 32 bits of the capability MSR).
  ProcessorBasedCtrls.Uint32 &= (UINT32)RShiftU64 (Data64, 32);

  // Secondary Processor Based Controls
  Data64 = AsmReadMsr64 (IA32_VMX_PROCBASED_CTLS2_MSR_INDEX);
  // Force this for now
  ProcessorBasedCtrls2nd.Uint32 = (UINT32) Data64;
  mGuestContextCommonSmm[VmType].GuestContextPerCpu[0].UnrestrictedGuest = TRUE;
  ProcessorBasedCtrls2nd.Bits.Ept = 1;
  ProcessorBasedCtrls2nd.Bits.UnrestrictedGuest = 1;
  DEBUG((EFI_D_ERROR, "%ld InitPeGuestVmcs - PE Guest set as unrestricted\n", CpuIndex));
  ProcessorBasedCtrls2nd.Uint32 &= (UINT32)RShiftU64 (Data64, 32);

  /*EDM - forced what we did before */
  //VmExitCtrls.Uint32 = 0x13EFFB;

  // NOTE(review): this local is set but never written to the VMCS below;
  // the interruptibility state actually written comes from
  // Vmcs->InterruptibilityState - confirm the local is vestigial.
  GuestInterruptibilityState.Uint32 = 0;
  GuestInterruptibilityState.Bits.BlockingBySmi = 1;

  //#define VMCS_32_CONTROL_EXCEPTION_BITMAP_INDEX 0x4004
  //#define VMCS_32_CONTROL_PAGE_FAULT_ERROR_CODE_MASK_INDEX 0x4006
  //#define VMCS_32_CONTROL_PAGE_FAULT_ERROR_CODE_MATCH_INDEX 0x4008

  //
  // Control field
  //
  VmWrite32 (VMCS_32_GUEST_VMX_PREEMPTION_TIMER_VALUE_INDEX, 4000000000); // preemption timer...
  VmWrite32 (VMCS_32_CONTROL_PIN_BASED_VM_EXECUTION_INDEX, PinBasedCtls.Uint32);
  VmWrite32 (VMCS_32_CONTROL_PROCESSOR_BASED_VM_EXECUTION_INDEX, ProcessorBasedCtrls.Uint32);
  VmWrite32 (VMCS_32_CONTROL_2ND_PROCESSOR_BASED_VM_EXECUTION_INDEX, ProcessorBasedCtrls2nd.Uint32);
  VmWrite32 (VMCS_32_CONTROL_EXCEPTION_BITMAP_INDEX, 0);
  DEBUG((EFI_D_ERROR, "%ld InitPeGuestVmcs - Exception Bitmap set to: 0x%08lx\n", CpuIndex, VmRead32(VMCS_32_CONTROL_EXCEPTION_BITMAP_INDEX)));
  VmWrite32 (VMCS_32_CONTROL_VMENTRY_CONTROLS_INDEX, Vmcs->VmEntryCtrls.Uint32);
  VmWrite32 (VMCS_32_CONTROL_VMEXIT_CONTROLS_INDEX, Vmcs->VmExitCtrls.Uint32);
  VmWrite64 (VMCS_64_CONTROL_EPT_PTR_INDEX, mGuestContextCommonSmm[VmType].EptPointer.Uint64);

  // turn the below on and the VM/PE CR handling code will be invoked
  // can enter 64-bit w/o these setting turned on with Sandybridge
  VmWriteN (VMCS_N_CONTROL_CR0_GUEST_HOST_MASK_INDEX, 0);//((UINTN)AsmReadMsr64 (IA32_VMX_CR0_FIXED0_MSR_INDEX) & (UINTN)AsmReadMsr64 (IA32_VMX_CR0_FIXED1_MSR_INDEX)) | CR0_CD);
  VmWriteN (VMCS_N_CONTROL_CR4_GUEST_HOST_MASK_INDEX, 0);//((UINTN)AsmReadMsr64 (IA32_VMX_CR4_FIXED0_MSR_INDEX) & (UINTN)AsmReadMsr64 (IA32_VMX_CR4_FIXED1_MSR_INDEX)) );
  VmWriteN (VMCS_N_CONTROL_CR0_READ_SHADOW_INDEX, 0);//mGuestContextCommonSmm[VmType].GuestContextPerCpu[0].Cr0 | ((UINTN)AsmReadMsr64 (IA32_VMX_CR0_FIXED0_MSR_INDEX) & (UINTN)AsmReadMsr64 (IA32_VMX_CR0_FIXED1_MSR_INDEX)));
  VmWriteN (VMCS_N_CONTROL_CR4_READ_SHADOW_INDEX, 0);//mGuestContextCommonSmm[VmType].GuestContextPerCpu[0].Cr4 | ((UINTN)AsmReadMsr64 (IA32_VMX_CR4_FIXED0_MSR_INDEX) & (UINTN)AsmReadMsr64 (IA32_VMX_CR4_FIXED1_MSR_INDEX)) | CR4_PAE);

  if (ProcessorBasedCtrls.Bits.IoBitmap != 0)
  {
    // Since we want to use IO Bitmaps, then point to the Bitmaps
    VmWrite64 (VMCS_64_CONTROL_IO_BITMAP_A_INDEX, mGuestContextCommonSmm[VmType].IoBitmapA);
    VmWrite64 (VMCS_64_CONTROL_IO_BITMAP_B_INDEX, mGuestContextCommonSmm[VmType].IoBitmapB);
  }
  if (ProcessorBasedCtrls.Bits.MsrBitmap != 0) {
    VmWrite64 (VMCS_64_CONTROL_MSR_BITMAP_INDEX, mGuestContextCommonSmm[VmType].MsrBitmap);
  }

  //
  // Make sure the value is valid
  //
  VmWrite32 (VMCS_32_CONTROL_VMEXIT_MSR_STORE_COUNT_INDEX, mGuestContextCommonSmi.GuestContextPerCpu[CpuIndex].GuestMsrEntryCount);
  VmWrite32 (VMCS_32_CONTROL_VMEXIT_MSR_LOAD_COUNT_INDEX, mHostContextCommon.HostContextPerCpu[CpuIndex].HostMsrEntryCount);
  VmWrite32 (VMCS_32_CONTROL_VMENTRY_MSR_LOAD_COUNT_INDEX, mGuestContextCommonSmi.GuestContextPerCpu[CpuIndex].GuestMsrEntryCount);

  //
  // Upon receiving control due to an SMI, the STM shall save the contents of the IA32_PERF_GLOBAL_CTRL MSR, disable any
  // enabled bits in the IA32_PERF_GLOBAL_CTRL MSR.
  // Do we need handle IA32_PEBS_ENABLE MSR ???
  //
  VmExitMsrEntry = (VM_EXIT_MSR_ENTRY *)(UINTN)mHostContextCommon.HostContextPerCpu[CpuIndex].HostMsrEntryAddress;
  VmExitMsrEntry[CpuIndex].MsrIndex = IA32_PERF_GLOBAL_CTRL_MSR_INDEX;
  VmExitMsrEntry[CpuIndex].MsrData = 0;
  VmExitMsrEntry = (VM_EXIT_MSR_ENTRY *)(UINTN)mGuestContextCommonSmi.GuestContextPerCpu[CpuIndex].GuestMsrEntryAddress;
  VmExitMsrEntry[CpuIndex].MsrIndex = IA32_PERF_GLOBAL_CTRL_MSR_INDEX;
  VmExitMsrEntry[CpuIndex].MsrData = AsmReadMsr64(IA32_PERF_GLOBAL_CTRL_MSR_INDEX);
  VmWrite64 (VMCS_64_CONTROL_VMEXIT_MSR_STORE_INDEX, mGuestContextCommonSmi.GuestContextPerCpu[CpuIndex].GuestMsrEntryAddress);
  VmWrite64 (VMCS_64_CONTROL_VMEXIT_MSR_LOAD_INDEX, mHostContextCommon.HostContextPerCpu[CpuIndex].HostMsrEntryAddress);
  VmWrite64 (VMCS_64_CONTROL_VMENTRY_MSR_LOAD_INDEX, mGuestContextCommonSmi.GuestContextPerCpu[CpuIndex].GuestMsrEntryAddress);

  //
  // Host field
  //
  // NOTE(review): ES/SS/FS/GS/TR selectors are all loaded from DS here -
  // presumably the STM host uses one flat data selector for all of them;
  // confirm TR in particular is a valid TSS selector in the host GDT.
  VmWriteN (VMCS_N_HOST_CR0_INDEX, AsmReadCr0 ());
  VmWriteN (VMCS_N_HOST_CR3_INDEX, mHostContextCommon.PageTable);
  VmWriteN (VMCS_N_HOST_CR4_INDEX, AsmReadCr4 ());
  VmWrite16 (VMCS_16_HOST_ES_INDEX, AsmReadDs ());
  VmWrite16 (VMCS_16_HOST_CS_INDEX, AsmReadCs ());
  VmWrite16 (VMCS_16_HOST_SS_INDEX, AsmReadDs ());
  VmWrite16 (VMCS_16_HOST_DS_INDEX, AsmReadDs ());
  VmWrite16 (VMCS_16_HOST_FS_INDEX, AsmReadDs ());
  VmWrite16 (VMCS_16_HOST_GS_INDEX, AsmReadDs ());
  VmWrite16 (VMCS_16_HOST_TR_INDEX, AsmReadDs ());
  VmWriteN (VMCS_N_HOST_TR_BASE_INDEX, 0);
  VmWriteN (VMCS_N_HOST_GDTR_BASE_INDEX, mHostContextCommon.Gdtr.Base);
  VmWriteN (VMCS_N_HOST_IDTR_BASE_INDEX, mHostContextCommon.Idtr.Base);
  VmWriteN (VMCS_N_HOST_RSP_INDEX, mHostContextCommon.HostContextPerCpu[CpuIndex].Stack);
  VmWriteN (VMCS_N_HOST_RIP_INDEX, (UINTN)AsmHostEntrypointSmmPe);
  VmWrite64 (VMCS_64_HOST_IA32_PERF_GLOBAL_CTRL_INDEX, 0);

  //
  // Guest field
  //
  VmWriteN(VMCS_N_GUEST_CR0_INDEX, mGuestContextCommonSmm[VmType].GuestContextPerCpu[0].Cr0);
  VmWriteN(VMCS_N_GUEST_CR3_INDEX, mGuestContextCommonSmm[VmType].GuestContextPerCpu[0].Cr3);
  VmWriteN(VMCS_N_GUEST_CR4_INDEX, mGuestContextCommonSmm[VmType].GuestContextPerCpu[0].Cr4);
  VmWriteN (VMCS_N_GUEST_LDTR_BASE_INDEX, Vmcs->LdtrBase);
  VmWriteN (VMCS_N_GUEST_GDTR_BASE_INDEX, Vmcs->GdtrBase);
  VmWriteN (VMCS_N_GUEST_IDTR_BASE_INDEX, Vmcs->IdtrBase);
  VmWriteN (VMCS_N_GUEST_RSP_INDEX, Vmcs->Rsp);
  VmWriteN (VMCS_N_GUEST_RIP_INDEX, Vmcs->Rip);
  VmWriteN (VMCS_N_GUEST_RFLAGS_INDEX, 0x2); // bit 1 is the always-set reserved flag
  VmWriteN (VMCS_N_GUEST_PENDING_DEBUG_EXCEPTIONS_INDEX,0);
  VmWriteN (VMCS_N_GUEST_IA32_SYSENTER_ESP_INDEX, 0);
  VmWriteN (VMCS_N_GUEST_IA32_SYSENTER_EIP_INDEX, 0);
  //VmWriteN (VMCS_N_GUEST_DR7_INDEX, 0);
  VmWrite16 (VMCS_16_GUEST_ES_INDEX, Vmcs->EsSelector);
  VmWrite16 (VMCS_16_GUEST_CS_INDEX, Vmcs->CsSelector);
  VmWrite16 (VMCS_16_GUEST_SS_INDEX, Vmcs->SsSelector);
  VmWrite16 (VMCS_16_GUEST_DS_INDEX, Vmcs->DsSelector);
  VmWrite16 (VMCS_16_GUEST_FS_INDEX, Vmcs->FsSelector);
  VmWrite16 (VMCS_16_GUEST_GS_INDEX, Vmcs->GsSelector);
  VmWrite16 (VMCS_16_GUEST_LDTR_INDEX, Vmcs->LdtrSelector);
  VmWrite16 (VMCS_16_GUEST_TR_INDEX, Vmcs->TrSelector);
  VmWrite32 (VMCS_32_GUEST_LDTR_LIMIT_INDEX, Vmcs->LdtrLimit);
  VmWrite32 (VMCS_32_GUEST_GDTR_LIMIT_INDEX, Vmcs->GdtrLimit);
  VmWrite32 (VMCS_32_GUEST_IDTR_LIMIT_INDEX, Vmcs->IdtrLimit);
  VmWrite32 (VMCS_32_GUEST_IA32_SYSENTER_CS_INDEX, 0);
  VmWriteN (VMCS_N_GUEST_IA32_SYSENTER_ESP_INDEX, 0);
  VmWriteN (VMCS_N_GUEST_IA32_SYSENTER_EIP_INDEX, 0);
  VmWrite32 (VMCS_32_GUEST_LDTR_ACCESS_RIGHT_INDEX, Vmcs->LdtrAccessRights);
  VmWriteN (VMCS_N_GUEST_CS_BASE_INDEX, Vmcs->CsBase);
  VmWrite32 (VMCS_32_GUEST_CS_ACCESS_RIGHT_INDEX, Vmcs->CsAccessRights);
  VmWrite32 (VMCS_32_GUEST_CS_LIMIT_INDEX, Vmcs->CsLimit);
  VmWriteN (VMCS_N_GUEST_SS_BASE_INDEX, Vmcs->SsBase);
  VmWrite32 (VMCS_32_GUEST_SS_ACCESS_RIGHT_INDEX, Vmcs->SsAccessRights);
  VmWrite32 (VMCS_32_GUEST_SS_LIMIT_INDEX, Vmcs->SsLimit);
  VmWriteN (VMCS_N_GUEST_DS_BASE_INDEX, Vmcs->DsBase);
  VmWrite32 (VMCS_32_GUEST_DS_ACCESS_RIGHT_INDEX, Vmcs->DsAccessRights);
  VmWrite32 (VMCS_32_GUEST_DS_LIMIT_INDEX, Vmcs->DsLimit);
  VmWriteN (VMCS_N_GUEST_ES_BASE_INDEX, Vmcs->EsBase);
  VmWrite32 (VMCS_32_GUEST_ES_ACCESS_RIGHT_INDEX, Vmcs->EsAccessRights);
  VmWrite32 (VMCS_32_GUEST_ES_LIMIT_INDEX, Vmcs->EsLimit);
  VmWriteN (VMCS_N_GUEST_FS_BASE_INDEX, Vmcs->FsBase);
  VmWrite32 (VMCS_32_GUEST_FS_ACCESS_RIGHT_INDEX, Vmcs->FsAccessRights);
  VmWrite32 (VMCS_32_GUEST_FS_LIMIT_INDEX, Vmcs->FsLimit);
  VmWriteN (VMCS_N_GUEST_GS_BASE_INDEX, Vmcs->GsBase);
  VmWrite32 (VMCS_32_GUEST_GS_ACCESS_RIGHT_INDEX, Vmcs->GsAccessRights);
  VmWrite32 (VMCS_32_GUEST_GS_LIMIT_INDEX, Vmcs->GsLimit);
  VmWriteN (VMCS_N_GUEST_TR_BASE_INDEX, Vmcs->TrBase);
  VmWrite32 (VMCS_32_GUEST_TR_ACCESS_RIGHT_INDEX, Vmcs->TrAccessRights);
  VmWrite32 (VMCS_32_GUEST_TR_LIMIT_INDEX, Vmcs->TrLimit);
  VmWrite32 (VMCS_32_GUEST_INTERRUPTIBILITY_STATE_INDEX, Vmcs->InterruptibilityState.Uint32);
  VmWrite64 (VMCS_64_GUEST_IA32_PERF_GLOBAL_CTRL_INDEX, AsmReadMsr64(IA32_PERF_GLOBAL_CTRL_MSR_INDEX));
  VmWrite32(VMCS_32_GUEST_ACTIVITY_STATE_INDEX, Vmcs->ActivityState);
  VmWrite64(VMCS_64_GUEST_VMCS_LINK_PTR_INDEX, Vmcs->VmcsLinkPointerFull);
  VmWrite64(VMCS_64_GUEST_IA32_EFER_INDEX, mGuestContextCommonSmm[VmType].GuestContextPerCpu[0].Efer);

  DEBUG((EFI_D_ERROR, "%ld InitPeGuestVmcs - done\n", CpuIndex));
  return ;
}

View File

@ -13,11 +13,22 @@
**/
#include "StmInit.h"
#include "PeStm.h"
#include <Library/PcdLib.h>
extern PE_SMI_CONTROL PeSmiControl;
extern void InitPe();
STM_HOST_CONTEXT_COMMON mHostContextCommon;
STM_GUEST_CONTEXT_COMMON mGuestContextCommonSmi;
STM_GUEST_CONTEXT_COMMON mGuestContextCommonSmm;
STM_GUEST_CONTEXT_COMMON mGuestContextCommonSmm[NUM_PE_TYPE];
static SPIN_LOCK CpuReadyCountLock;
static unsigned int CpuReadyCount = 0;
void CpuReadySync(UINT32 Index);
VOID InitCpuReadySync();
volatile BOOLEAN mIsBspInitialized;
@ -497,6 +508,51 @@ GetIndexFromStack (
return (UINT32)(Index - 1);
}
/**
  This function returns MSEG information from the Intel TXT public space.

  @param MsegBase    receives the MSEG base address (TXT.MSEG.BASE register)
  @param MsegLength  receives the MSEG size (TXT.MSEG.SIZE register)

  @return 0 always
**/
UINT64
GetMsegInfoFromTxt (
  OUT UINT64 *MsegBase,
  OUT UINT64 *MsegLength
  )
{
  *MsegBase = TxtPubRead64(TXT_MSEG_BASE);
  *MsegLength = TxtPubRead64(TXT_MSEG_SIZE);
  return 0;
}
/**
  This function returns MSEG information from MSR.

  MSEG base comes from IA32_SMM_MONITOR_CTL; the length is derived so
  that MSEG extends from that base to the end of the SMRR region
  (SMRR base + SMRR size).

  @param MsegBase    receives the MSEG base address
  @param MsegLength  receives the MSEG length

  @return the computed MSEG length (same value stored in *MsegLength)
**/
extern MRTT_INFO mMtrrInfo;
UINT64
GetMsegInfoFromMsr (
  OUT UINT64 *MsegBase,
  OUT UINT64 *MsegLength
  )
{
  UINT32 SmrrBase;
  UINT32 SmrrLength;

  // SMRR base/size from the cached MTRR info; the mask selects the
  // aligned base, and its two's complement gives the region size.
  SmrrBase = (UINT32)mMtrrInfo.SmrrBase & (UINT32)mMtrrInfo.SmrrMask & 0xFFFFF000;
  SmrrLength = (UINT32)mMtrrInfo.SmrrMask & 0xFFFFF000;
  SmrrLength = ~SmrrLength + 1;

  // MSEG base is bits 31:12 of IA32_SMM_MONITOR_CTL; MSEG runs from
  // there to the end of SMRR.
  *MsegBase = (UINT64)((UINT32)AsmReadMsr64(IA32_SMM_MONITOR_CTL_MSR_INDEX) & 0xFFFFF000);
  *MsegLength = SmrrLength - (*MsegBase - SmrrBase);
  return *MsegLength;
}
/**
This function initialize STM heap.
@ -509,12 +565,42 @@ InitHeap (
IN STM_HEADER *StmHeader
)
{
HEAP_HEADER *HeaderPointer;
UINT64 MsegBase;
UINT64 MsegLength;
if (IsSentryEnabled()) {
GetMsegInfoFromTxt (&MsegBase, &MsegLength);
DEBUG ((EFI_D_INFO, "TXT MsegBase- %08x\n", (UINTN)MsegBase));
DEBUG ((EFI_D_INFO, "TXT MsegLength - %08x\n", (UINTN)MsegLength));
} else {
GetMsegInfoFromMsr (&MsegBase, &MsegLength);
}
DEBUG ((EFI_D_INFO, "MsegBase (MSR) - %08x\n", (UINTN)MsegBase));
DEBUG ((EFI_D_INFO, "MsegLength (end of TSEG) - %08x\n", (UINTN)MsegLength));
if (MsegBase == 0) {
DEBUG ((EFI_D_ERROR, "MsegBase == 0\n"));
CpuDeadLoop ();
}
mHostContextCommon.HeapBottom = (UINT64)((UINTN)StmHeader +
StmHeader->HwStmHdr.Cr3Offset +
STM_PAGES_TO_SIZE(6)); // reserve 6 page for page table
mHostContextCommon.HeapTop = MsegBase + MsegLength;
#ifdef SizeFromLoad
mHostContextCommon.HeapTop = (UINT64)((UINTN)StmHeader +
STM_PAGES_TO_SIZE (STM_SIZE_TO_PAGES (StmHeader->SwStmHdr.StaticImageSize)) +
StmHeader->SwStmHdr.AdditionalDynamicMemorySize);
#endif
mHostContextCommon.HeapFree = mHostContextCommon.HeapBottom;
HeaderPointer = (HEAP_HEADER *)((UINTN) mHostContextCommon.HeapFree);
HeaderPointer->NextBlock = 0L;
HeaderPointer->BlockLength = (mHostContextCommon.HeapTop - mHostContextCommon.HeapBottom) >> 12;
}
/**
@ -527,9 +613,9 @@ InitBasicContext (
VOID
)
{
mHostContextCommon.HostContextPerCpu = AllocatePages (STM_SIZE_TO_PAGES(sizeof(STM_HOST_CONTEXT_PER_CPU)) * mHostContextCommon.CpuNum);
mGuestContextCommonSmi.GuestContextPerCpu = AllocatePages (STM_SIZE_TO_PAGES(sizeof(STM_GUEST_CONTEXT_PER_CPU)) * mHostContextCommon.CpuNum);
mGuestContextCommonSmm.GuestContextPerCpu = AllocatePages (STM_SIZE_TO_PAGES(sizeof(STM_GUEST_CONTEXT_PER_CPU)) * mHostContextCommon.CpuNum);
mHostContextCommon.HostContextPerCpu = (STM_HOST_CONTEXT_PER_CPU *)AllocatePages (STM_SIZE_TO_PAGES(sizeof(STM_HOST_CONTEXT_PER_CPU)) * mHostContextCommon.CpuNum);
mGuestContextCommonSmi.GuestContextPerCpu = (STM_GUEST_CONTEXT_PER_CPU *) AllocatePages (STM_SIZE_TO_PAGES(sizeof(STM_GUEST_CONTEXT_PER_CPU)) * mHostContextCommon.CpuNum);
mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu = AllocatePages (STM_SIZE_TO_PAGES(sizeof(STM_GUEST_CONTEXT_PER_CPU)) * mHostContextCommon.CpuNum);
}
/**
@ -554,20 +640,25 @@ BspInit (
UINTN XStateSize;
UINT32 RegEax;
IA32_VMX_MISC_MSR VmxMisc;
UINT32 BiosStmVer = 100; // initially assume that the BIOS supports v1.0 of the Intel ref
IA32_DESCRIPTOR IdtrLoad;
StmHeader = (STM_HEADER *)(UINTN)((UINT32)AsmReadMsr64(IA32_SMM_MONITOR_CTL_MSR_INDEX) & 0xFFFFF000);
InitHeap (StmHeader);
// after that we can use mHostContextCommon
InitializeSpinLock (&mHostContextCommon.DebugLock);
// after that we can use DEBUG
// after that we can use DEBUG
DEBUG ((EFI_D_ERROR, " ********************** STM/PE *********************\n"));
DEBUG ((EFI_D_INFO, "!!!STM build time - %a %a!!!\n", (CHAR8 *)__DATE__, (CHAR8 *)__TIME__));
DEBUG ((EFI_D_INFO, "!!!STM Relocation DONE!!!\n"));
DEBUG ((EFI_D_INFO, "!!!Enter StmInit (BSP)!!! - %d (%x)\n", (UINTN)0, (UINTN)ReadUnaligned32 ((UINT32 *)&Register->Rax)));
// Check Signature and size
// Check Signature and size
VmxMisc.Uint64 = AsmReadMsr64 (IA32_VMX_MISC_MSR_INDEX);
if ((VmxMisc.Uint64 & BIT15) != 0) {
TxtProcessorSmmDescriptor = (TXT_PROCESSOR_SMM_DESCRIPTOR *)(UINTN)(AsmReadMsr64 (IA32_SMBASE_INDEX) + SMM_TXTPSD_OFFSET);
@ -585,10 +676,10 @@ BspInit (
EFI_ACPI_DESCRIPTION_HEADER *Rsdt;
EFI_ACPI_DESCRIPTION_HEADER *Xsdt;
mHostContextCommon.AcpiRsdp = TxtProcessorSmmDescriptor->AcpiRsdp;
mHostContextCommon.AcpiRsdp = TxtProcessorSmmDescriptor->AcpiRsdp;
Rsdp = FindAcpiRsdPtr ();
DEBUG ((EFI_D_INFO, "Rsdp - %08x\n", Rsdp));
if (Rsdp == NULL) {
if (Rsdp == NULL) {
CpuDeadLoop ();
}
Rsdt = (EFI_ACPI_DESCRIPTION_HEADER *)(UINTN)Rsdp->RsdtAddress;
@ -606,7 +697,7 @@ BspInit (
}
InterlockedIncrement (&mHostContextCommon.JoinedCpuNum);
mHostContextCommon.StmShutdown = 0; // used by Stm/Pe to know when to stop the timer
InitializeSpinLock (&mHostContextCommon.MemoryLock);
InitializeSpinLock (&mHostContextCommon.SmiVmcallLock);
InitializeSpinLock (&mHostContextCommon.PciLock);
@ -614,6 +705,17 @@ BspInit (
DEBUG ((EFI_D_INFO, "HeapBottom - %08x\n", mHostContextCommon.HeapBottom));
DEBUG ((EFI_D_INFO, "HeapTop - %08x\n", mHostContextCommon.HeapTop));
// initialize PE state
PeSmiControl.PeExec = 0;
PeSmiControl.PeNmiBreak = 0;
PeSmiControl.PeCpuIndex = -1;
// Initialize CpuSync
CpuReadyCount = 0;
InitializeSpinLock(&CpuReadyCountLock);
DEBUG ((EFI_D_INFO, "TxtProcessorSmmDescriptor - %08x\n", (UINTN)TxtProcessorSmmDescriptor));
DEBUG ((EFI_D_INFO, " Signature - %016lx\n", TxtProcessorSmmDescriptor->Signature));
DEBUG ((EFI_D_INFO, " Size - %04x\n", (UINTN)TxtProcessorSmmDescriptor->Size));
@ -659,11 +761,18 @@ BspInit (
DEBUG ((EFI_D_INFO, "TXT Descriptor Signature ERROR - %016lx!\n", TxtProcessorSmmDescriptor->Signature));
CpuDeadLoop ();
}
if (TxtProcessorSmmDescriptor->Size != sizeof(TXT_PROCESSOR_SMM_DESCRIPTOR)) {
DEBUG ((EFI_D_INFO, "TXT Descriptor Size ERROR - %08x!\n", TxtProcessorSmmDescriptor->Size));
CpuDeadLoop ();
if(TxtProcessorSmmDescriptor->Size != sizeof(TXT_PROCESSOR_SMM_DESCRIPTOR) + 9) // are we dealing with a .99 Bios
{
BiosStmVer = 99; // version .99 has nine less bytes, etc
DEBUG((EFI_D_INFO, "Version .99 Bios detected Found Size: %08x SizeOf %08x\n", TxtProcessorSmmDescriptor->Size, sizeof(TXT_PROCESSOR_SMM_DESCRIPTOR)));
}
else
{
if (TxtProcessorSmmDescriptor->Size != sizeof(TXT_PROCESSOR_SMM_DESCRIPTOR)) {
DEBUG ((EFI_D_INFO, "TXT Descriptor Size ERROR - %08x! Found %08x\n", TxtProcessorSmmDescriptor->Size, sizeof(TXT_PROCESSOR_SMM_DESCRIPTOR) ));
CpuDeadLoop ();
}
}
InitBasicContext ();
DEBUG ((EFI_D_INFO, "Register(%d) - %08x\n", (UINTN)0, Register));
@ -703,7 +812,15 @@ BspInit (
mHostContextCommon.StmSize = GetMinMsegSize (StmHeader);
DEBUG ((EFI_D_INFO, "MinMsegSize - %08x!\n", (UINTN)mHostContextCommon.StmSize));
mHostContextCommon.PhysicalAddressBits = TxtProcessorSmmDescriptor->PhysicalAddressBits;
if(BiosStmVer == 99)
{
mHostContextCommon.PhysicalAddressBits = 36; // for v.99 use this value for now. CPUID value might be too big
}
else
{
mHostContextCommon.PhysicalAddressBits = TxtProcessorSmmDescriptor->PhysicalAddressBits;
}
AsmCpuid(CPUID_EXTENDED_INFORMATION, &RegEax, NULL, NULL, NULL);
if (RegEax >= CPUID_EXTENDED_ADDRESS_SIZE) {
AsmCpuid(CPUID_EXTENDED_ADDRESS_SIZE, &RegEax, NULL, NULL, NULL);
@ -733,10 +850,18 @@ BspInit (
IdtGate = (IA32_IDT_GATE_DESCRIPTOR *)mHostContextCommon.Idtr.Base;
InitializeExternalVectorTablePtr (IdtGate);
//IA32_DESCRIPTOR IdtrLoad;
IdtrLoad = mHostContextCommon.Idtr;
AsmWriteIdtr(&IdtrLoad);
//
// Add more paging for Host CR3.
//
CreateHostPaging ();
/////////
//CreateHostPaging ();
// make host paging dynamic to save space for the PE/VMs
SetupStmPageFault();
// VMCS database: One CPU one page should be enough
VmcsDatabasePage = mHostContextCommon.CpuNum;
@ -752,14 +877,14 @@ BspInit (
mCpuInitStatus = AllocatePages (STM_SIZE_TO_PAGES (mHostContextCommon.CpuNum));
mGuestContextCommonSmm.GuestContextPerCpu[0].Cr3 = (UINTN)TxtProcessorSmmDescriptor->SmmCr3;
mGuestContextCommonSmm.GuestContextPerCpu[0].Actived = FALSE;
mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[0].Cr3 = (UINTN)TxtProcessorSmmDescriptor->SmmCr3;
mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[0].Actived = FALSE;
//
// CompatiblePageTable for IA32 flat mode only
//
mGuestContextCommonSmm.CompatiblePageTable = CreateCompatiblePageTable ();
mGuestContextCommonSmm.CompatiblePaePageTable = CreateCompatiblePaePageTable ();
mGuestContextCommonSmm[SMI_HANDLER].CompatiblePageTable = CreateCompatiblePageTable ();
mGuestContextCommonSmm[SMI_HANDLER].CompatiblePaePageTable = CreateCompatiblePaePageTable ();
//
// Allocate XState buffer
@ -804,15 +929,15 @@ BspInit (
for (SubIndex = 0; SubIndex < mHostContextCommon.CpuNum; SubIndex++) {
mHostContextCommon.HostContextPerCpu[SubIndex].HostMsrEntryCount = 1;
mGuestContextCommonSmi.GuestContextPerCpu[SubIndex].GuestMsrEntryCount = 1;
mGuestContextCommonSmm.GuestContextPerCpu[SubIndex].GuestMsrEntryCount = 1;
mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[SubIndex].GuestMsrEntryCount = 1;
}
mHostContextCommon.HostContextPerCpu[0].HostMsrEntryAddress = (UINT64)(UINTN)AllocatePages (STM_SIZE_TO_PAGES (sizeof(VM_EXIT_MSR_ENTRY) * mHostContextCommon.HostContextPerCpu[0].HostMsrEntryCount * mHostContextCommon.CpuNum));
mGuestContextCommonSmi.GuestContextPerCpu[0].GuestMsrEntryAddress = (UINT64)(UINTN)AllocatePages (STM_SIZE_TO_PAGES (sizeof(VM_EXIT_MSR_ENTRY) * mGuestContextCommonSmi.GuestContextPerCpu[0].GuestMsrEntryCount * mHostContextCommon.CpuNum));
mGuestContextCommonSmm.GuestContextPerCpu[0].GuestMsrEntryAddress = (UINT64)(UINTN)AllocatePages (STM_SIZE_TO_PAGES (sizeof(VM_EXIT_MSR_ENTRY) * mGuestContextCommonSmm.GuestContextPerCpu[0].GuestMsrEntryCount * mHostContextCommon.CpuNum));
mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[SubIndex].GuestMsrEntryAddress = (UINT64)(UINTN)AllocatePages (STM_SIZE_TO_PAGES(sizeof(VM_EXIT_MSR_ENTRY) * mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[0].GuestMsrEntryCount * mHostContextCommon.CpuNum));
for (SubIndex = 0; SubIndex < mHostContextCommon.CpuNum; SubIndex++) {
mHostContextCommon.HostContextPerCpu[SubIndex].HostMsrEntryAddress = mHostContextCommon.HostContextPerCpu[0].HostMsrEntryAddress + sizeof(VM_EXIT_MSR_ENTRY) * mGuestContextCommonSmi.GuestContextPerCpu[0].GuestMsrEntryCount * SubIndex;
mGuestContextCommonSmi.GuestContextPerCpu[SubIndex].GuestMsrEntryAddress = mGuestContextCommonSmi.GuestContextPerCpu[0].GuestMsrEntryAddress + sizeof(VM_EXIT_MSR_ENTRY) * mGuestContextCommonSmi.GuestContextPerCpu[0].GuestMsrEntryCount * SubIndex;
mGuestContextCommonSmm.GuestContextPerCpu[SubIndex].GuestMsrEntryAddress = mGuestContextCommonSmm.GuestContextPerCpu[0].GuestMsrEntryAddress + sizeof(VM_EXIT_MSR_ENTRY) * mGuestContextCommonSmm.GuestContextPerCpu[0].GuestMsrEntryCount * SubIndex;
mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[SubIndex].GuestMsrEntryAddress = mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[0].GuestMsrEntryAddress + sizeof(VM_EXIT_MSR_ENTRY) * mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[0].GuestMsrEntryCount * SubIndex;
}
DEBUG ((EFI_D_INFO, "DumpStmResource - %x\n", TxtProcessorSmmDescriptor->BiosHwResourceRequirementsPtr));
@ -823,6 +948,7 @@ BspInit (
InitStmHandlerSmi ();
InitStmHandlerSmm ();
InitPe(); // Initialize protected execution
STM_PERF_INIT;
//
@ -848,7 +974,7 @@ ApInit (
)
{
X86_REGISTER *Reg;
IA32_DESCRIPTOR IdtrLoad;
while (!mIsBspInitialized) {
//
// Wait here
@ -862,6 +988,10 @@ ApInit (
CpuDeadLoop ();
Index = GetIndexFromStack (Register);
}
// do this here to make sure that we can handle a page fault
IdtrLoad = mHostContextCommon.Idtr;
AsmWriteIdtr(&IdtrLoad);
InterlockedIncrement (&mHostContextCommon.JoinedCpuNum);
@ -912,6 +1042,7 @@ CommonInit (
mHostContextCommon.HostContextPerCpu[Index].Index = Index;
mHostContextCommon.HostContextPerCpu[Index].ApicId = (UINT8)ReadLocalApicId ();
mHostContextCommon.HostContextPerCpu[Index].Vmxon = VmRead64(VMCS_64_CONTROL_EXECUTIVE_VMCS_PTR_INDEX);
StmHeader = mHostContextCommon.StmHeader;
StackBase = (UINTN)StmHeader +
@ -931,8 +1062,8 @@ CommonInit (
DEBUG ((EFI_D_INFO, "TxtProcessorSmmDescriptor(%d) - %08x\n", (UINTN)Index, mHostContextCommon.HostContextPerCpu[Index].TxtProcessorSmmDescriptor));
DEBUG ((EFI_D_INFO, "Stack(%d) - %08x\n", (UINTN)Index, (UINTN)mHostContextCommon.HostContextPerCpu[Index].Stack));
mGuestContextCommonSmm.GuestContextPerCpu[Index].Cr3 = (UINTN)mHostContextCommon.HostContextPerCpu[Index].TxtProcessorSmmDescriptor->SmmCr3;
mGuestContextCommonSmm.GuestContextPerCpu[Index].Efer = AsmReadMsr64 (IA32_EFER_MSR_INDEX);
mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[Index].Cr3 = (UINTN)mHostContextCommon.HostContextPerCpu[Index].TxtProcessorSmmDescriptor->SmmCr3;
mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[Index].Efer = AsmReadMsr64 (IA32_EFER_MSR_INDEX);
mGuestContextCommonSmi.GuestContextPerCpu[Index].Efer = AsmReadMsr64 (IA32_EFER_MSR_INDEX);
}
@ -963,10 +1094,10 @@ VmcsInit (
VmcsSize = GetVmcsSize();
mGuestContextCommonSmi.GuestContextPerCpu[Index].Vmcs = (UINT64)(VmcsBase + VmcsSize * (Index * 2));
mGuestContextCommonSmm.GuestContextPerCpu[Index].Vmcs = (UINT64)(VmcsBase + VmcsSize * (Index * 2 + 1));
mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[Index].Vmcs = (UINT64)(VmcsBase + VmcsSize * (Index * 2 + 1));
DEBUG ((EFI_D_INFO, "SmiVmcsPtr(%d) - %016lx\n", (UINTN)Index, mGuestContextCommonSmi.GuestContextPerCpu[Index].Vmcs));
DEBUG ((EFI_D_INFO, "SmmVmcsPtr(%d) - %016lx\n", (UINTN)Index, mGuestContextCommonSmm.GuestContextPerCpu[Index].Vmcs));
DEBUG ((EFI_D_INFO, "SmmVmcsPtr(%d) - %016lx\n", (UINTN)Index, mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[Index].Vmcs));
AsmVmPtrStore (&CurrentVmcs);
DEBUG ((EFI_D_INFO, "CurrentVmcs(%d) - %016lx\n", (UINTN)Index, CurrentVmcs));
@ -987,24 +1118,24 @@ VmcsInit (
(UINTN)VmcsSize
);
CopyMem (
(VOID *)(UINTN)mGuestContextCommonSmm.GuestContextPerCpu[Index].Vmcs,
(VOID *)(UINTN)mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[Index].Vmcs,
(VOID *)(UINTN)CurrentVmcs,
(UINTN)VmcsSize
);
*(UINT32 *)(UINTN)mGuestContextCommonSmm.GuestContextPerCpu[Index].Vmcs = (UINT32)AsmReadMsr64 (IA32_VMX_BASIC_MSR_INDEX) & 0xFFFFFFFF;
*(UINT32 *)(UINTN)mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[Index].Vmcs = (UINT32)AsmReadMsr64 (IA32_VMX_BASIC_MSR_INDEX) & 0xFFFFFFFF;
AsmWbinvd ();
Rflags = AsmVmPtrLoad (&mGuestContextCommonSmm.GuestContextPerCpu[Index].Vmcs);
Rflags = AsmVmPtrLoad (&mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[Index].Vmcs);
if ((Rflags & (RFLAGS_CF | RFLAGS_ZF)) != 0) {
DEBUG ((EFI_D_ERROR, "ERROR: AsmVmPtrLoad(%d) - %016lx : %08x\n", (UINTN)Index, mGuestContextCommonSmm.GuestContextPerCpu[Index].Vmcs, Rflags));
DEBUG ((EFI_D_ERROR, "ERROR: AsmVmPtrLoad(%d) - %016lx : %08x\n", (UINTN)Index, mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[Index].Vmcs, Rflags));
CpuDeadLoop ();
}
InitializeSmmVmcs (Index, &mGuestContextCommonSmm.GuestContextPerCpu[Index].Vmcs);
Rflags = AsmVmClear (&mGuestContextCommonSmm.GuestContextPerCpu[Index].Vmcs);
InitializeSmmVmcs (Index, &mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[Index].Vmcs);
Rflags = AsmVmClear (&mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[Index].Vmcs);
if ((Rflags & (RFLAGS_CF | RFLAGS_ZF)) != 0) {
DEBUG ((EFI_D_ERROR, "ERROR: AsmVmClear(%d) - %016lx : %08x\n", (UINTN)Index, mGuestContextCommonSmm.GuestContextPerCpu[Index].Vmcs, Rflags));
DEBUG ((EFI_D_ERROR, "ERROR: AsmVmClear(%d) - %016lx : %08x\n", (UINTN)Index, mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[Index].Vmcs, Rflags));
CpuDeadLoop ();
}
@ -1045,7 +1176,7 @@ LaunchBack (
#endif
if (ReadUnaligned32 ((UINT32 *)&Reg->Rax) == STM_API_START) {
// We need do additional thing for STM_API_START
mGuestContextCommonSmm.GuestContextPerCpu[Index].Actived = TRUE;
mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[Index].Actived = TRUE;
SmmSetup (Index);
}
@ -1062,13 +1193,24 @@ LaunchBack (
}
WriteUnaligned32 ((UINT32 *)&Reg->Rbx, 0); // Not support STM_RSC_BGM or STM_RSC_BGI or STM_RSC_MSR
DEBUG ((EFI_D_INFO, "!!!LaunchBack (%d)!!!\n", (UINTN)Index));
DEBUG ((EFI_D_INFO, "%ld !!!LaunchBack!!!\n", (UINTN)Index));
if(Index != 0)
{
// syncs the AP CPUS - all will wait until the BSP has completed setting up the API
CpuReadySync(Index);
}
else
{
// DumpVmcsAllField();
}
Rflags = AsmVmLaunch (Reg);
AcquireSpinLock (&mHostContextCommon.DebugLock);
DEBUG ((EFI_D_ERROR, "!!!LaunchBack FAIL!!!\n"));
DEBUG ((EFI_D_ERROR, "Rflags: %08x\n", Rflags));
DEBUG ((EFI_D_ERROR, "VMCS_32_RO_VM_INSTRUCTION_ERROR: %08x\n", (UINTN)VmRead32 (VMCS_32_RO_VM_INSTRUCTION_ERROR_INDEX)));
DEBUG ((EFI_D_ERROR, "%ld !!!LaunchBack FAIL!!!\n", Index));
DEBUG ((EFI_D_ERROR, "%ld Rflags: %08x\n", Index, Rflags));
DEBUG ((EFI_D_ERROR, "%ld VMCS_32_RO_VM_INSTRUCTION_ERROR: %08x\n", Index, (UINTN)VmRead32 (VMCS_32_RO_VM_INSTRUCTION_ERROR_INDEX)));
ReleaseSpinLock (&mHostContextCommon.DebugLock);
CpuDeadLoop ();
@ -1108,3 +1250,41 @@ InitializeSmmMonitor (
LaunchBack (Index);
return ;
}
// Barrier phase flag shared by all CPUs in CpuReadySync():
//   0 = barrier open for arrivals (CPUs may enter and wait)
//   1 = all CPUs have arrived and are draining out of the barrier
static unsigned int CpuSynched;

//
// Reset the CPU rendezvous barrier used by CpuReadySync().
// Must run once (on the BSP) before any CPU calls CpuReadySync().
// CpuReadyCount is defined elsewhere in this file.
//
VOID InitCpuReadySync()
{
CpuReadyCount = 0; // count of CPUs waiting in the loop
CpuSynched = 0; // when 0 - not synched yet; when 1 CPUs are synched, but all have not exited the loop
// need to prevent CPUs from entering the loop until all CPUs have exited
}
//
// Two-phase rendezvous barrier: each caller blocks until all
// mHostContextCommon.CpuNum CPUs have arrived, then all are released
// together.  InterlockedCompareExchange32(p, x, x) is used as an atomic
// read throughout (compare value == exchange value leaves *p unchanged).
//
// @param Index  Caller's CPU index; used only in (commented-out) debug output.
//
VOID CpuReadySync(UINT32 Index)
{
// Phase 0: if a previous rendezvous is still draining (CpuSynched == 1),
// hold new arrivals here so they cannot corrupt CpuReadyCount.
while(InterlockedCompareExchange32(&CpuSynched, 1, 1) == 1 /*CpuSynched == 1*/) {} // prevent processors from entering the synch loop until all the previous processors have left
// Phase 1: register this CPU as arrived.
InterlockedIncrement(&CpuReadyCount);
// DEBUG ((EFI_D_ERROR, "%ld CpuReadySync - CpuReadyCount: %d CpuNum %d\n", Index, CpuReadyCount, mHostContextCommon.CpuNum));
// Phase 2: spin until CpuSynched flips to 1.  The last CPU to observe
// CpuReadyCount == CpuNum performs that flip (possibly several CPUs race
// to do so; the CAS makes the flip idempotent).
while(InterlockedCompareExchange32(&CpuSynched, 0, 0) == 0)//( CpuReadyCount < mHostContextCommon.CpuNum)
{
// spin until all CPUs are synced
if((InterlockedCompareExchange32(&CpuSynched, 0, 0) == 0 /*CpuSynched == 0*/) &&
InterlockedCompareExchange32(&CpuReadyCount, mHostContextCommon.CpuNum, mHostContextCommon.CpuNum)== mHostContextCommon.CpuNum)
{
InterlockedCompareExchange32(&CpuSynched, 0, 1); //CpuSynched = 1;
//DEBUG((EFI_D_ERROR, "%ld CpuReadySync - CpuSynched set to 1\n", Index));
}
}
// Phase 3: drain.  The last CPU out re-opens the barrier (CpuSynched = 0)
// so the next rendezvous can begin.
if(InterlockedDecrement(&CpuReadyCount) == 0)
{
InterlockedCompareExchange32(&CpuSynched, 1, 0); // CpuSynched = 0;
// DEBUG((EFI_D_ERROR, "%ld CpuReadySync - CpuSynched set to 0\n", Index));
}
//DEBUG((EFI_D_ERROR, "%ld CpuReadySync - Cpu Released - CpuReadyCount: %d, \n", Index, CpuReadyCount));//could cause problems
}

View File

@ -209,4 +209,13 @@ RegisterBiosResource (
IN STM_RSC *Resource
);
VOID AsmHostEntrypointSmmPe (
VOID
);
void SetupStmPageFault();
#define CPUSYNC
#endif

View File

@ -13,6 +13,7 @@
**/
#include "StmInit.h"
#include "PeStm.h"
/**
@ -56,7 +57,15 @@ InitializeSmiVmcs (
VmExitCtrls.Bits.SaveIA32_EFER = 1;
GuestInterruptibilityState.Uint32 = VmRead32 (VMCS_32_GUEST_INTERRUPTIBILITY_STATE_INDEX);
GuestInterruptibilityState.Bits.BlockingBySmi = 0;
if(Index != 0)
{
GuestInterruptibilityState.Bits.BlockingBySmi = 0;
}
else
{
GuestInterruptibilityState.Bits.BlockingBySmi = 1; // BSP has to do other stuff before interrupts are enabled
}
//
// Control field
@ -165,14 +174,14 @@ InitializeSmmVmcs (
Data64 = AsmReadMsr64 (IA32_VMX_PROCBASED_CTLS2_MSR_INDEX);
ProcessorBasedCtrls2nd.Uint32 = (UINT32)RShiftU64 (Data64, 32);
if (ProcessorBasedCtrls2nd.Bits.UnrestrictedGuest != 0) {
mGuestContextCommonSmm.GuestContextPerCpu[Index].UnrestrictedGuest = TRUE;
mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[Index].UnrestrictedGuest = TRUE;
} else {
mGuestContextCommonSmm.GuestContextPerCpu[Index].UnrestrictedGuest = FALSE;
mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[Index].UnrestrictedGuest = FALSE;
}
ProcessorBasedCtrls2nd.Uint32 = (UINT32)Data64 & (UINT32)RShiftU64 (Data64, 32);
ProcessorBasedCtrls2nd.Bits.Ept = 1;
if (mGuestContextCommonSmm.GuestContextPerCpu[Index].UnrestrictedGuest) {
if (mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[Index].UnrestrictedGuest) {
ProcessorBasedCtrls2nd.Bits.UnrestrictedGuest = 1;
}
@ -208,21 +217,21 @@ InitializeSmmVmcs (
VmWrite32 (VMCS_32_CONTROL_VMENTRY_CONTROLS_INDEX, VmEntryCtrls.Uint32);
VmWrite32 (VMCS_32_CONTROL_VMEXIT_CONTROLS_INDEX, VmExitCtrls.Uint32);
VmWrite64 (VMCS_64_CONTROL_EPT_PTR_INDEX, mGuestContextCommonSmm.EptPointer.Uint64);
VmWrite64 (VMCS_64_CONTROL_EPT_PTR_INDEX, mGuestContextCommonSmm[SMI_HANDLER].EptPointer.Uint64);
VmWriteN (VMCS_N_CONTROL_CR0_GUEST_HOST_MASK_INDEX, ((UINTN)AsmReadMsr64 (IA32_VMX_CR0_FIXED0_MSR_INDEX) & (UINTN)AsmReadMsr64 (IA32_VMX_CR0_FIXED1_MSR_INDEX)) | CR0_CD);
VmWriteN (VMCS_N_CONTROL_CR4_GUEST_HOST_MASK_INDEX, (UINTN)-1);
VmWriteN (VMCS_N_CONTROL_CR0_READ_SHADOW_INDEX, mGuestContextCommonSmm.GuestContextPerCpu[Index].Cr0 | ((UINTN)AsmReadMsr64 (IA32_VMX_CR0_FIXED0_MSR_INDEX) & (UINTN)AsmReadMsr64 (IA32_VMX_CR0_FIXED1_MSR_INDEX)));
VmWriteN (VMCS_N_CONTROL_CR4_READ_SHADOW_INDEX, mGuestContextCommonSmm.GuestContextPerCpu[Index].Cr4 | ((UINTN)AsmReadMsr64 (IA32_VMX_CR4_FIXED0_MSR_INDEX) & (UINTN)AsmReadMsr64 (IA32_VMX_CR4_FIXED1_MSR_INDEX)) | CR4_PAE);
VmWriteN (VMCS_N_CONTROL_CR0_READ_SHADOW_INDEX, mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[Index].Cr0 | ((UINTN)AsmReadMsr64 (IA32_VMX_CR0_FIXED0_MSR_INDEX) & (UINTN)AsmReadMsr64 (IA32_VMX_CR0_FIXED1_MSR_INDEX)));
VmWriteN (VMCS_N_CONTROL_CR4_READ_SHADOW_INDEX, mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[Index].Cr4 | ((UINTN)AsmReadMsr64 (IA32_VMX_CR4_FIXED0_MSR_INDEX) & (UINTN)AsmReadMsr64 (IA32_VMX_CR4_FIXED1_MSR_INDEX)) | CR4_PAE);
if (ProcessorBasedCtrls.Bits.IoBitmap != 0) {
VmWrite64 (VMCS_64_CONTROL_IO_BITMAP_A_INDEX, mGuestContextCommonSmm.IoBitmapA);
VmWrite64 (VMCS_64_CONTROL_IO_BITMAP_B_INDEX, mGuestContextCommonSmm.IoBitmapB);
VmWrite64 (VMCS_64_CONTROL_IO_BITMAP_A_INDEX, mGuestContextCommonSmm[SMI_HANDLER].IoBitmapA);
VmWrite64 (VMCS_64_CONTROL_IO_BITMAP_B_INDEX, mGuestContextCommonSmm[SMI_HANDLER].IoBitmapB);
}
if (ProcessorBasedCtrls.Bits.MsrBitmap != 0) {
VmWrite64 (VMCS_64_CONTROL_MSR_BITMAP_INDEX, mGuestContextCommonSmm.MsrBitmap);
VmWrite64 (VMCS_64_CONTROL_MSR_BITMAP_INDEX, mGuestContextCommonSmm[SMI_HANDLER].MsrBitmap);
}
//
@ -270,9 +279,9 @@ InitializeSmmVmcs (
//
// Guest field
//
VmWriteN (VMCS_N_GUEST_CR0_INDEX, mGuestContextCommonSmm.GuestContextPerCpu[Index].Cr0 | ((UINTN)AsmReadMsr64 (IA32_VMX_CR0_FIXED0_MSR_INDEX) & (UINTN)AsmReadMsr64 (IA32_VMX_CR0_FIXED1_MSR_INDEX)));
VmWriteN (VMCS_N_GUEST_CR0_INDEX, mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[Index].Cr0 | ((UINTN)AsmReadMsr64 (IA32_VMX_CR0_FIXED0_MSR_INDEX) & (UINTN)AsmReadMsr64 (IA32_VMX_CR0_FIXED1_MSR_INDEX)));
VmWriteN (VMCS_N_GUEST_CR3_INDEX, (UINTN)mHostContextCommon.HostContextPerCpu[Index].TxtProcessorSmmDescriptor->SmmCr3);
VmWriteN (VMCS_N_GUEST_CR4_INDEX, mGuestContextCommonSmm.GuestContextPerCpu[Index].Cr4 | ((UINTN)AsmReadMsr64 (IA32_VMX_CR4_FIXED0_MSR_INDEX) & (UINTN)AsmReadMsr64 (IA32_VMX_CR4_FIXED1_MSR_INDEX)));
VmWriteN (VMCS_N_GUEST_CR4_INDEX, mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[Index].Cr4 | ((UINTN)AsmReadMsr64 (IA32_VMX_CR4_FIXED0_MSR_INDEX) & (UINTN)AsmReadMsr64 (IA32_VMX_CR4_FIXED1_MSR_INDEX)));
if (sizeof(UINTN) == sizeof(UINT64)) {
VmWriteN (VMCS_N_GUEST_CR4_INDEX, VmReadN(VMCS_N_GUEST_CR4_INDEX) | CR4_PAE);
} else {
@ -339,6 +348,6 @@ InitializeSmmVmcs (
VmWrite64 (VMCS_64_GUEST_IA32_PERF_GLOBAL_CTRL_INDEX, AsmReadMsr64(IA32_PERF_GLOBAL_CTRL_MSR_INDEX));
VmWrite64 (VMCS_64_GUEST_IA32_EFER_INDEX, mGuestContextCommonSmm.GuestContextPerCpu[Index].Efer);
VmWrite64 (VMCS_64_GUEST_IA32_EFER_INDEX, mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[Index].Efer);
return ;
}

26
Stm/StmPkg/Core/PeStmEpt.h Executable file
View File

@ -0,0 +1,26 @@
/** @file
PE EPT Header
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php.
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
**/
#ifndef _PESTMEPT_H_
#define _PESTMEPT_H_
UINT32 PeMapRegionEpt(UINT32 VmType,
UINTN membase,
UINTN memsize,
UINTN physbase,
BOOLEAN isRead,
BOOLEAN isWrite,
BOOLEAN isExec,
UINT32 CpuIndex);
#endif /* _PESTMEPT_H_ */

View File

@ -0,0 +1,42 @@
;------------------------------------------------------------------------------
;
; Copyright (c) 2015, Intel Corporation. All rights reserved.<BR>
; This program and the accompanying materials
; are licensed and made available under the terms and conditions of the BSD License
; which accompanies this distribution. The full text of the license may be found at
; http://opensource.org/licenses/bsd-license.php.
;
; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
;
; Module Name:
;
; PeVmExit.asm
;
;------------------------------------------------------------------------------
.686P
.MMX
.MODEL FLAT,C
.CODE
EXTERNDEF PeStmHandlerSmm:PROC
; Host (VMM) entry point taken on a VM exit from a protected-execution VM.
; Pushes the guest general-purpose registers onto the host stack to build a
; register frame, then passes a pointer to that frame (ESP) to the C handler
; PeStmHandlerSmm.  The handler is not expected to return; if it does, the
; jmp $ below spins this CPU forever.
AsmHostEntrypointSmmPe PROC PUBLIC
push edi
push esi
push ebp
push ebp ; should be esp -- NOTE(review): placeholder filling the frame's Esp slot; confirm PeStmHandlerSmm does not rely on this value
push ebx
push edx
push ecx
push eax
mov ecx, esp ; parameter: pointer to the just-pushed register frame
push ecx
call PeStmHandlerSmm
add esp, 4
jmp $ ; PeStmHandlerSmm should never return; spin if it does
AsmHostEntrypointSmmPe ENDP
END

View File

@ -13,6 +13,7 @@
**/
#include "StmRuntime.h"
#include "PeStm.h"
/**
@ -30,8 +31,9 @@ ResumeToBiosExceptionHandler (
STM_PROTECTION_EXCEPTION_STACK_FRAME_IA32 *StackFrame;
UINTN Rflags;
X86_REGISTER *Reg;
UINT32 VmType = SMI_HANDLER;
Reg = &mGuestContextCommonSmm.GuestContextPerCpu[Index].Register;
Reg = &mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Register;
StmProtectionExceptionHandler = &mHostContextCommon.HostContextPerCpu[Index].TxtProcessorSmmDescriptor->StmProtectionExceptionHandler;
@ -50,14 +52,14 @@ ResumeToBiosExceptionHandler (
StackFrame = (STM_PROTECTION_EXCEPTION_STACK_FRAME_IA32 *)(UINTN)StmProtectionExceptionHandler->SpeRsp;
StackFrame -= 1;
mGuestContextCommonSmm.GuestContextPerCpu[Index].InfoBasic.Uint32 = VmRead32 (VMCS_32_RO_EXIT_REASON_INDEX);
mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].InfoBasic.Uint32 = VmRead32 (VMCS_32_RO_EXIT_REASON_INDEX);
mGuestContextCommonSmm.GuestContextPerCpu[Index].VmExitInstructionLength = VmRead32 (VMCS_32_RO_VMEXIT_INSTRUCTION_LENGTH_INDEX);
mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].VmExitInstructionLength = VmRead32 (VMCS_32_RO_VMEXIT_INSTRUCTION_LENGTH_INDEX);
StackFrame->VmcsExitQualification = VmReadN (VMCS_N_RO_EXIT_QUALIFICATION_INDEX);
StackFrame->VmcsExitInstructionLength = VmRead32 (VMCS_32_RO_VMEXIT_INSTRUCTION_LENGTH_INDEX);
StackFrame->VmcsExitInstructionInfo = VmRead32 (VMCS_32_RO_VMEXIT_INSTRUCTION_INFO_INDEX);
switch (mGuestContextCommonSmm.GuestContextPerCpu[Index].InfoBasic.Bits.Reason) {
switch (mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].InfoBasic.Bits.Reason) {
case VmExitReasonExceptionNmi:
case VmExitReasonEptViolation:
if (StmProtectionExceptionHandler->PageViolationException) {
@ -113,7 +115,7 @@ ResumeToBiosExceptionHandler (
StackFrame->Rbx = Reg->Rbx;
StackFrame->Rax = Reg->Rax;
StackFrame->Cr3 = VmReadN (VMCS_N_GUEST_CR3_INDEX);
if (mGuestContextCommonSmm.GuestContextPerCpu[Index].InfoBasic.Bits.Reason == VmExitReasonEptViolation) {
if (mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].InfoBasic.Bits.Reason == VmExitReasonEptViolation) {
// For SMM handle, linear addr == physical addr
StackFrame->Cr2 = (UINTN)VmRead64(VMCS_64_RO_GUEST_PHYSICAL_ADDR_INDEX);
} else {
@ -149,6 +151,7 @@ ResumeToBiosExceptionHandler (
DEBUG ((EFI_D_ERROR, "VMCS_32_RO_VM_INSTRUCTION_ERROR: %08x\n", (UINTN)VmRead32 (VMCS_32_RO_VM_INSTRUCTION_ERROR_INDEX)));
DumpVmcsAllField ();
DumpRegContext (Reg);
DumpGuestStack(Index);
ReleaseSpinLock (&mHostContextCommon.DebugLock);
CpuDeadLoop ();
return ;
@ -170,8 +173,9 @@ ReturnFromBiosExceptionHandler (
STM_PROTECTION_EXCEPTION_STACK_FRAME_IA32 *StackFrame;
UINTN Rflags;
X86_REGISTER *Reg;
UINT32 VmType = SMI_HANDLER;
Reg = &mGuestContextCommonSmm.GuestContextPerCpu[Index].Register;
Reg = &mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Register;
StmProtectionExceptionHandler = &mHostContextCommon.HostContextPerCpu[Index].TxtProcessorSmmDescriptor->StmProtectionExceptionHandler;
@ -224,6 +228,7 @@ ReturnFromBiosExceptionHandler (
DEBUG ((EFI_D_ERROR, "VMCS_32_RO_VM_INSTRUCTION_ERROR: %08x\n", (UINTN)VmRead32 (VMCS_32_RO_VM_INSTRUCTION_ERROR_INDEX)));
DumpVmcsAllField ();
DumpRegContext (Reg);
DumpGuestStack(Index);
ReleaseSpinLock (&mHostContextCommon.DebugLock);
CpuDeadLoop ();
return ;

View File

@ -13,6 +13,7 @@
**/
#include "StmRuntime.h"
#include "PeStm.h"
#define PAGING_4K_MASK 0xFFF
#define PAGING_4M_MASK 0x3FFFFF
@ -295,11 +296,12 @@ GuestLinearToGuestPhysical (
IN UINTN GuestLinearAddress
)
{
UINT32 VmType = SMI_HANDLER;
return TranslateGuestLinearToPhysical (
VmReadN (VMCS_N_GUEST_CR3_INDEX),
mGuestContextCommonSmm.GuestContextPerCpu[CpuIndex].Cr0,
mGuestContextCommonSmm.GuestContextPerCpu[CpuIndex].Cr4,
mGuestContextCommonSmm.GuestContextPerCpu[CpuIndex].Efer,
mGuestContextCommonSmm[VmType].GuestContextPerCpu[CpuIndex].Cr0,
mGuestContextCommonSmm[VmType].GuestContextPerCpu[CpuIndex].Cr4,
mGuestContextCommonSmm[VmType].GuestContextPerCpu[CpuIndex].Efer,
GuestLinearAddress,
NULL,
NULL,
@ -396,6 +398,7 @@ MapLinearAddressOneEntry (
UINT64 *L1PageTable;
UINT32 *L1PageTable32;
UINTN Index1;
UINT32 VmType = SMI_HANDLER;
Ia32e = FALSE;
Pg = FALSE;
@ -405,9 +408,9 @@ MapLinearAddressOneEntry (
Entry = NULL;
CurrentPhysicalAddress = TranslateGuestLinearToPhysical (
VmReadN (VMCS_N_GUEST_CR3_INDEX),
mGuestContextCommonSmm.GuestContextPerCpu[CpuIndex].Cr0,
mGuestContextCommonSmm.GuestContextPerCpu[CpuIndex].Cr4,
mGuestContextCommonSmm.GuestContextPerCpu[CpuIndex].Efer,
mGuestContextCommonSmm[VmType].GuestContextPerCpu[CpuIndex].Cr0,
mGuestContextCommonSmm[VmType].GuestContextPerCpu[CpuIndex].Cr4,
mGuestContextCommonSmm[VmType].GuestContextPerCpu[CpuIndex].Efer,
LinearAddress,
&Ia32e,
&Pg,
@ -495,8 +498,9 @@ MapVirtualAddressToPhysicalAddress (
UINTN Address;
UINTN Base;
UINTN Length;
UINT32 VmType = SMI_HANDLER;
if ((mGuestContextCommonSmm.GuestContextPerCpu[CpuIndex].Cr4 & CR4_PAE) == 0) {
if ((mGuestContextCommonSmm[VmType].GuestContextPerCpu[CpuIndex].Cr4 & CR4_PAE) == 0) {
SuperPageSize = SIZE_4MB;
} else {
SuperPageSize = SIZE_2MB;
@ -553,6 +557,7 @@ UnmapLinearAddressOneEntry (
UINT64 *L1PageTable;
UINT32 *L1PageTable32;
UINTN Index1;
UINT32 VmType = SMI_HANDLER;
ASSERT ((SuperPageSize == SIZE_4KB) || (SuperPageSize == SIZE_4MB) || (SuperPageSize == SIZE_2MB));
@ -561,9 +566,9 @@ UnmapLinearAddressOneEntry (
Entry = NULL;
CurrentPhysicalAddress = TranslateGuestLinearToPhysical (
VmReadN (VMCS_N_GUEST_CR3_INDEX),
mGuestContextCommonSmm.GuestContextPerCpu[CpuIndex].Cr0,
mGuestContextCommonSmm.GuestContextPerCpu[CpuIndex].Cr4,
mGuestContextCommonSmm.GuestContextPerCpu[CpuIndex].Efer,
mGuestContextCommonSmm[VmType].GuestContextPerCpu[CpuIndex].Cr0,
mGuestContextCommonSmm[VmType].GuestContextPerCpu[CpuIndex].Cr4,
mGuestContextCommonSmm[VmType].GuestContextPerCpu[CpuIndex].Efer,
LinearAddress,
&Ia32e,
&Pg,
@ -643,8 +648,9 @@ UnmapVirtualAddressToPhysicalAddress (
UINTN Address;
UINTN Base;
UINTN Length;
UINT32 VmType = SMI_HANDLER;
if ((mGuestContextCommonSmm.GuestContextPerCpu[CpuIndex].Cr4 & CR4_PAE) == 0) {
if ((mGuestContextCommonSmm[VmType].GuestContextPerCpu[CpuIndex].Cr4 & CR4_PAE) == 0) {
SuperPageSize = SIZE_4MB;
} else {
SuperPageSize = SIZE_2MB;

View File

@ -0,0 +1,169 @@
/** @file
APIC Handler
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php.
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
**/
#include "StmRuntime.h"
#include "PeStm.h"
extern PE_SMI_CONTROL PeSmiControl;
// function to signal to the PE VM that it needs to handle a SMI
#define ICR_LOW 0x300
#define ICR_HIGH 0x310
#define APIC_REG(offset) (*(UINTN *)(apicAddress + offset))
static UINTN apicAddress;
#define APIC_EN 1 << 11
#define APIC_EXTD 1 << 10
#define LOCAL_APIC_DISABLED 0
#define APIC_INVALID APIC_EXTD
// apic modes
#define xAPIC_MODE APIC_EN
#define x2APIC_MODE APIC_EN|APIC_EXTD
// from LocalApic.h
// lower half of Interrupt Command Register (ICR)
// Layout of the low 32 bits of the local APIC Interrupt Command Register
// (ICR).  In xAPIC mode, writing the low dword triggers the IPI, so the
// high dword (destination) must be written first.
typedef union
{
struct
{
UINT32 Vector:8; // the vector number of the interrupt being sent
UINT32 DeliveryMode:3; // Specifies the type of IPI being sent
UINT32 DestinationMode:1; // 0: physical destination mode, 1: logical destination mode
UINT32 DeliveryStatus:1; // Indicates the IPI delivery status. Reserved in x2APIC mode
UINT32 Reserved0:1; // Reserved
UINT32 Level:1; // 0 for the INIT level de-assert delivery mode. Otherwise 1
UINT32 TriggerMode:1; // 0: edge, 1: level when using the INIT level de-assert delivery mode
UINT32 Reserved1:2; // Reserved
UINT32 DestinationShorthand:2; // A shorthand notation to specify the destination of the message
UINT32 Reserved2:12; // Reserved
} Bits;
UINT32 Uint32; // flat 32-bit view for whole-register reads/writes
} LOCAL_APIC_ICR_LOW;
#define LOCAL_APIC_DELIVERY_MODE_SMI 2
#define LOCAL_APIC_DESTINATION_SHORTHAND_ALL_EXCLUDING_SELF 3
//
// Signal the CPU running the protected-execution (PE) VM that an SMI is
// pending, by sending it an NMI IPI (ICR low = 0x400: vector 0, delivery
// mode NMI per LOCAL_APIC_ICR_LOW above).  Only fires when a PE VM is
// executing (PeSmiControl.PeExec == 1) and this caller wins the atomic
// claim of PeNmiBreak (0 -> 1), so at most one NMI is sent per break.
//
// @param CpuIndex  Index of the calling CPU (used for debug output).
//
void SignalPeVm(UINT32 CpuIndex)
{
UINT32 low, high;
UINT64 ApicMsr;
// UINT32 mine = 0;
#ifdef SHOWSMI
if(CpuIndex == 0)
{
UINT16 pmbase = get_pmbase();
DEBUG((EFI_D_ERROR, " %ld SignalPeVm ****SMI*** smi_en: %x smi_sts: %x\n", CpuIndex, IoRead32(pmbase + SMI_EN), IoRead32(pmbase + SMI_STS)));
}
#endif
if((PeSmiControl.PeExec == 1) && (InterlockedCompareExchange32(&PeSmiControl.PeNmiBreak, 0, 1) == 0))
{
ApicMsr = AsmReadMsr64 (IA32_APIC_BASE_MSR_INDEX);
apicAddress = (UINTN)(ApicMsr & 0xffffff000); // assume the default for now
// NOTE(review): the two masked reads below are overwritten before use a few
// lines down -- they look like leftovers from an earlier read-modify-write
// approach; confirm before removing.
low = APIC_REG(ICR_LOW) & 0xFFF32000;
high = APIC_REG(ICR_HIGH) & 0x00FFFFFF;
//high |= (PE_APIC_ID << 24); // put the destination apic ID into the upper eight bits
high = PeSmiControl.PeApicId << 24;
//low |= 0x4400; // bits: (8-10) NMI, (14) asset trigger mode
low = 0x400;
// Dispatch according to the APIC mode bits (EN/EXTD) in IA32_APIC_BASE.
switch (ApicMsr & (APIC_EN|APIC_EXTD))
{
case xAPIC_MODE:
APIC_REG(ICR_HIGH) = high; // write high before low
APIC_REG(ICR_LOW) = low; // because writing to low triggers the IPI
break;
case x2APIC_MODE:
// x2APIC uses MSR's
__writemsr(0x800 + 0x30, low |((UINT64)PeSmiControl.PeApicId << 32));
break;
default:
DEBUG((EFI_D_ERROR, " %ld SignalPeVm - APIC mode invalid or APIC disabled\n", CpuIndex));
}
DEBUG((EFI_D_ERROR, "%ld SignalPeVm - Sent NMI to ApicId: %ld CpuIndex: %ld command: high: %08lx low: %08lx APIC_MSR: %p\n",
CpuIndex,
PeSmiControl.PeApicId,
PeSmiControl.PeCpuIndex,
high,
low,
ApicMsr));
}
#ifdef SMIVMPE
else
{
if(PeSmiControl.PeExec == 1)
{
DEBUG((EFI_D_ERROR, " %ld - ***+++*** SMI present with PE/VM active\n", CpuIndex));
}
}
#endif
}
//
// Broadcast an SMI IPI to every processor except the caller, using the
// "all excluding self" destination shorthand with SMI delivery mode.
// Saves and restores ICR_HIGH around the send.
//
// @param CpuIndex  Index of the calling CPU (used for debug output).
//
void SendSmiToOtherProcessors(UINT32 CpuIndex)
{
LOCAL_APIC_ICR_LOW low;
UINT32 high;
UINTN highSave;
UINT64 ApicMsr;
ApicMsr = AsmReadMsr64 (IA32_APIC_BASE_MSR_INDEX);
apicAddress = (UINTN)(ApicMsr & 0xffffff000); // assume the default for now
highSave = APIC_REG(ICR_HIGH); // preserve the destination field across the broadcast
low.Uint32 = 0;
low.Bits.DeliveryMode = LOCAL_APIC_DELIVERY_MODE_SMI;
low.Bits.Level = 1;
low.Bits.DestinationShorthand = LOCAL_APIC_DESTINATION_SHORTHAND_ALL_EXCLUDING_SELF;
high = 0; // nonspecific - shorthand makes the destination field irrelevant
switch (ApicMsr & (APIC_EN|APIC_EXTD))
{
case xAPIC_MODE:
APIC_REG(ICR_HIGH) = high; // write high before lows
APIC_REG(ICR_LOW) = low.Uint32; // because writing to low triggers the IPI
break;
case x2APIC_MODE:
// x2APIC uses MSR's
__writemsr(0x800 + 0x30, low.Uint32 |((UINT64)high << 32));
break;
default:
DEBUG((EFI_D_ERROR, "%ld SendSmiToOtherProcessors - APIC mode invalid or APIC disabled\n", CpuIndex));
}
// NOTE(review): ICR_HIGH is restored via MMIO even in x2APIC mode, where the
// MMIO window may not be active -- confirm this is harmless on such systems.
APIC_REG(ICR_HIGH) = highSave; //restore high
DEBUG((EFI_D_ERROR, "%ld SendSmiToOtherProcessors - Sent SMI to other processors command: high: 0x%08lx low: 0x%08lx APIC_MSR: 0x%p\n",
CpuIndex,
high,
low,
ApicMsr));
}

View File

@ -0,0 +1,298 @@
/** @file
EPT Handler functions specific to a VM/PE
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php.
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
**/
#define PAGINGEPT_C 1 //!< flag for include
//#include <stdio.h>
#include "StmRuntime.h"
#include "StmInit.h"
#include "PeStm.h"
#include "PeLoadVm.h"
#include "PeStmEpt.h"
//#define EPTCHECK
/*!
Setup VM/PE EPT tables.
\param[in] pStmVmm Pointer to StmVmm structure
\retval 0 Function succeeds
\retval !0 Error code
*/
extern UINT8 GetMemoryType (IN UINT64 BaseAddress); // call to base EPT functionality - hope to eventually do more of this
extern UINT64 EndTimeStamp;
static void PeEptFreeL2(IN UINT64 Level2);
static void insertPhysAdd(EPT_ENTRY* L1PageTable, UINTN startAddress, UINTN StartIndex, UINTN size);
extern UINT32 PostPeVmProc(UINT32 rc, UINT32 CpuIndex, UINT32 mode);
extern VOID
EptCreatePageTable (
OUT EPT_POINTER *EptPointer,
IN UINT32 Xa
);
// Initialization...
MRTT_INFO mMtrrInfo;
#define L4_BITMASK 0x0000FF8000000000
#define L3_BITMASK 0x0000007FC0000000
#define L2_BITMASK 0x000000003FE00000
#define L1_BITMASK 0x00000000001FF000
#define OFFSET_BITMASK_4K 0x0000000000000FFF
#define BITMASK_PAGE_SIZE_2MB 0x00000000001FFFFF
#define PTE_COUNT 512
#define L4_POSITION 39
#define L3_POSITION 30
#define L2_POSITION 21
#define L1_POSITION 12
/**
  Query the memory type of an address and sanitize the result.

  Only UC (0) and WB (6) are accepted; any other reported type is forced
  to WB (6).

  BUGFIX: the original condition used '||' -- (type != 0) || (type != 6)
  is true for every value, so the function unconditionally returned 6 and
  UC mappings were silently converted to WB.  Corrected to '&&'.

  @param StartAddress  Physical address to classify.
  @return UC (0) or WB (6).
**/
UINT8 GetMemoryTypeTest(UINT64 StartAddress)
{
    UINT8 Memory_Type = GetMemoryType(StartAddress);

    if ((Memory_Type != 0) && (Memory_Type != 6)) {
        Memory_Type = 6;
    }
    return Memory_Type;
}
/**
This function create EPT l4/L3 tables for VM/PE guest.
L2 and L1 are added during the VM/PE build
@param EptPointer EPT pointer
@param Xa Execute access
**/
//EptCreatePageTable (&mGuestContextCommonSmm[VmType].EptPointer);
/**
  Create the top-level (L4/L3) EPT tables for a VM/PE guest.  The L2 and L1
  entries are populated later, while the VM/PE is being built.

  @param EptPointer  Receives the new EPT pointer.
  @retval 0          Always returns success.
**/
INT32 PeEptInit (OUT EPT_POINTER *EptPointer)
{
EptCreatePageTable (EptPointer, 1); // second argument: execute access enabled
return 0;
}
/**
  VM exit handler for an EPT violation taken while a protected-execution
  (PE) VM is running.

  Decodes the VM-exit qualification, logs the access rights the EPT granted
  versus the access the guest attempted, and then tears the offending PE VM
  down via PostPeVmProc(PE_VM_BAD_ACCESS, CpuIndex, RELEASE_VM).

  BUGFIX: the log strings were previously assembled with strcat() into a
  local buffer that was then passed to DEBUG() as the *format* string (a
  non-literal format, CERT FIO30-C).  They are now passed as %a arguments
  to literal format strings, which also restores the CpuIndex prefix the
  commented-out sprintf() calls intended.

  @param CpuIndex  Index of the CPU on which the violation occurred.
**/
void PeEPTViolationHandler( IN UINT32 CpuIndex)
{
    VM_EXIT_QUALIFICATION VmexitQualification;
    char AccessAllowed[4];
    char AccessRequested[4];
    char *LinearAddressValid;
    char *AddressViolation;

    // Timestamp the end of the PE VM run for performance accounting.
    EndTimeStamp = AsmReadTsc();
    VmexitQualification.UintN = VmReadN(VMCS_N_RO_EXIT_QUALIFICATION_INDEX);

    DEBUG((EFI_D_ERROR, "%ld PeEPTViolationHandler - PE EPT Violation VMEXIT - Exit Qual: 0x%llx\n",
        CpuIndex, VmexitQualification.UintN));
    DEBUG((EFI_D_ERROR, "%ld PeEPTViolationHandler - Protected Execution VM attempted to access protected MEMORY at 0x%016llx\n",
        CpuIndex,
        VmRead64(VMCS_64_RO_GUEST_PHYSICAL_ADDR_INDEX)));

    // Build "RWX"/"---" permission strings outside the DEBUG() argument list
    // (the DEBUG macro mishandles conditional expressions in its arguments).
    AccessAllowed[0] = (VmexitQualification.EptViolation.EptR == 1) ? 'R' : '-';
    AccessAllowed[1] = (VmexitQualification.EptViolation.EptW == 1) ? 'W' : '-';
    AccessAllowed[2] = (VmexitQualification.EptViolation.EptX == 1) ? 'X' : '-';
    AccessAllowed[3] = '\0';
    DEBUG((EFI_D_ERROR, "%ld PeEPTViolationHandler - Access Allowed: %a\n", CpuIndex, AccessAllowed));

    AccessRequested[0] = (VmexitQualification.EptViolation.Ra == 1) ? 'R' : '-';
    AccessRequested[1] = (VmexitQualification.EptViolation.Wa == 1) ? 'W' : '-';
    AccessRequested[2] = (VmexitQualification.EptViolation.Xa == 1) ? 'X' : '-';
    AccessRequested[3] = '\0';
    DEBUG((EFI_D_ERROR, "%ld PeEPTViolationHandler - Access Attempted causing Violation: %a\n", CpuIndex, AccessRequested));

    LinearAddressValid = (VmexitQualification.EptViolation.GlaValid == 1) ? "Valid" : "Invalid";
    DEBUG((EFI_D_ERROR, "%ld PeEPTViolationHandler - Linear address is %a\n", CpuIndex, LinearAddressValid));

    if (VmexitQualification.EptViolation.GlaValid == 1) {
        if (VmexitQualification.EptViolation.Gpa == 1) {
            AddressViolation = "Guest Physical EPT violation ";
        } else {
            AddressViolation = "Paging structure error";
        }
        DEBUG((EFI_D_ERROR, "%ld PeEPTViolationHandler - Linear address is %a\n", CpuIndex, AddressViolation));
    }

    // Take down the offending VM/PE and release its resources.
    PostPeVmProc(PE_VM_BAD_ACCESS , CpuIndex, RELEASE_VM);
    return;
}
/**
  VM exit handler for an EPT misconfiguration raised while a PE VM is
  running.  Logs the event, dumps all VMCS fields, and halts this CPU
  permanently (misconfiguration indicates corrupt EPT structures).

  @param CpuIndex  Index of the CPU that took the VM exit.
**/
void PeEPTMisconfigurationHandler( IN UINT32 CpuIndex)
{
    // Timestamp the end of the PE VM run for performance accounting.
    EndTimeStamp = AsmReadTsc();
    DEBUG((EFI_D_ERROR, "%ld PeEPTMisconfigurationHandler - PE EPT Misconfiguration VMEXIT\n", CpuIndex));
    DumpVmcsAllField ();
    // BUGFIX: format string was "ld ..." (missing '%'), so CpuIndex was
    // never formatted and the literal text "ld" was printed instead.
    DEBUG((EFI_D_ERROR, "%ld PeEPTMisconfigurationHandler - CpuDeadLoop\n", CpuIndex));
    CpuDeadLoop();
    return;
}
/**
  VM exit handler for an INVEPT instruction executed inside a PE VM.
  Not implemented: logs the event and halts this CPU permanently.

  @param CpuIndex  Index of the CPU that took the VM exit.
**/
void PeInvEPTHandler( IN UINT32 CpuIndex)
{
// Timestamp the end of the PE VM run for performance accounting.
EndTimeStamp = AsmReadTsc();
DEBUG((EFI_D_ERROR, "%ld PeInvEPTHandler - PE - Invalid EPT Handler not implemented \n", CpuIndex));
DEBUG((EFI_D_ERROR, "%ld PeInvEPTHandler - CpuDeadLoop\n", CpuIndex));
CpuDeadLoop();
return;
}
// function to free EPT tables - assume EPT pointer to tables created by PeEPTinit
static void PeEptFreeL3(UINT64 Level3);
/**
  Free an entire EPT hierarchy created by PeEptInit / the VM/PE build.

  Walks the L4 (PML4) table referenced by EptPointer, recursively frees
  every lower-level table via PeEptFreeL3, then frees the L4 page itself.

  @param EptPointer  EPT pointer value; low 12 bits (attributes) are masked
                     off to recover the L4 table's physical address.
**/
void PeEptFree(IN UINT64 EptPointer)
{
    UINTN Pml4EntryCount;
    UINTN EntryIndex;
    EPT_ENTRY *Pml4Table;

    Pml4Table = (EPT_ENTRY *)((UINTN)(EptPointer & ~0xFFF));
    DEBUG ((EFI_D_ERROR, "PeEptFree Entered: %llx\n", Pml4Table));

    // The number of populated PML4 entries depends on the platform's
    // physical address width: one entry covers 2^39 bytes.
    if (mHostContextCommon.PhysicalAddressBits <= 39) {
        Pml4EntryCount = 1;
    } else {
        Pml4EntryCount = (UINTN)LShiftU64 (1, mHostContextCommon.PhysicalAddressBits - 39);
    }

    for (EntryIndex = 0; EntryIndex < Pml4EntryCount; EntryIndex++) {
        if (Pml4Table[EntryIndex].Uint64 != 0) {
            PeEptFreeL3 ((UINT64) Pml4Table[EntryIndex].Uint64);
        }
    }

    // All subordinate tables are gone; release the L4 page last.
    FreePages (Pml4Table, 1);
}
/**
  Free one L3 (PDPT) EPT table and every L2 table it references.

  Entries that are zero (not present) or marked as superpages (Sp bit set,
  i.e. 1GB mappings with no subordinate table) are skipped.

  @param Level3  Raw L3 entry value; low 12 bits are masked off to recover
                 the table's physical address.
**/
static void PeEptFreeL3(UINT64 Level3)
{
    UINTN EntryIndex;
    EPT_ENTRY *Level3Table;

    Level3Table = (EPT_ENTRY *) ((UINTN)(Level3 & ~0xFFF));
    DEBUG ((EFI_D_ERROR, "PeEptFreeL3 Entered: %llx\n", Level3Table));

    for (EntryIndex = 0; EntryIndex < 512; EntryIndex++) {
        // Recurse only through present, non-superpage entries.
        if ((Level3Table[EntryIndex].Uint64 != 0) &&
            (Level3Table[EntryIndex].Bits32.Sp != 1)) {
            PeEptFreeL2 (Level3Table[EntryIndex].Uint64);
        }
    }

    FreePages (Level3Table, 1);
}
/**
  Free one L2 (PD) EPT table and every L1 page table referenced by its
  non-superpage entries.

  @param Level2  Raw L2 entry value; low 12 bits are masked off to recover
                 the table's physical address.
**/
static void PeEptFreeL2(IN UINT64 Level2)
{
    UINTN EntryIndex;
    EPT_ENTRY *Level2Table;
    EPT_ENTRY *Level1Table;

    Level2Table = (EPT_ENTRY *) ((UINTN)(Level2 & ~0xFFF));
    DEBUG ((EFI_D_ERROR, "PeEptFreeL2 Entered: %llx\n", Level2Table));

    for (EntryIndex = 0; EntryIndex < 512; EntryIndex++) {
        if ((Level2Table[EntryIndex].Uint64 == 0) ||
            (Level2Table[EntryIndex].Bits32.Sp == 1)) {
            continue; // not present, or a 2MB superpage: no L1 table to free
        }
        Level1Table = (EPT_ENTRY *) ((UINTN)(Level2Table[EntryIndex].Uint64 & ~0xFFF));
        DEBUG((EFI_D_ERROR, "PeEptFreeL2 - L1Table: 0x%016llx freed L1Entry: %d\n", Level1Table, EntryIndex));
        FreePages (Level1Table, 1);
    }

    FreePages (Level2Table, 1);
}

View File

@ -0,0 +1,799 @@
/** @file
VM/PE setup, load and VM breakdown functions
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php.
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
**/
#define VMCALL_C 1
#include "StmRuntime.h"
#include "PeStm.h"
#include "PeLoadVm.h"
#include "StmApi.h"
#include "CpuDef.h"
extern VOID InitCpuReadySync();
extern VOID CpuReadySync(UINT32 Index);
extern UINT32 GetVmcsSize (VOID);
extern VOID AsmHostEntrypointSmmPe (VOID);
extern RETURN_STATUS RegisterExceptionHandler(IN EFI_EXCEPTION_TYPE ExceptionType,
IN EFI_EXCEPTION_CALLBACK ExceptionCallback);
extern void AsmSendInt2(); // setup NMI
extern void PeEptFree(IN UINT64 EptPointer);
extern UINT32 GetVmcsOffset( UINT32 field_encoding);
extern UINT32 GetMPState;
static int CpuGetState = 0;
void SetEndOfSmi(void);
void StartTimer(void);
void StopTimer(void);
int CheckTimerSTS(UINT32 Index);
void ClearTimerSTS(void);
void SetMaxSwTimerInt(void);
void SetMinSwTimerInt(void);
void SetTimerRate(UINT16 value);
extern void MapVmcs();
void LaunchPeVm(UINT32 PeType, UINT32 CpuIndex);
void AckTimer(void);
UINT16 get_pmbase(void);
UINT32 save_Inter_PeVm(UINT32 CpuIndex);
UINT32 PeSmiHandler(UINT32 CpuIndex);
#define PER_SMI_SEL (1<<0) // selects timinf for periodic SMI - in GEN_PMCON_1 (offset A0-A1h)
// Periodic SMI rates
#define PeriodicSmi64Sec 0
#define PeriodicSmi32Sec 1
#define PeriodicSmi16Sec 2
#define PeriodicSmi8Sec 3
// need to add or modify
void enable_nmi(); // turn on NMI
VOID EFIAPI NullExceptionHandler(IN EFI_EXCEPTION_TYPE InterruptType, IN EFI_SYSTEM_CONTEXT SystemContext);
UINT32 SetupProtExecVm(UINT32 CpuIndex, UINT32 VM_Configuration, UINT32 mode, UINT32 PeType);
//static UINT32 FreePE_PageTables(UINT32 PeType);
static void apic_wrmsr(UINT32 reg, UINT64 msr_content);
void print_region_list(UINT32 PeType, UINT32 CpuIndex);
extern int GetMultiProcessorState(UINT32 CpuIndex);
void InitPe();
// Bit-field view of the platform-info MSR (read via MSR_HSW_PLATFORM_INFO,
// 0xCE).  Only MaxNonTurboRatio is consumed in this file (PostPeVmProc uses
// it to scale TSC deltas into approximate run time).  Field names/widths
// follow the MSR layout - confirm reserved ranges against the Intel SDM
// for the target processor generation.
typedef struct
{
	UINT64 Reserved1:8;
	UINT64 MaxNonTurboRatio:8;   // base (non-turbo) ratio used for TSC scaling below
	UINT64 Reserved2:12;
	UINT64 ProgRatioLimitTM:1;   // per field name: programmable ratio limit for turbo mode
	UINT64 ProgTDPLimitTM:1;     // per field name: programmable TDP limit for turbo mode
	UINT64 Reserved3:10;
	UINT64 MaxEffRatio:8;        // per field name: maximum efficiency ratio
	UINT64 Reserved4:16;
}MSR_PLATFORM_INFO_BITS;
// Union so the MSR can be read as a raw UINT64 and decoded through Bits.
typedef union
{
	MSR_PLATFORM_INFO_BITS Bits;
	UINT64 Uint64;
} MSR_PLATFORM_INFO_DATA;
PE_SMI_CONTROL PeSmiControl;          // SMI/NMI coordination state shared with the SMI handler path
PE_VM_DATA PeVmData[4];	// right now support a max of 3 PE VM (VM 0 is the SMI_HANDLER)
static UINT64 StartPeTimeStamp = 0;   // TSC value captured when a PE VM run starts
UINT64 EndTimeStamp = 0;              // TSC value captured when a PE VM run ends
// VmPeReady and NMIReceived are written by the NMI exception handler
// (NullExceptionHandler) and polled from normal execution context
// (enable_nmi busy-waits on NMIReceived; LaunchPeVm tests both).  They must
// be volatile-qualified so the compiler re-reads them on every access
// instead of caching them in a register and optimizing the polls away.
static volatile unsigned int VmPeReady = 0;
static volatile unsigned int NMIReceived = 0;
/**
  Launch the protected-execution VM for one "run".

  Loads the module's parameter registers (RBX = shared page, RCX = region
  list, RDX = STM shared page), optionally clears the module heap, arms the
  SMI/NMI coordination state so a launch never races an in-flight SMI, and
  enters the guest with VMLAUNCH.  On a successful launch this function does
  not return through the normal path; falling past AsmVmLaunch/AsmVmResume
  means the launch failed and diagnostics are dumped.

  @param PeType    index of the PE VM being launched
  @param CpuIndex  cpu performing the launch
**/
void LaunchPeVm(UINT32 PeType, UINT32 CpuIndex)
{
	UINTN Rflags;
	UINTN InitStackPointer = 0; // Initial Stack pointer
	// load the shared page and region list addresses into the register save area
	// so that the PE module will have access to those addresses
	mGuestContextCommonSmm[PeType].GuestContextPerCpu[0].Register.Rbx = (UINTN)PeVmData[PeType].UserModule.SharedPage;
	mGuestContextCommonSmm[PeType].GuestContextPerCpu[0].Register.Rcx = (UINT64)PeVmData[PeType].UserModule.Segment;
	mGuestContextCommonSmm[PeType].GuestContextPerCpu[0].Register.Rdx = (UINTN) PeVmData[PeType].UserModule.SharedStmPage;
	// check and make sure that the heap is cleared as requested
	if((PeVmData[PeType].UserModule.VmConfig & PERM_VM_CLEAR_MEMORY) == PERM_VM_CLEAR_MEMORY)
	{
		// address and size calculations from setupModulepages
		UINTN StartEndBlock = PeVmData[PeType].UserModule.DataRegionSmmLoc;
		UINTN EndSize = PeVmData[PeType].UserModule.DataRegionSize;
		// BUGFIX: check before subtracting - EndSize is unsigned, so the old
		// "EndSize -= DoNotClearSize; if (0 >= EndSize)" test wrapped to a huge
		// value (and would have cleared far beyond the heap) whenever
		// DoNotClearSize exceeded DataRegionSize.
		if(PeVmData[PeType].UserModule.DoNotClearSize >= EndSize)
		{
			DEBUG((EFI_D_ERROR, "%ld LaunchPeVM - VM/PE heap space not cleared because of DoNotClearSize too large\n", CpuIndex));
		}
		else
		{
			StartEndBlock += PeVmData[PeType].UserModule.DoNotClearSize;
			EndSize -= PeVmData[PeType].UserModule.DoNotClearSize;
			DEBUG((EFI_D_ERROR, "%ld LaunchPeVm - Clearing VM/PE heap space: 0x%016llx:0x%016llx\n", CpuIndex, StartEndBlock, EndSize ));
			ZeroMem ((VOID *)(UINTN)StartEndBlock, EndSize);
		}
	}
	// setup the variables that are used in case an SMI is taken while the PE VM is running
	// this needs to be fixed if more than one PE/VM is running
	PeSmiControl.PeNmiBreak = 1;                 // when 1, a NMI has been sent to break the thread in PE_APIC_id
	PeSmiControl.PeApicId = ReadLocalApicId ();  // APIC id of the thread that is executing the PE VM
	PeSmiControl.PeCpuIndex = CpuIndex;
	PeSmiControl.PeExec = 0;                     // when 1, PE_APIC_ID is executing the PE VM
	VmPeReady = 0;                               // set the ready gate
	DEBUG((EFI_D_ERROR, "%ld LaunchPeVm - before check PeSmiState: %ld\n", CpuIndex, PeSmiControl.PeSmiState));
	if(InterlockedCompareExchange32(&PeSmiControl.PeSmiState, PESMIHSMI, PESMIHSMI) == PESMIHSMI)
	{
		// if we know that the SMI handler is already active, then don't continue
		// save the state, process the SMI, then start the VM/PE afterwards
		DEBUG((EFI_D_ERROR,"%ld LaunchPeVM - SMI being processed - faking NMI - PeSmiState: %ld\n", CpuIndex, PeSmiControl.PeSmiState));
	}
	else
	{
		// no SMI in flight: open the NMI break path before the guest starts
		PeSmiControl.PeNmiBreak = 0;
		PeSmiControl.PeExec = 1;
		enable_nmi();   // turn on NMI
	}
	// Set InitialStack pointer to the top of User memory...
	InitStackPointer = (UINTN) PeVmData[PeType].UserModule.AddressSpaceStart + (UINTN) PeVmData[PeType].UserModule.AddressSpaceSize - (UINTN) 16;
	VmWriteN (VMCS_N_GUEST_RSP_INDEX, InitStackPointer);
	DEBUG((EFI_D_ERROR, "%ld LaunchPeVm- IntialStackPointer: 0x%llx\n", CpuIndex, InitStackPointer));
	DEBUG((EFI_D_ERROR, "%ld LaunchPeVm- VMCS_N_GUEST_RFLAGS_INDEX: %08llx\n", CpuIndex, VmReadN(VMCS_N_GUEST_RFLAGS_INDEX)));
	DEBUG((EFI_D_ERROR, "%ld LaunchPeVm- IA32_EFER_MSR: 0x%llx\n", CpuIndex, AsmReadMsr64 (IA32_EFER_MSR_INDEX)));
	DEBUG((EFI_D_ERROR, "%ld LaunchPeVm- VMCS_32_CONTROL_PIN_BASED_VM_EXECUTION_INDEX: 0x%llx\n", CpuIndex, VmRead32(VMCS_32_CONTROL_PIN_BASED_VM_EXECUTION_INDEX)));
	DEBUG((EFI_D_ERROR, "%ld LaunchPeVm- guest parameter regs:\n RBX: %p (shared page)\n RCX: %p (region list)\n RDX: %p (shared STM)\n",
		CpuIndex,
		mGuestContextCommonSmm[PeType].GuestContextPerCpu[0].Register.Rbx,
		mGuestContextCommonSmm[PeType].GuestContextPerCpu[0].Register.Rcx,
		mGuestContextCommonSmm[PeType].GuestContextPerCpu[0].Register.Rdx));
	PeVmData[PeType].UserModule.RunCount++;
	// set the runcount into the STM shared page
	*((UINT64 *)(PeVmData[PeType].SharedPageStm + sizeof(UINT64))) = PeVmData[PeType].UserModule.RunCount;
	DEBUG((EFI_D_ERROR, "%ld LaunchPeVM - Initiating PE/VM run number: %d\n",
		CpuIndex,
		PeVmData[PeType].UserModule.RunCount));
	DEBUG((EFI_D_ERROR, "%ld LaunchPeVM - SharedPageStm 0x%016llx 0x%016llx\n",
		CpuIndex,
		*((UINT64 *)(PeVmData[PeType].SharedPageStm)),
		*((UINT64 *)(PeVmData[PeType].SharedPageStm + sizeof(UINT64)))));
	mHostContextCommon.HostContextPerCpu[CpuIndex].GuestVmType = PeType;  // Make sure we take the correct path upon RSM
	StartPeTimeStamp = AsmReadTsc();   // set start time
	AsmWbinvd ();
	DEBUG((EFI_D_ERROR, "%ld LaunchPeVm - ***Debug*** VmPE ready for launch PeType %d registers-address: 0x%016llx\n",
		CpuIndex, PeType, &mGuestContextCommonSmm[PeType].GuestContextPerCpu[0].Register ));
	// need to check to see if an SMI happened during this period
	// first indicate that the VM/PE is ready for launch
	VmPeReady = 1;   // this will cause the interrupt handler to save the VM/PE and launch the VM/PE once the SMI is handled
	if(InterlockedCompareExchange32(&PeSmiControl.PeSmiState, PESMIPNMI, PESMIHSMI) == PESMIHSMI)
	{
		// if we are here, then an SMI has come in and the system is processing it
		// we need to get out and let the system process the SMI and then restart
		// so save the state for when the processors come out of the SMI
		DEBUG((EFI_D_ERROR,"%ld LaunchPeVM - SMI detected during build - delaying launch to handle SMI\n", CpuIndex));
		save_Inter_PeVm(CpuIndex);
		DEBUG((EFI_D_ERROR, "%ld LaunchPeVM - Warning: Return from non-returnable function\n", CpuIndex));
	}
	if(NMIReceived > 1)   // check to see if we received an NMI during the build process - if so, handle the SMI then launch
	{
		DEBUG((EFI_D_ERROR,"%ld LaunchPeVM - NMI detected during build - delaying launch to handle SMI\n", CpuIndex));
		save_Inter_PeVm(CpuIndex);
		DEBUG((EFI_D_ERROR, "%ld LaunchPeVM - Warning: Return from non-returnable function\n", CpuIndex));
		// this function should not return
	}
	DEBUG((EFI_D_ERROR, "%ld LaunchPeVM - Launching PE/VM - NMIReceived: %d\n", CpuIndex, NMIReceived));
	Rflags = AsmVmLaunch (&mGuestContextCommonSmm[PeType].GuestContextPerCpu[0].Register);
	DEBUG ((EFI_D_ERROR, "%ld LaunchPeVm - (STM):o(\n", (UINTN)CpuIndex));
	// fall back to VMRESUME in case the VMCS is already in the launched state
	Rflags = AsmVmResume (&mGuestContextCommonSmm[PeType].GuestContextPerCpu[0].Register);
	// returning here means that the launch has failed...
	PeSmiControl.PeExec = 0;   // not any more
	AcquireSpinLock (&mHostContextCommon.DebugLock);
	// BUGFIX: the three DEBUG lines below previously had format specifiers
	// without matching arguments (stray %d / missing CpuIndex)
	DEBUG ((EFI_D_ERROR, "%ld LaunchPeVm - !!!ResumeGuestSmm fail for PeVm!!!\n", (UINTN)CpuIndex));
	DEBUG ((EFI_D_ERROR, "%ld LaunchPeVm - Rflags: %08llx\n", CpuIndex, Rflags));
	DEBUG ((EFI_D_ERROR, "%ld LaunchPeVm - VMCS_32_RO_VM_INSTRUCTION_ERROR: %08x\n", CpuIndex, (UINTN)VmRead32 (VMCS_32_RO_VM_INSTRUCTION_ERROR_INDEX)));
	DumpVmcsAllField ();
	DumpRegContext (&mGuestContextCommonSmm[PeType].GuestContextPerCpu[0].Register);
	DumpGuestStack(CpuIndex);
	ReleaseSpinLock (&mHostContextCommon.DebugLock);
}
// Restart the permanent PE/VM.  Every run begins from the same initial
// state as the first launch.  Fails with PE_VM_EXECUTING when the VM is
// already running/suspended and PE_VM_NO_PERM_VM when no perm VM exists.
STM_STATUS RunPermVM(UINT32 CpuIndex)
{
	UINT32 Status;
	UINT32 PeType = PE_PERM;   // only the permanent PE/VM can be restarted
	DEBUG((EFI_D_ERROR, "%ld RunPermVM entered\n", CpuIndex));
	// the perm VM must be idle before it can be (re)started
	if(PeVmData[PeType].PeVmState != PE_VM_IDLE )
	{
		DEBUG((EFI_D_ERROR, "%ld RunPermVM - Can not run a Perm PE/VM\n", CpuIndex));
		switch(PeVmData[PeType].PeVmState)
		{
		case PE_VM_ACTIVE:
		case PE_VM_SUSPEND:
			Status = PE_VM_EXECUTING;
			DEBUG((EFI_D_ERROR, "%ld RunPermVM - Attempting to execute an already running Perm PE/VM\n", CpuIndex));
			break;
		default:
			Status = PE_VM_NO_PERM_VM;
			DEBUG((EFI_D_ERROR, "%ld RunPermVM - Attempt to execute a non-existant PE/VM state: %d\n", CpuIndex, PeVmData[PeType].PeVmState));
			break;
		}
		return Status;
	}
	PeVmData[PeType].PeVmState = PE_VM_ACTIVE;
	// remember how this run was initiated so the exit path unwinds correctly
	PeVmData[PeType].StartMode =
		(PeVmData[PeType].StartMode == PEVM_PRESTART_SMI) ? PEVM_START_SMI : PEVM_START_VMCALL;
	// rebuild the guest state for the run
	Status = SetupProtExecVm(CpuIndex, PeVmData[PE_PERM].UserModule.VmConfig, RESTART_VM, PeType);  // can only restart PERM_VM
	if(Status != PE_SUCCESS)   // did we have a problem
	{
		DEBUG((EFI_D_ERROR, "%ld - Error in configuring PE VM\n", CpuIndex));
		PeVmData[PeType].PeVmState = PE_VM_AVAIL;
		return Status;
	}
	LaunchPeVm(PeType, CpuIndex);   // Launch the PE/VM
	// control only reaches here if the launch itself failed
	PeVmData[PE_PERM].PeVmState = PE_VM_AVAIL;   // not there anymore
	mHostContextCommon.HostContextPerCpu[CpuIndex].GuestVmType = SMI_HANDLER;
	return Status;
}
// used to cleanup a VM when we are done
// PRESERVE_VM - ensures that the structures needed for the perminate PE VM are retained
// SUSPEND_VM - VM has been interrupted by an SMI
// RELEASE_VM - used to release the resources associated with a PE VM
// used in cases of temp PE VM's and VM's that encounter errors
#define MSR_HSW_PLATFORM_INFO 0xCE // not in cpudef.h
/**
  Common exit processing after a PE VM run ends.

  @param rc        result code of the PE VM run (PE_* values)
  @param CpuIndex  cpu that ran the PE VM
  @param mode      PRESERVE_VM - keep the perm VM for future invocations
                   SUSPEND_VM  - the VM was interrupted by an SMI
                   RELEASE_VM  - free all resources of the VM

  Dumps diagnostics for abnormal result codes, updates the PE VM state
  machine, switches the current VMCS back to the SMI guest (faking a return
  to the MLE when suspending so the pending SMI can fire), reports the
  result to a VMCALL initiator via RAX/CF, and resumes the SMI guest.  On
  success this function does not return; reaching its tail is fatal.
**/
UINT32 PostPeVmProc(UINT32 rc, UINT32 CpuIndex, UINT32 mode)
{
	UINTN Rflags;
	X86_REGISTER *Reg;
	UINT64 TotalPeTime = EndTimeStamp - StartPeTimeStamp;   // raw TSC delta for the run
	UINT32 PeType = mHostContextCommon.HostContextPerCpu[CpuIndex].GuestVmType;
	UINT64 Scale;
	UINT64 TotalScaleTime;
	MSR_PLATFORM_INFO_DATA PlatformData;
	// scale the TSC delta by the max non-turbo ratio to approximate run time
	PlatformData.Uint64 = AsmReadMsr64(MSR_HSW_PLATFORM_INFO);
	Scale = MultU64x32(PlatformData.Bits.MaxNonTurboRatio, 100000);
	TotalScaleTime = DivU64x32(TotalPeTime, (UINT32) Scale);
	DEBUG((EFI_D_ERROR, "%ld PostPeVmProc - Platform Data - Max Ratio: %d\n", CpuIndex, PlatformData.Bits.MaxNonTurboRatio));
	DEBUG((EFI_D_ERROR, "%ld PostPeVmProc - TSC Info - StartPeTimeStamp: %ld EndTimeStamp: %ld\n", CpuIndex, StartPeTimeStamp, EndTimeStamp));
	DEBUG((EFI_D_ERROR, "%ld PostPeVmProc - PeType: %d mode: %d PE clocktime: %ld runtime(scaled): %ldms\n", CpuIndex, PeType, mode, TotalPeTime, TotalScaleTime));
	switch (rc)   // dump guest state upon bad return (eventually place in shared area)
	{
	case PE_VM_ATTEMPTED_VMCALL:
	case PE_VMLAUNCH_ERROR:
	case PE_VM_PAGE_FAULT:
	case PE_VM_GP_FAULT:
	case PE_VM_NMI:
	case PE_VM_CPUID:
	case PE_VM_IO:
	case PE_VM_READ_MSR:
	case PE_VM_WRITE_MSR:
	case PE_VM_BAD_ACCESS:
	case PE_VM_TRIPLE_FAULT:
	case PE_VM_EXCEPTION:
		DumpVmcsAllField();   // temp debug
		DumpVmxCapabillityMsr();
		DumpRegContext(&mGuestContextCommonSmm[PeType].GuestContextPerCpu[0].Register);
		DumpGuestStack(CpuIndex);
		print_region_list(PeType, CpuIndex);
		if((PERM_VM_CRASH_BREAKDOWN & PeVmData[PeType].UserModule.VmConfig) == PERM_VM_CRASH_BREAKDOWN)
		{
			// user wants perm vm released after crash
			mode = RELEASE_VM;
			DEBUG((EFI_D_ERROR, "%ld PostPeVmProc - Perm VM configured to be released after crash\n", CpuIndex));
		}
		else
		{
			mode = PRESERVE_VM;
		}
		break;
	default:
		// print out vm state during debug
		{
			// NOTE(review): EFI_D_ERROR is a bit mask in EDK2, so "== 1" is
			// normally never true - confirm the intended debug-build condition.
#if EFI_D_ERROR == 1
			DumpVmcsAllField();
			print_region_list(PeType, CpuIndex);
#endif
		}
	}
	// indicate that we are not running
	PeSmiControl.PeExec = 0;
	// bug - think about this one....
	PeSmiControl.PeSmiState = PESMINULL;   // indicate that we are out of it
	if(mode == PRESERVE_VM)
	{
		if((PERM_VM_RUN_ONCE & PeVmData[PeType].UserModule.VmConfig) == PERM_VM_RUN_ONCE)
		{
			// user configured the perm vm to run only once - release it now
			mode = RELEASE_VM;
			DEBUG((EFI_D_ERROR, "%ld PostPeVmProc - Perm VM configured to run only once\n", CpuIndex));
			PeSmiControl.PeCpuIndex = -1;   // sentinel: no PE VM cpu at the moment
		}
		else
		{
			if((PERM_VM_RUN_PERIODIC & PeVmData[PeType].UserModule.VmConfig) == PERM_VM_RUN_PERIODIC)
			{
				// the PE/VM is running in periodic mode - rearm the timer
				DEBUG((EFI_D_ERROR, "%ld PostPeVmProc - Perm VM being setup for Timer interrupt\n", CpuIndex));
				PeSmiControl.PeCpuIndex = CpuIndex;
				PeSmiControl.PeWaitTimer = 1;
				PeVmData[PeType].PeVmState = PE_VM_IDLE;
				// turn on the timer
				SetTimerRate(PeriodicSmi16Sec);
				StartTimer();
				AckTimer();
			}
		}
	}
	if(mode == SUSPEND_VM)
	{
		// suspending PE/VM so that SMI handler can run
		PeVmData[PeType].PeVmState = PE_VM_SUSPEND;
		DEBUG((EFI_D_ERROR, "%ld PostPeVmProc - PE/VM suspended - PeType: %ld\n", CpuIndex, PeType));
		// we will fake a return to the MLE - that will cause the pending SMI to fire allowing
		// the smiEvent handler to process it and release all the processor threads
		// to handle the SMI
		AsmVmPtrLoad(&mGuestContextCommonSmi.GuestContextPerCpu[CpuIndex].Vmcs);
		/// at this point we should return to the MLE as per the Intel method...
		AsmVmClear(&mGuestContextCommonSmm[PeType].GuestContextPerCpu[0].Vmcs);
		DEBUG ((EFI_D_ERROR, "%ld PostPeVmProc - !!exiting to allow SMI to fire to Enter SmiHandler\n", CpuIndex));
		mHostContextCommon.HostContextPerCpu[CpuIndex].GuestVmType = SMI_HANDLER;
#ifdef OLDWAY
		// set the SMI/Handler VMCS into place (note copied the Intel code - need to fix duplication...
		STM_PERF_START (CpuIndex, 0, "WriteSyncSmmStateSaveArea", "SmiEventHandler");
		WriteSyncSmmStateSaveArea (CpuIndex);
		STM_PERF_END (CpuIndex, "WriteSyncSmmStateSaveArea", "SmiEventHandler");
		AsmVmPtrStore (&mGuestContextCommonSmi.GuestContextPerCpu[CpuIndex].Vmcs);
		Rflags = AsmVmPtrLoad (&mGuestContextCommonSmm[PeType].GuestContextPerCpu[0].Vmcs);
		if ((Rflags & (RFLAGS_CF | RFLAGS_ZF)) != 0) {
			DEBUG ((EFI_D_ERROR, "%ld PostPeVmProc - ERROR: AsmVmPtrLoad - %016lx : %08x\n", (UINTN)CpuIndex, mGuestContextCommonSmm[PeType].GuestContextPerCpu[0].Vmcs, Rflags));
			DEBUG((EFI_D_ERROR, "%ld, PostPeVmProc - CpuDeadLoop\n", CpuIndex));
			CpuDeadLoop ();
		}
		VmWriteN (VMCS_N_GUEST_RIP_INDEX, (UINTN)mHostContextCommon.HostContextPerCpu[CpuIndex].TxtProcessorSmmDescriptor->SmmSmiHandlerRip);
		VmWriteN (VMCS_N_GUEST_RSP_INDEX, (UINTN)mHostContextCommon.HostContextPerCpu[CpuIndex].TxtProcessorSmmDescriptor->SmmSmiHandlerRsp);
		VmWriteN (VMCS_N_GUEST_CR3_INDEX, mGuestContextCommonSmm[PeType].GuestContextPerCpu[0].Cr3);
		STM_PERF_START (CpuIndex, 0, "BiosSmmHandler", "SmiEventHandler");
		if (mGuestContextCommonSmm[PeType].GuestContextPerCpu[0].Launched) {
			Rflags = AsmVmResume (&mGuestContextCommonSmm[PeType].GuestContextPerCpu[0].Register);
			// BUGBUG: - AsmVmLaunch if AsmVmResume fail
			if (VmRead32 (VMCS_32_RO_VM_INSTRUCTION_ERROR_INDEX) == VmxFailErrorVmResumeWithNonLaunchedVmcs) {
				Rflags = AsmVmLaunch (&mGuestContextCommonSmm[PeType].GuestContextPerCpu[0].Register);
			}
		} else {
			mGuestContextCommonSmm[PeType].GuestContextPerCpu[0].Launched = TRUE;
			// NOTE(review): indexing here ([0]/[CpuIndex]) differs from the rest
			// of this path ([PeType]/[0]) - dead code under OLDWAY, but confirm
			// before ever re-enabling.
			Rflags = AsmVmLaunch (&mGuestContextCommonSmm[0].GuestContextPerCpu[CpuIndex].Register);
			mGuestContextCommonSmm[PeType].GuestContextPerCpu[0].Launched = FALSE;
		}
#endif
		if (mGuestContextCommonSmi.GuestContextPerCpu[CpuIndex].Launched) {
			Rflags = AsmVmResume (&mGuestContextCommonSmi.GuestContextPerCpu[CpuIndex].Register);
			// BUGBUG: - AsmVmLaunch if AsmVmResume fail
			if (VmRead32 (VMCS_32_RO_VM_INSTRUCTION_ERROR_INDEX) == VmxFailErrorVmResumeWithNonLaunchedVmcs) {
				Rflags = AsmVmLaunch (&mGuestContextCommonSmi.GuestContextPerCpu[CpuIndex].Register);
			}
		} else {
			mGuestContextCommonSmi.GuestContextPerCpu[CpuIndex].Launched = TRUE;
			Rflags = AsmVmLaunch (&mGuestContextCommonSmi.GuestContextPerCpu[CpuIndex].Register);
			mGuestContextCommonSmi.GuestContextPerCpu[CpuIndex].Launched = FALSE;
		}
	}
	else
	{
		// set the MLE VMCS into place
		AsmVmPtrLoad(&mGuestContextCommonSmi.GuestContextPerCpu[CpuIndex].Vmcs);
		/// at this point we should return to the MLE as per the Intel method...
		AsmVmClear(&mGuestContextCommonSmm[PeType].GuestContextPerCpu[0].Vmcs);
		if(PeVmData[PeType].StartMode == PEVM_START_VMCALL)
		{
			// fixup return address: skip past the VMCALL that started the run
			DEBUG((EFI_D_ERROR, "%ld PostPeVmProc - PE/VM guest return address bumped\n", CpuIndex));
			VmWriteN (VMCS_N_GUEST_RIP_INDEX, VmReadN (VMCS_N_GUEST_RIP_INDEX) + VmRead32 (VMCS_32_RO_VMEXIT_INSTRUCTION_LENGTH_INDEX));
		}
	}
	// clear out the page table list
	if(mode == RELEASE_VM)
	{
		FreePE_DataStructures(PeType);
		// need to add code here in the instance a perm PE VM has crashed
		// so that in production someone cannot take advantage of this case
		PeVmData[PeType].PeVmState = PE_VM_AVAIL;   // not there anymore
		PeSmiControl.PeCpuIndex = -1;   // sentinel: no PE VM cpu at the moment
		//keep the old vmcs around - think about clearing...
		DEBUG((EFI_D_ERROR, "%ld PostPeVmProc - PE/VM Free (AVAIL) - PeType: %ld\n", CpuIndex, PeType));
	}
	else
	{
		// mark this VM as idle
		PeVmData[PeType].PeVmState = PE_VM_IDLE;   // Waiting for more action
		DEBUG((EFI_D_ERROR, "%ld PostPeVmProc - PE/VM Idle - PeType: %ld\n", CpuIndex, PeType));
	}
	if(PeVmData[PeType].StartMode == PEVM_START_VMCALL)
	{
		// report the result to the VMCALL initiator: RAX = code, CF = failure flag
		Reg = &mGuestContextCommonSmi.GuestContextPerCpu[CpuIndex].Register;
		WriteUnaligned32 ((UINT32 *)&Reg->Rax, rc);
		if (rc == PE_SUCCESS)
		{
			VmWriteN (VMCS_N_GUEST_RFLAGS_INDEX, VmReadN(VMCS_N_GUEST_RFLAGS_INDEX) & ~RFLAGS_CF);
		}
		else
		{
			DEBUG((EFI_D_ERROR, "%ld PostPeVmProc - Unsucessful return noted in RFLAGS_CF\n", CpuIndex));
			VmWriteN (VMCS_N_GUEST_RFLAGS_INDEX, VmReadN(VMCS_N_GUEST_RFLAGS_INDEX) | RFLAGS_CF);
		}
	}
	mHostContextCommon.HostContextPerCpu[CpuIndex].GuestVmType = SMI_HANDLER;
	DEBUG((EFI_D_ERROR, "%ld PostPeVmProc - sucessfully completed - RC: 0x%x\n", CpuIndex, rc));
	CheckPendingMtf (CpuIndex);
	//
	// Launch back
	//
	Rflags = AsmVmResume (&mGuestContextCommonSmi.GuestContextPerCpu[CpuIndex].Register);
	// BUGBUG: - AsmVmLaunch if AsmVmResume fail
	if (VmRead32 (VMCS_32_RO_VM_INSTRUCTION_ERROR_INDEX) == VmxFailErrorVmResumeWithNonLaunchedVmcs) {
		DEBUG ((EFI_D_ERROR, "%ld PostPeVmProc - (STM):o(\n", (UINTN)CpuIndex));
		Rflags = AsmVmLaunch (&mGuestContextCommonSmi.GuestContextPerCpu[CpuIndex].Register);
	}
	AcquireSpinLock (&mHostContextCommon.DebugLock);
	DEBUG ((EFI_D_ERROR, "%ld PostPeVmProc - !!!PePostVmProcessing FAIL!!!\n", CpuIndex));
	DEBUG ((EFI_D_ERROR, "%ld PostPeVmProc - Rflags: %08x\n", CpuIndex, Rflags));
	DEBUG ((EFI_D_ERROR, "%ld PostPeVmProc - VMCS_32_RO_VM_INSTRUCTION_ERROR: %08x\n", CpuIndex, (UINTN)VmRead32 (VMCS_32_RO_VM_INSTRUCTION_ERROR_INDEX)));
	DumpVmcsAllField ();
	DumpRegContext (&mGuestContextCommonSmi.GuestContextPerCpu[CpuIndex].Register);
	ReleaseSpinLock (&mHostContextCommon.DebugLock);
	// BUGFIX: CpuIndex argument was missing for the %ld specifier below
	DEBUG((EFI_D_ERROR, "%ld PostPeVmProc - CpuDeadLoop\n", CpuIndex));
	CpuDeadLoop ();   // crash here because we cannot get back to the MLE...
	// check to see if there is a path through the intel code for going back to the MLE
	return rc;   // unreachable
}
// Release everything owned by the given PE VM slot: the SMM buffer, the
// EPT table hierarchy and the STM shared page.  Safe to call when some
// (or all) of the resources were never allocated.
UINT32 FreePE_DataStructures(UINT32 PeType)
{
	PE_VM_DATA * VmData = &PeVmData[PeType];
	// release the SMM buffer pages, if present
	if(VmData->SmmBuffer != NULL)
	{
		FreePages(VmData->SmmBuffer, VmData->SmmBufferSize);
		VmData->SmmBuffer = 0;
		VmData->SmmBufferSize = 0;
	}
	// tear down the EPT hierarchy, if one was built
	if(mGuestContextCommonSmm[PeType].EptPointer.Uint64 != 0)
	{
		PeEptFree(mGuestContextCommonSmm[PeType].EptPointer.Uint64);
		mGuestContextCommonSmm[PeType].EptPointer.Uint64 = 0;
	}
	// release the single STM shared page, if present
	if(VmData->SharedPageStm != NULL)
	{
		FreePages(VmData->SharedPageStm, 1);
		VmData->SharedPageStm = 0;
	}
	return STM_SUCCESS;
}
// save_Inter_PeVm: called when an SMI interrupts a running PE VM - records the
// interrupted VM so it can be resumed once the SMI has been serviced.
UINT32 save_Inter_PeVm(UINT32 CpuIndex)
{
	// An SMI has interrupted a running PE VM: note which VM type was active
	// so the STM can come back to it, timestamp the end of the run, and
	// suspend the VM via PostPeVmProc.
	UINT32 InterruptedVmType = mHostContextCommon.HostContextPerCpu[CpuIndex].GuestVmType;
	// let the STM know that we are waiting to come back
	mHostContextCommon.HostContextPerCpu[CpuIndex].NonSmiHandler = InterruptedVmType;
	EndTimeStamp = AsmReadTsc();
	DEBUG((EFI_D_ERROR, "%ld save_Inter_PeVm - sucessfully completed\n", CpuIndex));
	PostPeVmProc(PE_SUCCESS, CpuIndex, SUSPEND_VM);
	// PostPeVmProc resumes the SMI guest and does not return here
	return STM_SUCCESS;
}
// Resume a PE VM previously suspended by save_Inter_PeVm so an SMI could be
// serviced.  Returns 1 (without resuming) when the multiprocessor state is
// not yet available or another SMI is in flight; on the success path
// AsmVmResume/AsmVmLaunch transfer control into the VM and this function
// does not return normally.
UINT32 RestoreInterPeVm(UINT32 CpuIndex, UINT32 PeType)
{
	UINT32 rc = STM_SUCCESS;
	UINTN Rflags;
	// restores a VM after an SMI
	if(GetMPState == 1)
	{
		// should only happen when the PEV/VM is initially loaded, otherwise
		// this informaition should be normally grabbed upon a smi timer interrupt
		// bug - need to consider the case of debug loading of a module for testing
		if(GetMultiProcessorState(CpuIndex) == -1)
		{
			GetMPState = 1; // Indicate that we still need to get the processor state
			return 1; // in this case there is an SMI in process and we need to let it be processed.
		}
		else
		{
			//Success - got the processor state
			GetMPState = 0;
		}
	}
	// abort the restart entirely if an SMI is currently being handled
	if(InterlockedCompareExchange32(&PeSmiControl.PeSmiState, PESMIHSMI, PESMIHSMI) == PESMIHSMI)
	{
		DEBUG((EFI_D_ERROR, "%ld RestoreInterPeVm - SMI in progress - aborting PE/VM restart\n", CpuIndex));
		return 1;
	}
	// need to think about locking here in case there are two smi's in a row...
	PeVmData[PeType].PeVmState = PE_VM_ACTIVE;
	PeSmiControl.PeNmiBreak = 1; // when 1, a NMI has been sent to break the thread in PE_APIC_id
	PeSmiControl.PeApicId = ReadLocalApicId (); // APIC id of the thread that is executing the PE V<
	PeSmiControl.PeCpuIndex = CpuIndex;
	//PeSmiControl.PeExec = 0; // when 1 PE_APIC_ID is executing a
	// think about break code (BUG)
	VmPeReady = 0; // set the ready gate (for here do not get out (BUG - Review this!!! for SMI in this interval)
	// NOTE: enable_nmi must run BEFORE clearing PeNmiBreak / setting PeExec
	// so the NMI break path is live when the guest starts - do not reorder.
	enable_nmi(); // turn on NMI
	PeSmiControl.PeNmiBreak = 0; // when 1, a NMI has been sent to break the thread in PE_APIC_id
	PeSmiControl.PeExec = 1; // when 1 PE_APIC_ID is executing a
	// setup the return
	// make the PE VM's VMCS current and mark this cpu as running it
	AsmVmPtrLoad(&mGuestContextCommonSmm[PeType].GuestContextPerCpu[0].Vmcs);
	mHostContextCommon.HostContextPerCpu[CpuIndex].GuestVmType = PeType;
	DEBUG((EFI_D_ERROR, "%ld RestoreInterPeVm - setup done, launching PE/VM\n", CpuIndex));
	// Launch back
	//
	Rflags = AsmVmResume (&mGuestContextCommonSmm[PeType].GuestContextPerCpu[0].Register);
	// BUGBUG: - AsmVmLaunch if AsmVmResume fail
	if (VmRead32 (VMCS_32_RO_VM_INSTRUCTION_ERROR_INDEX) == VmxFailErrorVmResumeWithNonLaunchedVmcs) {
		// DEBUG ((EFI_D_ERROR, "(STM):o(\n", (UINTN)Index));
		Rflags = AsmVmLaunch (&mGuestContextCommonSmm[PeType].GuestContextPerCpu[0].Register);
	}
	// reaching this point means the resume/launch failed
	return rc;
}
// Dump the PE module's region list to the debug log.  The list occupies at
// most one page of PE_REGION_LIST entries and is terminated by an entry
// whose Address is zero.
void print_region_list(UINT32 PeType, UINT32 CpuIndex)
{
	int Index = 0;
	UINT64 RegionEnd;
	PE_REGION_LIST * RegionList = PeVmData[PeType].UserModule.Segment;  // head of the list
	if(RegionList == NULL)
	{
		DEBUG((EFI_D_ERROR, "%ld - No region list\n", CpuIndex));
		return;
	}
	DEBUG((EFI_D_ERROR, "%ld --- Region List --- \n", CpuIndex));
	while(Index < (4096/sizeof(PE_REGION_LIST)))
	{
		// a zero address terminates the list
		if(RegionList[Index].Address == (UINT64) 0)
		{
			DEBUG((EFI_D_ERROR, "%ld Finish scanning Region List - %d elements found\n", CpuIndex, Index));
			break;
		}
		RegionEnd = RegionList[Index].Address + RegionList[Index].Size;
		DEBUG((EFI_D_ERROR, "%ld region set at 0x%016llx:%016llx - size 0x%016lx\n", CpuIndex, RegionList[Index].Address, RegionEnd, RegionList[Index].Size));
		Index++;
	}
}
// Register the NMI exception handler and fire a self-NMI, then spin until
// the handler observes it - proving NMI delivery is live before a PE VM run.
void enable_nmi()
{
	UINT32 Index = 2; // exception vector 2 (NMI) - see AsmSendInt2 below
	RegisterExceptionHandler (Index, NullExceptionHandler);
	NMIReceived = 0;
	AsmSendInt2(); // setup NMI
	// reset because we received an NMI because of the previous instruction
	// BUG - might have to review this to ensure that NMIReceived is set to zero after
	// the interrupt is fired
	// NOTE(review): this poll relies on NMIReceived being re-read every
	// iteration; the variable should be volatile-qualified or the compiler
	// may cache it and spin forever - confirm its declaration.
	while(NMIReceived == 0) {} // wait for NMI interrupt
	DEBUG((EFI_D_ERROR, "NMI handler active\n"));
}
// NMI handler installed by enable_nmi.  Normally it only counts NMIs; but if
// the NMI arrives in the window where a PE VM is fully built yet not
// launched (VmPeReady == 1), an SMI broke in, so the VM state is saved and
// the SMI serviced before launch (save_Inter_PeVm does not return).
VOID
EFIAPI
NullExceptionHandler (
	IN EFI_EXCEPTION_TYPE InterruptType,
	IN EFI_SYSTEM_CONTEXT SystemContext
	)
{
	NMIReceived = NMIReceived + 1; // increment
	DEBUG((EFI_D_ERROR, "***NMI***Happened****\n"));
	if(VmPeReady == 1)
	{
		UINT32 CpuIndex = ApicToIndex (ReadLocalApicId ());
		// in this instance, the VmPe is ready for launch, but an SMI has appeared after the NMI has
		// been enabled, but during the interval between VM/PE setup complete and its launch
		// so we will hold the launch, service the SMI and then launch the VM/PE once the
		// SMI is handled
		save_Inter_PeVm(CpuIndex);
		DEBUG((EFI_D_ERROR, "%ld enable_nmi - Return from non-returnable function\n", CpuIndex));
		// this function should not return...
	}
	return; // basically we are ignoring NMI, but setting a flag that it occurred
}
// Initialization code goes here - ran everytime that StmInit is run
static STM_GUEST_CONTEXT_PER_CPU GuestContext[3];
extern void PeInitStmHandlerSmm ();
// One-time PE subsystem initialization, executed every time StmInit runs:
// wires up per-cpu guest context storage for PE types 1..3, clears the
// per-VM memory pointers, resets the SMI coordination state and installs
// the PE SMM handler.
void InitPe()
{
	unsigned int Slot;
	DEBUG((EFI_D_ERROR, "InitPe - Starting PE initiaization\n"));
	// hand each PE guest context (types 1..3) its per-cpu context storage
	for(Slot = 1; Slot < 4; Slot++)
	{
		mGuestContextCommonSmm[Slot].GuestContextPerCpu = &GuestContext[Slot - 1];
	}
	// initialize the VM/PE memory pointers to null
	for(Slot = 0; Slot < 4; Slot++)
	{
		PeVmData[Slot].SharedPageStm = 0;
		PeVmData[Slot].SmmBuffer = 0;
		PeVmData[Slot].SmmBufferSize = 0;
	}
	// reset the SMI/NMI coordination block
	PeSmiControl.PeExec = 0;
	PeSmiControl.PeNmiBreak = 0;
	PeSmiControl.PeCpuIndex = -1;
	PeSmiControl.PeSmiState = 0;
	PeSmiControl.PeWaitTimer = 0; // non-zero reflect timer length and active
	PeInitStmHandlerSmm ();
	InitializeSpinLock (&PeSmiControl.PeSmiControlLock);
	DEBUG((EFI_D_ERROR, "InitPe - PE initialization complete\n"));
}

View File

@ -0,0 +1,140 @@
/** @file
Structures to pass information about the protected execution module
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php.
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
**/
#ifndef STMLOADVM_H
#define STMLOADVM_H
// Bit settings for VM_Configuration
// Bit settings for PE Vm setup
#define CS_L (1 << 13) // CS.L 64 bit mode active when set to 1
#define CS_D (1 << 14) // CS.D default operation (0 = 16 bit segment; 1= 32 bit segment)
// (NOTE: must be zero when CS.L=1 or VMX will not allow start)
#define SET_IA32E (1 << 15) // sets IA-32E mode upon entry
//#define SET_CR0_PE (1 << 0)
//#define SET_CR0_PG (1 << 31) // sets the paging bit
#define SET_CR0_WP (1 << 16)
#define SET_CR0_NE (1 << 5)
#define SET_CR4_PSE (1 << 4)
#define SET_CR4_PAE (1 << 3) // need to alias
#define PE_VM_EXCEPTION_HANDLING (1 << 6) // PE VM will handle execeptions - O.W. vmexit
// Return Codes
#define PE_SUCCESS 0 // PE setup/ran sucessfullu
#define PE_FAIL -1 // catchall PE ERROR
#define PE_SPACE_TOO_LARGE 0x80040001 // requested memory space too large
#define PE_MODULE_ADDRESS_TOO_LOW 0x80040002 // module start below address space start
#define PE_MODULE_TOO_LARGE 0x80040003 // PE module too large for address space (or located such that
// it overflows the end of the address space
// STM/PE error return codes (0x8004xxxx range): returned to the MLE by the
// AddPeVm / RunPeVm VMCALL paths when PE VM setup or execution fails.
#define PE_NO_PT_SPACE 0x80040004 // not enough space left for PE VM page tables
#define PE_NO_RL_SPACE 0x80040005 // not enough space left for resource list
#define PE_MEMORY_AC_SETUP_FAILURE 0x80040006 // could not setup accesses to PE space (internal error)
#define PE_SHARED_MEMORY_SETUP_ERROR 0x80040007 // could not setup shared memory
#define PE_MODULE_MAP_FAILURE 0x80040008 // could not map in the address space
#define PE_SHARED_MAP_FAILURE 0x80040009 // could not map in the shared page
#define PE_VMCS_ALLOC_FAIL 0x8004000A // could not allocate VMCS memory
#define PE_VMLAUNCH_ERROR 0x8004000B // attempted to launch PE VM with bad guest state
#define PE_VM_BAD_ACCESS 0x8004000C // PE VM attempted to access protected memory (out of bounds)
#define PE_VM_SETUP_ERROR_D_L 0x8004000D // CS_D and CS_L cannot be set to one at the same time
#define PE_VM_SETUP_ERROR_IA32E_D 0x8004000E // SET_IA32E must be set when CS_L is set
#define PE_VM_TRIPLE_FAULT 0x8004000F // PE VM crashed with a triple fault
#define PE_VM_PAGE_FAULT 0x80040010 // PE VM crashed with a page fault
#define PE_VM_GP_FAULT 0x80040024 // PE VM crashed with a GP fault (NOTE: value out of numeric sequence)
#define PE_VM_ATTEMPTED_VMCALL 0x80040011 // PE VM attempted VMCALL
#define PE_VM_NMI 0x80040012 // PE VM encountered NMI
#define PE_VM_CPUID 0x80040013 // PE VM encountered CPUID
#define PE_VM_IO 0x80040014 // PE VM attempted I/O
#define PE_VM_UNEXPECTED_VMEXIT 0x80040015 // PE VM attempted an unexpected VMEXIT (catchall)
#define PE_VM_READ_MSR 0x80040016 // PE VM attempted to read an MSR
#define PE_VM_WRITE_MSR 0x80040017 // PE VM attempted to write an MSR
#define PE_REGION_LIST_SETUP_ERROR 0x80040018 // Conflict in Permissions during setup of regions
#define PE_REGION_LIST_BAD_FORMAT 0x80040019 // Region List not properly formatted
#define PE_VM_BAD_PHYSICAL_ADDRESS 0x8004001A // Bad physical address provided by guest
#define PE_VM_NO_PERM_VM 0x8004001B // Attempt to execute a perm VM that does not exist
#define PE_VM_EXECUTING 0X8004001C // Attempt to execute a perm VM that is already executing (NOTE: unconventional "0X" prefix)
#define PE_VM_PERM_OPT_OUT 0x8004001D // Perm STM/PE has been opted out
#define PE_VM_PERM_ALREADY_ESTABLISHED 0x8004001E // Attempt setup a Perm PE VM when one has already been setup
#define PE_VM_TEMP_ACTIVE 0x8004001F // Another Temp VM is already executing, try again later
#define PE_VM_EXCEPTION 0x80040020 // PE VM terminated because of an exception
#define PE_VM_TEMP_OPT_OUT 0x80040021 // Temp STM/PE has been opted out
#define PE_VM_PE_SETUP_ERROR 0x80040023 // generic PE VM setup failure (e.g. STM/module shared page allocation failed)
#define PE_VM_PERM_TERM 0x80040022 // PE VM will be terminated when execution completes
// Various return codes
#define REGION_MEMSIZE_INVALID 0x80050001 // Invalid memory region request
#define INVALID_RESOURCE 0x80050002 // resource request not recognized / not allowed
#define ALLOC_FAILURE 0x80050003 // internal memory allocation failure
// PE VM States (lifecycle values kept in PeVmData[].PeVmState)
#define PE_VM_AVAIL 0 // PE VM not in use, can be allocated
#define PE_VM_ACTIVE 1 // PE VM currently executing or being setup
#define PE_VM_IDLE 2 // PE VM (Permanent VM only) waiting to be executed
#define PE_VM_OPT_OUT 3 // PE VM not allowed by configuration
#define PE_VM_TERM 4 // PE VM is terminating
#define PE_VM_SUSPEND 5 // PE VM is suspended because of SMI
// Fixed-width aliases used by the MSEG header layouts below.
// BUGFIX: "dword" was "unsigned long int", which is 64 bits on LP64 targets
// and would double the size of every dword field in the MSEG header structs.
// The MSEG header fields are 32-bit, so dword must be exactly 32 bits.
typedef unsigned char byte;
typedef unsigned short int word;
typedef unsigned int dword;
// MSEG header, hardware-consumed portion: describes how the processor enters
// the STM (GDTR, code selector, EIP/ESP/CR3 offsets).
// NOTE(review): field meanings inferred from names; confirm against the
// Intel SDM dual-monitor treatment MSEG header definition.
typedef struct {
	dword MsegHdrRevision;    // MSEG header revision identifier
	dword SmmMonitorFeatures; // SMM-monitor feature flags
	dword GdtrLimit;          // GDTR limit for STM entry
	dword GdtrBaseOffset;     // GDTR base, as an offset into MSEG
	dword CodeSelector;       // code selector used on STM entry
	dword EipOffset;          // entry EIP, as an offset into MSEG
	dword EspOffset;          // initial ESP, as an offset into MSEG
	dword Cr3Offset;          // initial CR3, as an offset into MSEG
} MSEG_HEADER_STRUCT_HW;
// MSEG header, software-consumed portion: STM version, size, and feature
// information read by software rather than by the processor.
// NOTE(review): descriptions inferred from field names; confirm against the
// STM specification's MSEG header definition.
typedef struct {
	byte StmSpecVerMajor;              // STM spec major version implemented
	byte StmSpecVerMinor;              // STM spec minor version implemented
	word ReservedSw;                   // reserved / alignment padding
	dword StaticImageSize;             // size of the static STM image
	dword PerProcDynamicMemorySize;    // dynamic memory required per processor
	dword AdditionalDynamicMemorySize; // additional dynamic memory required
	dword StmFeatures;                 // STM feature flags
	dword NumberOfRevIDs;              // count of revision IDs
	dword StmRevisionId;               // STM revision ID (first entry)
} MSEG_HEADER_STRUCT_SW;
// MSEG_HEADER_STRUCT_HW * HwStmHdr;
// MSEG_HEADER_STRUCT_SW * SwStmHdr;
// PE status flags found in the NonSmiHandler field of the STM_VMM_STRUCT (usually either pStmVmm or StmVmm)
// BUGFIX: the flag expansions are now parenthesized; the unparenthesized
// forms "1 << 0" / "1 << 1" mis-group in expressions such as
// "PE_WAIT_SMI + 1" (shift binds looser than addition).
#define PE_ACTIVE (1 << 0) //!< PE active on this VM
#define PE_WAIT_SMI (1 << 1) //!< PE waiting the completion of SMI processing
// VM disposition after a run
#define PRESERVE_VM 1 // keep the VM for future invocations
#define RELEASE_VM 2 // free up the resources associated with this VM
#define SUSPEND_VM 3 // keep the VM in suspended state because of SMI
// VM start modes
#define NEW_VM 1
#define RESTART_VM 2
// Releases all data structures associated with the PE VM of the given type.
extern UINT32 FreePE_DataStructures(UINT32 PeType);
// Builds a PE VM from caller-supplied module info and optionally launches it
// (RunVm). Returns STM_SUCCESS or a PE_* error code.
extern STM_STATUS AddPeVm(UINT32 ApicId, PE_MODULE_INFO * callerDataStructure, UINT32 PeVmNum, UINT32 RunVm);
// Launches a previously added permanent PE VM on the given CPU.
extern STM_STATUS RunPermVM(UINT32 CpuIndex);
#endif

View File

@ -0,0 +1,212 @@
/** @file
PE PCI Handler
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php.
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
**/
#include "StmRuntime.h"
#include "PeStm.h"
void SetTimerRate(UINT16 value);
// interval timer support
//#define CONFIG_MMCONF_BASE_ADDRESS 0xF8000000///
//#define DEFAULT_PCIEXBAR CONFIG_MMCONF_BASE_ADDRESS // frm sandybridge.h
// from pci_devs.h
#define PCU_DEV 0x1F
// from lpc.h (coreboot; original comment said "lcp.h" / "D30:F0" -- the
// macros below address D31:F0, the LPC bridge)
/* D31:F0 LPC bridge */
#define D31F0_PMBASE 0x40
#define D31F0_GEN_PMCON_1 0xA0
#define D31F0_GEN_PMCON_3 0xA4
#define SMI_EN 0x30 /* SMI Control and Enable Register (offset from PMBASE) */
#define SWSMI_TMR_EN (1<<6) // start software timer on bit set
#define PERIODIC_EN (1<<14) // enable periodic SMI generation
#define EOS_EN (1<<1) /* End of SMI */
#define SMI_STS 0x34 /* SMI Status Register (offset from PMBASE) */
// SMI STATUS BITS
#define SWSMI_TMR_STS (1<<6)
#define PERIODIC_STS (1<<14)
// from arch/io.h
// Encode a PCI bus/device/function into an MMCONFIG (ECAM) address offset.
#define PCI_DEV(SEGBUS, DEV, FN) ( \
(((SEGBUS) & 0xFFF) << 20) | \
(((DEV) & 0x1F) << 15) | \
(((FN) & 0x07) <<12))
typedef int device_t;
// NOTE(review): this file-scope pmbase appears unused -- each function below
// fetches its own local pmbase via get_pmbase(); confirm before removing.
static UINT16 pmbase = 0x0;
// 16-bit MMIO read from an absolute address.
static UINT16 read16(UINTN addr)
{
    volatile UINT16 *mmio = (volatile UINT16 *) addr;
    return *mmio;
}
// 16-bit MMIO write to an absolute address.
static void write16(UINTN addr, UINT16 Reg16)
{
    volatile UINT16 *mmio = (volatile UINT16 *) addr;
    *mmio = Reg16;
}
// 32-bit MMIO read from an absolute address.
static UINT32 read32(UINTN addr)
{
    volatile UINT32 *mmio = (volatile UINT32 *) addr;
    return *mmio;
}
// Read a 16-bit PCI config register through MMCONFIG (ECAM).
static UINT16 pcie_read_config16(device_t dev, unsigned int whereat)
{
    // ECAM base OR'ed with the encoded device and the register offset.
    UINTN cfgAddr = ((UINTN) mHostContextCommon.PciExpressBaseAddress) | dev | whereat;
    return read16(cfgAddr);
}
// Read a 32-bit PCI config register through MMCONFIG (ECAM).
static UINT32 pcie_read_config32(device_t dev, unsigned int whereat)
{
    UINTN cfgAddr = ((UINTN) mHostContextCommon.PciExpressBaseAddress) | dev | whereat;
    return read32(cfgAddr);
}
// Write a 16-bit PCI config register through MMCONFIG (ECAM).
static void pcie_write_config16(device_t dev, unsigned int whereat, UINT16 Reg16)
{
    UINTN cfgAddr = ((UINTN) mHostContextCommon.PciExpressBaseAddress) | dev | whereat;
    write16(cfgAddr, Reg16);
}
// Encoded config address of the PCU (LPC bridge): bus 0, device 0x1F, function 0.
static device_t get_pcu_dev(void)
{
    device_t pcu = PCI_DEV(0, PCU_DEV, 0);
    return pcu;
}
UINT16 get_pmbase(void)
{
return pcie_read_config16(get_pcu_dev(), D31F0_PMBASE ) & 0xFFF8;
}
// Enable the periodic SMI timer: clear any stale PERIODIC status, then set
// PERIODIC_EN in SMI_EN.
void StartTimer(void)
{
    UINT16 pm = get_pmbase();
    UINT32 smi_en = IoRead32(pm + SMI_EN) | PERIODIC_EN;
    UINT32 smi_sts = IoRead32(pm + SMI_STS);
    DEBUG((EFI_D_ERROR, "StartTimer - smi_en: 0x%08lx smi_sts: 0x%08lx\n", smi_en, smi_sts));
    IoWrite32(pm + SMI_STS, PERIODIC_STS);
    IoWrite32(pm + SMI_EN, smi_en);
}
// Set the EOS (End of SMI) bit so the chipset can deliver the next SMI.
void SetEndOfSmi(void)
{
    UINT16 pm = get_pmbase();
    UINT32 smi_en = IoRead32(pm + SMI_EN) | EOS_EN;
    IoWrite32(pm + SMI_EN, smi_en);
    DEBUG((EFI_D_ERROR, "SetEndOfSmi smi_en: 0x%08lx smi_sts: 0x%08lx\n", IoRead32(pm + SMI_EN), IoRead32(pm + SMI_STS)));
}
// Acknowledge the periodic SMI by writing its status bit back to SMI_STS.
void AckTimer(void)
{
    UINT16 pm = get_pmbase();
    IoWrite32(pm + SMI_STS, PERIODIC_STS);
    DEBUG((EFI_D_ERROR, "AckTimer - smi_en: 0x%08lx smi_sts: 0x%08lx\n", IoRead32(pm + SMI_EN), IoRead32(pm + SMI_STS)));
}
// Disable periodic SMI generation by clearing PERIODIC_EN in SMI_EN.
void StopSwTimer(void)
{
    UINT16 pm = get_pmbase();
    UINT32 smi_en = IoRead32(pm + SMI_EN) & ~PERIODIC_EN;
    IoWrite32(pm + SMI_EN, smi_en);
    DEBUG((EFI_D_ERROR, "StopSwTimer - smi_en: 0x%08lx smi_sts: 0x%08lx\n", IoRead32(pm + SMI_EN), IoRead32(pm + SMI_STS)));
}
// Check whether the periodic-timer SMI status bit is pending.
// @param Index  CPU index (debug output only)
// @return 1 if PERIODIC_STS is set in SMI_STS, 0 otherwise
int CheckTimerSTS(UINT32 Index)
{
    UINT16 pmbase = get_pmbase();
    UINT32 smi_sts = IoRead32(pmbase + SMI_STS);
    if((smi_sts & PERIODIC_STS) == PERIODIC_STS)
    {
        // BUGFIX: the format string previously had no conversion specifier for
        // smi_sts, leaving a stray variadic argument; it is now printed.
        DEBUG((EFI_D_ERROR, "%ld CheckTimerSTS - Timer Interrupt Detected smi_sts: 0x%08lx\n", Index, smi_sts));
        return 1;
    }
    return 0;
}
// Clear only the periodic-timer status bit in SMI_STS, leaving all other
// status bits untouched.
// FIX: declared with a proper (void) prototype instead of the K&R-style
// empty parameter list "()".
void ClearTimerSTS(void)
{
    UINT16 pmbase = get_pmbase();
    IoWrite32(pmbase + SMI_STS, PERIODIC_STS); // just want to clear the status - do not touch the rest
}
// Program the periodic SMI timer with the maximum rate-select value (3).
// FIX: (void) prototype instead of the K&R-style empty parameter list.
void SetMaxSwTimerInt(void)
{
    SetTimerRate(3);
}
// Program the periodic SMI timer with the minimum rate-select value (0).
// FIX: (void) prototype instead of the K&R-style empty parameter list.
void SetMinSwTimerInt(void)
{
    SetTimerRate(0);
}
// Program the periodic SMI rate-select field in GEN_PMCON_1.
// @param value  rate-select value 0-3 (values above 3 are clamped to 3);
//               per ICH/PCH datasheets bits [1:0] of GEN_PMCON_1 select the
//               periodic SMI rate.
// BUGFIX: the previous implementation only OR'ed the new value into the
// register, so once a bit of the rate-select field was set it could never be
// cleared (e.g. SetMinSwTimerInt() after SetMaxSwTimerInt() was a no-op).
// The 2-bit field is now cleared before the new value is written.
void SetTimerRate(UINT16 value)
{
    UINT16 Reg16;
    device_t PcuDev = get_pcu_dev();
    if(value > 3)
    {
        value = 3;
    }
    Reg16 = pcie_read_config16(PcuDev, D31F0_GEN_PMCON_1);
    Reg16 = (UINT16)((Reg16 & ~0x3) | value); // clear rate-select bits [1:0], then set
    pcie_write_config16(PcuDev, D31F0_GEN_PMCON_1, Reg16);
}

View File

@ -0,0 +1,197 @@
/** @file
PE SMI handler - Used for getting the other processor state and handling
SMI's during VM/PE build and execution
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php.
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
**/
#include "StmRuntime.h"
#include "PeStm.h"
#include "PeLoadVm.h"
// Shared PE SMI coordination state, updated across CPUs with interlocked ops.
PE_SMI_CONTROL PeSmiControl;
extern void SignalPeVm(UINT32 CpuIndex);
extern int CheckAndGetState(UINT32 CpuIndex);
extern void CpuReadySync(UINT32 Index);
extern PE_VM_DATA PeVmData[4]; // right now support a max of 3 PE VM (VM 0 is the SMI_HANDLER)
extern int CheckTimerSTS(UINT32 Index);
extern void StopSwTimer(void);
extern void SetEndOfSmi(void);
extern void PrintVmxState(UINT32 CpuIndex, ROOT_VMX_STATE * RootState);
// NOTE(review): HandleTimer/HandleSmi appear unused in this chunk -- confirm
// before removing.
static UINT32 HandleTimer = 0;
static UINT32 HandleSmi = 0;
// additional VM/PE SMI handling
static UINT32 retvalue = 0; // handler result shared across CPUs (interlocked)
/**
  PeSmiHandler - per-CPU SMI hook that coordinates SMI delivery with a
  running or starting PE (protected execution) VM.

  Every CPU runs this on SMI entry. Based on the shared
  PeSmiControl.PeSmiState state machine it decides whether this SMI is
  consumed by VM/PE handling (return 1, "one less processor" for the normal
  SmiEventHandler accounting) or should be processed normally (return 0).

  NOTE(review): the InterlockedCompareExchange32 calls here appear to take
  (address, compare-value, new-value), based on the "reset the state"
  comments below -- confirm against its definition.

  @param CpuIndex  index of the CPU executing this handler
  @return 0 = process this SMI normally, 1 = SMI consumed by VM/PE handling
**/
UINT32 PeSmiHandler(UINT32 CpuIndex)
{
  ROOT_VMX_STATE * RootState;
  UINT64 * NumProcessors;
  UINT32 PeType = PE_PERM;
  UINT32 CpuNum;

  // If no VM/PE activity is pending (state NULL), claim this SMI as a host SMI.
  InterlockedCompareExchange32(&PeSmiControl.PeSmiState, PESMINULL, PESMIHSMI);
  //DEBUG((EFI_D_ERROR, "%ld PeSmiHandler - CurrPeSmiState %ld\n", CpuIndex, PeSmiControl.PeSmiState));
  if(PeSmiControl.PeCpuIndex == (INT32)CpuIndex )  // when the pe/vm comes in...
  {
    //DEBUG((EFI_D_ERROR, "%ld PeSmiHandler - VM/PE responded to SMI, CurrPeSmiState %ld\n", CpuIndex, PeSmiControl.PeSmiState));
    InterlockedCompareExchange32(&PeSmiControl.PeSmiState, PESMIPNMI2, PESMINULL);
  }
  if(InterlockedCompareExchange32(&PeSmiControl.PeSmiState, PESMIPNMI, PESMIPNMI2) == PESMIPNMI)
    ///PESMIPNMI == PeSmiControl.PeSmiState)
  {
    // eventually the VM/PE will be started (or at least built) and this will cause one of the processors
    // to send a NMI to the VM/PE processor causing it to drop out and process the SMI
    // when it does, all processors will exit this loop and process the SMI as usual
    SignalPeVm(CpuIndex);  // make sure that the PE/VM processes this SMI as well
    //PeSmiControl.PeSmiState = PESMINULL;
  }
  CpuReadySync(CpuIndex);  // everyone waits until processor 0 figures out what to do
  switch(PeSmiControl.PeSmiState)
  {
  case PESMIPSMI:
    // VM/PE sends a SMI to the other processors when it wants state information from other CPU's
    NumProcessors = (UINT64 *) PeVmData[PeType].SharedPageStm;
    // root-state records start 64 bytes into the shared page
    RootState = (ROOT_VMX_STATE *) ((char *)NumProcessors + 64);//sizeof(*NumProcessors) + sizeof(*NumProcessors));
    // get the local processor state
    GetRootVmxState(CpuIndex, &RootState[CpuIndex]);
    //retvalue = 1; // we did something
    CpuReadySync(CpuIndex);  // wait for everyone to finish
    if(CpuIndex == 0)
    {
      InterlockedCompareExchange32(&PeSmiControl.PeSmiState, PESMIPSMI, PESMINULL);  // reset the state
    }
    return 1;  // tell the SmiEventHandler that there is one less processor
    break;
  case PESMIHSMI:
    // host SMI arrived; if this CPU owns the PE VM and a timer SMI is
    // pending, capture all CPU state and (re)start the permanent VM
    if(PeSmiControl.PeCpuIndex == (INT32)CpuIndex )
    {
      InterlockedCompareExchange32(&retvalue, 1, 0);  // make sure that this is zero
      if(InterlockedCompareExchange32(&PeSmiControl.PeWaitTimer, 1, 1) == 1)
      {
        if(CheckTimerSTS(CpuIndex) != 0)
        {
          //DEBUG((EFI_D_ERROR, "%ld CheckAndGetState - (PESMIHSMI) Processing VM/PE startup PeSmiState: %d\n", CpuIndex, PeSmiControl.PeSmiState));
          InterlockedCompareExchange32(&PeSmiControl.PeSmiState, PESMIHSMI, PESMIHTMR);
          NumProcessors = (UINT64 *) PeVmData[PeType].SharedPageStm;
          RootState = (ROOT_VMX_STATE *) ((char *)NumProcessors + 64);//sizeof(*NumProcessors) + sizeof(*NumProcessors));
          // get the local processor state
          GetRootVmxState(CpuIndex, &RootState[CpuIndex]);
          InterlockedCompareExchange32(&retvalue, 0, 1);  // we set to one to indicate we are not there
          // the VM/PE Cpu cleans up and runs
          //PeSmiControl.PeWaitTimer = 0;
          InterlockedCompareExchange32(&PeSmiControl.PeWaitTimer, 1, 0);
          //ClearSwTimerSTS();
          StopSwTimer();
          retvalue = 1;
          // start the VM/PE
          PeVmData[PeType].StartMode = PEVM_PRESTART_SMI;  // starting from SMI
          CpuReadySync(CpuIndex);  // sync everyone up
          SetEndOfSmi();  // make sure that the timer SMI has been cleared
          for(CpuNum = 0; CpuNum < mHostContextCommon.CpuNum; CpuNum++)
          {
            PrintVmxState(CpuNum, &RootState[CpuNum]);
          }
          if( mHostContextCommon.StmShutdown == 1)
          {
            // time to quit
            StmTeardown(CpuIndex);
          }
          RunPermVM(CpuIndex);
          //PeVmData[PeType].StartMode = PEVM_START_SMI; // for consistency in error conditions
          //CpuDeadLoop ();
          // should not get here...
          return retvalue;  // tell the SmiEventHandler that there is one less processor
        }
      }
    }
    CpuReadySync(CpuIndex);
    if(InterlockedCompareExchange32(&PeSmiControl.PeSmiState, PESMIHTMR, PESMIHTMR) == PESMIHTMR)
    {
      // the PE CPU moved the state to "timer": contribute our root state
      NumProcessors = (UINT64 *) PeVmData[PeType].SharedPageStm;
      RootState = (ROOT_VMX_STATE *) ((char *)NumProcessors + 64);//sizeof(*NumProcessors) + sizeof(*NumProcessors));
      GetRootVmxState(CpuIndex, &RootState[CpuIndex]);
      retvalue = 1;
      // we do not reset the state here as the VM/PE will be processing
      // when it completes it should end with a PeSmiState of PESMIPNMI (waiting for NMI)
    }
    else
    {
      if(CpuIndex == 0)
      {
        //PeSmiControl.PeSmiState = PESMINULL;
        InterlockedCompareExchange32(&PeSmiControl.PeSmiState, PESMIHSMI, PESMINULL);  // one of these will work
      }
      retvalue = 0;
    }
    return retvalue;
    break;
  case PESMINULL:
    return 0;  // process normally
    break;
  case PESMIPNMI:
  case PESMIHTMR:
    // at this point, if this is set, the VM/PE is in the startup process and
    // has set this so that at the next SMI, if it occurs while the VM/PE is active
    // the pssmihandler can shoot down the VM/PE
    // we have a one return because the VM/PE will stay in SMM
    //SetEndOfSmi();  // make sure that the timer SMI has been cleared
    return 1;
    break;
  default:
    DEBUG((EFI_D_ERROR, "%ld CheckAndGetState (default) ERROR incorrect PeSmiState: %ld, setting to PESMINULL (0)\n",
      CpuIndex, PeSmiControl.PeSmiState));
    PeSmiControl.PeSmiState = PESMINULL;
    return 0;
  }
}

View File

@ -0,0 +1,579 @@
#include "StmRuntime.h"
#include "PeStm.h"
#include "PeLoadVm.h"
// Per-PE-VM state table (4 slots).
extern PE_VM_DATA PeVmData[4];
extern PE_SMI_CONTROL PeSmiControl;
// Builds the EPT structures for a PE VM.
extern INT32 PeEptInit (EPT_POINTER *EptPointer);
extern void print_region_list(UINT32 PeType, UINT32 CpuIndex);
extern UINT32 SetupProtExecVm(UINT32 CpuIndex, UINT32 VM_Configuration, UINT32 mode, UINT32 PeType);
extern void LaunchPeVm(UINT32 PeType, UINT32 CpuIndex);
extern VOID EptDumpPageTable (IN EPT_POINTER *EptPointer );
// forward declaration: maps the module and its address space into the PE VM's EPT
static UINT32 setupModulepages(UINT32 PeType, UINT32 CpuIndex);
/**
  AddPeVm - build (and optionally launch) a protected-execution (PE) VM from a
  caller-supplied module description.

  Validates the caller's addresses, copies the user module into SMRAM, builds
  EPT mappings for the module address space, the caller shared page, the
  optional read-only region list, and the STM/module shared page, then
  configures the PE VM and (when RunVm == 1) launches it.

  @param CpuIndex             index of the CPU executing this request
  @param callerDataStructure  module load information, already copied into
                              SMRAM by the caller of this function
  @param PeType               PE VM slot/type index into PeVmData[]
  @param RunVm                1 = launch immediately; otherwise leave the VM
                              in PE_VM_IDLE for a later run request
  @return STM_SUCCESS, or a PE_* error code on failure
**/
STM_STATUS AddPeVm(UINT32 CpuIndex, PE_MODULE_INFO * callerDataStructure, UINT32 PeType, UINT32 RunVm)
{
  // assume that the caller has already moved the module_info data structure
  // into SMRAM and made sure that the MLE has not done something funny
  UINTN *sourceBuffer = (UINTN *)NULL;
  UINTN *destBuffer = (UINTN *) NULL;
  STM_STATUS rc;
  RETURN_STATUS rc1, rc2, rc3;  // used for return code
  UINT32 pageAlignedSize;
  UINT32 numModulePages;
  UINT_128 Data128;

  DEBUG((EFI_D_ERROR,"%ld AddPeVm - entered, PeType: %d\n", CpuIndex, PeType));
  PeVmData[PeType].PeVmState = PE_VM_ACTIVE;  // indicate we are here
  DEBUG((EFI_D_ERROR, "%ld AddPeVm - callerDataStructure location: 0x%08lx 0x%08lx\n", CpuIndex, (UINT64) (((UINT64)callerDataStructure) >> 32), (UINT64)(callerDataStructure)));
  // pull information from the modules data structure
  PeVmData[PeType].UserModule.ModuleLoadAddress = callerDataStructure->ModuleLoadAddress;
  PeVmData[PeType].UserModule.ModuleSize = callerDataStructure->ModuleSize;
  PeVmData[PeType].UserModule.ModuleEntryPoint = callerDataStructure->ModuleEntryPoint;
  PeVmData[PeType].UserModule.AddressSpaceStart = callerDataStructure->AddressSpaceStart;
  PeVmData[PeType].UserModule.AddressSpaceSize = callerDataStructure->AddressSpaceSize;
  PeVmData[PeType].UserModule.VmConfig = callerDataStructure->VmConfig;
  PeVmData[PeType].UserModule.Cr3Load = callerDataStructure->Cr3Load;
  PeVmData[PeType].UserModule.SharedPage = callerDataStructure->SharedPage;
  PeVmData[PeType].UserModule.SharedPageSize = callerDataStructure->SharedPageSize;
  PeVmData[PeType].UserModule.Segment = callerDataStructure->Segment;
  PeVmData[PeType].UserModule.ModuleAddress = callerDataStructure->ModuleAddress;
  PeVmData[PeType].UserModule.ModuleDataSection = callerDataStructure->ModuleDataSection;
  PeVmData[PeType].UserModule.DoNotClearSize = callerDataStructure->DoNotClearSize;
  PeVmData[PeType].UserModule.RunCount = 0;
#if defined (MDE_CPU_X64)
  sourceBuffer = (UINTN *)((PeVmData[PeType].UserModule.ModuleAddress));
#else
  sourceBuffer = (UINTN *) ((UINT32) (PeVmData[PeType].UserModule.ModuleAddress) & 0x0FFFFFFFF);
  // note need to check for corner case where user passed 64 bit address to STM
#endif
  // the module source must lie in guest-accessible memory
  if (!IsGuestAddressValid ((UINTN)sourceBuffer, PeVmData[PeType].UserModule.ModuleSize, TRUE))
  {
    DEBUG((EFI_D_ERROR, "%ld AddPeVm - module_address: bad physical address %p\n", CpuIndex,(void *) sourceBuffer));
    PeVmData[PeType].PeVmState = PE_VM_AVAIL;  // not there anymore
    return(PE_VM_BAD_PHYSICAL_ADDRESS);
  }
  DEBUG((EFI_D_ERROR, "%ld AddPeVm - ModuleSize: %ld (0x%08lx)\n",
    CpuIndex,
    PeVmData[PeType].UserModule.ModuleSize,
    (UINT64)PeVmData[PeType].UserModule.ModuleSize));
  DEBUG((EFI_D_ERROR, "%ld AddPeVm - module_entry_point: 0x%p\n", CpuIndex, (UINT64)PeVmData[PeType].UserModule.ModuleEntryPoint));
  DEBUG((EFI_D_ERROR, "%ld AddPeVm - ModuleLoadAddress: 0x%p\n", CpuIndex, (UINT64)PeVmData[PeType].UserModule.ModuleLoadAddress));
  DEBUG((EFI_D_ERROR, "%ld AddPeVm - AddressSpaceStart: 0x%p\n", CpuIndex, (UINT64)PeVmData[PeType].UserModule.AddressSpaceStart));
  DEBUG((EFI_D_ERROR, "%ld AddPeVm - AddressSpaceSize: 0x%p\n", CpuIndex, (UINT64) PeVmData[PeType].UserModule.AddressSpaceSize));
  DEBUG((EFI_D_ERROR, "%ld AddPeVm - DoNotClearSize: 0x%08lx\n", CpuIndex, (UINT64) PeVmData[PeType].UserModule.DoNotClearSize));
  DEBUG((EFI_D_ERROR, "%ld AddPeVm - CR3_LOAD: 0x%p\n", CpuIndex, (UINT64) PeVmData[PeType].UserModule.Cr3Load));
  DEBUG((EFI_D_ERROR, "%ld AddPeVm - VmConfig: 0x%08lx\n", CpuIndex, (UINT64) PeVmData[PeType].UserModule.VmConfig));
  DEBUG((EFI_D_ERROR, "%ld AddPeVm - shared page 0x%p\n", CpuIndex, (UINT64) PeVmData[PeType].UserModule.SharedPage));
  DEBUG((EFI_D_ERROR, "%ld AddPeVm - shared page size 0x%llx\n", CpuIndex, (UINT64) PeVmData[PeType].UserModule.SharedPageSize));
  DEBUG((EFI_D_ERROR, "%ld stmApaAddVM - segment page 0x%p\n", CpuIndex, (UINT64) PeVmData[PeType].UserModule.Segment));
  DEBUG((EFI_D_ERROR, "%ld AddPeVm - module_address 0x%p\n", CpuIndex, (UINT64) PeVmData[PeType].UserModule.ModuleAddress));
  DEBUG((EFI_D_ERROR, "%ld AddPeVm - ModuleDataSection 0x%p\n", CpuIndex, (UINT64) PeVmData[PeType].UserModule.ModuleDataSection));
  if(PeVmData[PeType].UserModule.SharedPage != 0)  //NULL not used because of 32bit/64bit diff
  {
    if (!IsGuestAddressValid ((UINTN)PeVmData[PeType].UserModule.SharedPage,
      (UINTN)PeVmData[PeType].UserModule.SharedPageSize,TRUE))
    {
      DEBUG((EFI_D_ERROR, "%ld AddPeVm - module_address: bad physical address %llx\n", CpuIndex,(void *) sourceBuffer));
      PeVmData[PeType].PeVmState = PE_VM_AVAIL;  // not there anymore
      return(PE_VM_BAD_PHYSICAL_ADDRESS);
    }
  }
  // determine how large the module is
  // go to the module and determine its size from the header block
  // allocate the space needed for the requested space
  numModulePages = (PeVmData[PeType].UserModule.AddressSpaceSize + (PAGE_SIZE - 1)) >> 12;  // calculate the number of pages in the module
  // allocate the space for the PE VM based upon the guest Address Space Size
  PeVmData[PeType].SmmBuffer = (UINTN *) AllocatePages(numModulePages);
  PeVmData[PeType].SmmBufferSize = numModulePages;  // AllocatePages does not retain size
  DEBUG((EFI_D_ERROR, "%ld AddPeVm - SmmBuffer at: %llx PeType: %d\n", CpuIndex, PeVmData[PeType].SmmBuffer, PeType));
  if(PeVmData[PeType].SmmBuffer == NULL)
  {
    DEBUG((EFI_D_ERROR, "%ld AddPeVm - failed to allocate module destination buffer\n", CpuIndex));
    FreePE_DataStructures(PeType);
    PeVmData[PeType].PeVmState = PE_VM_AVAIL;  // not there anymore
    return(PE_SPACE_TOO_LARGE);
  }
  // move the module into the space
  // since we are allowing the user to have a larger memory than the module itself
  // we need to calculate where within SmmBuffer the module will be placed.
  // things to check for self protection (will return an error to the user)
  //       ModuleLoadAddress < AddressSpaceStart
  //       ModuleLoadAddress + ModuleSize > AddressSpaceStart + AddressSpaceSize
  {
    if(PeVmData[PeType].UserModule.ModuleLoadAddress < PeVmData[PeType].UserModule.AddressSpaceStart)
    {
      DEBUG((EFI_D_ERROR, "%ld AddPeVm - Error: ModuleLoadAddress is lower than Address_Space_Start\n", CpuIndex));
      FreePE_DataStructures(PeType);
      PeVmData[PeType].PeVmState = PE_VM_AVAIL;  // not there anymore
      return(PE_MODULE_ADDRESS_TOO_LOW);
    }
    // calculate the location within the allocated space to place the module
    // BUGFIX: the offset is (ModuleLoadAddress - AddressSpaceStart); the
    // original operand order produced a negative offset (ModuleLoadAddress is
    // guaranteed >= AddressSpaceStart by the check above), and the offset was
    // applied as scaled UINTN* arithmetic instead of a byte offset (compare
    // setupModulepages, which adds FrontDataRegionSize to (UINTN)SmmBuffer).
    destBuffer = (UINTN *)((UINTN)PeVmData[PeType].SmmBuffer +
        (UINTN)(PeVmData[PeType].UserModule.ModuleLoadAddress -
                PeVmData[PeType].UserModule.AddressSpaceStart));
  }
  // make sure that the size of the module will fit into the allocated space
  pageAlignedSize = numModulePages << 12;
  {
    UINT64 Avail_Space = pageAlignedSize - (PeVmData[PeType].UserModule.ModuleLoadAddress - PeVmData[PeType].UserModule.AddressSpaceStart);
    if(Avail_Space < PeVmData[PeType].UserModule.ModuleSize)
    {
      DEBUG((EFI_D_ERROR, "%ld AddPeVm - Error: ModuleSize is too large to fit in the address space\n", CpuIndex));
      FreePE_DataStructures(PeType);
      PeVmData[PeType].PeVmState = PE_VM_AVAIL;  // not there anymore
      return(PE_MODULE_TOO_LARGE);
    }
  }
  DEBUG((EFI_D_ERROR, "%ld AddPeVm - destBuffer: %llx sourceBuffer: %llx PeType %d\n", CpuIndex, (UINT64)destBuffer, (UINT64)sourceBuffer, PeType));
  {
    CopyMem (destBuffer, sourceBuffer, PeVmData[PeType].UserModule.ModuleSize);
    DEBUG((EFI_D_ERROR, "%ld AddPeVm - **loaded Module SMRAM location: 0x%016llx ModuleSize: 0x%08x contents: 0x%016llx\n",
      CpuIndex, destBuffer, PeVmData[PeType].UserModule.ModuleSize, *destBuffer));
  }
  // dump the first few words of the copied module for debug
  // BUGFIX: pointer arithmetic is already scaled by the element size;
  // "+ sizeof(UINT64)" skipped 8 elements per step instead of 1.
  DEBUG((EFI_D_ERROR, "%ld AddPeVm - destBuffer: 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
    CpuIndex,
    *destBuffer,
    *(destBuffer + 1),
    *(destBuffer + 2),
    *(destBuffer + 3),
    *(destBuffer + 4)
    ));
  /// create the EPT tables for the PE VM
  /// need to create this for Intel Ref c
  rc = PeEptInit(&mGuestContextCommonSmm[PeType].EptPointer);
  if (rc != STM_SUCCESS) {
    DEBUG((EFI_D_ERROR, "%ld AddPeVm - Unable to setup PE page tables\n", CpuIndex));
    FreePE_DataStructures(PeType);
    PeVmData[PeType].PeVmState = PE_VM_AVAIL;  // not there anymore
    return(PE_NO_PT_SPACE);
  }
  // setup the permission for the shared page
  // map in our pages
  // setup the user module pages and address space
  rc1 = setupModulepages(PeType, CpuIndex);
  if(rc1 != STM_SUCCESS)
  {
    DEBUG((EFI_D_ERROR, "%ld AddPeVm - Unable to setup Module pages \n", CpuIndex));
    FreePE_DataStructures(PeType);
    PeVmData[PeType].PeVmState = PE_VM_AVAIL;  // not there anymore
    return(PE_MODULE_MAP_FAILURE);
  }
  DEBUG((EFI_D_ERROR, "%ld AddPeVm - module pages sucessfully setup\n now setting up shared page\n", CpuIndex));
  // caller shared page: read/write, no execute, identity mapped
  rc2 = EPTSetPageAttributeRange(
    mGuestContextCommonSmm[PeType].EptPointer.Uint64,
    (UINTN) PeVmData[PeType].UserModule.SharedPage,
    (UINTN) PeVmData[PeType].UserModule.SharedPageSize,
    (UINTN) PeVmData[PeType].UserModule.SharedPage,
    TRUE, /*read*/
    TRUE, /*write*/
    FALSE, /*execute*/
    EptPageAttributeSet);
  if(rc2 != STM_SUCCESS) {
    DEBUG((EFI_D_ERROR, "%ld AddPeVm - Unable to map shared page into Prot Exec VM\n", CpuIndex));
    FreePE_DataStructures(PeType);
    PeVmData[PeType].PeVmState = PE_VM_AVAIL;  // not there anymore
    return(PE_SHARED_MAP_FAILURE);
  }
  DEBUG((EFI_D_ERROR, "%ld AddPeVm - shared page sucessfully mapped - PeType: %d\n", CpuIndex, PeType));
  // set the permission and the PTEs for the r/o regions
  // setup the permission for the region list itself
  if(PeVmData[PeType].UserModule.Segment != NULL)
  {
    int counter;
    PE_REGION_LIST * rlist;
    if(!IsGuestAddressValid ((UINTN)PeVmData[PeType].UserModule.Segment,
      4096,TRUE))
    {
      DEBUG((EFI_D_ERROR, "%ld AddPeVm - region list: bad physical address: %p Size: %x\n", CpuIndex,
        PeVmData[PeType].UserModule.SharedPage, PeVmData[PeType].UserModule.SharedPageSize ));
      FreePE_DataStructures(PeType);
      PeVmData[PeType].PeVmState = PE_VM_AVAIL;  // not there anymore
      return(PE_VM_BAD_PHYSICAL_ADDRESS);
    }
    // link the list into the PE VM's space
    DEBUG((EFI_D_ERROR, "%ld AddPeVm - mapping region list into PE VM Space - PeType %d\n", CpuIndex, PeType));
    rc3 = EPTSetPageAttributeRange(
      mGuestContextCommonSmm[PeType].EptPointer.Uint64,
      (UINTN)PeVmData[PeType].UserModule.Segment,
      4096,
      (UINTN)PeVmData[PeType].UserModule.Segment, /* identity mapped */
      TRUE, /* READ */
      FALSE, /* write */
      FALSE, /* Execute */
      EptPageAttributeSet);
    if (rc3 != STM_SUCCESS)
    {
      DEBUG((EFI_D_ERROR, "%ld AddPeVm - Couldn't Setup region list access\n", CpuIndex));
      FreePE_DataStructures(PeType);
      PeVmData[PeType].PeVmState = PE_VM_AVAIL;  // not there anymore
      return(PE_REGION_LIST_SETUP_ERROR);
    }
    // now process the region list
    rlist = (PE_REGION_LIST *)PeVmData[PeType].UserModule.Segment;  // start the list
    // need to work in corner cases
    DEBUG((EFI_D_ERROR, "%ld AddPeVm - Setting up region list - PeType: %d\n", CpuIndex, PeType));
    for(counter = 0; counter < (4096/sizeof(PE_REGION_LIST)); counter++)
    {
      //!!! - Need to check to ensure the region is valid
      if((UINTN)rlist[counter].Address == 0)
        break;  // a zero address entry terminates the list
      DEBUG((EFI_D_ERROR, "%ld AddPeVm - Setting up region %p size %016lx PeType: %d\n", CpuIndex, (UINTN) rlist[counter].Address, (UINTN) rlist[counter].Size, PeType));
      if(!IsGuestAddressValid((UINTN)rlist[counter].Address,(UINTN)rlist[counter].Size,TRUE))
      {
        DEBUG((EFI_D_ERROR, "%ld AddPeVm - region: bad physical address %p size %016lx\n", CpuIndex,
          (UINTN)rlist[counter].Address,(UINTN)rlist[counter].Size));
#ifdef PRODUCTION
        FreePE_DataStructures(PeType);
        PeVmData[PeType].PeVmState = PE_VM_AVAIL;  // not there anymore
        return(PE_VM_BAD_PHYSICAL_ADDRESS);
#endif
        DEBUG((EFI_D_ERROR, "%ld AddPeVm - ***Warning*** bad region data - ignoring region entry in debug system\n", CpuIndex));
      }
      else
      {
        // now link it into the page table (read-only, identity mapped)
        rc3 = EPTSetPageAttributeRange(
          mGuestContextCommonSmm[PeType].EptPointer.Uint64,
          (UINTN)rlist[counter].Address,
          (UINTN)rlist[counter].Size,
          (UINTN)rlist[counter].Address, /* identity map */
          TRUE, /* READ */
          FALSE, /* write */
          FALSE, /* Execute */
          EptPageAttributeSet);
        if (rc3 != STM_SUCCESS)
        {
          DEBUG((EFI_D_ERROR, "%ld AddPeVm - Couldn't Setup region list\n", CpuIndex));
          FreePE_DataStructures(PeType);
          PeVmData[PeType].PeVmState = PE_VM_AVAIL;  // not there anymore
          return(PE_REGION_LIST_SETUP_ERROR);
        }
      }
    }
    print_region_list(PeType, CpuIndex);
  }
  else
  {
    DEBUG((EFI_D_ERROR, "%ld AddPeVm - No caller region requested\n", CpuIndex));
  }
  // allocate a page to be shared between the STM and the module
  // figure out a location in the guest physical address space for the page
  //
  // locate the shared STM/module page at the end of the guest physical address space
  PeVmData[PeType].UserModule.SharedStmPage = PeVmData[PeType].UserModule.AddressSpaceStart + PeVmData[PeType].UserModule.AddressSpaceSize;
  PeVmData[PeType].SharedPageStm = (UINTN *) AllocatePages(1);  // one page for now
  DEBUG((EFI_D_ERROR, "%ld AddPeVm - ShareModuleStm at: %p\n", CpuIndex, PeVmData[PeType].SharedPageStm));
  if(PeVmData[PeType].SharedPageStm == NULL)
  {
    DEBUG((EFI_D_ERROR,"%ld AddPeVm - failed to allocate module - STM shared page\n", CpuIndex));
    FreePE_DataStructures(PeType);
    PeVmData[PeType].PeVmState = PE_VM_AVAIL;  // not there anymore
    return(PE_VM_PE_SETUP_ERROR);
  }
  // map the STM shared page read-only at the end of the guest address space
  rc1 = EPTSetPageAttributeRange(
    mGuestContextCommonSmm[PeType].EptPointer.Uint64,
    (UINTN)PeVmData[PeType].UserModule.SharedStmPage,
    4096,
    (UINTN) PeVmData[PeType].SharedPageStm,
    TRUE, /* read */
    FALSE, /* write */
    FALSE, /* execute */
    EptPageAttributeSet);
  if(rc1 != STM_SUCCESS)
  {
    DEBUG((EFI_D_ERROR, "%d AddPeVm: could not setup module-stm shared page\n", CpuIndex));
  }
  DEBUG((EFI_D_ERROR, "%d AddPeVm: module-stm shared page setup\n", CpuIndex));
  //EptDumpPageTable (&mGuestContextCommonSmm[PeType].EptPointer);
  ///link the PT with the allocated space
  /// somehow have the STM's PT mark these as R/O or invisable... (TODO)
  // check the hash, etc of the module (TBD stuff)
  // invalidate any cached EPT mappings for this context
  Data128.Lo = mGuestContextCommonSmm[PeType].EptPointer.Uint64;
  Data128.Hi = 0;
  AsmInvEpt (INVEPT_TYPE_SINGLE_CONTEXT_INVALIDATION, &Data128);
  // (for now) start the VM...
  PeVmData[PeType].StartMode = PEVM_START_VMCALL;
  rc = SetupProtExecVm(CpuIndex, PeVmData[PeType].UserModule.VmConfig, NEW_VM, PeType);
  if(rc != PE_SUCCESS)  // did we have a problem
  {
    DEBUG((EFI_D_ERROR, "%ld AddPeVm - Error in configuring PE VM\n", CpuIndex));
    FreePE_DataStructures(PeType);
    //setPEerrorCode(rc, StmVmm);  // tell the caller of the problem
    PeVmData[PeType].PeVmState = PE_VM_AVAIL;  // not there anymore
    // StmVmm->NonSmiHandler = 0;  // no longer an PE VM
    AsmVmPtrLoad(&mGuestContextCommonSmi.GuestContextPerCpu[CpuIndex].Vmcs);
    /// at this point we should return to the MLE as per the Intel method...
    AsmVmClear(&mGuestContextCommonSmm[PeType].GuestContextPerCpu[0].Vmcs);
    mHostContextCommon.HostContextPerCpu[CpuIndex].GuestVmType = SMI_HANDLER;
    return(rc);
  }
  DEBUG((EFI_D_ERROR, "%ld AddPeVm - sucessfully completed - PeApicId: 0x%llx PeType: %d\n", CpuIndex, PeSmiControl.PeApicId, PeType));
  if(RunVm == 1)
  {
    PeVmData[PeType].StartMode = PEVM_START_VMCALL;
    LaunchPeVm(PeType, CpuIndex);  // launch the PE/VM
    // if we get to this point the PeVm has failed to launch so we need clean up the mess
    // and return the error to the caller
    FreePE_DataStructures(PeType);
    DEBUG((EFI_D_ERROR, "%ld AddPeVm - VM/PE Launch Failure\n", CpuIndex));
    rc = PE_VMLAUNCH_ERROR;
    PeVmData[PeType].PeVmState = PE_VM_AVAIL;  // not there anymore
  }
  else
  {
    DEBUG((EFI_D_ERROR, "%ld AddPeVm - VM not run per option\n", CpuIndex));
    rc = STM_SUCCESS;
    PeVmData[PeType].PeVmState = PE_VM_IDLE;  // waiting for action
    PeSmiControl.PeExec = 0;  // make sure
  }
  //setPEerrorCode(PE_VM_BAD_PHYSICAL_ADDRESS, StmVmm);
  AsmVmPtrLoad(&mGuestContextCommonSmi.GuestContextPerCpu[CpuIndex].Vmcs);
  /// at this point we should return to the MLE as per the Intel method...
  AsmVmClear(&mGuestContextCommonSmm[PeType].GuestContextPerCpu[0].Vmcs);
  mHostContextCommon.HostContextPerCpu[CpuIndex].GuestVmType = SMI_HANDLER;
  //PeSmiControl.PeExec = 1;  // when 1 PE_APIC_ID is executing a
  return rc;
}
STM_STATUS setupModulepages(UINT32 PeType, UINT32 CpuIndex)
{
RETURN_STATUS rc1 = STM_SUCCESS;
UINTN module_end, Address_end;
BOOLEAN Write = FALSE; // default for text region
BOOLEAN ExecuteHeap = FALSE;
UINTN module_address = PeVmData[PeType].UserModule.ModuleLoadAddress & ~OFFSET_BITMASK_IA32_4K;
UINTN ModuleSize; //= (PeVmData[PeType].UserModule.ModuleSize +(module_address - PeVmData[PeType].UserModule.ModuleLoadAddress)+ PAGE_SIZE_4K - 1) & ~OFFSET_BITMASK_IA32E_4K;
UINTN AddressSpaceStart = (UINTN) (PeVmData[PeType].UserModule.AddressSpaceStart & 0xFFFFFFFF);
UINTN AddressSpaceSize = (PeVmData[PeType].UserModule.AddressSpaceSize + PAGE_SIZE_4K - 1) & ~OFFSET_BITMASK_IA32E_4K;
UINTN StartEndBlock;
PeVmData[PeType].UserModule.ModuleDataSection &= ~OFFSET_BITMASK_IA32_4K;
if(PeVmData[PeType].UserModule.ModuleDataSection == 0)
{
ModuleSize = (PeVmData[PeType].UserModule.ModuleSize + (module_address - PeVmData[PeType].UserModule.ModuleLoadAddress)+ PAGE_SIZE_4K - 1) & ~OFFSET_BITMASK_IA32E_4K;
}
else
{
ModuleSize = (UINTN)(PeVmData[PeType].UserModule.ModuleDataSection & 0xFFFFFFFF) - module_address;
}
DEBUG((EFI_D_ERROR, "%ld setModulepages - entered AddressSpaceStart: %x AddressSpaceSize: %x\n", CpuIndex, AddressSpaceStart, AddressSpaceSize));
if(module_address > AddressSpaceStart)
{
// we have some space between the start of the address space and the module
PeVmData[PeType].UserModule.FrontDataRegionSize = module_address - AddressSpaceStart;
DEBUG((EFI_D_ERROR, "%ld setModulepages - setting up address space before the PE module (AddressSpaceStart): %x\n", CpuIndex, AddressSpaceStart));
rc1 = EPTSetPageAttributeRange(
mGuestContextCommonSmm[PeType].EptPointer.Uint64,
AddressSpaceStart,
PeVmData[PeType].UserModule.FrontDataRegionSize,
(UINTN) PeVmData[PeType].SmmBuffer,
TRUE, /* read */
TRUE, /* write */
FALSE, /* execute */
EptPageAttributeSet);
if(rc1 != RETURN_SUCCESS)
{
DEBUG((EFI_D_ERROR, "%ld setModulepages - failed to setup area in front of module\n", CpuIndex));
return 0xFFFFFFFF; // generic stm error
}
}
else
{
DEBUG((EFI_D_ERROR, "%ld setModulepages - no data space in front of module\n", CpuIndex));
PeVmData[PeType].UserModule.FrontDataRegionSize = 0;
if(module_address < AddressSpaceStart)
{
DEBUG((EFI_D_ERROR, "%ld setModulepages: Module starts before address space starts\n", CpuIndex));
return ERROR_STM_UNSPECIFIED; // universal error for now
}
}
// stuff in the middle
DEBUG((EFI_D_ERROR, "%ld setModulepages: Setting up area in the middle (module_address): %x\n", CpuIndex, module_address));
if((PeVmData[PeType].UserModule.VmConfig & PERM_VM_SET_TEXT_RW) == PERM_VM_SET_TEXT_RW)
{
Write = TRUE;
}
rc1 = EPTSetPageAttributeRange(
mGuestContextCommonSmm[PeType].EptPointer.Uint64,
module_address,
ModuleSize,
(UINTN)((UINTN)PeVmData[PeType].SmmBuffer + (UINTN)PeVmData[PeType].UserModule.FrontDataRegionSize),
TRUE, /* READ */
TRUE, /* write */
TRUE, /* Execute */
EptPageAttributeSet);
if(rc1 != RETURN_SUCCESS)
{
DEBUG((EFI_D_ERROR, "%d setModulepages - could not setup module area within address space\n", CpuIndex));
return ERROR_STM_UNSPECIFIED; // generic stm error
}
// stuff at the end
module_end = module_address + ModuleSize;
Address_end = AddressSpaceStart + AddressSpaceSize;
if(PeVmData[PeType].UserModule.ModuleDataSection == 0)
{
DEBUG((EFI_D_ERROR, "%ld setModulepages - ModuleDataSection is NULL, calculating data section\n", CpuIndex));
// user did not provide a data section - so we calculate it
PeVmData[PeType].UserModule.ModuleDataSection = module_end;
}
if(PeVmData[PeType].UserModule.ModuleDataSection < Address_end)
{
PeVmData[PeType].UserModule.DataRegionSize = Address_end - module_end;
StartEndBlock = (UINTN) (((UINTN) PeVmData[PeType].SmmBuffer) + (PeVmData[PeType].UserModule.ModuleDataSection - PeVmData[PeType].UserModule.AddressSpaceStart));
DEBUG((EFI_D_ERROR, "%ld setModulepages - Setting up area at the end (module_end): %x\n", CpuIndex, module_end));
DEBUG((EFI_D_ERROR, "%ld setModulepages - module_end: %llx DataRegionSize: %llx DataRegionStart: %llx SmmBuffer: %llx\n",
CpuIndex,
PeVmData[PeType].UserModule.ModuleDataSection,
PeVmData[PeType].UserModule.DataRegionSize,
PeVmData[PeType].UserModule.ModuleDataSection,
PeVmData[PeType].SmmBuffer));
PeVmData[PeType].UserModule.DataRegionSmmLoc = StartEndBlock;
DEBUG((EFI_D_ERROR, "%ld setModulepages - StartEndBlock: %llx\n", CpuIndex, StartEndBlock));
if((PeVmData[PeType].UserModule.VmConfig & PERM_VM_EXEC_HEAP) == PERM_VM_EXEC_HEAP)
{
ExecuteHeap = TRUE;
DEBUG((EFI_D_ERROR, "%d setModulepages - Execute Heap set to TRUE\n"));
}
else
ExecuteHeap = FALSE;
/*DEBUG*/ DEBUG((EFI_D_ERROR, "%d setModulepages - Execute Heap set to TRUE for debug purposes\n"));
ExecuteHeap = TRUE;
rc1 = EPTSetPageAttributeRange(
mGuestContextCommonSmm[PeType].EptPointer.Uint64,
(UINTN)PeVmData[PeType].UserModule.ModuleDataSection,
PeVmData[PeType].UserModule.DataRegionSize,
StartEndBlock,
TRUE, /* read */
TRUE, /* write */
ExecuteHeap, /* execute */
EptPageAttributeSet);
if(rc1 != RETURN_SUCCESS)
{
DEBUG((EFI_D_ERROR, "%d setModulepages - could not setup end of address space\n", CpuIndex));
return ERROR_STM_UNSPECIFIED;
}
DEBUG((EFI_D_ERROR, "%d setModulepages - end address region setup\n", CpuIndex));
}
else
{
PeVmData[PeType].UserModule.DataRegionSize = 0;
if(module_end == Address_end)
{
DEBUG((EFI_D_ERROR, "%d setModulepages - no space after end of module\n", CpuIndex));
}
else
{
DEBUG((EFI_D_ERROR, "%d setModulepages - end of Module is beyond address space end\n", CpuIndex));
rc1 = ERROR_STM_UNSPECIFIED;
}
}
// verify the no data section clearing size
if((UINTN)PeVmData[PeType].UserModule.DoNotClearSize > PeVmData[PeType].UserModule.DataRegionSize)
{
DEBUG((EFI_D_ERROR, "%d setModulepages - DoNotClearSize larger than DataRegionSize - Using DataRegion Size\n", CpuIndex));
PeVmData[PeType].UserModule.DoNotClearSize = (UINT32)PeVmData[PeType].UserModule.DataRegionSize;
}
DEBUG((EFI_D_ERROR, "%d SetModulepages - DoNotClearSize is: 0x%08lx\n",
CpuIndex, PeVmData[PeType].UserModule.DoNotClearSize));
//EptDumpPageTable (&mGuestContextCommonSmm[PeType].EptPointer);
return (STM_STATUS) rc1;
}

View File

@ -0,0 +1,37 @@
/** @file
PE Bad guest state handler
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php.
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
**/
#include "StmRuntime.h"
#include "PeStm.h"
#include "PeLoadVm.h"
extern UINT64 EndTimeStamp;
extern UINT32 PostPeVmProc(UINT32 rc, UINT32 CpuIndex, UINT32 mode);
void PeBadGuestStateHandler( IN UINT32 CpuIndex)
{
  UINT32 InstructionError;
  UINTN  ExitQualification;

  // Stamp the end of the PE VM run before anything else.
  EndTimeStamp = AsmReadTsc();

  // Pull the diagnostic fields out of the VMCS up front so the log
  // statements below stay simple.
  InstructionError  = VmRead32(VMCS_32_RO_VM_INSTRUCTION_ERROR_INDEX);
  ExitQualification = VmReadN(VMCS_N_RO_EXIT_QUALIFICATION_INDEX);

  DEBUG((EFI_D_ERROR, "%ld PeBadGuestStateHandler - PE VmLaunch attempted with invalid guest state - VmInstruction Error field: %x\n",
    CpuIndex, InstructionError));
  DEBUG((EFI_D_ERROR, "%ld PeBadGuestStateHandler - Exit qualification: %x\n", CpuIndex, ExitQualification));

  // Tear down the PE VM; this call is not expected to return.
  PostPeVmProc(PE_VMLAUNCH_ERROR, CpuIndex, RELEASE_VM);

  DEBUG((EFI_D_ERROR, "%ld PeBadGuestStateHandler - CpuDeadLoop\n", CpuIndex));
  CpuDeadLoop ();
}

View File

@ -0,0 +1,27 @@
/** @file
CPUID Handler
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php.
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
**/
#include "StmRuntime.h"
#include "PeStm.h"
extern UINT64 EndTimeStamp;
void PeCpuidHandler( IN UINT32 CpuIndex)
{
  // Record the end of the PE VM run for the timing statistics.
  EndTimeStamp = AsmReadTsc();

  // CPUID exits are not supported for a PE VM: log the condition and
  // halt this logical processor rather than fabricating a result.
  DEBUG((EFI_D_ERROR, "%ld PE - CPUID Handler not implemented\n", CpuIndex));
  DEBUG((EFI_D_ERROR, "%ld PeCpuidHandler - CpuDeadLoop\n", CpuIndex));
  CpuDeadLoop();
}

View File

@ -0,0 +1,31 @@
/** @file
CR Handler
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php.
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
**/
#include "StmRuntime.h"
#include "PeStm.h"
extern UINT64 EndTimeStamp;
VOID
SmmCrHandler (
IN UINT32 Index
);
/**
  Control-register-access VMEXIT handler for a PE VM.

  Delegates to the standard SMI-handler CR handler (SmmCrHandler), which
  already implements the needed control-register emulation.

  @param CpuIndex  Index of the CPU that took the VMEXIT.
**/
void PeCrHandler( IN UINT32 CpuIndex)
{
  DEBUG((EFI_D_ERROR, "%ld PeCrHandler - Entered\n", CpuIndex));
  SmmCrHandler(CpuIndex); // use the intel handler since it provides the necessary functionality
  // Fix: exit message previously logged the misspelled tag "PeCrHander".
  DEBUG((EFI_D_ERROR, "%ld PeCrHandler - done\n", CpuIndex));
  return;
}

View File

@ -0,0 +1,311 @@
/** @file
PeSMM exception handler
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php.
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
**/
#include "StmRuntime.h"
#include "PeStm.h"
#include "PeLoadVm.h"
extern UINT64 EndTimeStamp;
extern UINT32 save_Inter_PeVm(UINT32 CpuIndex);
extern UINT32 PostPeVmProc(UINT32 rc, UINT32 CpuIndex, UINT32 mode);
extern PE_VM_DATA PeVmData[];
UINT32 EventInjection (UINT32 Index, VM_EXIT_INFO_INTERRUPTION IntInfo, UINT32 IntErr);
// define this here for now
#define INTERRUPT_VECTOR_NMI 2
#define INTERRUPT_VECTOR_OF 4
#define INTERRUPT_VECTOR_BR 5
#define INTERRUPT_VECTOR_UD 6
#define INTERRUPT_VECTOR_DF 8
#define INTERRUPT_VECTOR_NP 11
#define INTERRUPT_VECTOR_SS 12
#define INTERRUPT_VECTOR_PF 14
#define INTERRUPT_VECTOR_GPF 13
unsigned int StmVmPeNmiExCount = 0;
/**
  Exception/NMI VMEXIT handler for a PE VM.

  Reads the VM-exit interruption information from the VMCS and dispatches
  on the exception vector:
    - NMI: taken to mean another processor received an SMI; the PE VM is
      saved (save_Inter_PeVm) so the SMI can be serviced.
    - GPF/#OF/#BR/#UD/#DF/#NP/#SS: re-injected into the guest when the VM
      was configured with PERM_VM_INJECT_INT, otherwise the PE VM is
      terminated via PostPeVmProc (VM state preserved).
    - Page fault: same injection option (unless the fault hit the IDT
      page); otherwise the fault is logged in detail, including a guest
      page-table walk on 64-bit builds, and the PE VM is terminated.

  @param CpuIndex  Index of the CPU that took the VMEXIT.
**/
void PeExceptionHandler( IN UINT32 CpuIndex)
{
VM_EXIT_INFO_INTERRUPTION IntInfo;
UINT32 IntErr;
UINTN address;
UINT32 VmType = mHostContextCommon.HostContextPerCpu[CpuIndex].GuestVmType;
// Stamp the end of the PE VM run, then capture the exit interruption
// information and its error code from the VMCS.
EndTimeStamp = AsmReadTsc();
IntInfo.Uint32 = VmRead32(VMCS_32_RO_VMEXIT_INTERRUPTION_INFO_INDEX);
IntErr = VmRead32(VMCS_32_RO_VMEXIT_INTERRUPTION_ERROR_CODE_INDEX);
StmVmPeNmiExCount++; // free up the waiting smi processor
DEBUG((EFI_D_ERROR, "%ld PeExceptionHandler VmexitInterruptionInfo: 0x%x INTERRUPTION_ERROR_CODE 0x%x\n", CpuIndex, IntInfo.Uint32, IntErr));
/*DEBUG*/DEBUG((EFI_D_ERROR, "%ld PeExceptionHandler - Exception Bitmap is: 0x%08lx\n", CpuIndex, VmRead32(VMCS_32_CONTROL_EXCEPTION_BITMAP_INDEX)));
/*DEBUG*/DEBUG((EFI_D_ERROR, "%ld PeExceptionHandler - IDT Vectoring info 0x%08lx IDT Error Code 0x%08lx\n", CpuIndex, VmRead32(VMCS_32_RO_IDT_VECTORING_INFO_INDEX), VmRead32(VMCS_32_RO_IDT_VECTORING_ERROR_CODE_INDEX)));
/*DEBUG*/DEBUG((EFI_D_ERROR, "%ld PeExceptionHandler - IDT Base 0x%016llx Limit 0x%016llx\n", CpuIndex, (UINT64)VmReadN(VMCS_N_GUEST_IDTR_BASE_INDEX), VmRead32(VMCS_32_GUEST_IDTR_LIMIT_INDEX)));
// Only dispatch when the hardware marked the interruption info valid.
if(IntInfo.Bits.Valid == 1)
{
switch(IntInfo.Bits.Vector)
{
case INTERRUPT_VECTOR_NMI:
{
// NMI means that (in this case) an external processor has received an SMI..
DEBUG((EFI_D_ERROR, "%ld PeExceptionHandler - received NMI because SMI detected\n", CpuIndex));
save_Inter_PeVm(CpuIndex); // put the VM to sleep so that the SMI can be handled
// NOTE(review): control is not expected back here; falling out of the
// switch reaches the CpuDeadLoop below - confirm save_Inter_PeVm's contract.
break;
}
case INTERRUPT_VECTOR_GPF:
case INTERRUPT_VECTOR_OF:
case INTERRUPT_VECTOR_BR:
case INTERRUPT_VECTOR_UD:
case INTERRUPT_VECTOR_DF:
case INTERRUPT_VECTOR_NP:
case INTERRUPT_VECTOR_SS:
{
// General Protection Fault- kill the PE/VM
//DEBUG((EFI_D_ERROR, "%ld - PE/VM terminated because of an exception %x\n", CpuIndex, IntInfo.Uint32));
DEBUG((EFI_D_ERROR, "%ld PeExceptionHandler - PE/VM General Protection Fault @ 0x%04lx:0x%016llx Address: 0x%016llx Info: 0x%lx\n",
CpuIndex,
VmRead16 (VMCS_16_GUEST_CS_INDEX),
VmReadN(VMCS_N_GUEST_RIP_INDEX),
VmReadN(VMCS_N_RO_EXIT_QUALIFICATION_INDEX),
IntInfo.Uint32));
if(((PERM_VM_INJECT_INT & PeVmData[VmType].UserModule.VmConfig) == PERM_VM_INJECT_INT))// does the VM/PE want to handle its own interrupts
{
// Reflect the exception back into the guest and resume it.
EventInjection(CpuIndex, IntInfo, IntErr);
return;
}
// otherwise abort
PostPeVmProc(PE_VM_GP_FAULT, CpuIndex, PRESERVE_VM);
break;
}
case INTERRUPT_VECTOR_PF:
{
UINTN IDTLocation = VmReadN(VMCS_N_GUEST_IDTR_BASE_INDEX); // find the IDT
// For a page fault the exit qualification holds the faulting address.
address = VmReadN(VMCS_N_RO_EXIT_QUALIFICATION_INDEX);
DEBUG((EFI_D_ERROR, "%ld PeExceptionHandler - VM/PE Page Fault @ 0x%04lx:0x%016llx Address: 0x%016llx Info: 0x%lx\n",
CpuIndex,
VmRead16 (VMCS_16_GUEST_CS_INDEX),
VmReadN(VMCS_N_GUEST_RIP_INDEX),
address,
IntInfo.Uint32));
// A fault on the guest's IDT page is never reflected - the VM is torn down.
if(( address >= IDTLocation) && (address < (IDTLocation + SIZE_4KB)))
{
DEBUG((EFI_D_ERROR, "%ld PeExceptionHandler - VM/PE Page Fault on IDT page - terminating VM\n", CpuIndex));
}
else
{
if(((PERM_VM_INJECT_INT & PeVmData[VmType].UserModule.VmConfig) == PERM_VM_INJECT_INT))// does the VM/PE want to handle its own page fault
{
AsmWriteCr2(address); //CR2 holds the Page Fault address
VmWrite32(VMCS_32_CONTROL_VMENTRY_INTERRUPTION_INFO_INDEX, IntInfo.Uint32);
VmWrite32(VMCS_32_CONTROL_VMENTRY_EXCEPTION_ERROR_CODE_INDEX , IntErr);
/*debug*/ DEBUG((EFI_D_ERROR, "%ld PeExceptionHandler - Injecting Page Fault\n", CpuIndex));
return;
}
}
// Decode the page-fault error-code bits for the log.
DEBUG((EFI_D_ERROR, " INTERRUPTION_ERROR_CODE: 0x%x\n",
IntErr));
if(IntErr & 0x00000001)
DEBUG((EFI_D_ERROR, "Page-level protection violation\n"));
else
DEBUG((EFI_D_ERROR, "Non-present page\n"));
if(IntErr & 0x00000002)
DEBUG((EFI_D_ERROR, "Write-access attempted\n"));
else
DEBUG((EFI_D_ERROR, "read-access attempted\n"));
if(IntErr & 0x00000004)
DEBUG((EFI_D_ERROR, "USER mode\n"));
else
DEBUG((EFI_D_ERROR, "Supervisor mode\n"));
if(IntErr & 0x00000008)
DEBUG((EFI_D_ERROR, "Reserved bit set in paging structures\n"));
if(IntErr & 0x00000010)
DEBUG((EFI_D_ERROR, "Instruction FETCH\n"));
else
DEBUG((EFI_D_ERROR, "Data access\n"));
// if the PT is 64-bit then dump the table for diagnostic purposes
// Index masks for each level of a 4-level page table, plus the mask
// that extracts the 4K-aligned physical address from an entry.
#define Level4 0x0000FF8000000000ULL
#define Level3 0x0000007FC0000000ULL
#define Level2 0x000000003FE00000ULL
#define Level1 0x00000000001FF000ULL
#define PAGING_4K_ADDRESS_MASK_64 0x000FFFFFFFFFF000ull
if (sizeof(UINTN) == sizeof(UINT64))
{
UINTN PageTable;
UINTN Pml4Index;
UINTN PdpteIndex;
UINTN PdeIndex;
UINTN PteIndex;
UINTN *Pde;
UINTN *Pdpte;
UINTN *Pml4;
UINTN *Pte;
UINTN BaseAddress;
UINTN EndAddress;
UINTN PhysBase;
UINTN PdpteV;
UINTN PdeV;
UINTN PteV;
UINTN Offset;
PageTable = VmReadN (VMCS_N_GUEST_CR3_INDEX);
//Pml4 = (UINT64 *)PageTable;
Pml4Index = (address & Level4) >> 39;
PdpteIndex = (UINTN)(address & Level3) >> 30;
PdeIndex = (address & Level2) >> 21;
PteIndex = (address & Level1) >> 12;
DEBUG((EFI_D_ERROR, "Pagetable Chain causing the error\n"));
DEBUG((EFI_D_ERROR, "  Pagetable Address (CR3): 0x%llx\n", PageTable));
// The guest's paging structures live inside its own address space;
// every pointer is validated against [BaseAddress, EndAddress] and then
// relocated into the SmmBuffer copy before being dereferenced.
BaseAddress = (UINTN)PeVmData[VmType].UserModule.AddressSpaceStart;
EndAddress = BaseAddress + PeVmData[VmType].UserModule.AddressSpaceSize - 1;
PhysBase = (UINT64)PeVmData[VmType].SmmBuffer;
if(PageTable < BaseAddress || PageTable > (EndAddress - 4096))
{
DEBUG((EFI_D_ERROR, "    CR3 points out of VM region\n"));
goto endpf;
}
// find out where the page table is located in the VM memory
Offset = PageTable - BaseAddress;
Pml4 = (UINTN *) (PhysBase + Offset);
DEBUG((EFI_D_ERROR, "    Pml4[0x%x]: 0x%llx\n", Pml4Index, Pml4[Pml4Index]));
PdpteV = Pml4[Pml4Index] & PAGING_4K_ADDRESS_MASK_64;
if(PdpteV < BaseAddress || PdpteV > (EndAddress - 4096))
{
DEBUG((EFI_D_ERROR, "    Pdpte points out of VM region\n"));
goto endpf;
}
Offset = PdpteV - BaseAddress;
Pdpte = (UINTN *)(PhysBase + Offset);
DEBUG((EFI_D_ERROR, "    Pdpte[0x%x]: 0x%llx\n", PdpteIndex, Pdpte[PdpteIndex]));
PdeV = Pdpte[PdpteIndex] & PAGING_4K_ADDRESS_MASK_64;
if(PdeV < BaseAddress || PdeV > (EndAddress - 4096))
{
DEBUG((EFI_D_ERROR, "    Pde points out of VM region\n"));
goto endpf;
}
// A set PS bit means a large page - the walk stops at this level.
if(Pdpte[PdpteIndex] & IA32_PG_PS)
{
DEBUG((EFI_D_ERROR, "    Pdpte Index: [0x%x]\n", PdpteIndex));
DEBUG((EFI_D_ERROR, "    Pte Index: [0x%x]\n", PteIndex));
goto endpf;
}
Offset = PdeV - BaseAddress;
Pde = (UINTN *) (PhysBase + Offset);
DEBUG((EFI_D_ERROR, "    Pde[0x%x]: 0x%llx\n", PdeIndex, Pde[PdeIndex]));
PteV = Pde[PdeIndex] & PAGING_4K_ADDRESS_MASK_64;
if(PteV < BaseAddress || PteV > (EndAddress - 4096))
{
DEBUG((EFI_D_ERROR, "    Pte points out of VM region\n"));
goto endpf;
}
if(Pde[PdeIndex] & IA32_PG_PS)
{
DEBUG((EFI_D_ERROR, "    Pte Index: [0x%x]\n", PteIndex));
goto endpf;
}
Offset = PteV - BaseAddress;
Pte = (UINTN *) (PhysBase + Offset);
DEBUG((EFI_D_ERROR, "    Pte[0x%x]: 0x%llx\n", PteIndex, Pte[PteIndex]));
}
//DumpVmcsAllField();
endpf:
// Terminate the PE VM but keep its state for inspection.
PostPeVmProc(PE_VM_PAGE_FAULT, CpuIndex, PRESERVE_VM);
break;
}
default:
{
// Any other vector: log and terminate the PE VM.
DEBUG((EFI_D_ERROR, "%ld PeExceptionHandler - PE/VM Unhandled Exception @ 0x%04lx:0x%016llx Address: 0x%016llx Info: 0x%lx\n",
CpuIndex,
VmRead16 (VMCS_16_GUEST_CS_INDEX),
VmReadN(VMCS_N_GUEST_RIP_INDEX),
VmReadN(VMCS_N_RO_EXIT_QUALIFICATION_INDEX),
IntInfo.Uint32));
//DumpVmcsAllField();
PostPeVmProc(PE_VM_GP_FAULT, CpuIndex, PRESERVE_VM);
}
}
// should not get here
DEBUG((EFI_D_ERROR, "%ld PeExceptionHandler - CpuDeadLoop\n", CpuIndex));
CpuDeadLoop();
return;
}
// Invalid interruption info: log a warning and resume without action.
DEBUG((EFI_D_ERROR, "%ld PeExceptionHandler - Warning Info Valid bits not equal to 1 @ 0x%04lx:0x%016llx Address: 0x%016llx Info: 0x%lx\n",
CpuIndex,
VmRead16 (VMCS_16_GUEST_CS_INDEX),
VmReadN(VMCS_N_GUEST_RIP_INDEX),
VmReadN(VMCS_N_RO_EXIT_QUALIFICATION_INDEX),
IntInfo.Uint32));
return;
}
// very simple interrupt/event injection
// just routing what happened during the vmexit
// back to the virtual maching
//
// bug - still need to check for stuff such as can the VM be interrupted,
//
/**
  Reflect the event that caused the VMEXIT back into the guest.

  Copies the exit interruption information into the VM-entry interruption
  field, carries the instruction length along, and supplies the error code
  only when the hardware marked one as valid.

  @param Index    CPU index (currently unused).
  @param IntInfo  VM-exit interruption information to inject.
  @param IntErr   VM-exit interruption error code.

  @return 0 (always reports success for now).
**/
UINT32 EventInjection (UINT32 Index, VM_EXIT_INFO_INTERRUPTION IntInfo, UINT32 IntErr)
{
  UINT32 ExitInsLength = VmRead32(VMCS_32_RO_VMEXIT_INSTRUCTION_LENGTH_INDEX);

  VmWrite32(VMCS_32_CONTROL_VMENTRY_INTERRUPTION_INFO_INDEX, IntInfo.Uint32);
  VmWrite32(VMCS_32_CONTROL_VMENTRY_INSTRUCTION_LENGTH_INDEX, ExitInsLength);

  // Deliver the error code only when the exit info says one is valid.
  if (IntInfo.Bits.ErrorCodeValid == 1) {
    VmWrite32(VMCS_32_CONTROL_VMENTRY_EXCEPTION_ERROR_CODE_INDEX, IntErr);
  }

  return 0; // for now, always good return
}

View File

@ -0,0 +1,184 @@
/** @file
PE SMM handler - Handle VMEXITs from the running VM/PE
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php.
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
**/
#include "StmRuntime.h"
#include "PeStm.h"
#include "PeLoadVm.h"
extern void PeSmmVmcallHandler ( IN UINT32 Index);
extern void PeRsmHandler( IN UINT32 Index);
extern void PeIoHandler( IN UINT32 CpuIndex);
extern void PeEPTViolationHandler( IN UINT32 CpuIndex);
extern void PeEPTMisconfigurationHandler( IN UINT32 CpuIndex);
extern void PeInvEPTHandler( IN UINT32 CpuIndex);
extern void PeBadGuestStateHandler( IN UINT32 CpuIndex);
extern void PeReadMsrHandler( IN UINT32 CpuIndex);
extern void PeWriteMsrHandler( IN UINT32 CpuIndex);
extern void PeCrHandler( IN UINT32 CpuIndex);
extern void PeExceptionHandler( IN UINT32 CpuIndex);
extern void PeCpuidHandler( IN UINT32 CpuIndex);
extern void PePreEmptionTimerHandler(IN UINT32 CpuIndex);
extern void PeTripleFaultHandler(IN UINT32 CpuIndex);
void InitCpuReadySync();
STM_HANDLER mStmHandlerPeVm[VmExitReasonMax];
extern PE_SMI_CONTROL PeSmiControl;
/**
This function initialize STM/PE handle for SMM.
**/
VOID
PeInitStmHandlerSmm (
VOID
)
{
UINT32 Index;
/* initialize the remainder of the guest contexts for the smm handlers */
for(Index = SMI_HANDLER + 1; Index < NUM_PE_TYPE; Index++)
{
mGuestContextCommonSmm[Index].GuestContextPerCpu = AllocatePages (STM_SIZE_TO_PAGES(sizeof(STM_GUEST_CONTEXT_PER_CPU)) * mHostContextCommon.CpuNum);
}
DEBUG ((EFI_D_INFO, "PeInitStmHandlerSmm - initilizating PeSmmHandler Tables\n"));
for (Index = 0; Index < VmExitReasonMax; Index++) {
mStmHandlerPeVm[Index] = UnknownHandlerSmm;
}
mStmHandlerPeVm[VmExitReasonRsm] = PeRsmHandler;
mStmHandlerPeVm[VmExitReasonVmCall] = PeSmmVmcallHandler;
mStmHandlerPeVm[VmExitReasonExceptionNmi] = PeExceptionHandler;
mStmHandlerPeVm[VmExitReasonCrAccess] = PeCrHandler;
mStmHandlerPeVm[VmExitReasonEptViolation] = PeEPTViolationHandler;
mStmHandlerPeVm[VmExitReasonEptMisConfiguration] = PeEPTMisconfigurationHandler;
mStmHandlerPeVm[VmExitReasonInvEpt] = PeInvEPTHandler;
mStmHandlerPeVm[VmExitReasonIoInstruction] = PeIoHandler;
mStmHandlerPeVm[VmExitReasonCpuid] = PeCpuidHandler;
mStmHandlerPeVm[VmExitReasonRdmsr] = PeReadMsrHandler;
mStmHandlerPeVm[VmExitReasonWrmsr] = PeWriteMsrHandler;
mStmHandlerPeVm[VmExitReasonVmEntryFailureDueToInvalidGuestState] = PeBadGuestStateHandler;
mStmHandlerPeVm[VmExitReasonVmxPreEmptionTimerExpired] = PePreEmptionTimerHandler;
mStmHandlerPeVm[VmExitReasonTripleFault] = PeTripleFaultHandler;
DEBUG ((EFI_D_INFO, "PeInitStmHandlerSmm - PeSmmHandler Tables initialized\n"));
InitCpuReadySync();
DEBUG((EFI_D_INFO, "PeInitStmHandlerSmm - CpuReadySync Initialized\n"));
}
/**
  Top-level STM/PE VMEXIT dispatcher for SMM.

  Identifies the CPU and its guest VM type, saves the guest register
  context, dispatches the exit reason through mStmHandlerPeVm, and resumes
  the guest with VMRESUME (falling back to VMLAUNCH for a non-launched
  VMCS). A failed resume dumps diagnostics and dead-loops the CPU.

  @param Register  X86 register context captured at VMEXIT.
**/
VOID
PeStmHandlerSmm (
IN X86_REGISTER *Register
)
{
UINT32 Index;
UINTN Rflags;
VM_EXIT_INFO_BASIC InfoBasic;
X86_REGISTER *Reg;
UINT32 VmType;
UINT32 pIndex;
Index = ApicToIndex (ReadLocalApicId ());
VmType = mHostContextCommon.HostContextPerCpu[Index].GuestVmType; // any VmType other than SMI_HANDLER is a PeVm
// A PE VM keeps its single guest context at slot 0; only the (unexpected)
// SMI_HANDLER case indexes by CPU.
if(VmType != SMI_HANDLER)
pIndex = 0;
else
{
DEBUG((EFI_D_ERROR, "%ld PeStmHandlerSmm - Warning SMI_HANDLER type used in Pe handler\n", Index));
pIndex = Index;
}
// make sure no one fires an SMI our way
PeSmiControl.PeExec = 0;
PeSmiControl.PeNmiBreak = 1;
//STM_PERF_END (Index, "BiosSmmHandler", "StmHandlerSmm");
// Capture the guest RSP from the VMCS and snapshot the full register
// context into the per-VM storage the individual handlers operate on.
Reg = &mGuestContextCommonSmm[VmType].GuestContextPerCpu[pIndex].Register;
Register->Rsp = VmReadN (VMCS_N_GUEST_RSP_INDEX);
CopyMem (Reg, Register, sizeof(X86_REGISTER));//
//#if 0
//DEBUG ((EFI_D_INFO, "%ld PeStmHandlerSmm - Started\n", (UINTN)Index));
//#endif
//
// Dispatch
//
InfoBasic.Uint32 = VmRead32 (VMCS_32_RO_EXIT_REASON_INDEX);
//DEBUG((EFI_D_ERROR, "%d PeStmHandlerSmm - InfoBasic: 0x%0l8x Reason: %d\n", Index, InfoBasic.Uint32, InfoBasic.Bits.Reason));
// An exit reason outside the table is unrecoverable.
if (InfoBasic.Bits.Reason >= VmExitReasonMax) {
DEBUG ((EFI_D_ERROR, "%ld PeStmHandlerSmm - !!!Unknown VmExit Reason!!!\n", Index));
DumpVmcsAllField ();
DEBUG((EFI_D_ERROR, "%ld PeStmHandlerSmm - CpuDeadLoop\n", Index));
CpuDeadLoop ();
}
//
// Call dispatch handler
//
if(mStmHandlerPeVm[InfoBasic.Bits.Reason] == NULL)
{
DEBUG((EFI_D_INFO, "%ld PeStmHandlerSmm - ***WARNING*** mStmHandlerPeVm[%x] is NULL- aborting STM \n", Index, InfoBasic.Bits.Reason));
WriteUnaligned32 ((UINT32 *)&Reg->Rax, 0xFFFFFFFF);
VmWriteN (VMCS_N_GUEST_RIP_INDEX, VmReadN (VMCS_N_GUEST_RIP_INDEX) + VmRead32 (VMCS_32_RO_VMEXIT_INSTRUCTION_LENGTH_INDEX));
DEBUG((EFI_D_ERROR, "%ld PeStmHandlerSmm - CpuDeadLoop\n", Index));
CpuDeadLoop();
}
else
{
mStmHandlerPeVm[InfoBasic.Bits.Reason] (Index); // PE VM
}
VmWriteN (VMCS_N_GUEST_RSP_INDEX, Reg->Rsp); // sync RSP
// STM_PERF_START (Index, InfoBasic.Bits.Reason, "BiosSmmHandler", "StmHandlerSmm");
//
// Resume
//
// AsmVmResume only returns on failure; retry with VMLAUNCH when the VMCS
// was not in the launched state.
Rflags = AsmVmResume (&mGuestContextCommonSmm[VmType].GuestContextPerCpu[pIndex].Register);
// BUGBUG: - AsmVmLaunch if AsmVmResume fail
if (VmRead32 (VMCS_32_RO_VM_INSTRUCTION_ERROR_INDEX) == VmxFailErrorVmResumeWithNonLaunchedVmcs) {
// DEBUG ((EFI_D_ERROR, "(STM):-(\n", (UINTN)Index));
Rflags = AsmVmLaunch (&mGuestContextCommonSmm[VmType].GuestContextPerCpu[pIndex].Register);
}
// Reaching this point means the resume failed: dump everything under the
// debug lock and halt this processor.
AcquireSpinLock (&mHostContextCommon.DebugLock);
DEBUG ((EFI_D_ERROR, "%ld PeStmHandlerSmm - !!!ResumePeGuestSmm FAIL!!!\n", (UINTN)Index));
DEBUG ((EFI_D_ERROR, "%ld PeStmHandlerSmm - Rflags: %08x\n", Index, Rflags));
DEBUG ((EFI_D_ERROR, "%ld PeStmHandlerSmm - VMCS_32_RO_VM_INSTRUCTION_ERROR: %08x\n", Index, (UINTN)VmRead32 (VMCS_32_RO_VM_INSTRUCTION_ERROR_INDEX)));
DumpVmcsAllField ();
DumpRegContext (&mGuestContextCommonSmm[VmType].GuestContextPerCpu[pIndex].Register);
DumpGuestStack(Index);
ReleaseSpinLock (&mHostContextCommon.DebugLock);
DEBUG((EFI_D_ERROR, "%ld PeStmHandlerSmm - CpuDeadLoop\n", Index));
CpuDeadLoop ();
return ;
}

View File

@ -0,0 +1,105 @@
/** @file
IO Handler
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php.
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
**/
#include "StmRuntime.h"
#include "PeStm.h"
#include <Library/SerialPortLib.h>
#define NUMDEBLEN 200
extern SPIN_LOCK mInternalDebugLock; // have to make sure we do not step on debug statements
extern PE_VM_DATA PeVmData[4]; // right now support a max of 3 PE VM (VM 0 is the SMI_HANDLER)
extern UINTN
TranslateEPTGuestToHost (
IN UINT64 EptPointer,
IN UINTN Addr,
OUT EPT_ENTRY **EntryPtr OPTIONAL
);
/**
  I/O instruction VMEXIT handler for a PE VM.

  The only I/O a PE VM may perform is debug output via OUTSB/OUTSW/OUTSD
  (0x6E/0x6F) to port 0x3F8 or 0x3D8:
    RDX:    port (0x3F8 or 0x3D8)
    RCX:    number of bytes (sizes over NUMDEBLEN are truncated)
    DS:ESI  location in the PE/VM of the output (DS base assumed zero;
            do not use a REP loop)
  Any other port access is rejected with an error message.

  Fixes: the port is masked to 16 bits (OUT takes the port from DX, so
  stray upper RDX bits must not defeat the comparison); the bounds check
  is overflow-safe (GuestAddress + DataSize could previously wrap past the
  end comparison); the EPT translation result is checked before use.

  @param CpuIndex  Index of the CPU that took the VMEXIT.
**/
void PeIoHandler( IN UINT32 CpuIndex)
{
  X86_REGISTER *Reg;
  UINT32 VmType;
  UINT32 PortNumber;        // I/O port requested
  UINT64 GuestAddress;
  UINT64 GuestAddressEnd;
  UINT32 DataSize;
  UINTN PhysAddress;

  VmType = mHostContextCommon.HostContextPerCpu[CpuIndex].GuestVmType;
  Reg = &mGuestContextCommonSmm[VmType].GuestContextPerCpu[0].Register;

  // OUT/OUTS take the port from DX (16 bits); ignore the upper RDX bits.
  PortNumber = ReadUnaligned32((UINT32 *) &Reg->Rdx) & 0xFFFF;
  if((PortNumber == 0x3D8)||(PortNumber == 0x3F8))
  {
    UINT64 AddressSpaceStart = PeVmData[VmType].UserModule.AddressSpaceStart;
    UINT64 AddressSpaceEnd = PeVmData[VmType].UserModule.AddressSpaceStart + PeVmData[VmType].UserModule.AddressSpaceSize;

    GuestAddress = ReadUnaligned64((UINT64 *) &Reg->Rsi); // assume that DS Base is zero
    DataSize = ReadUnaligned32((UINT32 *) &Reg->Rcx);
    GuestAddressEnd = GuestAddress + DataSize;

    // Overflow-safe containment check: require the start to lie inside the
    // address space and the length to fit in the remaining span, instead of
    // comparing a potentially wrapped end pointer.
    if(GuestAddress < AddressSpaceStart ||
       GuestAddress >= AddressSpaceEnd ||
       (UINT64)DataSize > (AddressSpaceEnd - GuestAddress))
    {
      DEBUG((EFI_D_INFO, "%ld PeIoHandler - **ERROR** Requested serial output not within address space string: 0x%016llx:0x%016llx address range: 0x%016llx:0x%016llx\n",
        CpuIndex, GuestAddress, GuestAddressEnd, AddressSpaceStart, AddressSpaceEnd));
    }
    else
    {
      // Address within bounds - locate the string within SMRAM.
      PhysAddress = TranslateEPTGuestToHost(mGuestContextCommonSmm[VmType].EptPointer.Uint64, (UINTN)GuestAddress, 0L);
      if(PhysAddress == 0)
      {
        // NOTE(review): assumes a zero return means the translation
        // failed - confirm against TranslateEPTGuestToHost.
        DEBUG((EFI_D_ERROR, "%ld PeIoHandler - could not translate guest address 0x%016llx\n", CpuIndex, GuestAddress));
      }
      else
      {
        if(DataSize > NUMDEBLEN)
        {
          DataSize = NUMDEBLEN; // truncate oversized requests
        }
        AcquireSpinLock (&mInternalDebugLock);
        SerialPortWrite ((UINT8 *)"(VM/PE) ", sizeof("(VM/PE) ") - 1);
        SerialPortWrite ((UINT8 *) PhysAddress, DataSize);
        ReleaseSpinLock (&mInternalDebugLock);
      }
    }
  }
  else
  {
    DEBUG((EFI_D_ERROR, "%ld PeIoHandler - IO Port 0x%x not permitted\n", CpuIndex, PortNumber));
  }
  // need to bump the instruction counter to get past the I/O instruction
  VmWriteN (VMCS_N_GUEST_RIP_INDEX, VmReadN (VMCS_N_GUEST_RIP_INDEX) + VmRead32 (VMCS_32_RO_VMEXIT_INSTRUCTION_LENGTH_INDEX));
  return; // all done
}

View File

@ -0,0 +1,190 @@
/** @file
SMM MSR handler
Copyright (c) 2015 - 2016, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php.
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
**/
// most of this code was borrowed from the Intel driver
// howver, the only MSR of interest is the EFER MSR since that is needed to configure
// the guest VM for 64 bit
// may merge this code back into to Intel reference with VM/PE mods
#include "StmRuntime.h"
#include "PeStm.h"
/**
  RDMSR VMEXIT handler for a PE VM.

  Only IA32_EFER may be read by the PE VM; every other MSR reads back as
  zero because the PE VM is not generally allowed to see host MSR state.
  Advances the guest RIP past the RDMSR instruction before returning.

  Fixes: removed the unused SmmCpuState local; the MSR index is widened to
  UINT64 so the argument matches the %llx conversion.

  @param CpuIndex  Index of the CPU that took the VMEXIT.
**/
VOID
PeReadMsrHandler (
  IN UINT32 CpuIndex
  )
{
  UINT64 Data64;
  UINT32 MsrIndex;
  X86_REGISTER *Reg;
  UINT32 VmType = PE_PERM;
  UINT32 Index = 0; // PE/VM only has index as 0

  Reg = &mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Register;
  MsrIndex = ReadUnaligned32 ((UINT32 *)&Reg->Rcx);
  DEBUG ((EFI_D_INFO, "%ld ReadMsrHandler - 0x%llx\n", CpuIndex, (UINT64)MsrIndex));
  switch (MsrIndex) {
  case IA32_EFER_MSR_INDEX:
    // Return the shadowed EFER the STM maintains for this guest.
    Data64 = mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Efer;
    break;
#if 0
  case IA32_SYSENTER_CS_MSR_INDEX:
    Data64 = (UINT64)VmRead32 (VMCS_32_GUEST_IA32_SYSENTER_CS_INDEX);
    break;
  case IA32_SYSENTER_ESP_MSR_INDEX:
    Data64 = (UINT64)VmReadN (VMCS_N_GUEST_IA32_SYSENTER_ESP_INDEX);
    break;
  case IA32_SYSENTER_EIP_MSR_INDEX:
    Data64 = (UINT64)VmReadN (VMCS_N_GUEST_IA32_SYSENTER_EIP_INDEX);
    break;
  case IA32_FS_BASE_MSR_INDEX:
    Data64 = (UINT64)VmReadN (VMCS_N_GUEST_FS_BASE_INDEX);
    break;
  case IA32_GS_BASE_MSR_INDEX:
    Data64 = (UINT64)VmReadN (VMCS_N_GUEST_GS_BASE_INDEX);
    break;
#endif
  default:
    // since we do not allow the VM/PE to generally read MSRs
    // we return 0 for a read.
    Data64 = 0;
  }
  // RDMSR returns the value in EDX:EAX; upper 32 bits of each are cleared.
  Reg->Rax = (UINTN)(UINT32)Data64;  // HIGH bits are cleared
  Reg->Rdx = (UINTN)(UINT32)RShiftU64 (Data64, 32);  // HIGH bits are cleared
  VmWriteN (VMCS_N_GUEST_RIP_INDEX, VmReadN(VMCS_N_GUEST_RIP_INDEX) + VmRead32(VMCS_32_RO_VMEXIT_INSTRUCTION_LENGTH_INDEX));
  return ;
}
/**
  WRMSR VMEXIT handler for a PE VM.

  Only IA32_EFER may be written by the PE VM: the value is stored in the
  STM's shadow EFER, the VM-entry "IA-32e mode guest" control is kept in
  sync with EFER.LME and CR0.PG, and the guest EFER VMCS field is updated.
  Writes to any other MSR are logged and ignored. Advances the guest RIP
  past the WRMSR instruction before returning.

  Fixes: removed the unused SmmCpuState local; the MSR index is widened to
  UINT64 to match the %llx conversion; added the missing space in the
  default-case log tag.

  @param CpuIndex  Index of the CPU that took the VMEXIT.
**/
VOID
PeWriteMsrHandler (
  IN UINT32 CpuIndex
  )
{
  UINT64 Data64;
  UINT32 MsrIndex;
  VM_ENTRY_CONTROLS VmEntryControls;
  X86_REGISTER *Reg;
  UINT32 VmType = PE_PERM;
  UINT32 Index = 0; // PE VM only Index = 0

  Reg = &mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Register;
  MsrIndex = ReadUnaligned32 ((UINT32 *)&Reg->Rcx);
  // WRMSR supplies the value in EDX:EAX.
  Data64 = LShiftU64 ((UINT64)ReadUnaligned32 ((UINT32 *)&Reg->Rdx), 32) | (UINT64)ReadUnaligned32 ((UINT32 *)&Reg->Rax);
  DEBUG ((EFI_D_INFO, "%ld WriteMsrHandler - 0x%llx 0x%llx\n", CpuIndex, (UINT64)MsrIndex, Data64));
  switch (MsrIndex) {
  case IA32_EFER_MSR_INDEX:
#if 0
    AcquireSpinLock (&mHostContextCommon.DebugLock);
    if ((Data64 & IA32_EFER_MSR_SCE) != 0) {
      DEBUG ((EFI_D_INFO, "%ld WriteMsrHandler - SCE\n", CpuIndex,));
    }
    if ((Data64 & IA32_EFER_MSR_XDE) != 0) {
      DEBUG ((EFI_D_INFO, "%ld WriteMsrHandler - XDE\n", CpuIndex,));
    }
    ReleaseSpinLock (&mHostContextCommon.DebugLock);
#endif
    mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Efer = Data64;
    //
    // Check IA32e mode switch
    //
    VmEntryControls.Uint32 = VmRead32 (VMCS_32_CONTROL_VMENTRY_CONTROLS_INDEX);
    if ((Data64 & IA32_EFER_MSR_MLE) != 0) {
      mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Efer |= IA32_EFER_MSR_MLE;
    } else {
      mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Efer &= ~IA32_EFER_MSR_MLE;
    }
    // LMA is active (and the VM-entry control set) only when both LME and
    // paging are enabled.
    if (((mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Efer & IA32_EFER_MSR_MLE) != 0) &&
        ((mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Cr0 & CR0_PG) != 0)) {
      mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Efer |= IA32_EFER_MSR_MLA;
      VmEntryControls.Bits.Ia32eGuest = 1;
    } else {
      mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Efer &= ~IA32_EFER_MSR_MLA;
      VmEntryControls.Bits.Ia32eGuest = 0;
    }
    VmWrite32 (VMCS_32_CONTROL_VMENTRY_CONTROLS_INDEX, VmEntryControls.Uint32);
    VmWrite64 (VMCS_64_GUEST_IA32_EFER_INDEX, mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Efer);
    break;
#if 0
  case IA32_SYSENTER_CS_MSR_INDEX:
    VmWrite32 (VMCS_32_GUEST_IA32_SYSENTER_CS_INDEX, (UINT32)Data64);
    break;
  case IA32_SYSENTER_ESP_MSR_INDEX:
    VmWriteN (VMCS_N_GUEST_IA32_SYSENTER_ESP_INDEX, (UINTN)Data64);
    break;
  case IA32_SYSENTER_EIP_MSR_INDEX:
    VmWriteN (VMCS_N_GUEST_IA32_SYSENTER_EIP_INDEX, (UINTN)Data64);
    break;
  case IA32_FS_BASE_MSR_INDEX:
    VmWriteN (VMCS_N_GUEST_FS_BASE_INDEX, (UINTN)Data64);
    // AsmWriteMsr64 (MsrIndex, Data64); // VMM does not use FS
    break;
  case IA32_GS_BASE_MSR_INDEX:
    VmWriteN (VMCS_N_GUEST_GS_BASE_INDEX, (UINTN)Data64);
    // AsmWriteMsr64 (MsrIndex, Data64); // VMM does not use GS
    break;
  case IA32_KERNAL_GS_BASE_MSR_INDEX:
    AsmWriteMsr64 (MsrIndex, Data64); // VMM does not use this
    break;
  case IA32_STAR_MSR_INDEX:
    AsmWriteMsr64 (MsrIndex, Data64); // VMM does not use this
    break;
  case IA32_LSTAR_MSR_INDEX:
    AsmWriteMsr64 (MsrIndex, Data64); // VMM does not use this
    break;
  case IA32_FMASK_MSR_INDEX:
    AsmWriteMsr64 (MsrIndex, Data64); // VMM does not use this
    break;
#endif
  default:
    DEBUG ((EFI_D_INFO, "%ld WriteMsrHandler - VM/PE has no access to this MSR - ignoring\n", CpuIndex));
    break;
  }
  VmWriteN (VMCS_N_GUEST_RIP_INDEX, VmReadN(VMCS_N_GUEST_RIP_INDEX) + VmRead32(VMCS_32_RO_VMEXIT_INSTRUCTION_LENGTH_INDEX));
  return ;
}

View File

@ -0,0 +1,27 @@
/** @file
Preemption timer handler
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php.
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
**/
#include "StmRuntime.h"
#include "PeStm.h"
#include "PeLoadVm.h"
extern UINT64 EndTimeStamp;
extern UINT32 PostPeVmProc(UINT32 rc, UINT32 CpuIndex, UINT32 mode);
/**
  VM-exit handler invoked when the VMX preemption timer expires while a
  PE (protected execution) VM is running.

  Records the termination timestamp, dumps all VMCS fields for diagnosis,
  and hands the PE VM to PostPeVmProc with RELEASE_VM so it is torn down.

  @param CpuIndex  CPU index of the processor running the PE VM
**/
void PePreEmptionTimerHandler(IN UINT32 CpuIndex)
{
  EndTimeStamp = AsmReadTsc();
  // BUGFIX: cast CpuIndex (UINT32) to UINTN to match the %ld conversion,
  // consistent with the other handlers; also fix the "Premption" spelling.
  DEBUG((EFI_D_ERROR, "%ld - PE/VM terminated because of a Preemption Timer Runout \n", (UINTN)CpuIndex));
  DumpVmcsAllField();
  // Report PE_SUCCESS and release (destroy) the VM.
  PostPeVmProc(PE_SUCCESS, CpuIndex, RELEASE_VM);
}

View File

@ -0,0 +1,73 @@
/** @file
This function is RSM handler for PE.
@param CpuIndex CPU index
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php.
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
**/
#include "StmRuntime.h"
#include "PeStm.h"
#include "PeLoadVm.h"
extern UINT64 EndTimeStamp;
extern UINT32 PostPeVmProc(UINT32 rc, UINT32 CpuIndex, UINT32 mode);
/**
  RSM handler for a PE VM.

  Flushes (VMPTRST) the active PE-VM VMCS, makes the SMI guest VMCS current
  again, then performs PE-VM cleanup: a permanent PE VM (PE_PERM) is
  preserved for a later run, any other type is released. Control never
  returns; the function ends in CpuDeadLoop.

  @param CpuIndex  CPU index
**/
VOID
PeRsmHandler (
  IN UINT32 CpuIndex
  )
{
  UINTN Rflags;
  UINT32 VmType;
  UINT32 pCpuIndex;

  EndTimeStamp = AsmReadTsc();
  VmType = mHostContextCommon.HostContextPerCpu[CpuIndex].GuestVmType; // any VmType other than SMI_HANDLER is a PeVm
  if(VmType != SMI_HANDLER)
    pCpuIndex = 0; // PeVm is always zero
  else
    pCpuIndex = CpuIndex;

  AsmVmPtrStore (&mGuestContextCommonSmm[VmType].GuestContextPerCpu[pCpuIndex].Vmcs);
  Rflags = AsmVmPtrLoad (&mGuestContextCommonSmi.GuestContextPerCpu[pCpuIndex].Vmcs);
  if ((Rflags & (RFLAGS_CF | RFLAGS_ZF)) != 0) {
    // BUGFIX: report the VMCS that was actually loaded (pCpuIndex, not CpuIndex).
    DEBUG ((EFI_D_ERROR, "%ld PeRsmHandler - ERROR: AsmVmPtrLoad %016lx : %08x\n", (UINTN)CpuIndex, mGuestContextCommonSmi.GuestContextPerCpu[pCpuIndex].Vmcs, Rflags));
    // BUGFIX: the original DEBUG had a %ld conversion with no matching argument
    // (undefined behavior in the variadic print) - supply CpuIndex.
    DEBUG((EFI_D_ERROR, "%ld PeRsmHandler - CpuDeadLoop\n", (UINTN)CpuIndex));
    CpuDeadLoop ();
  }
  //STM_PERF_START (Index, 0, "ReadSyncSmmStateSaveArea", "RsmHandler");
  //ReadSyncSmmStateSaveArea (Index);
  //STM_PERF_END (Index, "ReadSyncSmmStateSaveArea", "RsmHandler");
  //#if 0
  DEBUG ((EFI_D_ERROR, "%ld PeRsmHandler start\n", (UINTN)CpuIndex));
  //#endif
  //STM_PERF_END (Index, "OsSmiHandler", "RsmHandler");

  // take care of any cleanup needed
  if(VmType == PE_PERM)
  {
    PostPeVmProc(PE_SUCCESS, CpuIndex, PRESERVE_VM);
  }
  else
  {
    PostPeVmProc(PE_SUCCESS, CpuIndex, RELEASE_VM);
  }
  // BUGFIX: cast for %ld (CpuIndex is UINT32), matching the other call sites.
  DEBUG((EFI_D_ERROR, "%ld PeRsmHandler CpuDeadLoop\n", (UINTN)CpuIndex));
  CpuDeadLoop ();
  return ;
}

View File

@ -0,0 +1,29 @@
/** @file
Triple Fault Handler
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php.
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
**/
#include "StmRuntime.h"
#include "PeStm.h"
#include "PeLoadVm.h"
extern UINT64 EndTimeStamp;
extern UINT32 PostPeVmProc(UINT32 rc, UINT32 CpuIndex, UINT32 mode);
/**
  Triple-fault handler for a PE VM.

  Records the termination timestamp and brings the VM down via PostPeVmProc
  with status PE_VM_TRIPLE_FAULT; PRESERVE_VM keeps the VM data for inspection.

  @param CpuIndex  CPU index
**/
void PeTripleFaultHandler( IN UINT32 CpuIndex)
{
  EndTimeStamp = AsmReadTsc();
  // BUGFIX: cast CpuIndex to UINTN for the %ld conversion; fix "occured" typo.
  DEBUG((EFI_D_ERROR, "%ld PeTripleFaultHandler - Triple Fault occurred in VM/PE - terminating\n", (UINTN)CpuIndex));
  PostPeVmProc(PE_VM_TRIPLE_FAULT, CpuIndex, PRESERVE_VM); // bring the VM down
  return;
}

View File

@ -0,0 +1,284 @@
/** @file
PE/SMM VMCALL Handler
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php.
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
**/
#include "StmRuntime.h"
#include "PeStm.h"
#include "PeStmEpt.h"
#include "VmcsOffsets.h"
/**
This function translate guest physical address to host address.
found in SmmEptHandler.c
@param EptPointer EPT pointer
@param Addr Guest physical address
@param EntryPtr EPT entry pointer
NULL on output means Entry not found.
@return Host physical address
**/
UINTN
TranslateEPTGuestToHost (
IN UINT64 EptPointer,
IN UINTN Addr,
OUT EPT_ENTRY **EntryPtr OPTIONAL
);
extern VMCSFIELDOFFSET VmcsFieldOffsetTable[];
extern void MapVmcs ();
extern PE_VM_DATA PeVmData[4];
/**
This function is the STM_API_MAP_ADDRESS_RANGEVMCALL handler for SMM VM/PE.
@param Index CPU index
@param AddressParameter Addresss parameter
@return VMCALL status
**/
STM_STATUS
PeSmmVmcallMapAddressRangeHandler (
  IN UINT32 Index,
  IN UINT64 AddressParameter
  )
{
  STM_MAP_ADDRESS_RANGE_DESCRIPTOR *MapAddressRangeDescriptor;
  STM_MAP_ADDRESS_RANGE_DESCRIPTOR LocalBuffer;
  UINT32 VmType = mHostContextCommon.HostContextPerCpu[Index].GuestVmType;
  UINTN PhysAddressParameter;
  UINTN PhysAddressParameterEnd;
  // Inclusive last guest-physical address of the PE module's address space.
  UINT64 GuestSmmEnd = PeVmData[VmType].UserModule.AddressSpaceStart + PeVmData[VmType].UserModule.AddressSpaceSize - 1;

  // Make sure the parameter address is with the part of the guest that is within SMRAM
  // NOTE(review): the third test compares an exclusive end (base + sizeof) against
  // the inclusive GuestSmmEnd with ">", so a descriptor ending exactly at
  // GuestSmmEnd is rejected - confirm whether that one-byte strictness is intended.
  if((AddressParameter < PeVmData[VmType].UserModule.AddressSpaceStart)||
    (AddressParameter > GuestSmmEnd) ||
    ((AddressParameter + sizeof(STM_MAP_ADDRESS_RANGE_DESCRIPTOR)) > GuestSmmEnd))
  {
    DEBUG ((EFI_D_ERROR, "%ld PeSmmVmcallMapAddressRangeHandler - Security Violation! - parameter block not in guest physical within SMRAM\n", Index));
    DEBUG ((EFI_D_ERROR, "%ld PeSmmVmcallMapAddressRangeHandler - AddressParameter = 0x%016llx",
      Index,
      AddressParameter));
    return ERROR_STM_SECURITY_VIOLATION;
  }

  // Translate both ends of the descriptor through the PE VM's EPT; a zero
  // result means the guest address does not translate.
  PhysAddressParameter = TranslateEPTGuestToHost(mGuestContextCommonSmm[VmType].EptPointer.Uint64, (UINTN)AddressParameter, 0L);
  PhysAddressParameterEnd = TranslateEPTGuestToHost(mGuestContextCommonSmm[VmType].EptPointer.Uint64, (UINTN)AddressParameter + sizeof(STM_MAP_ADDRESS_RANGE_DESCRIPTOR), 0L);
  DEBUG((EFI_D_INFO, "%ld PeSmmVmcallMapAddressRangeHandler - STM_API_MAP_ADDRESS_RANGE started\n", Index));

  // Reject when either end fails to translate or the descriptor straddles a page.
  if(((PhysAddressParameter == 0)||(PhysAddressParameterEnd == 0))||
    ((PhysAddressParameter & ~0xFFF) != (PhysAddressParameterEnd & ~0XFFF)))
  {
    // TODO - need to address the potential of having a parameter block split across two pages
    // currently the VM/PE is created as a single block...
    DEBUG ((EFI_D_ERROR, "%ld PeSmmVmcallMapAddressRangeHandler - Security Violation! - parameter block not in guest physical address space or split across two pages\n", Index));
    DEBUG ((EFI_D_ERROR, "%ld PeSmmVmcallMapAddressRangeHandler - PhysAddressParameter = 0x%016llx, PhysAddressParameterEnd = 0x%016llx\n",
      Index,
      PhysAddressParameter, PhysAddressParameterEnd));
    return ERROR_STM_SECURITY_VIOLATION;
  }
  //
  // Copy data to local, to prevent time of check VS time of use attack
  //
  CopyMem (&LocalBuffer, (VOID *)(UINTN)PhysAddressParameter, sizeof(LocalBuffer));
  MapAddressRangeDescriptor = (STM_MAP_ADDRESS_RANGE_DESCRIPTOR *)&LocalBuffer;
  DEBUG((EFI_D_ERROR, "%ld PeSmmVmcallMapAddressRangeHandler - MapAddressRange base: 0x%016llx Pages:0x%016llx\n", Index, MapAddressRangeDescriptor->PhysicalAddress, MapAddressRangeDescriptor->PageCount));

  // The requested range itself must be a guest address range the MLE may expose.
  if (!IsGuestAddressValid ((UINTN)MapAddressRangeDescriptor->PhysicalAddress, STM_PAGES_TO_SIZE(MapAddressRangeDescriptor->PageCount), TRUE))
  {
    DEBUG ((EFI_D_ERROR, "%ld PeSmmVmcallMapAddressRangeHandler [ Security Violation!\n", Index));
    return ERROR_STM_SECURITY_VIOLATION;
  }
  if (MapAddressRangeDescriptor->PageCount == 0)
  {
    DEBUG((EFI_D_ERROR, "%ld PeSmmVmcallMapAddressRangeHandler - Error - zero address range requested\n", Index));
    return ERROR_STM_PAGE_NOT_FOUND;
  }
  // Reject cache types the STM does not support: anything above UC other than
  // the FOLLOW_MTRR pseudo-type, and the values 2/3 (architecturally reserved
  // PAT encodings).
  if (((MapAddressRangeDescriptor->PatCacheType > UC) && (MapAddressRangeDescriptor->PatCacheType != FOLLOW_MTRR)) ||
    (MapAddressRangeDescriptor->PatCacheType == 2) ||
    (MapAddressRangeDescriptor->PatCacheType == 3) )
  {
    DEBUG((EFI_D_ERROR, "%ld PeSmmVmcallMapAddressRangeHandler - Error - STM cache type not supported\n", Index));
    return ERROR_STM_CACHE_TYPE_NOT_SUPPORTED;
  }
  // for VM/PE we map guest physcal to host physical - BUG, this should be consolodated...
  // Identity-map the requested range (guest == host address) into the PE VM's
  // EPT as read-only, non-writable, non-executable.
  EPTSetPageAttributeRange(
    mGuestContextCommonSmm[VmType].EptPointer.Uint64,
    (UINTN) MapAddressRangeDescriptor->PhysicalAddress,
    (UINTN) STM_PAGES_TO_SIZE(MapAddressRangeDescriptor->PageCount),
    (UINTN) MapAddressRangeDescriptor->PhysicalAddress,
    TRUE, /* Read */
    FALSE, /* Write */
    FALSE, /* Execute */
    EptPageAttributeSet);
  return STM_SUCCESS;
}
/* STM/PE get VMCS Map */

/**
  This function is the STM_API_GET_VMCS_MAP handler for a SMM VM/PE.

  Copies the VMCS field-encoding/offset table (VmcsFieldOffsetTable,
  terminated by FieldEncoding == 0xFFFF) into a caller-supplied buffer.

  @param Index             CPU index
  @param AddressParameter  Guest physical address of the caller's buffer

  @return VMCALL status
**/
STM_STATUS
PeSmmVmcallGetVmcsMap (
  IN UINT32 Index,
  IN UINT64 AddressParameter
  )
{
  UINT32 VmType = mHostContextCommon.HostContextPerCpu[Index].GuestVmType;
  UINTN PhysAddressParameter;
  UINTN PhysAddressParameterEnd;
  UINT32 count;
  void * VTable;

  // Figure out how big the VmcsFieldOffsetTable is (count the terminator too).
  for( count = 0;
    VmcsFieldOffsetTable[count].FieldEncoding != 0xFFFF;
    count++){}
  count++; // count the last element

  PhysAddressParameter = TranslateEPTGuestToHost(mGuestContextCommonSmm[VmType].EptPointer.Uint64, (UINTN)AddressParameter, 0L);
  PhysAddressParameterEnd = TranslateEPTGuestToHost(mGuestContextCommonSmm[VmType].EptPointer.Uint64, (UINTN)AddressParameter + (sizeof(VMCSFIELDOFFSET) * count), 0L);
  // EBX:ECX - STM_MAP_ADDRESS_RANGE_DESCRIPTOR
  DEBUG ((EFI_D_INFO, "%ld PE-STM_API_GET_VMCS_MAP: AddressParameter: 0x%016llx PhysAddressParameter: 0x%016llx\n",
    Index, AddressParameter, PhysAddressParameter));

  // BUGFIX: never CopyMem through an untranslated (zero) host address. The
  // original compiled this validation out entirely (#ifdef FIXME), which let a
  // guest-supplied unmapped address turn into a write toward physical page 0.
  if ((PhysAddressParameter == 0) || (PhysAddressParameterEnd == 0))
  {
    DEBUG ((EFI_D_ERROR, "%ld Security Violation! - parameter block not in guest physical address space\n",
      Index));
    return ERROR_STM_SECURITY_VIOLATION;
  }
  // bug bug - need to make sure address is in part of the app that is in SMRAM..
#ifdef FIXME
  // TODO - the table may legitimately straddle a page boundary; supporting that
  // requires page-by-page translation rather than this same-page check.
  if ((PhysAddressParameter & ~0xFFF) != (PhysAddressParameterEnd & ~0XFFF))
  {
    DEBUG ((EFI_D_ERROR, "%ld Security Violation! - parameter block split across two pages\n",
      Index));
    DEBUG ((EFI_D_ERROR, "%ld PhysAddressParameter = 0x%016llx, PhysAddressParameterEnd = 0x%016llx\n",
      Index,
      PhysAddressParameter, PhysAddressParameterEnd));
    return ERROR_STM_SECURITY_VIOLATION;
  }
#endif
  MapVmcs ();
  //
  // Copy data to local, to prevent time of check VS time of use attack
  //
  DEBUG((EFI_D_ERROR, "%ld Size of VMCSFIELDOFFSET buffer is 0x%lx\n", Index, (sizeof(VMCSFIELDOFFSET) * count)));
  VTable = (void *) VmcsFieldOffsetTable;
  CopyMem((VOID *)(UINTN)PhysAddressParameter, VTable , (sizeof(VMCSFIELDOFFSET) * count));
  return STM_SUCCESS;
}
/* VM/PE allowed vmcalls */
// Dispatch table mapping VMCALL function indices to handlers for a VM/PE
// guest; searched linearly by GetPeSmmVmcallHandlerByIndex().
STM_VMCALL_HANDLER_STRUCT mPeSmmVmcallHandler[] = {
  {STM_API_MAP_ADDRESS_RANGE, PeSmmVmcallMapAddressRangeHandler},
  {STM_API_GET_VMCS_MAP, PeSmmVmcallGetVmcsMap},
};
/* not defined yet for STM/PE VM/PE */
/*
{STM_API_UNMAP_ADDRESS_RANGE, },
{STM_API_ADDRESS_LOOKUP, },
{STM_API_RETURN_FROM_PROTECTION_EXCEPTION, },
*/
/**
  Looks up the VM/PE SMM VMCALL handler registered for a function index.

  Scans the mPeSmmVmcallHandler dispatch table for a matching FuncIndex.

  @param FuncIndex  VmCall function index

  @return The registered handler, or NULL if FuncIndex is not in the table
**/
STM_VMCALL_HANDLER
GetPeSmmVmcallHandlerByIndex (
  IN UINT32 FuncIndex
  )
{
  UINTN HandlerCount;
  UINTN Slot;

  HandlerCount = sizeof(mPeSmmVmcallHandler)/sizeof(mPeSmmVmcallHandler[0]);
  for (Slot = 0; Slot < HandlerCount; Slot++) {
    if (mPeSmmVmcallHandler[Slot].FuncIndex == FuncIndex) {
      return mPeSmmVmcallHandler[Slot].StmVmcallHandler;
    }
  }
  // No handler registered for this function index.
  return NULL;
}
/**
  VMCALL dispatcher for a VM/PE guest.

  Reads the function index from the guest's EAX, dispatches to the matching
  handler from mPeSmmVmcallHandler, reflects the handler status back in the
  guest's EAX and carry flag, and advances the guest RIP past the VMCALL.

  @param Index  CPU index
**/
VOID
PeSmmVmcallHandler (
  IN UINT32 Index
  )
{
  X86_REGISTER       *GuestRegs;
  STM_STATUS         Status;
  STM_VMCALL_HANDLER Handler;
  UINT64             AddressParameter;
  UINT32             VmType;
  UINT32             FuncIndex;
  UINTN              GuestRflags;

  //DEBUG((EFI_D_INFO, "%ld PeSmmVmcallHandler - start\n", Index));
  VmType = mHostContextCommon.HostContextPerCpu[Index].GuestVmType;
  GuestRegs = &mGuestContextCommonSmm[VmType].GuestContextPerCpu[0].Register;

  FuncIndex = ReadUnaligned32 ((UINT32 *)&GuestRegs->Rax);
  Handler = GetPeSmmVmcallHandlerByIndex (FuncIndex);
  if (Handler == NULL) {
    DEBUG ((EFI_D_INFO, "%ld PeSmmVmcallHandler - GetPeSmmVmcallHandlerByIndex - 0x%llx!\n",
      Index, (UINTN)FuncIndex));
    // Should not happen
    //CpuDeadLoop ();
    Status = ERROR_INVALID_API;
  } else {
    // EBX:ECX carry the 64-bit guest-physical parameter address.
    AddressParameter = ReadUnaligned32 ((UINT32 *)&GuestRegs->Rbx) + LShiftU64 (ReadUnaligned32 ((UINT32 *)&GuestRegs->Rcx), 32);
    Status = Handler (Index, AddressParameter);
  }

  // Return the status in guest EAX and mirror success/failure into CF.
  WriteUnaligned32 ((UINT32 *)&GuestRegs->Rax, Status);
  GuestRflags = VmReadN (VMCS_N_GUEST_RFLAGS_INDEX);
  if (Status == STM_SUCCESS) {
    GuestRflags &= ~RFLAGS_CF;
  } else {
    GuestRflags |= RFLAGS_CF;
  }
  VmWriteN (VMCS_N_GUEST_RFLAGS_INDEX, GuestRflags);

  // Step the guest RIP over the VMCALL instruction.
  VmWriteN (VMCS_N_GUEST_RIP_INDEX, VmReadN (VMCS_N_GUEST_RIP_INDEX) + VmRead32 (VMCS_32_RO_VMEXIT_INSTRUCTION_LENGTH_INDEX));
  return ;
}

378
Stm/StmPkg/Core/Runtime/PeStm.h Executable file
View File

@ -0,0 +1,378 @@
/** @file
STM PE Header file
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php.
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
**/
#ifndef _PESTM_H_
#define _PESTM_H_
// VM/PE PeSmiControl.PeSmiState state definitions
#define PESMINULL 0 // nothing happening
#define PESMIPSMI 1 // SMI sent by VM/PE startup to get cpu state
#define PESMIHSMI 2 // normal SMI processing
#define PESMIPNMI 3 // VM/PE needs an NMI sent for it to help process the host SMI
#define PESMIHTMR 4 // smi handler has detected an SMI timer
#define PESMIPNMI2 5 // NMI has been sent to the VM/PE, now waiting for its entry

// 4KB-page offset masks (32-bit and IA-32e widths) and the 4KB page size.
#define OFFSET_BITMASK_IA32_4K 0x00000FFF
#define OFFSET_BITMASK_IA32E_4K 0x0000000000000FFF
#define PAGE_SIZE_4K 4096ULL

// One entry of a region list: a memory area described by start address and size.
typedef struct
{
  UINT64 Address;
  UINT64 Size;
} PE_REGION_LIST;
// Module load information for a PE VM. The first part is supplied by the
// caller of AddPEVMtempVMCALL/AddPEVMpermVMCALL; fields after the marker
// comment below are STM-local bookkeeping.
typedef struct
{
  UINT64 ModuleAddress; // physical address of module to be loaded into PE/VM
  UINT64 ModuleLoadAddress; // guest physical address to load module in a PE/VM
  UINT32 ModuleSize; // size of module in bytes
  UINT32 ModuleEntryPoint; // entry point - relative offset to ModuleLoadAddress
  UINT64 AddressSpaceStart; // start of guest physical address space (page aligned)
  UINT32 AddressSpaceSize; // size of guest physical address space
  UINT32 VmConfig; // Options to the configuration of the PE/VM
  UINT64 Cr3Load; // CR3
  UINT64 SharedPage; // writeable pages for sharing between the PE Module and kernel space
  // can be multiple pages and is located in main memory
  PE_REGION_LIST *Segment; // list of read only regions (contained within a page)
  UINT32 SharedPageSize; // size of SharedPage/region
  UINT32 DoNotClearSize; // area at beginning of memory not to be cleared
  UINT64 ModuleDataSection; // Location of Module Data Section for VM/PE
  // data areas local to the STM go after this point
  UINT64 SharedStmPage; // page shared between PE/VM and the STM
  UINT64 RunCount; // count of runs starting with one (1)
  // UINTN DataRegionStart; // data space after text region
  UINTN DataRegionSize; // data space size
  UINTN FrontDataRegionSize; // data space size before text region
  UINTN DataRegionSmmLoc; // start location in SMM for data region
} PE_MODULE_INFO;
// options for VmConfig only for Perm VM
#define PERM_VM_CRASH_BREAKDOWN (1<<21) // if VM/PE crashes then breakdown
#define PERM_VM_RUN_ONCE (1<<20) // run once and delete
#define PERM_VM_ALLOW_TERM (1<<19) // allow the VM/PE to be terminated
#define PERM_VM_RUN_PERIODIC (1<<22) // run using SMI Timer
#define PERM_VM_CLEAR_MEMORY (1<<23) // clear HEAP before run
#define PERM_VM_SET_TEXT_RW (1<<24) // set the text area as RW ow W
#define PERM_VM_EXEC_HEAP (1<<25) // Allow Heap Execution
#define PERM_VM_INJECT_INT (1<<26) // VM/PE will handle Internal Interrupts

// Cross-thread control block coordinating SMI/NMI handling with a running
// PE VM; PeSmiState takes the PESMI* values defined at the top of this file.
typedef struct __PE_SMI_CONTROL
{
  SPIN_LOCK PeSmiControlLock; // guards this structure
  UINT32 PeNmiBreak; // when 1, a NMI has been sent to break the thread in PE_APIC_id
  UINT32 PeApicId; // APIC id of the thread that is executing the PE VM (NOTE(review): original comment truncated - confirm)
  UINT32 PeExec; // when 1 PE_APIC_ID is executing a PE VM (NOTE(review): original comment truncated - confirm)
  UINT32 PeSmiState; // SMI is sent to get processor state
  UINT32 PeWaitTimer; // if non-zero - waiting for timer and length of timeout
  INT32 PeCpuIndex; // CpuIndex of PeVm
} PE_SMI_CONTROL;
// Collection of guest-state VMCS fields; member names mirror the VMCS
// guest-state area (register state, segment selectors/bases/limits/access
// rights, interruptibility and activity state).
typedef struct _VMX_GUEST_VMCS_STRUCT
{
  UINTN GdtrBase;
  UINTN Rsp;
  UINTN Rip;
  UINTN Rflags;
  UINTN Cr0;
  UINTN Cr3;
  UINTN Cr4;
  UINTN Dr7;
  UINT16 CsSelector;
  UINT16 DsSelector;
  UINT16 EsSelector;
  UINT16 FsSelector;
  UINT16 GsSelector;
  UINT16 SsSelector;
  UINT16 TrSelector;
  UINT32 InterruptibilityState;
  UINT32 Smbase;
  UINT32 ActivityState;
  UINT64 DebugCtlFull;
  UINT64 VmcsLinkPointerFull;
  UINT64 IA32_Efer;
  UINT32 GdtrLimit;
  UINT32 LdtrAccessRights;
  // Per-segment base / access-rights / limit triples.
  UINTN CsBase;
  UINT32 CsAccessRights;
  UINT32 CsLimit;
  UINTN DsBase;
  UINT32 DsAccessRights;
  UINT32 DsLimit;
  UINTN EsBase;
  UINT32 EsAccessRights;
  UINT32 EsLimit;
  UINTN FsBase;
  UINT32 FsAccessRights;
  UINT32 FsLimit;
  UINTN GsBase;
  UINT32 GsAccessRights;
  UINT32 GsLimit;
  UINTN SsBase;
  UINT32 SsAccessRights;
  UINT32 SsLimit;
  UINTN TrBase;
  UINT32 TrAccessRights;
  UINT32 TrLimit;
} VMX_GUEST_VMCS_STRUCT;
// Guest VM Types
#define SMI_HANDLER 0 // the normal SMI handler guest
#define PE_PERM 1 // permanent PE VM (persists across runs)
#define PE_TEMP 2 // temporary PE VM (torn down after execution)
#define PE_OTHER 3
#define NUM_PE_TYPE 4

// Per-CPU guest context for a PE VM: guest registers plus the VMCS guest-state
// values the STM programs when (re)starting the VM. Fields after the
// "reinitialize" marker are reset on every restart.
typedef struct _PE_GUEST_CONTEXT_PER_CPU {
  X86_REGISTER Register;
  //IA32_DESCRIPTOR Gdtr;
  //IA32_DESCRIPTOR Idtr;
  //UINTN Cr0;
  //UINTN Cr3;
  //UINTN Cr4;
  UINTN Rip;
  UINTN Rsp;
  UINTN Rflags;
  //UINTN Stack;
  //UINT64 Efer;
  //BOOLEAN UnrestrictedGuest;
  //UINTN XStateBuffer;
  GUEST_INTERRUPTIBILITY_STATE InterruptibilityState;
  UINT32 ActivityState;
  UINT64 VmcsLinkPointerFull;
  UINT32 VmcsLinkPointerHigh;
  VM_EXIT_CONTROLS VmExitCtrls;
  VM_ENTRY_CONTROLS VmEntryCtrls;
  // For CPU support Save State in MSR, we need a place holder to save it in memory in advanced.
  // The reason is that when we switch to SMM guest, we lose the context in SMI guest.
  //STM_SMM_CPU_STATE *SmmCpuState;
  //VM_EXIT_INFO_BASIC InfoBasic; // hold info since we need that when return to SMI guest.
  //VM_EXIT_QUALIFICATION Qualification; // hold info since we need that when return to SMI guest.
  //UINT32 VmExitInstructionLength;
  //BOOLEAN Launched;
  //BOOLEAN Actived; // For SMM VMCS only, controlled by StartStmVMCALL
  //UINT64 Vmcs;
  //UINT32 GuestMsrEntryCount;
  //UINT64 GuestMsrEntryAddress;
#if defined (MDE_CPU_X64)
  // Need check alignment here because we need use FXSAVE/FXRESTORE buffer
  UINT32 Reserved;
#endif
  // Stuff we reinitialize upon every restart
  UINT16 CsSelector;
  UINTN CsBase;
  UINT32 CsLimit;
  UINT32 CsAccessRights; // defined by user input
  UINT16 DsSelector;
  UINTN DsBase;
  UINT32 DsLimit;
  UINT32 DsAccessRights;
  UINT16 EsSelector;
  UINTN EsBase;
  UINT32 EsLimit;
  UINT32 EsAccessRights;
  UINT16 FsSelector;
  UINTN FsBase;
  UINT32 FsLimit;
  UINT32 FsAccessRights;
  UINT16 GsSelector;
  UINTN GsBase;
  UINT32 GsLimit;
  UINT32 GsAccessRights;
  UINT16 SsSelector;
  UINTN SsBase;
  UINT32 SsLimit;
  UINT32 SsAccessRights;
  UINT16 TrSelector;
  UINTN TrBase;
  UINT32 TrLimit;
  UINT32 TrAccessRights;
  UINT16 LdtrSelector;
  UINTN LdtrBase;
  UINT32 LdtrLimit;
  UINT32 LdtrAccessRights;
  UINTN GdtrBase;
  UINT32 GdtrLimit;
  UINTN IdtrBase;
  UINT32 IdtrLimit;
} PE_GUEST_CONTEXT_PER_CPU;
// Guest context shared by a PE VM; a PE VM runs on a single CPU, so only one
// per-CPU context is embedded.
typedef struct _PE_GUEST_CONTEXT_COMMON {
  //EPT_POINTER EptPointer;
  //UINTN CompatiblePageTable;
  //UINTN CompatiblePaePageTable;
  //UINT64 MsrBitmap;
  //UINT64 IoBitmapA;
  //UINT64 IoBitmapB;
  //UINT32 Vmid;
  //UINTN ZeroXStateBuffer;
  //
  // BiosHwResourceRequirementsPtr: This is back up of BIOS resource - no ResourceListContinuation
  //
  //UINT64 BiosHwResourceRequirementsPtr;
  PE_GUEST_CONTEXT_PER_CPU GuestContextPerCpu; // for PE we need only one
} PE_GUEST_CONTEXT_COMMON;
// Host-side (STM) context for PE VM support: locks, platform layout
// (page tables, TSEG, PCI Express), logging, protected-resource lists and
// performance data.
typedef struct _PE_HOST_CONTEXT_COMMON {
  SPIN_LOCK DebugLock;
  SPIN_LOCK MemoryLock;
  SPIN_LOCK SmiVmcallLock;
  UINT32 CpuNum;
  UINT32 JoinedCpuNum;
  UINTN PageTable;
  IA32_DESCRIPTOR Gdtr;
  IA32_DESCRIPTOR Idtr;
  UINT64 HeapBottom;
  UINT64 HeapTop;
  UINT8 PhysicalAddressBits;
  //
  // BUGBUG: Assume only one segment for client system.
  //
  UINT64 PciExpressBaseAddress;
  UINT64 PciExpressLength;
  UINT64 VmcsDatabase;
  UINT32 TotalNumberProcessors;
  STM_HEADER *StmHeader;
  UINTN StmSize;
  UINT64 TsegBase;
  UINT64 TsegLength;
  //
  // Log
  //
  MLE_EVENT_LOG_STRUCTURE EventLog;
  //
  // ProtectedResource: This is back up of MLE resource - no ResourceListContinuation
  //
  MLE_PROTECTED_RESOURCE_STRUCTURE MleProtectedResource;
  //
  // ProtectedTrappedIoResource: This is cache for TrappedIoResource in MLE resource
  // For performance consideration only, because TrappedIoResource will be referred in each SMI.
  //
  MLE_PROTECTED_RESOURCE_STRUCTURE MleProtectedTrappedIoResource;
  //
  // Performance measurement
  //
  STM_PERF_DATA PerfData;
  STM_HOST_CONTEXT_PER_CPU HostContextPerCpu;
} PE_HOST_CONTEXT_COMMON;
// How the PE VM was started (PE_VM_DATA.StartMode).
#define PEVM_START_VMCALL 1
#define PEVM_START_SMI 2
#define PEVM_PRESTART_SMI 3

// Initial processor mode for the PE VM (PE_VM_DATA.PeCpuInitMode).
#define PEVM_INIT_16bit 1
#define PEVM_INIT_32bit 2
#define PEVM_INIT_64bit 3

// Complete per-VM record for one PE VM: the caller's module description,
// start/state bookkeeping, SMM-side buffers, and host/guest contexts.
typedef struct
{
  PE_MODULE_INFO UserModule;
  UINT32 StartMode; // either SMI or VMCALL
  UINT32 PeVmState;
  UINT32 PeCpuInitMode; // VM/PE initial processor start mode
  UINTN * SmmBuffer;
  UINTN SmmBufferSize;
  UINTN * SharedPageStm;
  PE_HOST_CONTEXT_COMMON HostState;
  PE_GUEST_CONTEXT_COMMON GuestState;
} PE_VM_DATA;

// Singly-linked free-list header for the STM heap allocator.
typedef struct HEAP_HEADER
{
  UINT64 BlockLength;
  struct HEAP_HEADER* NextBlock;
}HEAP_HEADER;
// Snapshot of a processor's root-mode VMX state (host and guest VMCS fields)
// filled in by GetRootVmxState; shared with the PE VM via its STM shared page.
typedef struct ROOT_VMX_STATE {
  UINT64 valid; // used by STM
  UINT64 VmcsType; // 1 - guest-VM being serviced by VMM
  // 2 - no current-VM active
  // 3 - guest-VM
  UINT64 Vmxon; // vmxon pointer - loaded at STM startup, should never change
  UINT64 ExecutiveVMCS;
  UINT64 LinkVMCS;
  UINT64 HostRootVMCS;
  // Host-state fields of the root VMCS.
  UINT64 RootHostCR0;
  UINT64 RootHostCR3;
  UINT64 RootHostCR4;
  UINT64 RootHostGDTRBase;
  UINT64 RootHostIDTRBase;
  UINT64 RootHostRSP;
  UINT64 RootHostRIP;
  UINT64 RootHostEPT; //read from memory
  // Guest-state fields of the root VMCS.
  UINT64 RootGuestCR0;
  UINT64 RootGuestCR3;
  UINT64 RootGuestCR4;
  UINT64 RootGuestGDTRBase;
  UINT64 RootGuestGDTRLimit;
  UINT64 RootGuestIDTRBase;
  UINT64 RootGuestIDTRLimit;
  UINT64 RootGuestRSP;
  UINT64 RootGuestRIP;
  UINT64 RootContEPT; // read from guest structure
  UINT32 VmxState; // either root VMX or guest VMX (VMX_STATE_ROOT / VMX_STATE_GUEST)
  UINT32 Padding;
} ROOT_VMX_STATE;

// Values for ROOT_VMX_STATE.VmxState.
#define VMX_STATE_ROOT 1
#define VMX_STATE_GUEST 2

// Fills RootState with the current processor's root VMX state.
void GetRootVmxState(UINT32 CpuIndex, ROOT_VMX_STATE * RootState);
#else
#endif

View File

@ -0,0 +1,449 @@
/** @file
Setup a VM/PE
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php.
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
**/
#include "StmRuntime.h"
#include "PeStm.h"
#include "PeLoadVm.h"
extern PE_VM_DATA PeVmData[4]; // right now support a max of 3 PE VM (VM 0 is the SMI_HANDLER)
extern int GetMultiProcessorState(UINT32 CpuIndex);
UINT32 SetupProtExecVm(UINT32 CpuIndex, UINT32 VM_Configuration, UINT32 mode, UINT32 PeType);
VOID InitPeGuestVmcs (IN UINT32 CpuIndex, IN UINT32 PeType, IN PE_GUEST_CONTEXT_PER_CPU *Vmcs);
// modes: NEW_VM - Create a new VM
// RESTART_VM - restart a saved VM at the load point
UINT32 GetMPState;
UINT32 SetupProtExecVm(UINT32 CpuIndex, UINT32 VM_Configuration, UINT32 mode, UINT32 PeType) {
UINT32 rc = PE_SUCCESS;
UINT32 GCS_AR;
UINT32 DS_AR;
UINT32 SegLimit;
UINTN CR0_config;
UINTN CR4_config;
UINT16 tr_access;
UINT16 ldtr_access;
UINT64 guest_efer = 0;
UINT64 Data64;
UINT16 cs_selector;
UINT16 ds_selector;
UINTN Rflags;
DEBUG((EFI_D_ERROR, "%ld SetupProtExecVm - CR3_Index: %lx VmConfig: %lx mode: %x\n",
CpuIndex, PeType, VM_Configuration, mode));
// make sure that we can jump over the calling instruction
// StmVmm->VmexitInstructionLen = (UINT32)vmxRead(VM_EXIT_INSTRUCTION_LENGTH);
// DEBUG((EFI_D_ERROR, " startSmiHandler2 GUEST_RIP %llx Ins Len %x\n", vmxRead(GUEST_RIP), StmVmm->VmexitInstructionLen));
//DEBUG((EFI_D_ERROR, " startSmiHandler2 return dump %llx\n", *(UINT64 *) (vmxRead(GUEST_RIP)& 0xFFFFFFFFF))); // should dump the calling location
// do some sanity checks on some of the user specified parameters before attempting the setup
// for now lets do this here - needs to be moved in a later version
//GetRootVmxState(CpuIndex, (ROOT_VMX_STATE *) PeVmData[PeType].SharedPageStm);
GetMPState = 0; // initialize, assume we have no problems
if(PeVmData[PeType].StartMode == PEVM_START_VMCALL)
{
// not necessary when we are started via smi
// note: in the case that a hardware SMI gets there before this can fire of an SMI to get the
// other processors state, we let SetupProtExecVm go ahead setup and start the VM. The waiting NMI
// will then shoot down the VM so that the hardware SMI can get handled
// If this happens, we will set a flag and obtain the processor state once the VM is restarted
// after the SMI is handled
if(GetMultiProcessorState(CpuIndex) == -1)
{
GetMPState = 1; // Indicate that we still need to get the processor state
}
}
if(NEW_VM == mode)
{
if((CS_D | CS_L) == (VM_Configuration & (CS_D | CS_L))) // CS_D and CS_L cannot be set at the same time
{
FreePE_DataStructures(PeType);
return(PE_VM_SETUP_ERROR_D_L); // change to just telling the caller that it can't be done
}
if((CS_L & VM_Configuration) && !(SET_IA32E & VM_Configuration))
{
FreePE_DataStructures(PeType);
return(PE_VM_SETUP_ERROR_IA32E_D); // hange to just telling the caller that it can't be done
}
//populateSmmSaveState(StmVmm, VmexitQualification); // is this necessary?
// set the vmcs pointer to at the smm montor (guest) vmcs
// Allocate 4k aligned memory for VMCS
if(mGuestContextCommonSmm[PeType].GuestContextPerCpu[0].Vmcs == 0L)
{
// memory has been released, so get some more
mGuestContextCommonSmm[PeType].GuestContextPerCpu[0].Vmcs = (UINT64) AllocatePages(2);//GetVmcsSize() / PAGE_SIZE);
if (0L == mGuestContextCommonSmm[PeType].GuestContextPerCpu[0].Vmcs)
{
DEBUG((EFI_D_ERROR, "%ld SetupProtExecVm - Failure allocating Prot execution VMCS memory\n", CpuIndex));
FreePE_DataStructures(PeType);
return(PE_VMCS_ALLOC_FAIL); // change to just telling the caller that it can't be done
}
// Initialize the VMCS area to be all zeros - bad things happen otherwise
// AllocatePages clears memor
DEBUG((EFI_D_ERROR, "%ld SetupProtExecVm - Allocated and cleared VMCS memory\n", CpuIndex));
}
DEBUG((EFI_D_ERROR, "%ld SetupProtExecVm - VMCS region allocated at %llx\n", CpuIndex, mGuestContextCommonSmm[PeType].GuestContextPerCpu[0].Vmcs));
// setup host and control vmcs here as we should only need to do this once
// the guest state stuff will be always reset, so we do that stuff later
// Write VMCS revision ID to VMCS memory
*(UINT32 *)(UINTN)mGuestContextCommonSmm[PeType].GuestContextPerCpu[0].Vmcs = (UINT32)AsmReadMsr64 (IA32_VMX_BASIC_MSR_INDEX) & 0xFFFFFFFF;
AsmVmPtrStore (&mGuestContextCommonSmi.GuestContextPerCpu[CpuIndex].Vmcs);
AsmVmClear(&mGuestContextCommonSmi.GuestContextPerCpu[CpuIndex].Vmcs);
Rflags = AsmVmClear(&mGuestContextCommonSmm[PeType].GuestContextPerCpu[0].Vmcs);
if ((Rflags & (RFLAGS_CF | RFLAGS_ZF)) != 0) {
DEBUG ((EFI_D_ERROR, "%ld SetupProtExecVm - ERROR: AsmVmClear - %016lx : %08x\n",
(UINTN)CpuIndex, mGuestContextCommonSmm[PeType].GuestContextPerCpu[0].Vmcs, Rflags));
FreePE_DataStructures(PeType);
return(PE_VMCS_ALLOC_FAIL); // change to just telling the caller that it can't be done
}
Rflags = AsmVmPtrLoad(&mGuestContextCommonSmm[PeType].GuestContextPerCpu[0].Vmcs); // make PE VMCS active
if ((Rflags & (RFLAGS_CF | RFLAGS_ZF)) != 0) {
DEBUG ((EFI_D_ERROR, "&ld SetupProtExecVm - ERROR: AsmVmPtrLoad - %016lx : %08x\n",
(UINTN)CpuIndex, mGuestContextCommonSmm[PeType].GuestContextPerCpu[0].Vmcs, Rflags));
FreePE_DataStructures(PeType);
return(PE_VMCS_ALLOC_FAIL); // change to just telling the caller that it can't be done
}
// Setup entry and exit controls
// VMENTRY CONTROLS Setup
//Data64 = AsmReadMsr64 (IA32_VMX_TRUE_ENTRY_CTLS_MSR_INDEX);
Data64 = AsmReadMsr64 (IA32_VMX_ENTRY_CTLS_MSR_INDEX);
PeVmData[PeType].GuestState.GuestContextPerCpu.VmEntryCtrls.Uint32 = (UINT32)Data64;
//VmEntryCtrls.Bits.Ia32eGuest = mHostContextCommon.HostContextPerCpu[CpuIndex].TxtProcessorSmmDescriptor->SmmEntryState.Intel64Mode;
PeVmData[PeType].GuestState.GuestContextPerCpu.VmEntryCtrls.Bits.EntryToSmm = 1;
PeVmData[PeType].GuestState.GuestContextPerCpu.VmEntryCtrls.Bits.LoadIA32_EFER = 1;
PeVmData[PeType].GuestState.GuestContextPerCpu.VmEntryCtrls.Bits.LoadDebugControls = 1;
// Upon receiving control due to an SMI, the STM shall save the contents of the IA32_PERF_GLOBAL_CTRL MSR, disable any
// enabled bits in the IA32_PERF_GLOBAL_CTRL MSR
PeVmData[PeType].GuestState.GuestContextPerCpu.VmEntryCtrls.Bits.LoadIA32_PERF_GLOBAL_CTRL = 0;
PeVmData[PeType].GuestState.GuestContextPerCpu.VmEntryCtrls.Uint32 &= (UINT32)RShiftU64 (Data64, 32);
// VMEXIT CONTROLS SETUP
//Data64 = AsmReadMsr64 (IA32_VMX_TRUE_EXIT_CTLS_MSR_INDEX);
Data64 = AsmReadMsr64 (IA32_VMX_EXIT_CTLS_MSR_INDEX);
PeVmData[PeType].GuestState.GuestContextPerCpu.VmExitCtrls.Uint32 = (UINT32)Data64;
PeVmData[PeType].GuestState.GuestContextPerCpu.VmExitCtrls.Bits.Ia32eHost = (sizeof(UINT64) == sizeof(UINTN));
// Upon receiving control due to an SMI, the STM shall save the contents of the IA32_PERF_GLOBAL_CTRL MSR, disable any
// enabled bits in the IA32_PERF_GLOBAL_CTRL MSR
PeVmData[PeType].GuestState.GuestContextPerCpu.VmExitCtrls.Bits.LoadIA32_PERF_GLOBAL_CTRL = 0;
PeVmData[PeType].GuestState.GuestContextPerCpu.VmExitCtrls.Bits.SaveIA32_EFER = 1;
PeVmData[PeType].GuestState.GuestContextPerCpu.VmExitCtrls.Bits.AcknowledgeInterrupt = 1;
PeVmData[PeType].GuestState.GuestContextPerCpu.VmExitCtrls.Uint32 &= (UINT32)RShiftU64 (Data64, 32);
if((VM_Configuration & SET_IA32E) && (PeVmData[PeType].GuestState.GuestContextPerCpu.VmExitCtrls.Bits.Ia32eHost == 1))
{
tr_access = 11;
//PeVmData[PeType].GuestState.GuestContextPerCpu.VmExitCtrls.Bits.Ia32eHost = 1;
PeVmData[PeType].GuestState.GuestContextPerCpu.VmEntryCtrls.Bits.Ia32eGuest = 1;
}
else
{
tr_access = 11; // 32-bit busy TSS in non IA-32e mode and 64 bit busy TSS for IA-32e mode (3 = 16 bit tss)
//PeVmData[PeType].GuestState.GuestContextPerCpu.VmExitCtrls.Bits.Ia32eHost =1;
PeVmData[PeType].GuestState.GuestContextPerCpu.VmEntryCtrls.Bits.Ia32eGuest = 0;
DEBUG((EFI_D_ERROR, "%ld SetupProtExecVm - WARNING - No IA32e Host\n", CpuIndex));
}
mGuestContextCommonSmm[PeType].IoBitmapA = 0; // (UINT64)IoBitmapA;
mGuestContextCommonSmm[PeType].IoBitmapB = 0; //(UINT64)IoBitmapB;
mGuestContextCommonSmm[PeType].MsrBitmap = 0; //(UINT64)MsrBitmapReadLow;
#define CS_SEL 0x38
#define CODE_SEL 0x08
#define TR_SEL 0x68
#define DEF_BASE 0x00
#define DEF_LIMIT 0xFFFF
#define DS_ACCESS 0xC093
#define SEG_G (1<<15) // segment granularity
#define SEG_Present (1<<7) // segment present
#define SEG_CODEDATA (1<<4) // segment code or data (zero means system)
// set tr_access bits
tr_access = tr_access | SEG_Present| SEG_G;
ldtr_access = (2<<0)| SEG_Present | SEG_G;
// setup the efer msr
if(VM_Configuration & SET_IA32E)
{
// enable IA32E
guest_efer |= IA32_EFER_MSR_MLE;
if((VM_Configuration & CR0_PG) == CR0_PG)
{
guest_efer |= IA32_EFER_MSR_MLA; // we are running unrestricted guest.. but need to test
// intel manual says that LMA must mirror CRO_PG
}
}
DEBUG((EFI_D_ERROR, "%ld SetupProtExecVm - guest_efer: 0x%llx\n", CpuIndex, guest_efer));
// setup the code segment access right
GCS_AR = (11<<0)|SEG_CODEDATA | SEG_Present; // code, (execute, read/accessed), present, granularity
DS_AR = (3<<0) | SEG_CODEDATA | SEG_Present;
if(CS_L & VM_Configuration)
{ // we are 64-bit mode
GCS_AR |= CS_L | SEG_G;
DS_AR |= SEG_G | CS_D;
SegLimit = 0xFFFFFFFF;
DEBUG((EFI_D_ERROR, "%ld SetupProtExecVm - Setting 64 bit mode\n", CpuIndex));
PeVmData[PeType].PeCpuInitMode = PEVM_INIT_64bit;
}
else
{
if(CS_D & VM_Configuration)
{ // We are 32-bit mode
GCS_AR |= CS_D | SEG_G;
DS_AR |= CS_D | SEG_G;
SegLimit = 0xFFFFFFFF;
DEBUG((EFI_D_ERROR, "%ld SetupProtExecVm - Setting 32 bit mode\n", CpuIndex));
PeVmData[PeType].PeCpuInitMode = PEVM_INIT_32bit;
}
else // we are 16-bit mode
{
SegLimit = 0xFFFF;
DEBUG((EFI_D_ERROR, "%ld SetupProtExecVm - Setting 16 bit mode\n", CpuIndex));
PeVmData[PeType].PeCpuInitMode = PEVM_INIT_16bit;
}
}
DEBUG((EFI_D_ERROR, "%ld SetupProtExecVm - GCS_AR: 0x%llx SegLimit 0x%llx\n", CpuIndex, GCS_AR, SegLimit));
// setup CR0 and CR4
// add fudge factors here
CR0_config = (UINTN)AsmReadMsr64 (IA32_VMX_CR0_FIXED0_MSR_INDEX);//CR0_WP ; // make sure that we set what is necessary
//CR0_config = 0; // above not necessary in unrestriced guests...
CR4_config = (UINTN)AsmReadMsr64 (IA32_VMX_CR4_FIXED0_MSR_INDEX);
// clear and set bits demanded by processor mode
#define CR4_PCIDE (1u <<17) // CpuDef.h does not have this
if(VM_Configuration & SET_IA32E)
{
CR0_config |= CR0_PG; // has to be on in IA32E mode
CR4_config |= CR4_PAE; // has to be on in IA32E mode
}
else
{
CR0_config &= ~(CR0_PG|CR0_PE); // turn these guy off
CR4_config &= ~CR4_PCIDE; // must be turned off when IA32E is off
}
// set bits desired by user
CR0_config |= (CR0_PG | CR0_PE) & VM_Configuration;
// make sure the user does not shoot himself in the foot
if(VM_Configuration & CR0_PG)
CR0_config |= CR0_PE;
if(VM_Configuration & SET_CR4_PAE)
CR4_config |= CR4_PAE;
CR4_config |= CR4_PSE & VM_Configuration;
if(CR0_config & CR0_PE) // are we using the segment registers as selectors
{
cs_selector = CS_SEL;
ds_selector = CODE_SEL;
}
else
{
// real mode stuff set the segment registers to 0x000000
cs_selector = 0;
ds_selector = 0;
}
CR0_config &= (UINTN)AsmReadMsr64 (IA32_VMX_CR0_FIXED1_MSR_INDEX); // make sure that only these one bits can be set
CR4_config &= (UINTN)AsmReadMsr64 (IA32_VMX_CR4_FIXED1_MSR_INDEX);
DEBUG((EFI_D_ERROR, "%ld SetupProtExecVm - Setting GUEST_CR0: %llx GUEST_CR4: %llx\n", CpuIndex, CR0_config, CR4_config));
///
//GuestRegionVmcs = PeVmData[CR3index].GuestRegionVmcs;
PeVmData[PeType].GuestState.GuestContextPerCpu.CsSelector = cs_selector;
PeVmData[PeType].GuestState.GuestContextPerCpu.CsBase = DEF_BASE;
PeVmData[PeType].GuestState.GuestContextPerCpu.CsLimit = SegLimit;
PeVmData[PeType].GuestState.GuestContextPerCpu.CsAccessRights = GCS_AR; // defined by user input
PeVmData[PeType].GuestState.GuestContextPerCpu.DsSelector = ds_selector;
PeVmData[PeType].GuestState.GuestContextPerCpu.DsBase = DEF_BASE;
PeVmData[PeType].GuestState.GuestContextPerCpu.DsLimit = SegLimit;
PeVmData[PeType].GuestState.GuestContextPerCpu.DsAccessRights = DS_AR;
PeVmData[PeType].GuestState.GuestContextPerCpu.EsSelector = ds_selector;
PeVmData[PeType].GuestState.GuestContextPerCpu.EsBase = DEF_BASE;
PeVmData[PeType].GuestState.GuestContextPerCpu.EsLimit = SegLimit;
PeVmData[PeType].GuestState.GuestContextPerCpu.EsAccessRights = DS_AR;
PeVmData[PeType].GuestState.GuestContextPerCpu.FsSelector = ds_selector;
PeVmData[PeType].GuestState.GuestContextPerCpu.FsBase = DEF_BASE;
PeVmData[PeType].GuestState.GuestContextPerCpu.FsLimit = SegLimit;
PeVmData[PeType].GuestState.GuestContextPerCpu.FsAccessRights = DS_AR;
PeVmData[PeType].GuestState.GuestContextPerCpu.GsSelector = ds_selector;
PeVmData[PeType].GuestState.GuestContextPerCpu.GsBase = DEF_BASE;
PeVmData[PeType].GuestState.GuestContextPerCpu.GsLimit = SegLimit;
PeVmData[PeType].GuestState.GuestContextPerCpu.GsAccessRights = DS_AR;
PeVmData[PeType].GuestState.GuestContextPerCpu.SsSelector = ds_selector;
PeVmData[PeType].GuestState.GuestContextPerCpu.SsBase = DEF_BASE;
PeVmData[PeType].GuestState.GuestContextPerCpu.SsLimit = SegLimit;
PeVmData[PeType].GuestState.GuestContextPerCpu.SsAccessRights = DS_AR;
PeVmData[PeType].GuestState.GuestContextPerCpu.TrSelector = TR_SEL;
PeVmData[PeType].GuestState.GuestContextPerCpu.TrBase = DEF_BASE;
PeVmData[PeType].GuestState.GuestContextPerCpu.TrLimit = DEF_LIMIT;
PeVmData[PeType].GuestState.GuestContextPerCpu.TrAccessRights = tr_access;
PeVmData[PeType].GuestState.GuestContextPerCpu.LdtrSelector = TR_SEL;
PeVmData[PeType].GuestState.GuestContextPerCpu.LdtrBase = DEF_BASE;
PeVmData[PeType].GuestState.GuestContextPerCpu.LdtrLimit = DEF_LIMIT;
{
PeVmData[PeType].GuestState.GuestContextPerCpu.GdtrLimit = DEF_LIMIT;
PeVmData[PeType].GuestState.GuestContextPerCpu.IdtrLimit = DEF_LIMIT;
}
PeVmData[PeType].GuestState.GuestContextPerCpu.LdtrAccessRights = ldtr_access;
PeVmData[PeType].GuestState.GuestContextPerCpu.GdtrBase = DEF_BASE;
//PeVmData[PeType].GuestState.GuestContextPerCpu.GdtrLimit = DEF_LIMIT;
#define DataSegType 0x3 // Segment type - data, read, write, accessed
#define CodeSegType 0xb // Segment type - code, read, write, accessed
#define CodeDataDescriptorType (1<<4) // S - Descriptor Type: code or data
#define SegmentPresent (1<<7) // P - segment is present in memory
#define SegmentAVL (1<<12) // AVL - Available for use by system software
#define Segment32bit (1<<14) // D/B - 1 = 32 bit segment
#define Granularity (1<<15) // G - Granularity (1 = 4096)
if(PeVmData[PeType].PeCpuInitMode == PEVM_INIT_16bit)
{
// now setup the (big) real mode representation
UINT32 CodeAR32bit = CodeSegType|CodeDataDescriptorType|SegmentPresent|SegmentAVL|Segment32bit|Granularity;
UINT32 DataAR32bit = DataSegType|CodeDataDescriptorType|SegmentPresent|SegmentAVL|Segment32bit|Granularity;
UINT32 Limit32bit = 0xFFFFF;
UINT32 Base32bit = 0;
// if he is asking for big real mode (both code and data are 32-bit)
// then CR0 and CR4 must be setup as if they are in real mode
CR0_config = CR0_config & ~(CR0_PE | CR0_PG); // set real mode
CR4_config = 0; // set real mode
PeVmData[PeType].GuestState.GuestContextPerCpu.Rflags = 0x2;
PeVmData[PeType].GuestState.GuestContextPerCpu.CsBase = Base32bit;
PeVmData[PeType].GuestState.GuestContextPerCpu.CsLimit = Limit32bit;
PeVmData[PeType].GuestState.GuestContextPerCpu.CsAccessRights = CodeAR32bit; // defined by user input
PeVmData[PeType].GuestState.GuestContextPerCpu.DsBase = Base32bit;
PeVmData[PeType].GuestState.GuestContextPerCpu.DsLimit = Limit32bit;
PeVmData[PeType].GuestState.GuestContextPerCpu.DsAccessRights = DataAR32bit;
PeVmData[PeType].GuestState.GuestContextPerCpu.SsBase = Base32bit;
PeVmData[PeType].GuestState.GuestContextPerCpu.SsLimit = Limit32bit;
PeVmData[PeType].GuestState.GuestContextPerCpu.SsAccessRights = DataAR32bit;
}
PeVmData[PeType].GuestState.GuestContextPerCpu.Rip = (UINTN)(PeVmData[PeType].UserModule.ModuleEntryPoint + PeVmData[PeType].UserModule.ModuleLoadAddress); // module entry point;
PeVmData[PeType].GuestState.GuestContextPerCpu.IdtrBase = DEF_BASE;
//PeVmData[PeType].GuestState.GuestContextPerCpu.IdtrLimit = DEF_LIMIT;
//PeVmData[PeType].GuestState.GuestContextPerCpu.Rip = (UINTN)(PeVmData[PeType].UserModule.ModuleEntryPoint + PeVmData[PeType].UserModule.ModuleLoadAddress); // module entry point;
PeVmData[PeType].GuestState.GuestContextPerCpu.Rsp = 0x0;
//! \todo Don't set up the guest stack - let him do it himself (so his stack isn't in MSEG)
PeVmData[PeType].GuestState.GuestContextPerCpu.Rflags = 0x02; // bit1 defaults to 1
mGuestContextCommonSmm[PeType].GuestContextPerCpu[0].Cr0 = CR0_config;
mGuestContextCommonSmm[PeType].GuestContextPerCpu[0].Cr3 = (UINTN)PeVmData[PeType].UserModule.Cr3Load;
mGuestContextCommonSmm[PeType].GuestContextPerCpu[0].Cr4 = CR4_config;
PeVmData[PeType].GuestState.GuestContextPerCpu.ActivityState = GUEST_ACTIVITY_STATE_ACTIVE;
PeVmData[PeType].GuestState.GuestContextPerCpu.InterruptibilityState.Uint32 = 0;
PeVmData[PeType].GuestState.GuestContextPerCpu.InterruptibilityState.Bits.BlockingBySmi = 1; // We allow NMI to cause a VM exit
PeVmData[PeType].GuestState.GuestContextPerCpu.VmcsLinkPointerFull = 0xFFFFFFFFFFFFFFFF;
PeVmData[PeType].GuestState.GuestContextPerCpu.VmcsLinkPointerHigh = 0xFFFFFFFF;
mGuestContextCommonSmm[PeType].GuestContextPerCpu[0].Efer = guest_efer;
}
else
{
// here we restart the PE VM
//AsmVmClear(&StmVmm->SmmMonitorVmcsPtr ); // de-couple vmcs region (check on)
AsmVmPtrLoad(&mGuestContextCommonSmm[PeType].GuestContextPerCpu[0].Vmcs);
}
// we always reinitialize the guest region upon every restart of the VM
//GuestRegionVmcs = PeVmData[CR3index].GuestRegionVmcs;
InitPeGuestVmcs( CpuIndex, PeType, &PeVmData[PeType].GuestState.GuestContextPerCpu);
// make sure that page faults are turned off
// Setup the page fault controls
// also, allow for double fault exits
{
UINT32 ExceptionBitmap;
UINT32 PageFaultErrorCodeMask = 0; // inequality in these two means that bit 14 does not exit
UINT32 PageFaultErrorCodeMatch = 0;
if((PE_VM_EXCEPTION_HANDLING & VM_Configuration) & PE_VM_EXCEPTION_HANDLING)
{
ExceptionBitmap = (1<<14) | (1<<8); // vmexit on page fault and double fault
}
else
{
ExceptionBitmap = 0xFFFFFFFF; // vmexit on any exception
}
VmWrite32 (VMCS_32_CONTROL_EXCEPTION_BITMAP_INDEX, ExceptionBitmap);
VmWrite32 (VMCS_32_CONTROL_PAGE_FAULT_ERROR_CODE_MASK_INDEX, PageFaultErrorCodeMask);
VmWrite32 (VMCS_32_CONTROL_PAGE_FAULT_ERROR_CODE_MATCH_INDEX, PageFaultErrorCodeMatch);
}
DEBUG((EFI_D_ERROR, "%ld SetupProtExecVm - Guest CS access rights %llx\n", CpuIndex, VmRead32(VMCS_32_GUEST_CS_ACCESS_RIGHT_INDEX)));
return rc;
}

View File

@ -0,0 +1,478 @@
/** @file
Gather the hardware state to be passed to the VM/PE for analysis
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php.
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
**/
#include "StmRuntime.h"
#include "PeStm.h"
extern PE_VM_DATA PeVmData[4]; // right now support a max of 3 PE VM (VM 0 is the SMI_HANDLER)
extern PE_SMI_CONTROL PeSmiControl;
extern void SendSmiToOtherProcessors(UINT32 CpuIndex);
extern VOID CpuReadySync(UINT32 Index);
extern void MapVmcs ();
extern UINT32 GetVmcsOffset( UINT32 field_encoding);
void SetupGetRootVmxState();
void PrintVmxState(UINT32 CpuIndex, ROOT_VMX_STATE * RootState);
static UINT32 SetupGetRootVmxStateDone = 0;
static UINT64 VMCS_N_HOST_CR0_OFFSET = 0;
static UINT64 VMCS_N_HOST_CR3_OFFSET = 0;
static UINT64 VMCS_N_HOST_CR4_OFFSET = 0;
static UINT64 VMCS_N_HOST_GDTR_BASE_OFFSET = 0;
static UINT64 VMCS_N_HOST_IDTR_BASE_OFFSET = 0;
static UINT64 VMCS_N_HOST_RSP_OFFSET = 0;
static UINT64 VMCS_N_HOST_RIP_OFFSET = 0;
static UINT64 VMCS_64_CONTROL_EPT_PTR_OFFSET = 0;
static UINT64 VMCS_N_GUEST_RIP_OFFSET = 0;
static UINT64 VMCS_N_GUEST_CR0_OFFSET = 0;
static UINT64 VMCS_N_GUEST_CR3_OFFSET = 0;
static UINT64 VMCS_N_GUEST_CR4_OFFSET = 0;
static UINT64 VMCS_N_GUEST_GDTR_BASE_OFFSET = 0;
static UINT64 VMCS_32_GUEST_GDTR_LIMIT_OFFSET = 0;
static UINT64 VMCS_N_GUEST_IDTR_BASE_OFFSET = 0;
static UINT64 VMCS_32_GUEST_LDTR_LIMIT_OFFSET = 0;
static UINT64 VMCS_N_GUEST_RSP_OFFSET = 0;
static UINT64 VMCS_64_CONTROL_EXECUTIVE_VMCS_PTR_OFFSET = 0;
static UINT64 VMCS_64_GUEST_VMCS_LINK_PTR_OFFSET = 0;
static UINT64 VMCS_OFFSET_READY = 0;
/**
  Gather the root VMX state of every logical processor into the permanent
  PE VM's shared page so the module running there can analyze it.

  Shared page layout (PeVmData[PE_PERM].SharedPageStm):
    - first 64 bytes: a UINT64 processor count (remainder of the 64 bytes
      is padding/alignment - see the "+ 64" below)
    - then one ROOT_VMX_STATE record per CPU, filled in by each processor.

  Flow: clear the records, claim the PeSmiState gate, SMI the other CPUs,
  rendezvous, capture this CPU's state, rendezvous again once everyone has
  written its record, then dump all records.

  NOTE(review): GetRootVmxState() is called here but its definition appears
  later in this file with no forward prototype - relies on an implicit
  declaration; confirm a prototype exists or add one.

  @param CpuIndex  Index of the CPU initiating the state gather.

  @retval  0   State gathered for every CPU.
  @retval -1   Another SMI handler already owns PeSmiState - aborted.
  @retval -2   SharedPageStm is NULL (no shared page) - nothing gathered.
**/
int GetMultiProcessorState(UINT32 CpuIndex)
{
UINT32 PeType = PE_PERM;
UINT64 * NumProcessors = (UINT64 *) PeVmData[PeType].SharedPageStm;
ROOT_VMX_STATE * RootState; // = (ROOT_VMX_STATE *) (NumProcessors + sizeof(*NumProcessors));
UINT32 CpuNum;
DEBUG((EFI_D_ERROR, "%ld GetMultiProcessorState - Started\n", CpuIndex));
if(PeVmData[PeType].SharedPageStm == NULL)
{
DEBUG((EFI_D_ERROR, "%ld GetMultiProcessorState - SharedPageStm is NULL, not gathering state\n", CpuIndex));
return -2;
}
// first clear out the data structures and set the number of processors
// records start 64 bytes into the page, after the processor count
RootState = (ROOT_VMX_STATE *) ((char *)NumProcessors + 64 );//sizeof(*NumProcessors) + sizeof(*NumProcessors));
*NumProcessors = mHostContextCommon.CpuNum; // number of CPUs
ZeroMem ((VOID *)(UINTN) RootState, sizeof(ROOT_VMX_STATE) * mHostContextCommon.CpuNum);
// make sure that the VMCS offsets are setup
SetupGetRootVmxState();
// send an SMI to the other processors
// atomically claim the gate: only proceed if PeSmiState was PESMINULL
// (idle); on success it becomes PESMIPSMI so a concurrent SMI backs off
if(InterlockedCompareExchange32(&PeSmiControl.PeSmiState, PESMINULL, PESMIPSMI) != PESMINULL) //&PeSmiControl.PeSmiState = 1;
{
DEBUG((EFI_D_ERROR, "%ld x - Aborting, SMI handler already there. PeSmiState %ld\n", CpuIndex, PeSmiControl.PeSmiState));
return -1; // need to tell about smi handler is already there
}
SendSmiToOtherProcessors(CpuIndex);
// wait for the other processors to sync up and decide what to do
CpuReadySync(CpuIndex);
// get the local processor state
//CpuReadySync(CpuIndex);
GetRootVmxState(CpuIndex, &RootState[CpuIndex]);
// need to think about this --- without it this hangs, what in context of other processors
//InterlockedCompareExchange32(&PeSmiControl.PeSmiState, PESMIPSMI, PESMINULL);//PeSmiControl.PeSmiState = 0; // all done - may need to sync processors in the case of
// another SMI coming in
CpuReadySync(CpuIndex); // wait for everyone to finish the job - PeSmiHandler will set PeSmiState to 0
// once everyone has synched up
// all records are valid now; dump every CPU's captured state
for(CpuNum = 0; CpuNum < mHostContextCommon.CpuNum; CpuNum++)
{
PrintVmxState(CpuNum, &RootState[CpuNum]);
}
DEBUG((EFI_D_ERROR, "%ld GetMultiProcessorState - Completed. PeSmiState: %ld\n", CpuIndex, PeSmiControl.PeSmiState));
return 0;
}
#define MAXVMCSFLUSH 6
#define VmcsSizeInPages 1
/**
  Capture this processor's root VMX state into RootState.

  Reads the control/guest fields of the current VMCS via VMREAD, determines
  which VMCS actually holds the root/host state (executive-VMCS vs.
  VMCS-link pointer, per SDM section 34.15.4.7), forces the processor's
  cached working VMCS out to memory by loading dummy VMCSes until the
  in-memory image matches the VMREAD value, and finally reads the host-state
  fields directly from the in-memory VMCS image using the offsets computed
  by SetupGetRootVmxState().

  Assumes VMCS physical addresses are identity-mapped so they can be
  dereferenced as pointers - TODO confirm (MapVmcs() presumably provides
  this mapping).

  @param CpuIndex   Index of the CPU whose state is being captured.
  @param RootState  Output record; RootState->valid is set to 1 on completion.
**/
void GetRootVmxState(UINT32 CpuIndex, ROOT_VMX_STATE * RootState)
{
//UINT64 ExecutiveVMCS;
UINT64 HostRootVMCS;
UINT64 CurrentVMCSSave;
// *_M variables hold values read from the in-MEMORY VMCS image, for
// comparison against the VMREAD (in-processor) values above
UINT64 RootGuestCR0_M;
UINT64 RootGuestCR3_M;
UINT64 RootGuestCR4_M;
UINT64 RootGuestGDTRBase_M;
UINT64 RootGuestGDTRLimit_M;
UINT64 RootGuestIDTRBase_M;
UINT64 RootGuestIDTRLimit_M;
UINT64 RootGuestRSP_M;
UINT64 RootGuestRIP_M;
UINT64 RootContExecVmcs_M;
UINT64 RootContLinkVmcs_M;
UINT32 FlushCount;
UINT32 i;
char * DummyVmcs[MAXVMCSFLUSH];
UINT32 VmxRevId;
RootState->Vmxon = mHostContextCommon.HostContextPerCpu[CpuIndex].Vmxon;
//UINT32 ApicId = (UINT32) (get_apic_id() & 0xFF);
RootState->LinkVMCS = VmRead64(VMCS_64_GUEST_VMCS_LINK_PTR_INDEX);
RootState->ExecutiveVMCS = VmRead64(VMCS_64_CONTROL_EXECUTIVE_VMCS_PTR_INDEX); // get the executive VMCS
// DEBUG((EFI_D_ERROR, "%ld GetRootVmxState\n VMXON: 0x%016llx\n ExecutiveVMCS: 0x%016llx\n LinkVMCS: 0x%016llx\n",
// CpuIndex,
// RootState->Vmxon,
// RootState->ExecutiveVMCS,
// RootState->LinkVMCS));
// capture the guest-state fields of the current VMCS via VMREAD
RootState->RootGuestCR0 = VmReadN(VMCS_N_GUEST_CR0_INDEX);
RootState->RootGuestCR3 = VmReadN(VMCS_N_GUEST_CR3_INDEX);
RootState->RootGuestCR4 = VmReadN(VMCS_N_GUEST_CR4_INDEX);
RootState->RootGuestGDTRBase = VmReadN(VMCS_N_GUEST_GDTR_BASE_INDEX);
RootState->RootGuestGDTRLimit = VmRead32(VMCS_32_GUEST_GDTR_LIMIT_INDEX);
RootState->RootGuestIDTRBase = VmReadN(VMCS_N_GUEST_IDTR_BASE_INDEX);
// NOTE(review): reads the guest *LDTR* limit encoding into the *IDTR*
// limit field - looks like a copy/paste bug (IDTR limit is a different
// VMCS encoding); confirm intent
RootState->RootGuestIDTRLimit = VmRead32(VMCS_32_GUEST_LDTR_LIMIT_INDEX);
RootState->RootGuestRSP = VmReadN(VMCS_N_GUEST_RSP_INDEX);
RootState->RootGuestRIP = VmReadN(VMCS_N_GUEST_RIP_INDEX);
RootState->RootContEPT = VmReadN(VMCS_64_CONTROL_EPT_PTR_INDEX);
// test result
// save the current working vmcs
// find the vmcs that contains the root/host datastrucure
// this this the host state information for the root VMCS on the host
// it contains the information needed to proces the guest vmexit
if(RootState->ExecutiveVMCS == RootState->Vmxon) // ref: section 34.15.4.7
{
// we are in root operation, so our VMCS of interest is in the VNCS-Link field
if(RootState->LinkVMCS != 0xFFFFFFFFFFFFFFFF)
{
RootState->VmcsType = 1; // guest-VM being sericed by VMM
HostRootVMCS = RootState->LinkVMCS;
//HostRootVMCS = VmRead64(VMCS_64_GUEST_VMCS_LINK_PTR_INDEX);
RootState->VmxState = VMX_STATE_ROOT;
//DEBUG((EFI_D_ERROR, "%ld GetRootVmxState (%d): execVMCS is vmxon: 0x%016llx using VMCS_LINK_POINTER\n",
//CpuIndex, RootState->VmcsType, HostRootVMCS));
}
else
{
// no current VMCS (link pointer is all ones) - fall back to the
// executive VMCS; type 2 records are treated as inactive by callers
HostRootVMCS = RootState->ExecutiveVMCS;
RootState->VmcsType = 2;
//HostRootVMCS = VmRead64(VMCS_64_GUEST_VMCS_LINK_PTR_INDEX);
RootState->VmxState = VMX_STATE_ROOT;
//DEBUG((EFI_D_ERROR, "%ld GetRootVmxState (%d): execVMCS is vmxon: But LinkVMCS is 0xFFFFFFFFFFFFFFF so no current Vmcs. Using Executive Vmcs: %llx\n",
// CpuIndex, RootState->VmcsType, HostRootVMCS));
}
}
else
{
// in guest operation, so our VMCS of interest is in the executive-VMCS field
RootState->VmcsType = 3;
HostRootVMCS = RootState->ExecutiveVMCS;
RootState->VmxState = VMX_STATE_GUEST;
//DEBUG((EFI_D_ERROR, "%ld GetRootVmxState (%d): execVMCS is guest VMCS: 0x%016llx using Executive VMCS\n",
//CpuIndex, RootState->VmcsType, HostRootVMCS));
}
// NOTE(review): CurrentVMCSSave is uninitialized at this VMCLEAR - the
// intent appears to be "store current pointer, then clear to flush", so
// the order of these two calls looks inverted; confirm against VMX rules
AsmVmClear(&(CurrentVMCSSave));
AsmVmPtrStore(&CurrentVMCSSave);
// read the guest RIP from the in-memory image of the current VMCS
// (treats the stored physical address as a dereferenceable pointer)
RootGuestRIP_M = *(UINT64 *)((UINTN)CurrentVMCSSave + (UINTN)VMCS_N_GUEST_RIP_OFFSET);
VmcsFlushStart:
FlushCount = 0;
// loop until the in-memory RIP matches the VMREAD RIP, i.e. until the
// processor has written its cached VMCS state back to memory
while((RootState->RootGuestRIP != RootGuestRIP_M) &&
(FlushCount < MAXVMCSFLUSH))
{
// got here because the in-memory copy of the VMCS is different than
// what is in the processor - so we need to flush
//DEBUG((EFI_D_ERROR, "%ld - GetRootState: RootGuestRIPMemory: 0x%016llx, Location: 0x%016llx\n",
//CpuIndex, RootGuestRIPMemory, ((UINTN)HostRootVMCS + (UINTN)VMCS_N_GUEST_RIP_OFFSET)));
// first create a dummy VMCS
VmxRevId = AsmReadMsr32(IA32_VMX_BASIC_MSR_INDEX);
DummyVmcs[FlushCount] = (char *) AllocatePages(VmcsSizeInPages);
if(DummyVmcs[FlushCount] == NULL)
{
// ran out of memory - release everything and start over
// that way someone else hopefully gets a chance to complete
// NOTE(review): if allocation keeps failing with FlushCount == 0 this
// goto spins forever with nothing freed - potential livelock in SMM
DEBUG((EFI_D_ERROR, "%ld - GetRootState: ran out of memory - so free everything and restart - Flushcount: %d\n",
CpuIndex, FlushCount));
if(FlushCount == 0)
goto VmcsFlushStart;
for(i = 0; i < FlushCount; i++)
{
FreePages(DummyVmcs[i], VmcsSizeInPages);
}
goto VmcsFlushStart;
}
// stamp the VMX revision id into the first dword so VMPTRLD accepts it
memcpy(DummyVmcs[FlushCount], &VmxRevId, 4);
// loading a different VMCS makes the processor write the previous
// current VMCS back to memory; &DummyVmcs[n] passes the slot holding
// the buffer address (the pointer value is used as the physical
// address - assumes identity mapping, TODO confirm)
AsmVmPtrLoad((UINT64 *) &DummyVmcs[FlushCount]);
RootGuestRIP_M = *(UINT64 *)((UINTN)CurrentVMCSSave + (UINTN)VMCS_N_GUEST_RIP_OFFSET); // try again
FlushCount++;
}
AsmVmPtrLoad(&CurrentVMCSSave); // in any case, reload this and free the dummies if necessary
if(FlushCount > 0)
{
DEBUG((EFI_D_ERROR, "%ld GetRootVmxState - Flush necessary to get VMCS in sync. Flushcount=%d\n",
CpuIndex, FlushCount));
//DEBUG((EFI_D_ERROR, "%ld GetRootVmxState: after Flush: VMCS_N_GUEST_RIP_MEMORY: 0x%016llx (test) \n", CpuIndex, RootGuestRIPMemory));
// release the buffers
for(i = 0; i < FlushCount; i++)
{
FreePages(DummyVmcs[i], VmcsSizeInPages);
}
}
//AsmVmPtrStore(&CurrentVMCSSave);
//AsmVmClear(&(CurrentVMCSSave));
//AsmVmPtrLoad(&HostRootVMCS);
RootState->HostRootVMCS = HostRootVMCS;
//DEBUG((EFI_D_ERROR, "%ld - GetRootVmxState: HostRootVmcs 0x%016llx\n", CpuIndex, RootState->HostRootVMCS));
// read the guest fields from the in-memory image of the host/root VMCS
// (these are only used for the debug comparison dumps below)
RootGuestCR0_M = *(UINT64 *)((UINTN)HostRootVMCS + (UINTN)VMCS_N_GUEST_CR0_OFFSET);
RootGuestCR3_M = *(UINT64 *)((UINTN)HostRootVMCS + (UINTN)VMCS_N_GUEST_CR3_OFFSET);
RootGuestCR4_M = *(UINT64 *)((UINTN)HostRootVMCS + (UINTN)VMCS_N_GUEST_CR4_OFFSET);
RootGuestGDTRBase_M = *(UINT64 *)((UINTN)HostRootVMCS + (UINTN)VMCS_N_GUEST_GDTR_BASE_OFFSET);
RootGuestGDTRLimit_M = (*(UINT64 *)((UINTN)HostRootVMCS + (UINTN)VMCS_32_GUEST_GDTR_LIMIT_OFFSET)) & 0x00000000FFFFFFFF;
RootGuestIDTRBase_M = *(UINT64 *)((UINTN)HostRootVMCS + (UINTN)VMCS_N_GUEST_IDTR_BASE_OFFSET);
// NOTE(review): same LDTR-vs-IDTR limit suspicion as the VMREAD above
RootGuestIDTRLimit_M = (*(UINT64 *)((UINTN)HostRootVMCS + (UINTN)VMCS_32_GUEST_LDTR_LIMIT_OFFSET)) & 0x00000000FFFFFFFF;
RootGuestRSP_M = *(UINT64 *)((UINTN)HostRootVMCS + (UINTN)VMCS_N_GUEST_RSP_OFFSET);
RootGuestRIP_M = *(UINT64 *)((UINTN)HostRootVMCS + (UINTN)VMCS_N_GUEST_RIP_OFFSET);
RootContExecVmcs_M = *(UINT64 *)((UINTN)HostRootVMCS + (UINTN)VMCS_64_CONTROL_EXECUTIVE_VMCS_PTR_OFFSET);
RootContLinkVmcs_M = *(UINT64 *)((UINTN)HostRootVMCS + (UINTN)VMCS_64_GUEST_VMCS_LINK_PTR_OFFSET);
#ifdef VMCSDEBUGPRINT
if(RootState->VmcsType !=2) // only want active Vmcs
{
DEBUG((EFI_D_ERROR, "%ld GetRootVmxState (%d) HostRootVmcs 0x%016llx\n G_CR0 %llx\n G_CR3 %llx\n G_CR4 %llx\n G_GDTR %llx:%llx\n G_IDTR %llx:%llx\n G_RSP %llx\n G_RIP %llx\n",
CpuIndex,
RootState->VmcsType,
RootState->HostRootVMCS,
RootState->RootGuestCR0,
RootState->RootGuestCR3,
RootState->RootGuestCR4,
RootState->RootGuestGDTRBase,
RootState->RootGuestGDTRLimit,
RootState->RootGuestIDTRBase,
RootState->RootGuestIDTRLimit,
RootState->RootGuestRSP,
RootState->RootGuestRIP));
DEBUG((EFI_D_ERROR, "%ld GetRootVmxState (%d) (control) HostRootVmcs 0x%016llx\n VMXON %llx\n ExecutiveVMCS %llx\n LinkVMCS %llx\n EPT %llx\n",
CpuIndex,
RootState->VmcsType,
RootState->HostRootVMCS,
RootState->Vmxon,
RootState->ExecutiveVMCS,
RootState->LinkVMCS,
RootState->RootContEPT));
DEBUG((EFI_D_ERROR, "%ld GetRootVmxState (%d) (memory) HostRootVmcs 0x%016llx\n G_CR0m %llx\n G_CR3m %llx\n G_CR4m %llx\n G_GDTRm %llx:%llx\n G_IDTRm %llx:%llx\n G_RSPm %llx\n G_RIPm %llx\n",
CpuIndex,
// "GetRootVmxState (memory)\n G_CR0m %llx\n G_CR3m %llx\n G_CR4m %llx\n G_GDTRm %llx:%llx\n G_IDTRm %llx:%llx\n G_RSPm %llx\n G_RIPm %llx\n C_ExecVMCSm %llx\n C_LinkVMCSm %llx\n",
RootState->VmcsType,
RootState->HostRootVMCS,
RootGuestCR0_M,
RootGuestCR3_M,
RootGuestCR4_M,
RootGuestGDTRBase_M,
RootGuestGDTRLimit_M,
RootGuestIDTRBase_M,
RootGuestIDTRLimit_M,
RootGuestRSP_M,
RootGuestRIP_M));
DEBUG((EFI_D_ERROR, "%ld GetRootVmxState (%d) (memory) HostRootVmcs 0x%016llx\n C_ExecVMCSm %llx\n C_LinkVMCSm %llx\n",
CpuIndex,
RootState->VmcsType,
RootState->HostRootVMCS,
RootContExecVmcs_M,
RootContLinkVmcs_M));
}
#endif
//DEBUG((EFI_D_ERROR, "%ld GetRootVmxState: VMCS_N_GUEST_RIP_MEMORY: 0x%016llx VMCS_N_GUEST_RIP: 0x%016llx, Location: 0x%016llx (test) \n",
// CpuIndex, RootGuestRIP_M, RootState->RootGuestRIP, ((UINTN)HostRootVMCS + (UINTN)VMCS_N_GUEST_RIP_OFFSET)));
// need to save the root vmx host structures
#ifdef ZERO
if(RootState->VmxState == VMX_STATE_ROOT)
{
// if root, these entries are meaningless, so clear them out
RootState->RootHostCR0 = 0;
RootState->RootHostCR3 = 0;
RootState->RootHostCR4 = 0;
RootState->RootHostGDTRBase = 0;
RootState->RootHostIDTRBase = 0;
RootState->RootHostRSP = 0;
RootState->RootHostRIP = 0;
RootState->RootHostEPT = 0;
}
else
#endif
{
// copy the HOST-state fields out of the in-memory VMCS image
RootState->RootHostCR0 = *(UINT64 *)((UINTN)HostRootVMCS + (UINTN)VMCS_N_HOST_CR0_OFFSET);
RootState->RootHostCR3 = *(UINT64 *)((UINTN)HostRootVMCS + (UINTN)VMCS_N_HOST_CR3_OFFSET);
RootState->RootHostCR4 = *(UINT64 *)((UINTN)HostRootVMCS + (UINTN)VMCS_N_HOST_CR4_OFFSET);
RootState->RootHostGDTRBase = *(UINTN *)((UINTN)HostRootVMCS + (UINTN)VMCS_N_HOST_GDTR_BASE_OFFSET);
RootState->RootHostIDTRBase = *(UINTN *)((UINTN)HostRootVMCS + (UINTN)VMCS_N_HOST_IDTR_BASE_OFFSET);
RootState->RootHostRSP = *(UINTN*)((UINTN)HostRootVMCS + (UINTN)VMCS_N_HOST_RSP_OFFSET);
RootState->RootHostRIP = *(UINTN*)((UINTN)HostRootVMCS + (UINTN)VMCS_N_HOST_RIP_OFFSET);
RootState->RootHostEPT = *(UINTN*)((UINTN)HostRootVMCS + (UINTN)VMCS_64_CONTROL_EPT_PTR_OFFSET);
}
// Indicate to the master that we are all done
RootState->valid = 1;
// restore the current working vmcs
//AsmVmClear(&HostRootVMCS);
//AsmVmPtrLoad(&CurrentVMCSSave);
#ifdef VMCSDEBUGPRINT
if(RootState->VmcsType != 2)
{
DEBUG((EFI_D_ERROR, "%ld GetRootVmxState (%d) \n H_CR0 %llx\n H_CR3 %llx\n H_CR4 %llx\n H_GDTR %llx\n H_IDTR %llx\n H_RSP %llx\n H_RIP %llx\n H_EPT %llx\n",
CpuIndex,
RootState->VmcsType,
RootState->RootHostCR0,
RootState->RootHostCR3,
RootState->RootHostCR4,
RootState->RootHostGDTRBase,
RootState->RootHostIDTRBase,
RootState->RootHostRSP,
RootState->RootHostRIP,
RootState->RootHostEPT));
}
#endif
}
// Sets up the offsets needed for accessing in memory the root VMCS state
// (the host fields and the guest fields that GetRootVmxState reads).
//
// Idempotent: the offset table is built at most once; subsequent calls
// return immediately.
void SetupGetRootVmxState()
{
    if(SetupGetRootVmxStateDone == 1)
        return; // already done, so move on
    MapVmcs(); // make sure we have a VMCS field/offset map
    if(VMCS_OFFSET_READY == 1)
    {
        // Offsets were already computed (e.g. by a concurrent caller);
        // record completion so the fast-path guard above takes effect.
        SetupGetRootVmxStateDone = 1;
        return;
    }
    VMCS_N_HOST_CR0_OFFSET = GetVmcsOffset( VMCS_N_HOST_CR0_INDEX);
    VMCS_N_HOST_CR3_OFFSET = GetVmcsOffset( VMCS_N_HOST_CR3_INDEX);
    VMCS_N_HOST_CR4_OFFSET = GetVmcsOffset( VMCS_N_HOST_CR4_INDEX);
    VMCS_N_HOST_GDTR_BASE_OFFSET = GetVmcsOffset( VMCS_N_HOST_GDTR_BASE_INDEX);
    VMCS_N_HOST_IDTR_BASE_OFFSET = GetVmcsOffset( VMCS_N_HOST_IDTR_BASE_INDEX);
    VMCS_N_HOST_RSP_OFFSET = GetVmcsOffset( VMCS_N_HOST_RSP_INDEX);
    VMCS_N_HOST_RIP_OFFSET = GetVmcsOffset( VMCS_N_HOST_RIP_INDEX);
    VMCS_64_CONTROL_EPT_PTR_OFFSET = GetVmcsOffset( VMCS_64_CONTROL_EPT_PTR_INDEX);
    VMCS_N_GUEST_RIP_OFFSET = GetVmcsOffset(VMCS_N_GUEST_RIP_INDEX);
    VMCS_N_GUEST_CR0_OFFSET = GetVmcsOffset(VMCS_N_GUEST_CR0_INDEX);
    VMCS_N_GUEST_CR3_OFFSET = GetVmcsOffset(VMCS_N_GUEST_CR3_INDEX);
    VMCS_N_GUEST_CR4_OFFSET = GetVmcsOffset(VMCS_N_GUEST_CR4_INDEX);
    VMCS_N_GUEST_GDTR_BASE_OFFSET = GetVmcsOffset(VMCS_N_GUEST_GDTR_BASE_INDEX);
    VMCS_32_GUEST_GDTR_LIMIT_OFFSET = GetVmcsOffset(VMCS_32_GUEST_GDTR_LIMIT_INDEX);
    VMCS_N_GUEST_IDTR_BASE_OFFSET = GetVmcsOffset(VMCS_N_GUEST_IDTR_BASE_INDEX);
    // NOTE(review): GetRootVmxState uses this offset to read the guest *IDTR*
    // limit, but the encoding below is the *LDTR* limit - looks like a
    // copy/paste bug; confirm against the VMCS field encodings before changing.
    VMCS_32_GUEST_LDTR_LIMIT_OFFSET = GetVmcsOffset(VMCS_32_GUEST_LDTR_LIMIT_INDEX);
    VMCS_N_GUEST_RSP_OFFSET = GetVmcsOffset(VMCS_N_GUEST_RSP_INDEX);
    VMCS_64_CONTROL_EXECUTIVE_VMCS_PTR_OFFSET = GetVmcsOffset(VMCS_64_CONTROL_EXECUTIVE_VMCS_PTR_INDEX);
    VMCS_64_GUEST_VMCS_LINK_PTR_OFFSET = GetVmcsOffset(VMCS_64_GUEST_VMCS_LINK_PTR_INDEX);
    VMCS_OFFSET_READY = 1;
    // BUGFIX: SetupGetRootVmxStateDone was checked above but never set
    // anywhere, making the early return dead code and re-running MapVmcs()
    // on every call. Mark completion so later calls short-circuit.
    SetupGetRootVmxStateDone = 1;
}
// Dump a CPU's captured root VMX state to the debug log.
//
// First reports which VMCS the state was taken from (executive-VMCS equal
// to the VMXON region means root operation - ref: SDM section 34.15.4.7),
// then, for records that represent an active VMCS (VmcsType != 2), dumps
// the guest, control and host fields.
void PrintVmxState(UINT32 CpuIndex, ROOT_VMX_STATE * RootState)
{
    // Report the source of the captured state.
    if(RootState->ExecutiveVMCS != RootState->Vmxon)
    {
        // Guest operation: the VMCS of interest came from the executive-VMCS field.
        DEBUG((EFI_D_ERROR, "%ld PrintVmxState (%d): execVMCS is guest VMCS: 0x%016llx using Executive VMCS\n",
            CpuIndex, RootState->VmcsType, RootState->ExecutiveVMCS));
    }
    else if(RootState->LinkVMCS != 0xFFFFFFFFFFFFFFFF)
    {
        // Root operation with a current VMCS: state came from the VMCS-link pointer.
        DEBUG((EFI_D_ERROR, "%ld PrintVmxState (%d): execVMCS is vmxon: 0x%016llx using VMCS_LINK_POINTER\n",
            CpuIndex, RootState->VmcsType, RootState->LinkVMCS));
    }
    else
    {
        // Root operation with no current VMCS (link pointer all ones).
        DEBUG((EFI_D_ERROR, "%ld PrintVmxState (%d): execVMCS is vmxon: But LinkVMCS is 0xFFFFFFFFFFFFFFF so no current Vmcs. Using Executive Vmcs: %llx\n",
            CpuIndex, RootState->VmcsType, RootState->ExecutiveVMCS));
    }
    // Type 2 records have no active VMCS behind them - nothing more to dump.
    if(RootState->VmcsType == 2)
    {
        return;
    }
    // Guest-state fields.
    DEBUG((EFI_D_ERROR, "%ld PrintVmxState (%d) HostRootVmcs 0x%016llx\n G_CR0 %llx\n G_CR3 %llx\n G_CR4 %llx\n G_GDTR %llx:%llx\n G_IDTR %llx:%llx\n G_RSP %llx\n G_RIP %llx\n",
        CpuIndex,
        RootState->VmcsType,
        RootState->HostRootVMCS,
        RootState->RootGuestCR0,
        RootState->RootGuestCR3,
        RootState->RootGuestCR4,
        RootState->RootGuestGDTRBase,
        RootState->RootGuestGDTRLimit,
        RootState->RootGuestIDTRBase,
        RootState->RootGuestIDTRLimit,
        RootState->RootGuestRSP,
        RootState->RootGuestRIP));
    // Control fields.
    DEBUG((EFI_D_ERROR, "%ld PrintVmxState (%d) (control) HostRootVmcs 0x%016llx\n VMXON %llx\n ExecutiveVMCS %llx\n LinkVMCS %llx\n EPT %llx\n",
        CpuIndex,
        RootState->VmcsType,
        RootState->HostRootVMCS,
        RootState->Vmxon,
        RootState->ExecutiveVMCS,
        RootState->LinkVMCS,
        RootState->RootContEPT));
    // Host-state fields.
    DEBUG((EFI_D_ERROR, "%ld PrintVmxState (%d) \n H_CR0 %llx\n H_CR3 %llx\n H_CR4 %llx\n H_GDTR %llx\n H_IDTR %llx\n H_RSP %llx\n H_RIP %llx\n H_EPT %llx\n",
        CpuIndex,
        RootState->VmcsType,
        RootState->RootHostCR0,
        RootState->RootHostCR3,
        RootState->RootHostCR4,
        RootState->RootHostGDTRBase,
        RootState->RootHostIDTRBase,
        RootState->RootHostRSP,
        RootState->RootHostRIP,
        RootState->RootHostEPT));
}

View File

@ -1,104 +1,115 @@
/** @file
SMI event handler
Copyright (c) 2015, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php.
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
**/
SMI event handler
Copyright (c) 2015, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php.
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
**/
#include "StmRuntime.h"
#include "PeStm.h"
extern unsigned int PeSmiHandler(unsigned int CpuIndex);
/**
This function is SMI event handler for SMI.
@param Index CPU index
**/
This function is SMI event handler for SMI.
@param Index CPU index
**/
VOID
SmiEventHandler (
IN UINT32 Index
)
IN UINT32 Index
)
{
UINTN Rflags;
UINT64 ExecutiveVmcsPtr;
UINT64 VmcsLinkPtr;
UINT32 VmcsSize;
if (!mGuestContextCommonSmm.GuestContextPerCpu[Index].Actived) {
return ;
}
VmcsSize = GetVmcsSize();
ExecutiveVmcsPtr = VmRead64 (VMCS_64_CONTROL_EXECUTIVE_VMCS_PTR_INDEX);
if (IsOverlap (ExecutiveVmcsPtr, VmcsSize, mHostContextCommon.TsegBase, mHostContextCommon.TsegLength)) {
// Overlap TSEG
DEBUG ((EFI_D_ERROR, "ExecutiveVmcsPtr violation (SmiEventHandler) - %016lx\n", ExecutiveVmcsPtr));
return ;
}
VmcsLinkPtr = VmRead64 (VMCS_64_GUEST_VMCS_LINK_PTR_INDEX);
if (IsOverlap (VmcsLinkPtr, VmcsSize, mHostContextCommon.TsegBase, mHostContextCommon.TsegLength)) {
// Overlap TSEG
DEBUG ((EFI_D_ERROR, "VmcsLinkPtr violation (SmiEventHandler) - %016lx\n", VmcsLinkPtr));
return ;
}
STM_PERF_START (Index, 0, "WriteSyncSmmStateSaveArea", "SmiEventHandler");
WriteSyncSmmStateSaveArea (Index);
STM_PERF_END (Index, "WriteSyncSmmStateSaveArea", "SmiEventHandler");
AsmVmPtrStore (&mGuestContextCommonSmi.GuestContextPerCpu[Index].Vmcs);
Rflags = AsmVmPtrLoad (&mGuestContextCommonSmm.GuestContextPerCpu[Index].Vmcs);
if ((Rflags & (RFLAGS_CF | RFLAGS_ZF)) != 0) {
DEBUG ((EFI_D_ERROR, "ERROR: AsmVmPtrLoad(%d) - %016lx : %08x\n", (UINTN)Index, mGuestContextCommonSmm.GuestContextPerCpu[Index].Vmcs, Rflags));
CpuDeadLoop ();
}
VmWriteN (VMCS_N_GUEST_RIP_INDEX, (UINTN)mHostContextCommon.HostContextPerCpu[Index].TxtProcessorSmmDescriptor->SmmSmiHandlerRip);
VmWriteN (VMCS_N_GUEST_RSP_INDEX, (UINTN)mHostContextCommon.HostContextPerCpu[Index].TxtProcessorSmmDescriptor->SmmSmiHandlerRsp);
VmWriteN (VMCS_N_GUEST_CR3_INDEX, mGuestContextCommonSmm.GuestContextPerCpu[Index].Cr3);
#if 0
DEBUG ((EFI_D_INFO, "!!!Enter SmmHandler - %d\n", (UINTN)Index));
#endif
STM_PERF_START (Index, 0, "BiosSmmHandler", "SmiEventHandler");
//
// Launch SMM
//
if (mGuestContextCommonSmm.GuestContextPerCpu[Index].Launched) {
Rflags = AsmVmResume (&mGuestContextCommonSmm.GuestContextPerCpu[Index].Register);
// BUGBUG: - AsmVmLaunch if AsmVmResume fail
if (VmRead32 (VMCS_32_RO_VM_INSTRUCTION_ERROR_INDEX) == VmxFailErrorVmResumeWithNonLaunchedVmcs) {
// DEBUG ((EFI_D_INFO, "(STM):-(\n", (UINTN)Index));
Rflags = AsmVmLaunch (&mGuestContextCommonSmm.GuestContextPerCpu[Index].Register);
UINTN Rflags;
UINT64 ExecutiveVmcsPtr;
UINT64 VmcsLinkPtr;
UINT32 VmcsSize;
if (!mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[Index].Actived) {
return ;
}
} else {
mGuestContextCommonSmm.GuestContextPerCpu[Index].Launched = TRUE;
Rflags = AsmVmLaunch (&mGuestContextCommonSmm.GuestContextPerCpu[Index].Register);
mGuestContextCommonSmm.GuestContextPerCpu[Index].Launched = FALSE;
}
AcquireSpinLock (&mHostContextCommon.DebugLock);
if (mGuestContextCommonSmm.GuestContextPerCpu[Index].Launched) {
DEBUG ((EFI_D_ERROR, "!!!ResumeSmm FAIL!!!\n"));
} else {
DEBUG ((EFI_D_ERROR, "!!!LaunchSmm FAIL!!!\n"));
}
DEBUG ((EFI_D_ERROR, "Rflags: %08x\n", Rflags));
DEBUG ((EFI_D_ERROR, "VMCS_32_RO_VM_INSTRUCTION_ERROR: %08x\n", (UINTN)VmRead32 (VMCS_32_RO_VM_INSTRUCTION_ERROR_INDEX)));
DumpVmcsAllField ();
DumpRegContext (&mGuestContextCommonSmm.GuestContextPerCpu[Index].Register);
ReleaseSpinLock (&mHostContextCommon.DebugLock);
CpuDeadLoop ();
return ;
VmcsSize = GetVmcsSize();
ExecutiveVmcsPtr = VmRead64 (VMCS_64_CONTROL_EXECUTIVE_VMCS_PTR_INDEX);
if ((ExecutiveVmcsPtr + VmcsSize > (UINTN)mHostContextCommon.TsegBase) &&
(ExecutiveVmcsPtr < ((UINTN)mHostContextCommon.TsegBase + mHostContextCommon.TsegLength))) {
// Overlap TSEG
DEBUG ((EFI_D_ERROR, "SmiEventHandler - ExecutiveVmcsPtr violation (SmiEventHandler) - %016lx\n", ExecutiveVmcsPtr));
return ;
}
VmcsLinkPtr = VmRead64 (VMCS_64_GUEST_VMCS_LINK_PTR_INDEX);
if ((VmcsLinkPtr + VmcsSize > (UINTN)mHostContextCommon.TsegBase) &&
(VmcsLinkPtr < ((UINTN)mHostContextCommon.TsegBase + mHostContextCommon.TsegLength))) {
// Overlap TSEG
DEBUG ((EFI_D_ERROR, "SmiEventHandler - VmcsLinkPtr violation (SmiEventHandler) - %016lx\n", VmcsLinkPtr));
return ;
}
STM_PERF_START (Index, 0, "WriteSyncSmmStateSaveArea", "SmiEventHandler");
WriteSyncSmmStateSaveArea (Index);
STM_PERF_END (Index, "WriteSyncSmmStateSaveArea", "SmiEventHandler");
if(PeSmiHandler(Index) == 1)
{
return;
}
AsmVmPtrStore (&mGuestContextCommonSmi.GuestContextPerCpu[Index].Vmcs);
Rflags = AsmVmPtrLoad (&mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[Index].Vmcs);
if ((Rflags & (RFLAGS_CF | RFLAGS_ZF)) != 0) {
DEBUG ((EFI_D_ERROR, "ERROR: AsmVmPtrLoad(%d) - %016lx : %08x\n", (UINTN)Index, mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[Index].Vmcs, Rflags));
CpuDeadLoop ();
}
VmWriteN (VMCS_N_GUEST_RIP_INDEX, (UINTN)mHostContextCommon.HostContextPerCpu[Index].TxtProcessorSmmDescriptor->SmmSmiHandlerRip);
VmWriteN (VMCS_N_GUEST_RSP_INDEX, (UINTN)mHostContextCommon.HostContextPerCpu[Index].TxtProcessorSmmDescriptor->SmmSmiHandlerRsp);
VmWriteN (VMCS_N_GUEST_CR3_INDEX, mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[Index].Cr3);
#if 0
DEBUG ((EFI_D_INFO, "!!!Enter SmmHandler - %d\n", (UINTN)Index));
#endif
STM_PERF_START (Index, 0, "BiosSmmHandler", "SmiEventHandler");
//
// Launch SMM
//
if (mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[Index].Launched) {
Rflags = AsmVmResume (&mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[Index].Register);
// BUGBUG: - AsmVmLaunch if AsmVmResume fail
if (VmRead32 (VMCS_32_RO_VM_INSTRUCTION_ERROR_INDEX) == VmxFailErrorVmResumeWithNonLaunchedVmcs) {
// DEBUG ((EFI_D_INFO, "(STM):-(\n", (UINTN)Index));
Rflags = AsmVmLaunch (&mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[Index].Register);
}
} else {
mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[Index].Launched = TRUE;
Rflags = AsmVmLaunch (&mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[Index].Register);
mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[Index].Launched = FALSE;
}
AcquireSpinLock (&mHostContextCommon.DebugLock);
if (mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[Index].Launched) {
DEBUG ((EFI_D_ERROR, "!!!ResumeSmm FAIL!!!\n"));
} else {
DEBUG ((EFI_D_ERROR, "!!!LaunchSmm FAIL!!!\n"));
}
DEBUG ((EFI_D_ERROR, "Rflags: %08x\n", Rflags));
DEBUG ((EFI_D_ERROR, "VMCS_32_RO_VM_INSTRUCTION_ERROR: %08x\n", (UINTN)VmRead32 (VMCS_32_RO_VM_INSTRUCTION_ERROR_INDEX)));
DumpVmcsAllField ();
DumpRegContext (&mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[Index].Register);
DumpGuestStack(Index);
ReleaseSpinLock (&mHostContextCommon.DebugLock);
CpuDeadLoop ();
return ;
}

View File

@ -1,14 +1,14 @@
/** @file
SMI handler
SMI handler
Copyright (c) 2015, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php.
Copyright (c) 2015, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php.
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
**/
@ -18,156 +18,156 @@ STM_HANDLER mStmHandlerSmi[VmExitReasonMax];
/**
This function initialize STM handle for SMI.
This function initialize STM handle for SMI.
**/
VOID
InitStmHandlerSmi (
VOID
)
InitStmHandlerSmi (
VOID
)
{
UINT32 Index;
UINT32 Index;
for (Index = 0; Index < VmExitReasonMax; Index++) {
mStmHandlerSmi[Index] = UnknownHandlerSmi;
}
for (Index = 0; Index < VmExitReasonMax; Index++) {
mStmHandlerSmi[Index] = UnknownHandlerSmi;
}
mStmHandlerSmi[VmExitReasonIoSmi] = SmiEventHandler;
mStmHandlerSmi[VmExitReasonOtherSmi] = SmiEventHandler;
mStmHandlerSmi[VmExitReasonVmCall] = SmiVmcallHandler;
mStmHandlerSmi[VmExitReasonIoSmi] = SmiEventHandler;
mStmHandlerSmi[VmExitReasonOtherSmi] = SmiEventHandler;
mStmHandlerSmi[VmExitReasonVmCall] = SmiVmcallHandler;
}
/**
This function is unknown handler for SMI.
This function is unknown handler for SMI.
@param Index CPU index
@param Index CPU index
**/
VOID
UnknownHandlerSmi (
IN UINT32 Index
)
UnknownHandlerSmi (
IN UINT32 Index
)
{
AcquireSpinLock (&mHostContextCommon.DebugLock);
AcquireSpinLock (&mHostContextCommon.DebugLock);
DEBUG ((EFI_D_ERROR, "!!!UnknownHandlerSmi - %d\n", (UINTN)Index));
DumpVmcsAllField ();
DEBUG ((EFI_D_ERROR, "!!!UnknownHandlerSmi - %d\n", (UINTN)Index));
DumpVmcsAllField ();
ReleaseSpinLock (&mHostContextCommon.DebugLock);
ReleaseSpinLock (&mHostContextCommon.DebugLock);
CpuDeadLoop ();
CpuDeadLoop ();
}
/**
This function checks Pending Mtf before resume.
This function checks Pending Mtf before resume.
@param Index CPU index
@param Index CPU index
**/
VOID
CheckPendingMtf (
IN UINT32 Index
)
CheckPendingMtf (
IN UINT32 Index
)
{
VM_EXIT_INFO_INTERRUPTION VmEntryControlInterrupt;
VM_EXIT_INFO_INTERRUPTION VmEntryControlInterrupt;
//
// Check pending MTF
//
if (mGuestContextCommonSmi.GuestContextPerCpu[Index].InfoBasic.Bits.PendingMtf == 0) {
return ;
}
//
// Check pending MTF
//
if (mGuestContextCommonSmi.GuestContextPerCpu[Index].InfoBasic.Bits.PendingMtf == 0) {
return ;
}
//
// In this case, prior to resuming the interrupted guest, the STM must set the VMENTRY
// interrupt-information field in the interrupted contexts VMCS to 80000070H (inject
// "other event" number 0). This will cause an MTF VMEXIT to be pended and delivered
// immediately after completion of the VMRESUME from the STM.
//
// If the STM doesn't do this re-injection, the guest will execute two instructions, rather
// than one, before the MTF VMEXIT occurs. This may have undesirable effects on the
// MLE and must be avoided.
//
VmEntryControlInterrupt.Uint32 = 0;
VmEntryControlInterrupt.Bits.InterruptType = INTERRUPT_TYPE_OTHER_EVENT;
VmEntryControlInterrupt.Bits.Valid = 1;
VmWrite32 (VMCS_32_CONTROL_VMENTRY_INTERRUPTION_INFO_INDEX, VmEntryControlInterrupt.Uint32);
//
// In this case, prior to resuming the interrupted guest, the STM must set the VMENTRY
// interrupt-information field in the interrupted contexts VMCS to 80000070H (inject
// "other event" number 0). This will cause an MTF VMEXIT to be pended and delivered
// immediately after completion of the VMRESUME from the STM.
//
// If the STM doesn't do this re-injection, the guest will execute two instructions, rather
// than one, before the MTF VMEXIT occurs. This may have undesirable effects on the
// MLE and must be avoided.
//
VmEntryControlInterrupt.Uint32 = 0;
VmEntryControlInterrupt.Bits.InterruptType = INTERRUPT_TYPE_OTHER_EVENT;
VmEntryControlInterrupt.Bits.Valid = 1;
VmWrite32 (VMCS_32_CONTROL_VMENTRY_INTERRUPTION_INFO_INDEX, VmEntryControlInterrupt.Uint32);
}
/**
This function is STM handler for SMI.
This function is STM handler for SMI.
@param Register X86 register context
@param Register X86 register context
**/
VOID
StmHandlerSmi (
IN X86_REGISTER *Register
)
StmHandlerSmi (
IN X86_REGISTER *Register
)
{
UINT32 Index;
UINTN Rflags;
VM_EXIT_INFO_BASIC InfoBasic;
X86_REGISTER *Reg;
UINT32 Index;
UINTN Rflags;
VM_EXIT_INFO_BASIC InfoBasic;
X86_REGISTER *Reg;
Index = ApicToIndex (ReadLocalApicId ());
InfoBasic.Uint32 = VmRead32 (VMCS_32_RO_EXIT_REASON_INDEX);
Index = ApicToIndex (ReadLocalApicId ());
InfoBasic.Uint32 = VmRead32 (VMCS_32_RO_EXIT_REASON_INDEX);
STM_PERF_START (Index, InfoBasic.Bits.Reason, "OsSmiHandler", "StmHandlerSmi");
STM_PERF_START (Index, InfoBasic.Bits.Reason, "OsSmiHandler", "StmHandlerSmi");
Reg = &mGuestContextCommonSmi.GuestContextPerCpu[Index].Register;
Register->Rsp = VmReadN (VMCS_N_GUEST_RSP_INDEX);
CopyMem (Reg, Register, sizeof(X86_REGISTER));
Reg = &mGuestContextCommonSmi.GuestContextPerCpu[Index].Register;
Register->Rsp = VmReadN (VMCS_N_GUEST_RSP_INDEX);
CopyMem (Reg, Register, sizeof(X86_REGISTER));
#if 0
DEBUG ((EFI_D_INFO, "!!!StmHandlerSmi - %d\n", (UINTN)Index));
DEBUG ((EFI_D_INFO, "!!!StmHandlerSmi - %d\n", (UINTN)Index));
#endif
//
// Dispatch
//
if (InfoBasic.Bits.Reason >= VmExitReasonMax) {
DEBUG ((EFI_D_ERROR, "!!!UnknownReason!!! (0x%x)\n", InfoBasic.Bits.Reason));
DumpVmcsAllField ();
//
// Dispatch
//
if (InfoBasic.Bits.Reason >= VmExitReasonMax) {
DEBUG ((EFI_D_ERROR, "!!!UnknownReason!!!\n"));
DumpVmcsAllField ();
CpuDeadLoop ();
}
CpuDeadLoop ();
}
mGuestContextCommonSmi.GuestContextPerCpu[Index].InfoBasic.Uint32 = InfoBasic.Uint32;
mGuestContextCommonSmi.GuestContextPerCpu[Index].InfoBasic.Uint32 = InfoBasic.Uint32;
//
// Call dispatch handler
//
mStmHandlerSmi[InfoBasic.Bits.Reason] (Index);
//
// Call dispatch handler
//
mStmHandlerSmi[InfoBasic.Bits.Reason] (Index);
VmWriteN (VMCS_N_GUEST_RSP_INDEX, Reg->Rsp); // sync RSP
VmWriteN (VMCS_N_GUEST_RSP_INDEX, Reg->Rsp); // sync RSP
STM_PERF_END (Index, "OsSmiHandler", "StmHandlerSmi");
STM_PERF_END (Index, "OsSmiHandler", "StmHandlerSmi");
CheckPendingMtf (Index);
CheckPendingMtf (Index);
//
// Resume
//
Rflags = AsmVmResume (&mGuestContextCommonSmi.GuestContextPerCpu[Index].Register);
// BUGBUG: - AsmVmLaunch if AsmVmResume fail
if (VmRead32 (VMCS_32_RO_VM_INSTRUCTION_ERROR_INDEX) == VmxFailErrorVmResumeWithNonLaunchedVmcs) {
// DEBUG ((EFI_D_ERROR, "(STM):o(\n", (UINTN)Index));
Rflags = AsmVmLaunch (&mGuestContextCommonSmi.GuestContextPerCpu[Index].Register);
}
//
// Resume
//
Rflags = AsmVmResume (&mGuestContextCommonSmi.GuestContextPerCpu[Index].Register);
// BUGBUG: - AsmVmLaunch if AsmVmResume fail
if (VmRead32 (VMCS_32_RO_VM_INSTRUCTION_ERROR_INDEX) == VmxFailErrorVmResumeWithNonLaunchedVmcs) {
// DEBUG ((EFI_D_ERROR, "(STM):o(\n", (UINTN)Index));
Rflags = AsmVmLaunch (&mGuestContextCommonSmi.GuestContextPerCpu[Index].Register);
}
AcquireSpinLock (&mHostContextCommon.DebugLock);
AcquireSpinLock (&mHostContextCommon.DebugLock);
DEBUG ((EFI_D_ERROR, "!!!ResumeGuestSmi FAIL!!! - %d\n", (UINTN)Index));
DEBUG ((EFI_D_ERROR, "Rflags: %08x\n", Rflags));
DEBUG ((EFI_D_ERROR, "VMCS_32_RO_VM_INSTRUCTION_ERROR: %08x\n", (UINTN)VmRead32 (VMCS_32_RO_VM_INSTRUCTION_ERROR_INDEX)));
DumpVmcsAllField ();
DumpRegContext (&mGuestContextCommonSmi.GuestContextPerCpu[Index].Register);
DEBUG ((EFI_D_ERROR, "%ld StmHandlerSmi - !!!ResumeGuestSmi FAIL!!!", (UINTN)Index));
DEBUG ((EFI_D_ERROR, "%ld StmHandlerSmi - Rflags: %08x\n", Index, Rflags));
DEBUG ((EFI_D_ERROR, "%ld StmHandlerSmi - VMCS_32_RO_VM_INSTRUCTION_ERROR: %08x\n", Index, (UINTN)VmRead32 (VMCS_32_RO_VM_INSTRUCTION_ERROR_INDEX)));
DumpVmcsAllField ();
DumpRegContext (&mGuestContextCommonSmi.GuestContextPerCpu[Index].Register);
DumpGuestStack(Index);
ReleaseSpinLock (&mHostContextCommon.DebugLock);
ReleaseSpinLock (&mHostContextCommon.DebugLock);
CpuDeadLoop ();
CpuDeadLoop ();
return ;
return ;
}

View File

@ -13,6 +13,12 @@
**/
#include "StmRuntime.h"
#include "PeStm.h"
#include "PeLoadVm.h"
#include "StmInit.h"
extern PE_VM_DATA PeVmData[4];
extern VOID EptDumpPageTable (IN EPT_POINTER *EptPointer );
/**
@ -44,6 +50,9 @@ SmiVmcallInitializeProtectionHandler (
@return VMCALL status
**/
extern VOID CpuReadySync(UINT32 Index);
STM_STATUS
SmiVmcallStartHandler (
IN UINT32 Index,
@ -53,12 +62,31 @@ SmiVmcallStartHandler (
//
// Let STM enable SMI for SMM guest
//
DEBUG ((EFI_D_INFO, "STM_API_START:\n"));
if (!mGuestContextCommonSmm.GuestContextPerCpu[Index].Actived) {
mGuestContextCommonSmm.GuestContextPerCpu[Index].Actived = TRUE;
GUEST_INTERRUPTIBILITY_STATE GuestInterruptibilityState;
DEBUG ((EFI_D_INFO, "%ld STM_API_START:\n", Index));
if (!mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[Index].Actived) {
mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[Index].Actived = TRUE;
SmmSetup (Index);
if(Index == 0)
{
//EptDumpPageTable (&mGuestContextCommonSmm[0].EptPointer); // **DEBUG** Dump the SMI Handler EPT tables
// sync the BSP CPU once the API is ready
CpuReadySync(Index);
// turn on SMI for the BSP - the base code allows SMIs before this point
// this mod prevents SMIs until the STM is ready to process them
GuestInterruptibilityState.Uint32 = VmRead32 (VMCS_32_GUEST_INTERRUPTIBILITY_STATE_INDEX);
GuestInterruptibilityState.Bits.BlockingBySmi = 0;
VmWrite32 (VMCS_32_GUEST_INTERRUPTIBILITY_STATE_INDEX, GuestInterruptibilityState.Uint32);
}
return STM_SUCCESS;
} else {
DEBUG((EFI_D_ERROR, "%d STM_API_START -- Error STM already started\n", (UINTN) Index));
return ERROR_STM_ALREADY_STARTED;
}
}
@ -83,6 +111,7 @@ SmiVmcallStopHandler (
Reg = &mGuestContextCommonSmi.GuestContextPerCpu[Index].Register;
mHostContextCommon.StmShutdown = 1; // let the VM/PE know to quit
//
// Launch SMM Teardown handler.
//
@ -91,6 +120,7 @@ SmiVmcallStopHandler (
WriteUnaligned32 ((UINT32 *)&Reg->Rax, STM_SUCCESS);
VmWriteN (VMCS_N_GUEST_RFLAGS_INDEX, VmReadN(VMCS_N_GUEST_RFLAGS_INDEX) & ~RFLAGS_CF);
StmTeardown (Index);
DEBUG((EFI_D_INFO, "CpuDeadLoop\n"));
CpuDeadLoop ();
return STM_SUCCESS;
@ -122,14 +152,14 @@ SmiVmcallProtectResourceHandler (
DEBUG ((EFI_D_INFO, "STM_API_PROTECT_RESOURCE:\n"));
// BiosHwResourceRequirementsPtr to local BiosResource, delay it to first ProtectResource VMCALL, because BIOS may change resource at runtime.
if (mGuestContextCommonSmm.BiosHwResourceRequirementsPtr == 0) {
if (mGuestContextCommonSmm[SMI_HANDLER].BiosHwResourceRequirementsPtr == 0) {
if (!IsResourceListValid ((STM_RSC *)(UINTN)mHostContextCommon.HostContextPerCpu[0].TxtProcessorSmmDescriptor->BiosHwResourceRequirementsPtr, FALSE)) {
DEBUG ((EFI_D_ERROR, "ValidateBiosResourceList fail!\n"));
ReleaseSpinLock (&mHostContextCommon.SmiVmcallLock);
DEBUG ((EFI_D_ERROR, "ValidateBiosResourceList fail!\n"));
return ERROR_STM_MALFORMED_RESOURCE_LIST;
}
mGuestContextCommonSmm.BiosHwResourceRequirementsPtr = (UINT64)(UINTN)DuplicateResource ((STM_RSC *)(UINTN)mHostContextCommon.HostContextPerCpu[0].TxtProcessorSmmDescriptor->BiosHwResourceRequirementsPtr);
RegisterBiosResource ((STM_RSC *)(UINTN)mGuestContextCommonSmm.BiosHwResourceRequirementsPtr);
mGuestContextCommonSmm[SMI_HANDLER].BiosHwResourceRequirementsPtr = (UINT64)(UINTN)DuplicateResource ((STM_RSC *)(UINTN)mHostContextCommon.HostContextPerCpu[0].TxtProcessorSmmDescriptor->BiosHwResourceRequirementsPtr);
RegisterBiosResource ((STM_RSC *)(UINTN)mGuestContextCommonSmm[SMI_HANDLER].BiosHwResourceRequirementsPtr);
}
//
@ -153,7 +183,7 @@ SmiVmcallProtectResourceHandler (
}
DEBUG ((EFI_D_INFO, "IsResourceListValid pass!\n"));
BiosResource = (STM_RSC *)(UINTN)mGuestContextCommonSmm.BiosHwResourceRequirementsPtr;
BiosResource = (STM_RSC *)(UINTN)mGuestContextCommonSmm[SMI_HANDLER].BiosHwResourceRequirementsPtr;
if (IsResourceListOverlap (StmResource, BiosResource)) {
DEBUG ((EFI_D_ERROR, "IsResourceListOverlap fail!\n"));
RawFreeResource (LocalBuffer);
@ -253,7 +283,7 @@ SmiVmcallGetBiosResourcesHandler (
UINTN BiosResourceSize;
UINT32 PageNum;
X86_REGISTER *Reg;
Reg = &mGuestContextCommonSmi.GuestContextPerCpu[Index].Register;
// ECX:EBX - STM_RESOURCE_LIST
@ -262,14 +292,14 @@ SmiVmcallGetBiosResourcesHandler (
DEBUG ((EFI_D_INFO, "STM_API_GET_BIOS_RESOURCES:\n"));
// BiosHwResourceRequirementsPtr to local BiosResource, delay it to first ProtectResource VMCALL, because BIOS may change resource at runtime.
if (mGuestContextCommonSmm.BiosHwResourceRequirementsPtr == 0) {
if (mGuestContextCommonSmm[SMI_HANDLER].BiosHwResourceRequirementsPtr == 0) {
if (!IsResourceListValid ((STM_RSC *)(UINTN)mHostContextCommon.HostContextPerCpu[0].TxtProcessorSmmDescriptor->BiosHwResourceRequirementsPtr, FALSE)) {
DEBUG ((EFI_D_ERROR, "ValidateBiosResourceList fail!\n"));
ReleaseSpinLock (&mHostContextCommon.SmiVmcallLock);
return ERROR_STM_MALFORMED_RESOURCE_LIST;
}
mGuestContextCommonSmm.BiosHwResourceRequirementsPtr = (UINT64)(UINTN)DuplicateResource ((STM_RSC *)(UINTN)mHostContextCommon.HostContextPerCpu[0].TxtProcessorSmmDescriptor->BiosHwResourceRequirementsPtr);
RegisterBiosResource ((STM_RSC *)(UINTN)mGuestContextCommonSmm.BiosHwResourceRequirementsPtr);
mGuestContextCommonSmm[SMI_HANDLER].BiosHwResourceRequirementsPtr = (UINT64)(UINTN)DuplicateResource ((STM_RSC *)(UINTN)mHostContextCommon.HostContextPerCpu[0].TxtProcessorSmmDescriptor->BiosHwResourceRequirementsPtr);
RegisterBiosResource ((STM_RSC *)(UINTN)mGuestContextCommonSmm[SMI_HANDLER].BiosHwResourceRequirementsPtr);
}
PageNum = (UINT32)Reg->Rdx;
@ -280,7 +310,7 @@ SmiVmcallGetBiosResourcesHandler (
return ERROR_STM_SECURITY_VIOLATION;
}
BiosResource = (STM_RSC *)(UINTN)mGuestContextCommonSmm.BiosHwResourceRequirementsPtr;
BiosResource = (STM_RSC *)(UINTN)mGuestContextCommonSmm[SMI_HANDLER].BiosHwResourceRequirementsPtr;
BiosResourceSize = GetSizeFromResource (BiosResource);
if (BiosResourceSize == 0) {
ReleaseSpinLock(&mHostContextCommon.SmiVmcallLock);
@ -294,6 +324,7 @@ SmiVmcallGetBiosResourcesHandler (
if (PageNum >= STM_SIZE_TO_PAGES (BiosResourceSize)) {
WriteUnaligned32 ((UINT32 *)&Reg->Rdx, 0);
DEBUG((EFI_D_INFO, "SmiVmcallGetBiosResourcesHandler - ERROR_STM_PAGE_NOT_FOUND - writing 0 to 0x%x\n", &Reg->Rdx));
return ERROR_STM_PAGE_NOT_FOUND;
}
// Write data
@ -415,7 +446,7 @@ SmiVmcallEventNewLogHandler (
// Check if local copy matches previous PageCount
//
if (PageCount != (UINTN)EventLogRequest->Data.LogBuffer.PageCount) {
DEBUG((EFI_D_ERROR, "Security Violation!\n"));
DEBUG ((EFI_D_ERROR, "Security Violation!\n"));
return ERROR_STM_SECURITY_VIOLATION;
}
@ -687,6 +718,216 @@ SmiVmcallManageEventLogHandler (
return Status;
}
/**
  This function is the VMCALL handler for STM_API_ADD_TEMP_PE_VM.

  Creates a temporary protected-execution (PE) VM: the module described by
  the caller-supplied PE_MODULE_INFO is loaded and executed, and the VM is
  torn down afterwards (see AddPeVm with PE_TEMP).

  @param Index             CPU index
  @param AddressParameter  Guest physical address (ECX:EBX) of a caller-created
                           PE_MODULE_INFO structure

  @return VMCALL status (STM_SUCCESS, ERROR_STM_SECURITY_VIOLATION, or the
          status returned by AddPeVm)
**/
STM_STATUS
SmiVmcallAddTempPeVmHandler (
  IN UINT32 Index,
  IN UINT64 AddressParameter
  )
{
  STM_STATUS      Status;
  PE_MODULE_INFO  LocalBuffer;

  // STM_ADD_TEMP_PE
  AcquireSpinLock (&mHostContextCommon.SmiVmcallLock);
  // Fixed message: previously printed "STM_API_ADD_TEMP_VM", which is not the
  // API name defined for this vmcall (STM_API_ADD_TEMP_PE_VM, 0x00010009).
  DEBUG ((EFI_D_ERROR, "STM_API_ADD_TEMP_PE_VM:\n"));

  // Reject module-info pointers that fall outside valid guest memory.
  if (!IsGuestAddressValid ((UINTN)AddressParameter, sizeof(PE_MODULE_INFO), TRUE)) {
    DEBUG ((EFI_D_ERROR, "Security Violation!\n"));
    ReleaseSpinLock (&mHostContextCommon.SmiVmcallLock);
    return ERROR_STM_SECURITY_VIOLATION;
  }

  //
  // Copy data to local, to prevent time of check VS time of use attack
  //
  CopyMem (&LocalBuffer, (VOID *)(UINTN)AddressParameter, sizeof(LocalBuffer));
  ReleaseSpinLock (&mHostContextCommon.SmiVmcallLock);

  Status = AddPeVm (Index, &LocalBuffer, PE_TEMP, 1);  // 1 = run the PE VM now
  return Status;
}
/**
  VMCALL handler for adding a permanent protected-execution (PE) VM.

  Validates and snapshots the caller-supplied PE_MODULE_INFO, then hands it
  to AddPeVm with PE_PERM so the module persists and can later be started
  via the run-PE-VM vmcall.

  @param Index             CPU index
  @param AddressParameter  Guest physical address (ECX:EBX) of a caller-created
                           PE_MODULE_INFO structure

  @return VMCALL status
**/
STM_STATUS
SmiVmcallAddPermPeVmHandler (
  IN UINT32 Index,
  IN UINT64 AddressParameter
  )
{
  PE_MODULE_INFO  ModuleInfo;

  // - STM_ADD_PERM_PE_VM
  AcquireSpinLock (&mHostContextCommon.SmiVmcallLock);
  DEBUG ((EFI_D_ERROR, "STM_API_ADD_PERM_VM:\n"));

  // Guard clause: the module-info pointer must lie in valid guest memory.
  if (!IsGuestAddressValid ((UINTN)AddressParameter, sizeof(PE_MODULE_INFO), TRUE)) {
    DEBUG ((EFI_D_ERROR, "Security Violation!\n"));
    ReleaseSpinLock (&mHostContextCommon.SmiVmcallLock);
    return ERROR_STM_SECURITY_VIOLATION;
  }

  //
  // Snapshot the structure while the lock is held so a concurrent guest
  // cannot alter it between validation and use (TOCTOU).
  //
  CopyMem (&ModuleInfo, (VOID *)(UINTN)AddressParameter, sizeof(ModuleInfo));
  ReleaseSpinLock (&mHostContextCommon.SmiVmcallLock);

  return AddPeVm (Index, &ModuleInfo, PE_PERM, 1);  // 1 = run the PE VM now
}
/**
  VMCALL handler for adding a permanent protected-execution (PE) VM without
  starting it.

  Identical to the add-permanent-PE-VM handler except that AddPeVm is told
  not to run the VM; the module is only loaded and registered.

  @param Index             CPU index
  @param AddressParameter  Guest physical address (ECX:EBX) of a caller-created
                           PE_MODULE_INFO structure

  @return VMCALL status
**/
STM_STATUS
SmiVmcallAddPermPeVmNoRunHandler (
  IN UINT32 Index,
  IN UINT64 AddressParameter
  )
{
  PE_MODULE_INFO  ModuleInfo;

  // - STM_ADD_PERM_PE_VM
  AcquireSpinLock (&mHostContextCommon.SmiVmcallLock);
  DEBUG ((EFI_D_ERROR, "%ld SmiVmcallAddPermPeVmNoRunHandler - STM_API_ADD_PERM_VM_NO_RUN:\n", Index));

  // Guard clause: refuse pointers outside valid guest memory.
  if (!IsGuestAddressValid ((UINTN)AddressParameter, sizeof(PE_MODULE_INFO), TRUE)) {
    DEBUG ((EFI_D_ERROR, "%ld SmiVmcallAddPermPeVmNoRunHandler - Security Violation!\n", Index));
    ReleaseSpinLock (&mHostContextCommon.SmiVmcallLock);
    return ERROR_STM_SECURITY_VIOLATION;
  }

  //
  // Snapshot the structure under the lock to avoid a time-of-check vs
  // time-of-use race with the guest.
  //
  CopyMem (&ModuleInfo, (VOID *)(UINTN)AddressParameter, sizeof(ModuleInfo));
  ReleaseSpinLock (&mHostContextCommon.SmiVmcallLock);

  return AddPeVm (Index, &ModuleInfo, PE_PERM, 0);  // 0 = do not run the PE/VM
}
/**
  This function is VMCALL handler for SMI.

  STM_API_RUN_PE_VM: starts the previously added permanent PE VM by setting
  its start mode and invoking RunPermVM.

  @param Index CPU index
  @param AddressParameter Address parameter (guest physical address in ECX:EBX)

  @return VMCALL status
**/
STM_STATUS
SmiVmcallRunPeVmHandler (
IN UINT32 Index,
IN UINT64 AddressParameter
)
{
STM_STATUS Status;
// NOTE(review): LocalBuffer is filled below but never consumed by this
// handler — presumably kept for parity with the other PE vmcalls; confirm.
PE_MODULE_INFO LocalBuffer;
// Only the permanent PE VM slot can be run via this vmcall.
UINT32 PeType = PE_PERM;
// ECX:EBX - STM_VMCS_DATABASE_REQUEST
AcquireSpinLock (&mHostContextCommon.SmiVmcallLock);
DEBUG ((EFI_D_ERROR, " %ld STM_API_RUN_PERM_VM:\n", Index));
// Validate the caller-supplied guest address before reading through it.
if (!IsGuestAddressValid ((UINTN)AddressParameter, sizeof(PE_MODULE_INFO), TRUE)) {
DEBUG ((EFI_D_ERROR, " %ld Security Violation!\n", Index));
ReleaseSpinLock (&mHostContextCommon.SmiVmcallLock);
return ERROR_STM_SECURITY_VIOLATION;
}
//
// Copy data to local, to prevent time of check VS time of use attack
//
CopyMem (&LocalBuffer, (VOID *)(UINTN)AddressParameter, sizeof(LocalBuffer));
ReleaseSpinLock (&mHostContextCommon.SmiVmcallLock);
// provide the root state for the measurement VM
//GetRootVmxState(StmVmm, (ROOT_VMX_STATE *) ptData[CR3index].ShareModuleStm);
// NOTE(review): PeVmData[PeType] is written after the vmcall lock is
// released — verify no concurrent writer of StartMode exists.
PeVmData[PeType].StartMode = PEVM_START_VMCALL;
RunPermVM(Index);
Status = STM_SUCCESS;
return Status;
}
/**
  This function is VMCALL handler for SMI.

  STM_API_END_ADD_PERM_PE_VM: intended to turn off the ability to add a
  permanent PE VM. Currently a stub — it validates and copies the caller's
  structure but performs no state change (see the "not implemented" debug
  message below) and always returns STM_SUCCESS.

  @param Index CPU index
  @param AddressParameter Address parameter (guest physical address in ECX:EBX)

  @return VMCALL status
**/
STM_STATUS
SmiVmcallEndPermVmHandler (
IN UINT32 Index,
IN UINT64 AddressParameter
)
{
STM_STATUS Status;
// NOTE(review): LocalBuffer is copied below but never used — placeholder
// until the END_PERM_VM behavior is implemented.
PE_MODULE_INFO LocalBuffer;
// ECX:EBX - STM_VMCS_DATABASE_REQUEST
AcquireSpinLock (&mHostContextCommon.SmiVmcallLock);
DEBUG ((EFI_D_ERROR, "STM_API_END_PERM_VM:\n"));
// Validate the caller-supplied guest address before reading through it.
if (!IsGuestAddressValid ((UINTN)AddressParameter, sizeof(PE_MODULE_INFO), TRUE)) {
DEBUG ((EFI_D_ERROR, "Security Violation!\n"));
ReleaseSpinLock (&mHostContextCommon.SmiVmcallLock);
return ERROR_STM_SECURITY_VIOLATION;
}
DEBUG ((EFI_D_ERROR, "STM_API_END_PERM_VM - not implemented\n"));
//
// Copy data to local, to prevent time of check VS time of use attack
//
CopyMem (&LocalBuffer, (VOID *)(UINTN)AddressParameter, sizeof(LocalBuffer));
Status = STM_SUCCESS;
ReleaseSpinLock (&mHostContextCommon.SmiVmcallLock);
return Status;
}
STM_VMCALL_HANDLER_STRUCT mSmiVmcallHandler[] = {
{STM_API_START, SmiVmcallStartHandler},
{STM_API_STOP, SmiVmcallStopHandler},
@ -696,6 +937,11 @@ STM_VMCALL_HANDLER_STRUCT mSmiVmcallHandler[] = {
{STM_API_MANAGE_VMCS_DATABASE, SmiVmcallManageVmcsDatabaseHandler},
{STM_API_INITIALIZE_PROTECTION, SmiVmcallInitializeProtectionHandler},
{STM_API_MANAGE_EVENT_LOG, SmiVmcallManageEventLogHandler},
{STM_API_ADD_TEMP_PE_VM, SmiVmcallAddTempPeVmHandler},
{STM_API_ADD_PERM_PE_VM, SmiVmcallAddPermPeVmHandler},
{STM_API_ADD_PERM_PE_VM_NORUN, SmiVmcallAddPermPeVmNoRunHandler},
{STM_API_RUN_PE_VM, SmiVmcallRunPeVmHandler},
{STM_API_END_ADD_PERM_PE_VM, SmiVmcallEndPermVmHandler}
};
/**
@ -742,13 +988,17 @@ SmiVmcallHandler (
StmVmcallHandler = GetSmiVmcallHandlerByIndex (ReadUnaligned32 ((UINT32 *)&Reg->Rax));
if (StmVmcallHandler == NULL) {
DEBUG ((EFI_D_INFO, "GetSmiVmcallHandlerByIndex - %x!\n", (UINTN)ReadUnaligned32 ((UINT32 *)&Reg->Rax)));
// Should not happen
DEBUG ((EFI_D_INFO, "%ld SmiVmcallHandler - GetSmiVmcallHandlerByIndex - %x!\n", Index, (UINTN)ReadUnaligned32 ((UINT32 *)&Reg->Rax)));
DumpVmcsAllField ();
DEBUG ((EFI_D_ERROR, "%ld SmiVmcallHandler - ***Error*** Halting STM\n", Index));
// Should not happen
CpuDeadLoop ();
Status = ERROR_INVALID_API;
} else {
AddressParameter = ReadUnaligned32 ((UINT32 *)&Reg->Rbx) + LShiftU64 (ReadUnaligned32 ((UINT32 *)&Reg->Rcx), 32);
Status = StmVmcallHandler (Index, AddressParameter);
DEBUG((EFI_D_ERROR, " %ld SmiVmcallHandler done, Status: %x\n", Index, Status));
}
if (Status == STM_SUCCESS) {

View File

@ -13,6 +13,7 @@
**/
#include "StmRuntime.h"
#include "PeStm.h"
/**
@ -27,8 +28,9 @@ SmmCpuidHandler (
)
{
X86_REGISTER *Reg;
UINT32 VmType = SMI_HANDLER;
Reg = &mGuestContextCommonSmm.GuestContextPerCpu[Index].Register;
Reg = &mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Register;
AsmCpuidEx (
ReadUnaligned32 ((UINT32 *)&Reg->Rax),

View File

@ -13,6 +13,7 @@
**/
#include "StmRuntime.h"
#include "PeStm.h"
/**
@ -42,16 +43,30 @@ SmmCrHandler (
UINTN *GptRegPtr;
VM_ENTRY_CONTROLS VmEntryControls;
STM_REGISTER_VIOLATION_DESC RegisterViolation;
UINT32 VmType;
UINT32 cIndex = Index;
VmType = mHostContextCommon.HostContextPerCpu[Index].GuestVmType; // any VmType other than SMI_HANDLER is a PeVm
if(SMI_HANDLER != VmType)
Index = 0; // PE VM index is always 0
Qualification.UintN = VmReadN (VMCS_N_RO_EXIT_QUALIFICATION_INDEX);
GptRegPtr = (UINTN *)&mGuestContextCommonSmm.GuestContextPerCpu[Index].Register;
GptRegPtr = (UINTN *)&mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Register;
DEBUG((EFI_D_ERROR, "%ld SmmCrHandler - CrNum %d AccessType %d GptRegPtr[%d] 0x%llx\n",
cIndex,
Qualification.CrAccess.CrNum,
Qualification.CrAccess.AccessType,
Qualification.CrAccess.GpReg,
GptRegPtr[Qualification.CrAccess.GpReg]));
switch (Qualification.CrAccess.CrNum) {
case 3: // Cr3
if (Qualification.CrAccess.AccessType == 0) { // MOV to CR
if ((!mGuestContextCommonSmm.GuestContextPerCpu[Index].UnrestrictedGuest) &&
((mGuestContextCommonSmm.GuestContextPerCpu[Index].Cr0 & CR0_PG) == 0)) {
if ((!mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].UnrestrictedGuest) &&
((mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Cr0 & CR0_PG) == 0)) {
//
// Need cache current Setting
//
@ -60,22 +75,22 @@ SmmCrHandler (
}
// special for EPT
Ia32PAESync (Index);
Ia32PAESync (cIndex);
//
// Save current data as old data
//
mGuestContextCommonSmm.GuestContextPerCpu[Index].Cr3 = GptRegPtr[Qualification.CrAccess.GpReg];
mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Cr3 = GptRegPtr[Qualification.CrAccess.GpReg];
goto Ret;
} else if (Qualification.CrAccess.AccessType == 1) { // MOV from CR
GptRegPtr[Qualification.CrAccess.GpReg] = mGuestContextCommonSmm.GuestContextPerCpu[Index].Cr3;
GptRegPtr[Qualification.CrAccess.GpReg] = mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Cr3;
goto Ret;
}
break;
case 0: // Cr0
if (Qualification.CrAccess.AccessType == 0) { // MOV to CR
if ((!mGuestContextCommonSmm.GuestContextPerCpu[Index].UnrestrictedGuest) &&
if ((!mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].UnrestrictedGuest) &&
((GptRegPtr[Qualification.CrAccess.GpReg] & CR0_PE) == 0)) {
//
// Disabling PE is not support when UnrestrictedGuest is OFF.
@ -84,7 +99,7 @@ SmmCrHandler (
// However, this can be supported if we launch VM86 in STM in the future.
// Moreover, SMM guest can use VM86 mode to run INT10Thunk, so disabling PE is not needed.
//
DEBUG ((EFI_D_ERROR, "CR violation!\n"));
DEBUG ((EFI_D_ERROR, "%ld SmmCrHandler - CR violation!\n", cIndex));
ZeroMem (&RegisterViolation, sizeof(RegisterViolation));
RegisterViolation.Hdr.RscType = REGISTER_VIOLATION;
RegisterViolation.Hdr.Length = sizeof(RegisterViolation);
@ -101,20 +116,21 @@ SmmCrHandler (
// Check IA32e mode switch
//
VmEntryControls.Uint32 = VmRead32 (VMCS_32_CONTROL_VMENTRY_CONTROLS_INDEX);
if (((mGuestContextCommonSmm.GuestContextPerCpu[Index].Efer & IA32_EFER_MSR_MLE) != 0)&&
if (((mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Efer & IA32_EFER_MSR_MLE) != 0)&&
((GptRegPtr[Qualification.CrAccess.GpReg] & CR0_PG) != 0)) {
mGuestContextCommonSmm.GuestContextPerCpu[Index].Efer |= IA32_EFER_MSR_MLA;
DEBUG ((EFI_D_INFO, "%ld SmmCrHandler - MLA1\n", cIndex));
mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Efer |= IA32_EFER_MSR_MLA;
VmEntryControls.Bits.Ia32eGuest = 1;
} else {
mGuestContextCommonSmm.GuestContextPerCpu[Index].Efer &= ~IA32_EFER_MSR_MLA;
mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Efer &= ~IA32_EFER_MSR_MLA;
VmEntryControls.Bits.Ia32eGuest = 0;
}
VmWrite32 (VMCS_32_CONTROL_VMENTRY_CONTROLS_INDEX, VmEntryControls.Uint32);
VmWrite64 (VMCS_64_GUEST_IA32_EFER_INDEX, mGuestContextCommonSmm.GuestContextPerCpu[Index].Efer);
VmWrite64 (VMCS_64_GUEST_IA32_EFER_INDEX, mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Efer);
// check CD
if (GptRegPtr[Qualification.CrAccess.GpReg] & CR0_CD) {
// DEBUG ((EFI_D_INFO, "!!!CrHandler - Cr0: CD!!!\n"));
DEBUG ((EFI_D_INFO, "%ld SmmCrHandler - Cr0: CD!!!\n", cIndex));
AsmWbinvd ();
}
@ -124,54 +140,55 @@ SmmCrHandler (
//
// Check UnrestrictedGuest
//
if (!mGuestContextCommonSmm.GuestContextPerCpu[Index].UnrestrictedGuest) {
if (!mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].UnrestrictedGuest) {
//
// Need check PG and PE
//
if (((mGuestContextCommonSmm.GuestContextPerCpu[Index].Cr0 & CR0_PG) != 0) &&
if (((mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Cr0 & CR0_PG) != 0) &&
((GptRegPtr[Qualification.CrAccess.GpReg] & CR0_PG) == 0)) {
//
// Disable Paging, but still PE, or disable PE at same time.
//
if ((GptRegPtr[Qualification.CrAccess.GpReg] & CR0_PE) != 0) {
// DEBUG ((EFI_D_INFO, "-PG"));
DEBUG ((EFI_D_INFO, "%ld SmmCrHandler - -PG\n", cIndex));
} else {
// DEBUG ((EFI_D_INFO, "-PGE"));
DEBUG ((EFI_D_INFO, "%ld SmmCrHandler - -PGE\n", cIndex));
}
mGuestContextCommonSmm.GuestContextPerCpu[Index].Cr3 = VmReadN (VMCS_N_GUEST_CR3_INDEX);
mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Cr3 = VmReadN (VMCS_N_GUEST_CR3_INDEX);
if ((VmReadN (VMCS_N_GUEST_CR4_INDEX) & CR4_PAE) != 0) {
VmWrite64 (VMCS_64_GUEST_IA32_EFER_INDEX, VmRead64 (VMCS_64_GUEST_IA32_EFER_INDEX) & ~IA32_EFER_MSR_MLE);
mGuestContextCommonSmm.GuestContextPerCpu[Index].Cr4 = VmReadN (VMCS_N_GUEST_CR4_INDEX) & ~CR4_VMXE & ~CR4_SMXE;
mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Cr4 = VmReadN (VMCS_N_GUEST_CR4_INDEX) & ~CR4_VMXE & ~CR4_SMXE;
VmWriteN (VMCS_N_GUEST_CR4_INDEX, VmReadN (VMCS_N_GUEST_CR4_INDEX) & ~CR4_PAE);
VmWriteN (VMCS_N_GUEST_CR3_INDEX, mGuestContextCommonSmm.CompatiblePageTable);
VmWriteN (VMCS_N_GUEST_CR3_INDEX, mGuestContextCommonSmm[VmType].CompatiblePageTable);
} else {
VmWriteN (VMCS_N_GUEST_CR3_INDEX, mGuestContextCommonSmm.CompatiblePageTable);
VmWriteN (VMCS_N_GUEST_CR3_INDEX, mGuestContextCommonSmm[VmType].CompatiblePageTable);
}
VmWriteN (VMCS_N_GUEST_CR0_INDEX, VmReadN (VMCS_N_GUEST_CR0_INDEX) | CR0_PG | CR0_PE);
if ((GptRegPtr[Qualification.CrAccess.GpReg] & CR0_PE) != 0) {
VmWriteN (VMCS_N_CONTROL_CR0_READ_SHADOW_INDEX, VmReadN (VMCS_N_CONTROL_CR0_READ_SHADOW_INDEX) & ~CR0_PG);
} else {
mGuestContextCommonSmm.GuestContextPerCpu[Index].Cr0 = GptRegPtr[Qualification.CrAccess.GpReg];
mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Cr0 = GptRegPtr[Qualification.CrAccess.GpReg];
VmWriteN (VMCS_N_CONTROL_CR0_READ_SHADOW_INDEX, VmReadN (VMCS_N_CONTROL_CR0_READ_SHADOW_INDEX) & ~CR0_PE & ~CR0_PG);
// LaunchVm86Monitor (Index);
CpuDeadLoop (); // never returned
}
} else if (((mGuestContextCommonSmm.GuestContextPerCpu[Index].Cr0 & CR0_PG) == 0) &&
} else if (((mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Cr0 & CR0_PG) == 0) &&
((GptRegPtr[Qualification.CrAccess.GpReg] & CR0_PG) != 0)) {
//
// Enable Paging, from PE
//
// DEBUG ((EFI_D_INFO, "+PG"));
DEBUG ((EFI_D_INFO, "%ld SmmCrHandler - +PG\n", cIndex));
VmWriteN (VMCS_N_GUEST_CR0_INDEX, VmReadN (VMCS_N_GUEST_CR0_INDEX) | CR0_PG | CR0_PE);
VmWriteN (VMCS_N_GUEST_CR3_INDEX, mGuestContextCommonSmm.GuestContextPerCpu[Index].Cr3);
VmWriteN (VMCS_N_GUEST_CR4_INDEX, mGuestContextCommonSmm.GuestContextPerCpu[Index].Cr4 | (UINTN)(AsmReadMsr64 (IA32_VMX_CR4_FIXED0_MSR_INDEX) & AsmReadMsr64 (IA32_VMX_CR4_FIXED1_MSR_INDEX)));
VmWriteN (VMCS_N_GUEST_CR3_INDEX, mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Cr3);
VmWriteN (VMCS_N_GUEST_CR4_INDEX, mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Cr4 | (UINTN)(AsmReadMsr64 (IA32_VMX_CR4_FIXED0_MSR_INDEX) & AsmReadMsr64 (IA32_VMX_CR4_FIXED1_MSR_INDEX)));
VmWrite64 (VMCS_64_GUEST_IA32_EFER_INDEX, mGuestContextCommonSmm.GuestContextPerCpu[Index].Efer);
if ((mGuestContextCommonSmm.GuestContextPerCpu[Index].Efer & IA32_EFER_MSR_MLE) != 0) {
mGuestContextCommonSmm.GuestContextPerCpu[Index].Efer |= IA32_EFER_MSR_MLA;
VmWrite64 (VMCS_64_GUEST_IA32_EFER_INDEX, mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Efer);
if ((mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Efer & IA32_EFER_MSR_MLE) != 0) {
DEBUG ((EFI_D_INFO, "%ld SmmCrHandler - MLA2\n", cIndex));
mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Efer |= IA32_EFER_MSR_MLA;
VmEntryControls.Uint32 = VmRead32 (VMCS_32_CONTROL_VMENTRY_CONTROLS_INDEX);
VmEntryControls.Bits.Ia32eGuest = 1;
VmWrite32 (VMCS_32_CONTROL_VMENTRY_CONTROLS_INDEX, VmEntryControls.Uint32);
@ -179,19 +196,19 @@ SmmCrHandler (
VmWriteN (VMCS_N_CONTROL_CR4_READ_SHADOW_INDEX, VmReadN (VMCS_N_GUEST_CR4_INDEX) & ~CR4_VMXE & ~CR4_SMXE);
} else if (((mGuestContextCommonSmm.GuestContextPerCpu[Index].Cr0 & CR0_PE) != 0) &&
} else if (((mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Cr0 & CR0_PE) != 0) &&
((GptRegPtr[Qualification.CrAccess.GpReg] & CR0_PE) == 0)) {
//
// Disable protection
//
// DEBUG ((EFI_D_INFO, "-PE"));
mGuestContextCommonSmm.GuestContextPerCpu[Index].Cr0 = GptRegPtr[Qualification.CrAccess.GpReg];
DEBUG ((EFI_D_INFO, "%ld SmmCrHandler - -PE\n", cIndex));
mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Cr0 = GptRegPtr[Qualification.CrAccess.GpReg];
VmWriteN (VMCS_N_GUEST_CR0_INDEX, VmReadN (VMCS_N_GUEST_CR0_INDEX) | CR0_PG | CR0_PE);
VmWriteN (VMCS_N_CONTROL_CR0_READ_SHADOW_INDEX, VmReadN (VMCS_N_CONTROL_CR0_READ_SHADOW_INDEX) & ~CR0_PE & ~CR0_PG);
// LaunchVm86Monitor (Index);
CpuDeadLoop (); // never returned
} else if (((mGuestContextCommonSmm.GuestContextPerCpu[Index].Cr0 & CR0_PE) == 0) &&
} else if (((mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Cr0 & CR0_PE) == 0) &&
((GptRegPtr[Qualification.CrAccess.GpReg] & CR0_PE) != 0)) {
//
// Enable protection
@ -204,10 +221,10 @@ SmmCrHandler (
//
// Save current data as old data
//
mGuestContextCommonSmm.GuestContextPerCpu[Index].Cr0 = GptRegPtr[Qualification.CrAccess.GpReg];
mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Cr0 = GptRegPtr[Qualification.CrAccess.GpReg];
// special for EPT
Ia32PAESync (Index);
Ia32PAESync (cIndex);
goto Ret;
#if 0
@ -219,8 +236,8 @@ SmmCrHandler (
break;
case 4: // Cr4
if (Qualification.CrAccess.AccessType == 0) { // MOV to CR
if ((!mGuestContextCommonSmm.GuestContextPerCpu[Index].UnrestrictedGuest) &&
((mGuestContextCommonSmm.GuestContextPerCpu[Index].Cr0 & CR0_PG) == 0)) {
if ((!mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].UnrestrictedGuest) &&
((mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Cr0 & CR0_PG) == 0)) {
//
// Need cache current Setting
//
@ -234,7 +251,7 @@ SmmCrHandler (
//
// Save current data as old data
//
mGuestContextCommonSmm.GuestContextPerCpu[Index].Cr4 = GptRegPtr[Qualification.CrAccess.GpReg];
mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Cr4 = GptRegPtr[Qualification.CrAccess.GpReg];
goto Ret;
#if 0
} else if (Qualification.CrAccess.AccessType == 1) { // MOV from CR
@ -248,7 +265,7 @@ SmmCrHandler (
break;
}
DEBUG ((EFI_D_INFO, "!!!CrAccessHandler!!!\n"));
DEBUG ((EFI_D_INFO, "%ld SmmCrHandler - !!!CrAccessHandler!!!\n", cIndex));
DumpVmcsAllField ();
CpuDeadLoop ();

File diff suppressed because it is too large Load Diff

View File

@ -13,6 +13,7 @@
**/
#include "StmRuntime.h"
#include "PeStm.h"
STM_HANDLER mStmHandlerSmm[VmExitReasonMax];
@ -32,18 +33,18 @@ InitStmHandlerSmm (
mStmHandlerSmm[Index] = UnknownHandlerSmm;
}
mStmHandlerSmm[VmExitReasonRsm] = RsmHandler;
mStmHandlerSmm[VmExitReasonVmCall] = SmmVmcallHandler;
mStmHandlerSmm[VmExitReasonExceptionNmi] = SmmExceptionHandler;
mStmHandlerSmm[VmExitReasonRsm] = RsmHandler;
mStmHandlerSmm[VmExitReasonVmCall] = SmmVmcallHandler;
mStmHandlerSmm[VmExitReasonExceptionNmi] = SmmExceptionHandler;
mStmHandlerSmm[VmExitReasonCrAccess] = SmmCrHandler;
mStmHandlerSmm[VmExitReasonEptViolation] = SmmEPTViolationHandler;
mStmHandlerSmm[VmExitReasonEptMisConfiguration] = SmmEPTMisconfigurationHandler;
mStmHandlerSmm[VmExitReasonInvEpt] = SmmInvEPTHandler;
mStmHandlerSmm[VmExitReasonIoInstruction] = SmmIoHandler;
mStmHandlerSmm[VmExitReasonCpuid] = SmmCpuidHandler;
mStmHandlerSmm[VmExitReasonRdmsr] = SmmReadMsrHandler;
mStmHandlerSmm[VmExitReasonWrmsr] = SmmWriteMsrHandler;
mStmHandlerSmm[VmExitReasonCrAccess] = SmmCrHandler;
mStmHandlerSmm[VmExitReasonEptViolation] = SmmEPTViolationHandler;
mStmHandlerSmm[VmExitReasonEptMisConfiguration] = SmmEPTMisconfigurationHandler;
mStmHandlerSmm[VmExitReasonInvEpt] = SmmInvEPTHandler;
mStmHandlerSmm[VmExitReasonIoInstruction] = SmmIoHandler;
mStmHandlerSmm[VmExitReasonCpuid] = SmmCpuidHandler;
mStmHandlerSmm[VmExitReasonRdmsr] = SmmReadMsrHandler;
mStmHandlerSmm[VmExitReasonWrmsr] = SmmWriteMsrHandler;
mStmHandlerSmm[VmExitReasonInvd] = SmmInvdHandler;
mStmHandlerSmm[VmExitReasonWbinvd] = SmmWbinvdHandler;
mStmHandlerSmm[VmExitReasonTaskSwitch] = SmmTaskSwitchHandler;
@ -65,7 +66,8 @@ UnknownHandlerSmm (
DEBUG ((EFI_D_ERROR, "!!!UnknownHandlerSmm - %d\n", (UINTN)Index));
DumpVmcsAllField ();
DumpRegContext(&mGuestContextCommonSmm.GuestContextPerCpu[Index].Register);
DumpRegContext(&mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[Index].Register);
DumpGuestStack(Index);
{
UINT8 *Buffer;
@ -103,14 +105,21 @@ StmHandlerSmm (
UINTN Rflags;
VM_EXIT_INFO_BASIC InfoBasic;
X86_REGISTER *Reg;
UINT32 VmType;
UINT32 pIndex;
Index = ApicToIndex (ReadLocalApicId ());
VmType = mHostContextCommon.HostContextPerCpu[Index].GuestVmType; // any VmType other than SMI_HANDLER is a PeVm
if(VmType != SMI_HANDLER)
pIndex = 0; // PeVm always have index 0
else
pIndex = Index;
STM_PERF_END (Index, "BiosSmmHandler", "StmHandlerSmm");
Reg = &mGuestContextCommonSmm.GuestContextPerCpu[Index].Register;
Reg = &mGuestContextCommonSmm[VmType].GuestContextPerCpu[pIndex].Register;
Register->Rsp = VmReadN (VMCS_N_GUEST_RSP_INDEX);
CopyMem (Reg, Register, sizeof(X86_REGISTER));
CopyMem (Reg, Register, sizeof(X86_REGISTER));//
#if 0
DEBUG ((EFI_D_INFO, "!!!StmHandlerSmm - %d\n", (UINTN)Index));
#endif
@ -136,11 +145,11 @@ StmHandlerSmm (
//
// Resume
//
Rflags = AsmVmResume (&mGuestContextCommonSmm.GuestContextPerCpu[Index].Register);
Rflags = AsmVmResume (&mGuestContextCommonSmm[VmType].GuestContextPerCpu[pIndex].Register);
// BUGBUG: - AsmVmLaunch if AsmVmResume fail
if (VmRead32 (VMCS_32_RO_VM_INSTRUCTION_ERROR_INDEX) == VmxFailErrorVmResumeWithNonLaunchedVmcs) {
// DEBUG ((EFI_D_ERROR, "(STM):-(\n", (UINTN)Index));
Rflags = AsmVmLaunch (&mGuestContextCommonSmm.GuestContextPerCpu[Index].Register);
Rflags = AsmVmLaunch (&mGuestContextCommonSmm[VmType].GuestContextPerCpu[pIndex].Register);
}
AcquireSpinLock (&mHostContextCommon.DebugLock);
@ -149,8 +158,8 @@ StmHandlerSmm (
DEBUG ((EFI_D_ERROR, "Rflags: %08x\n", Rflags));
DEBUG ((EFI_D_ERROR, "VMCS_32_RO_VM_INSTRUCTION_ERROR: %08x\n", (UINTN)VmRead32 (VMCS_32_RO_VM_INSTRUCTION_ERROR_INDEX)));
DumpVmcsAllField ();
DumpRegContext (&mGuestContextCommonSmm.GuestContextPerCpu[Index].Register);
DumpRegContext (&mGuestContextCommonSmm[VmType].GuestContextPerCpu[pIndex].Register);
DumpGuestStack(Index);
ReleaseSpinLock (&mHostContextCommon.DebugLock);
CpuDeadLoop ();

View File

@ -13,6 +13,7 @@
**/
#include "StmRuntime.h"
#include "PeStm.h"
#define BUS_FROM_CF8_ADDRESS(PciAddress) (UINT8)(((UINTN)(PciAddress) & 0x00FF0000) >> 16)
#define DEVICE_FROM_CF8_ADDRESS(PciAddress) (UINT8)(((UINTN)(PciAddress) & 0x0000F800) >> 13)
@ -57,8 +58,9 @@ SmmIoHandler (
STM_RSC_IO_DESC LocalIoDesc;
STM_RSC_PCI_CFG_DESC *LocalPciCfgDescPtr;
UINT8 LocalPciCfgDescBuf[STM_LOG_ENTRY_SIZE];
UINT32 VmType = SMI_HANDLER;
Reg = &mGuestContextCommonSmm.GuestContextPerCpu[Index].Register;
Reg = &mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Register;
Qualification.UintN = VmReadN (VMCS_N_RO_EXIT_QUALIFICATION_INDEX);
@ -82,7 +84,7 @@ SmmIoHandler (
CpuDeadLoop ();
}
IoDesc = GetStmResourceIo ((STM_RSC *)(UINTN)mGuestContextCommonSmm.BiosHwResourceRequirementsPtr, Port);
IoDesc = GetStmResourceIo ((STM_RSC *)(UINTN)mGuestContextCommonSmm[VmType].BiosHwResourceRequirementsPtr, Port);
if (IoDesc == NULL) {
ZeroMem (&LocalIoDesc, sizeof(LocalIoDesc));
LocalIoDesc.Hdr.RscType = IO_RANGE;
@ -100,7 +102,7 @@ SmmIoHandler (
//
// We need make sure PciAddress access and PciData access is atomic.
//
//
AcquireSpinLock (&mHostContextCommon.PciLock);
}
if ((Port >= 0xCFC) && (Port <= 0xCFF)) {
@ -128,7 +130,7 @@ SmmIoHandler (
}
PciCfgDesc = GetStmResourcePci (
(STM_RSC *)(UINTN)mGuestContextCommonSmm.BiosHwResourceRequirementsPtr,
(STM_RSC *)(UINTN)mGuestContextCommonSmm[VmType].BiosHwResourceRequirementsPtr,
BUS_FROM_CF8_ADDRESS(PciAddress),
DEVICE_FROM_CF8_ADDRESS(PciAddress),
FUNCTION_FROM_CF8_ADDRESS(PciAddress),
@ -136,7 +138,14 @@ SmmIoHandler (
(Qualification.IoInstruction.Direction != 0) ? STM_RSC_PCI_CFG_R : STM_RSC_PCI_CFG_W
);
if (PciCfgDesc == NULL) {
DEBUG((EFI_D_ERROR, "Add unclaimed PCI_RSC!\n"));
DEBUG((EFI_D_ERROR, "Add unclaimed PCI_RSC!: Port: 0x%x PciAddress 0x%x Bus: 0x%x Device: 0x%x Function: 0x%x Register: 0x%x Direction: 0x%x\n",
Port,
PciAddress,
BUS_FROM_CF8_ADDRESS(PciAddress),
DEVICE_FROM_CF8_ADDRESS(PciAddress),
FUNCTION_FROM_CF8_ADDRESS(PciAddress),
REGISTER_FROM_CF8_ADDRESS(PciAddress) + (Port & 0x3),
(Qualification.IoInstruction.Direction != 0) ? STM_RSC_PCI_CFG_R : STM_RSC_PCI_CFG_W));
LocalPciCfgDescPtr = (STM_RSC_PCI_CFG_DESC *)LocalPciCfgDescBuf;
ZeroMem (LocalPciCfgDescBuf, sizeof(LocalPciCfgDescBuf));
LocalPciCfgDescPtr->Hdr.RscType = PCI_CFG_RANGE;
@ -159,7 +168,7 @@ SmmIoHandler (
UINT64 RcxMask;
RcxMask = 0xFFFFFFFFFFFFFFFFull;
if ((mGuestContextCommonSmm.GuestContextPerCpu[Index].Efer & IA32_EFER_MSR_MLA) == 0) {
if ((mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Efer & IA32_EFER_MSR_MLA) == 0) {
RcxMask = 0xFFFFFFFFull;
}
if ((Reg->Rcx & RcxMask) == 0) {

View File

@ -13,6 +13,7 @@
**/
#include "StmRuntime.h"
#include "PeStm.h"
/**
@ -31,9 +32,12 @@ SmmReadMsrHandler (
X86_REGISTER *Reg;
STM_RSC_MSR_DESC *MsrDesc;
STM_RSC_MSR_DESC LocalMsrDesc;
STM_SMM_CPU_STATE *SmmCpuState;
UINT32 VmType = SMI_HANDLER;
BOOLEAN Result;
Reg = &mGuestContextCommonSmm.GuestContextPerCpu[Index].Register;
SmmCpuState = mGuestContextCommonSmi.GuestContextPerCpu[Index].SmmCpuState;
Reg = &mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Register;
MsrIndex = ReadUnaligned32 ((UINT32 *)&Reg->Rcx);
MsrDesc = GetStmResourceMsr (mHostContextCommon.MleProtectedResource.Base, MsrIndex);
@ -44,7 +48,7 @@ SmmReadMsrHandler (
CpuDeadLoop ();
}
MsrDesc = GetStmResourceMsr ((STM_RSC *)(UINTN)mGuestContextCommonSmm.BiosHwResourceRequirementsPtr, MsrIndex);
MsrDesc = GetStmResourceMsr ((STM_RSC *)(UINTN)mGuestContextCommonSmm[VmType].BiosHwResourceRequirementsPtr, MsrIndex);
if ((MsrDesc == NULL) || (MsrDesc->ReadMask == 0) || (MsrDesc->KernelModeProcessing == 0)) {
ZeroMem (&LocalMsrDesc, sizeof(LocalMsrDesc));
LocalMsrDesc.Hdr.RscType = MACHINE_SPECIFIC_REG;
@ -59,7 +63,7 @@ SmmReadMsrHandler (
switch (MsrIndex) {
case IA32_EFER_MSR_INDEX:
Data64 = VmRead64 (VMCS_64_GUEST_IA32_EFER_INDEX);
Data64 = mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Efer;
break;
case IA32_SYSENTER_CS_MSR_INDEX:
@ -125,9 +129,13 @@ SmmWriteMsrHandler (
X86_REGISTER *Reg;
STM_RSC_MSR_DESC *MsrDesc;
STM_RSC_MSR_DESC LocalMsrDesc;
BOOLEAN Result;
BOOLEAN Result;
STM_SMM_CPU_STATE *SmmCpuState;
UINT32 VmType = SMI_HANDLER;
Reg = &mGuestContextCommonSmm.GuestContextPerCpu[Index].Register;
SmmCpuState = mGuestContextCommonSmi.GuestContextPerCpu[Index].SmmCpuState;
Reg = &mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Register;
MsrIndex = ReadUnaligned32 ((UINT32 *)&Reg->Rcx);
MsrDesc = GetStmResourceMsr (mHostContextCommon.MleProtectedResource.Base, MsrIndex);
@ -138,7 +146,7 @@ SmmWriteMsrHandler (
CpuDeadLoop ();
}
MsrDesc = GetStmResourceMsr ((STM_RSC *)(UINTN)mGuestContextCommonSmm.BiosHwResourceRequirementsPtr, MsrIndex);
MsrDesc = GetStmResourceMsr ((STM_RSC *)(UINTN)mGuestContextCommonSmm[VmType].BiosHwResourceRequirementsPtr, MsrIndex);
if ((MsrDesc == NULL) || (MsrDesc->WriteMask == 0) || (MsrDesc->KernelModeProcessing == 0)) {
ZeroMem (&LocalMsrDesc, sizeof(LocalMsrDesc));
LocalMsrDesc.Hdr.RscType = MACHINE_SPECIFIC_REG;
@ -164,26 +172,26 @@ SmmWriteMsrHandler (
}
ReleaseSpinLock (&mHostContextCommon.DebugLock);
#endif
mGuestContextCommonSmm.GuestContextPerCpu[Index].Efer = Data64;
mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Efer = Data64;
//
// Check IA32e mode switch
//
VmEntryControls.Uint32 = VmRead32 (VMCS_32_CONTROL_VMENTRY_CONTROLS_INDEX);
if ((Data64 & IA32_EFER_MSR_MLE) != 0) {
mGuestContextCommonSmm.GuestContextPerCpu[Index].Efer |= IA32_EFER_MSR_MLE;
mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Efer |= IA32_EFER_MSR_MLE;
} else {
mGuestContextCommonSmm.GuestContextPerCpu[Index].Efer &= ~IA32_EFER_MSR_MLE;
mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Efer &= ~IA32_EFER_MSR_MLE;
}
if (((mGuestContextCommonSmm.GuestContextPerCpu[Index].Efer & IA32_EFER_MSR_MLE) != 0) &&
((VmReadN (VMCS_N_GUEST_CR0_INDEX) & CR0_PG) != 0)) {
mGuestContextCommonSmm.GuestContextPerCpu[Index].Efer |= IA32_EFER_MSR_MLA;
if (((mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Efer & IA32_EFER_MSR_MLE) != 0) &&
((mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Cr0 & CR0_PG) != 0)) {
mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Efer |= IA32_EFER_MSR_MLA;
VmEntryControls.Bits.Ia32eGuest = 1;
} else {
mGuestContextCommonSmm.GuestContextPerCpu[Index].Efer &= ~IA32_EFER_MSR_MLA;
mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Efer &= ~IA32_EFER_MSR_MLA;
VmEntryControls.Bits.Ia32eGuest = 0;
}
VmWrite32 (VMCS_32_CONTROL_VMENTRY_CONTROLS_INDEX, VmEntryControls.Uint32);
VmWrite64 (VMCS_64_GUEST_IA32_EFER_INDEX, mGuestContextCommonSmm.GuestContextPerCpu[Index].Efer);
VmWrite64 (VMCS_64_GUEST_IA32_EFER_INDEX, mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Efer);
break;
@ -230,7 +238,7 @@ SmmWriteMsrHandler (
case IA32_BIOS_UPDT_TRIG_MSR_INDEX:
// Only write it when BIOS request MicrocodeUpdate
MsrDesc = GetStmResourceMsr ((STM_RSC *)(UINTN)mGuestContextCommonSmm.BiosHwResourceRequirementsPtr, IA32_BIOS_UPDT_TRIG_MSR_INDEX);
MsrDesc = GetStmResourceMsr ((STM_RSC *)(UINTN)mGuestContextCommonSmm[VmType].BiosHwResourceRequirementsPtr, IA32_BIOS_UPDT_TRIG_MSR_INDEX);
if (MsrDesc != NULL) {
AsmWriteMsr64 (MsrIndex, Data64);
}

View File

@ -1,102 +1,147 @@
/** @file
SMM RSM handler
SMM RSM handler
Copyright (c) 2015, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php.
Copyright (c) 2015, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php.
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
**/
#include "StmRuntime.h"
#include "PeStm.h"
#include "PeLoadVm.h"
extern PE_VM_DATA PeVmData[4];
extern UINT32 RestoreInterPeVm(UINT32 CpuIndex, UINT32 PeType);
extern PE_SMI_CONTROL PeSmiControl;
extern unsigned int CpuInSmiCount;
/**
This function is RSM handler for SMM.
This function is RSM handler for SMM.
@param Index CPU index
@param Index CPU index
**/
VOID
RsmHandler (
IN UINT32 Index
)
RsmHandler (
IN UINT32 Index
)
{
UINTN Rflags;
UINT64 ExecutiveVmcsPtr;
UINT64 VmcsLinkPtr;
UINT32 VmcsSize;
VmcsSize = GetVmcsSize();
ExecutiveVmcsPtr = VmRead64 (VMCS_64_CONTROL_EXECUTIVE_VMCS_PTR_INDEX);
UINTN Rflags = 0;
UINT64 ExecutiveVmcsPtr;
UINT64 VmcsLinkPtr;
UINT32 VmcsSize;
UINT32 PeType;
VmcsSize = GetVmcsSize();
ExecutiveVmcsPtr = VmRead64 (VMCS_64_CONTROL_EXECUTIVE_VMCS_PTR_INDEX);
if (IsOverlap (ExecutiveVmcsPtr, VmcsSize, mHostContextCommon.TsegBase, mHostContextCommon.TsegLength)) {
// Overlap TSEG
DEBUG ((EFI_D_ERROR, "ExecutiveVmcsPtr violation (RsmHandler) - %016lx\n", ExecutiveVmcsPtr));
CpuDeadLoop() ;
}
// Overlap TSEG
DEBUG ((EFI_D_ERROR, "%ld RsmHandler - ExecutiveVmcsPtr violation (RsmHandler) - %016lx\n", Index, ExecutiveVmcsPtr));
CpuDeadLoop() ;
}
VmcsLinkPtr = VmRead64 (VMCS_64_GUEST_VMCS_LINK_PTR_INDEX);
VmcsLinkPtr = VmRead64 (VMCS_64_GUEST_VMCS_LINK_PTR_INDEX);
if (IsOverlap (VmcsLinkPtr, VmcsSize, mHostContextCommon.TsegBase, mHostContextCommon.TsegLength)) {
// Overlap TSEG
DEBUG ((EFI_D_ERROR, "VmcsLinkPtr violation (RsmHandler) - %016lx\n", VmcsLinkPtr));
CpuDeadLoop() ;
}
if (mHostContextCommon.HostContextPerCpu[Index].JumpBufferValid) {
//
// return from Setup/TearDown
//
mHostContextCommon.HostContextPerCpu[Index].JumpBufferValid = FALSE;
LongJump (&mHostContextCommon.HostContextPerCpu[Index].JumpBuffer, (UINTN)-1);
// Should not get here
CpuDeadLoop ();
}
// Overlap TSEG
DEBUG ((EFI_D_ERROR, "%ld RsmHandler - VmcsLinkPtr violation (RsmHandler) - %016lx\n", Index, VmcsLinkPtr));
CpuDeadLoop() ;
}
AsmVmPtrStore (&mGuestContextCommonSmm.GuestContextPerCpu[Index].Vmcs);
Rflags = AsmVmPtrLoad (&mGuestContextCommonSmi.GuestContextPerCpu[Index].Vmcs);
if ((Rflags & (RFLAGS_CF | RFLAGS_ZF)) != 0) {
DEBUG ((EFI_D_ERROR, "ERROR: AsmVmPtrLoad(%d) - %016lx : %08x\n", (UINTN)Index, mGuestContextCommonSmi.GuestContextPerCpu[Index].Vmcs, Rflags));
CpuDeadLoop ();
}
if (mHostContextCommon.HostContextPerCpu[Index].JumpBufferValid) {
//
// return from Setup/TearDown
//
mHostContextCommon.HostContextPerCpu[Index].JumpBufferValid = FALSE;
LongJump (&mHostContextCommon.HostContextPerCpu[Index].JumpBuffer, (UINTN)-1);
// Should not get here
CpuDeadLoop ();
}
STM_PERF_START (Index, 0, "ReadSyncSmmStateSaveArea", "RsmHandler");
ReadSyncSmmStateSaveArea (Index);
STM_PERF_END (Index, "ReadSyncSmmStateSaveArea", "RsmHandler");
AsmVmPtrStore (&mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[Index].Vmcs);
if((PeSmiControl.PeCpuIndex == ((INT32) Index)))
{
PeType = mHostContextCommon.HostContextPerCpu[Index].NonSmiHandler;
//PeType = mHostContextCommon.HostContextPerCpu[Index].GuestVmType;
DEBUG((EFI_D_ERROR, "%ld RsmHandler - VmPe Detected - PeType: %ld PeVmState: %ld\n", Index, PeType, PeVmData[PeType].PeVmState));
switch(PeVmData[PeType].PeVmState)
{
case PE_VM_SUSPEND: // is this a suspended PE/VM?
{
// restore it - return to peer once it completes
RestoreInterPeVm(Index, PeType);
//should not return... will let the module handle the error processing
// this will return in the case where the VM/PE was being created and it was interrupted by a SMI that was detected
// while doing the processor state gathering.
// we will come out and let it return so that the SMI can get fired and
// when the SMI handler is done will reattempt to regather the processor info
DEBUG((EFI_D_ERROR, "%ld RsmHandler ERROR - Failed to restart PE/VM after SMI, PeType: %ld\n", Index, PeType));
break;
}
case PE_VM_IDLE:
case PE_VM_AVAIL:
{
//DEBUG((EFI_D_ERROR, "%ld RsmHandler Idle VmPe: ignoring\n", Index));
break;
}
default:
{
DEBUG((EFI_D_ERROR, " %ld RsmHandler - data structure inconsistency - suspended PE/VM not found\n", Index));
}
}
}
Rflags = AsmVmPtrLoad (&mGuestContextCommonSmi.GuestContextPerCpu[Index].Vmcs);
if ((Rflags & (RFLAGS_CF | RFLAGS_ZF)) != 0) {
DEBUG ((EFI_D_ERROR, "%ld RsmHandler - ERROR: AsmVmPtrLoad - %016lx : %08x\n", (UINTN)Index, mGuestContextCommonSmi.GuestContextPerCpu[Index].Vmcs, Rflags));
CpuDeadLoop ();
}
STM_PERF_START (Index, 0, "ReadSyncSmmStateSaveArea", "RsmHandler");
ReadSyncSmmStateSaveArea (Index);
STM_PERF_END (Index, "ReadSyncSmmStateSaveArea", "RsmHandler");
#if 0
DEBUG ((EFI_D_INFO, "Exit SmmHandler - %d\n", (UINTN)Index));
DEBUG ((EFI_D_INFO, "RsmHandler Exit SmmHandler - %d\n", (UINTN)Index));
#endif
// We should not WaitAllProcessorRendezVous() because we can not assume SMM will bring all CPU into BIOS SMM handler.
// WaitAllProcessorRendezVous (Index);
// We should not WaitAllProcessorRendezVous() because we can not assume SMM will bring all CPU into BIOS SMM handler.
// WaitAllProcessorRendezVous (Index);
STM_PERF_END (Index, "OsSmiHandler", "RsmHandler");
STM_PERF_END (Index, "OsSmiHandler", "RsmHandler");
CheckPendingMtf (Index);
CheckPendingMtf (Index);
//
// Launch back
//
Rflags = AsmVmResume (&mGuestContextCommonSmi.GuestContextPerCpu[Index].Register);
// BUGBUG: - AsmVmLaunch if AsmVmResume fail
if (VmRead32 (VMCS_32_RO_VM_INSTRUCTION_ERROR_INDEX) == VmxFailErrorVmResumeWithNonLaunchedVmcs) {
// DEBUG ((EFI_D_ERROR, "(STM):o(\n", (UINTN)Index));
Rflags = AsmVmLaunch (&mGuestContextCommonSmi.GuestContextPerCpu[Index].Register);
}
//
// Launch back
//
Rflags = AsmVmResume (&mGuestContextCommonSmi.GuestContextPerCpu[Index].Register);
// BUGBUG: - AsmVmLaunch if AsmVmResume fail
if (VmRead32 (VMCS_32_RO_VM_INSTRUCTION_ERROR_INDEX) == VmxFailErrorVmResumeWithNonLaunchedVmcs) {
DEBUG ((EFI_D_ERROR, "%ld :o(\n", (UINTN)Index));
Rflags = AsmVmLaunch (&mGuestContextCommonSmi.GuestContextPerCpu[Index].Register);
}
AcquireSpinLock (&mHostContextCommon.DebugLock);
DEBUG ((EFI_D_ERROR, "!!!RsmHandler FAIL!!!\n"));
DEBUG ((EFI_D_ERROR, "Rflags: %08x\n", Rflags));
DEBUG ((EFI_D_ERROR, "VMCS_32_RO_VM_INSTRUCTION_ERROR: %08x\n", (UINTN)VmRead32 (VMCS_32_RO_VM_INSTRUCTION_ERROR_INDEX)));
DumpVmcsAllField ();
DumpRegContext (&mGuestContextCommonSmi.GuestContextPerCpu[Index].Register);
ReleaseSpinLock (&mHostContextCommon.DebugLock);
AcquireSpinLock (&mHostContextCommon.DebugLock);
DEBUG ((EFI_D_ERROR, "%ld !!!RsmHandler FAIL!!!\n", (UINTN)Index));
DEBUG ((EFI_D_ERROR, "%ld RsmHandler Rflags: %08x\n", (UINTN)Index, Rflags));
DEBUG ((EFI_D_ERROR, "%ld RsmHandler VMCS_32_RO_VM_INSTRUCTION_ERROR: %08x\n", (UINTN)Index, (UINTN)VmRead32 (VMCS_32_RO_VM_INSTRUCTION_ERROR_INDEX)));
DumpVmcsAllField ();
DumpRegContext (&mGuestContextCommonSmi.GuestContextPerCpu[Index].Register);
DumpGuestStack(Index);
ReleaseSpinLock (&mHostContextCommon.DebugLock);
CpuDeadLoop ();
CpuDeadLoop ();
return ;
return ;
}

View File

@ -14,6 +14,10 @@
#include "StmInit.h"
#include "StmRuntime.h"
#include "StmPe.h"
extern void CpuReadySync(UINT32 Index);
extern unsigned int CpuInSmiCount;
/**
@ -35,20 +39,20 @@ SmmSetup (
}
AsmVmPtrStore (&mGuestContextCommonSmi.GuestContextPerCpu[Index].Vmcs);
Rflags = AsmVmPtrLoad (&mGuestContextCommonSmm.GuestContextPerCpu[Index].Vmcs);
Rflags = AsmVmPtrLoad (&mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[Index].Vmcs);
if ((Rflags & (RFLAGS_CF | RFLAGS_ZF)) != 0) {
DEBUG ((EFI_D_ERROR, "ERROR: AsmVmPtrLoad(%d) - %016lx : %08x\n", (UINTN)Index, mGuestContextCommonSmm.GuestContextPerCpu[Index].Vmcs, Rflags));
DEBUG ((EFI_D_ERROR, "ERROR: AsmVmPtrLoad(%d) - %016lx : %08x\n", (UINTN)Index, mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[Index].Vmcs, Rflags));
CpuDeadLoop ();
}
VmWriteN (VMCS_N_GUEST_RIP_INDEX, (UINTN)mHostContextCommon.HostContextPerCpu[Index].TxtProcessorSmmDescriptor->SmmStmSetupRip);
VmWriteN (VMCS_N_GUEST_RSP_INDEX, (UINTN)mHostContextCommon.HostContextPerCpu[Index].TxtProcessorSmmDescriptor->SmmSmiHandlerRsp);
VmWriteN (VMCS_N_GUEST_CR3_INDEX, mGuestContextCommonSmm.GuestContextPerCpu[Index].Cr3);
VmWriteN (VMCS_N_GUEST_CR3_INDEX, mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[Index].Cr3);
//
// We need update HOST_RSP to save context for SetJump.
//
VmWriteN (VMCS_N_HOST_RSP_INDEX, mHostContextCommon.HostContextPerCpu[Index].Stack - (mHostContextCommon.StmHeader->SwStmHdr.PerProcDynamicMemorySize / 2));
VmWriteN (VMCS_N_HOST_RSP_INDEX, mHostContextCommon.HostContextPerCpu[Index].Stack - (mHostContextCommon.StmHeader->SwStmHdr.PerProcDynamicMemorySize / 2));
JumpFlag = SetJump (&mHostContextCommon.HostContextPerCpu[Index].JumpBuffer);
if (JumpFlag == 0) {
@ -60,9 +64,9 @@ SmmSetup (
DEBUG ((EFI_D_INFO, "SmmStmSetupRip start (%d) ...\n", (UINTN)Index));
DEBUG ((EFI_D_INFO, "New HostStack (%d) - %08x\n", (UINTN)Index, VmReadN (VMCS_N_HOST_RSP_INDEX)));
mHostContextCommon.HostContextPerCpu[Index].JumpBufferValid = TRUE;
mGuestContextCommonSmm.GuestContextPerCpu[Index].Launched = TRUE;
Rflags = AsmVmLaunch (&mGuestContextCommonSmm.GuestContextPerCpu[Index].Register);
mGuestContextCommonSmm.GuestContextPerCpu[Index].Launched = FALSE;
mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[Index].Launched = TRUE;
Rflags = AsmVmLaunch (&mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[Index].Register);
mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[Index].Launched = FALSE;
AcquireSpinLock (&mHostContextCommon.DebugLock);
DEBUG ((EFI_D_ERROR, "!!!SmmSetup FAIL!!!\n"));
DEBUG ((EFI_D_ERROR, "Rflags: %08x\n", Rflags));
@ -77,7 +81,7 @@ SmmSetup (
//
VmWriteN (VMCS_N_HOST_RSP_INDEX, mHostContextCommon.HostContextPerCpu[Index].Stack);
AsmVmPtrStore (&mGuestContextCommonSmm.GuestContextPerCpu[Index].Vmcs);
AsmVmPtrStore (&mGuestContextCommonSmm[SMI_HANDLER].GuestContextPerCpu[Index].Vmcs);
Rflags = AsmVmPtrLoad (&mGuestContextCommonSmi.GuestContextPerCpu[Index].Vmcs);
if ((Rflags & (RFLAGS_CF | RFLAGS_ZF)) != 0) {

View File

@ -13,6 +13,7 @@
**/
#include "StmRuntime.h"
#include "PeStm.h"
GLOBAL_REMOVE_IF_UNREFERENCED
CHAR8 *mTaskTypeStr[] = {
@ -40,6 +41,7 @@ SmmTaskSwitchHandler (
TASK_STATE *Tss;
UINT16 CurrentTr;
TASK_STATE *CurrentTss;
UINT32 VmType = SMI_HANDLER;
// DEBUG ((EFI_D_INFO, "!!!TaskSwitchHandler - %08x\n", (UINTN)Index));
DEBUG ((EFI_D_INFO, "T"));
@ -89,14 +91,14 @@ SmmTaskSwitchHandler (
//
if ((Qualification.TaskSwitch.TaskType == TASK_SWITCH_SOURCE_CALL) ||
(Qualification.TaskSwitch.TaskType == TASK_SWITCH_SOURCE_TASK_GATE)) {
CurrentTss->Eax = (UINT32)mGuestContextCommonSmm.GuestContextPerCpu[Index].Register.Rax;
CurrentTss->Ebx = (UINT32)mGuestContextCommonSmm.GuestContextPerCpu[Index].Register.Rbx;
CurrentTss->Ecx = (UINT32)mGuestContextCommonSmm.GuestContextPerCpu[Index].Register.Rcx;
CurrentTss->Edx = (UINT32)mGuestContextCommonSmm.GuestContextPerCpu[Index].Register.Rdx;
CurrentTss->Esi = (UINT32)mGuestContextCommonSmm.GuestContextPerCpu[Index].Register.Rsi;
CurrentTss->Edi = (UINT32)mGuestContextCommonSmm.GuestContextPerCpu[Index].Register.Rdi;
CurrentTss->Ebp = (UINT32)mGuestContextCommonSmm.GuestContextPerCpu[Index].Register.Rbp;
CurrentTss->Esp = (UINT32)mGuestContextCommonSmm.GuestContextPerCpu[Index].Register.Rsp;
CurrentTss->Eax = (UINT32)mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Register.Rax;
CurrentTss->Ebx = (UINT32)mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Register.Rbx;
CurrentTss->Ecx = (UINT32)mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Register.Rcx;
CurrentTss->Edx = (UINT32)mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Register.Rdx;
CurrentTss->Esi = (UINT32)mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Register.Rsi;
CurrentTss->Edi = (UINT32)mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Register.Rdi;
CurrentTss->Ebp = (UINT32)mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Register.Rbp;
CurrentTss->Esp = (UINT32)mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Register.Rsp;
CurrentTss->Eip = (UINT32)VmReadN (VMCS_N_GUEST_RIP_INDEX);
if (Qualification.TaskSwitch.TaskType == TASK_SWITCH_SOURCE_CALL) {
@ -127,14 +129,14 @@ SmmTaskSwitchHandler (
(Qualification.TaskSwitch.TaskType == TASK_SWITCH_SOURCE_TASK_GATE)) {
Tss->Link = CurrentTr;
}
mGuestContextCommonSmm.GuestContextPerCpu[Index].Register.Rax = Tss->Eax;
mGuestContextCommonSmm.GuestContextPerCpu[Index].Register.Rbx = Tss->Ebx;
mGuestContextCommonSmm.GuestContextPerCpu[Index].Register.Rcx = Tss->Ecx;
mGuestContextCommonSmm.GuestContextPerCpu[Index].Register.Rdx = Tss->Edx;
mGuestContextCommonSmm.GuestContextPerCpu[Index].Register.Rsi = Tss->Esi;
mGuestContextCommonSmm.GuestContextPerCpu[Index].Register.Rdi = Tss->Edi;
mGuestContextCommonSmm.GuestContextPerCpu[Index].Register.Rbp = Tss->Ebp;
mGuestContextCommonSmm.GuestContextPerCpu[Index].Register.Rsp = Tss->Esp;
mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Register.Rax = Tss->Eax;
mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Register.Rbx = Tss->Ebx;
mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Register.Rcx = Tss->Ecx;
mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Register.Rdx = Tss->Edx;
mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Register.Rsi = Tss->Esi;
mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Register.Rdi = Tss->Edi;
mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Register.Rbp = Tss->Ebp;
mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Register.Rsp = Tss->Esp;
VmWriteN (VMCS_N_GUEST_RIP_INDEX, Tss->Eip);

View File

@ -13,6 +13,7 @@
**/
#include "StmRuntime.h"
#include "PeStm.h"
/**
@ -28,21 +29,22 @@ SmmTeardown (
{
UINTN JumpFlag;
UINTN Rflags;
UINT32 VmType = SMI_HANDLER;
if (mHostContextCommon.HostContextPerCpu[Index].TxtProcessorSmmDescriptor->SmmStmTeardownRip == 0) {
return ;
}
AsmVmPtrStore (&mGuestContextCommonSmi.GuestContextPerCpu[Index].Vmcs);
Rflags = AsmVmPtrLoad (&mGuestContextCommonSmm.GuestContextPerCpu[Index].Vmcs);
Rflags = AsmVmPtrLoad (&mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Vmcs);
if ((Rflags & (RFLAGS_CF | RFLAGS_ZF)) != 0) {
DEBUG ((EFI_D_ERROR, "ERROR: AsmVmPtrLoad(%d) - %016lx : %08x\n", (UINTN)Index, mGuestContextCommonSmm.GuestContextPerCpu[Index].Vmcs, Rflags));
DEBUG ((EFI_D_ERROR, "ERROR: AsmVmPtrLoad(%d) - %016lx : %08x\n", (UINTN)Index, mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Vmcs, Rflags));
CpuDeadLoop ();
}
VmWriteN (VMCS_N_GUEST_RIP_INDEX, (UINTN)mHostContextCommon.HostContextPerCpu[Index].TxtProcessorSmmDescriptor->SmmStmTeardownRip);
VmWriteN (VMCS_N_GUEST_RSP_INDEX, (UINTN)mHostContextCommon.HostContextPerCpu[Index].TxtProcessorSmmDescriptor->SmmSmiHandlerRsp);
VmWriteN (VMCS_N_GUEST_CR3_INDEX, mGuestContextCommonSmm.GuestContextPerCpu[Index].Cr3);
VmWriteN (VMCS_N_GUEST_CR3_INDEX, mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Cr3);
//
// We need update HOST_RSP to save context for SetJump.
@ -58,18 +60,19 @@ SmmTeardown (
DEBUG ((EFI_D_INFO, "SmmStmTeardownRip start (%d) ...\n", (UINTN)Index));
mHostContextCommon.HostContextPerCpu[Index].JumpBufferValid = TRUE;
Rflags = AsmVmResume (&mGuestContextCommonSmm.GuestContextPerCpu[Index].Register);
Rflags = AsmVmResume (&mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Register);
// BUGBUG: - AsmVmLaunch if AsmVmResume fail
if (VmRead32 (VMCS_32_RO_VM_INSTRUCTION_ERROR_INDEX) == VmxFailErrorVmResumeWithNonLaunchedVmcs) {
// DEBUG ((EFI_D_ERROR, "(STM):-(\n", (UINTN)Index));
Rflags = AsmVmLaunch (&mGuestContextCommonSmm.GuestContextPerCpu[Index].Register);
Rflags = AsmVmLaunch (&mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Register);
}
AcquireSpinLock (&mHostContextCommon.DebugLock);
DEBUG ((EFI_D_ERROR, "!!!SmmTeardown FAIL!!!\n"));
DEBUG ((EFI_D_ERROR, "Rflags: %08x\n", Rflags));
DEBUG ((EFI_D_ERROR, "VMCS_32_RO_VM_INSTRUCTION_ERROR: %08x\n", (UINTN)VmRead32 (VMCS_32_RO_VM_INSTRUCTION_ERROR_INDEX)));
DumpVmcsAllField ();
DumpRegContext (&mGuestContextCommonSmm.GuestContextPerCpu[Index].Register);
DumpRegContext (&mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Register);
DumpGuestStack(Index);
ReleaseSpinLock (&mHostContextCommon.DebugLock);
CpuDeadLoop ();
}
@ -80,7 +83,7 @@ SmmTeardown (
//
VmWriteN (VMCS_N_HOST_RSP_INDEX, mHostContextCommon.HostContextPerCpu[Index].Stack);
AsmVmPtrStore (&mGuestContextCommonSmm.GuestContextPerCpu[Index].Vmcs);
AsmVmPtrStore (&mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Vmcs);
Rflags = AsmVmPtrLoad (&mGuestContextCommonSmi.GuestContextPerCpu[Index].Vmcs);
if ((Rflags & (RFLAGS_CF | RFLAGS_ZF)) != 0) {

View File

@ -13,6 +13,7 @@
**/
#include "StmRuntime.h"
#include "PeStm.h"
/**
@ -351,8 +352,9 @@ SmmVmcallReturnFromProtectionExceptionHandler (
)
{
X86_REGISTER *Reg;
UINT32 VmType = SMI_HANDLER;
Reg = &mGuestContextCommonSmm.GuestContextPerCpu[Index].Register;
Reg = &mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Register;
// EBX = 0: resume SMM guest using register state found on exception stack
// EBX = 1 to 0x0F: EBX contains a BIOS error code which the STM must record in the TXT.ERRORCODE
@ -418,12 +420,13 @@ SmmVmcallHandler (
STM_STATUS Status;
STM_VMCALL_HANDLER StmVmcallHandler;
UINT64 AddressParameter;
UINT32 VmType = SMI_HANDLER;
Reg = &mGuestContextCommonSmm.GuestContextPerCpu[Index].Register;
Reg = &mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Register;
StmVmcallHandler = GetSmmVmcallHandlerByIndex (ReadUnaligned32 ((UINT32 *)&Reg->Rax));
if (StmVmcallHandler == NULL) {
DEBUG ((EFI_D_INFO, "GetSmmVmcallHandlerByIndex - %x!\n", (UINTN)ReadUnaligned32 ((UINT32 *)&Reg->Rax)));
DEBUG((EFI_D_INFO, "%ld SmmVmcallHandler - GetSmmVmcallHandlerByIndex - %x!\n", Index, (UINTN)ReadUnaligned32 ((UINT32 *)&Reg->Rax)));
// Should not happen
CpuDeadLoop ();
Status = ERROR_INVALID_API;

View File

@ -24,6 +24,7 @@
// bit 1 - exception 1
// etc.
//
//UINT32 mErrorCodeFlag = 0x00027d00;
UINT32 mErrorCodeFlag = 0x00027d00;
EFI_EXCEPTION_CALLBACK mExternalVectorTable[STM_MAX_IDT_NUM];

363
Stm/StmPkg/Core/Runtime/StmPe.h Executable file
View File

@ -0,0 +1,363 @@
#ifndef _PESTM_H_
#define _PESTM_H_
// StmPe.h - type and constant definitions for STM/PE (protected-execution VM) support.
// VM/PE PeSmiControl.PeSmiState state definitions
#define PESMINULL 0 // nothing happening
#define PESMIPSMI 1 // SMI sent by VM/PE startup to get cpu state
#define PESMIHSMI 2 // normal SMI processing
#define PESMIPNMI 3 // VM/PE needs an NMI sent for it to help process the host SMI
#define PESMIHTMR 4 // smi handler has detected an SMI timer
#define PESMIPNMI2 5 // nmi has been sent to VM/PE - waiting for response
// masks that extract the byte offset within a 4KB page (IA32 and IA-32e widths)
#define OFFSET_BITMASK_IA32_4K 0x00000FFF
#define OFFSET_BITMASK_IA32E_4K 0x0000000000000FFF
// size of a 4KB page in bytes
#define PAGE_SIZE_4K 4096ULL
// One physical memory region descriptor (start address plus size;
// presumably bytes - confirm with consumers). Used by PE_MODULE_INFO.Segment
// to describe read-only regions for a PE/VM.
typedef struct
{
UINT64 Address;
UINT64 Size;
} PE_REGION_LIST;
// Module load information for creating a PE (protected-execution) VM.
// The first portion is filled in by the caller of AddPEVM*VMCALL; fields
// after the "data areas local to the STM" marker are STM-internal state.
typedef struct
{
UINT64 ModuleAddress; // physical address of module to be loaded into PE/VM
UINT64 ModuleLoadAddress; // guest physical address to load module in a PE/VM
UINT32 ModuleSize; // size of module in bytes
UINT32 ModuleEntryPoint; // entry point - relative offset to ModuleLoadAddress
UINT64 AddressSpaceStart; // start of guest physical address space (page aligned)
UINT32 AddressSpaceSize; // size of guest physical address space
UINT32 VmConfig; // Options to the configuration of the PE/VM
UINT64 Cr3Load; // CR3
UINT64 SharedPage; // writeable pages for sharing between the PE Module and kernel space
// can be multiple pages and is located in main memory
PE_REGION_LIST *Segment; // list of read only regions (contained within a page)
UINT32 SharedPageSize; // size of SharedPage/region
UINT32 DoNotClearSize; // area at beginning of memory not to be cleared
UINT64 ModuleDataSection; // Location of Module Data Section for VM/PE
// data areas local to the STM go after this point
UINT64 SharedStmPage; // page shared between PE/VM and the STM
UINT64 RunCount; // count of runs starting with one (1)
// UINTN DataRegionStart; // data space after text region
UINTN DataRegionSize; // data space size
UINTN FrontDataRegionSize; // data space size before text region
UINTN DataRegionSmmLoc; // start location in SMM for data region
} PE_MODULE_INFO;
// options for VmConfig only for Perm VM
// (bit flags OR'd into PE_MODULE_INFO.VmConfig; bits 19-25)
#define PERM_VM_CRASH_BREAKDOWN (1<<21) // if VM/PE crashes then breakdown
#define PERM_VM_RUN_ONCE (1<<20) // run once and delete
#define PERM_VM_ALLOW_TERM (1<<19) // allow the VM/PE to be terminated
#define PERM_VM_RUN_PERIODIC (1<<22) // run using SMI Timer
#define PERM_VM_CLEAR_MEMORY (1<<23) // clear HEAP before run
#define PERM_VM_SET_TEXT_RW (1<<24) // set the text area as RW ow W
#define PERM_VM_EXEC_HEAP (1<<25) // Allow Heap Execution
// Shared control/synchronization state between the SMI handler and a
// running PE VM. Access is serialized by PeSmiControlLock; PeSmiState
// takes the PESMI* values defined above.
typedef struct __PE_SMI_CONTROL
{
SPIN_LOCK PeSmiControlLock;
UINT32 PeNmiBreak; // when 1, a NMI has been sent to break the thread in PE_APIC_id
UINT32 PeApicId; // APIC id of the thread that is executing the PE VM (comment truncated in original - confirm)
UINT32 PeExec; // when 1 PE_APIC_ID is executing a PE VM (comment truncated in original - confirm)
UINT32 PeSmiState; // SMI is sent to get processor state
UINT32 PeWaitTimer; // if non-zero - waiting for timer and length of timeout
INT32 PeCpuIndex; // CpuIndex of PeVm
} PE_SMI_CONTROL;
// In-memory snapshot of the guest-state area of a VMCS: control registers,
// RIP/RSP/RFLAGS, segment selectors/bases/limits/access rights, and the
// handful of 64-bit guest fields (DEBUGCTL, VMCS link pointer, EFER).
typedef struct _VMX_GUEST_VMCS_STRUCT
{
UINTN GdtrBase;
UINTN Rsp;
UINTN Rip;
UINTN Rflags;
UINTN Cr0;
UINTN Cr3;
UINTN Cr4;
UINTN Dr7;
UINT16 CsSelector;
UINT16 DsSelector;
UINT16 EsSelector;
UINT16 FsSelector;
UINT16 GsSelector;
UINT16 SsSelector;
UINT16 TrSelector;
UINT32 InterruptibilityState;
UINT32 Smbase;
UINT32 ActivityState;
UINT64 DebugCtlFull;
UINT64 VmcsLinkPointerFull;
UINT64 IA32_Efer;
UINT32 GdtrLimit;
UINT32 LdtrAccessRights;
UINTN CsBase;
UINT32 CsAccessRights;
UINT32 CsLimit;
UINTN DsBase;
UINT32 DsAccessRights;
UINT32 DsLimit;
UINTN EsBase;
UINT32 EsAccessRights;
UINT32 EsLimit;
UINTN FsBase;
UINT32 FsAccessRights;
UINT32 FsLimit;
UINTN GsBase;
UINT32 GsAccessRights;
UINT32 GsLimit;
UINTN SsBase;
UINT32 SsAccessRights;
UINT32 SsLimit;
UINTN TrBase;
UINT32 TrAccessRights;
UINT32 TrLimit;
} VMX_GUEST_VMCS_STRUCT;
// Guest VM Types
// (index into per-type context arrays such as mGuestContextCommonSmm[VmType])
#define SMI_HANDLER 0
#define PE_PERM 1
#define PE_TEMP 2
#define PE_OTHER 3
#define NUM_PE_TYPE 4
// Per-CPU guest context for a PE VM: saved general registers, a subset of
// VMCS guest state, and the segment descriptor values reinitialized on
// every restart of the VM. Commented-out fields are kept to show the
// correspondence with the full SMI-handler guest context structure.
typedef struct _PE_GUEST_CONTEXT_PER_CPU {
X86_REGISTER Register;
//IA32_DESCRIPTOR Gdtr;
//IA32_DESCRIPTOR Idtr;
//UINTN Cr0;
//UINTN Cr3;
//UINTN Cr4;
UINTN Rip;
UINTN Rsp;
UINTN Rflags;
//UINTN Stack;
//UINT64 Efer;
//BOOLEAN UnrestrictedGuest;
//UINTN XStateBuffer;
GUEST_INTERRUPTIBILITY_STATE InterruptibilityState;
UINT32 ActivityState;
UINT64 VmcsLinkPointerFull;
UINT32 VmcsLinkPointerHigh;
VM_EXIT_CONTROLS VmExitCtrls;
VM_ENTRY_CONTROLS VmEntryCtrls;
// For CPU support Save State in MSR, we need a place holder to save it in memory in advanced.
// The reason is that when we switch to SMM guest, we lose the context in SMI guest.
//STM_SMM_CPU_STATE *SmmCpuState;
//VM_EXIT_INFO_BASIC InfoBasic; // hold info since we need that when return to SMI guest.
//VM_EXIT_QUALIFICATION Qualification; // hold info since we need that when return to SMI guest.
//UINT32 VmExitInstructionLength;
//BOOLEAN Launched;
//BOOLEAN Actived; // For SMM VMCS only, controlled by StartStmVMCALL
//UINT64 Vmcs;
//UINT32 GuestMsrEntryCount;
//UINT64 GuestMsrEntryAddress;
#if defined (MDE_CPU_X64)
// Need check alignment here because we need use FXSAVE/FXRESTORE buffer
UINT32 Reserved;
#endif
// Stuff we reinitialize upon every restart
UINT16 CsSelector;
UINTN CsBase;
UINT32 CsLimit;
UINT32 CsAccessRights; // defined by user input
UINT16 DsSelector;
UINTN DsBase;
UINT32 DsLimit;
UINT32 DsAccessRights;
UINT16 EsSelector;
UINTN EsBase;
UINT32 EsLimit;
UINT32 EsAccessRights;
UINT16 FsSelector;
UINTN FsBase;
UINT32 FsLimit;
UINT32 FsAccessRights;
UINT16 GsSelector;
UINTN GsBase;
UINT32 GsLimit;
UINT32 GsAccessRights;
UINT16 SsSelector;
UINTN SsBase;
UINT32 SsLimit;
UINT32 SsAccessRights;
UINT16 TrSelector;
UINTN TrBase;
UINT32 TrLimit;
UINT32 TrAccessRights;
UINT16 LdtrSelector;
UINTN LdtrBase;
UINT32 LdtrLimit;
UINT32 LdtrAccessRights;
UINTN GdtrBase;
UINT32 GdtrLimit;
UINTN IdtrBase;
UINT32 IdtrLimit;
} PE_GUEST_CONTEXT_PER_CPU;
// Common guest context for a PE VM. A PE VM runs on a single CPU, so only
// one per-CPU context is embedded (unlike the SMI-handler context, which
// keeps an array). Commented-out fields mirror the SMI-handler structure.
typedef struct _PE_GUEST_CONTEXT_COMMON {
//EPT_POINTER EptPointer;
//UINTN CompatiblePageTable;
//UINTN CompatiblePaePageTable;
//UINT64 MsrBitmap;
//UINT64 IoBitmapA;
//UINT64 IoBitmapB;
//UINT32 Vmid;
//UINTN ZeroXStateBuffer;
//
// BiosHwResourceRequirementsPtr: This is back up of BIOS resource - no ResourceListContinuation
//
//UINT64 BiosHwResourceRequirementsPtr;
PE_GUEST_CONTEXT_PER_CPU GuestContextPerCpu; // for PE we need only one
} PE_GUEST_CONTEXT_COMMON;
// Host-side context for a PE VM: locks, memory layout (page table, GDT/IDT,
// heap, TSEG), resource lists, event log, and performance data. Parallels
// the STM's global host context structure.
typedef struct _PE_HOST_CONTEXT_COMMON {
SPIN_LOCK DebugLock;
SPIN_LOCK MemoryLock;
SPIN_LOCK SmiVmcallLock;
UINT32 CpuNum;
UINT32 JoinedCpuNum;
UINTN PageTable;
IA32_DESCRIPTOR Gdtr;
IA32_DESCRIPTOR Idtr;
UINT64 HeapBottom;
UINT64 HeapTop;
UINT8 PhysicalAddressBits;
//
// BUGBUG: Assume only one segment for client system.
//
UINT64 PciExpressBaseAddress;
UINT64 PciExpressLength;
UINT64 VmcsDatabase;
UINT32 TotalNumberProcessors;
STM_HEADER *StmHeader;
UINTN StmSize;
UINT64 TsegBase;
UINT64 TsegLength;
//
// Log
//
MLE_EVENT_LOG_STRUCTURE EventLog;
//
// ProtectedResource: This is back up of MLE resource - no ResourceListContinuation
//
MLE_PROTECTED_RESOURCE_STRUCTURE MleProtectedResource;
//
// ProtectedTrappedIoResource: This is cache for TrappedIoResource in MLE resource
// For performance consideration only, because TrappedIoResource will be referred in each SMI.
//
MLE_PROTECTED_RESOURCE_STRUCTURE MleProtectedTrappedIoResource;
//
// Performance measurement
//
STM_PERF_DATA PerfData;
STM_HOST_CONTEXT_PER_CPU HostContextPerCpu;
} PE_HOST_CONTEXT_COMMON;
// How the PE VM was started (PE_VM_DATA.StartMode)
#define PEVM_START_VMCALL 1
#define PEVM_START_SMI 2
#define PEVM_PRESTART_SMI 3
// Initial processor mode for the PE VM (PE_VM_DATA.PeCpuInitMode)
#define PEVM_INIT_16bit 1
#define PEVM_INIT_32bit 2
#define PEVM_INIT_64bit 3
// Top-level descriptor for one PE VM: the caller-supplied module info,
// lifecycle state, the SMM buffer backing the VM, and host/guest contexts.
typedef struct
{
PE_MODULE_INFO UserModule;
UINT32 StartMode; // either SMI or VMCALL (PEVM_START_* values)
UINT32 PeVmState;
UINT32 PeCpuInitMode; // VM/PE initial processor start mode (PEVM_INIT_* values)
UINTN * SmmBuffer;
UINTN SmmBufferSize;
UINTN * SharedPageStm;
PE_HOST_CONTEXT_COMMON HostState;
PE_GUEST_CONTEXT_COMMON GuestState;
} PE_VM_DATA;
// Header for a free block in a singly-linked free list used by the
// PE heap allocator: block length plus pointer to the next free block.
typedef struct HEAP_HEADER
{
UINT64 BlockLength;
struct HEAP_HEADER* NextBlock;
}HEAP_HEADER;
// Snapshot of a CPU's root-mode VMX state (host and guest halves of the
// root VMCS, plus VMXON/executive/link pointers), filled in by
// GetRootVmxState. VmxState holds VMX_STATE_ROOT or VMX_STATE_GUEST.
typedef struct ROOT_VMX_STATE {
UINT64 valid; // used by STM
UINT64 VmcsType; // 1 - guest-VM being serviced by VMM
// 2 - no current-VM active
// 3 - guest-VM
UINT64 Vmxon; // vmxon pointer - loaded at STM startup, should never change
UINT64 ExecutiveVMCS;
UINT64 LinkVMCS;
UINT64 HostRootVMCS;
UINT64 RootHostCR0;
UINT64 RootHostCR3;
UINT64 RootHostCR4;
UINT64 RootHostGDTRBase;
UINT64 RootHostIDTRBase;
UINT64 RootHostRSP;
UINT64 RootHostRIP;
UINT64 RootHostEPT; //read from memory
UINT64 RootGuestCR0;
UINT64 RootGuestCR3;
UINT64 RootGuestCR4;
UINT64 RootGuestGDTRBase;
UINT64 RootGuestGDTRLimit;
UINT64 RootGuestIDTRBase;
UINT64 RootGuestIDTRLimit;
UINT64 RootGuestRSP;
UINT64 RootGuestRIP;
UINT64 RootContEPT; // read from guest structure
UINT32 VmxState; // either root VMX or guest VMX
UINT32 Padding;
} ROOT_VMX_STATE;
// values for ROOT_VMX_STATE.VmxState
#define VMX_STATE_ROOT 1
#define VMX_STATE_GUEST 2
// Fills RootState with the root VMX state of the CPU identified by CpuIndex.
void GetRootVmxState(UINT32 CpuIndex, ROOT_VMX_STATE * RootState);
#else
#endif

View File

@ -13,6 +13,7 @@
**/
#include "StmRuntime.h"
#include "PeStm.h"
extern volatile BOOLEAN mIsBspInitialized;
extern volatile BOOLEAN *mCpuInitStatus;
@ -35,7 +36,7 @@ RestoreStmData (
ZeroMem (&mMtrrInfo, sizeof(mMtrrInfo));
ZeroMem (&mStmHandlerSmm, sizeof(mStmHandlerSmm));
ZeroMem (&mStmHandlerSmi, sizeof(mStmHandlerSmi));
ZeroMem (&mGuestContextCommonSmm, sizeof(mGuestContextCommonSmm));
ZeroMem (&mGuestContextCommonSmm, sizeof(mGuestContextCommonSmm[NUM_PE_TYPE]));
ZeroMem (&mHostContextCommon, sizeof(mHostContextCommon));
ZeroMem (&mGuestContextCommonSmi, sizeof(mGuestContextCommonSmi));
mIsBspInitialized = FALSE;
@ -122,6 +123,7 @@ StmTeardown (
DEBUG ((EFI_D_ERROR, "VMCS_32_RO_VM_INSTRUCTION_ERROR: %08x\n", (UINTN)VmRead32 (VMCS_32_RO_VM_INSTRUCTION_ERROR_INDEX)));
DumpVmcsAllField ();
DumpRegContext (Reg);
DumpGuestStack(Index);
ReleaseSpinLock (&mHostContextCommon.DebugLock);
#endif

View File

@ -0,0 +1,422 @@
/** @file
VMCS Memory Mapper
Gov't Copyright stuff
**/
#include "StmRuntime.h"
#include "VmcsOffsets.h"
#define VmcsSizeInPages 1 // current VMCS size in pages
// Table mapping each VMCS field encoding to its byte offset within the
// processor's VMCS region. FieldOffset starts at 0 (unknown) and is
// discovered at runtime by MapVmcs(); the table is terminated by the
// sentinel encoding 0xFFFF. Entry order must match VmcsFieldPrintTable.
VMCSFIELDOFFSET VmcsFieldOffsetTable[] =
{
{ VMCS_16_CONTROL_VPID_INDEX, 0 }, // 0x0000
{ VMCS_16_GUEST_ES_INDEX, 0 }, // 0x0800
{ VMCS_16_GUEST_CS_INDEX, 0 }, // 0x0802
{ VMCS_16_GUEST_SS_INDEX, 0 }, // 0x0804
{ VMCS_16_GUEST_DS_INDEX, 0 }, // 0x0806
{ VMCS_16_GUEST_FS_INDEX, 0 }, // 0x0808
{ VMCS_16_GUEST_GS_INDEX, 0 }, // 0x080A
{ VMCS_16_GUEST_LDTR_INDEX, 0 }, // 0x080C
{ VMCS_16_GUEST_TR_INDEX, 0 }, // 0x080E
{ VMCS_16_HOST_ES_INDEX, 0 }, // 0x0C00
{ VMCS_16_HOST_CS_INDEX, 0 }, // 0x0C02
{ VMCS_16_HOST_SS_INDEX, 0 }, // 0x0C04
{ VMCS_16_HOST_DS_INDEX, 0 }, // 0x0C06
{ VMCS_16_HOST_FS_INDEX, 0 }, // 0x0C08
{ VMCS_16_HOST_GS_INDEX, 0 }, // 0x0C0A
{ VMCS_16_HOST_TR_INDEX, 0 }, // 0x0C0C
{ VMCS_64_CONTROL_IO_BITMAP_A_INDEX, 0 }, // 0x2000
{ VMCS_64_CONTROL_IO_BITMAP_B_INDEX, 0 }, // 0x2002
{ VMCS_64_CONTROL_MSR_BITMAP_INDEX, 0 }, // 0x2004
{ VMCS_64_CONTROL_VMEXIT_MSR_STORE_INDEX, 0 }, // 0x2006
{ VMCS_64_CONTROL_VMEXIT_MSR_LOAD_INDEX, 0 }, // 0x2008
{ VMCS_64_CONTROL_VMENTRY_MSR_LOAD_INDEX, 0 }, // 0x200A
{ VMCS_64_CONTROL_EXECUTIVE_VMCS_PTR_INDEX, 0 }, // 0x200C
{ VMCS_64_CONTROL_TSC_OFFSET_INDEX, 0 }, // 0x2010
{ VMCS_64_CONTROL_VIRTUAL_APIC_ADDR_INDEX, 0 }, // 0x2012
{ VMCS_64_CONTROL_APIC_ACCESS_ADDR_INDEX, 0 }, // 0x2014
{ VMCS_64_CONTROL_VM_FUNCTION_CONTROLS_INDEX, 0 }, // 0x2018
{ VMCS_64_CONTROL_EPT_PTR_INDEX, 0 }, // 0x201A
{ VMCS_64_CONTROL_EPTP_LIST_ADDRESS_INDEX, 0 }, // 0x2024
{ VMCS_64_RO_GUEST_PHYSICAL_ADDR_INDEX, 0 }, // 0x2400
{ VMCS_64_GUEST_VMCS_LINK_PTR_INDEX, 0 }, // 0x2800
{ VMCS_64_GUEST_IA32_DEBUGCTL_INDEX, 0 }, // 0x2802
{ VMCS_64_GUEST_IA32_PAT_INDEX, 0 }, // 0x2804
{ VMCS_64_GUEST_IA32_EFER_INDEX, 0 }, // 0x2806
{ VMCS_64_GUEST_IA32_PERF_GLOBAL_CTRL_INDEX, 0 }, // 0x2808
{ VMCS_64_GUEST_PDPTE0_INDEX, 0 }, // 0x280A
{ VMCS_64_GUEST_PDPTE1_INDEX, 0 }, // 0x280C
{ VMCS_64_GUEST_PDPTE2_INDEX, 0 }, // 0x280E
{ VMCS_64_GUEST_PDPTE3_INDEX, 0 }, // 0x2810
{ VMCS_64_HOST_IA32_PAT_INDEX, 0 }, // 0x2C00
{ VMCS_64_HOST_IA32_EFER_INDEX, 0 }, // 0x2C02
{ VMCS_64_HOST_IA32_PERF_GLOBAL_CTRL_INDEX, 0 }, // 0x2C04
{ VMCS_32_CONTROL_PIN_BASED_VM_EXECUTION_INDEX, 0 }, // 0x4000
{ VMCS_32_CONTROL_PROCESSOR_BASED_VM_EXECUTION_INDEX, 0 }, // 0x4002
{ VMCS_32_CONTROL_EXCEPTION_BITMAP_INDEX, 0 }, // 0x4004
{ VMCS_32_CONTROL_PAGE_FAULT_ERROR_CODE_MASK_INDEX, 0 }, // 0x4006
{ VMCS_32_CONTROL_PAGE_FAULT_ERROR_CODE_MATCH_INDEX, 0 }, // 0x4008
{ VMCS_32_CONTROL_CR3_TARGET_COUNT_INDEX, 0 }, // 0x400A
{ VMCS_32_CONTROL_VMEXIT_CONTROLS_INDEX, 0 }, // 0x400C
{ VMCS_32_CONTROL_VMEXIT_MSR_STORE_COUNT_INDEX, 0 }, // 0x400E
{ VMCS_32_CONTROL_VMEXIT_MSR_LOAD_COUNT_INDEX, 0 }, // 0x4010
{ VMCS_32_CONTROL_VMENTRY_CONTROLS_INDEX, 0 }, // 0x4012
{ VMCS_32_CONTROL_VMENTRY_MSR_LOAD_COUNT_INDEX, 0 }, // 0x4014
{ VMCS_32_CONTROL_VMENTRY_INTERRUPTION_INFO_INDEX, 0 }, // 0x4016
{ VMCS_32_CONTROL_VMENTRY_EXCEPTION_ERROR_CODE_INDEX, 0 }, // 0x4018
{ VMCS_32_CONTROL_VMENTRY_INSTRUCTION_LENGTH_INDEX, 0 }, // 0x401A
{ VMCS_32_CONTROL_TPR_THRESHOLD_INDEX, 0 }, // 0x401C
{ VMCS_32_CONTROL_2ND_PROCESSOR_BASED_VM_EXECUTION_INDEX, 0 }, // 0x401E
{ VMCS_32_CONTROL_PLE_GAP_INDEX, 0 }, // 0x4020
{ VMCS_32_CONTROL_PLE_WINDOW_INDEX, 0 }, // 0x4022
{ VMCS_32_RO_VM_INSTRUCTION_ERROR_INDEX, 0 }, // 0x4400
{ VMCS_32_RO_EXIT_REASON_INDEX, 0 }, // 0x4402
{ VMCS_32_RO_VMEXIT_INTERRUPTION_INFO_INDEX, 0 }, // 0x4404
{ VMCS_32_RO_VMEXIT_INTERRUPTION_ERROR_CODE_INDEX, 0 }, // 0x4406
{ VMCS_32_RO_IDT_VECTORING_INFO_INDEX, 0 }, // 0x4408
{ VMCS_32_RO_IDT_VECTORING_ERROR_CODE_INDEX, 0 }, // 0x440A
{ VMCS_32_RO_VMEXIT_INSTRUCTION_LENGTH_INDEX, 0 }, // 0x440C
{ VMCS_32_RO_VMEXIT_INSTRUCTION_INFO_INDEX, 0 }, // 0x440E
{ VMCS_32_GUEST_ES_LIMIT_INDEX, 0 }, // 0x4800
{ VMCS_32_GUEST_CS_LIMIT_INDEX, 0 }, // 0x4802
{ VMCS_32_GUEST_SS_LIMIT_INDEX, 0 }, // 0x4804
{ VMCS_32_GUEST_DS_LIMIT_INDEX, 0 }, // 0x4806
{ VMCS_32_GUEST_FS_LIMIT_INDEX, 0 }, // 0x4808
{ VMCS_32_GUEST_GS_LIMIT_INDEX, 0 }, // 0x480A
{ VMCS_32_GUEST_LDTR_LIMIT_INDEX, 0 }, // 0x480C
{ VMCS_32_GUEST_TR_LIMIT_INDEX, 0 }, // 0x480E
{ VMCS_32_GUEST_GDTR_LIMIT_INDEX, 0 }, // 0x4810
{ VMCS_32_GUEST_IDTR_LIMIT_INDEX, 0 }, // 0x4812
{ VMCS_32_GUEST_ES_ACCESS_RIGHT_INDEX, 0 }, // 0x4814
{ VMCS_32_GUEST_CS_ACCESS_RIGHT_INDEX, 0 }, // 0x4816
{ VMCS_32_GUEST_SS_ACCESS_RIGHT_INDEX, 0 }, // 0x4818
{ VMCS_32_GUEST_DS_ACCESS_RIGHT_INDEX, 0 }, // 0x481A
{ VMCS_32_GUEST_FS_ACCESS_RIGHT_INDEX, 0 }, // 0x481C
{ VMCS_32_GUEST_GS_ACCESS_RIGHT_INDEX, 0 }, // 0x481E
{ VMCS_32_GUEST_LDTR_ACCESS_RIGHT_INDEX, 0 }, // 0x4820
{ VMCS_32_GUEST_TR_ACCESS_RIGHT_INDEX, 0 }, // 0x4822
{ VMCS_32_GUEST_INTERRUPTIBILITY_STATE_INDEX, 0 }, // 0x4824
{ VMCS_32_GUEST_ACTIVITY_STATE_INDEX, 0 }, // 0x4826
{ VMCS_32_GUEST_SMBASE_INDEX, 0 }, // 0x4828
{ VMCS_32_GUEST_IA32_SYSENTER_CS_INDEX, 0 }, // 0x482A
{ VMCS_32_GUEST_VMX_PREEMPTION_TIMER_VALUE_INDEX, 0 }, // 0x482E
{ VMCS_32_HOST_IA32_SYSENTER_CS_INDEX, 0 }, // 0x4C00
{ VMCS_N_CONTROL_CR0_GUEST_HOST_MASK_INDEX, 0 }, // 0x6000
{ VMCS_N_CONTROL_CR4_GUEST_HOST_MASK_INDEX, 0 }, // 0x6002
{ VMCS_N_CONTROL_CR0_READ_SHADOW_INDEX, 0 }, // 0x6004
{ VMCS_N_CONTROL_CR4_READ_SHADOW_INDEX, 0 }, // 0x6006
{ VMCS_N_CONTROL_CR3_TARGET_VALUE0_INDEX, 0 }, // 0x6008
{ VMCS_N_CONTROL_CR3_TARGET_VALUE1_INDEX, 0 }, // 0x600A
{ VMCS_N_CONTROL_CR3_TARGET_VALUE2_INDEX, 0 }, // 0x600C
{ VMCS_N_CONTROL_CR3_TARGET_VALUE3_INDEX, 0 }, // 0x600E
{ VMCS_N_RO_EXIT_QUALIFICATION_INDEX, 0 }, // 0x6400
{ VMCS_N_RO_IO_RCX_INDEX, 0 }, // 0x6402
{ VMCS_N_RO_IO_RSI_INDEX, 0 }, // 0x6404
{ VMCS_N_RO_IO_RDI_INDEX, 0 }, // 0x6406
{ VMCS_N_RO_IO_RIP_INDEX, 0 }, // 0x6408
{ VMCS_N_RO_GUEST_LINEAR_ADDR_INDEX, 0 }, // 0x640A
{ VMCS_N_GUEST_CR0_INDEX, 0 }, // 0x6800
{ VMCS_N_GUEST_CR3_INDEX, 0 }, // 0x6802
{ VMCS_N_GUEST_CR4_INDEX, 0 }, // 0x6804
{ VMCS_N_GUEST_ES_BASE_INDEX, 0 }, // 0x6806
{ VMCS_N_GUEST_CS_BASE_INDEX, 0 }, // 0x6808
{ VMCS_N_GUEST_SS_BASE_INDEX, 0 }, // 0x680A
{ VMCS_N_GUEST_DS_BASE_INDEX, 0 }, // 0x680C
{ VMCS_N_GUEST_FS_BASE_INDEX, 0 }, // 0x680E
{ VMCS_N_GUEST_GS_BASE_INDEX, 0 }, // 0x6810
{ VMCS_N_GUEST_LDTR_BASE_INDEX, 0 }, // 0x6812
{ VMCS_N_GUEST_TR_BASE_INDEX, 0 }, // 0x6814
{ VMCS_N_GUEST_GDTR_BASE_INDEX, 0 }, // 0x6816
{ VMCS_N_GUEST_IDTR_BASE_INDEX, 0 }, // 0x6818
{ VMCS_N_GUEST_DR7_INDEX, 0 }, // 0x681A
{ VMCS_N_GUEST_RSP_INDEX, 0 }, // 0x681C
{ VMCS_N_GUEST_RIP_INDEX, 0 }, // 0x681E
{ VMCS_N_GUEST_RFLAGS_INDEX, 0 }, // 0x6820
{ VMCS_N_GUEST_PENDING_DEBUG_EXCEPTIONS_INDEX, 0 }, // 0x6822
{ VMCS_N_GUEST_IA32_SYSENTER_ESP_INDEX, 0 }, // 0x6824
{ VMCS_N_GUEST_IA32_SYSENTER_EIP_INDEX, 0 }, // 0x6826
{ VMCS_N_HOST_CR0_INDEX, 0 }, // 0x6C00
{ VMCS_N_HOST_CR3_INDEX, 0 }, // 0x6C02
{ VMCS_N_HOST_CR4_INDEX, 0 }, // 0x6C04
{ VMCS_N_HOST_FS_BASE_INDEX, 0 }, // 0x6C06
{ VMCS_N_HOST_GS_BASE_INDEX, 0 }, // 0x6C08
{ VMCS_N_HOST_TR_BASE_INDEX, 0 }, // 0x6C0A
{ VMCS_N_HOST_GDTR_BASE_INDEX, 0 }, // 0x6C0C
{ VMCS_N_HOST_IDTR_BASE_INDEX, 0 }, // 0x6C0E
{ VMCS_N_HOST_IA32_SYSENTER_ESP_INDEX, 0 }, // 0x6C10
{ VMCS_N_HOST_IA32_SYSENTER_EIP_INDEX, 0 }, // 0x6C12
{ VMCS_N_HOST_RSP_INDEX, 0 }, // 0x6C14
{ VMCS_N_HOST_RIP_INDEX, 0 }, // 0x6C16
{ 0xFFFF, 0 }
};
// Table mapping each VMCS field encoding to a printable name for debug
// output. Entry order must match VmcsFieldOffsetTable, since MapVmcs()
// indexes both tables with the same loop counter; terminated by the
// sentinel encoding 0xFFFF.
// Fix: the VMCS_16_GUEST_CS entry previously printed "VMCS_16_GUEST_ES",
// mislabeling the guest CS selector field in the debug log.
VMCSFIELDPRINT VmcsFieldPrintTable[] =
{
{ VMCS_16_CONTROL_VPID_INDEX, "VMCS_16_CONTROL_VPID" }, // 0x0000
{ VMCS_16_GUEST_ES_INDEX, "VMCS_16_GUEST_ES" }, // 0x0800
{ VMCS_16_GUEST_CS_INDEX, "VMCS_16_GUEST_CS" }, // 0x0802
{ VMCS_16_GUEST_SS_INDEX, "VMCS_16_GUEST_SS" }, // 0x0804
{ VMCS_16_GUEST_DS_INDEX, "VMCS_16_GUEST_DS" }, // 0x0806
{ VMCS_16_GUEST_FS_INDEX, "VMCS_16_GUEST_FS" }, // 0x0808
{ VMCS_16_GUEST_GS_INDEX, "VMCS_16_GUEST_GS" }, // 0x080A
{ VMCS_16_GUEST_LDTR_INDEX, "VMCS_16_GUEST_LDTR" }, // 0x080C
{ VMCS_16_GUEST_TR_INDEX, "VMCS_16_GUEST_TR" }, // 0x080E
{ VMCS_16_HOST_ES_INDEX, "VMCS_16_HOST_ES_INDEX" }, // 0x0C00
{ VMCS_16_HOST_CS_INDEX, "VMCS_16_HOST_CS_INDEX" }, // 0x0C02
{ VMCS_16_HOST_SS_INDEX, "VMCS_16_HOST_SS_INDEX" }, // 0x0C04
{ VMCS_16_HOST_DS_INDEX, "VMCS_16_HOST_DS_INDEX" }, // 0x0C06
{ VMCS_16_HOST_FS_INDEX, "VMCS_16_HOST_FS_INDEX" }, // 0x0C08
{ VMCS_16_HOST_GS_INDEX, "VMCS_16_HOST_GS_INDEX" }, // 0x0C0A
{ VMCS_16_HOST_TR_INDEX, "VMCS_16_HOST_TR_INDEX" }, // 0x0C0C
{ VMCS_64_CONTROL_IO_BITMAP_A_INDEX, "VMCS_64_CONTROL_IO_BITMAP_A_INDEX" }, // 0x2000
{ VMCS_64_CONTROL_IO_BITMAP_B_INDEX, "VMCS_64_CONTROL_IO_BITMAP_B_INDEX" }, // 0x2002
{ VMCS_64_CONTROL_MSR_BITMAP_INDEX, "VMCS_64_CONTROL_MSR_BITMAP_INDEX" }, // 0x2004
{ VMCS_64_CONTROL_VMEXIT_MSR_STORE_INDEX, "VMCS_64_CONTROL_VMEXIT_MSR_STORE_INDEX" }, // 0x2006
{ VMCS_64_CONTROL_VMEXIT_MSR_LOAD_INDEX, "VMCS_64_CONTROL_VMEXIT_MSR_LOAD_INDEX" }, // 0x2008
{ VMCS_64_CONTROL_VMENTRY_MSR_LOAD_INDEX, "VMCS_64_CONTROL_VMENTRY_MSR_LOAD_INDEX" }, // 0x200A
{ VMCS_64_CONTROL_EXECUTIVE_VMCS_PTR_INDEX, "VMCS_64_CONTROL_EXECUTIVE_VMCS_PTR_INDEX" }, // 0x200C
{ VMCS_64_CONTROL_TSC_OFFSET_INDEX, "VMCS_64_CONTROL_TSC_OFFSET_INDEX" }, // 0x2010
{ VMCS_64_CONTROL_VIRTUAL_APIC_ADDR_INDEX, "VMCS_64_CONTROL_VIRTUAL_APIC_ADDR_INDEX" }, // 0x2012
{ VMCS_64_CONTROL_APIC_ACCESS_ADDR_INDEX, "VMCS_64_CONTROL_APIC_ACCESS_ADDR_INDEX" }, // 0x2014
{ VMCS_64_CONTROL_VM_FUNCTION_CONTROLS_INDEX, "VMCS_64_CONTROL_VM_FUNCTION_CONTROLS_INDEX" }, // 0x2018
{ VMCS_64_CONTROL_EPT_PTR_INDEX, "VMCS_64_CONTROL_EPT_PTR_INDEX" }, // 0x201A
{ VMCS_64_CONTROL_EPTP_LIST_ADDRESS_INDEX, "VMCS_64_CONTROL_EPTP_LIST_ADDRESS_INDEX" }, // 0x2024
{ VMCS_64_RO_GUEST_PHYSICAL_ADDR_INDEX, "VMCS_64_RO_GUEST_PHYSICAL_ADDR_INDEX" }, // 0x2400
{ VMCS_64_GUEST_VMCS_LINK_PTR_INDEX, "VMCS_64_GUEST_VMCS_LINK_PTR_INDEX" }, // 0x2800
{ VMCS_64_GUEST_IA32_DEBUGCTL_INDEX, "VMCS_64_GUEST_IA32_DEBUGCTL_INDEX" }, // 0x2802
{ VMCS_64_GUEST_IA32_PAT_INDEX, "VMCS_64_GUEST_IA32_PAT_INDEX" }, // 0x2804
{ VMCS_64_GUEST_IA32_EFER_INDEX, "VMCS_64_GUEST_IA32_EFER_INDEX" }, // 0x2806
{ VMCS_64_GUEST_IA32_PERF_GLOBAL_CTRL_INDEX, "VMCS_64_GUEST_IA32_PERF_GLOBAL_CTRL_INDEX" }, // 0x2808
{ VMCS_64_GUEST_PDPTE0_INDEX, "VMCS_64_GUEST_PDPTE0_INDEX" }, // 0x280A
{ VMCS_64_GUEST_PDPTE1_INDEX, "VMCS_64_GUEST_PDPTE1_INDEX" }, // 0x280C
{ VMCS_64_GUEST_PDPTE2_INDEX, "VMCS_64_GUEST_PDPTE2_INDEX" }, // 0x280E
{ VMCS_64_GUEST_PDPTE3_INDEX, "VMCS_64_GUEST_PDPTE3_INDEX" }, // 0x2810
{ VMCS_64_HOST_IA32_PAT_INDEX, "VMCS_64_HOST_IA32_PAT_INDEX" }, // 0x2C00
{ VMCS_64_HOST_IA32_EFER_INDEX, "VMCS_64_HOST_IA32_EFER_INDEX" }, // 0x2C02
{ VMCS_64_HOST_IA32_PERF_GLOBAL_CTRL_INDEX, "VMCS_64_HOST_IA32_PERF_GLOBAL_CTRL_INDEX" }, // 0x2C04
{ VMCS_32_CONTROL_PIN_BASED_VM_EXECUTION_INDEX, "VMCS_32_CONTROL_PIN_BASED_VM_EXECUTION_INDEX" }, // 0x4000
{ VMCS_32_CONTROL_PROCESSOR_BASED_VM_EXECUTION_INDEX, "VMCS_32_CONTROL_PROCESSOR_BASED_VM_EXECUTION_INDEX" }, // 0x4002
{ VMCS_32_CONTROL_EXCEPTION_BITMAP_INDEX, "VMCS_32_CONTROL_EXCEPTION_BITMAP_INDEX" }, // 0x4004
{ VMCS_32_CONTROL_PAGE_FAULT_ERROR_CODE_MASK_INDEX, "VMCS_32_CONTROL_PAGE_FAULT_ERROR_CODE_MASK_INDEX" }, // 0x4006
{ VMCS_32_CONTROL_PAGE_FAULT_ERROR_CODE_MATCH_INDEX, "VMCS_32_CONTROL_PAGE_FAULT_ERROR_CODE_MATCH_INDEX" }, // 0x4008
{ VMCS_32_CONTROL_CR3_TARGET_COUNT_INDEX, "VMCS_32_CONTROL_CR3_TARGET_COUNT_INDEX" }, // 0x400A
{ VMCS_32_CONTROL_VMEXIT_CONTROLS_INDEX, "VMCS_32_CONTROL_VMEXIT_CONTROLS_INDEX" }, // 0x400C
{ VMCS_32_CONTROL_VMEXIT_MSR_STORE_COUNT_INDEX, "VMCS_32_CONTROL_VMEXIT_MSR_STORE_COUNT_INDEX" }, // 0x400E
{ VMCS_32_CONTROL_VMEXIT_MSR_LOAD_COUNT_INDEX, "VMCS_32_CONTROL_VMEXIT_MSR_LOAD_COUNT_INDEX" }, // 0x4010
{ VMCS_32_CONTROL_VMENTRY_CONTROLS_INDEX, "VMCS_32_CONTROL_VMENTRY_CONTROLS_INDEX" }, // 0x4012
{ VMCS_32_CONTROL_VMENTRY_MSR_LOAD_COUNT_INDEX, "VMCS_32_CONTROL_VMENTRY_MSR_LOAD_COUNT_INDEX" }, // 0x4014
{ VMCS_32_CONTROL_VMENTRY_INTERRUPTION_INFO_INDEX, "VMCS_32_CONTROL_VMENTRY_INTERRUPTION_INFO_INDEX" }, // 0x4016
{ VMCS_32_CONTROL_VMENTRY_EXCEPTION_ERROR_CODE_INDEX, "VMCS_32_CONTROL_VMENTRY_EXCEPTION_ERROR_CODE_INDEX" }, // 0x4018
{ VMCS_32_CONTROL_VMENTRY_INSTRUCTION_LENGTH_INDEX, "VMCS_32_CONTROL_VMENTRY_INSTRUCTION_LENGTH_INDEX" }, // 0x401A
{ VMCS_32_CONTROL_TPR_THRESHOLD_INDEX, "VMCS_32_CONTROL_TPR_THRESHOLD_INDEX" }, // 0x401C
{ VMCS_32_CONTROL_2ND_PROCESSOR_BASED_VM_EXECUTION_INDEX, "VMCS_32_CONTROL_2ND_PROCESSOR_BASED_VM_EXECUTION_INDEX" }, // 0x401E
{ VMCS_32_CONTROL_PLE_GAP_INDEX, "VMCS_32_CONTROL_PLE_GAP_INDEX" }, // 0x4020
{ VMCS_32_CONTROL_PLE_WINDOW_INDEX, "VMCS_32_CONTROL_PLE_WINDOW_INDEX" }, // 0x4022
{ VMCS_32_RO_VM_INSTRUCTION_ERROR_INDEX, "VMCS_32_RO_VM_INSTRUCTION_ERROR_INDEX" }, // 0x4400
{ VMCS_32_RO_EXIT_REASON_INDEX, "VMCS_32_RO_EXIT_REASON_INDEX" }, // 0x4402
{ VMCS_32_RO_VMEXIT_INTERRUPTION_INFO_INDEX, "VMCS_32_RO_VMEXIT_INTERRUPTION_INFO_INDEX" }, // 0x4404
{ VMCS_32_RO_VMEXIT_INTERRUPTION_ERROR_CODE_INDEX, "VMCS_32_RO_VMEXIT_INTERRUPTION_ERROR_CODE_INDEX" }, // 0x4406
{ VMCS_32_RO_IDT_VECTORING_INFO_INDEX, "VMCS_32_RO_IDT_VECTORING_INFO_INDEX" }, // 0x4408
{ VMCS_32_RO_IDT_VECTORING_ERROR_CODE_INDEX, "VMCS_32_RO_IDT_VECTORING_ERROR_CODE_INDEX" }, // 0x440A
{ VMCS_32_RO_VMEXIT_INSTRUCTION_LENGTH_INDEX, "VMCS_32_RO_VMEXIT_INSTRUCTION_LENGTH_INDEX" }, // 0x440C
{ VMCS_32_RO_VMEXIT_INSTRUCTION_INFO_INDEX, "VMCS_32_RO_VMEXIT_INSTRUCTION_INFO_INDEX" }, // 0x440E
{ VMCS_32_GUEST_ES_LIMIT_INDEX, "VMCS_32_GUEST_ES_LIMIT_INDEX" }, // 0x4800
{ VMCS_32_GUEST_CS_LIMIT_INDEX, "VMCS_32_GUEST_CS_LIMIT_INDEX" }, // 0x4802
{ VMCS_32_GUEST_SS_LIMIT_INDEX, "VMCS_32_GUEST_SS_LIMIT_INDEX" }, // 0x4804
{ VMCS_32_GUEST_DS_LIMIT_INDEX, "VMCS_32_GUEST_DS_LIMIT_INDEX" }, // 0x4806
{ VMCS_32_GUEST_FS_LIMIT_INDEX, "VMCS_32_GUEST_FS_LIMIT_INDEX" }, // 0x4808
{ VMCS_32_GUEST_GS_LIMIT_INDEX, "VMCS_32_GUEST_GS_LIMIT_INDEX" }, // 0x480A
{ VMCS_32_GUEST_LDTR_LIMIT_INDEX, "VMCS_32_GUEST_LDTR_LIMIT_INDEX" }, // 0x480C
{ VMCS_32_GUEST_TR_LIMIT_INDEX, "VMCS_32_GUEST_TR_LIMIT_INDEX" }, // 0x480E
{ VMCS_32_GUEST_GDTR_LIMIT_INDEX, "VMCS_32_GUEST_GDTR_LIMIT_INDEX" }, // 0x4810
{ VMCS_32_GUEST_IDTR_LIMIT_INDEX, "VMCS_32_GUEST_IDTR_LIMIT_INDEX" }, // 0x4812
{ VMCS_32_GUEST_ES_ACCESS_RIGHT_INDEX, "VMCS_32_GUEST_ES_ACCESS_RIGHT_INDEX" }, // 0x4814
{ VMCS_32_GUEST_CS_ACCESS_RIGHT_INDEX, "VMCS_32_GUEST_CS_ACCESS_RIGHT_INDEX" }, // 0x4816
{ VMCS_32_GUEST_SS_ACCESS_RIGHT_INDEX, "VMCS_32_GUEST_SS_ACCESS_RIGHT_INDEX" }, // 0x4818
{ VMCS_32_GUEST_DS_ACCESS_RIGHT_INDEX, "VMCS_32_GUEST_DS_ACCESS_RIGHT_INDEX" }, // 0x481A
{ VMCS_32_GUEST_FS_ACCESS_RIGHT_INDEX, "VMCS_32_GUEST_FS_ACCESS_RIGHT_INDEX" }, // 0x481C
{ VMCS_32_GUEST_GS_ACCESS_RIGHT_INDEX, "VMCS_32_GUEST_GS_ACCESS_RIGHT_INDEX" }, // 0x481E
{ VMCS_32_GUEST_LDTR_ACCESS_RIGHT_INDEX, "VMCS_32_GUEST_LDTR_ACCESS_RIGHT_INDEX" }, // 0x4820
{ VMCS_32_GUEST_TR_ACCESS_RIGHT_INDEX, "VMCS_32_GUEST_TR_ACCESS_RIGHT_INDEX" }, // 0x4822
{ VMCS_32_GUEST_INTERRUPTIBILITY_STATE_INDEX, "VMCS_32_GUEST_INTERRUPTIBILITY_STATE_INDEX" }, // 0x4824
{ VMCS_32_GUEST_ACTIVITY_STATE_INDEX, "VMCS_32_GUEST_ACTIVITY_STATE_INDEX" }, // 0x4826
{ VMCS_32_GUEST_SMBASE_INDEX, "VMCS_32_GUEST_SMBASE_INDEX" }, // 0x4828
{ VMCS_32_GUEST_IA32_SYSENTER_CS_INDEX, "VMCS_32_GUEST_IA32_SYSENTER_CS_INDEX" }, // 0x482A
{ VMCS_32_GUEST_VMX_PREEMPTION_TIMER_VALUE_INDEX, "VMCS_32_GUEST_VMX_PREEMPTION_TIMER_VALUE_INDEX" }, // 0x482E
{ VMCS_32_HOST_IA32_SYSENTER_CS_INDEX, "VMCS_32_HOST_IA32_SYSENTER_CS_INDEX" }, // 0x4C00
{ VMCS_N_CONTROL_CR0_GUEST_HOST_MASK_INDEX, "VMCS_N_CONTROL_CR0_GUEST_HOST_MASK_INDEX" }, // 0x6000
{ VMCS_N_CONTROL_CR4_GUEST_HOST_MASK_INDEX, "VMCS_N_CONTROL_CR4_GUEST_HOST_MASK_INDEX" }, // 0x6002
{ VMCS_N_CONTROL_CR0_READ_SHADOW_INDEX, "VMCS_N_CONTROL_CR0_READ_SHADOW_INDEX" }, // 0x6004
{ VMCS_N_CONTROL_CR4_READ_SHADOW_INDEX, "VMCS_N_CONTROL_CR4_READ_SHADOW_INDEX" }, // 0x6006
{ VMCS_N_CONTROL_CR3_TARGET_VALUE0_INDEX, "VMCS_N_CONTROL_CR3_TARGET_VALUE0_INDEX" }, // 0x6008
{ VMCS_N_CONTROL_CR3_TARGET_VALUE1_INDEX, "VMCS_N_CONTROL_CR3_TARGET_VALUE1_INDEX" }, // 0x600A
{ VMCS_N_CONTROL_CR3_TARGET_VALUE2_INDEX, "VMCS_N_CONTROL_CR3_TARGET_VALUE2_INDEX" }, // 0x600C
{ VMCS_N_CONTROL_CR3_TARGET_VALUE3_INDEX, "VMCS_N_CONTROL_CR3_TARGET_VALUE3_INDEX" }, // 0x600E
{ VMCS_N_RO_EXIT_QUALIFICATION_INDEX, "VMCS_N_RO_EXIT_QUALIFICATION_INDEX" }, // 0x6400
{ VMCS_N_RO_IO_RCX_INDEX, "VMCS_N_RO_IO_RCX_INDEX" }, // 0x6402
{ VMCS_N_RO_IO_RSI_INDEX, "VMCS_N_RO_IO_RSI_INDEX" }, // 0x6404
{ VMCS_N_RO_IO_RDI_INDEX, "VMCS_N_RO_IO_RDI_INDEX" }, // 0x6406
{ VMCS_N_RO_IO_RIP_INDEX, "VMCS_N_RO_IO_RIP_INDEX" }, // 0x6408
{ VMCS_N_RO_GUEST_LINEAR_ADDR_INDEX, "VMCS_N_RO_GUEST_LINEAR_ADDR_INDEX" }, // 0x640A
{ VMCS_N_GUEST_CR0_INDEX, "VMCS_N_GUEST_CR0_INDEX" }, // 0x6800
{ VMCS_N_GUEST_CR3_INDEX, "VMCS_N_GUEST_CR3_INDEX" }, // 0x6802
{ VMCS_N_GUEST_CR4_INDEX, "VMCS_N_GUEST_CR4_INDEX" }, // 0x6804
{ VMCS_N_GUEST_ES_BASE_INDEX, "VMCS_N_GUEST_ES_BASE_INDEX" }, // 0x6806
{ VMCS_N_GUEST_CS_BASE_INDEX, "VMCS_N_GUEST_CS_BASE_INDEX" }, // 0x6808
{ VMCS_N_GUEST_SS_BASE_INDEX, "VMCS_N_GUEST_SS_BASE_INDEX" }, // 0x680A
{ VMCS_N_GUEST_DS_BASE_INDEX, "VMCS_N_GUEST_DS_BASE_INDEX" }, // 0x680C
{ VMCS_N_GUEST_FS_BASE_INDEX, "VMCS_N_GUEST_FS_BASE_INDEX" }, // 0x680E
{ VMCS_N_GUEST_GS_BASE_INDEX, "VMCS_N_GUEST_GS_BASE_INDEX" }, // 0x6810
{ VMCS_N_GUEST_LDTR_BASE_INDEX, "VMCS_N_GUEST_LDTR_BASE_INDEX" }, // 0x6812
{ VMCS_N_GUEST_TR_BASE_INDEX, "VMCS_N_GUEST_TR_BASE_INDEX" }, // 0x6814
{ VMCS_N_GUEST_GDTR_BASE_INDEX, "VMCS_N_GUEST_GDTR_BASE_INDEX" }, // 0x6816
{ VMCS_N_GUEST_IDTR_BASE_INDEX, "VMCS_N_GUEST_IDTR_BASE_INDEX" }, // 0x6818
{ VMCS_N_GUEST_DR7_INDEX, "VMCS_N_GUEST_DR7_INDEX" }, // 0x681A
{ VMCS_N_GUEST_RSP_INDEX, "VMCS_N_GUEST_RSP_INDEX" }, // 0x681C
{ VMCS_N_GUEST_RIP_INDEX, "VMCS_N_GUEST_RIP_INDEX" }, // 0x681E
{ VMCS_N_GUEST_RFLAGS_INDEX, "VMCS_N_GUEST_RFLAGS_INDEX" }, // 0x6820
{ VMCS_N_GUEST_PENDING_DEBUG_EXCEPTIONS_INDEX, "VMCS_N_GUEST_PENDING_DEBUG_EXCEPTIONS_INDEX" }, // 0x6822
{ VMCS_N_GUEST_IA32_SYSENTER_ESP_INDEX, "VMCS_N_GUEST_IA32_SYSENTER_ESP_INDEX" }, // 0x6824
{ VMCS_N_GUEST_IA32_SYSENTER_EIP_INDEX, "VMCS_N_GUEST_IA32_SYSENTER_EIP_INDEX" }, // 0x6826
{ VMCS_N_HOST_CR0_INDEX, "VMCS_N_HOST_CR0_INDEX" }, // 0x6C00
{ VMCS_N_HOST_CR3_INDEX, "VMCS_N_HOST_CR3_INDEX" }, // 0x6C02
{ VMCS_N_HOST_CR4_INDEX, "VMCS_N_HOST_CR4_INDEX" }, // 0x6C04
{ VMCS_N_HOST_FS_BASE_INDEX, "VMCS_N_HOST_FS_BASE_INDEX" }, // 0x6C06
{ VMCS_N_HOST_GS_BASE_INDEX, "VMCS_N_HOST_GS_BASE_INDEX" }, // 0x6C08
{ VMCS_N_HOST_TR_BASE_INDEX, "VMCS_N_HOST_TR_BASE_INDEX" }, // 0x6C0A
{ VMCS_N_HOST_GDTR_BASE_INDEX, "VMCS_N_HOST_GDTR_BASE_INDEX" }, // 0x6C0C
{ VMCS_N_HOST_IDTR_BASE_INDEX, "VMCS_N_HOST_IDTR_BASE_INDEX" }, // 0x6C0E
{ VMCS_N_HOST_IA32_SYSENTER_ESP_INDEX, "VMCS_N_HOST_IA32_SYSENTER_ESP_INDEX" }, // 0x6C10
{ VMCS_N_HOST_IA32_SYSENTER_EIP_INDEX, "VMCS_N_HOST_IA32_SYSENTER_EIP_INDEX" }, // 0x6C12
{ VMCS_N_HOST_RSP_INDEX, "VMCS_N_HOST_RSP_INDEX" }, // 0x6C14
{ VMCS_N_HOST_RIP_INDEX, "VMCS_N_HOST_RIP_INDEX" }, // 0x6C16
{ 0xFFFF, NULL }
};
static UINT32 VmcsMapInit = 0; // used to trigger VmcsMap initialization
/**
  Dynamically discovers the byte offset of each VMCS field within this
  processor's VMCS region and records it in VmcsFieldOffsetTable[].FieldOffset.

  Technique: a scratch VMCS is filled with 16-bit values equal to their own
  byte offset, then made current with VMPTRLD.  A VMREAD of each field
  encoding therefore returns (in its low 16 bits) the offset at which the
  field lives.  The previously current VMCS is saved on entry and restored
  on exit.  Runs at most once, guarded by VmcsMapInit.

  NOTE(review): assumes AllocatePages() returns identity-mapped memory
  (virtual == physical) so the pointer can be handed to VMPTRLD/VMCLEAR
  directly -- confirm against the STM memory model.
**/
void MapVmcs ()
{
  unsigned short index;
  unsigned int i;
  UINT64 CurrentVMCSSave;   // physical address of the VMCS current on entry
  UINT64 FieldValue;
  UINT32 FieldOffset;
  UINT32 VmxRevId;
  char Line[150];           // scratch buffer for composing DEBUG format strings
  char * EvalVmcs;          // scratch VMCS used for offset discovery

  if(VmcsMapInit == 1)
  {
    return;                 // already mapped - nothing to do
  }

  // setup a dummy VMCS
  // BUG - need to check processor about proper VMCS size
  EvalVmcs = (char *) AllocatePages(VmcsSizeInPages);
  if(EvalVmcs == NULL)
  {
    // leave VmcsMapInit clear so a later call can retry the mapping
    DEBUG((EFI_D_ERROR, "MapVmcs - unable to allocate eval VMCS\n"));
    return;
  }
  VmcsMapInit = 1;

  // fill the VMCS region with 16bit values that will provide us
  // with indexes into the VMCS (each 16-bit slot holds its own offset)
  for( index = 4; index < SIZE_4KB; index += 2 )
  {
    *(unsigned short *)(EvalVmcs + index) = index;
  }

  // the first dword of a VMCS must hold the VMX revision identifier
  VmxRevId = AsmReadMsr32(IA32_VMX_BASIC_MSR_INDEX);
  memcpy(EvalVmcs, &VmxRevId, 4);

  AsmVmPtrStore(&CurrentVMCSSave); // save off the current Vmcs pointer
  AsmVmClear(&CurrentVMCSSave);    // flush it to memory before switching
  AsmVmPtrLoad((UINT64 *) &EvalVmcs); // load our indexed Vmcs

  // scan through the table and determine each offset
  for( i = 0;
       VmcsFieldOffsetTable[i].FieldEncoding != 0xFFFF;
       i++)
  {
    Line[0] = '\0'; // start at the beginning...
    FieldValue = VmRead64(VmcsFieldOffsetTable[i].FieldEncoding);
    FieldOffset = FieldValue & 0xFFFFull;  // low 16 bits are the planted offset
    VmcsFieldOffsetTable[i].FieldOffset = FieldOffset;
    strcat(Line, "MapVmcs: ");
    strcat(Line, VmcsFieldPrintTable[i].FieldPrint); // get around a bug
    strcat(Line, " : 0x%08x : 0x%08lx : FV 0x%016llx\n");
    //DEBUG((EFI_D_ERROR, "MapVmcs: %s : %d : %016llx : FV %016llx\n", VmcsFieldPrintTable[i].FieldPrint,
    DEBUG((EFI_D_ERROR, Line,
           VmcsFieldOffsetTable[i].FieldEncoding,
           VmcsFieldOffsetTable[i].FieldOffset,
           FieldValue));
  }

  AsmVmPtrLoad(&CurrentVMCSSave); // Put back the original Vmcs
  // BUGFIX: VMCLEAR (like VMPTRLD above) takes the address of the variable
  // holding the VMCS physical address, not the VMCS region itself.
  AsmVmClear((UINT64 *)&EvalVmcs);
  FreePages(EvalVmcs, VmcsSizeInPages); // free up the eval vmcs

  // validate with the current VMCS - especially since we just flushed it
  DEBUG((EFI_D_ERROR, "MapVmcs: Field/Offset validation\n"));
  for( i = 0;
       VmcsFieldOffsetTable[i].FieldEncoding != 0xFFFF;
       i++)
  {
    UINT64 VmReadValue;
    UINT64 OffReadValue;

    if(VmcsFieldOffsetTable[i].FieldOffset != 0) // zero offset is not valid
    {
      Line[0] = '\0'; // start at the beginning...
      // compare what VMREAD reports against a direct memory read at the
      // discovered offset within the (flushed) current VMCS
      VmReadValue = VmRead64(VmcsFieldOffsetTable[i].FieldEncoding);
      OffReadValue = *(UINT64*)((UINTN)CurrentVMCSSave + (UINTN)VmcsFieldOffsetTable[i].FieldOffset);
      strcat(Line, "MapVmcs: ");
      strcat(Line, VmcsFieldPrintTable[i].FieldPrint); // get around a bug
      strcat(Line, " VMREAD: 0x%016llx Offset read: 0x%016llx\n");
      //DEBUG((EFI_D_ERROR, "MapVmcs: %s : %d : %016llx : FV %016llx\n", VmcsFieldPrintTable[i].FieldPrint,
      DEBUG((EFI_D_ERROR, Line,
             VmReadValue,
             OffReadValue));
    }
  }
}
// parses the VmcsFieldOffsetTable to find the offset based on the field_encoding
// an Offset of zero (0) indicates no match found (or the encoding is not valid for
// this processor family
//
// @param field_encoding  VMCS field encoding as used by VMREAD/VMWRITE
// @return byte offset of the field within the VMCS region; 0 = no match
UINT32 GetVmcsOffset( UINT32 field_encoding)
{
  int i;

  // 0xFFFF is the table terminator (see the table's last entry)
  for(i = 0; VmcsFieldOffsetTable[i].FieldEncoding != 0xFFFF; i++)
  {
    if(field_encoding == VmcsFieldOffsetTable[i].FieldEncoding)
    {
      DEBUG((EFI_D_ERROR, "GetVmcsOffset - encoding: 0x%lx offset 0x%lx\n",
             field_encoding,
             VmcsFieldOffsetTable[i].FieldOffset));
      return VmcsFieldOffsetTable[i].FieldOffset;
    }
  }
  // BUGFIX: corrected typo "fount" -> "found" in the error message
  DEBUG((EFI_D_ERROR, "GetVmcsOffset - no VMCS offset found for field encoding 0x%lx\n", field_encoding));
  return 0; // offset of zero indicates no match found
}

View File

@ -0,0 +1,28 @@
/** @file
  Types used to dynamically generate the VMCS field-to-offset map for XHIM.

  TODO(review): "add license stuff" -- the license header is still missing.
*/
#ifndef _VMCSOFFSETS_H_
#define _VMCSOFFSETS_H_
#include "Library/vmx.h"
// Maps a VMCS field encoding (as used by VMREAD/VMWRITE) to its byte offset
// within this processor's VMCS region.  A FieldOffset of zero means
// "not yet discovered, or not valid on this processor family".
typedef struct VmcsFieldOffset
{
UINT32 FieldEncoding;   // VMCS field encoding
UINT32 FieldOffset;     // byte offset within the VMCS region (0 = unknown)
} VMCSFIELDOFFSET;
// initialize all fields to zero
// Pairs a VMCS field encoding with a printable name for debug output.
// NOTE(review): FieldEncoding is UINT64 here but UINT32 in VMCSFIELDOFFSET --
// presumably only the low 32 bits are meaningful; confirm and unify.
typedef struct VmcsFieldPrint
{
UINT64 FieldEncoding;   // VMCS field encoding (low 32 bits used)
char * FieldPrint;      // human-readable field name for DEBUG output
} VMCSFIELDPRINT;
#endif

View File

@ -0,0 +1,49 @@
;------------------------------------------------------------------------------
;
; Copyright (c) 2015, Intel Corporation. All rights reserved.<BR>
; This program and the accompanying materials
; are licensed and made available under the terms and conditions of the BSD License
; which accompanies this distribution. The full text of the license may be found at
; http://opensource.org/licenses/bsd-license.php.
;
; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
;
; Module Name:
;
; PeVmExit.asm
;
;------------------------------------------------------------------------------
EXTERNDEF StmHandlerSmi:PROC
EXTERNDEF PeStmHandlerSmm:PROC
.CODE
; Host (STM) entry point taken on a VM exit from the protected-execution VM.
; Saves the guest's FPU/SSE state and general-purpose registers on the stack,
; then hands a pointer to that register frame to the C handler.
AsmHostEntrypointSmmPe PROC PUBLIC
sub rsp, 512          ; reserve the 512-byte FXSAVE area
fxsave [rsp]          ; save interrupted context's FPU/SSE state
; Push GPRs in reverse field order so the stack forms the register-context
; structure the C handler expects (rax at the lowest address).
push r15
push r14
push r13
push r12
push r11
push r10
push r9
push r8
push rdi
push rsi
push rbp
push rbp ; should be rsp - placeholder filling the Rsp slot of the frame;
         ; NOTE(review): left as-is to match the structure layout used by the
         ; other VmExit entry stubs -- confirm the handler refills this slot
         ; from the VMCS guest RSP field
push rbx
push rdx
push rcx
push rax
mov rcx, rsp ; parameter - first (MS x64 ABI) argument = pointer to saved frame
sub rsp, 20h ; 32-byte shadow space required by the MS x64 calling convention
call PeStmHandlerSmm
add rsp, 20h
jmp $        ; PeStmHandlerSmm is not expected to return; spin if it does
AsmHostEntrypointSmmPe ENDP
END

View File

@ -13,6 +13,7 @@
**/
#include "StmRuntime.h"
#include "PeStm.h"
/**
@ -30,8 +31,9 @@ ResumeToBiosExceptionHandler (
STM_PROTECTION_EXCEPTION_STACK_FRAME_X64 *StackFrame;
UINTN Rflags;
X86_REGISTER *Reg;
UINT32 VmType = SMI_HANDLER;
Reg = &mGuestContextCommonSmm.GuestContextPerCpu[Index].Register;
Reg = &mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Register;
StmProtectionExceptionHandler = &mHostContextCommon.HostContextPerCpu[Index].TxtProcessorSmmDescriptor->StmProtectionExceptionHandler;
@ -56,14 +58,14 @@ ResumeToBiosExceptionHandler (
StackFrame = (STM_PROTECTION_EXCEPTION_STACK_FRAME_X64 *)(((UINTN)StackFrame - 0x10) & ~0xF);
StackFrame = (STM_PROTECTION_EXCEPTION_STACK_FRAME_X64 *)((UINTN)StackFrame - 0x8);
mGuestContextCommonSmm.GuestContextPerCpu[Index].InfoBasic.Uint32 = VmRead32 (VMCS_32_RO_EXIT_REASON_INDEX);
mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].InfoBasic.Uint32 = VmRead32 (VMCS_32_RO_EXIT_REASON_INDEX);
mGuestContextCommonSmm.GuestContextPerCpu[Index].VmExitInstructionLength = VmRead32 (VMCS_32_RO_VMEXIT_INSTRUCTION_LENGTH_INDEX);
mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].VmExitInstructionLength = VmRead32 (VMCS_32_RO_VMEXIT_INSTRUCTION_LENGTH_INDEX);
StackFrame->VmcsExitQualification = VmReadN (VMCS_N_RO_EXIT_QUALIFICATION_INDEX);
StackFrame->VmcsExitInstructionLength = VmRead32 (VMCS_32_RO_VMEXIT_INSTRUCTION_LENGTH_INDEX);
StackFrame->VmcsExitInstructionInfo = VmRead32 (VMCS_32_RO_VMEXIT_INSTRUCTION_INFO_INDEX);
switch (mGuestContextCommonSmm.GuestContextPerCpu[Index].InfoBasic.Bits.Reason) {
switch (mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].InfoBasic.Bits.Reason) {
case VmExitReasonExceptionNmi:
case VmExitReasonEptViolation:
if (StmProtectionExceptionHandler->PageViolationException) {
@ -128,7 +130,7 @@ ResumeToBiosExceptionHandler (
StackFrame->Rax = Reg->Rax;
StackFrame->Cr8 = 0; // AsmReadCr8();
StackFrame->Cr3 = VmReadN (VMCS_N_GUEST_CR3_INDEX);
if (mGuestContextCommonSmm.GuestContextPerCpu[Index].InfoBasic.Bits.Reason == VmExitReasonEptViolation) {
if (mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].InfoBasic.Bits.Reason == VmExitReasonEptViolation) {
// For SMM handle, linear addr == physical addr
StackFrame->Cr2 = (UINTN)VmRead64(VMCS_64_RO_GUEST_PHYSICAL_ADDR_INDEX);
} else {
@ -164,6 +166,7 @@ ResumeToBiosExceptionHandler (
DEBUG ((EFI_D_ERROR, "VMCS_32_RO_VM_INSTRUCTION_ERROR: %08x\n", (UINTN)VmRead32 (VMCS_32_RO_VM_INSTRUCTION_ERROR_INDEX)));
DumpVmcsAllField ();
DumpRegContext (Reg);
DumpGuestStack(Index);
ReleaseSpinLock (&mHostContextCommon.DebugLock);
CpuDeadLoop ();
return ;
@ -185,8 +188,9 @@ ReturnFromBiosExceptionHandler (
STM_PROTECTION_EXCEPTION_STACK_FRAME_X64 *StackFrame;
UINTN Rflags;
X86_REGISTER *Reg;
UINT32 VmType = SMI_HANDLER;
Reg = &mGuestContextCommonSmm.GuestContextPerCpu[Index].Register;
Reg = &mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Register;
StmProtectionExceptionHandler = &mHostContextCommon.HostContextPerCpu[Index].TxtProcessorSmmDescriptor->StmProtectionExceptionHandler;
@ -254,6 +258,7 @@ ReturnFromBiosExceptionHandler (
DEBUG ((EFI_D_ERROR, "VMCS_32_RO_VM_INSTRUCTION_ERROR: %08x\n", (UINTN)VmRead32 (VMCS_32_RO_VM_INSTRUCTION_ERROR_INDEX)));
DumpVmcsAllField ();
DumpRegContext (Reg);
DumpGuestStack(Index);
ReleaseSpinLock (&mHostContextCommon.DebugLock);
CpuDeadLoop ();
return ;

View File

@ -13,6 +13,7 @@
**/
#include "StmRuntime.h"
#include "PeStm.h"
/**
@ -101,10 +102,12 @@ WriteSyncSmmStateSaveAreaSse2 (
IN BOOLEAN Scrub
)
{
UINT32 VmType = SMI_HANDLER;
if (!Scrub) {
CopyMem (&mGuestContextCommonSmm.GuestContextPerCpu[Index].Register.FxBuffer, &mGuestContextCommonSmi.GuestContextPerCpu[Index].Register.FxBuffer, sizeof(IA32_FX_BUFFER));
CopyMem (&mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Register.FxBuffer, &mGuestContextCommonSmi.GuestContextPerCpu[Index].Register.FxBuffer, sizeof(IA32_FX_BUFFER));
} else {
ZeroMem (&mGuestContextCommonSmm.GuestContextPerCpu[Index].Register.FxBuffer, sizeof(IA32_FX_BUFFER));
ZeroMem (&mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Register.FxBuffer, sizeof(IA32_FX_BUFFER));
}
}
@ -120,5 +123,7 @@ ReadSyncSmmStateSaveAreaSse2 (
IN UINT32 Index
)
{
CopyMem (&mGuestContextCommonSmi.GuestContextPerCpu[Index].Register.FxBuffer, &mGuestContextCommonSmm.GuestContextPerCpu[Index].Register.FxBuffer, sizeof(IA32_FX_BUFFER));
UINT32 VmType = SMI_HANDLER;
CopyMem (&mGuestContextCommonSmi.GuestContextPerCpu[Index].Register.FxBuffer, &mGuestContextCommonSmm[VmType].GuestContextPerCpu[Index].Register.FxBuffer, sizeof(IA32_FX_BUFFER));
}

View File

@ -400,8 +400,10 @@ FreePages (
This function set EPT page table attribute by range.
@param EptPointer EPT table of interest
@param Base Memory base
@param Length Memory length
@param Physmem Physical Memory base
@param Ra Read access
@param Wa Write access
@param Xa Execute access
@ -411,8 +413,10 @@ FreePages (
**/
RETURN_STATUS
EPTSetPageAttributeRange (
IN UINT64 EptPointer,
IN UINT64 Base,
IN UINT64 Length,
IN UINT64 Physmem,
IN UINT32 Ra,
IN UINT32 Wa,
IN UINT32 Xa,
@ -561,7 +565,7 @@ AddEventLog (
/**
This function add event log for invalid parameter.
This function add event log for invalid parameter
@param VmcallApiNumber VMCALL API number which caused invalid parameter
@ -573,7 +577,7 @@ AddEventLogInvalidParameter (
/**
This function add event log for resource.
This function add event log for resource
@param EventType EvtHandledProtectionException, EvtBiosAccessToUnclaimedResource,
EvtMleResourceProtectionGranted, EvtMleResourceProtectionDenied,
@ -647,6 +651,18 @@ DumpRegContext (
IN X86_REGISTER *Reg
);
/**
This function dumps the guest stack.
@param Index - CPU Index
**/
VOID
DumpGuestStack(
IN UINT32 Index
);
/**
Initialize external vector table pointer.
@ -923,6 +939,8 @@ typedef struct _STM_HOST_CONTEXT_PER_CPU {
UINT32 Index;
UINT32 ApicId;
UINTN Stack;
UINT32 GuestVmType;
UINT32 NonSmiHandler;
UINT32 Smbase;
TXT_PROCESSOR_SMM_DESCRIPTOR *TxtProcessorSmmDescriptor;
UINT32 HostMsrEntryCount;
@ -931,6 +949,7 @@ typedef struct _STM_HOST_CONTEXT_PER_CPU {
// JumpBuffer for Setup/TearDown
BOOLEAN JumpBufferValid;
BASE_LIBRARY_JUMP_BUFFER JumpBuffer;
UINT64 Vmxon;
} STM_HOST_CONTEXT_PER_CPU;
typedef struct _STM_HOST_CONTEXT_COMMON {
@ -940,11 +959,13 @@ typedef struct _STM_HOST_CONTEXT_COMMON {
SPIN_LOCK PciLock;
UINT32 CpuNum;
UINT32 JoinedCpuNum;
UINT32 StmShutdown;
UINTN PageTable;
IA32_DESCRIPTOR Gdtr;
IA32_DESCRIPTOR Idtr;
UINT64 HeapBottom;
UINT64 HeapTop;
UINT64 HeapFree;
UINT8 PhysicalAddressBits;
UINT64 MaximumSupportAddress;
//
@ -993,6 +1014,6 @@ typedef struct _STM_HOST_CONTEXT_COMMON {
extern STM_HOST_CONTEXT_COMMON mHostContextCommon;
extern STM_GUEST_CONTEXT_COMMON mGuestContextCommonSmi;
extern STM_GUEST_CONTEXT_COMMON mGuestContextCommonSmm;
extern STM_GUEST_CONTEXT_COMMON mGuestContextCommonSmm[];
#endif

View File

@ -35,6 +35,7 @@
Init/IoInit.c
Init/MsrInit.c
Init/Relocate.c
Init/PeVmcsInit.c
Runtime/SmmMp.c
Runtime/SmmHandler.c
Runtime/SmmStateSync.c
@ -56,6 +57,29 @@
Runtime/StmTearDown.c
Runtime/StmExceptionHandler.c
Runtime/PageTable.c
Runtime/PeEptHandler.c
Runtime/PeSmmHandler.c
Runtime/PeSmmVmcallHandler.c
Runtime/PeSmmRsmHandler.c
Runtime/PeSmmIoHandler.c
Runtime/PeSmmBadGuestStateHandler.c
Runtime/PeSmmMsrHandler.c
Runtime/PeSmmCrHandler.c
Runtime/PeSmmExceptionHandler.c
Runtime/PeSmmCpuidHandler.c
Runtime/PeSmmPreemptionTimerHandler.c
Runtime/PeSmmTripleFaultHandler.c
Runtime/PeSmiVmcallHandler.c
Runtime/PeSmiHandler.c
Runtime/PeVmxState.c
Runtime/PeApicHandler.c
Runtime/PePciHandler.c
Runtime/PeLoadVm.c
Runtime/PeLoadVm.h
Runtime/PeStm.h
Runtime/PeVmcsInit.c
Runtime/VmcsOffsets.h
Runtime/VmcsMapper.c
Stm.h
CpuDef.h
Cpu.c
@ -64,11 +88,13 @@
StmResource.c
VmcsRecord.c
StmPerformance.c
PeStmEpt.h
[Sources.Ia32]
Init/Ia32/AsmStmInit.asm
Init/Ia32/AsmStmInit.s
Runtime/Ia32/VmExit.asm
Runtime/Ia32/PeVmExit.asm
Runtime/Ia32/VmExit.s
Runtime/Ia32/Exception.asm
Runtime/Ia32/Exception.s
@ -80,6 +106,7 @@
Init/x64/AsmStmInit.s
Runtime/x64/VmExit.asm
Runtime/x64/VmExit.s
Runtime/x64/PeVmExit.asm
Runtime/x64/Exception.asm
Runtime/x64/Exception.s
Runtime/x64/SmmException.c
@ -100,8 +127,8 @@
StmPlatformLib
[Pcd]
gEfiStmPkgTokenSpaceGuid.PcdPerformanceLibraryPropertyMask ## CONSUMES
gEfiStmPkgTokenSpaceGuid.PcdPciExpressBaseAddress ## PRODUCES
gEfiStmPkgTokenSpaceGuid.PcdPerformanceLibraryPropertyMask
gEfiStmPkgTokenSpaceGuid.PcdPciExpressBaseAddress
[BuildOptions]
# MSFT:*_*_X64_CC_FLAGS = /Od /GL-
@ -111,11 +138,11 @@
INTEL:*_*_X64_CC_FLAGS = /Qopt-jump-tables-
INTEL:*_*_IA32_CC_FLAGS = /Qopt-jump-tables-
MSFT:*_*_X64_DLINK_FLAGS = /BASE:0x0 /ALIGN:32 /FILEALIGN:32 /STACK:0x8000,0x8000 /HEAP:0x140000,0x140000 /OUT:$(DEBUG_DIR)\Stm.dll
MSFT:*_*_IA32_DLINK_FLAGS = /BASE:0x0 /ALIGN:32 /FILEALIGN:32 /STACK:0x8000,0x8000 /HEAP:0x140000,0x140000 /OUT:$(DEBUG_DIR)\Stm.dll
MSFT:*_*_X64_DLINK_FLAGS = /BASE:0x0 /ALIGN:32 /FILEALIGN:32 /STACK:0x8000,0x8000 /HEAP:0x240000,0x240000 /OUT:$(DEBUG_DIR)\Stm.dll
MSFT:*_*_IA32_DLINK_FLAGS = /BASE:0x0 /ALIGN:32 /FILEALIGN:32 /STACK:0x8000,0x8000 /HEAP:0x240000,0x240000 /OUT:$(DEBUG_DIR)\Stm.dll
INTEL:*_*_X64_DLINK_FLAGS = /BASE:0x0 /ALIGN:32 /FILEALIGN:32 /STACK:0x8000,0x8000 /HEAP:0x140000,0x140000 /OUT:$(DEBUG_DIR)\Stm.dll
INTEL:*_*_IA32_DLINK_FLAGS = /BASE:0x0 /ALIGN:32 /FILEALIGN:32 /STACK:0x8000,0x8000 /HEAP:0x140000,0x140000 /OUT:$(DEBUG_DIR)\Stm.dll
INTEL:*_*_X64_DLINK_FLAGS = /BASE:0x0 /ALIGN:32 /FILEALIGN:32 /STACK:0x8000,0x8000 /HEAP:0x240000,0x240000 /OUT:$(DEBUG_DIR)\Stm.dll
INTEL:*_*_IA32_DLINK_FLAGS = /BASE:0x0 /ALIGN:32 /FILEALIGN:32 /STACK:0x8000,0x8000 /HEAP:0x240000,0x240000 /OUT:$(DEBUG_DIR)\Stm.dll
GCC:*_*_X64_DLINK_FLAGS == -o $(DEBUG_DIR)/Stm.dll -nostdlib -n -q --gc-sections -z common-page-size=0x40 --entry _ModuleEntryPoint -u _ModuleEntryPoint -Map $(DEST_DIR_DEBUG)/$(BASE_NAME).map -m elf_x86_64 --oformat=elf64-x86-64 --defsym=PECOFF_HEADER_SIZE=0x228 #--script=$(MODULE_DIR)/Stm.lds
GCC:*_*_IA32_DLINK_FLAGS == -o $(DEBUG_DIR)/Stm.dll -nostdlib -n -q --gc-sections -z common-page-size=0x40 --entry _ModuleEntryPoint -u _ModuleEntryPoint -Map $(DEST_DIR_DEBUG)/$(BASE_NAME).map -m elf_i386 --oformat=elf32-i386 --defsym=PECOFF_HEADER_SIZE=0x220 #--script=$(MODULE_DIR)/Stm.lds
GCC:*_*_X64_DLINK_FLAGS == -o $(DEBUG_DIR)/Stm.dll -nostdlib -n -q --gc-sections -z common-page-size=0x40 --entry _ModuleEntryPoint -u _ModuleEntryPoint -Map $(DEST_DIR_DEBUG)/$(BASE_NAME).map -m elf_x86_64 --oformat=elf64-x86-64 --defsym=PECOFF_HEADER_SIZE=0x228 #--script=$(MODULE_DIR)/Stm.lds
GCC:*_*_IA32_DLINK_FLAGS == -o $(DEBUG_DIR)/Stm.dll -nostdlib -n -q --gc-sections -z common-page-size=0x40 --entry _ModuleEntryPoint -u _ModuleEntryPoint -Map $(DEST_DIR_DEBUG)/$(BASE_NAME).map -m elf_i386 --oformat=elf32-i386 --defsym=PECOFF_HEADER_SIZE=0x220 #--script=$(MODULE_DIR)/Stm.lds

View File

@ -14,6 +14,7 @@
#include "Stm.h"
#include "StmRuntime.h"
#include "PeStm.h"
#define DEFAULT_PROTECTED_DEFAULT_PAGES 4
@ -1434,8 +1435,10 @@ RegisterProtectedResourceNode (
case MEM_RANGE:
case MMIO_RANGE:
EPTSetPageAttributeRange (
mGuestContextCommonSmm[SMI_HANDLER].EptPointer.Uint64,
Resource->Mem.Base,
Resource->Mem.Length,
Resource->Mem.Base,
((Resource->Mem.RWXAttributes & STM_RSC_MEM_R) != 0) ? 0 : 1,
((Resource->Mem.RWXAttributes & STM_RSC_MEM_W) != 0) ? 0 : 1,
((Resource->Mem.RWXAttributes & STM_RSC_MEM_X) != 0) ? 0 : 1,
@ -1464,8 +1467,10 @@ RegisterProtectedResourceNode (
LastNodeBus = GetLastNodeBus (Resource);
PciExpressDeviceBase = PCI_EXPRESS_ADDRESS(LastNodeBus, Resource->PciCfg.PciDevicePath[Resource->PciCfg.LastNodeIndex].PciDevice, Resource->PciCfg.PciDevicePath[Resource->PciCfg.LastNodeIndex].PciFunction, 0);
EPTSetPageAttributeRange (
mGuestContextCommonSmm[SMI_HANDLER].EptPointer.Uint64,
PciExpressDeviceBase + mHostContextCommon.PciExpressBaseAddress,
SIZE_4KB,
PciExpressDeviceBase + mHostContextCommon.PciExpressBaseAddress,
((Resource->PciCfg.RWAttributes & STM_RSC_PCI_CFG_R) != 0) ? 0 : 1,
((Resource->PciCfg.RWAttributes & STM_RSC_PCI_CFG_W) != 0) ? 0 : 1,
0,
@ -1534,8 +1539,10 @@ UnRegisterProtectedResourceNode (
case MEM_RANGE:
case MMIO_RANGE:
EPTSetPageAttributeRange (
mGuestContextCommonSmm[SMI_HANDLER].EptPointer.Uint64,
Resource->Mem.Base,
Resource->Mem.Length,
Resource->Mem.Base,
((Resource->Mem.RWXAttributes & STM_RSC_MEM_R) != 0) ? 1 : 0,
((Resource->Mem.RWXAttributes & STM_RSC_MEM_W) != 0) ? 1 : 0,
((Resource->Mem.RWXAttributes & STM_RSC_MEM_X) != 0) ? 1 : 0,
@ -1563,8 +1570,10 @@ UnRegisterProtectedResourceNode (
LastNodeBus = GetLastNodeBus (Resource);
PciExpressDeviceBase = PCI_EXPRESS_ADDRESS(LastNodeBus, Resource->PciCfg.PciDevicePath[Resource->PciCfg.LastNodeIndex].PciDevice, Resource->PciCfg.PciDevicePath[Resource->PciCfg.LastNodeIndex].PciFunction, 0);
EPTSetPageAttributeRange (
mGuestContextCommonSmm[SMI_HANDLER].EptPointer.Uint64,
PciExpressDeviceBase + mHostContextCommon.PciExpressBaseAddress,
SIZE_4KB,
PciExpressDeviceBase + mHostContextCommon.PciExpressBaseAddress,
((Resource->PciCfg.RWAttributes & STM_RSC_PCI_CFG_R) != 0) ? 1 : 0,
((Resource->PciCfg.RWAttributes & STM_RSC_PCI_CFG_W) != 0) ? 1 : 0,
0,
@ -1805,6 +1814,9 @@ RegisterBiosResourceNode (
IN STM_RSC *Resource
)
{
UINT8 LastNodeBus;
UINT64 PciExpressDeviceBase;
if (Resource->Header.IgnoreResource != 0) {
return ;
}
@ -1821,8 +1833,41 @@ RegisterBiosResourceNode (
break;
case MEM_RANGE:
case MMIO_RANGE:
EPTSetPageAttributeRange (
mGuestContextCommonSmm[SMI_HANDLER].EptPointer.Uint64,
Resource->Mem.Base,
Resource->Mem.Length,
Resource->Mem.Base,
((Resource->Mem.RWXAttributes & STM_RSC_MEM_R) == STM_RSC_MEM_R) ? 1 : 0,
((Resource->Mem.RWXAttributes & STM_RSC_MEM_W) == STM_RSC_MEM_W) ? 1 : 0,
((Resource->Mem.RWXAttributes & STM_RSC_MEM_X) == STM_RSC_MEM_X) ? 1 : 0,
EptPageAttributeSet
);
Resource->Header.ReturnStatus = 1;
break;
case IO_RANGE:
break;
case PCI_CFG_RANGE:
SetIoBitmapRange (0xCF8, 1);
SetIoBitmapRange (0xCFC, 4);
// STM_RSC_BGI is NOT supported in this version
if (mHostContextCommon.PciExpressBaseAddress != 0) {
LastNodeBus = GetLastNodeBus (Resource);
PciExpressDeviceBase = PCI_EXPRESS_ADDRESS(LastNodeBus, Resource->PciCfg.PciDevicePath[Resource->PciCfg.LastNodeIndex].PciDevice, Resource->PciCfg.PciDevicePath[Resource->PciCfg.LastNodeIndex].PciFunction, 0);
EPTSetPageAttributeRange (
mGuestContextCommonSmm[SMI_HANDLER].EptPointer.Uint64,
PciExpressDeviceBase + mHostContextCommon.PciExpressBaseAddress,
SIZE_4KB,
PciExpressDeviceBase + mHostContextCommon.PciExpressBaseAddress,
((Resource->PciCfg.RWAttributes & STM_RSC_PCI_CFG_R) == STM_RSC_PCI_CFG_R) ? 1 : 0,
((Resource->PciCfg.RWAttributes & STM_RSC_PCI_CFG_W) == STM_RSC_PCI_CFG_W) ? 1 : 0,
0,
EptPageAttributeAnd
);
}
Resource->Header.ReturnStatus = 1;
break;
case TRAPPED_IO_RANGE:
// Not supported
break;

View File

@ -32,7 +32,9 @@
#define TXT_SINIT_SIZE 0x278
#define TXT_MLE_JOIN 0x290
#define TXT_HEAP_BASE 0x300
#define TXT_HEAP_SIZE 0x308
#define TXT_HEAP_SIZE 0x308
#define TXT_MSEG_BASE 0x310
#define TXT_MSEG_SIZE 0x318
#define TXT_DPR_REG 0x330
#define TXT_DPR_REG_LCK 0x1
#define TXT_DPR_REG_SIZE_MASK 0xFF0

View File

@ -560,6 +560,7 @@ typedef struct {
#define STM_API_UNMAP_ADDRESS_RANGE 0x00000002
#define STM_API_ADDRESS_LOOKUP 0x00000003
#define STM_API_RETURN_FROM_PROTECTION_EXCEPTION 0x00000004
#define STM_API_GET_VMCS_MAP 0x00000005
// API number convention: MLE facing VMCALL interfaces have bit 16 set
//
@ -579,6 +580,11 @@ typedef struct {
#define STM_API_MANAGE_VMCS_DATABASE 0x00010006
#define STM_API_INITIALIZE_PROTECTION 0x00010007
#define STM_API_MANAGE_EVENT_LOG 0x00010008
#define STM_API_ADD_TEMP_PE_VM 0x00010009
#define STM_API_ADD_PERM_PE_VM 0x0001000a
#define STM_API_RUN_PE_VM 0x0001000b
#define STM_API_END_ADD_PERM_PE_VM 0x0001000c
#define STM_API_ADD_PERM_PE_VM_NORUN 0x0001000d
//
// Return codes

View File

@ -0,0 +1,12 @@
.686P
.MMX
.MODEL FLAT,C
.CODE
; AsmSendInt2 - invokes interrupt vector 2 via a software INT instruction.
; NOTE(review): INT 2 dispatches the vector-2 (NMI) handler as a software
; interrupt; it is not a true hardware NMI (e.g. it does not engage NMI
; blocking) -- confirm callers only need the handler body to run.
AsmSendInt2 PROC PUBLIC
int 2
ret
AsmSendInt2 ENDP
END

View File

@ -70,6 +70,7 @@
Ia32/AsmTestAndReset.asm
Ia32/AsmTestAndSet.s
Ia32/AsmTestAndReset.s
Ia32/AsmSendInt2.asm
[Sources.x64]
x64/AsmInvEpt.asm
@ -109,7 +110,7 @@
x64/AsmTestAndReset.asm
x64/AsmTestAndSet.s
x64/AsmTestAndReset.s
x64/AsmSendInt2.asm
[Packages]
StmPkg/StmPkg.dec

View File

@ -0,0 +1,8 @@
.CODE
; AsmSendInt2 - invokes interrupt vector 2 via a software INT instruction
; (x64 variant; see the Ia32 version for the matching 32-bit stub).
; NOTE(review): INT 2 dispatches the vector-2 (NMI) handler as a software
; interrupt, not a true hardware NMI -- confirm callers only need the
; handler body to run.
AsmSendInt2 PROC PUBLIC
int 2
ret
AsmSendInt2 ENDP
END