cpu/x86/smm: Add support for long mode

Enable long mode in the SMM handler.
x86_32 isn't affected by this change.

As the rsm instruction used to leave SMM doesn't restore MSRs, drop
back to protected mode after running smi_handler and restore the
IA32_EFER MSR (which enables long mode support) to its previous value.
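
In C-flavoured pseudocode the flow is roughly as follows (a sketch
only; the real handler is assembly, and enter_long_mode() /
leave_long_mode() are illustrative stand-ins for the entry64.inc and
exit32.inc code added below):

    void smm_stub(u32 smm_revision)
    {
            msr_t backup = rdmsr(IA32_EFER); /* save IA32_EFER       */
            enter_long_mode();
            smi_handler(smm_revision);
            leave_long_mode();               /* back to prot. mode   */
            wrmsr(IA32_EFER, backup);        /* rsm won't restore it */
            /* followed by rsm */
    }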

NOTE: This commit does NOT introduce a new security model. It uses the
      same page tables as the rest of the firmware.
      This can be a security risk if someone is able to manipulate the
      page tables stored in ROM at runtime. USE FOR TESTING ONLY!

Tested on QEMU Q35.

Change-Id: I8bba4af4688c723fc079ae905dac95f57ea956f8
Signed-off-by: Patrick Rudolph <siro@das-labor.org>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/35681
Reviewed-by: Raul Rangel <rrangel@chromium.org>
Reviewed-by: Angel Pons <th3fanbus@gmail.com>
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Author: Patrick Rudolph, 2019-09-29 11:08:33 +02:00
Committed-by: Patrick Georgi
Commit: 03a79520d6 (parent: d5321bf2fb)
3 changed files with 130 additions and 1 deletion

Documentation/arch/x86/index.md

@@ -45,6 +45,7 @@ At the moment *$n* is 4, which results in identity mapping the lower 4 GiB.
* Add x86_64 exception handlers - *DONE*
* Setup page tables for long mode - *DONE*
* Add assembly code for long mode - *DONE*
* Add assembly code for SMM - *DONE*
* Add assembly code for postcar stage - *TODO*
* Add assembly code to return to protected mode - *TODO*
* Implement reference code for mainboard `emulation/qemu-q35` - *TODO*

src/cpu/x86/64bit/exit32.inc (new file)

@@ -0,0 +1,84 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* For dropping from long mode to protected mode.
*
* For reference see "AMD64 Architecture Programmer's Manual Volume 2",
* Document 24593-Rev. 3.31-July 2019, Chapter 5.3
*
* Clobbers: rax, rbx, rcx, rdx
*/
.code64
#include <cpu/x86/msr.h>
#include <cpu/x86/cr.h>
#include <arch/rom_segs.h>
drop_longmode:
/* Ensure cache is clean. */
wbinvd
/* Set 32-bit code segment and ss */
mov $ROM_CODE_SEG, %rcx
/* SetCodeSelector32 will drop us to 32-bit compatibility mode on return */
call SetCodeSelector32
/* Skip SetCodeSelector32 */
.code32
jmp __longmode_compatibility
.align 8
.code64
SetCodeSelector32:
# pop the return address from stack
pop %rbx
# save rsp because we need to push it after ss
mov %rsp, %rdx
# use iret to jump to a 32-bit offset in a new code segment
# iret will pop cs:rip, flags, then ss:rsp
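# Stack as iretq will consume it (top of stack first), built by the
# pushes below in reverse order:
#   new %rip  <- %rbx (the saved return address)
#   new %cs   <- %rcx (ROM_CODE_SEG)
#   rflags
#   new %rsp  <- %rdx (the saved stack pointer)
#   new %ss   <- %rax (the current ss)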
mov %ss, %ax # need to push ss, but push ss instruction
push %rax # not valid in x64 mode, so use ax
push %rdx # the rsp to load
pushfq # push rflags
push %rcx # cx is code segment selector from caller
push %rbx # push the IP for the next instruction
# the iretq will behave like ret, with the new cs/ss value loaded
iretq
.align 4
.code32
__longmode_compatibility:
/* Running in 32-bit compatibility mode */
/* Use flat data segment */
movl $ROM_DATA_SEG, %eax
movl %eax, %ds
movl %eax, %es
movl %eax, %ss
movl %eax, %fs
movl %eax, %gs
/* Disable paging. */
movl %cr0, %eax
andl $(~CR0_PG), %eax
movl %eax, %cr0
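/* Note: CR0.PG must be cleared before changing EFER.LME below; see the
 * AMD APM chapter cited in the header for the leave-long-mode sequence. */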
/* Disable long mode. */
movl $(IA32_EFER), %ecx
rdmsr
andl $(~EFER_LME), %eax
wrmsr
/* Disable PAE. */
movl %cr4, %eax
andl $(~CR4_PAE), %eax
movl %eax, %cr4
/* Clear page table register */
xor %eax, %eax
movl %eax, %cr3
__longmode_exit:

src/cpu/x86/smm/smmhandler.S

@@ -8,6 +8,7 @@
*/
#include <cpu/x86/lapic_def.h>
#include <cpu/x86/msr.h>
/*
* +--------------------------------+ 0xaffff
@@ -42,6 +43,14 @@
#define SMM_HANDLER_OFFSET 0x0000
#if defined(__x86_64__)
.bss
/* Scratch storage for the IA32_EFER value (%eax/%edx halves), saved on
 * SMM entry and restored before rsm. */
ia32efer_backup_eax:
.long
ia32efer_backup_edx:
.long
#endif
/* initially SMM is some sort of real mode. Let gcc know
* how to treat the SMM handler stub
*/
@@ -159,12 +168,44 @@ untampered_lapic:
/* Get SMM revision */
movl $0xa8000 + 0x7efc, %ebx /* core 0 address */
subl %ebp, %ebx /* subtract core X offset */
#if defined(__x86_64__)
/* Back up IA32_EFER. Preserves ebx. */
movl $(IA32_EFER), %ecx
rdmsr
movl %eax, ia32efer_backup_eax
movl %edx, ia32efer_backup_edx
/* Enable long mode. Preserves ebx. */
#include <cpu/x86/64bit/entry64.inc>
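/* smi_handler's argument (the SMM revision read via %ebx) is passed in
 * %rdi per the x86_64 SysV calling convention, instead of on the stack
 * as in the 32-bit path below. */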
mov (%ebx), %rdi
#else
movl (%ebx), %eax
pushl %eax
#endif
/* Call C handler */
call smi_handler
#if defined(__x86_64__)
/*
* The only reason to go back to protected mode is that RSM doesn't
* restore MSRs, and the IA32_EFER MSR was modified by entering long mode.
* Drop to protected mode to safely operate on the IA32_EFER MSR.
*/
/* Disable long mode. */
#include <cpu/x86/64bit/exit32.inc>
/* Restore IA32_EFER as RSM doesn't restore MSRs. */
movl $(IA32_EFER), %ecx
movl ia32efer_backup_eax, %eax
movl ia32efer_backup_edx, %edx
wrmsr
#endif
/* To return, just do rsm. It will "clean up" protected mode */
rsm
@@ -190,6 +231,9 @@ smm_gdt:
.word 0xffff, 0x0000
.byte 0x00, 0x93, 0xcf, 0x00
/* gdt selector 0x18, flat code segment (64-bit) */
.word 0xffff, 0x0000
.byte 0x00, 0x9b, 0xaf, 0x00
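/* 0x9b access byte: present, DPL 0, code, execute/read, accessed.
 * 0xaf: G=1 (4 KiB granularity), L=1 (64-bit), D=0, limit[19:16]=0xf. */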
smm_gdt_end: