Add PoC of Arm DRTM specification version Beta-0

Change-Id: I26e6f2d4b2299edc246f5e8504d5d15b1399f640
This commit is contained in:
Lucian Paul-Trifu 2022-03-08 15:02:31 +00:00
parent c158878249
commit 3519afe0f6
48 changed files with 3518 additions and 184 deletions

View File

@ -38,7 +38,8 @@ BL31_SOURCES += bl31/bl31_main.c \
services/std_svc/std_svc_setup.c \
${PSCI_LIB_SOURCES} \
${SPMD_SOURCES} \
${SPM_SOURCES}
${SPM_SOURCES} \
${MBEDTLS_SOURCES}
ifeq (${DISABLE_MTPMU},1)
BL31_SOURCES += lib/extensions/mtpmu/aarch64/mtpmu.S
@ -73,6 +74,35 @@ BL31_SOURCES += services/std_svc/trng/trng_main.c \
services/std_svc/trng/trng_entropy_pool.c
endif
ifeq (${DRTM_SUPPORT},1)
# TODO-LPT: move this to a new drtm.mk
# Note: no trailing backslash after the last source file -- a stray line
# continuation here would splice the following assignment into BL31_SOURCES.
BL31_SOURCES		+=	services/std_svc/drtm/drtm_main.c	\
				services/std_svc/drtm/drtm_cache.c	\
				services/std_svc/drtm/drtm_dma_prot.c	\
				services/std_svc/drtm/drtm_measurements.c \
				services/std_svc/drtm/drtm_res_tcb_hashes.c \
				services/std_svc/drtm/drtm_remediation.c

# Default DRTM measurement hash algorithm (SHA-256); overridable by the platform.
DRTM_SHA_ALG		?=	256

# Use the DRTM-specific mbed TLS configuration instead of the TBB default.
MBEDTLS_CONFIG_FILE	:=	\"../services/std_svc/drtm/drtm_mbedtls_config.h\"
$(info Including drivers/auth/mbedtls/mbedtls_common.mk)
include drivers/auth/mbedtls/mbedtls_common.mk

$(info Including lib/tpm/tpm_lib.mk)
include lib/tpm/tpm_lib.mk
BL31_SOURCES		+=	${TPM_LIB_SOURCES}

# DRTM maps/unmaps protected regions at run time.
PLAT_XLAT_TABLES_DYNAMIC :=	1
$(eval $(call add_defines,\
    $(sort \
        PLAT_XLAT_TABLES_DYNAMIC \
        DRTM_SHA_ALG \
)))
endif
ifeq (${ENABLE_SPE_FOR_LOWER_ELS},1)
BL31_SOURCES += lib/extensions/spe/spe.c
endif
@ -108,6 +138,7 @@ $(eval $(call assert_booleans,\
CRASH_REPORTING \
EL3_EXCEPTION_HANDLING \
SDEI_SUPPORT \
DRTM_SUPPORT \
)))
$(eval $(call add_defines,\
@ -115,4 +146,5 @@ $(eval $(call add_defines,\
CRASH_REPORTING \
EL3_EXCEPTION_HANDLING \
SDEI_SUPPORT \
DRTM_SUPPORT \
)))

View File

@ -200,6 +200,9 @@ Common build options
that do not implement FEAT_MTPMU. For more information on FEAT_MTPMU,
check the latest Arm ARM.
- ``DRTM_SUPPORT``: Boolean to enable support for Dynamic Root of Trust for
Measurement (DRTM). Defaults to disabled.
- ``DYN_DISABLE_AUTH``: Provides the capability to dynamically disable Trusted
Board Boot authentication at runtime. This option is meant to be enabled only
for development platforms. ``TRUSTED_BOARD_BOOT`` flag must be set if this

View File

@ -13,7 +13,7 @@
/* SMMU poll number of retries */
#define SMMU_POLL_TIMEOUT_US U(1000)
static int __init smmuv3_poll(uintptr_t smmu_reg, uint32_t mask,
static int smmuv3_poll(uintptr_t smmu_reg, uint32_t mask,
uint32_t value)
{
uint32_t reg_val;
@ -94,3 +94,45 @@ int __init smmuv3_init(uintptr_t smmu_base)
return smmuv3_poll(smmu_base + SMMU_S_INIT,
SMMU_S_INIT_INV_ALL, 0U);
}
/*
 * Set the Non-secure global bypass attribute to "abort all incoming
 * transactions" and disable the SMMU so the GBPA setting takes effect.
 *
 * smmu_base: base address of the SMMUv3 register frame.
 * Returns 0 on success, -1 if any register poll times out.
 */
int smmuv3_ns_set_abort_all(uintptr_t smmu_base)
{
	/* Attribute update has completed when SMMU_GBPA.Update bit is 0 */
	if (smmuv3_poll(smmu_base + SMMU_GBPA, SMMU_GBPA_UPDATE, 0U) != 0) {
		return -1;
	}

	/*
	 * Set GBPA's ABORT bit. Other GBPA fields are presumably ignored
	 * then, so simply preserve their value.
	 */
	mmio_setbits_32(smmu_base + SMMU_GBPA,
			SMMU_GBPA_UPDATE | SMMU_GBPA_ABORT);
	if (smmuv3_poll(smmu_base + SMMU_GBPA, SMMU_GBPA_UPDATE, 0U) != 0) {
		return -1;
	}

	/* Disable the SMMU to engage the GBPA fields previously configured. */
	mmio_clrbits_32(smmu_base + SMMU_CR0, SMMU_CR0_SMMUEN);
	if (smmuv3_poll(smmu_base + SMMU_CR0ACK, SMMU_CR0_SMMUEN, 0U) != 0) {
		return -1;
	}

	return 0;
}
/*
 * Set the Non-secure global bypass attribute to "bypass all incoming
 * transactions" (clear ABORT) and disable the SMMU so the GBPA setting
 * takes effect.
 *
 * smmu_base: base address of the SMMUv3 register frame.
 * Returns 0 on success, -1 if any register poll times out.
 */
int smmuv3_ns_set_bypass_all(uintptr_t smmu_base)
{
	/* Attribute update has completed when SMMU_GBPA.Update bit is 0 */
	if (smmuv3_poll(smmu_base + SMMU_GBPA, SMMU_GBPA_UPDATE, 0U) != 0) {
		return -1;
	}

	/* Clear GBPA's ABORT bit. Other GBPA fields are preserved. */
	mmio_clrsetbits_32(smmu_base + SMMU_GBPA,
			   SMMU_GBPA_ABORT, SMMU_GBPA_UPDATE);
	if (smmuv3_poll(smmu_base + SMMU_GBPA, SMMU_GBPA_UPDATE, 0U) != 0) {
		return -1;
	}

	/* Disable the SMMU to engage the GBPA fields previously configured. */
	mmio_clrbits_32(smmu_base + SMMU_CR0, SMMU_CR0_SMMUEN);
	if (smmuv3_poll(smmu_base + SMMU_CR0ACK, SMMU_CR0_SMMUEN, 0U) != 0) {
		return -1;
	}

	return 0;
}

View File

@ -13,7 +13,7 @@
#include <common/debug.h>
#include <drivers/auth/mbedtls/mbedtls_common.h>
#include <drivers/auth/mbedtls/mbedtls_config.h>
#include MBEDTLS_CONFIG_FILE
#include <plat/common/platform.h>
static void cleanup(void)
@ -36,7 +36,16 @@ void mbedtls_init(void)
if (atexit(cleanup))
panic();
#if DRTM_SUPPORT && defined(IMAGE_BL31)
/*
* XXX-LPT: Short-circuit the mbedtls heap linkage for DRTM.
* The heap linkage should ideally be integrated with the other sub-
* systems that require it (e.g. trusted board boot).
*/
err = get_mbedtls_heap_helper(&heap_addr, &heap_size);
#else
err = plat_get_mbedtls_heap(&heap_addr, &heap_size);
#endif
/* Ensure heap setup is proper */
if (err < 0) {

View File

@ -16,7 +16,7 @@ endif
MBEDTLS_INC = -I${MBEDTLS_DIR}/include
# Specify mbed TLS configuration file
MBEDTLS_CONFIG_FILE := "<drivers/auth/mbedtls/mbedtls_config.h>"
MBEDTLS_CONFIG_FILE ?= "<drivers/auth/mbedtls/mbedtls_config.h>"
$(eval $(call add_define,MBEDTLS_CONFIG_FILE))
MBEDTLS_SOURCES += drivers/auth/mbedtls/mbedtls_common.c

View File

@ -13,10 +13,17 @@
#include <common/debug.h>
#include <drivers/auth/crypto_mod.h>
#include <drivers/measured_boot/event_log.h>
#include <lib/tpm/tpm_log.h>
#include <mbedtls/md.h>
#include <plat/common/platform.h>
/*
* TODO: Remove dependency on private header, depend solely on
* lib/tpm/tpm_log.h instead.
*/
#include "../../lib/tpm/tpm_log_private.h"
/* Event Log data */
static uint8_t event_log[EVENT_LOG_SIZE];
@ -35,17 +42,17 @@ static uintptr_t tos_fw_config_base;
static uintptr_t nt_fw_config_base;
/* TCG_EfiSpecIdEvent */
static const id_event_headers_t id_event_header = {
.header = {
.pcr_index = PCR_0,
.event_type = EV_NO_ACTION,
static const id_event_container_t id_event_header = {
.container = {
.pcr_index = TPM_PCR_0,
.event_type = TPM_LOG_EV_NO_ACTION,
.digest = {0},
.event_size = (uint32_t)(sizeof(id_event_struct_t) +
(sizeof(id_event_algorithm_size_t) *
.event_size = (uint32_t)(sizeof(__id_event_t) +
(sizeof(id_event_alg_info_t) *
HASH_ALG_COUNT))
},
.struct_header = {
.id_event_misc_data = {
.signature = TCG_ID_EVENT_SIGNATURE_03,
.platform_class = PLATFORM_CLASS_CLIENT,
.spec_version_minor = TCG_SPEC_VERSION_MINOR_TPM2,
@ -62,13 +69,13 @@ static const event2_header_t locality_event_header = {
* All EV_NO_ACTION events SHALL set
* TCG_PCR_EVENT2.pcrIndex = 0, unless otherwise specified
*/
.pcr_index = PCR_0,
.pcr_index = TPM_PCR_0,
/*
* All EV_NO_ACTION events SHALL set
* TCG_PCR_EVENT2.eventType = 03h
*/
.event_type = EV_NO_ACTION,
.event_type = TPM_LOG_EV_NO_ACTION,
/*
* All EV_NO_ACTION events SHALL set
@ -82,7 +89,7 @@ static const event2_header_t locality_event_header = {
/* Platform's table with platform specific image IDs, names and PCRs */
static const image_data_t plat_images_data[] = {
{ BL2_IMAGE_ID, BL2_STRING, PCR_0 }, /* Reserved for BL2 */
{ BL2_IMAGE_ID, BL2_STRING, TPM_PCR_0 }, /* Reserved for BL2 */
{ INVALID_ID, NULL, (unsigned int)(-1) } /* Terminator */
};
@ -140,21 +147,21 @@ static int add_event2(const uint8_t *hash, const image_data_t *image_ptr)
((event2_header_t *)ptr)->pcr_index = image_ptr->pcr;
/* TCG_PCR_EVENT2.EventType */
((event2_header_t *)ptr)->event_type = EV_POST_CODE;
((event2_header_t *)ptr)->event_type = TPM_LOG_EV_POST_CODE;
/* TCG_PCR_EVENT2.Digests.Count */
ptr = (uint8_t *)ptr + offsetof(event2_header_t, digests);
((tpml_digest_values *)ptr)->count = HASH_ALG_COUNT;
((tpml_digest_values_t *)ptr)->count = HASH_ALG_COUNT;
/* TCG_PCR_EVENT2.Digests[] */
ptr = (uint8_t *)((uintptr_t)ptr +
offsetof(tpml_digest_values, digests));
offsetof(tpml_digest_values_t, digests));
/* TCG_PCR_EVENT2.Digests[].AlgorithmId */
((tpmt_ha *)ptr)->algorithm_id = TPM_ALG_ID;
((tpmt_ha_t *)ptr)->algorithm_id = TPM_ALG_ID;
/* TCG_PCR_EVENT2.Digests[].Digest[] */
ptr = (uint8_t *)((uintptr_t)ptr + offsetof(tpmt_ha, digest));
ptr = (uint8_t *)((uintptr_t)ptr + offsetof(tpmt_ha_t, digest));
/* Check for space in Event Log buffer */
if (((uintptr_t)ptr + TCG_DIGEST_SIZE) > EVENT_LOG_END) {
@ -210,17 +217,17 @@ void event_log_init(void)
ptr = (uint8_t *)((uintptr_t)ptr + sizeof(id_event_header));
/* TCG_EfiSpecIdEventAlgorithmSize structure */
((id_event_algorithm_size_t *)ptr)->algorithm_id = TPM_ALG_ID;
((id_event_algorithm_size_t *)ptr)->digest_size = TCG_DIGEST_SIZE;
ptr = (uint8_t *)((uintptr_t)ptr + sizeof(id_event_algorithm_size_t));
((id_event_alg_info_t *)ptr)->algorithm_id = TPM_ALG_ID;
((id_event_alg_info_t *)ptr)->digest_size = TCG_DIGEST_SIZE;
ptr = (uint8_t *)((uintptr_t)ptr + sizeof(id_event_alg_info_t));
/*
* TCG_EfiSpecIDEventStruct.vendorInfoSize
* No vendor data
*/
((id_event_struct_data_t *)ptr)->vendor_info_size = 0;
((id_event_vendor_data_t *)ptr)->vendor_info_size = 0;
ptr = (uint8_t *)((uintptr_t)ptr +
offsetof(id_event_struct_data_t, vendor_info));
offsetof(id_event_vendor_data_t, vendor_info));
if ((uintptr_t)ptr != ((uintptr_t)event_log + ID_EVENT_SIZE)) {
panic();
}
@ -240,16 +247,16 @@ void event_log_init(void)
ptr = (uint8_t *)((uintptr_t)ptr + sizeof(locality_event_header));
/* TCG_PCR_EVENT2.Digests[].AlgorithmId */
((tpmt_ha *)ptr)->algorithm_id = TPM_ALG_ID;
((tpmt_ha_t *)ptr)->algorithm_id = TPM_ALG_ID;
/* TCG_PCR_EVENT2.Digests[].Digest[] */
(void)memset(&((tpmt_ha *)ptr)->digest, 0, TPM_ALG_ID);
(void)memset(&((tpmt_ha_t *)ptr)->digest, 0, TPM_ALG_ID);
ptr = (uint8_t *)((uintptr_t)ptr +
offsetof(tpmt_ha, digest) + TCG_DIGEST_SIZE);
offsetof(tpmt_ha_t, digest) + TCG_DIGEST_SIZE);
/* TCG_PCR_EVENT2.EventSize */
((event2_data_t *)ptr)->event_size =
(uint32_t)sizeof(startup_locality_event_t);
(uint32_t)sizeof(startup_locality_event_data_t);
ptr = (uint8_t *)((uintptr_t)ptr + offsetof(event2_data_t, event));
/* TCG_EfiStartupLocalityEvent.Signature */
@ -260,8 +267,8 @@ void event_log_init(void)
* TCG_EfiStartupLocalityEvent.StartupLocality = 0:
* the platform's boot firmware
*/
((startup_locality_event_t *)ptr)->startup_locality = 0U;
ptr = (uint8_t *)((uintptr_t)ptr + sizeof(startup_locality_event_t));
((startup_locality_event_data_t *)ptr)->startup_locality = 0U;
ptr = (uint8_t *)((uintptr_t)ptr + sizeof(startup_locality_event_data_t));
if ((uintptr_t)ptr != ((uintptr_t)start_ptr + LOC_EVENT_SIZE)) {
panic();
}

View File

@ -9,6 +9,13 @@
#include <common/debug.h>
#include <drivers/measured_boot/event_log.h>
#include <lib/tpm/tpm_log.h>
/*
* TODO: Remove dependency on private header, depend solely on
* lib/tpm/tpm_log.h instead.
*/
#include "../../lib/tpm/tpm_log_private.h"
#if LOG_LEVEL >= EVENT_LOG_LEVEL
@ -23,8 +30,8 @@ static void id_event_print(uint8_t **log_addr, size_t *log_size)
unsigned int i;
uint8_t info_size, *info_size_ptr;
void *ptr = *log_addr;
id_event_headers_t *event = (id_event_headers_t *)ptr;
id_event_algorithm_size_t *alg_ptr;
id_event_container_t *event = (id_event_container_t *)ptr;
id_event_alg_info_t *alg_ptr;
uint32_t event_size, number_of_algorithms;
size_t digest_len;
#if ENABLE_ASSERTIONS
@ -32,22 +39,22 @@ static void id_event_print(uint8_t **log_addr, size_t *log_size)
bool valid = true;
#endif
assert(*log_size >= sizeof(id_event_headers_t));
assert(*log_size >= sizeof(id_event_container_t));
/* The fields of the event log header are defined to be PCRIndex of 0,
* EventType of EV_NO_ACTION, Digest of 20 bytes of 0, and
* Event content defined as TCG_EfiSpecIDEventStruct.
*/
LOG_EVENT("TCG_EfiSpecIDEvent:\n");
LOG_EVENT(" PCRIndex : %u\n", event->header.pcr_index);
assert(event->header.pcr_index == (uint32_t)PCR_0);
LOG_EVENT(" PCRIndex : %u\n", event->container.pcr_index);
assert(event->container.pcr_index == (uint32_t)TPM_PCR_0);
LOG_EVENT(" EventType : %u\n", event->header.event_type);
assert(event->header.event_type == EV_NO_ACTION);
LOG_EVENT(" EventType : %u\n", event->container.event_type);
assert(event->container.event_type == TPM_LOG_EV_NO_ACTION);
LOG_EVENT(" Digest :");
for (i = 0U; i < sizeof(event->header.digest); ++i) {
uint8_t val = event->header.digest[i];
for (i = 0U; i < sizeof(event->container.digest); ++i) {
uint8_t val = event->container.digest[i];
(void)printf(" %02x", val);
if ((i & U(0xF)) == 0U) {
@ -67,29 +74,29 @@ static void id_event_print(uint8_t **log_addr, size_t *log_size)
assert(valid);
/* EventSize */
event_size = event->header.event_size;
event_size = event->container.event_size;
LOG_EVENT(" EventSize : %u\n", event_size);
LOG_EVENT(" Signature : %s\n",
event->struct_header.signature);
event->id_event_misc_data.signature);
LOG_EVENT(" PlatformClass : %u\n",
event->struct_header.platform_class);
event->id_event_misc_data.platform_class);
LOG_EVENT(" SpecVersion : %u.%u.%u\n",
event->struct_header.spec_version_major,
event->struct_header.spec_version_minor,
event->struct_header.spec_errata);
event->id_event_misc_data.spec_version_major,
event->id_event_misc_data.spec_version_minor,
event->id_event_misc_data.spec_errata);
LOG_EVENT(" UintnSize : %u\n",
event->struct_header.uintn_size);
event->id_event_misc_data.uintn_size);
/* NumberOfAlgorithms */
number_of_algorithms = event->struct_header.number_of_algorithms;
number_of_algorithms = event->id_event_misc_data.number_of_algorithms;
LOG_EVENT(" NumberOfAlgorithms : %u\n", number_of_algorithms);
/* Address of DigestSizes[] */
alg_ptr = event->struct_header.digest_size;
alg_ptr = event->id_event_misc_data.digest_sizes;
/* Size of DigestSizes[] */
digest_len = number_of_algorithms * sizeof(id_event_algorithm_size_t);
digest_len = number_of_algorithms * sizeof(id_event_alg_info_t);
assert(((uintptr_t)alg_ptr + digest_len) <= (uintptr_t)end_ptr);
LOG_EVENT(" DigestSizes :\n");
@ -128,7 +135,7 @@ static void id_event_print(uint8_t **log_addr, size_t *log_size)
assert(((uintptr_t)info_size_ptr + info_size) <= (uintptr_t)end_ptr);
/* Check EventSize */
assert(event_size == (sizeof(id_event_struct_t) +
assert(event_size == (sizeof(__id_event_t) +
digest_len + info_size));
if (info_size != 0U) {
LOG_EVENT(" VendorInfo :");
@ -175,10 +182,10 @@ static void event2_print(uint8_t **log_addr, size_t *log_size)
for (unsigned int i = 0U; i < count; ++i) {
/* Check AlgorithmId address */
assert(((uintptr_t)ptr +
offsetof(tpmt_ha, digest)) <= (uintptr_t)end_ptr);
offsetof(tpmt_ha_t, digest)) <= (uintptr_t)end_ptr);
LOG_EVENT(" #%u AlgorithmId : SHA", i);
switch (((tpmt_ha *)ptr)->algorithm_id) {
switch (((tpmt_ha_t *)ptr)->algorithm_id) {
case TPM_ALG_SHA256:
sha_size = SHA256_DIGEST_SIZE;
(void)printf("256\n");
@ -194,12 +201,12 @@ static void event2_print(uint8_t **log_addr, size_t *log_size)
default:
(void)printf("?\n");
ERROR("Algorithm 0x%x not found\n",
((tpmt_ha *)ptr)->algorithm_id);
((tpmt_ha_t *)ptr)->algorithm_id);
panic();
}
/* End of Digest[] */
ptr = (uint8_t *)((uintptr_t)ptr + offsetof(tpmt_ha, digest));
ptr = (uint8_t *)((uintptr_t)ptr + offsetof(tpmt_ha_t, digest));
assert(((uintptr_t)ptr + sha_size) <= (uintptr_t)end_ptr);
/* Total size of all digests */
@ -229,12 +236,12 @@ static void event2_print(uint8_t **log_addr, size_t *log_size)
/* End of TCG_PCR_EVENT2.Event[EventSize] */
assert(((uintptr_t)ptr + event_size) <= (uintptr_t)end_ptr);
if ((event_size == sizeof(startup_locality_event_t)) &&
if ((event_size == sizeof(startup_locality_event_data_t)) &&
(strcmp((const char *)ptr, TCG_STARTUP_LOCALITY_SIGNATURE) == 0)) {
LOG_EVENT(" Signature : %s\n",
((startup_locality_event_t *)ptr)->signature);
((startup_locality_event_data_t *)ptr)->signature);
LOG_EVENT(" StartupLocality : %u\n",
((startup_locality_event_t *)ptr)->startup_locality);
((startup_locality_event_data_t *)ptr)->startup_locality);
} else {
LOG_EVENT(" Event : %s\n", (uint8_t *)ptr);
}

View File

@ -263,6 +263,8 @@ DEFINE_SYSREG_RW_FUNCS(elr_el3)
DEFINE_SYSREG_RW_FUNCS(mdccsr_el0)
DEFINE_SYSREG_RW_FUNCS(dbgdtrrx_el0)
DEFINE_SYSREG_RW_FUNCS(dbgdtrtx_el0)
DEFINE_SYSREG_RW_FUNCS(sp_el1)
DEFINE_SYSREG_RW_FUNCS(sp_el2)
DEFINE_SYSOP_FUNC(wfi)
DEFINE_SYSOP_FUNC(wfe)
@ -567,7 +569,7 @@ static inline unsigned int get_current_el_maybe_constant(void)
/*
* Check if an EL is implemented from AA64PFR0 register fields.
*/
static inline uint64_t el_implemented(unsigned int el)
static inline uint64_t nonsecure_el_implemented(unsigned int el)
{
if (el > 3U) {
return EL_IMPL_NONE;
@ -578,6 +580,19 @@ static inline uint64_t el_implemented(unsigned int el)
}
}
/*
 * Check if a Secure EL is implemented, from AA64PFR0 register fields.
 * S-EL2 presence is reported by the dedicated ID_AA64PFR0_EL1.SEL2 field;
 * for every other EL the Non-secure lookup applies unchanged.
 */
static inline uint64_t secure_el_implemented(unsigned int el)
{
	if (el == 2U) {
		unsigned int shift = ID_AA64PFR0_SEL2_SHIFT;

		return (read_id_aa64pfr0_el1() >> shift) & ID_AA64PFR0_ELX_MASK;
	} else {
		return nonsecure_el_implemented(el);
	}
}
#define el_implemented(el) nonsecure_el_implemented(el)
/* Previously defined accesor functions with incomplete register names */
#define read_current_el() read_CurrentEl()

View File

@ -11,11 +11,21 @@
#include <lib/utils_def.h>
/* SMMUv3 register offsets from device base */
#define SMMU_CR0 U(0x0020)
#define SMMU_CR0ACK U(0x0024)
#define SMMU_GBPA U(0x0044)
#define SMMU_S_IDR1 U(0x8004)
#define SMMU_S_INIT U(0x803c)
#define SMMU_S_GBPA U(0x8044)
/* SMMU_CR0 register fields */
#define SMMU_CR0_VMW (7UL << 6)
#define SMMU_CR0_ATSCHK (1UL << 4)
#define SMMU_CR0_CMDQEN (1UL << 3)
#define SMMU_CR0_EVENTQEN (1UL << 2)
#define SMMU_CR0_PRIQEN (1UL << 1)
#define SMMU_CR0_SMMUEN (1UL << 0)
/* SMMU_GBPA register fields */
#define SMMU_GBPA_UPDATE (1UL << 31)
#define SMMU_GBPA_ABORT (1UL << 20)
@ -33,4 +43,7 @@
int smmuv3_init(uintptr_t smmu_base);
int smmuv3_security_init(uintptr_t smmu_base);
int smmuv3_ns_set_abort_all(uintptr_t smmu_base);
int smmuv3_ns_set_bypass_all(uintptr_t smmu_base);
#endif /* SMMU_V3_H */

View File

@ -10,7 +10,6 @@
#include <stdint.h>
#include <common/debug.h>
#include <drivers/measured_boot/tcg.h>
/*
* Set Event Log debug level to one of:
@ -72,19 +71,19 @@ typedef struct {
size_t log_size);
} measured_boot_data_t;
#define ID_EVENT_SIZE (sizeof(id_event_headers_t) + \
(sizeof(id_event_algorithm_size_t) * HASH_ALG_COUNT) + \
sizeof(id_event_struct_data_t))
#define ID_EVENT_SIZE (sizeof(id_event_container_t) + \
(sizeof(id_event_alg_info_t) * HASH_ALG_COUNT) + \
sizeof(id_event_vendor_data_t))
#define LOC_EVENT_SIZE (sizeof(event2_header_t) + \
sizeof(tpmt_ha) + TCG_DIGEST_SIZE + \
sizeof(tpmt_ha_t) + TCG_DIGEST_SIZE + \
sizeof(event2_data_t) + \
sizeof(startup_locality_event_t))
sizeof(startup_locality_event_data_t))
#define LOG_MIN_SIZE (ID_EVENT_SIZE + LOC_EVENT_SIZE)
#define EVENT2_HDR_SIZE (sizeof(event2_header_t) + \
sizeof(tpmt_ha) + TCG_DIGEST_SIZE + \
sizeof(tpmt_ha_t) + TCG_DIGEST_SIZE + \
sizeof(event2_data_t))
/* Functions' declarations */

View File

@ -195,17 +195,6 @@ static inline unsigned int psci_check_power_state(unsigned int power_state)
return ((power_state) & PSTATE_VALID_MASK);
}
/*
* These are the states reported by the PSCI_AFFINITY_INFO API for the specified
* CPU. The definitions of these states can be found in Section 5.7.1 in the
* PSCI specification (ARM DEN 0022C).
*/
typedef enum {
AFF_STATE_ON = U(0),
AFF_STATE_OFF = U(1),
AFF_STATE_ON_PENDING = U(2)
} aff_info_state_t;
/*
* These are the power states reported by PSCI_NODE_HW_STATE API for the
* specified CPU. The definitions of these states can be found in Section 5.15.3

View File

@ -38,6 +38,17 @@ typedef struct spd_pm_ops {
*/
typedef void (*mailbox_entrypoint_t)(void);
/*
* These are the states reported by the PSCI_AFFINITY_INFO API for the specified
* CPU. The definitions of these states can be found in Section 5.7.1 in the
* PSCI specification (ARM DEN 0022C). Available for psci_lib clients.
*/
typedef enum {
AFF_STATE_ON = U(0),
AFF_STATE_OFF = U(1),
AFF_STATE_ON_PENDING = U(2)
} aff_info_state_t;
/******************************************************************************
* Structure to pass PSCI Library arguments.
*****************************************************************************/
@ -91,6 +102,8 @@ void psci_prepare_next_non_secure_ctx(
entry_point_info_t *next_image_info);
int psci_stop_other_cores(unsigned int wait_ms,
void (*stop_func)(u_register_t mpidr));
unsigned int psci_is_last_on_core_safe(void);
#endif /* __ASSEMBLER__ */
#endif /* PSCI_LIB_H */

116
include/lib/tpm/tpm.h Normal file
View File

@ -0,0 +1,116 @@
/*
* Copyright (c) 2021 Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#ifndef TPM_H
#define TPM_H
#include <lib/utils_def.h>
/*
* TPM_ALG_ID constants.
* Ref. Table 9 - Definition of (UINT16) TPM_ALG_ID Constants
* Trusted Platform Module Library. Part 2: Structures,
* Family "2.0", Level 00 Revision 01.38, September 29 2016.
*/
enum tpm_hash_alg {
TPM_ALG_NONE = 0x0,
TPM_ALG_SHA256 = 0x000B,
TPM_ALG_SHA384 = 0x000C,
TPM_ALG_SHA512 = 0x000D,
};
/* Tell whether @alg is one of the hash algorithms this library supports. */
static inline bool tpm_alg_is_valid(enum tpm_hash_alg alg)
{
	/* Only the SHA-2 family identifiers are recognised. */
	return (alg == TPM_ALG_SHA256) ||
	       (alg == TPM_ALG_SHA384) ||
	       (alg == TPM_ALG_SHA512);
}
enum tpm_hash_alg_dsize {
TPM_ALG_SHA256_DSIZE = 32,
TPM_ALG_SHA384_DSIZE = 48,
TPM_ALG_SHA512_DSIZE = 64,
TPM_ALG_MAX_DSIZE = TPM_ALG_SHA512_DSIZE
};
/*
 * Return the digest size in bytes produced by hash algorithm @alg,
 * or 0 for an unrecognised algorithm.
 */
static inline size_t tpm_alg_dsize(enum tpm_hash_alg alg)
{
	size_t dsize;

	switch (alg) {
	case TPM_ALG_SHA256:
		dsize = TPM_ALG_SHA256_DSIZE;
		break;
	case TPM_ALG_SHA384:
		dsize = TPM_ALG_SHA384_DSIZE;
		break;
	case TPM_ALG_SHA512:
		dsize = TPM_ALG_SHA512_DSIZE;
		break;
	default:
		dsize = 0;
		break;
	}

	return dsize;
}
enum tpm_pcr_idx {
/*
* SRTM, BIOS, Host Platform Extensions, Embedded
* Option ROMs and PI Drivers
*/
TPM_PCR_0 = 0,
/* Host Platform Configuration */
TPM_PCR_1,
/* UEFI driver and application Code */
TPM_PCR_2,
/* UEFI driver and application Configuration and Data */
TPM_PCR_3,
/* UEFI Boot Manager Code (usually the MBR) and Boot Attempts */
TPM_PCR_4,
/*
* Boot Manager Code Configuration and Data (for use
* by the Boot Manager Code) and GPT/Partition Table
*/
TPM_PCR_5,
/* Host Platform Manufacturer Specific */
TPM_PCR_6,
/* Secure Boot Policy */
TPM_PCR_7,
/* 8-15: Defined for use by the Static OS */
TPM_PCR_8,
/* Debug */
TPM_PCR_16 = 16,
/* DRTM (1) */
TPM_PCR_17 = 17,
/* DRTM (2) */
TPM_PCR_18 = 18,
};
/*
 * Tell whether @pcr_idx is one of the PCR indices enumerated by this
 * library. Note: the idiomatic specifier order is "static inline bool";
 * "static bool inline" (as originally written) is legal but non-standard
 * style and inconsistent with the other helpers in this header.
 */
static inline bool tpm_pcr_idx_is_valid(enum tpm_pcr_idx pcr_idx)
{
	switch (pcr_idx) {
	case TPM_PCR_0:
	case TPM_PCR_1:
	case TPM_PCR_2:
	case TPM_PCR_3:
	case TPM_PCR_4:
	case TPM_PCR_5:
	case TPM_PCR_6:
	case TPM_PCR_7:
	case TPM_PCR_8:
	case TPM_PCR_16:
	case TPM_PCR_17:
	case TPM_PCR_18:
		return true;
	default:
		return false;
	}
}
#endif /* TPM_H */

90
include/lib/tpm/tpm_log.h Normal file
View File

@ -0,0 +1,90 @@
/*
* Copyright (c) 2021 Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#ifndef TPM_LOG_H
#define TPM_LOG_H
#include <stddef.h>
#include <lib/tpm/tpm.h>
#include <export/lib/utils_def_exp.h>
/*
* Event types
* Ref. Table 9 Events
* TCG PC Client Platform Firmware Profile Specification,
* Family "2.0", Level 00 Revision 1.04, June 3 2019.
*/
#define TPM_LOG_EV_PREBOOT_CERT U(0x00000000)
#define TPM_LOG_EV_POST_CODE U(0x00000001)
#define TPM_LOG_EV_UNUSED U(0x00000002)
#define TPM_LOG_EV_NO_ACTION U(0x00000003)
#define TPM_LOG_EV_SEPARATOR U(0x00000004)
#define TPM_LOG_EV_ACTION U(0x00000005)
#define TPM_LOG_EV_EVENT_TAG U(0x00000006)
#define TPM_LOG_EV_S_CRTM_CONTENTS U(0x00000007)
#define TPM_LOG_EV_S_CRTM_VERSION U(0x00000008)
#define TPM_LOG_EV_CPU_MICROCODE U(0x00000009)
#define TPM_LOG_EV_PLATFORM_CONFIG_FLAGS U(0x0000000A)
#define TPM_LOG_EV_TABLE_OF_DEVICES U(0x0000000B)
#define TPM_LOG_EV_COMPACT_HASH U(0x0000000C)
#define TPM_LOG_EV_IPL U(0x0000000D)
#define TPM_LOG_EV_IPL_PARTITION_DATA U(0x0000000E)
#define TPM_LOG_EV_NONHOST_CODE U(0x0000000F)
#define TPM_LOG_EV_NONHOST_CONFIG U(0x00000010)
#define TPM_LOG_EV_NONHOST_INFO U(0x00000011)
#define TPM_LOG_EV_OMIT_BOOT_DEVICE_EVENTS U(0x00000012)
#define TPM_LOG_EV_EFI_EVENT_BASE U(0x80000000)
#define TPM_LOG_EV_EFI_VARIABLE_DRIVER_CONFIG U(0x80000001)
#define TPM_LOG_EV_EFI_VARIABLE_BOOT U(0x80000002)
#define TPM_LOG_EV_EFI_BOOT_SERVICES_APPLICATION U(0x80000003)
#define TPM_LOG_EV_EFI_BOOT_SERVICES_DRIVER U(0x80000004)
#define TPM_LOG_EV_EFI_RUNTIME_SERVICES_DRIVER U(0x80000005)
#define TPM_LOG_EV_EFI_GPT_EVENT U(0x80000006)
#define TPM_LOG_EV_EFI_ACTION U(0x80000007)
#define TPM_LOG_EV_EFI_PLATFORM_FIRMWARE_BLOB U(0x80000008)
#define TPM_LOG_EV_EFI_HANDOFF_TABLES U(0x80000009)
#define TPM_LOG_EV_EFI_HCRTM_EVENT U(0x80000010)
#define TPM_LOG_EV_EFI_VARIABLE_AUTHORITY U(0x800000E0)
/* One digest entry: algorithm identifier plus the raw digest bytes. */
struct tpm_log_digest {
	/* Hash algorithm that produced the digest held in buf[]. */
	enum tpm_hash_alg h_alg;
	/* Number of valid bytes in buf[]. */
	size_t buf_bytes;
	/* Digest bytes (flexible array member; buf_bytes long). */
	char buf[];
};

/* A counted set of digests, one per active hash algorithm. */
struct tpm_log_digests {
	size_t count;
	/*
	 * NOTE(review): struct tpm_log_digest ends in a flexible array
	 * member, so an array of it is a C constraint violation
	 * (C11 6.7.2.1p3) accepted only as a compiler extension, and d[i]
	 * indexing cannot account for the variable-length buf[] -- confirm
	 * how callers lay these out.
	 */
	struct tpm_log_digest d[];
};

/* Book-keeping for an in-memory TPM event log under construction. */
struct tpm_log_info {
	/* Start of the caller-supplied log buffer. */
	char *buf;
	/* Total capacity of buf in bytes. */
	size_t buf_bytes;
	/* Running cursor, into the buffer. */
	char *cursor;
	/*
	 * Presumably points at the startup-locality event data recorded
	 * inside buf, if any -- TODO confirm against lib/tpm/tpm_log.c.
	 */
	char *startup_locality_event_data;
};

/* Opaque / encapsulated type */
typedef struct tpm_log_info tpm_log_info_t;
int tpm_log_init(uint32_t *const tpm_log_buf, size_t tpm_log_buf_bytes,
enum tpm_hash_alg alg[], size_t num_algs,
tpm_log_info_t *log_info_out);
int tpm_log_add_event(tpm_log_info_t *tpm_log_info,
uint32_t event_type, enum tpm_pcr_idx pcr,
struct tpm_log_digests *digests,
const unsigned char *event_data, size_t event_data_bytes);
void tpm_log_serialise(char *dst, const tpm_log_info_t *tpm_log,
size_t *tpm_log_size_out);
#endif /* TPM_LOG_H */

View File

@ -21,6 +21,11 @@ typedef struct mem_region {
size_t nbytes;
} mem_region_t;
typedef struct p_mem_region {
unsigned long long base;
unsigned long long nbytes;
} p_mem_region_t;
/*
* zero_normalmem all the regions defined in tbl.
*/

View File

@ -63,6 +63,7 @@ int plat_get_image_source(unsigned int image_id,
uintptr_t plat_get_ns_image_entrypoint(void);
unsigned int plat_my_core_pos(void);
int plat_core_pos_by_mpidr(u_register_t mpidr);
unsigned int plat_is_my_cpu_primary(void);
int plat_get_mbedtls_heap(void **heap_addr, size_t *heap_size);
#if STACK_PROTECTOR_ENABLED

View File

@ -0,0 +1,31 @@
/*
* Copyright (c) 2021 Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#ifndef __DRTM_CACHE_H
#define __DRTM_CACHE_H
#include <stdbool.h>
/*
* XXX Note: the generic protected DRTM resources are being specialised into
* DRTM TCB hashes. Platform resources retrieved through the generic DRTM cache
* are going to be retrieved through bespoke interfaces instead.
* This file will be removed once the transition is complete.
*/
void drtm_cache_init(void);
int drtm_cache_resource_opt(const char *id, size_t bytes, const char *data, bool cache_data);
#define drtm_cache_resource(id, bytes, data) \
drtm_cache_resource_opt(id, bytes, data, true)
#define drtm_cache_resource_ptr(id, bytes, data) \
drtm_cache_resource_opt(id, bytes, data, false)
void drtm_cache_get_resource(const char *id,
const char **res_out, size_t *res_out_bytes);
#endif /* __DRTM_CACHE_H */

View File

@ -0,0 +1,61 @@
/*
* Copyright (c) 2021 Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*
* DRTM service
*
* Authors:
* Lucian Paul-Trifu <lucian.paultrifu@gmail.com>
* Brian Nezvadovitz
*
*/
#ifndef ARM_DRTM_SVC_H
#define ARM_DRTM_SVC_H
/*
* SMC function IDs for DRTM Service
* Upper word bits set: Fast call, SMC64, Standard Secure Svc. Call (OEN = 4)
*/
/* SMC function IDs for the DRTM service (fast call, SMC64, OEN 4). */
#define ARM_DRTM_SVC_VERSION		0xC4000110u
#define ARM_DRTM_SVC_FEATURES		0xC4000111u
#define ARM_DRTM_SVC_UNPROTECT_MEM	0xC4000113u
#define ARM_DRTM_SVC_DYNAMIC_LAUNCH	0xC4000114u
#define ARM_DRTM_SVC_CLOSE_LOCALITY	0xC4000115u
#define ARM_DRTM_SVC_GET_ERROR		0xC4000116u
#define ARM_DRTM_SVC_SET_ERROR		0xC4000117u
#define ARM_DRTM_SVC_SET_TCB_HASH	0xC4000118u
#define ARM_DRTM_SVC_LOCK_TCB_HASHES	0xC4000119u

/* Feature-query IDs passed in x1 of ARM_DRTM_SVC_FEATURES. */
#define ARM_DRTM_FEATURES_TPM		0x1u
#define ARM_DRTM_FEATURES_MEM_REQ	0x2u
#define ARM_DRTM_FEATURES_DMA_PROT	0x3u
#define ARM_DRTM_FEATURES_BOOT_PE_ID	0x4u
#define ARM_DRTM_FEATURES_TCB_HASHES	0x5u

/*
 * True iff _fid falls inside the DRTM service's FID range. The upper bound
 * must be the highest allocated FID (LOCK_TCB_HASHES); bounding at
 * SET_ERROR would wrongly exclude the SET_TCB_HASH and LOCK_TCB_HASHES
 * SMCs defined above from being routed to the DRTM handler.
 */
#define is_drtm_fid(_fid) \
	(((_fid) >= ARM_DRTM_SVC_VERSION) && \
	 ((_fid) <= ARM_DRTM_SVC_LOCK_TCB_HASHES))

/* ARM DRTM Service Calls version numbers */
#define ARM_DRTM_VERSION_MAJOR		0x0000u
#define ARM_DRTM_VERSION_MINOR		0x0001u
#define ARM_DRTM_VERSION \
	((ARM_DRTM_VERSION_MAJOR << 16) | ARM_DRTM_VERSION_MINOR)
/* Initialization routine for the DRTM service */
int drtm_setup(void);
/* Handler to be called to handle DRTM SMC calls */
uint64_t drtm_smc_handler(uint32_t smc_fid,
uint64_t x1,
uint64_t x2,
uint64_t x3,
uint64_t x4,
void *cookie,
void *handle,
uint64_t flags);
#endif /* ARM_DRTM_SVC_H */

View File

@ -0,0 +1,82 @@
/*
* Copyright (c) 2021 Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*
* DRTM service's dependencies on the platform.
*
*/
#ifndef ARM_DRTM_SVC_PLAT_H
#define ARM_DRTM_SVC_PLAT_H
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#if !defined(DRTM_SHA_ALG)
#error "The DRTM service requires definition of the DRTM_SHA_ALG macro"
#else
#if DRTM_SHA_ALG == 256
#define DRTM_SHA_ALG_DSIZE 32
#elif DRTM_SHA_ALG == 384
#define DRTM_SHA_ALG_DSIZE 48
#elif DRTM_SHA_ALG == 512
#define DRTM_SHA_ALG_DSIZE 64
#else
#warning "Unrecognised DRTM_SHA_ALG"
#define DRTM_SHA_ALG_DSIZE 64
#endif
#endif
/***
* DRTM's dependency on platform DMA protection.
*/
/* Sanity checks. */
bool plat_has_non_host_platforms(void);
bool plat_has_unmanaged_dma_peripherals(void);
unsigned int plat_get_total_num_smmus(void);
/* Dependency on Arm-compliant SMMUs. */
void plat_enumerate_smmus(const uintptr_t (*smmus_out)[],
size_t *smmu_count_out);
struct drtm_mem_region_descr_table_v1;
typedef struct drtm_mem_region_descr_table_v1 struct_drtm_mem_region_descr_table;
/* Dependencies on platform-specific region-based DMA protection. */
struct drtm_dma_protector_ops {
int (*protect_regions)(void *data,
const struct_drtm_mem_region_descr_table *regions);
};
struct drtm_dma_protector {
void *data;
struct drtm_dma_protector_ops *ops;
};
struct drtm_dma_protector plat_get_dma_protector(void);
/***
* DRTM's platform-specific DRTM TCB hashes.
*/
struct plat_drtm_tcb_hash {
union {
#define _HASH_ID_TYPE uint32_t
_HASH_ID_TYPE uint32;
unsigned char uchars[sizeof(_HASH_ID_TYPE)];
#undef _HASH_ID_TYPE
} hash_id;
size_t hash_bytes;
unsigned char hash_val[DRTM_SHA_ALG_DSIZE];
};
#define PLAT_DRTM_TCB_HASH_VAL_AND_SIZE(...) \
.hash_bytes = sizeof((unsigned char[]){ __VA_ARGS__ }), .hash_val = { __VA_ARGS__ }
void plat_enumerate_drtm_tcb_hashes(const struct plat_drtm_tcb_hash **hashes_out,
size_t *hashes_count_out);
#endif /* ARM_DRTM_SVC_PLAT_H */

View File

@ -1022,3 +1022,39 @@ int psci_stop_other_cores(unsigned int wait_ms,
return PSCI_E_SUCCESS;
}
/*******************************************************************************
 * Return the index of a core, other than the current one, that is not turned
 * off, or PLATFORM_CORE_COUNT if no such core exists (i.e. the caller is the
 * last core ON).
 *
 * NOTE(review): despite the "is_" prefix the return value is a core index,
 * not a boolean -- callers must compare against PLATFORM_CORE_COUNT; confirm
 * the intended contract with the declaration in psci_lib.h.
 ******************************************************************************/
unsigned int psci_is_last_on_core_safe(void)
{
	unsigned int this_core = plat_my_core_pos();
	/* Default: assume every other core is OFF. */
	unsigned int core_not_off = PLATFORM_CORE_COUNT;
	unsigned int parent_nodes[PLAT_MAX_PWR_LVL + 1] = {0};

	/*
	 * Lock all PSCI state to remove races that could result in false
	 * positives, in other words, to check atomically w.r.t. cores turning
	 * on.
	 *
	 * TODO: This logic must be reviewed further by someone knowledgeable
	 * of the PSCI TF-A implementation.
	 */
	psci_get_parent_pwr_domain_nodes(this_core, PLAT_MAX_PWR_LVL, parent_nodes);
	psci_acquire_pwr_domain_locks(PLAT_MAX_PWR_LVL, parent_nodes);

	/* Scan all cores while holding the locks; skip the calling core. */
	for (unsigned int core = 0U; core < psci_plat_core_count; core++) {
		if (core == this_core) {
			continue;
		}

		if (psci_get_aff_info_state_by_idx(core) != AFF_STATE_OFF) {
			/* Found a core that is ON or ON_PENDING; stop early. */
			core_not_off = core;
			break;
		}
	}

	psci_release_pwr_domain_locks(PLAT_MAX_PWR_LVL, parent_nodes);

	return core_not_off;
}

8
lib/tpm/tpm_lib.mk Normal file
View File

@ -0,0 +1,8 @@
#
# Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
TPM_LIB_SOURCES := lib/tpm/tpm_log.c \

449
lib/tpm/tpm_log.c Normal file
View File

@ -0,0 +1,449 @@
/*
* Copyright (c) 2021 Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <lib/tpm/tpm.h>
#include <lib/tpm/tpm_log.h>
#include "tpm_log_private.h"
/*
* TODO: The struct serialisation handling in this library is prone to alignment
* faults. It happens to work because the TCG-defined structure fields
* generally maintain natural alignment. And it avoids undefined C-language
* behaviour thanks to #pragma pack(1).
* However, future extensions of this library could introduce structures whose
* serialisation would break the natural alignment. For example, serialising
* vendor-specific info structures.
* Therefore, it would be an improvement if a more standard way of serialising
* struct was used, such as preparing structs on the stack first, and then
* serialising them to the destination via memcpy().
*/
static const id_event_container_t id_event_templ = {
.container = {
.pcr_index = TPM_PCR_0,
.event_type = TPM_LOG_EV_NO_ACTION,
.digest = {0},
/*
* Must be set at run-time, when hash_alg_count and the number of
* vendor bytes is given:
* .event_size = ...;
*/
},
.id_event_misc_data = {
.signature = TCG_ID_EVENT_SIGNATURE_03,
.platform_class = PLATFORM_CLASS_CLIENT,
.spec_version_minor = TCG_SPEC_VERSION_MINOR_TPM2,
.spec_version_major = TCG_SPEC_VERSION_MAJOR_TPM2,
.spec_errata = TCG_SPEC_ERRATA_TPM2,
.uintn_size = (uint8_t)(sizeof(unsigned int) /
sizeof(uint32_t)),
/*
* Must be set to hash_alg_count
* .number_of_algorithms = hash_alg_count
*/
}
};
static const event2_header_t startup_event_container_templ = {
/*
* All EV_NO_ACTION events SHALL set
* TCG_PCR_EVENT2.pcrIndex = 0, unless otherwise specified
*/
.pcr_index = TPM_PCR_0,
/*
* All EV_NO_ACTION events SHALL set
* TCG_PCR_EVENT2.eventType = 03h
*/
.event_type = TPM_LOG_EV_NO_ACTION,
/*
* All EV_NO_ACTION events SHALL set
* TCG_PCR_EVENT2.digests to all
* 0x00's for each allocated Hash algorithm
*
* Must be set at runtime, when hash_alg_count is known:
* .digests = {
* .count = hash_alg_count,
* .digests = {alg_id1, {0}, alg_id2, {0}, ...}
* }
*/
};
static const startup_locality_event_t startup_event_templ = {
.startup_event_header = {
.event_size = sizeof(startup_locality_event_data_t),
},
.startup_event_data = {
.signature = TCG_STARTUP_LOCALITY_SIGNATURE,
/*
* Must be set at run time, when startup_locality is provided:
* .startup_locality = startup_locality
*/
}
};
/*
 * Initialise a TPM event log in `buf` (of `buf_bytes` bytes) with a
 * TCG_EfiSpecIDEventStruct header advertising the `num_algs` hash algorithms
 * in `alg[]`, and set up `log_out` to track the log state.
 *
 * Returns 0 on success, -EINVAL on an unrecognised algorithm, or -ENOMEM if
 * the buffer cannot hold the header.
 */
int tpm_log_init(uint32_t *const buf, size_t buf_bytes,
		 enum tpm_hash_alg alg[], size_t num_algs,
		 struct tpm_log_info *log_out)
{
	const char *const buf_end = (char *)buf + buf_bytes;
	/* `cur` is the write cursor; `cur_next` is its bounds-checked advance. */
	char *cur , *cur_next;
	char *id_event;

	/* Reject any algorithm this library does not know the digest size of. */
	for (int i = 0; i < num_algs; i++) {
		if (!tpm_alg_is_valid(alg[i])) {
			return -EINVAL;
		}
	}

	cur = (char *)buf;
	cur_next = cur + sizeof(id_event_container_t);
	if (cur_next > buf_end) {
		return -ENOMEM;
	}
	/* Copy the TCG_EfiSpecIDEventStruct container template. */
	(void)memcpy(cur, (const void *)&id_event_templ, sizeof(id_event_templ));
	id_event = cur;
	/* TCG_EfiSpecIDEventStruct.numberOfAlgorithms */
	((id_event_container_t *)cur)->
		id_event_misc_data.number_of_algorithms = num_algs;
	cur = cur_next;

	/* TCG_EfiSpecIDEventStruct.digestSizes[] -- one (id, size) pair per alg. */
	for (int i = 0; i < num_algs; i++) {
		cur_next = cur + sizeof(id_event_alg_info_t);
		if (cur_next > buf_end) {
			return -ENOMEM;
		}
		((id_event_alg_info_t *)cur)->algorithm_id = alg[i];
		((id_event_alg_info_t *)cur)->digest_size = tpm_alg_dsize(alg[i]);
		cur = cur_next;
	}

/* 3 vendor bytes + the 1-byte vendorInfoSize field keep 4-byte alignment. */
#define VENDOR_INFO_SIZE 3U
	cur_next = cur + offsetof(id_event_vendor_data_t, vendor_info) +
		   VENDOR_INFO_SIZE;
	if (cur_next > buf_end) {
		return -ENOMEM;
	}
	/*
	 * TCG_EfiSpecIDEventStruct.vendorInfoSize -- vendor data is not supported
	 * currently.
	 * Note that when supporting vendor data, it is recommended that only
	 * 4-byte-aligned sizes are supported, because other sizes break the
	 * alignment assumptions relied upon when writing to the event log.
	 */
	((id_event_vendor_data_t *)cur)->vendor_info_size = VENDOR_INFO_SIZE;
	for (int i = 0; i < VENDOR_INFO_SIZE; i++) {
		((id_event_vendor_data_t *)cur)->vendor_info[i] = 0;
	}
	cur = cur_next;

	/*
	 * TCG_EfiSpecIDEventStruct container info.
	 * event_size covers everything written after the fixed container header.
	 */
	((id_event_container_t *)id_event)->container.event_size =
		cur - id_event - sizeof(((id_event_container_t *)NULL)->container);

	/* Publish the log state; the cursor marks where the next event goes. */
	log_out->buf = (char *)buf;
	log_out->buf_bytes = buf_bytes;
	log_out->cursor = cur;
	log_out->startup_locality_event_data = NULL;

	return 0;
}
/*
 * Return the TCG_EfiSpecIDEventStruct data at the start of the log buffer,
 * or NULL if the buffer is too small to contain its fixed-size part.
 */
static const id_event_misc_data_t *tpm_log_get_id_event(
				const struct tpm_log_info *log)
{
	if (sizeof(id_event_misc_data_t) > log->buf_bytes) {
		return NULL;
	}

	return &((id_event_container_t *)log->buf)->id_event_misc_data;
}
/*
 * Linear scan of the caller-supplied digest list for the entry whose hash
 * algorithm matches `required_h_alg`; NULL when no such entry exists.
 */
static struct tpm_log_digest *digests_arg_get_digest(
				struct tpm_log_digests *digests,
				enum tpm_hash_alg required_h_alg)
{
	for (int idx = 0; idx < digests->count; idx++) {
		if (digests->d[idx].h_alg == required_h_alg) {
			return &digests->d[idx];
		}
	}

	return NULL;
}
/*
 * Serialise a TPML_DIGEST_VALUES structure at `cur`, one TPMT_HA entry per
 * algorithm allocated in the log's ID event. When `digests` is NULL (the
 * EV_NO_ACTION case), each digest is written as all-zero bytes; otherwise the
 * matching digest from `digests` is copied in.
 *
 * On success returns 0 and stores the advanced cursor in *cur_out; returns
 * -EINVAL on a malformed log or a missing digest, -ENOMEM when out of space.
 */
static int add_tpml_digest_values(const struct tpm_log_info *log, char *cur,
				  struct tpm_log_digests *digests,
				  char **cur_out)
{
	const id_event_misc_data_t *id_event;
	const char *const buf_end = log->buf + log->buf_bytes;
	char *cur_next;

	if (!(id_event = tpm_log_get_id_event(log))) {
		return -EINVAL;
	}

	cur_next = cur + offsetof(tpml_digest_values_t, digests);
	if (cur_next > buf_end) {
		return -ENOMEM;
	}
	/* TCG_PCR_EVENT2.Digests.Count */
	((tpml_digest_values_t *)cur)->count = id_event->number_of_algorithms;
	cur = cur_next;

	/* TCG_PCR_EVENT2.Digests.Digests[] */
	for (int i = 0; i < id_event->number_of_algorithms; i++) {
		const id_event_alg_info_t *required_d = id_event->digest_sizes + i;
		struct tpm_log_digest *d;

		cur_next = cur + offsetof(tpmt_ha_t, digest);
		if (cur_next > buf_end) {
			return -ENOMEM;
		}
		/* TCG_PCR_EVENT2.Digests.Digests.Algorithm_Id */
		((tpmt_ha_t *)cur)->algorithm_id = required_d->algorithm_id;
		cur = cur_next;

		cur_next = cur + required_d->digest_size;
		if (cur_next > buf_end) {
			return -ENOMEM;
		}
		/* TCG_PCR_EVENT2.Digests.Digests.Digest */
		if (digests) {
			d = digests_arg_get_digest(digests, required_d->algorithm_id);
			/*
			 * Guard against a NULL dereference if the caller did not
			 * pre-validate `digests` via check_arg_digests().
			 */
			if (d == NULL) {
				return -EINVAL;
			}
			(void)memcpy(cur, d->buf, required_d->digest_size);
		} else {
			/* All-zero digest, as mandated for EV_NO_ACTION events. */
			(void)memset(cur, 0, required_d->digest_size);
		}
		cur = cur_next;
	}

	*cur_out = cur;
	return 0;
}
/*
 * Serialise a TCG Startup Locality EV_NO_ACTION event (TCG_PCR_EVENT2 header,
 * zeroed TPML_DIGEST_VALUES, then TCG_EfiStartupLocalityEvent data carrying
 * `startup_locality`) at `cur`.
 *
 * On success returns 0 and stores the advanced cursor in *cur_out;
 * returns -ENOMEM when the log buffer is exhausted, or the error from
 * add_tpml_digest_values().
 */
static int add_startup_locality_event2(const struct tpm_log_info *log, char *cur,
				       uint8_t startup_locality,
				       char **cur_out)
{
	const char *const buf_end = log->buf + log->buf_bytes;
	char *cur_next;
	int rc;

	cur_next = cur + offsetof(event2_header_t, digests);
	if (cur_next > buf_end) {
		return -ENOMEM;
	}
	/* Copy Startup Locality event container (fixed header part only). */
	(void)memcpy(cur, &startup_event_container_templ, cur_next - cur);
	cur = cur_next;

	/* Digests are all zeros for this EV_NO_ACTION event (digests == NULL). */
	if ((rc = add_tpml_digest_values(log, cur, NULL, &cur))) {
		return rc;
	}

	cur_next = cur + sizeof(startup_locality_event_t);
	if (cur_next > buf_end) {
		return -ENOMEM;
	}
	/* Copy TCG_EfiStartupLocalityEvent event */
	(void)memcpy(cur, &startup_event_templ, sizeof(startup_locality_event_t));
	/* Adjust TCG_EfiStartupLocalityEvent.StartupLocality */
	((startup_locality_event_t *)cur)->
		startup_event_data.startup_locality = startup_locality;
	cur = cur_next;

	*cur_out = cur;
	return 0;
}
/*
 * Validate the (event_type, pcr, digests) combination for tpm_log_add_event().
 * Returns 0 when acceptable, -EINVAL otherwise.
 */
static int check_arg_event_type(uint32_t event_type, enum tpm_pcr_idx pcr,
		struct tpm_log_digests *digests,
		const unsigned char *event_data, size_t event_data_bytes)
{
	/*
	 * As per TCG specifications, firmware components that are measured
	 * into PCR[0] must be logged in the event log using the event type
	 * EV_POST_CODE.
	 */
	if ((pcr == TPM_PCR_0) && (event_type != TPM_LOG_EV_POST_CODE)) {
		return -EINVAL;
	}

	/*
	 * EV_NO_ACTION events carry all-zero digest bytes for each allocated
	 * hash algorithm, so the caller must not supply digests for them --
	 * and must supply digests for every other event type.
	 *
	 * Ref. Section 9.4.5 "EV_NO_ACTION Event Types", requirement #3.
	 */
	if ((event_type == TPM_LOG_EV_NO_ACTION) == (digests != NULL)) {
		return -EINVAL;
	}

	/*
	 * TODO: Further event-specific validation or exceptions, e.g. as per
	 * Section 9.4 "Event Descriptions":
	 * - EV_ACTION
	 * - EV_EFI_ACTION
	 */

	return 0;
}
static int check_arg_digests(const id_event_misc_data_t *id_event,
struct tpm_log_digests *digests)
{
/* Check that the digests being added fit the event log's structure. */
if (digests->count != id_event->number_of_algorithms) {
return -EINVAL;
}
for (int i = 0; i < digests->count; i++) {
struct tpm_log_digest *d = digests->d + i;
if (!tpm_alg_is_valid(d->h_alg)) {
return -EINVAL;
} else if (d->buf_bytes < tpm_alg_dsize(d->h_alg)) {
return -EINVAL;
}
}
for (int i = 0; i < id_event->number_of_algorithms; i++) {
const id_event_alg_info_t *required_d = id_event->digest_sizes + i;
if (!digests_arg_get_digest(digests, required_d->algorithm_id)) {
return -EINVAL;
}
}
return 0;
}
/*
 * Append a TCG_PCR_EVENT2 event to the log: header, digests (one per
 * algorithm allocated at tpm_log_init() time) and the event data.
 *
 * A Startup Locality event is inserted before the first PCR[0] event, as
 * required by the TCG PC Client Platform Firmware Profile (Section 9.4.5.3).
 *
 * Returns 0 on success, -EINVAL on invalid arguments or a malformed log,
 * -ENOMEM when the log buffer is exhausted. On failure the log is unchanged
 * (the cursor is only committed at the end).
 */
int tpm_log_add_event(struct tpm_log_info *log,
		      uint32_t event_type, enum tpm_pcr_idx pcr,
		      struct tpm_log_digests *digests,
		      const unsigned char *event_data, size_t event_data_bytes)
{
	const id_event_misc_data_t *id_event;
	const char *const buf_end = log->buf + log->buf_bytes;
	char *cur = log->cursor, *cur_next;
	/* Non-NULL once a Startup Locality event has been written this call. */
	char *startup_ev = NULL;
	int rc;

	if ((rc = check_arg_event_type(event_type, pcr, digests,
				       event_data, event_data_bytes))) {
		return rc;
	}

	if (!(id_event = tpm_log_get_id_event(log))) {
		return -EINVAL;
	}

	if (digests && (rc = check_arg_digests(id_event, digests))) {
		return rc;
	}

	/*
	 * The Startup Locality event should be placed in the log before
	 * any event that extends PCR[0], and must appear only once.
	 *
	 * Ref. TCG PC Client Platform Firmware Profile 9.4.5.3
	 */
	if (pcr == TPM_PCR_0 && log->startup_locality_event_data == NULL) {
		startup_ev = cur;
		/* Locality 3 -- presumably the D-CRTM launch locality; TODO confirm. */
		if ((rc = add_startup_locality_event2(log, cur, 3, &cur))) {
			return rc;
		}
	}

	cur_next = cur + offsetof(event2_header_t, digests);
	if (cur_next > buf_end) {
		return -ENOMEM;
	}
	/* TCG_PCR_EVENT2.PCRIndex */
	((event2_header_t *)cur)->pcr_index = pcr;
	/* TCG_PCR_EVENT2.EventType */
	((event2_header_t *)cur)->event_type = event_type;
	cur = cur_next;

	/*
	 * TODO: Further event-specific handling, e.g. as per Section 9.4 "Event
	 * Descriptions":
	 * - EV_ACTION
	 * - EV_EFI_ACTION
	 */

	/* TCG_PCR_EVENT2.Digests */
	if ((rc = add_tpml_digest_values(log, cur, digests, &cur))) {
		return rc;
	}

	cur_next = cur + offsetof(event2_data_t, event);
	if (cur_next > buf_end) {
		return -ENOMEM;
	}
	/* TCG_PCR_EVENT2.EventSize */
	((event2_data_t *)cur)->event_size = event_data_bytes;
	cur = cur_next;

	/* End of event data */
	cur_next = cur + event_data_bytes;
	if (cur_next > buf_end) {
		return -ENOMEM;
	}
	/* TCG_PCR_EVENT2.Event */
	(void)memcpy(cur, event_data, event_data_bytes);
	cur = cur_next;

	log->cursor = cur;
	/*
	 * Record the Startup Locality event only once the whole event has been
	 * committed. Previously this marker was never set, so a duplicate
	 * Startup Locality event was emitted before every PCR[0] event.
	 */
	if (startup_ev != NULL) {
		log->startup_locality_event_data = (void *)startup_ev;
	}

	return 0;
}
/*
 * Copy the used part of the event log into `dst` (when non-NULL) and report
 * its size via `log_size_out` (when non-NULL). Either argument may be NULL,
 * e.g. to query the required size before allocating a destination buffer.
 */
void tpm_log_serialise(char *dst, const struct tpm_log_info *log,
		       size_t *log_size_out)
{
	const size_t used_bytes = (size_t)(log->cursor - log->buf);

	if (dst != NULL) {
		(void)memcpy(dst, log->buf, used_bytes);
	}
	if (log_size_out != NULL) {
		*log_size_out = used_bytes;
	}
}

View File

@ -4,8 +4,8 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef TCG_H
#define TCG_H
#ifndef TPM_LOG_PRIVATE_H
#define TPM_LOG_PRIVATE_H
#include <stdint.h>
@ -16,52 +16,6 @@
#define TCG_SPEC_VERSION_MINOR_TPM2 0
#define TCG_SPEC_ERRATA_TPM2 2
/*
* Event types
* Ref. Table 9 Events
* TCG PC Client Platform Firmware Profile Specification.
*/
#define EV_PREBOOT_CERT U(0x00000000)
#define EV_POST_CODE U(0x00000001)
#define EV_UNUSED U(0x00000002)
#define EV_NO_ACTION U(0x00000003)
#define EV_SEPARATOR U(0x00000004)
#define EV_ACTION U(0x00000005)
#define EV_EVENT_TAG U(0x00000006)
#define EV_S_CRTM_CONTENTS U(0x00000007)
#define EV_S_CRTM_VERSION U(0x00000008)
#define EV_CPU_MICROCODE U(0x00000009)
#define EV_PLATFORM_CONFIG_FLAGS U(0x0000000A)
#define EV_TABLE_OF_DEVICES U(0x0000000B)
#define EV_COMPACT_HASH U(0x0000000C)
#define EV_IPL U(0x0000000D)
#define EV_IPL_PARTITION_DATA U(0x0000000E)
#define EV_NONHOST_CODE U(0x0000000F)
#define EV_NONHOST_CONFIG U(0x00000010)
#define EV_NONHOST_INFO U(0x00000011)
#define EV_OMIT_BOOT_DEVICE_EVENTS U(0x00000012)
#define EV_EFI_EVENT_BASE U(0x80000000)
#define EV_EFI_VARIABLE_DRIVER_CONFIG U(0x80000001)
#define EV_EFI_VARIABLE_BOOT U(0x80000002)
#define EV_EFI_BOOT_SERVICES_APPLICATION U(0x80000003)
#define EV_EFI_BOOT_SERVICES_DRIVER U(0x80000004)
#define EV_EFI_RUNTIME_SERVICES_DRIVER U(0x80000005)
#define EV_EFI_GPT_EVENT U(0x80000006)
#define EV_EFI_ACTION U(0x80000007)
#define EV_EFI_PLATFORM_FIRMWARE_BLOB U(0x80000008)
#define EV_EFI_HANDOFF_TABLES U(0x80000009)
#define EV_EFI_HCRTM_EVENT U(0x80000010)
#define EV_EFI_VARIABLE_AUTHORITY U(0x800000E0)
/*
* TPM_ALG_ID constants.
* Ref. Table 9 - Definition of (UINT16) TPM_ALG_ID Constants
* Trusted Platform Module Library. Part 2: Structures
*/
#define TPM_ALG_SHA256 0x000B
#define TPM_ALG_SHA384 0x000C
#define TPM_ALG_SHA512 0x000D
/* TCG Platform Type */
#define PLATFORM_CLASS_CLIENT 0
#define PLATFORM_CLASS_SERVER 1
@ -72,40 +26,12 @@
#define SHA384_DIGEST_SIZE 48
#define SHA512_DIGEST_SIZE 64
enum {
/*
* SRTM, BIOS, Host Platform Extensions, Embedded
* Option ROMs and PI Drivers
*/
PCR_0 = 0,
/* Host Platform Configuration */
PCR_1,
/* UEFI driver and application Code */
PCR_2,
/* UEFI driver and application Configuration and Data */
PCR_3,
/* UEFI Boot Manager Code (usually the MBR) and Boot Attempts */
PCR_4,
/*
* Boot Manager Code Configuration and Data (for use
* by the Boot Manager Code) and GPT/Partition Table
*/
PCR_5,
/* Host Platform Manufacturer Specific */
PCR_6,
/* Secure Boot Policy */
PCR_7,
/* 8-15: Defined for use by the Static OS */
PCR_8,
/* Debug */
PCR_16 = 16
};
#pragma pack(push, 1)
/*
* PCR Event Header
* TCG EFI Protocol Specification
* TCG EFI Protocol Specification,
* Family "2.0", Level 00 Revision 00.13, March 30 2016.
* 5.3 Event Log Header
*/
typedef struct {
@ -140,7 +66,7 @@ typedef struct {
/* The size of the digest produced by the implemented Hash algorithm */
uint16_t digest_size;
} id_event_algorithm_size_t;
} id_event_alg_info_t;
/*
* TCG_EfiSpecIdEvent structure
@ -202,8 +128,8 @@ typedef struct {
* structure, the first of which is a Hash algorithmID and the second
* is the size of the respective digest.
*/
id_event_algorithm_size_t digest_size[]; /* number_of_algorithms */
} id_event_struct_header_t;
id_event_alg_info_t digest_sizes[]; /* number_of_algorithms */
} id_event_misc_data_t;
typedef struct {
/*
@ -222,17 +148,17 @@ typedef struct {
*
*/
uint8_t vendor_info[]; /* [vendorInfoSize] */
} id_event_struct_data_t;
} id_event_vendor_data_t;
typedef struct {
id_event_struct_header_t struct_header;
id_event_struct_data_t struct_data;
} id_event_struct_t;
id_event_misc_data_t id_event_misc_data;
id_event_vendor_data_t id_event_vendor_data;
} __id_event_t;
typedef struct {
tcg_pcr_event_t header;
id_event_struct_header_t struct_header;
} id_event_headers_t;
tcg_pcr_event_t container;
id_event_misc_data_t id_event_misc_data;
} id_event_container_t;
/* TPMT_HA Structure */
typedef struct {
@ -243,7 +169,7 @@ typedef struct {
/* Digest, depends on AlgorithmId */
uint8_t digest[]; /* Digest[] */
} tpmt_ha;
} tpmt_ha_t;
/*
* TPML_DIGEST_VALUES Structure
@ -255,8 +181,8 @@ typedef struct {
/* The list of tagged digests, as sent to the TPM as part of a
* TPM2_PCR_Extend or as received from a TPM2_PCR_Event command
*/
tpmt_ha digests[]; /* Digests[Count] */
} tpml_digest_values;
tpmt_ha_t digests[]; /* Digests[Count] */
} tpml_digest_values_t;
/*
* TCG_PCR_EVENT2 header
@ -272,7 +198,7 @@ typedef struct {
* A counted list of tagged digests, which contain the digest of
* the event data (or external data) for all active PCR banks
*/
tpml_digest_values digests; /* Digests */
tpml_digest_values_t digests; /* Digests */
} event2_header_t;
typedef struct event2_data {
@ -297,8 +223,13 @@ typedef struct {
/* The Locality Indicator which sent the TPM2_Startup command */
uint8_t startup_locality;
} startup_locality_event_data_t;
typedef struct {
event2_data_t startup_event_header;
startup_locality_event_data_t startup_event_data;
} startup_locality_event_t;
#pragma pack(pop)
#endif /* TCG_H */
#endif /* TPM_LOG_PRIVATE_H */

View File

@ -86,6 +86,9 @@ DISABLE_BIN_GENERATION := 0
# compatibility.
DISABLE_MTPMU := 0
# Dynamic Root of Trust for Measurement support
DRTM_SUPPORT := 0
# Enable capability to disable authentication dynamically. Only meant for
# development platforms.
DYN_DISABLE_AUTH := 0

View File

@ -0,0 +1,57 @@
/*
* Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <stdbool.h>
#include <stddef.h>
#include <drivers/arm/smmu_v3.h>
#include <plat/arm/common/arm_config.h>
#include <platform_def.h>
#include <services/drtm_svc_plat.h>
/* FVP base platforms typically include a GPU, as per --list-instances. */
bool plat_has_non_host_platforms(void)
{
	return true;
}
bool plat_has_unmanaged_dma_peripherals(void)
{
	/*
	 * Note-LPT: As far as I can tell, RevC's --list-instances does not show
	 * devices that are described as DMA-capable but not managed by an SMMU
	 * in the FVP documentation.
	 * However, the SMMU seems to have only been introduced in the RevC
	 * revision.
	 */
	return (arm_config.flags & ARM_CONFIG_FVP_HAS_SMMUV3) == 0;
}
/* The FVP models at most one SMMUv3, present only on revisions that have it. */
unsigned int plat_get_total_num_smmus(void)
{
	return ((arm_config.flags & ARM_CONFIG_FVP_HAS_SMMUV3) != 0U) ? 1U : 0U;
}
/* Base addresses of the SMMUs present on this platform. */
static const uintptr_t smmus[] = {
	PLAT_FVP_SMMUV3_BASE,
};

/*
 * Report the platform's SMMU base addresses: the single SMMUv3 when the FVP
 * revision models one, otherwise an empty list.
 */
void plat_enumerate_smmus(const uintptr_t (*smmus_out)[],
			  size_t *smmu_count_out)
{
	const bool have_smmu =
		(arm_config.flags & ARM_CONFIG_FVP_HAS_SMMUV3) != 0U;

	if (have_smmu) {
		*(const uintptr_t **)smmus_out = smmus;
		*smmu_count_out = sizeof(smmus) / sizeof(smmus[0]);
	} else {
		*(const uintptr_t **)smmus_out = NULL;
		*smmu_count_out = 0;
	}
}

View File

@ -6,22 +6,23 @@
#include <drivers/measured_boot/event_log.h>
#include <plat/arm/common/plat_arm.h>
#include <lib/tpm/tpm_log.h>
/* FVP table with platform specific image IDs, names and PCRs */
static const image_data_t fvp_images_data[] = {
{ BL2_IMAGE_ID, BL2_STRING, PCR_0 }, /* Reserved for BL2 */
{ BL31_IMAGE_ID, BL31_STRING, PCR_0 },
{ BL32_IMAGE_ID, BL32_STRING, PCR_0 },
{ BL32_EXTRA1_IMAGE_ID, BL32_EXTRA1_IMAGE_STRING, PCR_0 },
{ BL32_EXTRA2_IMAGE_ID, BL32_EXTRA2_IMAGE_STRING, PCR_0 },
{ BL33_IMAGE_ID, BL33_STRING, PCR_0 },
{ GPT_IMAGE_ID, GPT_IMAGE_STRING, PCR_0 },
{ HW_CONFIG_ID, HW_CONFIG_STRING, PCR_0 },
{ NT_FW_CONFIG_ID, NT_FW_CONFIG_STRING, PCR_0 },
{ SCP_BL2_IMAGE_ID, SCP_BL2_IMAGE_STRING, PCR_0 },
{ SOC_FW_CONFIG_ID, SOC_FW_CONFIG_STRING, PCR_0 },
{ STM32_IMAGE_ID, STM32_IMAGE_STRING, PCR_0 },
{ TOS_FW_CONFIG_ID, TOS_FW_CONFIG_STRING, PCR_0 },
{ BL2_IMAGE_ID, BL2_STRING, TPM_PCR_0 }, /* Reserved for BL2 */
{ BL31_IMAGE_ID, BL31_STRING, TPM_PCR_0 },
{ BL32_IMAGE_ID, BL32_STRING, TPM_PCR_0 },
{ BL32_EXTRA1_IMAGE_ID, BL32_EXTRA1_IMAGE_STRING, TPM_PCR_0 },
{ BL32_EXTRA2_IMAGE_ID, BL32_EXTRA2_IMAGE_STRING, TPM_PCR_0 },
{ BL33_IMAGE_ID, BL33_STRING, TPM_PCR_0 },
{ GPT_IMAGE_ID, GPT_IMAGE_STRING, TPM_PCR_0 },
{ HW_CONFIG_ID, HW_CONFIG_STRING, TPM_PCR_0 },
{ NT_FW_CONFIG_ID, NT_FW_CONFIG_STRING, TPM_PCR_0 },
{ SCP_BL2_IMAGE_ID, SCP_BL2_IMAGE_STRING, TPM_PCR_0 },
{ SOC_FW_CONFIG_ID, SOC_FW_CONFIG_STRING, TPM_PCR_0 },
{ STM32_IMAGE_ID, STM32_IMAGE_STRING, TPM_PCR_0 },
{ TOS_FW_CONFIG_ID, TOS_FW_CONFIG_STRING, TPM_PCR_0 },
{ INVALID_ID, NULL, (unsigned int)(-1) } /* Terminator */
};

View File

@ -78,6 +78,11 @@
* plat_arm_mmap array defined for each BL stage.
*/
#if defined(IMAGE_BL31)
# if DRTM_SUPPORT
# define DRTM_SUPPORT_ADDED_XLAT_TABLES 1
# else
# define DRTM_SUPPORT_ADDED_XLAT_TABLES 0
# endif
# if SPM_MM
# define PLAT_ARM_MMAP_ENTRIES 10
# define MAX_XLAT_TABLES 9
@ -88,7 +93,7 @@
# if USE_DEBUGFS
# define MAX_XLAT_TABLES 8
# else
# define MAX_XLAT_TABLES 7
# define MAX_XLAT_TABLES (7 + DRTM_SUPPORT_ADDED_XLAT_TABLES)
# endif
# endif
#elif defined(IMAGE_BL32)
@ -176,7 +181,11 @@
#elif defined(IMAGE_BL2U)
# define PLATFORM_STACK_SIZE UL(0x400)
#elif defined(IMAGE_BL31)
# if DRTM_SUPPORT
# define PLATFORM_STACK_SIZE UL(0x1000)
# else
# define PLATFORM_STACK_SIZE UL(0x800)
# endif
#elif defined(IMAGE_BL32)
# define PLATFORM_STACK_SIZE UL(0x440)
#endif

View File

@ -377,3 +377,7 @@ endif
# dynamically if TRUSTED_BOARD_BOOT is set.
DYN_DISABLE_AUTH := 1
endif
ifeq (${DRTM_SUPPORT}, 1)
BL31_SOURCES += plat/arm/board/fvp/fvp_drtm_dma_prot.c
endif

View File

@ -146,7 +146,7 @@
* Put BL3-1 at the top of the Trusted SRAM. BL31_BASE is calculated using the
* current BL3-1 debug size plus a little space for growth.
*/
#define BL31_BASE (BL31_LIMIT - 0x20000)
#define BL31_BASE (BL31_LIMIT - 0x30000)
#define BL31_LIMIT (BL_RAM_BASE + BL_RAM_SIZE)
#define BL31_PROGBITS_LIMIT BL1_RW_BASE

View File

@ -222,3 +222,11 @@ $(eval $(call add_define,ARM_PRELOADED_DTB_BASE))
# Do not enable SVE
ENABLE_SVE_FOR_NS := 0
ifeq ($(DRTM_SUPPORT), 1)
BL31_SOURCES += \
drivers/arm/smmu/smmu_v3.c \
plat/qemu/qemu/qemu_virt_drtm_dma_prot.c \
plat/qemu/qemu/qemu_virt_drtm_res_tcb_hashes.c \
endif

View File

@ -0,0 +1,40 @@
/*
* Copyright (c) 2021 Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
/* Arm QEMU virt may have a GPU via cmd-line -virtio-gpu-device. */
bool plat_has_non_host_platforms(void)
{
	return true;
}
/* Note-LPT:
 * From the docs and source code it is unclear whether this platform has
 * DMA-capable devices, and if it does, whether their memory accesses always
 * go through IOMMU translation (SMMUv3). Therefore, assume for now that
 * there are no DMA-capable devices and therefore no (useful) SMMUs. This is
 * a supported DRTM case whereby full DMA protection is still advertised.
 */
bool plat_has_unmanaged_dma_peripherals(void)
{
	return false;
}
/* No SMMUs are assumed on this platform (see note above this file's hooks). */
unsigned int plat_get_total_num_smmus(void)
{
	return 0U;
}
/* Report an empty SMMU list: this platform is assumed to have none. */
void plat_enumerate_smmus(const uintptr_t (*smmus_out)[],
			  size_t *smmu_count_out)
{
	*(const uintptr_t **)smmus_out = NULL;
	*smmu_count_out = 0;
}

View File

@ -0,0 +1,86 @@
/*
* Copyright (c) 2021 Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*
* DRTM TCB hashes of QEMU virt platform.
*
*/
#include <lib/utils_def.h>
#include <services/drtm_svc_plat.h>
/* TODO: Add the DRTM TCB hash of the GTDT ACPI table. */
#if DRTM_SHA_ALG == 256 || DRTM_SHA_ALG == 384 || DRTM_SHA_ALG == 512
static const struct plat_drtm_tcb_hash plat_drtm_res_tcb_hashes[] = {
{
.hash_id.uchars = { 'T', 'P', 'M', '2' } /* ACPI data table signature 0x324d5054 */,
PLAT_DRTM_TCB_HASH_VAL_AND_SIZE(
#if DRTM_SHA_ALG == 256
0x53, 0xb3, 0x79, 0x5d, 0xc0, 0x3f, 0x04, 0xb8, 0x14, 0x7a, 0x87, 0x75, 0x1b, 0x98, 0x76, 0xf8,
0x69, 0x0f, 0x1b, 0x3e, 0x59, 0x05, 0x7d, 0x88, 0x59, 0xcc, 0xce, 0xed, 0xe4, 0xe1, 0x31, 0x3d,
#elif DRTM_SHA_ALG == 384
0xfe, 0x69, 0x77, 0x8b, 0x26, 0x68, 0x9c, 0xd4, 0x49, 0xd9, 0xa1, 0x06, 0xa3, 0x29, 0xd2, 0xb2,
0x42, 0xff, 0x46, 0x50, 0xcf, 0xd0, 0xaa, 0x5f, 0x12, 0x0e, 0x86, 0x5a, 0xb8, 0x7b, 0xa2, 0x7e,
0x8d, 0x33, 0x7c, 0x4d, 0xbe, 0xa5, 0x1b, 0x90, 0xd2, 0x59, 0x2f, 0xd7, 0x9d, 0xb0, 0xeb, 0x22,
#elif DRTM_SHA_ALG == 512
0xcf, 0xce, 0x6d, 0xe6, 0x02, 0x3f, 0x03, 0xe6, 0x06, 0xba, 0x7a, 0xd9, 0x82, 0x15, 0xba, 0x48,
0x71, 0xb5, 0x5d, 0xbe, 0xe4, 0xe7, 0x94, 0xe8, 0x17, 0x74, 0x4a, 0x1c, 0xe9, 0xc8, 0x57, 0xba,
0x06, 0xd9, 0x5a, 0xcf, 0x73, 0xa8, 0xeb, 0x3f, 0x39, 0xca, 0x95, 0xff, 0x41, 0xc0, 0xcc, 0x49,
0xc5, 0xc6, 0x32, 0xb3, 0x3b, 0x48, 0xba, 0x54, 0x56, 0x97, 0xb3, 0x17, 0x38, 0xbe, 0x5a, 0x11,
#endif
),
},
{
.hash_id.uchars = { 'M', 'C', 'F', 'G' } /* ACPI data table signature 0x4746434d */,
PLAT_DRTM_TCB_HASH_VAL_AND_SIZE(
#if DRTM_SHA_ALG == 256
0x95, 0xd9, 0x00, 0x26, 0x74, 0x20, 0x19, 0x57, 0xe5, 0x8b, 0xe5, 0x16, 0xda, 0xf2, 0x97, 0x29,
0xc2, 0xac, 0x8e, 0x92, 0x6b, 0xfb, 0x72, 0x56, 0xc1, 0xbc, 0x56, 0xfb, 0x8d, 0xc6, 0xeb, 0x01,
#elif DRTM_SHA_ALG == 384
0x32, 0xf7, 0x6a, 0x60, 0xc2, 0xeb, 0xec, 0x25, 0xe7, 0x80, 0xeb, 0x9d, 0x27, 0x66, 0xa1, 0xff,
0xa4, 0x5f, 0xf2, 0xa8, 0xfe, 0x13, 0xd3, 0x58, 0x43, 0x84, 0x2f, 0xdb, 0x17, 0x00, 0x7c, 0x23,
0x3b, 0xa7, 0xe5, 0x4e, 0x78, 0x98, 0xe2, 0x2d, 0xea, 0x45, 0xf4, 0x87, 0x3c, 0x13, 0x9f, 0xb9,
#elif DRTM_SHA_ALG == 512
0x42, 0x28, 0x9a, 0x7c, 0x2d, 0xb3, 0x7c, 0xf8, 0x85, 0x36, 0x51, 0x8f, 0x75, 0x0b, 0x27, 0xeb,
0xf4, 0xe9, 0x17, 0x5f, 0x46, 0xa0, 0xa7, 0xe2, 0x67, 0x4d, 0x9e, 0x9b, 0xc4, 0xdf, 0xe5, 0xcb,
0x8e, 0x53, 0xfe, 0xb8, 0x0e, 0xd4, 0x59, 0x69, 0xaa, 0x9f, 0xd4, 0x9a, 0xe2, 0x68, 0x1c, 0xaa,
0xcd, 0x54, 0x96, 0x07, 0x28, 0xaa, 0x10, 0xe0, 0xc4, 0xb7, 0x26, 0xb7, 0x0c, 0x0f, 0x9c, 0x3d,
#endif
),
},
{
.hash_id.uchars = { 'A', 'P', 'I', 'C' } /* ACPI data table signature 0x43495041 */,
PLAT_DRTM_TCB_HASH_VAL_AND_SIZE(
#if DRTM_SHA_ALG == 256
0x9e, 0x0e, 0x6a, 0x81, 0xb4, 0x79, 0x67, 0xa2, 0x83, 0xdb, 0xdc, 0xf1, 0x35, 0x02, 0xe0, 0x5c,
0x8d, 0x7a, 0x0a, 0x09, 0x3f, 0x5a, 0xd2, 0xbe, 0x3c, 0xb4, 0xd9, 0xd0, 0x47, 0x58, 0x87, 0x47,
#elif DRTM_SHA_ALG == 384
0xdd, 0xd8, 0xa6, 0x39, 0x59, 0x80, 0x0b, 0x72, 0x53, 0x87, 0x9f, 0xb2, 0x0a, 0xec, 0xbd, 0x7e,
0x47, 0x11, 0xae, 0xb6, 0xaf, 0x7d, 0x62, 0xc1, 0x6d, 0xf2, 0x0d, 0x23, 0xa5, 0x81, 0x66, 0x14,
0x76, 0x3a, 0xf9, 0xfa, 0x8f, 0x7f, 0xbc, 0xc7, 0xeb, 0x5d, 0x2c, 0x94, 0x61, 0xa5, 0xaa, 0x0b,
#elif DRTM_SHA_ALG == 512
0xd4, 0x65, 0xcc, 0x8f, 0x69, 0xab, 0x3e, 0xe8, 0xa3, 0x9e, 0xf2, 0x10, 0xf5, 0x3e, 0x4e, 0x75,
0x75, 0xc4, 0x97, 0xa4, 0x59, 0xb2, 0x62, 0x90, 0xd3, 0x5c, 0x29, 0x8d, 0xaa, 0x01, 0x90, 0x2a,
0xb2, 0x6e, 0xd0, 0x2a, 0x17, 0xea, 0xdc, 0x26, 0xf1, 0xd7, 0x10, 0x65, 0x62, 0xeb, 0xc8, 0xcf,
0x58, 0x14, 0x58, 0xb6, 0x01, 0x45, 0xad, 0x9f, 0x41, 0xdd, 0xfb, 0x28, 0xb0, 0x0f, 0x7a, 0x99,
#endif
),
},
};
void plat_enumerate_drtm_tcb_hashes(const struct plat_drtm_tcb_hash **hashes_out,
size_t *hashes_count_out)
{
*hashes_out = plat_drtm_res_tcb_hashes;
*hashes_count_out = ARRAY_SIZE(plat_drtm_res_tcb_hashes);
}
#else
#warning "Unrecognised DRTM_SHA_ALG"
#endif /* DRTM_SHA_ALG == 256 || ... */

View File

@ -1,3 +1,12 @@
--------------------------------------------------------------------------
Disclaimer: This branch contains Arm DRTM prototype code that is only
demonstrative and proof of concept. It is designed for experimentation
with Arm DRTM. Any productization should only use code delivered through
the master branch, and should not rely on the particular code structure of
this prototype or any other feature herein that is not part of the Arm
DRTM specification.
--------------------------------------------------------------------------
Trusted Firmware-A
==================

View File

@ -0,0 +1,119 @@
/*
* Copyright (c) 2021 Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*
* DRTM protected-resources cache
*
* Authors:
* Lucian Paul-Trifu <lucian.paultrifu@gmail.com>
*/
#include <common/debug.h>
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <services/drtm_cache.h>
/*
* XXX Note: the generic protected DRTM resources are being specialised into
* DRTM TCB hashes. Platform resources retrieved through the generic DRTM cache
* are going to be retrieved through bespoke interfaces instead.
* This file and drtm_qemu_virt_cached_resources_init.c will be removed once the
* transition is complete.
*/
static char cache[1 * 1024];
static char *cache_free = cache;
#define CACHE_END ((char *)cache + sizeof(cache))
#include "drtm_qemu_virt_cached_resources_init.c"
/*
 * Bump-allocate `bytes` bytes from the static cache arena; NULL when the
 * remaining space is insufficient. Allocations are never freed individually.
 */
static struct cached_res *cache_alloc(size_t bytes)
{
	struct cached_res *r;

	/*
	 * Compare sizes rather than pointers: this avoids undefined pointer
	 * overflow for huge `bytes`, and fixes the previous `>=` off-by-one
	 * that rejected an allocation exactly filling the remaining space.
	 */
	if (bytes > (size_t)(CACHE_END - cache_free)) {
		return NULL;
	}

	r = (struct cached_res *)cache_free;
	cache_free += bytes;

	return r;
}
/*
 * Zero the cache arena, then populate it from the platform's build-time
 * resource list (CACHED_RESOURCES_INIT .. CACHED_RESOURCES_INIT_END).
 * Entries with a non-NULL data_ptr are cached by reference; the rest are
 * copied into the arena. Stops at the first caching failure (with a WARN).
 */
void drtm_cache_init(void)
{
	const struct cached_res *r;

	memset(&cache, 0, sizeof(cache));

	r = CACHED_RESOURCES_INIT;
	while (r < CACHED_RESOURCES_INIT_END) {
		int rc;

		if (r->data_ptr) {
			/* By-reference entry: only the header is stored. */
			rc = drtm_cache_resource_ptr(r->id, r->bytes, r->data_ptr);
		} else {
			/* Inline entry: the payload is copied into the arena. */
			rc = drtm_cache_resource(r->id, r->bytes, r->data);
		}
		if (rc) {
			WARN("%s: drtm_cache_resource_opt() failed rc=%d\n", __func__, rc);
			break;
		}

		/* Inline entries are followed by their payload; skip over it. */
		r = (struct cached_res *)((char *)r + sizeof(*r)
		                          + (r->data_ptr ? 0 : r->bytes));
	}
}
/*
 * Store a resource in the cache under `id` (must be shorter than the id
 * field). When `copy_the_data` is true the `bytes` bytes at `data` are copied
 * inline after the record; otherwise only the pointer is retained (so `data`
 * must outlive the cache entry).
 *
 * Returns 0 on success, -EINVAL for a too-long id or NULL data, -ENOMEM when
 * the cache arena is full.
 */
int drtm_cache_resource_opt(const char *id, size_t bytes, const char *data,
                            bool copy_the_data)
{
	struct cached_res *res;
	/* Inline storage needs room for the payload after the header. */
	size_t bytes_req = sizeof(struct cached_res) + (copy_the_data ? bytes : 0);

	/* The id must fit, NUL included, in res->id. */
	if (strnlen(id, sizeof(res->id)) == sizeof(res->id) || !data) {
		return -EINVAL;
	}

	res = cache_alloc(bytes_req);
	if (!res) {
		return -ENOMEM;
	}

	(void)strlcpy(res->id, id, sizeof(res->id));
	res->bytes = bytes;
	if (copy_the_data) {
		res->data_ptr = NULL;
		/* Cast away const: the record lives in the writable arena. */
		(void)memcpy((char *)res->data, data, bytes);
	} else {
		res->data_ptr = data;
	}

	return 0;
}
void drtm_cache_get_resource(const char *id,
const char **res_out, size_t *res_out_bytes)
{
struct cached_res *r = (struct cached_res *)cache;
while ((char *)r < CACHE_END) {
if (strncmp(r->id, id, sizeof(r->id)) == 0) {
*res_out = r->data_ptr ? r->data_ptr : r->data;
*res_out_bytes = r->bytes;
return;
}
r = (struct cached_res *)((char *)r + sizeof(*r)
+ (r->data_ptr ? 0 : r->bytes));
}
*res_out = NULL;
*res_out_bytes = 0;
}

View File

@ -0,0 +1,19 @@
/*
* Copyright (c) 2021 Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#ifndef DRTM_CACHE_H
#define DRTM_CACHE_H
#pragma pack(push, 1)
/*
 * One cached-resource record. Records are laid out back-to-back in the cache
 * arena: when data_ptr is NULL the payload immediately follows the header
 * (in `data`); otherwise the record only references external storage.
 */
struct cached_res {
	char id[32];           /* Resource name; the lookup key. */
	size_t bytes;          /* Payload size in bytes. */
	const char *data_ptr;  /* If NULL, then the data follows. */
	const char data[];     /* Inline payload (used only when data_ptr == NULL). */
};
#pragma pack(pop)
#endif /* DRTM_CACHE_H */

View File

@ -0,0 +1,295 @@
/*
* Copyright (c) 2021 Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*
* DRTM DMA protection.
*
* Authors:
* Lucian Paul-Trifu <lucian.paultrifu@gmail.com>
*
*/
#include <stdint.h>
#include <string.h>
#include <common/debug.h>
#include <drivers/arm/smmu_v3.h>
#include <services/drtm_svc_plat.h>
#include <smccc_helpers.h>
#include "drtm_dma_prot.h"
#include "drtm_remediation.h"
#include "drtm_main.h"
/* Values for DRTM_PROTECT_MEMORY */
enum dma_prot_type {
PROTECT_NONE = -1,
PROTECT_MEM_ALL = 0,
PROTECT_MEM_REGION = 1,
};
struct dma_prot {
enum dma_prot_type type;
};
/*
* ________________________ LAUNCH success ________________________
* | Initial | -------------------> | Prot engaged |
* |````````````````````````| |````````````````````````|
* | request.type == NONE | | request.type != NONE |
* | | <------------------- | |
* `________________________' UNPROTECT_MEM `________________________'
*
* Transitions that are not shown correspond to ABI calls that do not change
* state and result in an error being returned to the caller.
*/
static struct dma_prot active_prot = {
.type = PROTECT_NONE,
};
/* Version-independent type. */
typedef struct drtm_dl_dma_prot_args_v1 struct_drtm_dl_dma_prot_args;
/*
 * One-time DRTM DMA-protection initialisation: sanity-check the platform's
 * DMA topology as reported by the plat_* hooks.
 *
 * Returns 0 when DRTM DMA protection can be provided, non-zero (1) when the
 * platform cannot guarantee it (unmanaged DMA peripherals, or not all SMMUs
 * discoverable).
 */
int drtm_dma_prot_init(void)
{
	bool must_init_fail = false;
	const uintptr_t *smmus;
	size_t num_smmus = 0;
	unsigned int num_smmus_total;

	/* Report presence of non-host platforms, for info only. */
	if (plat_has_non_host_platforms()) {
		WARN("DRTM: the platform includes trusted DMA-capable devices"
		     " (non-host platforms)\n");
	}

	/*
	 * DLME protection is uncertain on platforms with peripherals whose
	 * DMA is not managed by an SMMU. DRTM doesn't work on such platforms.
	 */
	if (plat_has_unmanaged_dma_peripherals()) {
		ERROR("DRTM: this platform does not provide DMA protection\n");
		must_init_fail = true;
	}

	/*
	 * Check that the platform reported all SMMUs.
	 * It is acceptable if the platform doesn't have any SMMUs when it
	 * doesn't have any DMA-capable devices.
	 */
	num_smmus_total = plat_get_total_num_smmus();

	plat_enumerate_smmus((const uintptr_t (*)[])&smmus, &num_smmus);
	if (num_smmus != num_smmus_total) {
		ERROR("DRTM: could not discover all SMMUs\n");
		must_init_fail = true;
	}

	/* Check any SMMUs enumerated. A base of 0 is suspicious but not fatal. */
	for (const uintptr_t *smmu = smmus; smmu < smmus + num_smmus; smmu++) {
		if (*smmu == 0) {
			WARN("DRTM: SMMU reported at unusual PA 0x0\n");
		}
	}

	return (int)must_init_fail;
}
/*
 * DRTM_FEATURES handler for the DMA-protection feature: returns two values
 * through the SMC context -- feature supported (1), and the support level
 * ("complete DMA protection", 1).
 */
uint64_t drtm_features_dma_prot(void *ctx)
{
	SMC_RET2(ctx, 1ULL, /* DMA protection feature is supported */
		 1u /* DMA protection support: Complete DMA protection. */
	);
}
/*
* Checks that the DMA protection arguments are valid and that the given
* protected regions would be covered by DMA protection.
*/
enum drtm_retc drtm_dma_prot_check_args(const struct_drtm_dl_dma_prot_args *a,
int a_dma_prot_type,
struct __protected_regions p)
{
switch ((enum dma_prot_type)a_dma_prot_type) {
case PROTECT_MEM_ALL:
if (a->dma_prot_table_paddr || a->dma_prot_table_size) {
ERROR("DRTM: invalid launch due to inconsistent"
" DMA protection arguments\n");
return MEM_PROTECT_INVALID;
}
/*
* Full DMA protection ought to ensure that the DLME and NWd
* DCE regions are protected, no further checks required.
*/
return SUCCESS;
default:
ERROR("DRTM: invalid launch due to unsupported DMA protection type\n");
return MEM_PROTECT_INVALID;
}
}
/*
 * Engage the DMA protection requested by the dynamic-launch arguments.
 * On success, records the engaged protection type in active_prot.
 */
enum drtm_retc drtm_dma_prot_engage(const struct_drtm_dl_dma_prot_args *a,
                                    int a_dma_prot_type)
{
	const uintptr_t *smmu_list;
	size_t smmu_count = 0;

	/* A previous launch's protection must have been released first. */
	if (active_prot.type != PROTECT_NONE) {
		ERROR("DRTM: launch denied as previous DMA protection"
		      " is still engaged\n");
		return DENIED;
	}

	if (a_dma_prot_type == PROTECT_NONE) {
		return SUCCESS;
	}
	/* Only PROTECT_MEM_ALL is supported currently. */
	if (a_dma_prot_type != PROTECT_MEM_ALL) {
		ERROR("%s(): unimplemented DMA protection type\n", __func__);
		panic();
	}

	/*
	 * Engage SMMUs in accordance with the request we have previously
	 * received.  Only PROTECT_MEM_ALL is implemented currently.
	 */
	plat_enumerate_smmus((const uintptr_t (*)[])&smmu_list, &smmu_count);
	for (size_t i = 0; i < smmu_count; i++) {
		/*
		 * TODO: Invalidate SMMU's Stage-1 and Stage-2 TLB entries.
		 * This ensures that any outstanding device transactions are
		 * completed, see Section 3.21.1, specification IHI_0070_C_a
		 * for an approximate reference.
		 */
		int rc = smmuv3_ns_set_abort_all(smmu_list[i]);

		if (rc != 0) {
			ERROR("DRTM: SMMU at PA 0x%lx failed to engage DMA protection"
			      " rc=%d\n", smmu_list[i], rc);
			return INTERNAL_ERROR;
		}
	}

	/*
	 * TODO: Restrict DMA from the GIC.
	 *
	 * Full DMA protection may be achieved as follows:
	 *
	 * With a GICv3:
	 * - Set GICR_CTLR.EnableLPIs to 0, for each GICR;
	 *   GICR_CTLR.RWP == 0 must be the case before finishing, for each GICR.
	 * - Set GITS_CTLR.Enabled to 0;
	 *   GITS_CTLR.Quiescent == 1 must be the case before finishing.
	 *
	 * In addition, with a GICv4:
	 * - Set GICR_VPENDBASER.Valid to 0, for each GICR;
	 *   GICR_CTLR.RWP == 0 must be the case before finishing, for each GICR.
	 *
	 * Alternatively, e.g. if some bit values cannot be changed at runtime,
	 * this procedure should return an error if the LPI Pending and
	 * Configuration tables overlap the regions being protected.
	 */

	active_prot.type = a_dma_prot_type;

	return SUCCESS;
}
/*
 * Undo what has previously been done in drtm_dma_prot_engage(), or enter
 * remediation if it is not possible.
 *
 * Returns SUCCESS if no protection was engaged; panics on an unknown
 * protection type.  For PROTECT_MEM_ALL, undoing the SMMU "abort all"
 * configuration is not implemented yet, so remediation is entered instead.
 */
enum drtm_retc drtm_dma_prot_disengage(void)
{
	if (active_prot.type == PROTECT_NONE) {
		return SUCCESS;
	/* Only PROTECT_MEM_ALL is supported currently. */
	} else if (active_prot.type != PROTECT_MEM_ALL) {
		ERROR("%s(): unimplemented DMA protection type\n", __func__);
		panic();
	}

	/*
	 * For PROTECT_MEM_ALL, undo the SMMU configuration for "abort all"
	 * mode done during engage().  That is not implemented yet, so simply
	 * enter remediation for now.
	 */
	drtm_enter_remediation(1, "cannot undo PROTECT_MEM_ALL SMMU configuration");

	/* TODO: Undo GIC DMA restrictions. */

	active_prot.type = PROTECT_NONE;
	return SUCCESS;
}
/*
 * Handle DRTM_UNPROTECT_MEMORY: release any DMA protection currently in
 * effect.  Returns DENIED if nothing was engaged.
 */
uint64_t drtm_unprotect_mem(void *ctx)
{
	enum drtm_retc ret;

	if (active_prot.type == PROTECT_NONE) {
		ERROR("DRTM: invalid UNPROTECT_MEM, no DMA protection has"
		      " previously been engaged\n");
		ret = DENIED;
	} else if (active_prot.type == PROTECT_MEM_ALL) {
		/*
		 * UNPROTECT_MEM is a no-op for PROTECT_MEM_ALL: DRTM must not
		 * touch the NS SMMU as it is expected that the DLME has
		 * configured it.
		 */
		active_prot.type = PROTECT_NONE;
		ret = SUCCESS;
	} else {
		ret = drtm_dma_prot_disengage();
	}

	SMC_RET1(ctx, ret);
}
/*
 * Serialise the DLME protected-regions table describing the memory covered
 * by the engaged DMA protection.
 *
 * dst      - destination buffer, or NULL to only query the size.
 * size_out - if non-NULL, receives the serialised size in bytes (0 when no
 *            protection is engaged).
 */
void drtm_dma_prot_serialise_table(char *dst, size_t *size_out)
{
	if (active_prot.type == PROTECT_NONE) {
		if (size_out) {
			*size_out = 0;
		}
		return;
	} else if (active_prot.type != PROTECT_MEM_ALL) {
		ERROR("%s(): unimplemented DMA protection type\n", __func__);
		panic();
	}

	/* For PROTECT_MEM_ALL, a single region describes all of memory. */
	struct __packed descr_table_1 {
		struct_drtm_mem_region_descr_table header;
		struct_drtm_mem_region_descr regions[1];
	} prot_table = {
		.header = {
			.version = 1,
			.num_regions = sizeof(((struct descr_table_1 *)NULL)->regions) /
				sizeof(((struct descr_table_1 *)NULL)->regions[0])
		},
#define PAGES_AND_TYPE(pages, type) \
	.pages_and_type = DRTM_MEM_REGION_PAGES_AND_TYPE(pages, type)
		.regions = {
			{.paddr = 0, PAGES_AND_TYPE(UINT64_MAX, 0x3)},
		}
	};
/* Keep the initialiser helper local to this function. */
#undef PAGES_AND_TYPE

	if (dst) {
		(void)memcpy(dst, &prot_table, sizeof(prot_table));
	}
	if (size_out) {
		*size_out = sizeof(prot_table);
	}
}

View File

@ -0,0 +1,71 @@
/*
* Copyright (c) 2021 Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#ifndef DRTM_DMA_PROT_H
#define DRTM_DMA_PROT_H
#include <stdint.h>
#include <lib/utils.h>
/* Dynamic-launch DMA-protection arguments, version 1. */
struct __packed drtm_dl_dma_prot_args_v1 {
	uint64_t dma_prot_table_paddr; /* PA of the protected-regions table. */
	uint64_t dma_prot_table_size;  /* Size of that table, in bytes. */
};
/* Opaque / encapsulated type. */
typedef struct drtm_dl_dma_prot_args_v1 drtm_dl_dma_prot_args_v1_t;
/* Regions that the engaged DMA protection must cover. */
struct __protected_regions {
	struct p_mem_region dlme_region;    /* The whole DLME region. */
	struct p_mem_region dce_nwd_region; /* The Normal World DCE region. */
};
/*
 * One memory-region descriptor: a physical base address plus a packed
 * (page count, region type) word.
 */
struct __packed drtm_mem_region_descr_v1 {
	uint64_t paddr;
	uint64_t pages_and_type; /* Bits [51:0] pages, bits [54:52] type. */
};
/* Pack a page count (bits [51:0]) and a region type (bits [54:52]). */
#define DRTM_MEM_REGION_PAGES_AND_TYPE(pages, type) \
	(((uint64_t)(pages) & (((uint64_t)1 << 52) - 1)) \
	 | (((uint64_t)(type) & 0x7) << 52))
/* Extract the page count, bits [51:0]. */
#define DRTM_MEM_REGION_PAGES(pages_and_type) \
	((uint64_t)(pages_and_type) & (((uint64_t)1 << 52) - 1))
/* Extract the region type, bits [54:52]. */
#define DRTM_MEM_REGION_TYPE(pages_and_type) \
	((uint8_t)((pages_and_type) >> 52 & 0x7))
/* Values for the 3-bit region-type field. */
enum drtm_mem_region_type {
	DRTM_MEM_REGION_TYPE_NORMAL = 0,
	DRTM_MEM_REGION_TYPE_NORMAL_WITH_CACHEABILITY_ATTRS = 1,
	DRTM_MEM_REGION_TYPE_DEVICE = 2,
	DRTM_MEM_REGION_TYPE_NON_VOLATILE = 3,
	DRTM_MEM_REGION_TYPE_RESERVED = 4,
};
/* Table of region descriptors, version 1. */
struct __packed drtm_mem_region_descr_table_v1 {
	uint16_t version; /* Must be 1. */
	uint8_t __res[2];
	uint32_t num_regions; /* Number of entries in regions[]. */
	struct drtm_mem_region_descr_v1 regions[];
};
/* Version-independent aliases for the current table layout. */
typedef struct drtm_mem_region_descr_v1 struct_drtm_mem_region_descr;
typedef struct drtm_mem_region_descr_table_v1 struct_drtm_mem_region_descr_table;
/* One-time init; returns non-zero if DMA protection is unavailable. */
int drtm_dma_prot_init(void);
/* DRTM_FEATURES handler for the DMA-protection feature. */
uint64_t drtm_features_dma_prot(void *ctx);
/* Validate the DMA-protection launch arguments against regions `p`. */
enum drtm_retc drtm_dma_prot_check_args(const drtm_dl_dma_prot_args_v1_t *a,
                                        int a_dma_prot_type,
                                        struct __protected_regions p);
/* Engage the requested DMA protection (PROTECT_MEM_ALL only). */
enum drtm_retc drtm_dma_prot_engage(const drtm_dl_dma_prot_args_v1_t *a,
                                    int a_dma_prot_type);
/* Undo drtm_dma_prot_engage(), or enter remediation if impossible. */
enum drtm_retc drtm_dma_prot_disengage(void);
/* DRTM_UNPROTECT_MEMORY handler. */
uint64_t drtm_unprotect_mem(void *ctx);
/* Serialise the protected-regions table; NULL dst means size query only. */
void drtm_dma_prot_serialise_table(char *dst, size_t *prot_table_size_out);
#endif /* DRTM_DMA_PROT_H */

View File

@ -0,0 +1,810 @@
/*
* Copyright (c) 2021 Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*
* DRTM service
*
* Authors:
* Lucian Paul-Trifu <lucian.paultrifu@gmail.com>
* Brian Nezvadovitz
*/
#include <stdint.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <plat/arm/common/plat_arm.h>
#include <plat/common/platform.h>
#include <services/drtm_svc.h>
#include <services/drtm_cache.h>
#include <tools_share/uuid.h>
#include "drtm_dma_prot.h"
#include "drtm_main.h"
#include "drtm_measurements.h"
#include "drtm_remediation.h"
#include "drtm_res_tcb_hashes.h"
#define XLAT_PAGE_SIZE PAGE_SIZE
#if XLAT_PAGE_SIZE != DRTM_PAGE_SIZE
#warning "xlat library page size differs from DRTM page size;"\
" mmap_add_dynamic_region() calls to the xlat library might fail"
#endif
/* The exception level at which the DLME will begin execution. */
enum drtm_dlme_el {
	DLME_AT_EL1, /* Launch returns to NS-EL1. */
	DLME_AT_EL2  /* Launch returns to NS-EL2. */
};

/*
 * Map a Non-secure exception-level number (1 or 2) onto the corresponding
 * DLME EL enumerator: 1 -> DLME_AT_EL1, 2 -> DLME_AT_EL2.
 * The subtraction is parenthesised inside the cast; the original cast bound
 * only to `el`, relying on an implicit int -> enum conversion of the result.
 */
static enum drtm_dlme_el drtm_dlme_el(unsigned int el)
{
	return (enum drtm_dlme_el)(el - 1U);
}
/*
 * Header at the start of the DLME data region, version 1.  The variable-size
 * components follow it in this order: protected-regions table, address map,
 * TPM event log, TCB-hashes table (see drtm_dl_prepare_dlme_data()).
 */
struct __packed dlme_data_header_v1 {
	uint16_t version;       /* Must be 1. */
	uint16_t this_hdr_size; /* Size of this header, in bytes. */
	uint8_t __res[4];
	uint64_t dlme_data_size; /* Total bytes written, header included. */
	uint64_t dlme_prot_regions_size;     /* Protected-regions table bytes. */
	uint64_t dlme_addr_map_size;         /* Address-map blob bytes. */
	uint64_t dlme_tpm_log_size;          /* Serialised event-log bytes. */
	uint64_t dlme_tcb_hashes_table_size; /* TCB-hashes table bytes. */
	uint64_t dlme_impdef_region_size;    /* Impl.-defined region; 0 for now. */
} __aligned(__alignof(uint16_t /* First member's type, `uint16_t version'. */));
typedef struct dlme_data_header_v1 struct_dlme_data_header;
/* MPIDR affinity of the PE that booted; the only PE allowed to launch. */
static uint64_t boot_pe_aff_value;
/*
 * Non-zero when TPM locality 2/3 may still be closed.  NOTE(review): the
 * only assignments that would set these to 1 are commented out in
 * drtm_smc_handler(), so CLOSE_LOCALITY currently always returns DENIED.
 */
static int locality2, locality3;
/* Return the highest implemented Non-secure EL: 2 if NS-EL2 exists, else 1. */
static unsigned int get_highest_ns_el_implemented(void)
{
	if (nonsecure_el_implemented(2) != EL_IMPL_NONE) {
		return 2;
	}
	return 1;
}
/*
 * One-time DRTM service initialisation, run during BL31 setup.
 * Returns 0 on success, or the first sub-initialiser's error code.
 */
int drtm_setup(void)
{
	int rc;

	INFO("++ DRTM service setup\n");

	/* Record the boot PE; only it may perform a dynamic launch later. */
	boot_pe_aff_value = read_mpidr_el1() & MPIDR_AFFINITY_MASK;

	rc = drtm_dma_prot_init();
	if (rc != 0) {
		return rc;
	}

	rc = drtm_tcb_hashes_init();
	if (rc != 0) {
		return rc;
	}

	drtm_cache_init();

	rc = drtm_measurements_init();
	if (rc != 0) {
		return rc;
	}

	return 0;
}
/*
 * Reject a dynamic launch from EL3 or from a non-AArch64 execution state.
 * The caller's EL and register width are decoded from the saved SPSR_EL3.
 */
static enum drtm_retc drtm_dl_check_caller_el(void *ctx)
{
	uint64_t spsr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_SPSR_EL3);
	uint64_t caller_el = (spsr_el3 >> MODE_EL_SHIFT) & MODE_EL_MASK;
	uint64_t caller_rw = (spsr_el3 >> MODE_RW_SHIFT) & MODE_RW_MASK;

	if (caller_el == MODE_EL3) {
		ERROR("DRTM: invalid launch from EL3\n");
		return DENIED;
	}

	if (caller_rw != MODE_RW_64) {
		ERROR("DRTM: invalid launch from non-AArch64 execution state\n");
		return DENIED;
	}

	return SUCCESS;
}
/*
 * A dynamic launch is only allowed from the boot PE, and only while every
 * other core is turned off.
 */
static enum drtm_retc drtm_dl_check_cores(void)
{
	uint64_t this_pe_aff_value = read_mpidr_el1() & MPIDR_AFFINITY_MASK;

	if (this_pe_aff_value != boot_pe_aff_value) {
		ERROR("DRTM: invalid launch on a non-boot PE\n");
		return DENIED;
	}

	if (psci_is_last_on_core_safe() < PLATFORM_CORE_COUNT) {
		ERROR("DRTM: invalid launch due to non-boot PE not being turned off\n");
		return DENIED;
	}

	return SUCCESS;
}
static enum drtm_retc drtm_dl_prepare_dlme_data(const struct_drtm_dl_args *args,
const drtm_event_log_t *ev_log,
size_t *dlme_data_size_out);
/*
 * Validate the dynamic-launch arguments structure pointed to by x1 and, if
 * valid, copy it into *a_out.
 *
 * Note: accesses to the dynamic launch args, and to the DLME data are
 * little-endian as required, thanks to TF-A BL31 init requirements.
 *
 * Returns SUCCESS, or INVALID_PARAMETERS / NOT_SUPPORTED / INTERNAL_ERROR /
 * a DMA-protection error code.
 */
static enum drtm_retc drtm_dl_check_args(uint64_t x1,
                                         struct_drtm_dl_args *a_out)
{
	uint64_t dlme_start, dlme_end;
	uint64_t dlme_img_start, dlme_img_ep, dlme_img_end;
	uint64_t dlme_data_start, dlme_data_end;
	uintptr_t args_mapping;
	size_t args_mapping_size;
	struct_drtm_dl_args *a;
	struct_drtm_dl_args args_buf;
	size_t dlme_data_size_req;
	struct __protected_regions protected_regions;
	int rc;
	enum drtm_retc ret;

	if (x1 % DRTM_PAGE_SIZE != 0) {
		ERROR("DRTM: parameters structure is not "
		      DRTM_PAGE_SIZE_STR "-aligned\n");
		return INVALID_PARAMETERS;
	}

	/* Map the (NS-memory) arguments structure read-only. */
	args_mapping_size = ALIGNED_UP(sizeof(struct_drtm_dl_args), DRTM_PAGE_SIZE);
	rc = mmap_add_dynamic_region_alloc_va(x1, &args_mapping, args_mapping_size,
			MT_MEMORY | MT_NS | MT_RO | MT_SHAREABILITY_ISH);
	if (rc) {
		WARN("DRTM: %s: mmap_add_dynamic_region() failed rc=%d\n",
		     __func__, rc);
		return INTERNAL_ERROR;
	}
	a = (struct_drtm_dl_args *)args_mapping;
	/*
	 * TODO: invalidate all data cache before reading the data passed by the
	 * DCE Preamble.  This is required to avoid / defend against racing with
	 * cache evictions.
	 */
	/* Snapshot the arguments to defend against TOC/TOU via the mapping. */
	args_buf = *a;

	rc = mmap_remove_dynamic_region(args_mapping, args_mapping_size);
	if (rc) {
		ERROR("%s(): mmap_remove_dynamic_region() failed unexpectedly"
		      " rc=%d\n", __func__, rc);
		panic();
	}
	a = &args_buf;

	if (a->version != 1) {
		ERROR("DRTM: parameters structure incompatible with major version %d\n",
		      ARM_DRTM_VERSION_MAJOR);
		return NOT_SUPPORTED;
	}

	if (!(a->dlme_img_off < a->dlme_size &&
	      a->dlme_data_off < a->dlme_size)) {
		ERROR("DRTM: argument offset is outside of the DLME region\n");
		return INVALID_PARAMETERS;
	}
	dlme_start = a->dlme_paddr;
	dlme_end = a->dlme_paddr + a->dlme_size;
	dlme_img_start = a->dlme_paddr + a->dlme_img_off;
	dlme_img_ep = DL_ARGS_GET_DLME_ENTRY_POINT(a);
	dlme_img_end = dlme_img_start + a->dlme_img_size;
	dlme_data_start = a->dlme_paddr + a->dlme_data_off;
	dlme_data_end = dlme_end;

	/*
	 * TODO: validate that the DLME physical address range is all NS memory,
	 * return INVALID_PARAMETERS if it is not.
	 * Note that this check relies on platform-specific information. For
	 * examples, see psci_plat_pm_ops->validate_ns_entrypoint() or
	 * arm_validate_ns_entrypoint().
	 */

	/* Check the DLME regions arguments. */
	if (dlme_start % DRTM_PAGE_SIZE) {
		ERROR("DRTM: argument DLME region is not "
		      DRTM_PAGE_SIZE_STR "-aligned\n");
		return INVALID_PARAMETERS;
	}

	if (!(dlme_start < dlme_end &&
	      dlme_start <= dlme_img_start && dlme_img_start < dlme_img_end &&
	      dlme_start <= dlme_data_start && dlme_data_start < dlme_data_end)) {
		ERROR("DRTM: argument DLME region is discontiguous\n");
		return INVALID_PARAMETERS;
	}

	if (dlme_img_start < dlme_data_end && dlme_data_start < dlme_img_end) {
		ERROR("DRTM: argument DLME regions overlap\n");
		return INVALID_PARAMETERS;
	}

	/* Check the DLME image region arguments. */
	if (dlme_img_start % DRTM_PAGE_SIZE) {
		ERROR("DRTM: argument DLME image region is not "
		      DRTM_PAGE_SIZE_STR "-aligned\n");
		return INVALID_PARAMETERS;
	}

	if (!(dlme_img_start <= dlme_img_ep && dlme_img_ep < dlme_img_end)) {
		ERROR("DRTM: DLME entry point is outside of the DLME image region\n");
		return INVALID_PARAMETERS;
	}

	if (dlme_img_ep % 4) {
		ERROR("DRTM: DLME image entry point is not 4-byte-aligned\n");
		return INVALID_PARAMETERS;
	}

	/* Check the DLME data region arguments. */
	if (dlme_data_start % DRTM_PAGE_SIZE) {
		ERROR("DRTM: argument DLME data region is not "
		      DRTM_PAGE_SIZE_STR "-aligned\n");
		return INVALID_PARAMETERS;
	}

	rc = drtm_dl_prepare_dlme_data(NULL, NULL, &dlme_data_size_req);
	if (rc) {
		ERROR("%s: drtm_dl_prepare_dlme_data() failed unexpectedly rc=%d\n",
		      __func__, rc);
		panic();
	}
	if (dlme_data_end - dlme_data_start < dlme_data_size_req) {
		ERROR("DRTM: argument DLME data region is short of %lu bytes\n",
		      dlme_data_size_req - (size_t)(dlme_data_end - dlme_data_start));
		return INVALID_PARAMETERS;
	}

	/* Check the Normal World DCE region arguments. */
	if (a->dce_nwd_paddr != 0) {
		/*
		 * These are 64-bit physical addresses.  (Previously uint32_t,
		 * which silently truncated the address range and could defeat
		 * the overlap check below.)
		 */
		uint64_t dce_nwd_start = a->dce_nwd_paddr;
		uint64_t dce_nwd_end = dce_nwd_start + a->dce_nwd_size;

		if (!(dce_nwd_start < dce_nwd_end)) {
			ERROR("DRTM: argument Normal World DCE region is discontiguous\n");
			return INVALID_PARAMETERS;
		}

		if (dce_nwd_start < dlme_end && dlme_start < dce_nwd_end) {
			ERROR("DRTM: argument Normal World DCE regions overlap\n");
			return INVALID_PARAMETERS;
		}
	}

	protected_regions = (struct __protected_regions) {
		.dlme_region = { a->dlme_paddr, a->dlme_size },
		.dce_nwd_region = { a->dce_nwd_paddr, a->dce_nwd_size },
	};
	ret = drtm_dma_prot_check_args(&a->dma_prot_args,
	                               DL_ARGS_GET_DMA_PROT_TYPE(a),
	                               protected_regions);
	if (ret != SUCCESS) {
		return ret;
	}

	*a_out = *a;
	return SUCCESS;
}
/*
 * Serialise the DLME data into the DLME data region, or, when args == NULL,
 * only compute the total number of bytes the region must provide.
 *
 * args               - validated dynamic-launch arguments, or NULL for a
 *                      size-only query.
 * drtm_event_log     - event log to serialise (unused for a size query).
 * dlme_data_size_out - if non-NULL, receives the required size in bytes.
 *
 * NOTE(review): the required-size total below sums only the variable
 * components and does not include sizeof(struct_dlme_data_header), even
 * though the header is written at the start of the region — confirm whether
 * the capacity check undercounts by the header size.
 */
static enum drtm_retc drtm_dl_prepare_dlme_data(const struct_drtm_dl_args *args,
                                                const drtm_event_log_t *drtm_event_log,
                                                size_t *dlme_data_size_out)
{
	int rc;
	size_t dlme_data_total_bytes_req = 0;
	uint64_t dlme_data_paddr;
	size_t dlme_data_max_size;
	uintptr_t dlme_data_mapping;
	size_t dlme_data_mapping_bytes;
	struct_dlme_data_header *dlme_data_hdr;
	char *dlme_data_cursor;
	size_t dlme_prot_tables_bytes;
	const char *dlme_addr_map;
	size_t dlme_addr_map_bytes;
	size_t drtm_event_log_bytes;
	size_t drtm_tcb_hashes_bytes;
	size_t serialised_bytes_actual;

	/* Size the DLME protected regions. */
	drtm_dma_prot_serialise_table(NULL, &dlme_prot_tables_bytes);
	dlme_data_total_bytes_req += dlme_prot_tables_bytes;

	/* Size the DLME address map. */
	drtm_cache_get_resource("address-map",
	                        &dlme_addr_map, &dlme_addr_map_bytes);
	dlme_data_total_bytes_req += dlme_addr_map_bytes;

	/* Size the DRTM event log. */
	drtm_serialise_event_log(NULL, drtm_event_log, &drtm_event_log_bytes);
	dlme_data_total_bytes_req += drtm_event_log_bytes;

	/* Size the TCB hashes table. */
	drtm_serialise_tcb_hashes_table(NULL, &drtm_tcb_hashes_bytes);
	dlme_data_total_bytes_req += drtm_tcb_hashes_bytes;

	/* Size the implementation-specific DLME region. */

	/* A NULL args means this was only a size query. */
	if (args == NULL) {
		if (dlme_data_size_out) {
			*dlme_data_size_out = dlme_data_total_bytes_req;
		}
		return SUCCESS;
	}

	dlme_data_paddr = args->dlme_paddr + args->dlme_data_off;
	dlme_data_max_size = args->dlme_size - args->dlme_data_off;

	/*
	 * The capacity of the given DLME data region is checked when
	 * the other dynamic launch arguments are.
	 */
	if (dlme_data_max_size < dlme_data_total_bytes_req) {
		ERROR("%s: assertion failed:"
		      " dlme_data_max_size (%ld) < dlme_data_total_bytes_req (%ld)\n",
		      __func__, dlme_data_max_size, dlme_data_total_bytes_req);
		panic();
	}

	/* Map the DLME data region as NS memory. */
	dlme_data_mapping_bytes = ALIGNED_UP(dlme_data_max_size, DRTM_PAGE_SIZE);
	rc = mmap_add_dynamic_region_alloc_va(dlme_data_paddr, &dlme_data_mapping,
	     dlme_data_mapping_bytes, MT_RW_DATA | MT_NS | MT_SHAREABILITY_ISH);
	if (rc) {
		WARN("DRTM: %s: mmap_add_dynamic_region() failed rc=%d\n", __func__, rc);
		return INTERNAL_ERROR;
	}
	dlme_data_hdr = (struct_dlme_data_header *)dlme_data_mapping;
	/* Variable-size components are laid out straight after the header. */
	dlme_data_cursor = (char *)dlme_data_hdr + sizeof(*dlme_data_hdr);

	/* Set the header version and size. */
	dlme_data_hdr->version = 1;
	dlme_data_hdr->this_hdr_size = sizeof(*dlme_data_hdr);

	/* Prepare DLME protected regions. */
	drtm_dma_prot_serialise_table(dlme_data_cursor, &serialised_bytes_actual);
	assert(serialised_bytes_actual == dlme_prot_tables_bytes);
	dlme_data_hdr->dlme_prot_regions_size = dlme_prot_tables_bytes;
	dlme_data_cursor += dlme_prot_tables_bytes;

	/* Prepare DLME address map. */
	if (dlme_addr_map) {
		memcpy(dlme_data_cursor, dlme_addr_map, dlme_addr_map_bytes);
	} else {
		WARN("DRTM: DLME address map is not in the cache\n");
	}
	dlme_data_hdr->dlme_addr_map_size = dlme_addr_map_bytes;
	dlme_data_cursor += dlme_addr_map_bytes;

	/* Prepare DRTM event log for DLME. */
	drtm_serialise_event_log(dlme_data_cursor, drtm_event_log,
	                         &serialised_bytes_actual);
	assert(serialised_bytes_actual <= drtm_event_log_bytes);
	dlme_data_hdr->dlme_tpm_log_size = serialised_bytes_actual;
	dlme_data_cursor += serialised_bytes_actual;

	/* Prepare the TCB hashes for DLME. */
	drtm_serialise_tcb_hashes_table(dlme_data_cursor, &serialised_bytes_actual);
	assert(serialised_bytes_actual == drtm_tcb_hashes_bytes);
	dlme_data_hdr->dlme_tcb_hashes_table_size = drtm_tcb_hashes_bytes;
	dlme_data_cursor += drtm_tcb_hashes_bytes;

	/* Implementation-specific region size is unused. */
	dlme_data_hdr->dlme_impdef_region_size = 0;
	dlme_data_cursor += 0;

	/* Prepare DLME data size: total bytes written, header included. */
	dlme_data_hdr->dlme_data_size = dlme_data_cursor - (char *)dlme_data_hdr;

	/* Unmap the DLME data region. */
	rc = mmap_remove_dynamic_region(dlme_data_mapping, dlme_data_mapping_bytes);
	if (rc) {
		ERROR("%s(): mmap_remove_dynamic_region() failed"
		      " unexpectedly rc=%d\n", __func__, rc);
		panic();
	}

	if (dlme_data_size_out) {
		*dlme_data_size_out = dlme_data_total_bytes_req;
	}
	return SUCCESS;
}
/*
 * Put the DLME EL's SCTLR into a known-safe state before the launch: MMU
 * off (the existing page-tables are untrusted), little-endian data
 * accesses, instruction and data caching allowed.
 */
static void drtm_dl_reset_dlme_el_state(enum drtm_dlme_el dlme_el)
{
	uint64_t sctlr;

	/*
	 * TODO: Set PE state according to the PSCI's specification of the initial
	 * state after CPU_ON, or to reset values if unspecified, where they exist,
	 * or define sensible values otherwise.
	 */

	if (dlme_el == DLME_AT_EL1) {
		sctlr = read_sctlr_el1();
	} else if (dlme_el == DLME_AT_EL2) {
		sctlr = read_sctlr_el2();
	} else {
		/* Not reached */
		ERROR("%s(): dlme_el has the unexpected value %d\n",
		      __func__, dlme_el);
		panic();
	}

	/* MMU off — the existing page-tables are untrusted; LE data accesses. */
	sctlr &= ~(SCTLR_M_BIT | SCTLR_EE_BIT);
	/* Allow instruction and data caching. */
	sctlr |= SCTLR_C_BIT | SCTLR_I_BIT;

	if (dlme_el == DLME_AT_EL1) {
		write_sctlr_el1(sctlr);
	} else if (dlme_el == DLME_AT_EL2) {
		write_sctlr_el2(sctlr);
	}
}
/*
 * Scrub the saved Non-secure context for the DLME: zero all GP registers
 * (including SP_EL0) and the DLME EL's stack pointer, and mask asynchronous
 * exceptions in the saved SPSR_EL3.
 */
static void drtm_dl_reset_dlme_context(enum drtm_dlme_el dlme_el)
{
	void *ns_ctx = cm_get_context(NON_SECURE);
	gp_regs_t *gpregs = get_gpregs_ctx(ns_ctx);
	uint64_t spsr_el3;

	/* Reset all gpregs, including SP_EL0. */
	memset(gpregs, 0, sizeof(*gpregs));

	/* Reset SP_ELx. */
	if (dlme_el == DLME_AT_EL1) {
		write_sp_el1(0);
	} else if (dlme_el == DLME_AT_EL2) {
		write_sp_el2(0);
	}

	/*
	 * DLME's async exceptions are masked to avoid a NWd attacker's timed
	 * interference with any state we established trust in or measured.
	 */
	spsr_el3 = read_ctx_reg(get_el3state_ctx(ns_ctx), CTX_SPSR_EL3);
	spsr_el3 |= SPSR_DAIF_MASK << SPSR_DAIF_SHIFT;
	write_ctx_reg(get_el3state_ctx(ns_ctx), CTX_SPSR_EL3, spsr_el3);
}
/*
 * Program the saved Non-secure ELR/SPSR so that the next ERET enters the
 * DLME at its entry point, at the chosen exception level.
 */
static void drtm_dl_prepare_eret_to_dlme(const struct_drtm_dl_args *args,
                                         enum drtm_dlme_el dlme_el)
{
	void *ctx = cm_get_context(NON_SECURE);
	uint64_t dlme_ep = DL_ARGS_GET_DLME_ENTRY_POINT(args);
	uint64_t spsr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_SPSR_EL3);

	/* Next ERET is to the DLME's EL. */
	spsr_el3 &= ~(MODE_EL_MASK << MODE_EL_SHIFT);
	if (dlme_el == DLME_AT_EL1) {
		spsr_el3 |= MODE_EL1 << MODE_EL_SHIFT;
	} else if (dlme_el == DLME_AT_EL2) {
		spsr_el3 |= MODE_EL2 << MODE_EL_SHIFT;
	}

	/* Next ERET is to the DLME entry point. */
	cm_set_elr_spsr_el3(NON_SECURE, dlme_ep, spsr_el3);
}
/*
 * Handle DRTM_DYNAMIC_LAUNCH: validate the launch, engage DMA protection,
 * take the measurements, prepare the DLME data, and arrange the ERET into
 * the DLME.  x1 is the PA of the launch-arguments structure.
 *
 * TODO:
 * - Close locality 3;
 * - See section 4.4 and section 4.5 for other requirements;
 */
static uint64_t drtm_dynamic_launch(uint64_t x1, void *handle)
{
	enum drtm_retc ret;
	struct_drtm_dl_args args;
	enum drtm_dlme_el dlme_el;
	drtm_event_log_t event_log;

	/*
	 * Non-secure interrupts are masked to avoid a NWd attacker's timed
	 * interference with any state we are establishing trust in or measuring.
	 * Note that in this particular implementation, both Non-secure and Secure
	 * interrupts are automatically masked consequence of the SMC call.
	 */

	/* Reject calls from EL3 or from a non-AArch64 caller. */
	if ((ret = drtm_dl_check_caller_el(handle))) {
		SMC_RET1(handle, ret);
	}

	/* Only the boot PE may launch, and only with all other cores off. */
	if ((ret = drtm_dl_check_cores())) {
		SMC_RET1(handle, ret);
	}

	/* Validate and snapshot the launch arguments pointed to by x1. */
	if ((ret = drtm_dl_check_args(x1, &args))) {
		SMC_RET1(handle, ret);
	}

	drtm_dl_ensure_tcb_hashes_are_final();

	/*
	 * Engage the DMA protections. The launch cannot proceed without the DMA
	 * protections due to potential TOC/TOU vulnerabilities w.r.t. the DLME
	 * region (and to the NWd DCE region).
	 */
	if ((ret = drtm_dma_prot_engage(&args.dma_prot_args,
					DL_ARGS_GET_DMA_PROT_TYPE(&args)))) {
		SMC_RET1(handle, ret);
	}

	/*
	 * The DMA protection is now engaged. Note that any failure mode that
	 * returns an error to the DRTM-launch caller must now disengage DMA
	 * protections before returning to the caller.
	 */
	if ((ret = drtm_take_measurements(&args, &event_log))) {
		goto err_undo_dma_prot;
	}

	if ((ret = drtm_dl_prepare_dlme_data(&args, &event_log, NULL))) {
		goto err_undo_dma_prot;
	}

	/*
	 * Note that, at the time of writing, the DRTM spec allows a successful
	 * launch from NS-EL1 to return to a DLME in NS-EL2. The practical risk
	 * of a privilege escalation, e.g. due to a compromised hypervisor, is
	 * considered small enough not to warrant the specification of additional
	 * DRTM conduits that would be necessary to maintain OSs' abstraction from
	 * the presence of EL2 were the dynamic launch only be allowed from the
	 * highest NS EL.
	 */

	dlme_el = drtm_dlme_el(get_highest_ns_el_implemented());

	/* Scrub the DLME EL's system-register state and the saved NS context. */
	drtm_dl_reset_dlme_el_state(dlme_el);
	drtm_dl_reset_dlme_context(dlme_el);

	/*
	 * TODO: Reset all SDEI event handlers, since they are untrusted. Both
	 * private and shared events for all cores must be unregistered.
	 * Note that simply calling SDEI ABIs would not be adequate for this, since
	 * there is currently no SDEI operation that clears private data for all PEs.
	 */

	drtm_dl_prepare_eret_to_dlme(&args, dlme_el);

	/*
	 * TODO: invalidate the instruction cache before jumping to the DLME.
	 * This is required to defend against potentially-malicious cache contents.
	 */

	/* Return the DLME region's address in x0, and the DLME data offset in x1.*/
	SMC_RET2(handle, args.dlme_paddr, args.dlme_data_off);

err_undo_dma_prot:
	;
	int rc;

	/* Failing to disengage is unrecoverable — panic rather than continue. */
	if ((rc = drtm_dma_prot_disengage())) {
		ERROR("%s(): drtm_dma_prot_disengage() failed unexpectedly"
		      " rc=%d\n", __func__, rc);
		panic();
	}

	SMC_RET1(handle, ret);
}
/*
 * DRTM_FEATURES query: TPM features.  x1 packs the PCR-usage schema
 * (bit 33), the hashing location (bit 32: 0 = firmware-based) and the
 * firmware hash algorithm identifier (low 32 bits).
 */
static uint64_t drtm_features_tpm(void *ctx)
{
	SMC_RET2(ctx, 1ULL, /* TPM feature is supported */
		 1ULL << 33 /* Default PCR usage schema */
		 | 0ULL << 32 /* Firmware-based hashing */
		 /* The firmware hashing algorithm */
		 | (uint32_t)DRTM_TPM_HASH_ALG << 0
	);
}
/*
 * DRTM_FEATURES query: minimum memory requirement.  Reports the number of
 * DRTM pages the DLME data region must provide, in the low 32 bits of x1.
 */
static uint64_t drtm_features_mem_req(void *ctx)
{
	size_t dlme_data_bytes_req;
	uint64_t dlme_data_pages_req;
	int rc;

	/* Size query only (NULL args): no region is written. */
	rc = drtm_dl_prepare_dlme_data(NULL, NULL, &dlme_data_bytes_req);
	if (rc != 0) {
		ERROR("%s(): drtm_dl_prepare_dlme_data() failed unexpectedly"
		      " rc=%d\n", __func__, rc);
		panic();
	}

	/* Round up to whole pages; the result must fit the 32-bit field. */
	dlme_data_pages_req = ALIGNED_UP(dlme_data_bytes_req, DRTM_PAGE_SIZE)
	                      / DRTM_PAGE_SIZE;
	if (dlme_data_pages_req > UINT32_MAX) {
		ERROR("%s(): dlme_data_pages_req is unexpectedly large"
		      " (does not fit in the bit-field)\n", __func__);
		panic();
	}

	SMC_RET2(ctx, 1ULL, /* Feature is supported */
		 0ULL << 32 /* Not using a Normal World DCE */
		 /* Minimum amount of space needed for the DLME data */
		 | (dlme_data_pages_req & 0xffffffffULL)
	);
}
/* DRTM_FEATURES query: the MPIDR affinity of the PE that booted. */
static uint64_t drtm_features_boot_pe_id(void *ctx)
{
	SMC_RET2(ctx, 1ULL, /* Boot PE feature is supported */
		 boot_pe_aff_value /* Boot PE identification */
	);
}
/*
 * Top-level SMC dispatcher for the ARM DRTM service.  Rejects Secure-world
 * callers, then dispatches on the function ID: version / feature queries,
 * memory unprotect, dynamic launch, locality close and error get/set.
 *
 * NOTE(review): locality2/locality3 are never set non-zero (the assignments
 * in the DYNAMIC_LAUNCH case are commented out), so CLOSE_LOCALITY
 * currently always returns DENIED for localities 2 and 3 — confirm intent.
 */
uint64_t drtm_smc_handler(uint32_t smc_fid,
			  uint64_t x1,
			  uint64_t x2,
			  uint64_t x3,
			  uint64_t x4,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	/* Check that the SMC call is from the Normal World. */
	if (is_caller_secure(flags)) {
		SMC_RET1(handle, NOT_SUPPORTED);
	}

	switch (smc_fid) {
	case ARM_DRTM_SVC_VERSION:
		INFO("++ DRTM service handler: version\n");
		/* Return the version of current implementation */
		SMC_RET1(handle, ARM_DRTM_VERSION);

	case ARM_DRTM_SVC_FEATURES:
		/* Bit 63 of x1 selects function-ID vs feature-ID queries. */
		if ((x1 >> 63 & 0x1U) == 0) {
			uint32_t func_id = x1;

			/* Dispatch function-based queries. */
			switch (func_id) {
			case ARM_DRTM_SVC_VERSION:
				INFO("++ DRTM service handler: DRTM_VERSION feature\n");
				SMC_RET1(handle, SUCCESS);

			case ARM_DRTM_SVC_FEATURES:
				INFO("++ DRTM service handler: DRTM_FEATURES feature\n");
				SMC_RET1(handle, SUCCESS);

			case ARM_DRTM_SVC_UNPROTECT_MEM:
				INFO("++ DRTM service handler: DRTM_UNPROTECT_MEMORY feature\n");
				SMC_RET1(handle, SUCCESS);

			case ARM_DRTM_SVC_DYNAMIC_LAUNCH:
				INFO("++ DRTM service handler: DRTM_DYNAMIC_LAUNCH feature\n");
				SMC_RET1(handle, SUCCESS);

			case ARM_DRTM_SVC_CLOSE_LOCALITY:
				INFO("++ DRTM service handler: DRTM_CLOSE_LOCALITY feature\n");
				SMC_RET1(handle, NOT_SUPPORTED);

			case ARM_DRTM_SVC_GET_ERROR:
				INFO("++ DRTM service handler: DRTM_GET_ERROR feature\n");
				SMC_RET1(handle, NOT_SUPPORTED);

			case ARM_DRTM_SVC_SET_ERROR:
				INFO("++ DRTM service handler: DRTM_SET_ERROR feature\n");
				SMC_RET1(handle, NOT_SUPPORTED);

			case ARM_DRTM_SVC_SET_TCB_HASH:
				INFO("++ DRTM service handler: DRTM_SET_TCB_HASH feature\n");
				SMC_RET1(handle, NOT_SUPPORTED);

			case ARM_DRTM_SVC_LOCK_TCB_HASHES:
				INFO("++ DRTM service handler: DRTM_LOCK_TCB_HASHES feature\n");
				SMC_RET1(handle, NOT_SUPPORTED);

			default:
				ERROR("Unknown ARM DRTM service function feature\n");
				SMC_RET1(handle, NOT_SUPPORTED);
			}
		} else {
			uint8_t feat_id = x1;

			/* Dispatch feature-based queries. */
			switch (feat_id) {
			case ARM_DRTM_FEATURES_TPM:
				INFO("++ DRTM service handler: TPM features\n");
				return drtm_features_tpm(handle);

			case ARM_DRTM_FEATURES_MEM_REQ:
				INFO("++ DRTM service handler: Min. mem."
				     " requirement features\n");
				return drtm_features_mem_req(handle);

			case ARM_DRTM_FEATURES_DMA_PROT:
				INFO("++ DRTM service handler: DMA protection features\n");
				return drtm_features_dma_prot(handle);

			case ARM_DRTM_FEATURES_BOOT_PE_ID:
				INFO("++ DRTM service handler: Boot PE ID features\n");
				return drtm_features_boot_pe_id(handle);

			case ARM_DRTM_FEATURES_TCB_HASHES:
				INFO("++ DRTM service handler: TCB-hashes features\n");
				return drtm_features_tcb_hashes(handle);

			default:
				ERROR("Unknown ARM DRTM service feature\n");
				SMC_RET1(handle, NOT_SUPPORTED);
			}
		}

	case ARM_DRTM_SVC_UNPROTECT_MEM:
		INFO("++ DRTM service handler: unprotect mem\n");
		return drtm_unprotect_mem(handle);

	case ARM_DRTM_SVC_DYNAMIC_LAUNCH:
		INFO("++ DRTM service handler: dynamic launch\n");
		//locality2 = 1;
		//locality3 = 1;
		return drtm_dynamic_launch(x1, handle);

	case ARM_DRTM_SVC_CLOSE_LOCALITY:
		INFO("++ DRTM service handler: close locality\n");
		/* Only localities 2 and 3 may be closed, and only while open. */
		if (x1 == 2) {
			if (locality2 == 1) {
				locality2 = 0;
				SMC_RET1(handle, SMC_OK);
			}
			SMC_RET1(handle, DENIED);
		}
		if (x1 == 3) {
			if (locality3 == 1) {
				locality3 = 0;
				SMC_RET1(handle, SMC_OK);
			}
			SMC_RET1(handle, DENIED);
		}
		SMC_RET1(handle, INVALID_PARAMETERS);

	case ARM_DRTM_SVC_GET_ERROR:
		INFO("++ DRTM service handler: get error\n");
		return drtm_get_error(handle);

	case ARM_DRTM_SVC_SET_ERROR:
		INFO("++ DRTM service handler: set error\n");
		return drtm_set_error(x1, handle);

	default:
		ERROR("Unknown ARM DRTM service call: 0x%x \n", smc_fid);
		SMC_RET1(handle, SMC_UNK);
	}
}

View File

@ -0,0 +1,70 @@
/*
* Copyright (c) 2021 Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#ifndef DRTM_MAIN_H
#define DRTM_MAIN_H
#include <stdint.h>
#include <lib/smccc.h>
#include "drtm_dma_prot.h"
/*
 * Round x up / down to the nearest multiple of a, which must be a power of
 * two (enforced by the runtime assert).
 *
 * NOTE(review): the mask `~(_a - _one)` has the type of `a`.  If `a` were an
 * unsigned type narrower than `x`, the mask would zero-extend and clear the
 * high bits of x.  Callers in this service pass DRTM_PAGE_SIZE (an int), for
 * which sign-extension makes the result correct — confirm before widening use.
 */
#define ALIGNED_UP(x, a) __extension__ ({ \
	__typeof__(a) _a = (a); \
	__typeof__(a) _one = 1; \
	assert(IS_POWER_OF_TWO(_a)); \
	((x) + (_a - _one)) & ~(_a - _one); \
})
#define ALIGNED_DOWN(x, a) __extension__ ({ \
	__typeof__(a) _a = (a); \
	__typeof__(a) _one = 1; \
	assert(IS_POWER_OF_TWO(_a)); \
	(x) & ~(_a - _one); \
})
/* The granule used throughout the DRTM service for mappings and alignment. */
#define DRTM_PAGE_SIZE (4 * (1 << 10))
#define DRTM_PAGE_SIZE_STR "4-KiB"
/* DRTM SMC return codes, as placed in x0 for the caller. */
enum drtm_retc {
	SUCCESS = SMC_OK,
	NOT_SUPPORTED = SMC_UNK,
	INVALID_PARAMETERS = -2,
	DENIED = -3,
	NOT_FOUND = -4,
	INTERNAL_ERROR = -5,
	MEM_PROTECT_INVALID = -6,
};
/*
 * Dynamic-launch arguments structure, version 1, as passed by the DCE
 * Preamble in NS memory (its PA is given in x1 of the launch SMC).
 * All offsets are relative to dlme_paddr.
 */
struct __packed drtm_dl_args_v1 {
	uint16_t version; /* Must be 1. */
	uint8_t __res[2];
	uint32_t features;       /* Packed flags; see DL_ARGS_GET_* below. */
	uint64_t dlme_paddr;     /* PA of the DLME region. */
	uint64_t dlme_size;      /* Size of the DLME region, in bytes. */
	uint64_t dlme_img_off;   /* Offset of the DLME image. */
	uint64_t dlme_img_ep_off; /* Entry-point offset within the image. */
	uint64_t dlme_img_size;  /* Size of the DLME image, in bytes. */
	uint64_t dlme_data_off;  /* Offset of the DLME data region. */
	uint64_t dce_nwd_paddr;  /* PA of the Normal World DCE (0 if none). */
	uint64_t dce_nwd_size;   /* Size of the Normal World DCE, in bytes. */
	drtm_dl_dma_prot_args_v1_t dma_prot_args;
} __aligned(__alignof(uint16_t /* First member's type, `uint16_t version' */));
/* DMA-protection type: features bits [5:3]. */
#define DL_ARGS_GET_DMA_PROT_TYPE(a)    (((a)->features >> 3) & 0x7U)
/* PCR schema: features bits [2:1]. */
#define DL_ARGS_GET_PCR_SCHEMA(a)	(((a)->features >> 1) & 0x3U)
/* Absolute PA of the DLME entry point. */
#define DL_ARGS_GET_DLME_ENTRY_POINT(a)	\
		(((a)->dlme_paddr + (a)->dlme_img_off + (a)->dlme_img_ep_off))

/*
 * Version-independent type. May be used to avoid excessive line of code
 * changes when migrating to new struct versions.
 */
typedef struct drtm_dl_args_v1 struct_drtm_dl_args;
#endif /* DRTM_MAIN_H */

View File

@ -0,0 +1,69 @@
/*
* Copyright (c) 2015-2020, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef MBEDTLS_CONFIG_H
#define MBEDTLS_CONFIG_H
/*
* Key algorithms currently supported on mbed TLS libraries
*/
/*
 * Key algorithms currently supported on mbed TLS libraries
 */
#define TF_MBEDTLS_RSA			1
#define TF_MBEDTLS_ECDSA		2
#define TF_MBEDTLS_RSA_AND_ECDSA	3

#define TF_MBEDTLS_USE_RSA (TF_MBEDTLS_KEY_ALG_ID == TF_MBEDTLS_RSA \
		|| TF_MBEDTLS_KEY_ALG_ID == TF_MBEDTLS_RSA_AND_ECDSA)
#define TF_MBEDTLS_USE_ECDSA (TF_MBEDTLS_KEY_ALG_ID == TF_MBEDTLS_ECDSA \
		|| TF_MBEDTLS_KEY_ALG_ID == TF_MBEDTLS_RSA_AND_ECDSA)

/*
 * Hash algorithms currently supported on mbed TLS libraries
 */
#define TF_MBEDTLS_SHA256		1
#define TF_MBEDTLS_SHA384		2
#define TF_MBEDTLS_SHA512		3

/*
 * Configuration file to build mbed TLS with the required features for
 * Trusted Boot
 */
#define MBEDTLS_PLATFORM_MEMORY
#define MBEDTLS_PLATFORM_NO_STD_FUNCTIONS
/* Prevent mbed TLS from using snprintf so that it can use tf_snprintf. */
#define MBEDTLS_PLATFORM_SNPRINTF_ALT

#define MBEDTLS_PLATFORM_C
#define MBEDTLS_MEMORY_BUFFER_ALLOC_C

/*
 * Select the SHA module matching DRTM_SHA_ALG (set via the build system).
 * NOTE(review): an unrecognised DRTM_SHA_ALG silently falls back to
 * MBEDTLS_SHA512_C instead of raising a preprocessor #error — confirm
 * whether a hard build failure was intended for unknown values.
 */
#if DRTM_SHA_ALG == 256
#define MBEDTLS_SHA256_C
#elif DRTM_SHA_ALG == 384 || DRTM_SHA_ALG == 512
#define MBEDTLS_SHA512_C
#else
#define MBEDTLS_SHA512_C
#endif

#define MBEDTLS_MD_C
#define MBEDTLS_ERROR_C
#define MBEDTLS_VERSION_C

/* Memory buffer allocator options */
#define MBEDTLS_MEMORY_ALIGN_MULTIPLE	8

/*
 * Prevent the use of 128-bit division which
 * creates dependency on external libraries.
 */
#define MBEDTLS_NO_UDBL_DIVISION

#ifndef __ASSEMBLER__
/* System headers required to build mbed TLS with the current configuration */
#include <stdlib.h>
#include <mbedtls/check_config.h>
#endif

/* Heap for MBEDTLS_MEMORY_BUFFER_ALLOC_C; sized for DRTM's hashing only. */
#define TF_MBEDTLS_HEAP_SIZE		U(4 * 1024)
#endif /* MBEDTLS_CONFIG_H */

View File

@ -0,0 +1,259 @@
/*
* Copyright (c) 2021 Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*
* DRTM measurements into TPM PCRs.
*
* Authors:
* Lucian Paul-Trifu <lucian.paultrifu@gmail.com>
*
*/
#include <assert.h>
#include <mbedtls/md.h>
#include <common/debug.h>
#include <drivers/auth/mbedtls/mbedtls_common.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include "drtm_main.h"
#include "drtm_measurements.h"
/*
 * The xlat library is used below for dynamically mapping the DLME image; if
 * its page size differs from the DRTM page size, region sizes rounded to
 * DRTM pages may be rejected.
 */
#define XLAT_PAGE_SIZE PAGE_SIZE
#if XLAT_PAGE_SIZE != DRTM_PAGE_SIZE
#warning "xlat library page size differs from DRTM page size;"\
" mmap_add_dynamic_region() calls to the xlat library might fail"
#endif
/* Arm DRTM event-log event types (Arm-defined range starting at 0x9000). */
#define DRTM_EVENT_ARM_BASE 0x9000U
#define DRTM_EVENT_TYPE(n) (DRTM_EVENT_ARM_BASE + (unsigned int)(n))
#define DRTM_EVENT_ARM_PCR_SCHEMA DRTM_EVENT_TYPE(1)
#define DRTM_EVENT_ARM_DCE DRTM_EVENT_TYPE(2)
#define DRTM_EVENT_ARM_DCE_PUBKEY DRTM_EVENT_TYPE(3)
#define DRTM_EVENT_ARM_DLME DRTM_EVENT_TYPE(4)
#define DRTM_EVENT_ARM_DLME_EP DRTM_EVENT_TYPE(5)
#define DRTM_EVENT_ARM_DEBUG_CONFIG DRTM_EVENT_TYPE(6)
#define DRTM_EVENT_ARM_NONSECURE_CONFIG DRTM_EVENT_TYPE(7)
#define DRTM_EVENT_ARM_DCE_SECONDARY DRTM_EVENT_TYPE(8)
#define DRTM_EVENT_ARM_TZFW DRTM_EVENT_TYPE(9)
#define DRTM_EVENT_ARM_SEPARATOR DRTM_EVENT_TYPE(10)
/* One-byte zero datum measured in place of absent components. */
#define DRTM_NULL_DATA ((unsigned char []){ 0 })
/* Event data recorded with the ARM_SEPARATOR events. */
#define DRTM_EVENT_ARM_SEP_DATA \
		(const unsigned char []){'A', 'R', 'M', '_', 'D', 'R', 'T', 'M' }
#if !defined(DRTM_TPM_HASH_ALG)
/*
 * This is an error condition. However, avoid emitting a further error message,
 * since an explanatory one will have already been emitted by the header file.
 */
#define DRTM_TPM_HASH_ALG TPM_ALG_NONE
#define DRTM_MBEDTLS_HASH_ALG MBEDTLS_MD_NONE
#else
/* E.g. DRTM_SHA_ALG == 256 selects MBEDTLS_MD_SHA256. */
#define DRTM_MBEDTLS_HASH_ALG \
	EXPAND_AND_COMBINE(MBEDTLS_MD_SHA, DRTM_SHA_ALG)
#endif
/*
 * Panic if `rc` is non-zero, reporting the stringized `func_call` tag.
 * Fixes vs original:
 *  - a space was missing between the stringized tag and "failed", producing
 *    messages like "tpm_log_initfailed unexpectedly";
 *  - wrapped in do { } while (0) so the macro behaves as a single statement
 *    (safe in unbraced if/else bodies).
 */
#define CHECK_RC(rc, func_call) \
	do { \
		if ((rc) != 0) { \
			ERROR("%s(): " #func_call " failed unexpectedly rc=%d\n", \
			      __func__, rc); \
			panic(); \
		} \
	} while (0)
/*
 * One-time initialisation of the measurement backend: brings up the mbed TLS
 * library used for hashing.  Always reports success (0).
 */
int drtm_measurements_init(void)
{
	const int success = 0;

	mbedtls_init();

	return success;
}
/*
 * Hash `data_len` bytes at `data_ptr` with the configured DRTM hash
 * algorithm, writing the digest to `output`.  Thin wrapper over mbedtls_md();
 * returns its mbed TLS error code (0 on success).
 */
#define calc_hash(data_ptr, data_len, output) \
	mbedtls_md(mbedtls_md_info_from_type((mbedtls_md_type_t)DRTM_MBEDTLS_HASH_ALG),\
	           data_ptr, data_len, output)
/*
 * Take the DRTM dynamic-launch measurements and record them in `log`:
 *  - PCR-17: DCE-related events (DCE stand-in, PCR schema, separator);
 *  - PCR-18: DLME-related events (PCR schema, DCE public-key stand-in,
 *    DLME image, DLME entry point, separator).
 *
 * `a` are the validated dynamic-launch arguments; `log` is the caller-owned
 * event-log buffer initialised here via tpm_log_init().
 *
 * Returns SUCCESS, or INTERNAL_ERROR if the DLME image cannot be mapped.
 * Unexpected helper failures panic through CHECK_RC().
 */
enum drtm_retc drtm_take_measurements(const struct_drtm_dl_args *a,
                                      struct drtm_event_log *log)
{
	/* Scratch holder for a single digest of the configured hash algorithm. */
	struct tpm_log_1digest_shaX {
		struct tpm_log_digests digests_1;
		struct tpm_log_digest d;
		unsigned char digest[MBEDTLS_MD_MAX_SIZE];
	} __packed __aligned(__alignof(struct tpm_log_digests));
	struct tpm_log_1digest_shaX digests_buf = {
		.digests_1 = {
			.count = 1,
		},
		.d = (struct tpm_log_digest) {
			.h_alg = DRTM_TPM_HASH_ALG,
			.buf_bytes = sizeof(((struct tpm_log_1digest_shaX *)0)->digest),
		},
		/* Positional initialiser zero-fills the remaining member (digest). */
		{0}
	};
	int rc;
	uint8_t pcr_schema;
	tpm_log_info_t *const tpm_log_info = &log->tpm_log_info;

	/* Start a fresh TPM event log in the caller-provided buffer. */
	rc = tpm_log_init(log->tpm_log_mem, sizeof(log->tpm_log_mem),
	                  (enum tpm_hash_alg[]){ DRTM_TPM_HASH_ALG }, 1,
	                  tpm_log_info);
	CHECK_RC(rc, tpm_log_init);

	/**
	 * Measurements extended into PCR-17.
	 *
	 * PCR-17: Measure the DCE image. Extend digest of (char)0 into PCR-17
	 * since the D-CRTM and the DCE are not separate.
	 */
	rc = calc_hash(DRTM_NULL_DATA, sizeof(DRTM_NULL_DATA), digests_buf.digest);
	CHECK_RC(rc, calc_hash(NULL_DATA_1));
	rc = tpm_log_add_event(tpm_log_info, DRTM_EVENT_ARM_DCE, TPM_PCR_17,
	                       &digests_buf.digests_1, NULL, 0);
	CHECK_RC(rc, tpm_log_add_event_arm_dce);

	/* PCR-17: Measure the PCR schema DRTM launch argument. */
	pcr_schema = DL_ARGS_GET_PCR_SCHEMA(a);
	rc = calc_hash(&pcr_schema, sizeof(pcr_schema), digests_buf.digest);
	CHECK_RC(rc, calc_hash(pcr_schema));
	rc = tpm_log_add_event(tpm_log_info, DRTM_EVENT_ARM_PCR_SCHEMA, TPM_PCR_17,
	                       &digests_buf.digests_1, NULL, 0);
	CHECK_RC(rc, tpm_log_add_event(ARM_PCR_SCHEMA_17));

	/* PCR-17: Measure the enable state of external-debug, and trace. */
	/*
	 * TODO: Measure the enable state of external-debug and trace. This should
	 * be returned through a platform-specific hook.
	 */

	/* PCR-17: Measure the security lifecycle state. */
	/*
	 * TODO: Measure the security lifecycle state. This is an implementation-
	 * defined value, retrieved through an implementation-defined mechanisms.
	 */

	/*
	 * PCR-17: Optionally measure the NWd DCE.
	 * It is expected that such subsequent DCE stages are signed and verified.
	 * Whether they are measured in addition to signing is implementation
	 * -defined.
	 * Here the choice is to not measure any NWd DCE, in favour of PCR value
	 * resilience to any NWd DCE updates.
	 */

	/* PCR-17: End of DCE measurements. */
	rc = calc_hash(DRTM_EVENT_ARM_SEP_DATA, sizeof(DRTM_EVENT_ARM_SEP_DATA),
	               digests_buf.digest);
	CHECK_RC(rc, calc_hash(ARM_SEP_DATA_17));
	rc = tpm_log_add_event(tpm_log_info, DRTM_EVENT_ARM_SEPARATOR, TPM_PCR_17,
	                       &digests_buf.digests_1,
	                       DRTM_EVENT_ARM_SEP_DATA, sizeof(DRTM_EVENT_ARM_SEP_DATA));
	CHECK_RC(rc, tpm_log_add_event(ARM_SEPARATOR_17));

	/**
	 * Measurements extended into PCR-18.
	 *
	 * PCR-18: Measure the PCR schema DRTM launch argument.
	 */
	pcr_schema = DL_ARGS_GET_PCR_SCHEMA(a);
	rc = calc_hash(&pcr_schema, sizeof(pcr_schema), digests_buf.digest);
	CHECK_RC(rc, calc_hash(pcr_schema));
	/*
	 * NOTE(review): the error tag below reads ARM_PCR_SCHEMA_17 although
	 * this event targets PCR-18 -- presumably a copy-paste slip in the
	 * panic-message label only; confirm before renaming.
	 */
	rc = tpm_log_add_event(tpm_log_info, DRTM_EVENT_ARM_PCR_SCHEMA, TPM_PCR_18,
	                       &digests_buf.digests_1, NULL, 0);
	CHECK_RC(rc, tpm_log_add_event(ARM_PCR_SCHEMA_17));

	/*
	 * PCR-18: Measure the public key used to verify DCE image(s) signatures.
	 * Extend digest of (char)0, since we do not expect the NWd DCE to be
	 * present.
	 */
	assert(a->dce_nwd_size == 0);
	rc = calc_hash(DRTM_NULL_DATA, sizeof(DRTM_NULL_DATA), digests_buf.digest);
	CHECK_RC(rc, calc_hash(NULL_DATA_2));
	rc = tpm_log_add_event(tpm_log_info, DRTM_EVENT_ARM_DCE_PUBKEY, TPM_PCR_18,
	                       &digests_buf.digests_1, NULL, 0);
	CHECK_RC(rc, tpm_log_add_event(ARM_DCE_PUBKEY));

	/* PCR-18: Measure the DLME image. */
	uintptr_t dlme_img_mapping;
	size_t dlme_img_mapping_bytes;

	/* Map the DLME image (NS memory) read-only for hashing, then unmap. */
	dlme_img_mapping_bytes = ALIGNED_UP(a->dlme_img_size, DRTM_PAGE_SIZE);
	rc = mmap_add_dynamic_region_alloc_va(a->dlme_paddr + a->dlme_img_off,
	                                      &dlme_img_mapping,
	                                      dlme_img_mapping_bytes, MT_RO_DATA | MT_NS);
	if (rc) {
		WARN("DRTM: %s: mmap_add_dynamic_region() failed rc=%d\n", __func__, rc);
		return INTERNAL_ERROR;
	}
	rc = calc_hash((void *)dlme_img_mapping, a->dlme_img_size,
	               digests_buf.digest);
	CHECK_RC(rc, calc_hash(dlme_img));
	rc = tpm_log_add_event(tpm_log_info, DRTM_EVENT_ARM_DLME, TPM_PCR_18,
	                       &digests_buf.digests_1, NULL, 0);
	CHECK_RC(rc, tpm_log_add_event(ARM_DLME));
	rc = mmap_remove_dynamic_region(dlme_img_mapping, dlme_img_mapping_bytes);
	CHECK_RC(rc, mmap_remove_dynamic_region);

	/* PCR-18: Measure the DLME image entry point. */
	uint64_t dlme_img_ep = DL_ARGS_GET_DLME_ENTRY_POINT(a);

	rc = calc_hash((unsigned char *)&dlme_img_ep, sizeof(dlme_img_ep),
	               digests_buf.digest);
	CHECK_RC(rc, calc_hash(dlme_img_ep_off));
	rc = tpm_log_add_event(tpm_log_info, DRTM_EVENT_ARM_DLME_EP, TPM_PCR_18,
	                       &digests_buf.digests_1, NULL, 0);
	CHECK_RC(rc, tpm_log_add_event(ARM_DLME_EP));

	/* PCR-18: End of DCE measurements. */
	rc = calc_hash(DRTM_EVENT_ARM_SEP_DATA, sizeof(DRTM_EVENT_ARM_SEP_DATA),
	               digests_buf.digest);
	CHECK_RC(rc, calc_hash(ARM_SEP_DATA_18));
	rc = tpm_log_add_event(tpm_log_info, DRTM_EVENT_ARM_SEPARATOR, TPM_PCR_18,
	                       &digests_buf.digests_1,
	                       DRTM_EVENT_ARM_SEP_DATA, sizeof(DRTM_EVENT_ARM_SEP_DATA));
	CHECK_RC(rc, tpm_log_add_event(ARM_SEPARATOR_18));

	/*
	 * If the DCE is unable to log a measurement because there is no available
	 * space in the event log region, the DCE must extend a hash of the value
	 * 0xFF (1 byte in size) into PCR[17] and PCR[18] and enter remediation.
	 */

	return SUCCESS;
}
/*
 * Serialise the DRTM event log `src` into `dst`, reporting the number of
 * bytes written through `event_log_size_out`.
 *
 * When `src` is NULL this acts as a size query: `dst` must also be NULL (a
 * non-NULL destination with no log is a programming error and panics), and
 * the advertised size is the initial event-log allocation.
 */
void drtm_serialise_event_log(char *dst, const struct drtm_event_log *src,
                              size_t *event_log_size_out)
{
	if (src == NULL) {
		if (dst != NULL) {
			ERROR("%s(): cannot serialise the unexpected NULL event log\n",
			      __func__);
			panic();
		}
		if (event_log_size_out != NULL) {
			/*
			 * DRTM Beta0: Note that the advertised minimum required size ought
			 * to be 64KiB, rather than a more economical size of our choosing.
			 */
			*event_log_size_out = DRTM_EVENT_LOG_INIT_SIZE;
		}
		return;
	}

	tpm_log_serialise(dst, &src->tpm_log_info, event_log_size_out);
}

View File

@ -0,0 +1,50 @@
/*
* Copyright (c) 2021 Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#ifndef DRTM_MEASUREMENTS_H
#define DRTM_MEASUREMENTS_H

#include <stddef.h>	/* size_t */
#include <stdint.h>

#include <lib/tpm/tpm_log.h>

#include "drtm_main.h"

/* Initial (and advertised minimum) size of the DRTM event-log buffer. */
#define DRTM_EVENT_LOG_INIT_SIZE ((size_t)(768))

#if !defined(DRTM_SHA_ALG)
#error "The DRTM service requires definition of the DRTM_SHA_ALG macro"
#else
#define COMBINE(a, b) a ## b
#define EXPAND_AND_COMBINE(a, b) COMBINE(a, b)
/* Map DRTM_SHA_ALG (256 / 384 / 512) to the matching TPM algorithm ID. */
#define DRTM_TPM_HASH_ALG EXPAND_AND_COMBINE(TPM_ALG_SHA, DRTM_SHA_ALG)

/* Digest size, in bytes, of the selected hash algorithm. */
#if DRTM_SHA_ALG == 256
#define DRTM_TPM_HASH_ALG_DSIZE 32
#elif DRTM_SHA_ALG == 384
#define DRTM_TPM_HASH_ALG_DSIZE 48
#elif DRTM_SHA_ALG == 512
#define DRTM_TPM_HASH_ALG_DSIZE 64
#else
/*
 * Fail fast on unsupported values; previously DRTM_TPM_HASH_ALG_DSIZE was
 * left silently undefined, producing confusing errors at its points of use.
 */
#error "Unsupported DRTM_SHA_ALG value; expected one of 256, 384 or 512"
#endif
#endif

/* DRTM event log: TPM log bookkeeping plus the backing storage buffer. */
struct drtm_event_log {
	tpm_log_info_t tpm_log_info;
	uint32_t tpm_log_mem[DRTM_EVENT_LOG_INIT_SIZE / sizeof(uint32_t)];
};
/* Opaque / encapsulated type. */
typedef struct drtm_event_log drtm_event_log_t;

/* One-time initialisation of the measurement backend; returns 0 on success. */
int drtm_measurements_init(void);
/* Take the dynamic-launch measurements from args `a` into event log `log`. */
enum drtm_retc drtm_take_measurements(const struct_drtm_dl_args *a,
                                      drtm_event_log_t *log);
/*
 * Serialise `src_log` into `dst`; with src_log == NULL (and dst == NULL)
 * only the required size is reported through `event_log_size_out`.
 */
void drtm_serialise_event_log(char *dst, const drtm_event_log_t *src_log,
                              size_t *event_log_size_out);

#endif /* DRTM_MEASUREMENTS_H */

View File

@ -0,0 +1,116 @@
/*
* Copyright (c) 2021 Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*
* DRTM protected resources
*/
#include "drtm_main.h"
#include "drtm_cache.h"
#include "drtm_dma_prot.h"
/*
* XXX Note: the generic protected DRTM resources are being specialised into
* DRTM TCB hashes. Platform resources retrieved through the generic DRTM cache
* are going to be retrieved through bespoke interfaces instead.
* This file and drtm_cache.c will be removed once the transition is complete.
*/
/*
 * Container for a DRTM memory-region descriptor table with a fixed number of
 * region slots.
 * NOTE(review): the `__descr_table_n` tag is a reserved identifier (leading
 * double underscore) -- consider renaming when this transitional file is
 * reworked (see the XXX note above).
 */
struct __packed __descr_table_n {
	struct_drtm_mem_region_descr_table header;
	struct_drtm_mem_region_descr regions[24];
};

/*
 * Hardcoded memory map of QEMU's virt platform, exposed below as the
 * "address-map" cached resource.
 *
 * NOTE(review): .num_regions is derived from the declared array length (24),
 * but only 17 region initialisers are present, so the zero-filled tail
 * entries are included in the advertised count. Per the maintenance note in
 * the initialiser, the array length presumably ought to match the populated
 * entries -- TODO confirm.
 */
static const struct __descr_table_n qemu_virt_address_map = {
	.header = {
		.version = 1,
		.num_regions = sizeof(((struct __descr_table_n *)NULL)->regions) /
		               sizeof(((struct __descr_table_n *)NULL)->regions[0])
	},
	/* See qemu/hw/arm/virt.c :
	 *
	 * static const MemMapEntry base_memmap[] = {
	 *     // Space up to 0x8000000 is reserved for a boot ROM
	 *     [VIRT_FLASH] =              {          0, 0x08000000 },
	 *     [VIRT_CPUPERIPHS] =         { 0x08000000, 0x00020000 },
	 *     // GIC distributor and CPU interfaces sit inside the CPU peripheral space
	 *     [VIRT_GIC_DIST] =           { 0x08000000, 0x00010000 },
	 *     [VIRT_GIC_CPU] =            { 0x08010000, 0x00010000 },
	 *     [VIRT_GIC_V2M] =            { 0x08020000, 0x00001000 },
	 *     [VIRT_GIC_HYP] =            { 0x08030000, 0x00010000 },
	 *     [VIRT_GIC_VCPU] =           { 0x08040000, 0x00010000 },
	 *     // The space in between here is reserved for GICv3 CPU/vCPU/HYP
	 *     [VIRT_GIC_ITS] =            { 0x08080000, 0x00020000 },
	 *     // This redistributor space allows up to 2*64kB*123 CPUs
	 *     [VIRT_GIC_REDIST] =         { 0x080A0000, 0x00F60000 },
	 *     [VIRT_UART] =               { 0x09000000, 0x00001000 },
	 *     [VIRT_RTC] =                { 0x09010000, 0x00001000 },
	 *     [VIRT_FW_CFG] =             { 0x09020000, 0x00000018 },
	 *     [VIRT_GPIO] =               { 0x09030000, 0x00001000 },
	 *     [VIRT_SECURE_UART] =        { 0x09040000, 0x00001000 },
	 *     [VIRT_SMMU] =               { 0x09050000, 0x00020000 },
	 *     [VIRT_PCDIMM_ACPI] =        { 0x09070000, MEMORY_HOTPLUG_IO_LEN },
	 *     [VIRT_ACPI_GED] =           { 0x09080000, ACPI_GED_EVT_SEL_LEN },
	 *     [VIRT_NVDIMM_ACPI] =        { 0x09090000, NVDIMM_ACPI_IO_LEN},
	 *     [VIRT_PVTIME] =             { 0x090a0000, 0x00010000 },
	 *     [VIRT_SECURE_GPIO] =        { 0x090b0000, 0x00001000 },
	 *     [VIRT_MMIO] =               { 0x0a000000, 0x00000200 },
	 *     // ...repeating for a total of NUM_VIRTIO_TRANSPORTS, each of that size
	 *     [VIRT_PLATFORM_BUS] =       { 0x0c000000, 0x02000000 },
	 *     [VIRT_SECURE_MEM] =         { 0x0e000000, 0x01000000 },
	 *     [VIRT_PCIE_MMIO] =          { 0x10000000, 0x2eff0000 },
	 *     [VIRT_PCIE_PIO] =           { 0x3eff0000, 0x00010000 },
	 *     [VIRT_PCIE_ECAM] =          { 0x3f000000, 0x01000000 },
	 *     // Actual RAM size depends on initial RAM and device memory settings
	 *     [VIRT_MEM] =                { GiB, LEGACY_RAMLIMIT_BYTES },
	 * };
	 *
	 * Note: When adjusting the regions below, please update the array length
	 * in the __descr_table_n structure accordingly.
	 *
	 */
	/* Round `bytes` up to whole DRTM pages and tag the region type. */
	#define PAGES_AND_TYPE(bytes, type) \
		.pages_and_type = DRTM_MEM_REGION_PAGES_AND_TYPE( \
			(size_t)(bytes) / DRTM_PAGE_SIZE + \
			((size_t)(bytes) % DRTM_PAGE_SIZE != 0), \
			DRTM_MEM_REGION_TYPE_##type)
	.regions = {
		{.paddr = 0, PAGES_AND_TYPE(0x08000000, NON_VOLATILE)},
		{.paddr = 0x08000000, PAGES_AND_TYPE(0x00021000, DEVICE)},
		{.paddr = 0x08030000, PAGES_AND_TYPE(0x00020000, DEVICE)},
		{.paddr = 0x08080000, PAGES_AND_TYPE(0x00F80000, DEVICE)},
		{.paddr = 0x09000000, PAGES_AND_TYPE(0x00001000, DEVICE)},
		{.paddr = 0x09010000, PAGES_AND_TYPE(0x00001000, DEVICE)},
		{.paddr = 0x09020000, PAGES_AND_TYPE(0x00000018, DEVICE)},
		{.paddr = 0x09030000, PAGES_AND_TYPE(0x00001000, DEVICE)},
		/* {.paddr = 0x09040000, PAGES_AND_TYPE(0x00001000, RESERVED)}, */
		{.paddr = 0x09050000, PAGES_AND_TYPE(0x00020000 + DRTM_PAGE_SIZE, DEVICE)},
		{.paddr = 0x09080000, PAGES_AND_TYPE(DRTM_PAGE_SIZE, DEVICE)},
		{.paddr = 0x09090000, PAGES_AND_TYPE(DRTM_PAGE_SIZE, DEVICE)},
		{.paddr = 0x090a0000, PAGES_AND_TYPE(0x00010000, DEVICE)},
		/* {.paddr = 0x090b0000, PAGES_AND_TYPE(0x00001000, RESERVED)}, */
		{.paddr = 0x0a000000, PAGES_AND_TYPE(0x00000200, DEVICE)},
		{.paddr = 0x0c000000, PAGES_AND_TYPE(0x02000000, DEVICE)},
		/* {.paddr = 0x0e000000, PAGES_AND_TYPE(0x01000000, RESERVED)}, */
		{.paddr = 0x10000000, PAGES_AND_TYPE(0x30000000, DEVICE)},
		/*
		 * At most 3 GiB RAM, to align with TF-A's max PA on ARM QEMU.
		 * Actual RAM size depends on initial RAM and device memory settings.
		 */
		{.paddr = 0x40000000, PAGES_AND_TYPE(0xc0000000 /* 3 GiB */, NORMAL)},
	},
	#undef PAGES_AND_TYPE
};
/*
 * Initial contents of the DRTM resource cache: a single entry exposing the
 * hardcoded QEMU-virt address map above under the ID "address-map".
 */
static const struct cached_res CACHED_RESOURCES_INIT[] = {
	{
		.id = "address-map",
		.bytes = sizeof(qemu_virt_address_map),
		.data_ptr = (char *)&qemu_virt_address_map,
	},
};

/* One-past-the-end iterator bound for the table above. */
#define CACHED_RESOURCES_INIT_END (CACHED_RESOURCES_INIT + \
	sizeof(CACHED_RESOURCES_INIT) / sizeof(CACHED_RESOURCES_INIT[0]))

View File

@ -0,0 +1,72 @@
/*
* Copyright (c) 2021 Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*
* DRTM support for DRTM error remediation.
*
*/
#include <stdint.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include "drtm_main.h"
/*
 * Persist the DRTM remediation error code.  Currently a stub: the code is
 * discarded and SUCCESS is always reported.
 */
static enum drtm_retc drtm_error_set(long long err_code)
{
	(void)err_code;	/* Not yet persisted. */

	/* TODO: Store the error code in non-volatile memory. */
	return SUCCESS;
}
/*
 * Retrieve the persisted DRTM remediation error code.  Currently a stub:
 * always reports 0 (no error) and SUCCESS.
 */
static enum drtm_retc drtm_error_get(long long *err_out)
{
	/* TODO: Get error code from non-volatile memory. */
	*err_out = 0;

	return SUCCESS;
}
/*
 * Record `err_code` as the remediation cause and reset into the remediation
 * boot path.  Does not return: resets the system (currently panics, since
 * reset support is not yet implemented).  `err_str` is a human-readable
 * description logged alongside the code.
 */
void drtm_enter_remediation(long long err_code, const char *err_str)
{
	int rc = drtm_error_set(err_code);

	if (rc != 0) {
		ERROR("%s(): drtm_error_set() failed unexpectedly rc=%d\n",
		      __func__, rc);
		panic();
	}

	NOTICE("DRTM: entering remediation of error:\n%lld\t\'%s\'\n",
	       err_code, err_str);

	/* TODO: Reset the system rather than panic(). */
	ERROR("%s(): system reset is not yet supported\n", __func__);
	panic();
}
/*
 * SMC handler: persist the remediation error code passed in x1.
 * Returns (via SMC) the drtm_error_set() status, SUCCESS on success.
 */
uintptr_t drtm_set_error(uint64_t x1, void *ctx)
{
	int rc = drtm_error_set(x1);

	if (rc != 0) {
		SMC_RET1(ctx, rc);
	}

	SMC_RET1(ctx, SUCCESS);
}
/*
 * SMC handler: fetch the persisted remediation error code.
 * Returns (via SMC) SUCCESS plus the error code, or the failure status of
 * drtm_error_get().
 */
uintptr_t drtm_get_error(void *ctx)
{
	long long stored_err;
	int rc = drtm_error_get(&stored_err);

	if (rc != 0) {
		SMC_RET1(ctx, rc);
	}

	SMC_RET2(ctx, SUCCESS, stored_err);
}

View File

@ -0,0 +1,15 @@
/*
* Copyright (c) 2021 Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*
*/
#ifndef DRTM_REMEDIATION_H
#define DRTM_REMEDIATION_H

/*
 * Previously this header relied on its includer for uintptr_t / uint64_t;
 * include <stdint.h> so it is self-contained.
 */
#include <stdint.h>

/* SMC handler: persist the remediation error code passed in x1. */
uintptr_t drtm_set_error(uint64_t x1, void *ctx);
/* SMC handler: return the persisted remediation error code. */
uintptr_t drtm_get_error(void *ctx);

/* Record `error_code` and reset into remediation; does not return. */
void drtm_enter_remediation(long long error_code, const char *error_str);

#endif /* DRTM_REMEDIATION_H */

View File

@ -0,0 +1,183 @@
/*
* Copyright (c) 2021 Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*
* DRTM resource: TCB hashes.
*
*/
#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <common/runtime_svc.h>
#include <services/drtm_svc_plat.h>
#include "drtm_measurements.h" /* DRTM_TPM_HASH_ALG and _DSIZE */
#include "drtm_remediation.h"
/* Serialised form of one TCB hash entry (TCB_HASHES_TABLE version 1). */
struct __packed drtm_tcb_hash_v1 {
	uint32_t hash_id;
	uint8_t hash_val[DRTM_TPM_HASH_ALG_DSIZE];
};

/* Serialised TCB_HASHES_TABLE header (version 1). */
struct __packed drtm_tcb_hash_table_hdr_v1 {
	uint16_t version;      /* Must be 1. */
	uint16_t num_hashes;
	uint32_t hashing_alg;
};

/* Version-agnostic types. */
typedef struct drtm_tcb_hash_table_hdr_v1 struct_drtm_tcb_hash_table_hdr;
typedef struct drtm_tcb_hash_v1 struct_drtm_tcb_hash;

/* Platform hash-value buffers must match the serialised entry size. */
CASSERT(sizeof(((struct plat_drtm_tcb_hash *)NULL)->hash_val)
        == sizeof(((struct_drtm_tcb_hash *)NULL)->hash_val),
	bad_plat_drtm_tcb_digest_buffer_size
);

/* Set once any TCB hash is recorded at runtime (see commented stubs below). */
static bool tcb_hashes_set_at_runtime;
/* Set once the TCB hash set has been locked by NWd firmware. */
static bool tcb_hashes_locked;
/* Default platform's DRTM TCB hashes enumeration -- no hashes. */
/*
 * Default platform's DRTM TCB hashes enumeration -- no hashes.
 * Platforms provide a strong definition to advertise their TCB hashes.
 *
 * The weak pragma is placed ahead of the definition: GCC documents that
 * applying #pragma weak after the symbol's definition/first use has
 * unreliable effect (the original code placed it after).
 */
#pragma weak plat_enumerate_drtm_tcb_hashes
void plat_enumerate_drtm_tcb_hashes(const struct plat_drtm_tcb_hash **hashes_out,
                                    size_t *hashes_count_out)
{
	*hashes_out = NULL;
	*hashes_count_out = 0;
}
/*
 * Validate the platform-provided DRTM TCB hashes: every hash value must be
 * exactly the digest size of the configured hash algorithm, and hash IDs
 * must be unique.  All entries are checked so every problem is reported.
 *
 * Returns 0 on success (including when the platform provides no hashes),
 * -EINVAL if any entry is invalid.
 *
 * Fix vs original: size_t loop indices were printed with %ld; use the
 * standard %zu length modifier instead.
 */
int drtm_tcb_hashes_init(void)
{
	const struct plat_drtm_tcb_hash *init_hashes;
	size_t num_init_hashes;
	bool init_hashes_invalid = false;

	plat_enumerate_drtm_tcb_hashes(&init_hashes, &num_init_hashes);
	if (init_hashes == NULL) {
		/* Nothing to validate. */
		return 0;
	}

	/* Validate the platform DRTM TCB hashes. */
	for (size_t j = 0; j < num_init_hashes; j++) {
		const struct plat_drtm_tcb_hash *plat_h = init_hashes + j;

		if (plat_h->hash_bytes != DRTM_TPM_HASH_ALG_DSIZE) {
			ERROR("DRTM: invalid hash value size of platform TCB hash"
			      " at index %zu\n", j);
			init_hashes_invalid = true;
		}

		/* Hash IDs must not repeat across the table. */
		for (size_t i = 0; i < j; i++) {
			const struct plat_drtm_tcb_hash *prev_h = init_hashes + i;

			if (plat_h->hash_id.uint32 == prev_h->hash_id.uint32) {
				ERROR("DRTM: duplicate hash value ID of platform TCB hash"
				      " at index %zu (duplicates ID at index %zu)\n", j, i);
				init_hashes_invalid = true;
			}
		}
	}
	if (init_hashes_invalid) {
		return -EINVAL;
	}

	return 0;
}
/*
 * DRTM_FEATURES query for the TCB-hashes feature: reports that TCB hashes
 * are supported, and that they may not be recorded at runtime.
 */
uint64_t drtm_features_tcb_hashes(void *ctx)
{
	/* Bits [63:8] MBZ; bit/byte 0 = 0: no runtime recording of hashes. */
	const uint64_t feature_flags = ((uint64_t)0 << 8) | (uint8_t)0;

	SMC_RET2(ctx, 1, /* TCB hashes supported. */
	         feature_flags);
}
/*
 * Dynamic-launch gate: if TCB hashes were added at runtime but never locked,
 * refuse to launch and enter remediation; otherwise return normally.
 */
void drtm_dl_ensure_tcb_hashes_are_final(void)
{
	if (tcb_hashes_set_at_runtime && !tcb_hashes_locked) {
		/*
		 * Some runtime TCB hashes were set, but the set of TCB hashes hasn't
		 * been locked / frozen by trusted Normal World firmware. Therefore
		 * there is no way to guarantee that the set of TCB hashes doesn't
		 * contain malicious ones from an untrusted Normal World component.
		 * Refuse to complete the dynamic launch, and reboot the system.
		 */
		drtm_enter_remediation(0x4, "TCB hashes are still open (missing LOCK call)");
	}
}
/*
* enum drtm_retc drtm_set_tcb_hash(uint64_t x1)
* {
* // Sets `tcb_hashes_set_at_runtime' when it succeeds
* }
*/
/*
* enum drtm_retc drtm_lock_tcb_hashes(void)
* {
* // Sets `tcb_hashes_locked' when it succeeds
* }
*/
/*
 * Serialise the TCB_HASHES_TABLE (v1 header followed by the hash entries)
 * into `dst`, reporting the serialised size through `size_out`.
 *
 * Two-pass protocol: calling with dst == NULL performs a dry run that only
 * computes the size (no bytes are written); `size_out` may likewise be NULL
 * when only the write is wanted.  If there are no hashes at all, nothing is
 * written and the reported size is 0.
 */
void drtm_serialise_tcb_hashes_table(char *dst, size_t *size_out)
{
	const struct plat_drtm_tcb_hash *init_hashes;
	size_t num_init_hashes;
	size_t num_hashes_total = 0;
	/* Write cursor; also used (vs dst) to compute the size in the dry run. */
	uintptr_t table_cur = (uintptr_t)dst;

	/* Enumerate all available TCB hashes. */
	plat_enumerate_drtm_tcb_hashes(&init_hashes, &num_init_hashes);
	num_hashes_total += num_init_hashes;
	if (num_hashes_total == 0) {
		goto serialise_tcb_hashes_table_done;
	}

	/* Serialise DRTM TCB_HASHES_TABLE header. */
	struct_drtm_tcb_hash_table_hdr hdr;

	hdr.version = 1;
	hdr.num_hashes = 0;
	hdr.num_hashes += num_init_hashes;
	hdr.hashing_alg = DRTM_TPM_HASH_ALG;
	if (dst) {
		memcpy((char *)table_cur, &hdr, sizeof(hdr));
	}
	table_cur += sizeof(hdr);

	/* Serialise platform DRTM TCB hashes. */
	for (const struct plat_drtm_tcb_hash *plat_h = init_hashes;
	     plat_h < init_hashes + num_init_hashes;
	     plat_h++) {
		struct_drtm_tcb_hash drtm_h;

		drtm_h.hash_id = plat_h->hash_id.uint32;
		/* This assertion follows from the init-time check. */
		assert(plat_h->hash_bytes == sizeof(drtm_h.hash_val));
		/* This assertion follows from the one above and the compile-time one.*/
		assert(plat_h->hash_bytes <= sizeof(plat_h->hash_val));
		memcpy(&drtm_h.hash_val, plat_h->hash_val, plat_h->hash_bytes);
		if (dst) {
			memcpy((char *)table_cur, &drtm_h, sizeof(drtm_h));
		}
		table_cur += sizeof(drtm_h);
	}

serialise_tcb_hashes_table_done:
	/* Return the number of bytes serialised. */
	if (size_out) {
		*size_out = table_cur - (uintptr_t)dst;
	}
}

View File

@ -0,0 +1,18 @@
/*
* Copyright (c) 2021 Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*
* DRTM resource: TCB hashes.
*
*/
#ifndef DRTM_RES_TCB_HASHES_H
#define DRTM_RES_TCB_HASHES_H

/*
 * Previously this header relied on its includer for size_t / uint64_t /
 * uintptr_t; include the standard headers so it is self-contained.
 */
#include <stddef.h>
#include <stdint.h>

/* Validate the platform TCB hashes; 0 on success, -EINVAL if invalid. */
int drtm_tcb_hashes_init(void);
/* DRTM_FEATURES SMC query for the TCB-hashes feature. */
uint64_t drtm_features_tcb_hashes(void *ctx);
/* Dynamic-launch gate: remediate if runtime TCB hashes were not locked. */
void drtm_dl_ensure_tcb_hashes_are_final(void);
/*
 * Serialise the TCB_HASHES_TABLE into `dst` (NULL for a size-only dry run),
 * reporting the size through `tcb_hashes_table_size_out`.
 */
void drtm_serialise_tcb_hashes_table(char *dst,
                                     size_t *tcb_hashes_table_size_out);

#endif /* DRTM_RES_TCB_HASHES_H */

View File

@ -13,6 +13,7 @@
#include <lib/pmf/pmf.h>
#include <lib/psci/psci.h>
#include <lib/runtime_instr.h>
#include <services/drtm_svc.h>
#include <services/sdei.h>
#include <services/spm_mm_svc.h>
#include <services/spmd_svc.h>
@ -66,6 +67,12 @@ static int32_t std_svc_setup(void)
trng_setup();
#if DRTM_SUPPORT
if (drtm_setup() != 0) {
ret = 1;
}
#endif
return ret;
}
@ -145,6 +152,11 @@ static uintptr_t std_svc_smc_handler(uint32_t smc_fid,
#if TRNG_SUPPORT
if (is_trng_fid(smc_fid)) {
return trng_smc_handler(smc_fid, x1, x2, x3, x4, cookie, handle,
#endif
#if DRTM_SUPPORT
if (is_drtm_fid(smc_fid)) {
return drtm_smc_handler(smc_fid, x1, x2, x3, x4, cookie, handle,
flags);
}
#endif