Merge changes I31ec0001,Ib06cd024,I7c11f15d,Ie0d5d4c8,I285f3b59, ... into integration

* changes:
  fix(el3-spmc): correctly account for emad_offset
  refactor(el3-spmc): avoid unnecessarily revalidating offset
  fix(el3-spmc): only call spmc_shm_check_obj() on complete objects
  refactor(spmc): assert on out-of-bounds emad access
  refactor(el3-spmc): spmc_shmem_obj_get_emad() will never fail
  fix(el3-spmc): validate descriptor headers
  fix(el3-spmc): use version-dependent minimum descriptor length
  refactor(el3-spmc): check emad_count offset
This commit is contained in:
Manish Pandey 2023-05-25 12:35:46 +02:00 committed by TrustedFirmware Code Review
commit 4bb0cdc159
2 changed files with 136 additions and 82 deletions

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2022-2023, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -217,6 +217,8 @@ struct ffa_mtd_v1_0 {
struct ffa_emad_v1_0 emad[];
};
CASSERT(sizeof(struct ffa_mtd_v1_0) == 32, assert_ffa_mtd_size_v1_0_mismatch);
CASSERT(offsetof(struct ffa_mtd_v1_0, emad) == 32,
assert_ffa_mtd_size_v1_0_mismatch_2);
/**
* struct ffa_mtd - Memory transaction descriptor for FF-A v1.1.
@ -254,5 +256,8 @@ struct ffa_mtd {
uint64_t reserved_40_47;
};
CASSERT(sizeof(struct ffa_mtd) == 48, assert_ffa_mtd_size_mismatch);
CASSERT(offsetof(struct ffa_mtd, emad_count) ==
offsetof(struct ffa_mtd_v1_0, emad_count),
assert_ffa_mtd_emad_count_offset_mismatch);
#endif /* EL3_SPMC_FFA_MEM_H */

View File

@ -1,10 +1,11 @@
/*
* Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2022-2023, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
@ -195,24 +196,23 @@ spmc_shmem_obj_get_emad(const struct ffa_mtd *desc, uint32_t index,
uint32_t ffa_version, size_t *emad_size)
{
uint8_t *emad;
assert(index < desc->emad_count);
/*
* If the caller is using FF-A v1.0 interpret the descriptor as a v1.0
* format, otherwise assume it is a v1.1 format.
*/
if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
/* Cast our descriptor to the v1.0 format. */
struct ffa_mtd_v1_0 *mtd_v1_0 =
(struct ffa_mtd_v1_0 *) desc;
emad = (uint8_t *) &(mtd_v1_0->emad);
emad = (uint8_t *)desc + offsetof(struct ffa_mtd_v1_0, emad);
*emad_size = sizeof(struct ffa_emad_v1_0);
} else {
if (!is_aligned(desc->emad_offset, 16)) {
WARN("Emad offset is not aligned.\n");
return NULL;
}
assert(is_aligned(desc->emad_offset, 16));
emad = ((uint8_t *) desc + desc->emad_offset);
*emad_size = desc->emad_size;
}
assert(((uint64_t)index * (uint64_t)*emad_size) <= UINT32_MAX);
return (emad + (*emad_size * index));
}
@ -236,10 +236,6 @@ spmc_shmem_obj_get_comp_mrd(struct spmc_shmem_obj *obj, uint32_t ffa_version)
struct ffa_emad_v1_0 *emad = spmc_shmem_obj_get_emad(&obj->desc, 0,
ffa_version,
&emad_size);
/* Ensure the emad array was found. */
if (emad == NULL) {
return NULL;
}
/* Ensure the composite descriptor offset is aligned. */
if (!is_aligned(emad->comp_mrd_offset, 8)) {
@ -699,6 +695,87 @@ spmc_populate_ffa_v1_0_descriptor(void *dst, struct spmc_shmem_obj *orig_obj,
return 0;
}
/**
 * spmc_validate_mtd_start - Sanity-check the memory transaction descriptor
 *                           header delivered in the first fragment of an
 *                           FFA_MEM_SHARE/LEND/DONATE transaction.
 * @desc:            Partially validated memory transaction descriptor.
 * @ffa_version:     FF-A version of the sending endpoint; selects whether
 *                   @desc is interpreted in the v1.0 or v1.1 layout.
 * @fragment_length: Length of the first fragment actually received.
 * @total_length:    Total descriptor length declared by the sender.
 *
 * Checks that the fragment is large enough to contain the version-specific
 * header, that the emad array offset/size fields are sane and aligned, and
 * that the declared emad array (plus a composite descriptor header) fits
 * inside @total_length.
 *
 * Return: 0 on success, FFA_ERROR_INVALID_PARAMETER on any malformed field.
 */
static int
spmc_validate_mtd_start(struct ffa_mtd *desc, uint32_t ffa_version,
			size_t fragment_length, size_t total_length)
{
	unsigned long long emad_end;
	unsigned long long emad_size;
	unsigned long long emad_offset;
	unsigned int min_desc_size;

	/* Determine the appropriate minimum descriptor size. */
	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		min_desc_size = sizeof(struct ffa_mtd_v1_0);
	} else if (ffa_version == MAKE_FFA_VERSION(1, 1)) {
		min_desc_size = sizeof(struct ffa_mtd);
	} else {
		return FFA_ERROR_INVALID_PARAMETER;
	}
	if (fragment_length < min_desc_size) {
		WARN("%s: invalid length %zu < %u\n", __func__, fragment_length,
		     min_desc_size);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	if (desc->emad_count == 0U) {
		WARN("%s: unsupported attribute desc count %u.\n",
		     __func__, desc->emad_count);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	/*
	 * If the caller is using FF-A v1.0 interpret the descriptor as a v1.0
	 * format, otherwise assume it is a v1.1 format.
	 */
	if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
		/* v1.0 has no emad_offset/emad_size fields; both are fixed. */
		emad_offset = emad_size = sizeof(struct ffa_emad_v1_0);
	} else {
		if (!is_aligned(desc->emad_offset, 16)) {
			WARN("%s: Emad offset %" PRIx32 " is not 16-byte aligned.\n",
			     __func__, desc->emad_offset);
			return FFA_ERROR_INVALID_PARAMETER;
		}
		/* The emad array must start after the v1.1 header itself. */
		if (desc->emad_offset < sizeof(struct ffa_mtd)) {
			WARN("%s: Emad offset too small: 0x%" PRIx32 " < 0x%zx.\n",
			     __func__, desc->emad_offset,
			     sizeof(struct ffa_mtd));
			return FFA_ERROR_INVALID_PARAMETER;
		}
		emad_offset = desc->emad_offset;
		/* Each entry must at least hold a v1.0 emad structure. */
		if (desc->emad_size < sizeof(struct ffa_emad_v1_0)) {
			WARN("%s: Bad emad size (%" PRIu32 " < %zu).\n", __func__,
			     desc->emad_size, sizeof(struct ffa_emad_v1_0));
			return FFA_ERROR_INVALID_PARAMETER;
		}
		if (!is_aligned(desc->emad_size, 16)) {
			WARN("%s: Emad size 0x%" PRIx32 " is not 16-byte aligned.\n",
			     __func__, desc->emad_size);
			return FFA_ERROR_INVALID_PARAMETER;
		}
		emad_size = desc->emad_size;
	}

	/*
	 * Overflow is impossible: the arithmetic happens in at least 64-bit
	 * precision, but all of the operands are bounded by UINT32_MAX, and
	 * ((2^32 - 1)^2 + (2^32 - 1) + (2^32 - 1)) = ((2^32 - 1) * (2^32 + 1))
	 * = (2^64 - 1).
	 */
	/*
	 * Fixed misplaced parenthesis: the original asserted
	 * sizeof(desc->emad_count == 4), i.e. sizeof(int) != 0, which is
	 * vacuously true. The intent — required by the no-overflow argument
	 * above — is that emad_count itself is a 32-bit field.
	 */
	CASSERT(sizeof(desc->emad_count) == 4, assert_emad_count_max_too_large);
	emad_end = (desc->emad_count * (unsigned long long)emad_size) +
		   (unsigned long long)sizeof(struct ffa_comp_mrd) +
		   (unsigned long long)emad_offset;

	if (emad_end > total_length) {
		WARN("%s: Composite memory region extends beyond descriptor: 0x%llx > 0x%zx\n",
		     __func__, emad_end, total_length);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	return 0;
}
/**
* spmc_shmem_check_obj - Check that counts in descriptor match overall size.
* @obj: Object containing ffa_memory_region_descriptor.
@ -724,7 +801,6 @@ static int spmc_shmem_check_obj(struct spmc_shmem_obj *obj,
size_t expected_size;
size_t total_page_count;
size_t emad_size;
size_t desc_size;
size_t header_emad_size;
uint32_t offset;
struct ffa_comp_mrd *comp;
@ -732,10 +808,6 @@ static int spmc_shmem_check_obj(struct spmc_shmem_obj *obj,
emad = spmc_shmem_obj_get_emad(&obj->desc, emad_num,
ffa_version, &emad_size);
if (emad == NULL) {
WARN("%s: invalid emad structure.\n", __func__);
return -EINVAL;
}
/*
* Validate the calculated emad address resides within the
@ -749,13 +821,23 @@ static int spmc_shmem_check_obj(struct spmc_shmem_obj *obj,
offset = emad->comp_mrd_offset;
if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
desc_size = sizeof(struct ffa_mtd_v1_0);
/*
* The offset provided to the composite memory region descriptor
* should be consistent across endpoint descriptors. Store the
* first entry and compare against subsequent entries.
*/
if (comp_mrd_offset == 0) {
comp_mrd_offset = offset;
} else {
desc_size = sizeof(struct ffa_mtd);
if (comp_mrd_offset != offset) {
ERROR("%s: mismatching offsets provided, %u != %u\n",
__func__, offset, comp_mrd_offset);
return -EINVAL;
}
continue; /* Remainder only executed on first iteration. */
}
header_emad_size = desc_size +
header_emad_size = (size_t)((uint8_t *)emad - (uint8_t *)&obj->desc) +
(obj->desc.emad_count * emad_size);
if (offset < header_emad_size) {
@ -805,29 +887,6 @@ static int spmc_shmem_check_obj(struct spmc_shmem_obj *obj,
return -EINVAL;
}
if (obj->desc_filled < obj->desc_size) {
/*
* The whole descriptor has not yet been received.
* Skip final checks.
*/
return 0;
}
/*
* The offset provided to the composite memory region descriptor
* should be consistent across endpoint descriptors. Store the
* first entry and compare against subsequent entries.
*/
if (comp_mrd_offset == 0) {
comp_mrd_offset = offset;
} else {
if (comp_mrd_offset != offset) {
ERROR("%s: mismatching offsets provided, %u != %u\n",
__func__, offset, comp_mrd_offset);
return -EINVAL;
}
}
total_page_count = 0;
for (size_t i = 0; i < count; i++) {
@ -960,16 +1019,17 @@ static long spmc_ffa_fill_desc(struct mailbox *mbox,
if (obj->desc_filled == 0U) {
/* First fragment, descriptor header has been copied */
ret = spmc_validate_mtd_start(&obj->desc, ffa_version,
fragment_length, obj->desc_size);
if (ret != 0) {
goto err_bad_desc;
}
obj->desc.handle = spmc_shmem_obj_state.next_handle++;
obj->desc.flags |= mtd_flag;
}
obj->desc_filled += fragment_length;
ret = spmc_shmem_check_obj(obj, ffa_version);
if (ret != 0) {
ret = FFA_ERROR_INVALID_PARAMETER;
goto err_bad_desc;
}
handle_low = (uint32_t)obj->desc.handle;
handle_high = obj->desc.handle >> 32;
@ -982,6 +1042,12 @@ static long spmc_ffa_fill_desc(struct mailbox *mbox,
/* The full descriptor has been received, perform any final checks. */
ret = spmc_shmem_check_obj(obj, ffa_version);
if (ret != 0) {
ret = FFA_ERROR_INVALID_PARAMETER;
goto err_bad_desc;
}
/*
* If a partition ID resides in the secure world validate that the
* partition ID is for a known partition. Ignore any partition ID
@ -991,10 +1057,6 @@ static long spmc_ffa_fill_desc(struct mailbox *mbox,
for (size_t i = 0; i < obj->desc.emad_count; i++) {
emad = spmc_shmem_obj_get_emad(&obj->desc, i, ffa_version,
&emad_size);
if (emad == NULL) {
ret = FFA_ERROR_INVALID_PARAMETER;
goto err_bad_desc;
}
ffa_endpoint_id16_t ep_id = emad->mapd.endpoint_id;
@ -1012,18 +1074,11 @@ static long spmc_ffa_fill_desc(struct mailbox *mbox,
for (size_t i = 0; i < obj->desc.emad_count; i++) {
emad = spmc_shmem_obj_get_emad(&obj->desc, i, ffa_version,
&emad_size);
if (emad == NULL) {
ret = FFA_ERROR_INVALID_PARAMETER;
goto err_bad_desc;
}
for (size_t j = i + 1; j < obj->desc.emad_count; j++) {
other_emad = spmc_shmem_obj_get_emad(&obj->desc, j,
ffa_version,
&emad_size);
if (other_emad == NULL) {
ret = FFA_ERROR_INVALID_PARAMETER;
goto err_bad_desc;
}
if (emad->mapd.endpoint_id ==
other_emad->mapd.endpoint_id) {
@ -1142,6 +1197,7 @@ long spmc_ffa_mem_send(uint32_t smc_fid,
struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
ffa_mtd_flag32_t mtd_flag;
uint32_t ffa_version = get_partition_ffa_version(secure_origin);
size_t min_desc_size;
if (address != 0U || page_count != 0U) {
WARN("%s: custom memory region for message not supported.\n",
@ -1156,11 +1212,18 @@ long spmc_ffa_mem_send(uint32_t smc_fid,
FFA_ERROR_INVALID_PARAMETER);
}
/*
* Check if the descriptor is smaller than the v1.0 descriptor. The
* descriptor cannot be smaller than this structure.
*/
if (fragment_length < sizeof(struct ffa_mtd_v1_0)) {
if (ffa_version == MAKE_FFA_VERSION(1, 0)) {
min_desc_size = sizeof(struct ffa_mtd_v1_0);
} else if (ffa_version == MAKE_FFA_VERSION(1, 1)) {
min_desc_size = sizeof(struct ffa_mtd);
} else {
WARN("%s: bad FF-A version.\n", __func__);
return spmc_ffa_error_return(handle,
FFA_ERROR_INVALID_PARAMETER);
}
/* Check if the descriptor is too small for the FF-A version. */
if (fragment_length < min_desc_size) {
WARN("%s: bad first fragment size %u < %zu\n",
__func__, fragment_length, sizeof(struct ffa_mtd_v1_0));
return spmc_ffa_error_return(handle,
@ -1482,11 +1545,6 @@ spmc_ffa_mem_retrieve_req(uint32_t smc_fid,
emad = spmc_shmem_obj_get_emad(req, i, ffa_version,
&emad_size);
if (emad == NULL) {
WARN("%s: invalid emad structure.\n", __func__);
ret = FFA_ERROR_INVALID_PARAMETER;
goto err_unlock_all;
}
if ((uintptr_t) emad >= (uintptr_t)
((uint8_t *) req + total_length)) {
@ -1510,21 +1568,12 @@ spmc_ffa_mem_retrieve_req(uint32_t smc_fid,
emad = spmc_shmem_obj_get_emad(req, i, ffa_version,
&emad_size);
if (emad == NULL) {
ret = FFA_ERROR_INVALID_PARAMETER;
goto err_unlock_all;
}
for (size_t j = 0; j < obj->desc.emad_count; j++) {
other_emad = spmc_shmem_obj_get_emad(
&obj->desc, j, MAKE_FFA_VERSION(1, 1),
&emad_size);
if (other_emad == NULL) {
ret = FFA_ERROR_INVALID_PARAMETER;
goto err_unlock_all;
}
if (req->emad_count &&
emad->mapd.endpoint_id ==
other_emad->mapd.endpoint_id) {