nvme: Pass large I/O requests as PRP lists

Today, we split every I/O request into chunks of at most 4KB and wait for each
of these requests to finish. We encountered issues when the backing storage is
network based, because every I/O request then has to go over the network with
the associated latency cost. A few ms of latency per request adds up quickly
when loading a 100MB initrd in 4KB chunks.

NVMe provides a mechanism, called PRP (Physical Region Page) lists, that lets
a single I/O request span multiple pages. This patch takes larger I/O
operations and checks whether they can be passed directly to the NVMe backing
device as a PRP list. At least for grub, read operations can always be mapped
directly into PRP list entries.
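
For example, a 64KB read from a page-aligned buffer spans 16 pages: PRP1
describes the first page and a 15-entry PRP list describes the remaining 15
pages, so the whole read becomes a single NVMe command instead of 16 separate
4KB requests.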

This reduces the number of I/O operations required during a typical boot
path by roughly a factor of 5.

Signed-off-by: Alexander Graf <graf@amazon.com>
Alexander Graf, 2020-09-30 23:10:55 +02:00, committed by Kevin O'Connor
parent 23258d39ff
commit 01f2736cc9
2 changed files with 82 additions and 10 deletions

src/hw/nvme-int.h

@@ -10,6 +10,8 @@
 #include "types.h" // u32
 #include "pcidevice.h" // struct pci_device
 
+#define NVME_MAX_PRPL_ENTRIES 15 /* Allows requests up to 64kb */
+
 /* Data structures */
 
 /* The register file of a NVMe host controller. This struct follows the naming
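
The limit of 15 list entries is what the "64kb" comment refers to: PRP1 covers
the first page and each list entry covers one further 4KB page, so a single
request can span at most 16 pages, i.e. 16 * 4KB = 64KB.
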
@@ -121,6 +123,11 @@ struct nvme_namespace {
     /* Page aligned buffer of size NVME_PAGE_SIZE. */
     char *dma_buffer;
+
+    /* Page List */
+    u32 prpl_len;
+    void *prp1;
+    u64 prpl[NVME_MAX_PRPL_ENTRIES];
 };
 
 /* Data structures for NVMe admin identify commands */
@@ -195,6 +202,7 @@ union nvme_identify {
 #define NVME_CQE_DW3_P (1U << 16)
 
 #define NVME_PAGE_SIZE 4096
+#define NVME_PAGE_MASK ~(NVME_PAGE_SIZE - 1)
 
 /* Length for the queue entries. */
 #define NVME_SQE_SIZE_LOG 6
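
NVME_PAGE_MASK selects the page-aligned part of an address: addr &
NVME_PAGE_MASK is the page base, and addr & ~NVME_PAGE_MASK is the offset
within the page. nvme_build_prpl() below uses the latter both as an alignment
test and to check whether an unaligned buffer still fits within a single page.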

src/hw/nvme.c

@@ -168,11 +168,6 @@ nvme_get_next_sqe(struct nvme_sq *sq, u8 opc, void *metadata, void *data, void *
     sqe->dptr_prp1 = (u32)data;
     sqe->dptr_prp2 = (u32)data2;
 
-    if (sqe->dptr_prp1 & (NVME_PAGE_SIZE - 1)) {
-        /* Data buffer not page aligned. */
-        warn_internalerror();
-    }
-
     return sqe;
 }
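
The page-alignment warning goes away here because, with the changes below,
prp1 may now legitimately carry a sub-page offset: nvme_build_prpl() passes an
unaligned buffer through unchanged when the whole transfer fits in one page,
so the old blanket warning would fire on valid requests.
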
@@ -418,19 +413,29 @@ nvme_io_readwrite(struct nvme_namespace *ns, u64 lba, char *buf, u16 count,
                   int write)
 {
     u32 buf_addr = (u32)buf;
+    void *prp2;
 
-    if ((buf_addr & 0x3) ||
-        ((buf_addr & ~(NVME_PAGE_SIZE - 1)) !=
-         ((buf_addr + ns->block_size * count - 1) & ~(NVME_PAGE_SIZE - 1)))) {
-        /* Buffer is misaligned or crosses page boundary */
+    if (buf_addr & 0x3) {
+        /* Buffer is misaligned */
         warn_internalerror();
         return DISK_RET_EBADTRACK;
     }
 
+    if ((ns->block_size * count) > (NVME_PAGE_SIZE * 2)) {
+        /* We need to describe more than 2 pages, rely on PRP List */
+        prp2 = ns->prpl;
+    } else if ((ns->block_size * count) > NVME_PAGE_SIZE) {
+        /* Directly embed the 2nd page if we only need 2 pages */
+        prp2 = (void *)(long)ns->prpl[0];
+    } else {
+        /* One page is enough, don't expose anything else */
+        prp2 = NULL;
+    }
+
     struct nvme_sqe *io_read = nvme_get_next_sqe(&ns->ctrl->io_sq,
                                                  write ? NVME_SQE_OPC_IO_WRITE
                                                        : NVME_SQE_OPC_IO_READ,
-                                                 NULL, buf, NULL);
+                                                 NULL, buf, prp2);
     io_read->nsid = ns->ns_id;
     io_read->dword[10] = (u32)lba;
     io_read->dword[11] = (u32)(lba >> 32);
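
This mirrors the PRP rules from the NVMe specification: if the transfer fits
in one page, PRP2 is left unused; if it fits in exactly two pages, PRP2 holds
the address of the second page (taken from prpl[0] here); for anything larger,
PRP2 points at the PRP list that nvme_build_prpl() filled in.
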
@@ -450,6 +455,60 @@ nvme_io_readwrite(struct nvme_namespace *ns, u64 lba, char *buf, u16 count,
     return DISK_RET_SUCCESS;
 }
 
+static void nvme_reset_prpl(struct nvme_namespace *ns)
+{
+    ns->prpl_len = 0;
+}
+
+static int nvme_add_prpl(struct nvme_namespace *ns, u64 base)
+{
+    if (ns->prpl_len >= NVME_MAX_PRPL_ENTRIES)
+        return -1;
+
+    ns->prpl[ns->prpl_len++] = base;
+
+    return 0;
+}
+
+int nvme_build_prpl(struct nvme_namespace *ns, struct disk_op_s *op)
+{
+    int first_page = 1;
+    u32 base = (long)op->buf_fl;
+    s32 size = op->count * ns->block_size;
+
+    if (op->count > ns->max_req_size)
+        return -1;
+
+    nvme_reset_prpl(ns);
+
+    /* Special case for transfers that fit into PRP1, but are unaligned */
+    if (((size + (base & ~NVME_PAGE_MASK)) <= NVME_PAGE_SIZE)) {
+        ns->prp1 = op->buf_fl;
+        return 0;
+    }
+
+    /* Every request has to be page aligned */
+    if (base & ~NVME_PAGE_MASK)
+        return -1;
+
+    /* Make sure a full block fits into the last chunk */
+    if (size & (ns->block_size - 1ULL))
+        return -1;
+
+    for (; size > 0; base += NVME_PAGE_SIZE, size -= NVME_PAGE_SIZE) {
+        if (first_page) {
+            /* First page is special */
+            ns->prp1 = (void*)base;
+            first_page = 0;
+            continue;
+        }
+        if (nvme_add_prpl(ns, base))
+            return -1;
+    }
+
+    return 0;
+}
+
 static int
 nvme_create_io_queues(struct nvme_ctrl *ctrl)
 {
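
To make the address arithmetic concrete, here is a small stand-alone sketch
(plain C with made-up values; not part of the patch) of what nvme_build_prpl()
computes for an aligned multi-page transfer: the first page lands in prp1 and
every following page becomes one PRP list entry.

/* Illustrative sketch only -- mirrors the PRP construction above with
 * hypothetical values; not part of the SeaBIOS patch. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

int main(void)
{
    uint64_t buf   = 0x100000;   /* page-aligned transfer buffer (made up) */
    uint32_t bytes = 16 * 1024;  /* 16KB request -> 4 pages */
    uint64_t prp1  = buf;        /* the first page is described by PRP1 */

    printf("prp1    = 0x%llx\n", (unsigned long long)prp1);
    /* every further page becomes one PRP list entry */
    for (uint32_t off = PAGE_SIZE; off < bytes; off += PAGE_SIZE)
        printf("prpl[%u] = 0x%llx\n", off / PAGE_SIZE - 1,
               (unsigned long long)(buf + off));
    return 0;
}

With these values prp1 is 0x100000 and the list holds 0x101000, 0x102000 and
0x103000; since the transfer is larger than two pages, nvme_io_readwrite()
would point PRP2 at that list.
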
@@ -668,6 +727,11 @@ nvme_cmd_readwrite(struct nvme_namespace *ns, struct disk_op_s *op, int write)
     u16 const max_blocks = NVME_PAGE_SIZE / ns->block_size;
     u16 i;
 
+    if (!nvme_build_prpl(ns, op)) {
+        /* Request goes via PRP List logic */
+        return nvme_io_readwrite(ns, op->lba, ns->prp1, op->count, write);
+    }
+
     for (i = 0; i < op->count && res == DISK_RET_SUCCESS;) {
         u16 blocks_remaining = op->count - i;
         u16 blocks = blocks_remaining < max_blocks ? blocks_remaining
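
The net effect in nvme_cmd_readwrite() is that nvme_build_prpl() is tried
first: when it returns 0, the whole request goes out as a single command via
the PRP path above; when it cannot describe the request (buffer not page
aligned across pages, request larger than the controller's maximum size, more
pages than the PRP list can hold, or a transfer that is not a whole number of
blocks), the code falls back to the existing one-page-at-a-time loop.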