Skip to content

Commit

Permalink
nvme: Only allocate one dma bounce buffer for all nvme drives
Browse files Browse the repository at this point in the history
There is no need to create multiple DMA bounce buffers, as the BIOS
disk code is not reentrant.

Also, verify that the allocation succeeds.

Signed-off-by: Kevin O'Connor <kevin@koconnor.net>
Reviewed-by: Alexander Graf <graf@amazon.com>
  • Loading branch information
KevinOConnor committed Jan 27, 2022
1 parent f13b650 commit 6d46283
Show file tree
Hide file tree
Showing 2 changed files with 15 additions and 9 deletions.
3 changes: 0 additions & 3 deletions src/hw/nvme-int.h
Original file line number Diff line number Diff line change
Expand Up @@ -117,9 +117,6 @@ struct nvme_namespace {
u32 block_size;
u32 metadata_size;
u32 max_req_size;

/* Page aligned buffer of size NVME_PAGE_SIZE. */
char *dma_buffer;
};

/* Data structures for NVMe admin identify commands */
Expand Down
21 changes: 15 additions & 6 deletions src/hw/nvme.c
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,9 @@
#include "nvme.h"
#include "nvme-int.h"

// Page aligned "dma bounce buffer" of size NVME_PAGE_SIZE in high memory
static void *nvme_dma_buffer;

static void *
zalloc_page_aligned(struct zone_s *zone, u32 size)
{
Expand Down Expand Up @@ -257,6 +260,14 @@ nvme_probe_ns(struct nvme_ctrl *ctrl, u32 ns_idx, u8 mdts)
goto free_buffer;
}

if (!nvme_dma_buffer) {
nvme_dma_buffer = zalloc_page_aligned(&ZoneHigh, NVME_PAGE_SIZE);
if (!nvme_dma_buffer) {
warn_noalloc();
goto free_buffer;
}
}

struct nvme_namespace *ns = malloc_fseg(sizeof(*ns));
if (!ns) {
warn_noalloc();
Expand Down Expand Up @@ -294,8 +305,6 @@ nvme_probe_ns(struct nvme_ctrl *ctrl, u32 ns_idx, u8 mdts)
ns->max_req_size = -1U;
}

ns->dma_buffer = zalloc_page_aligned(&ZoneHigh, NVME_PAGE_SIZE);

char *desc = znprintf(MAXDESCSIZE, "NVMe NS %u: %llu MiB (%llu %u-byte "
"blocks + %u-byte metadata)",
ns_id, (ns->lba_count * ns->block_size) >> 20,
Expand Down Expand Up @@ -459,12 +468,12 @@ nvme_bounce_xfer(struct nvme_namespace *ns, u64 lba, void *buf, u16 count,
u16 blocks = count < max_blocks ? count : max_blocks;

if (write)
memcpy(ns->dma_buffer, buf, blocks * ns->block_size);
memcpy(nvme_dma_buffer, buf, blocks * ns->block_size);

int res = nvme_io_xfer(ns, lba, ns->dma_buffer, NULL, blocks, write);
int res = nvme_io_xfer(ns, lba, nvme_dma_buffer, NULL, blocks, write);

if (!write && res >= 0)
memcpy(buf, ns->dma_buffer, res * ns->block_size);
memcpy(buf, nvme_dma_buffer, res * ns->block_size);

return res;
}
Expand Down Expand Up @@ -498,7 +507,7 @@ nvme_prpl_xfer(struct nvme_namespace *ns, u64 lba, void *buf, u16 count,
/* Build PRP list if we need to describe more than 2 pages */
if ((ns->block_size * count) > (NVME_PAGE_SIZE * 2)) {
u32 prpl_len = 0;
u64 *prpl = (void*)ns->dma_buffer;
u64 *prpl = nvme_dma_buffer;
int first_page = 1;
for (; size > 0; base += NVME_PAGE_SIZE, size -= NVME_PAGE_SIZE) {
if (first_page) {
Expand Down

0 comments on commit 6d46283

Please sign in to comment.