more fixes from Thanos's review

Signed-off-by: William Henderson <william.henderson@nutanix.com>
w-henderson committed Aug 30, 2023
1 parent 3c9dcab commit 1507e56

Showing 4 changed files with 166 additions and 82 deletions.
97 changes: 71 additions & 26 deletions lib/dma.c
@@ -611,11 +611,11 @@ dma_controller_dirty_page_get(dma, vfu_dma_addr_t addr,
}

if (pgsize == dma->dirty_pgsize) {
dirty_page_get_same_pgsize(region, bitmap, server_bitmap_size);
dirty_page_get_same_pgsize(region, bitmap, client_bitmap_size);
} else {
dirty_page_get_different_pgsize(region, bitmap, server_bitmap_size,
client_bitmap_size, pgsize,
dma->dirty_pgsize);
dma->dirty_pgsize, client_bitmap_size,
pgsize);
}

#ifdef DEBUG
@@ -657,47 +657,92 @@ dirty_page_get_same_pgsize(dma_memory_region_t *region, char *bitmap,

void
dirty_page_get_different_pgsize(dma_memory_region_t *region, char *bitmap,
size_t server_bitmap_size,
size_t client_bitmap_size, size_t server_pgsize,
size_t client_pgsize)
size_t server_bitmap_size, size_t server_pgsize,
size_t client_bitmap_size, size_t client_pgsize)
{
uint8_t bit = 0;
size_t i;
int j;
/*
* The index of the bit in the client bitmap that we are currently
* considering. By keeping track of this separately to the for loops, we
* allow for one server bit to be repeated for multiple client bytes, or
* multiple bytes' worth of server bits to be OR'd together to calculate one
* client bit.
*/
uint8_t client_bit_idx = 0;
/*
* The index of the byte in the server bitmap that we are currently
* considering.
*/
size_t server_byte_idx;
/*
* The index of the bit in the currently considered byte of the server
* bitmap that we are currently considering.
*/
int server_bit_idx_into_byte;

bool extend = server_pgsize <= client_pgsize;
/*
* Whether we are extending the server bitmap (repeating bits to
* generate a larger client bitmap) or not (combining bits with OR to
* generate a smaller client bitmap).
*
* If the server page size is greater than the client's requested page size,
* we will be extending.
*/
bool extend = server_pgsize >= client_pgsize;
/*
* If extending, the number of times to repeat each bit of the server
* bitmap. If not, the number of bits of the server bitmap to OR together to
* calculate one bit of the client bitmap.
*/
size_t factor = extend ?
client_pgsize / server_pgsize : server_pgsize / client_pgsize;
server_pgsize / client_pgsize : client_pgsize / server_pgsize;

for (i = 0; i < server_bitmap_size &&
bit / CHAR_BIT < (size_t)client_bitmap_size; i++) {
/*
* Iterate through the bytes of the server bitmap.
*/
for (server_byte_idx = 0; server_byte_idx < server_bitmap_size &&
client_bit_idx / CHAR_BIT < client_bitmap_size; server_byte_idx++) {
uint8_t out = 0;

dirty_page_exchange(&out, &region->dirty_bitmap[i]);
dirty_page_exchange(&out, &region->dirty_bitmap[server_byte_idx]);

/*
* Iterate through the bits of the byte, repeating or combining bits
* to reach the desired page size.
* Iterate through the bits of the server byte, repeating or combining
* bits to reach the desired page size.
*/
for (j = 0; j < CHAR_BIT; j++) {
uint8_t b = (out >> j) & 1;
for (server_bit_idx_into_byte = 0;
server_bit_idx_into_byte < CHAR_BIT;
server_bit_idx_into_byte++) {
uint8_t server_bit = (out >> server_bit_idx_into_byte) & 1;

if (extend) {
/*
* Repeat `factor` times the bit at index `j` of `out`.
*
* OR the same bit from the server bitmap (`server_bit`) with
* `factor` bits in the client bitmap, from `client_bit_idx` to
* `end_client_bit_idx`.
*/
size_t new_bit = bit + factor;
while (bit < new_bit) {
bitmap[bit / 8] |= b << (bit % 8);
bit++;
size_t end_client_bit_idx = client_bit_idx + factor;
while (client_bit_idx < end_client_bit_idx) {
bitmap[client_bit_idx / CHAR_BIT] |=
server_bit << (client_bit_idx % CHAR_BIT);
client_bit_idx++;
}
} else {
/*
* OR the same bit with `factor` bits of the raw bitmap.
* OR `factor` bits of the server bitmap with the same bit at
* index `client_bit_idx` in the client bitmap.
*/
bitmap[client_bit_idx / CHAR_BIT] |=
server_bit << (client_bit_idx % CHAR_BIT);

/*
* Only move onto the next bit in the client bitmap once we've
* OR'd `factor` bits.
*/
bitmap[bit / 8] |= b << (bit % 8);
if (((i * 8) + j) % factor == factor - 1) {
bit++;
if (((server_byte_idx * CHAR_BIT) + server_bit_idx_into_byte)
% factor == factor - 1) {
client_bit_idx++;
}
}
}
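
A minimal, self-contained sketch of the repeat/OR technique this hunk implements, using hypothetical buffers rather than the library's types. When the client requests smaller pages than the server tracks, each server bit is repeated `factor` times; when it requests larger pages, runs of `factor` server bits are OR'd into one client bit, exactly as in the loop above.

```c
#include <limits.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Standalone sketch (not the library's API): convert a dirty-page bitmap
 * tracked at server_pgsize into one at client_pgsize. Assumes both page
 * sizes are powers of two, so one always divides the other.
 */
static void
convert_bitmap(const uint8_t *server, size_t server_size,
               uint8_t *client, size_t client_size,
               size_t server_pgsize, size_t client_pgsize)
{
    bool extend = server_pgsize >= client_pgsize;
    size_t factor = extend ? server_pgsize / client_pgsize
                           : client_pgsize / server_pgsize;
    size_t client_bit = 0;

    memset(client, 0, client_size);

    for (size_t i = 0; i < server_size * CHAR_BIT &&
         client_bit / CHAR_BIT < client_size; i++) {
        uint8_t bit = (server[i / CHAR_BIT] >> (i % CHAR_BIT)) & 1;

        if (extend) {
            /* One large server page covers `factor` smaller client pages:
             * repeat its bit. */
            for (size_t k = 0; k < factor &&
                 client_bit / CHAR_BIT < client_size; k++) {
                client[client_bit / CHAR_BIT] |= bit << (client_bit % CHAR_BIT);
                client_bit++;
            }
        } else {
            /* `factor` small server pages fold into one larger client page:
             * OR their bits, advancing only after every `factor`th bit. */
            client[client_bit / CHAR_BIT] |= bit << (client_bit % CHAR_BIT);
            if (i % factor == factor - 1) {
                client_bit++;
            }
        }
    }
}

int
main(void)
{
    uint8_t server[] = { 0x21 };  /* pages 0 and 5 of eight 4 KiB pages dirty */
    uint8_t client[2] = { 0 };

    /* Client asks for 2 KiB pages: each server bit repeats twice. */
    convert_bitmap(server, sizeof(server), client, sizeof(client), 4096, 2048);
    printf("%02x %02x\n", client[0], client[1]);  /* prints "03 0c" */
    return 0;
}
```

Note the combine direction mirrors the hunk above: the client bit only advances once `factor` server bits have been OR'd in.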
6 changes: 3 additions & 3 deletions lib/dma.h
@@ -392,9 +392,9 @@ dirty_page_get_same_pgsize(dma_memory_region_t *region, char *bitmap,
size_t bitmap_size);
void
dirty_page_get_different_pgsize(dma_memory_region_t *region, char *bitmap,
size_t bitmap_size,
size_t converted_bitmap_size, size_t pgsize,
size_t converted_pgsize);
size_t server_bitmap_size, size_t server_pgsize,
size_t client_bitmap_size,
size_t client_pgsize);

bool
dma_sg_is_mappable(const dma_controller_t *dma, const dma_sg_t *sg);
139 changes: 86 additions & 53 deletions lib/libvfio-user.c
@@ -962,8 +962,10 @@ static int
handle_migration_device_feature_get(vfu_ctx_t *vfu_ctx, vfu_msg_t *msg,
struct vfio_user_device_feature *req)
{
// all supported outgoing data is currently the same size as
// vfio_user_device_feature_migration
/*
* All supported outgoing data is currently the same size as
* struct vfio_user_device_feature_migration.
*/
msg->out.iov.iov_len = sizeof(struct vfio_user_device_feature)
+ sizeof(struct vfio_user_device_feature_migration);

@@ -1005,7 +1007,10 @@ handle_migration_device_feature_get(vfu_ctx_t *vfu_ctx, vfu_msg_t *msg,
return 0;
}

default: return ERROR_INT(EINVAL);
default:
vfu_log(vfu_ctx, LOG_ERR, "invalid flags for migration GET (%d)",
req->flags);
return ERROR_INT(EINVAL);
}
}
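
For context on the `iov_len` arithmetic in this hunk: the GET reply is a fixed header followed by a feature-specific payload. A sketch of the layout this implies, assuming the wire structs mirror the kernel's `struct vfio_device_feature` with a trailing flexible array (check `vfio-user.h` for the authoritative definitions):

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed wire layout, mirroring the kernel's struct vfio_device_feature. */
struct vfio_user_device_feature {
    uint32_t argsz;
    uint32_t flags;
    uint8_t  data[];  /* feature-specific payload follows the header */
};

struct vfio_user_device_feature_migration {
    uint64_t flags;   /* supported migration features */
};

int
main(void)
{
    /* The GET reply for the migration feature is header + payload. */
    size_t len = sizeof(struct vfio_user_device_feature)
               + sizeof(struct vfio_user_device_feature_migration);
    printf("reply length: %zu bytes\n", len);  /* 8 + 8 = 16 */
    return 0;
}
```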

@@ -1030,6 +1035,12 @@ handle_dma_device_feature_get(vfu_ctx_t *vfu_ctx, vfu_msg_t *msg,
struct vfio_user_device_feature_dma_logging_report *rep =
(void *)req->data;

dma_controller_t *dma = vfu_ctx->dma;

if (dma == NULL) {
    vfu_log(vfu_ctx, LOG_ERR, "DMA not enabled for DMA device feature");
    return ERROR_INT(EINVAL);
}

ssize_t bitmap_size = get_bitmap_size(rep->length, rep->page_size);
if (bitmap_size < 0) {
return bitmap_size;
@@ -1054,10 +1065,6 @@

res->argsz = msg->out.iov.iov_len;

dma_controller_t *dma = vfu_ctx->dma;

assert(dma != NULL);

char *bitmap = (char *)msg->out.iov.iov_base + header_size;

int ret = dma_controller_dirty_page_get(dma,
@@ -1087,12 +1094,12 @@ handle_dma_device_feature_set(vfu_ctx_t *vfu_ctx, uint32_t feature,
(void *)res->data;
return dma_controller_dirty_page_logging_start(dma,
ctl->page_size);
} else {
assert(feature == VFIO_DEVICE_FEATURE_DMA_LOGGING_STOP);

dma_controller_dirty_page_logging_stop(dma);
return 0;
}

assert(feature == VFIO_DEVICE_FEATURE_DMA_LOGGING_STOP);

dma_controller_dirty_page_logging_stop(dma);
return 0;
}

static int
Expand All @@ -1102,6 +1109,7 @@ handle_device_feature(vfu_ctx_t *vfu_ctx, vfu_msg_t *msg)
assert(msg != NULL);

if (msg->in.iov.iov_len < sizeof(struct vfio_user_device_feature)) {
vfu_log(vfu_ctx, LOG_ERR, "message too short");
return ERROR_INT(EINVAL);
}

@@ -1113,65 +1121,90 @@ handle_device_feature(vfu_ctx_t *vfu_ctx, vfu_msg_t *msg)
uint32_t supported_ops = device_feature_flags_supported(vfu_ctx, feature);

if ((req->flags & supported_ops) != operations || supported_ops == 0) {
vfu_log(vfu_ctx, LOG_ERR, "unsupported operation(s)");
return ERROR_INT(EINVAL);
}

ssize_t ret;

if (req->flags & VFIO_DEVICE_FEATURE_PROBE) {
msg->out.iov.iov_len = msg->in.iov.iov_len;

if (req->argsz < msg->out.iov.iov_len) {
msg->out.iov.iov_len = 0;
return ERROR_INT(EINVAL);
switch (operations) {
case VFIO_DEVICE_FEATURE_GET: {
if (is_migration_feature(feature)) {
ret = handle_migration_device_feature_get(vfu_ctx, msg, req);
} else if (is_dma_feature(feature)) {
ret = handle_dma_device_feature_get(vfu_ctx, msg, req);
} else {
vfu_log(vfu_ctx, LOG_ERR, "unsupported feature %d for GET",
feature);
return ERROR_INT(EINVAL);
}
break;
}

msg->out.iov.iov_base = malloc(msg->out.iov.iov_len);
case VFIO_DEVICE_FEATURE_SET: {
msg->out.iov.iov_len = msg->in.iov.iov_len;

if (msg->out.iov.iov_base == NULL) {
return ERROR_INT(ENOMEM);
}
if (req->argsz < msg->out.iov.iov_len) {
vfu_log(vfu_ctx, LOG_ERR, "bad argsz (%d<%ld)", req->argsz,
msg->out.iov.iov_len);
msg->out.iov.iov_len = 0;
return ERROR_INT(EINVAL);
}

memcpy(msg->out.iov.iov_base, msg->in.iov.iov_base,
msg->out.iov.iov_len);
msg->out.iov.iov_base = malloc(msg->out.iov.iov_len);

ret = 0;
} else if (req->flags & VFIO_DEVICE_FEATURE_GET) {
if (is_migration_feature(feature)) {
ret = handle_migration_device_feature_get(vfu_ctx, msg, req);
} else if (is_dma_feature(feature)) {
ret = handle_dma_device_feature_get(vfu_ctx, msg, req);
} else {
return ERROR_INT(EINVAL);
}
} else if (req->flags & VFIO_DEVICE_FEATURE_SET) {
msg->out.iov.iov_len = msg->in.iov.iov_len;
if (msg->out.iov.iov_base == NULL) {
return ERROR_INT(ENOMEM);
}

if (req->argsz < msg->out.iov.iov_len) {
msg->out.iov.iov_len = 0;
return ERROR_INT(EINVAL);
}
memcpy(msg->out.iov.iov_base, msg->in.iov.iov_base,
msg->out.iov.iov_len);

msg->out.iov.iov_base = malloc(msg->out.iov.iov_len);
struct vfio_user_device_feature *res = msg->out.iov.iov_base;

if (msg->out.iov.iov_base == NULL) {
return ERROR_INT(ENOMEM);
if (is_migration_feature(feature)) {
ret = handle_migration_device_feature_set(vfu_ctx, feature, res);
} else if (is_dma_feature(feature)) {
ret = handle_dma_device_feature_set(vfu_ctx, feature, res);
} else {
vfu_log(vfu_ctx, LOG_ERR, "unsupported feature %d for SET",
feature);
return ERROR_INT(EINVAL);
}
break;
}

memcpy(msg->out.iov.iov_base, msg->in.iov.iov_base,
msg->out.iov.iov_len);
default: {
/*
* PROBE allows GET/SET to also be set (to specify which operations
* we want to probe the feature for), so we only check that PROBE
* is set, not that it is the only operation flag set.
*/
if (!(operations & VFIO_DEVICE_FEATURE_PROBE)) {
vfu_log(vfu_ctx, LOG_ERR, "no operation specified");
return ERROR_INT(EINVAL);
}

struct vfio_user_device_feature *res = msg->out.iov.iov_base;
msg->out.iov.iov_len = msg->in.iov.iov_len;

if (is_migration_feature(feature)) {
ret = handle_migration_device_feature_set(vfu_ctx, feature, res);
} else if (is_dma_feature(feature)) {
ret = handle_dma_device_feature_set(vfu_ctx, feature, res);
} else {
return ERROR_INT(EINVAL);
if (req->argsz < msg->out.iov.iov_len) {
vfu_log(vfu_ctx, LOG_ERR, "bad argsz (%d<%ld)", req->argsz,
msg->out.iov.iov_len);
msg->out.iov.iov_len = 0;
return ERROR_INT(EINVAL);
}

msg->out.iov.iov_base = malloc(msg->out.iov.iov_len);

if (msg->out.iov.iov_base == NULL) {
return ERROR_INT(ENOMEM);
}

memcpy(msg->out.iov.iov_base, msg->in.iov.iov_base,
msg->out.iov.iov_len);

ret = 0;
}
} else {
return ERROR_INT(EINVAL);
}

return ret;
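
The restructured handler above keys off which operation bits are set in `req->flags`. A condensed sketch of that dispatch, assuming the kernel's `VFIO_DEVICE_FEATURE_*` bit layout from `<linux/vfio.h>`; `dispatch` is a hypothetical stand-in for `handle_device_feature`:

```c
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed flag layout, as in the kernel's <linux/vfio.h>. */
#define VFIO_DEVICE_FEATURE_MASK   (0xffff)  /* low bits select the feature */
#define VFIO_DEVICE_FEATURE_GET    (1 << 16)
#define VFIO_DEVICE_FEATURE_SET    (1 << 17)
#define VFIO_DEVICE_FEATURE_PROBE  (1 << 18)

static int
dispatch(uint32_t flags)
{
    uint32_t feature = flags & VFIO_DEVICE_FEATURE_MASK;
    uint32_t operations = flags & ~VFIO_DEVICE_FEATURE_MASK;

    switch (operations) {
    case VFIO_DEVICE_FEATURE_GET:
        printf("GET feature %u\n", feature);
        return 0;
    case VFIO_DEVICE_FEATURE_SET:
        printf("SET feature %u\n", feature);
        return 0;
    default:
        /*
         * PROBE may be combined with GET and/or SET (probing for those
         * operations), so only require that the PROBE bit itself is set.
         */
        if (!(operations & VFIO_DEVICE_FEATURE_PROBE)) {
            errno = EINVAL;
            return -1;
        }
        printf("PROBE feature %u\n", feature);
        return 0;
    }
}

int
main(void)
{
    dispatch(VFIO_DEVICE_FEATURE_GET | 1);  /* plain GET of feature 1 */
    dispatch(VFIO_DEVICE_FEATURE_PROBE | VFIO_DEVICE_FEATURE_SET | 1);
    return 0;
}
```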
6 changes: 6 additions & 0 deletions lib/migration.c
@@ -292,13 +292,15 @@ handle_mig_data_read(vfu_ctx_t *vfu_ctx, vfu_msg_t *msg)
assert(msg != NULL);

if (msg->in.iov.iov_len < sizeof(struct vfio_user_mig_data)) {
vfu_log(vfu_ctx, LOG_ERR, "message too short");
return ERROR_INT(EINVAL);
}

struct migration *migr = vfu_ctx->migration;
struct vfio_user_mig_data *req = msg->in.iov.iov_base;

if (vfu_ctx->migration == NULL) {
vfu_log(vfu_ctx, LOG_ERR, "migration not enabled");
return ERROR_INT(EINVAL);
}

@@ -333,6 +335,7 @@ handle_mig_data_read(vfu_ctx_t *vfu_ctx, vfu_msg_t *msg)
ssize_t ret = migr->callbacks.read_data(vfu_ctx, &res->data, req->size);

if (ret < 0) {
vfu_log(vfu_ctx, LOG_ERR, "read_data callback failed");
msg->out.iov.iov_len = 0;
return ret;
}
@@ -350,13 +353,15 @@ handle_mig_data_write(vfu_ctx_t *vfu_ctx, vfu_msg_t *msg)
assert(msg != NULL);

if (msg->in.iov.iov_len < sizeof(struct vfio_user_mig_data)) {
vfu_log(vfu_ctx, LOG_ERR, "message too short");
return ERROR_INT(EINVAL);
}

struct migration *migr = vfu_ctx->migration;
struct vfio_user_mig_data *req = msg->in.iov.iov_base;

if (vfu_ctx->migration == NULL) {
vfu_log(vfu_ctx, LOG_ERR, "migration not enabled");
return ERROR_INT(EINVAL);
}

@@ -381,6 +386,7 @@ handle_mig_data_write(vfu_ctx_t *vfu_ctx, vfu_msg_t *msg)
ssize_t ret = migr->callbacks.write_data(vfu_ctx, &req->data, req->size);

if (ret < 0) {
vfu_log(vfu_ctx, LOG_ERR, "write_data callback failed");
return ret;
} else if (ret != req->size) {
vfu_log(vfu_ctx, LOG_ERR, "migration data partial write");
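
The `read_data`/`write_data` callbacks these handlers invoke are supplied by the device (the diff shows them called as `callbacks.read_data(vfu_ctx, buf, size)`). Below is a hedged sketch of such callbacks for a device whose migration state is a single flat buffer; the buffer, cursors, function names, and the stand-in `vfu_ctx_t` typedef are all illustrative, and registration goes through the library's migration-callbacks setup, whose exact struct fields should be checked against the installed headers.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

/*
 * Stand-in for the library's opaque context type so this sketch compiles on
 * its own; real callbacks receive a vfu_ctx_t * from libvfio-user.
 */
typedef struct vfu_ctx vfu_ctx_t;

/*
 * Illustrative device: its migration state is one flat 4 KiB buffer with
 * separate read/write cursors. Real devices choose their own serialization.
 */
static uint8_t dev_state[4096];
static size_t read_off;
static size_t write_off;

/* Sketch of a read_data callback: hand out the next chunk of state. */
static ssize_t
my_read_data(vfu_ctx_t *vfu_ctx, void *buf, uint64_t size)
{
    (void)vfu_ctx;
    size_t left = sizeof(dev_state) - read_off;
    size_t n = size < left ? (size_t)size : left;

    memcpy(buf, dev_state + read_off, n);
    read_off += n;
    /* Short reads (eventually 0) tell the client the state is drained. */
    return n;
}

/* Sketch of a write_data callback: absorb a chunk on the destination. */
static ssize_t
my_write_data(vfu_ctx_t *vfu_ctx, void *buf, uint64_t size)
{
    (void)vfu_ctx;
    if (size > sizeof(dev_state) - write_off) {
        return -1;  /* more state than this device can accept */
    }
    memcpy(dev_state + write_off, buf, size);
    write_off += size;
    /* Anything other than `size` is treated as a partial-write error. */
    return size;
}

int
main(void)
{
    uint8_t chunk[256];
    printf("read %zd bytes\n", my_read_data(NULL, chunk, sizeof(chunk)));
    printf("wrote %zd bytes\n", my_write_data(NULL, chunk, sizeof(chunk)));
    return 0;
}
```

Returning exactly `size` from `write_data` matters here: the hunk above logs "migration data partial write" and fails the request when the callback returns any other count.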
