Refactor zvol request processing to pass uio
This makes zvol_write()/zvol_read() operate on a uio_t pointer, like
their illumos analogs, while cleaning up the code.

Signed-off-by: Richard Yao <ryao@gentoo.org>
ryao committed Feb 6, 2016
1 parent 6758989 commit 0213a82
Showing 1 changed file with 43 additions and 58 deletions.
module/zfs/zvol.c: 43 additions & 58 deletions
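
For orientation before the diff: a uio_t acts as a cursor over an I/O request (uio_loffset is the current byte offset, uio_resid the bytes still to transfer), and consumers such as dmu_read_uio_dbuf()/dmu_write_uio_dbuf() advance that cursor as they copy data, which is what lets the while loops in the patched functions run until the request drains or the end of the volume is reached. Below is a minimal userspace sketch of that contract; the mini_* names and the sizes are hypothetical stand-ins, not code from this commit:

/* Illustrative userspace model of the uio cursor contract; the mini_*
 * names are hypothetical and only mirror the uio_t fields used below. */
#include <stdint.h>
#include <stdio.h>

typedef struct {
	int64_t  uio_loffset;	/* current byte offset into the volume */
	uint64_t uio_resid;	/* bytes still to transfer */
} mini_uio_t;

/* Stands in for dmu_read_uio_dbuf()/dmu_write_uio_dbuf(): copy `bytes`
 * and advance the cursor so the caller's loop makes progress. */
static int
mini_dmu_rw(mini_uio_t *uio, uint64_t bytes)
{
	uio->uio_loffset += bytes;
	uio->uio_resid -= bytes;
	return (0);
}

int
main(void)
{
	mini_uio_t uio = { .uio_loffset = 0, .uio_resid = 300 * 1024 };
	uint64_t volsize = 256 * 1024;	/* pretend 256 KiB zvol */
	uint64_t chunk = 64 * 1024;	/* stand-in for DMU_MAX_ACCESS >> 1 */

	/* Same shape as the zvol_read()/zvol_write() loops in the diff. */
	while (uio.uio_resid > 0 && uio.uio_loffset < (int64_t)volsize) {
		uint64_t bytes = uio.uio_resid < chunk ? uio.uio_resid : chunk;

		if (bytes > volsize - uio.uio_loffset)	/* don't run past the end */
			bytes = volsize - uio.uio_loffset;

		(void) mini_dmu_rw(&uio, bytes);
		printf("copied %llu bytes, offset now %lld, %llu left\n",
		    (unsigned long long)bytes, (long long)uio.uio_loffset,
		    (unsigned long long)uio.uio_resid);
	}
	return (0);
}

The patch below performs exactly this hoisting: the cursor is built once in zvol_request() and the read/write paths only consume it.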
@@ -601,42 +601,18 @@ zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, uint64_t offset,
 }
 
 static int
-zvol_write(struct bio *bio)
+zvol_write(zvol_state_t *zv, uio_t *uio, boolean_t sync)
 {
-	uio_t uio;
-	zvol_state_t *zv = bio->bi_bdev->bd_disk->private_data;
 	uint64_t volsize = zv->zv_volsize;
 	rl_t *rl;
 	int error = 0;
-	boolean_t sync;
 
-	uio.uio_resid = BIO_BI_SIZE(bio);
-
-	/*
-	 * Some requests are just for flush and nothing else.
-	 */
-	if (uio.uio_resid == 0) {
-		if (bio->bi_rw & VDEV_REQ_FLUSH)
-			zil_commit(zv->zv_zilog, ZVOL_OBJ);
-		return (0);
-	}
-
-	uio.uio_bvec = &bio->bi_io_vec[BIO_BI_IDX(bio)];
-	uio.uio_skip = BIO_BI_SKIP(bio);
-	uio.uio_iovcnt = bio->bi_vcnt - BIO_BI_IDX(bio);
-	uio.uio_loffset = BIO_BI_SECTOR(bio) << 9;
-	uio.uio_limit = MAXOFFSET_T;
-	uio.uio_segflg = UIO_BVEC;
-
-	sync = ((bio->bi_rw & (VDEV_REQ_FUA|VDEV_REQ_FLUSH)) ||
-	    zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);
-
-	rl = zfs_range_lock(&zv->zv_znode, uio.uio_loffset, uio.uio_resid,
+	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
 	    RL_WRITER);
 
-	while (uio.uio_resid > 0 && uio.uio_loffset < volsize) {
-		uint64_t bytes = MIN(uio.uio_resid, DMU_MAX_ACCESS >> 1);
-		uint64_t off = uio.uio_loffset;
+	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
+		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
+		uint64_t off = uio->uio_loffset;
 		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
 
 		if (bytes > volsize - off)	/* don't write past the end */
@@ -650,7 +626,7 @@ zvol_write(struct bio *bio)
 			dmu_tx_abort(tx);
 			break;
 		}
-		error = dmu_write_uio_dbuf(zv->zv_dbuf, &uio, bytes, tx);
+		error = dmu_write_uio_dbuf(zv->zv_dbuf, uio, bytes, tx);
 		if (error == 0)
 			zvol_log_write(zv, tx, off, bytes, sync);
 		dmu_tx_commit(tx);
@@ -740,36 +716,27 @@ zvol_discard(struct bio *bio)
 }
 
 static int
-zvol_read(struct bio *bio)
+zvol_read(zvol_state_t *zv, uio_t *uio)
 {
-	uio_t uio;
-	zvol_state_t *zv = bio->bi_bdev->bd_disk->private_data;
 	uint64_t volsize = zv->zv_volsize;
 	rl_t *rl;
 	int error = 0;
 
-	uio.uio_resid = BIO_BI_SIZE(bio);
-
-	if (uio.uio_resid == 0)
+	if (uio->uio_resid == 0)
 		return (0);
 
-	uio.uio_bvec = &bio->bi_io_vec[BIO_BI_IDX(bio)];
-	uio.uio_skip = BIO_BI_SKIP(bio);
-	uio.uio_iovcnt = bio->bi_vcnt - BIO_BI_IDX(bio);
-	uio.uio_loffset = BIO_BI_SECTOR(bio) << 9;
-	uio.uio_limit = MAXOFFSET_T;
-	uio.uio_segflg = UIO_BVEC;
-
-	rl = zfs_range_lock(&zv->zv_znode, uio.uio_loffset, uio.uio_resid,
+	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
 	    RL_READER);
-	while (uio.uio_resid > 0 && uio.uio_loffset < volsize) {
-		uint64_t bytes = MIN(uio.uio_resid, DMU_MAX_ACCESS >> 1);
+	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
+		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
 
 		/* don't read past the end */
-		if (bytes > volsize - uio.uio_loffset)
-			bytes = volsize - uio.uio_loffset;
+		if (bytes > volsize - uio->uio_loffset)
+			bytes = volsize - uio->uio_loffset;
 
-		error = dmu_read_uio_dbuf(zv->zv_dbuf, &uio, bytes);
+		error = dmu_read_uio_dbuf(zv->zv_dbuf, uio, bytes);
 		if (error) {
 			/* convert checksum errors into IO errors */
 			if (error == ECKSUM)
@@ -784,28 +751,35 @@ zvol_read(struct bio *bio)
 static MAKE_REQUEST_FN_RET
 zvol_request(struct request_queue *q, struct bio *bio)
 {
+	uio_t uio;
 	zvol_state_t *zv = q->queuedata;
 	fstrans_cookie_t cookie = spl_fstrans_mark();
-	uint64_t offset = BIO_BI_SECTOR(bio);
-	unsigned int sectors = bio_sectors(bio);
 	int rw = bio_data_dir(bio);
 #ifdef HAVE_GENERIC_IO_ACCT
 	unsigned long start = jiffies;
 #endif
 	int error = 0;
 
-	if (bio_has_data(bio) && offset + sectors >
-	    get_capacity(zv->zv_disk)) {
+	uio.uio_bvec = &bio->bi_io_vec[BIO_BI_IDX(bio)];
+	uio.uio_skip = BIO_BI_SKIP(bio);
+	uio.uio_resid = BIO_BI_SIZE(bio);
+	uio.uio_iovcnt = bio->bi_vcnt - BIO_BI_IDX(bio);
+	uio.uio_loffset = BIO_BI_SECTOR(bio) << 9;
+	uio.uio_limit = MAXOFFSET_T;
+	uio.uio_segflg = UIO_BVEC;
+
+	if (bio_has_data(bio) && uio.uio_loffset + uio.uio_resid >
+	    zv->zv_volsize) {
 		printk(KERN_INFO
-		    "%s: bad access: block=%llu, count=%lu\n",
+		    "%s: bad access: offset=%llu, size=%lu\n",
 		    zv->zv_disk->disk_name,
-		    (long long unsigned)offset,
-		    (long unsigned)sectors);
+		    (long long unsigned)uio.uio_loffset,
+		    (long unsigned)uio.uio_resid);
 		error = SET_ERROR(EIO);
 		goto out1;
 	}
 
-	generic_start_io_acct(rw, sectors, &zv->zv_disk->part0);
+	generic_start_io_acct(rw, bio_sectors(bio), &zv->zv_disk->part0);
 
 	if (rw == WRITE) {
 		if (unlikely(zv->zv_flags & ZVOL_RDONLY)) {
@@ -818,9 +792,20 @@ zvol_request(struct request_queue *q, struct bio *bio)
 			goto out2;
 		}
 
-		error = zvol_write(bio);
+		/*
+		 * Some requests are just for flush and nothing else.
+		 */
+		if (uio.uio_resid == 0) {
+			if (bio->bi_rw & VDEV_REQ_FLUSH)
+				zil_commit(zv->zv_zilog, ZVOL_OBJ);
+			goto out2;
+		}
+
+		error = zvol_write(zv, &uio,
+		    !!((bio->bi_rw & (VDEV_REQ_FUA|VDEV_REQ_FLUSH)) ||
+		    zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS));
 	} else
-		error = zvol_read(bio);
+		error = zvol_read(zv, &uio);
 
 out2:
 	generic_end_io_acct(rw, &zv->zv_disk->part0, start);
