Skip to content

Commit

Permalink
zvol_os: Add copy offload support
Browse files Browse the repository at this point in the history
Signed-off-by: Ameer Hamza <[email protected]>
  • Loading branch information
ixhamza committed Dec 6, 2024
1 parent 0b69a86 commit 2d6deeb
Show file tree
Hide file tree
Showing 6 changed files with 416 additions and 4 deletions.
32 changes: 32 additions & 0 deletions config/kernel-blkdev.m4
Original file line number Diff line number Diff line change
Expand Up @@ -132,6 +132,36 @@ AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_BLK_MODE_T], [
])
])

dnl #
dnl # Upstream patch for blkdev copy offload support
dnl #
dnl # Builds a minimal conftest program that calls blkdev_copy_offload()
dnl # with the expected 8-argument signature (in/out bdev, in/out offset,
dnl # length, endio callback, private cookie, gfp flags).  If it links,
dnl # the running kernel carries the copy-offload patchset.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV_COPY_OFFLOAD], [
	ZFS_LINUX_TEST_SRC([blkdev_copy_offload], [
		#include <linux/bio.h>
		#include <linux/blkdev.h>
	], [
		struct block_device *bdev_in = NULL, *bdev_out = NULL;
		loff_t pos_in = 0, pos_out = 0;
		ssize_t ret __attribute__ ((unused));
		ssize_t len = 0;
		void *private = NULL;
		void (*endio)(void *, int, ssize_t) = NULL;
		ret = blkdev_copy_offload(bdev_in, pos_in, pos_out, len,
		    endio, private, GFP_KERNEL, bdev_out);
	])
])

dnl #
dnl # Evaluate the blkdev_copy_offload conftest built by the matching
dnl # _SRC_ macro above and define HAVE_BLKDEV_COPY_OFFLOAD on success.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_BLKDEV_COPY_OFFLOAD], [
	AC_MSG_CHECKING([whether blkdev_copy_offload exists])
	ZFS_LINUX_TEST_RESULT([blkdev_copy_offload], [
		AC_MSG_RESULT(yes)
		dnl # Fixed typo in the define description: "exits" -> "exists"
		AC_DEFINE(HAVE_BLKDEV_COPY_OFFLOAD, 1,
		    [blkdev_copy_offload exists])
	], [
		AC_MSG_RESULT(no)
	])
])

dnl #
dnl # 2.6.38 API change,
dnl # Added blkdev_put()
Expand Down Expand Up @@ -759,6 +789,7 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_BLKDEV], [
ZFS_AC_KERNEL_SRC_BLKDEV_DISK_CHECK_MEDIA_CHANGE
ZFS_AC_KERNEL_SRC_BLKDEV_BLK_STS_RESV_CONFLICT
ZFS_AC_KERNEL_SRC_BLKDEV_BLK_MODE_T
ZFS_AC_KERNEL_SRC_BLKDEV_COPY_OFFLOAD
])

AC_DEFUN([ZFS_AC_KERNEL_BLKDEV], [
Expand All @@ -781,4 +812,5 @@ AC_DEFUN([ZFS_AC_KERNEL_BLKDEV], [
ZFS_AC_KERNEL_BLKDEV_DISK_CHECK_MEDIA_CHANGE
ZFS_AC_KERNEL_BLKDEV_BLK_STS_RESV_CONFLICT
ZFS_AC_KERNEL_BLKDEV_BLK_MODE_T
ZFS_AC_KERNEL_BLKDEV_COPY_OFFLOAD
])
3 changes: 3 additions & 0 deletions include/sys/zvol_impl.h
Original file line number Diff line number Diff line change
Expand Up @@ -83,6 +83,9 @@ void zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off,
uint64_t len);
void zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, uint64_t offset,
uint64_t size, boolean_t commit);
void zvol_log_clone_range(zilog_t *zilog, dmu_tx_t *tx, int txtype,
uint64_t off, uint64_t len, uint64_t blksz, const blkptr_t *bps,
size_t nbps);
int zvol_get_data(void *arg, uint64_t arg2, lr_write_t *lr, char *buf,
struct lwb *lwb, zio_t *zio);
int zvol_init_impl(void);
Expand Down
270 changes: 270 additions & 0 deletions module/os/linux/zfs/zvol_os.c
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,7 @@
#include <sys/zvol.h>
#include <sys/zvol_impl.h>
#include <cityhash.h>
#include <sys/zfs_znode.h>

#include <linux/blkdev_compat.h>
#include <linux/task_io_accounting_ops.h>
Expand Down Expand Up @@ -82,6 +83,7 @@ static boolean_t zvol_use_blk_mq = B_FALSE;
static unsigned int zvol_blk_mq_blocks_per_thread = 8;

static unsigned int zvol_num_taskqs = 0;
extern int zfs_bclone_wait_dirty;

#ifndef BLKDEV_DEFAULT_RQ
/* BLKDEV_MAX_RQ was renamed to BLKDEV_DEFAULT_RQ in the 5.16 kernel */
Expand Down Expand Up @@ -496,6 +498,244 @@ zvol_read_task(void *arg)
zv_request_task_free(task);
}

#ifdef HAVE_BLKDEV_COPY_OFFLOAD
/*
 * Service a block-layer copy-offload request by cloning blocks from a
 * source zvol range into a destination zvol range using ZFS block
 * cloning (BRT).  Handles both the bio-based path (zvr->bio set) and
 * the request-based blk-mq path (zvr->rq set).  The source zvol is the
 * one the request was submitted to; the destination zvol is recovered
 * from the offload payload's driver_private pointer.  Completion
 * (success or error) is always signalled through zvol_end_io().
 */
static void zvol_clone_range(zv_request_t *zvr)
{
	/* Both ends default to the submitting zvol; dst is fixed up below. */
	zvol_state_t *zv_src = zvr->zv, *zv_dst = zvr->zv;
	struct request *req = zvr->rq;
	struct bio *bio = zvr->bio;
	zilog_t *zilog_dst;
	zfs_uio_t uio_src, uio_dst;
	zfs_locked_range_t *inlr, *outlr;
	objset_t *inos, *outos;
	dmu_tx_t *tx;
	blkptr_t *bps;
	size_t maxblocks;
	uint64_t inoff, outoff, len = 0;
	/* Pre-load EINVAL so early bail-outs report an invalid request. */
	int error = EINVAL, seg = 1;
	struct blkdev_copy_offload_io *offload_io;

	memset(&uio_src, 0, sizeof (zfs_uio_t));
	memset(&uio_dst, 0, sizeof (zfs_uio_t));

	if (bio) {
		/*
		 * Bio-based path: this bio describes the source and its
		 * bi_private carries the offload payload, which in turn
		 * points at the destination bio and destination zvol.
		 */
		offload_io = bio->bi_private;
		zv_dst = offload_io->driver_private;
		/* Source and destination ranges must be the same size. */
		if (bio->bi_iter.bi_size !=
		    offload_io->dst_bio->bi_iter.bi_size) {
			zvol_end_io(bio, req, -SET_ERROR(error));
			return;
		}
		zfs_uio_bvec_init(&uio_src, bio, NULL);
		zfs_uio_bvec_init(&uio_dst, offload_io->dst_bio, NULL);
		len = bio->bi_iter.bi_size;
	} else {
		/*
		 * Request-based path.  The first bio contains information
		 * about the destination and the second (last physical
		 * segment) contains information about the source.
		 */
		struct bio *bio_temp;
		__rq_for_each_bio(bio_temp, req) {
			if (seg == blk_rq_nr_phys_segments(req)) {
				/* Last segment: the source side. */
				offload_io = bio_temp->bi_private;
				zfs_uio_bvec_init(&uio_src, bio_temp, NULL);
				/*
				 * len was captured from the destination
				 * bio; both sides must agree.
				 */
				if (len != bio_temp->bi_iter.bi_size) {
					zvol_end_io(bio, req,
					    -SET_ERROR(error));
					return;
				}
				if (offload_io && offload_io->driver_private)
					zv_dst = offload_io->driver_private;
			} else {
				/* Destination side. */
				zfs_uio_bvec_init(&uio_dst, bio_temp, NULL);
				len = bio_temp->bi_iter.bi_size;
			}
			seg++;
		}
	}

	if (!zv_src || !zv_dst) {
		zvol_end_io(bio, req, -SET_ERROR(error));
		return;
	}

	/*
	 * Lazily open the destination ZIL on first write, using the
	 * double-checked pattern under zv_suspend_lock (matches the
	 * zvol write path's convention).
	 */
	rw_enter(&zv_dst->zv_suspend_lock, RW_READER);
	if (zv_dst->zv_zilog == NULL) {
		rw_exit(&zv_dst->zv_suspend_lock);
		rw_enter(&zv_dst->zv_suspend_lock, RW_WRITER);
		if (zv_dst->zv_zilog == NULL) {
			zv_dst->zv_zilog = zil_open(zv_dst->zv_objset,
			    zvol_get_data, &zv_dst->zv_kstat.dk_zil_sums);
			zv_dst->zv_flags |= ZVOL_WRITTEN_TO;
			/* ZIL replay must already have completed. */
			VERIFY0((zv_dst->zv_zilog->zl_header->zh_flags &
			    ZIL_REPLAY_NEEDED));
		}
		rw_downgrade(&zv_dst->zv_suspend_lock);
	}
	if (zv_src != zv_dst)
		rw_enter(&zv_src->zv_suspend_lock, RW_READER);

	inoff = uio_src.uio_loffset;
	outoff = uio_dst.uio_loffset;
	inos = zv_src->zv_objset;
	outos = zv_dst->zv_objset;

	/*
	 * Sanity checks (mirrors zfs_clone_range() for regular files):
	 * block cloning must be enabled, both zvols must live in the
	 * same pool with matching encryption state and block size.
	 */
	if (!spa_feature_is_enabled(dmu_objset_spa(outos),
	    SPA_FEATURE_BLOCK_CLONING)) {
		error = EOPNOTSUPP;
		goto out;
	}
	if (dmu_objset_spa(inos) != dmu_objset_spa(outos)) {
		error = EXDEV;
		goto out;
	}
	if (inos->os_encrypted != outos->os_encrypted) {
		error = EXDEV;
		goto out;
	}
	if (zv_src->zv_volblocksize != zv_dst->zv_volblocksize) {
		error = EINVAL;
		goto out;
	}
	if (inoff >= zv_src->zv_volsize || outoff >= zv_dst->zv_volsize) {
		/* Nothing to do beyond either device's end; not an error. */
		error = 0;
		goto out;
	}

	/*
	 * Do not read beyond source boundary
	 */
	if (len > zv_src->zv_volsize - inoff)
		len = zv_src->zv_volsize - inoff;
	if (len == 0) {
		error = 0;
		goto out;
	}

	/*
	 * No overlapping if we are cloning within the same file
	 */
	if (zv_src == zv_dst) {
		if (inoff < outoff + len && outoff < inoff + len) {
			error = EINVAL;
			goto out;
		}
	}

	/*
	 * Block size must be power-of-2 if destination offset != 0.
	 * There can be no multiple blocks of non-power-of-2 size.
	 */
	if (outoff != 0 && !ISP2(zv_src->zv_volblocksize)) {
		error = EINVAL;
		goto out;
	}

	/*
	 * Offsets and length must be at block boundaries
	 */
	if ((inoff % zv_src->zv_volblocksize) != 0 ||
	    (outoff % zv_dst->zv_volblocksize) != 0) {
		error = EINVAL;
		goto out;
	}

	/*
	 * Length must be multiple of block size, except for the end of the file
	 */
	if ((len % zv_src->zv_volblocksize) != 0 && (len < zv_src->zv_volsize -
	    inoff || len < zv_dst->zv_volsize - outoff)) {
		error = EINVAL;
		goto out;
	}

	zilog_dst = zv_dst->zv_zilog;
	/* Cap each iteration at what one ZIL clone record can describe. */
	maxblocks = zil_max_log_data(zilog_dst, sizeof (lr_clone_range_t)) /
	    sizeof (bps[0]);
	bps = vmem_alloc(sizeof (bps[0]) * maxblocks, KM_SLEEP);
	/*
	 * Maintain predictable lock order.
	 */
	if (zv_src < zv_dst || (zv_src == zv_dst && inoff < outoff)) {
		inlr = zfs_rangelock_enter(&zv_src->zv_rangelock, inoff, len,
		    RL_READER);
		outlr = zfs_rangelock_enter(&zv_dst->zv_rangelock, outoff, len,
		    RL_WRITER);
	} else {
		outlr = zfs_rangelock_enter(&zv_dst->zv_rangelock, outoff, len,
		    RL_WRITER);
		inlr = zfs_rangelock_enter(&zv_src->zv_rangelock, inoff, len,
		    RL_READER);
	}

	/* Clone up to maxblocks source blocks per loop iteration. */
	while (len > 0) {
		uint64_t size, last_synced_txg;
		size_t nbps = maxblocks;
		size = MIN(zv_src->zv_volblocksize * maxblocks, len);
		last_synced_txg = spa_last_synced_txg(
		    dmu_objset_spa(zv_src->zv_objset));
		error = dmu_read_l0_bps(zv_src->zv_objset, ZVOL_OBJ, inoff,
		    size, bps, &nbps);
		if (error != 0) {
			/*
			 * If we are trying to clone a block that was created
			 * in the current transaction group, the error will be
			 * EAGAIN here. Based on zfs_bclone_wait_dirty either
			 * return a shortened range to the caller so it can
			 * fallback, or wait for the next TXG and check again.
			 */
			if (error == EAGAIN && zfs_bclone_wait_dirty) {
				txg_wait_synced(dmu_objset_pool
				    (zv_src->zv_objset), last_synced_txg + 1);
				continue;
			}
			break;
		}

		tx = dmu_tx_create(zv_dst->zv_objset);
		dmu_tx_hold_clone_by_dnode(tx, zv_dst->zv_dn, outoff, size);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error != 0) {
			dmu_tx_abort(tx);
			break;
		}
		error = dmu_brt_clone(zv_dst->zv_objset, ZVOL_OBJ, outoff, size,
		    tx, bps, nbps);
		if (error != 0) {
			/* Tx was assigned; it must be committed, not aborted. */
			dmu_tx_commit(tx);
			break;
		}
		zvol_log_clone_range(zilog_dst, tx, TX_CLONE_RANGE, outoff,
		    size, zv_src->zv_volblocksize, bps, nbps);
		dmu_tx_commit(tx);
		inoff += size;
		outoff += size;
		len -= size;
	}
	vmem_free(bps, sizeof (bps[0]) * maxblocks);
	zfs_rangelock_exit(outlr);
	zfs_rangelock_exit(inlr);
	/* Honor sync=always on the destination dataset. */
	if (error == 0 && zv_dst->zv_objset->os_sync == ZFS_SYNC_ALWAYS) {
		zil_commit(zilog_dst, ZVOL_OBJ);
	}
out:
	if (zv_src != zv_dst)
		rw_exit(&zv_src->zv_suspend_lock);
	rw_exit(&zv_dst->zv_suspend_lock);
	zvol_end_io(bio, req, -SET_ERROR(error));
}

/*
 * Taskq callback: perform a deferred copy-offload clone, then release
 * the request task that carried it.
 */
static void
zvol_clone_range_task(void *arg)
{
	zv_request_task_t *zvt = arg;

	zvol_clone_range(&zvt->zvr);
	zv_request_task_free(zvt);
}
#endif

/*
* Process a BIO or request
Expand Down Expand Up @@ -555,6 +795,24 @@ zvol_request_impl(zvol_state_t *zv, struct bio *bio, struct request *rq,
blk_mq_hw_queue);
tq_idx = taskq_hash % ztqs->tqs_cnt;

#ifdef HAVE_BLKDEV_COPY_OFFLOAD
if ((bio && op_is_copy(bio_op(bio))) ||
(rq && op_is_copy(req_op(rq)))) {
if (unlikely(zv->zv_flags & ZVOL_RDONLY)) {
zvol_end_io(bio, rq, -SET_ERROR(EROFS));
goto out;
}
if (force_sync) {
zvol_clone_range(&zvr);
} else {
task = zv_request_task_create(zvr);
taskq_dispatch_ent(ztqs->tqs_taskq[tq_idx],
zvol_clone_range_task, task, 0, &task->ent);
}
goto out;
}
#endif

if (rw == WRITE) {
if (unlikely(zv->zv_flags & ZVOL_RDONLY)) {
zvol_end_io(bio, rq, -SET_ERROR(EROFS));
Expand Down Expand Up @@ -1607,6 +1865,9 @@ zvol_os_create_minor(const char *name)
uint64_t hash = zvol_name_hash(name);
uint64_t volthreading;
bool replayed_zil = B_FALSE;
#ifdef HAVE_BLKDEV_COPY_OFFLOAD
struct queue_limits *lim;
#endif

if (zvol_inhibit_dev)
return (0);
Expand Down Expand Up @@ -1667,6 +1928,15 @@ zvol_os_create_minor(const char *name)

set_capacity(zv->zv_zso->zvo_disk, zv->zv_volsize >> 9);

#ifdef HAVE_BLKDEV_COPY_OFFLOAD
/*
* We've seen SCST sending 256 MB XCOPY request for large IOs
*/
lim = &zv->zv_zso->zvo_queue->limits;
lim->max_copy_hw_sectors = (256 * 1024 * 1024) >> 9;
lim->max_copy_sectors = (256 * 1024 * 1024) >> 9;
#endif

#ifdef QUEUE_FLAG_DISCARD
blk_queue_flag_set(QUEUE_FLAG_DISCARD, zv->zv_zso->zvo_queue);
#endif
Expand Down
2 changes: 1 addition & 1 deletion module/zfs/zfs_vnops.c
Original file line number Diff line number Diff line change
Expand Up @@ -71,7 +71,7 @@ int zfs_bclone_enabled = 1;
* a copy of the file and is therefore not the default. However, in certain
* scenarios this behavior may be desirable so a tunable is provided.
*/
static int zfs_bclone_wait_dirty = 0;
int zfs_bclone_wait_dirty = 0;

/*
* Enable Direct I/O. If this setting is 0, then all I/O requests will be
Expand Down
Loading

0 comments on commit 2d6deeb

Please sign in to comment.