Send / Recv Fixes following b52563
    This patch fixes several issues discovered after
    the encryption patch was merged:

    Fixed a bug where encrypted datasets could attempt
    to receive embedded data records.

    Fixed a bug where dirty records created by the recv
    code were not properly setting the dr_raw flag.

    Fixed a typo where a dmu_tx_commit() call was changed to
    dmu_tx_abort().

    Fixed a few error handling bugs unrelated to the
    encryption patch in dmu_recv_stream().

    Signed-off-by: Tom Caputi <[email protected]>
Tom Caputi authored and lundman committed Aug 23, 2017
1 parent 1b0174c commit df6ddaf
Showing 6 changed files with 87 additions and 47 deletions.
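Before the per-file diffs, a condensed view of the first fix: dmu_recv_stream() now refuses to receive a stream carrying embedded data (WRITE_EMBEDDED records) into an encrypted destination dataset. The fragment below is an excerpt of the hunk added to dmu_send.c further down, lightly reindented for readability (ra and featureflags are local state of dmu_recv_stream()); libzfs surfaces the resulting EINVAL with the matching "incompatible embedded data stream feature with encrypted receive" message shown in the libzfs_sendrecv.c hunk.

	/* embedded data is incompatible with encrypted datasets */
	if (ra.os->os_encrypted &&
	    (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)) {
		err = SET_ERROR(EINVAL);
		goto out;
	}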
6 changes: 6 additions & 0 deletions usr/src/lib/libzfs/common/libzfs_sendrecv.c
@@ -3537,6 +3537,8 @@ zfs_receive_one(libzfs_handle_t *hdl, int infd, const char *tosnap,
DMU_BACKUP_FEATURE_RESUMING;
boolean_t raw = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
DMU_BACKUP_FEATURE_RAW;
boolean_t embedded = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
DMU_BACKUP_FEATURE_EMBED_DATA;
stream_wantsnewfs = (drrb->drr_fromguid == NULL ||
(drrb->drr_flags & DRR_FLAG_CLONE) || originsnap) && !resuming;

@@ -3922,6 +3924,10 @@ zfs_receive_one(libzfs_handle_t *hdl, int infd, const char *tosnap,
*cp = '@';
break;
case EINVAL:
if (embedded && !raw)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"incompatible embedded data stream "
"feature with encrypted receive."));
(void) zfs_error(hdl, EZFS_BADSTREAM, errbuf);
break;
case ECKSUM:
3 changes: 2 additions & 1 deletion usr/src/man/man1m/zfs.1m
@@ -3086,7 +3086,8 @@ feature enabled.
If the
.Sy lz4_compress
feature is active on the sending system, then the receiving system must have
that feature enabled as well.
that feature enabled as well.
Note that streams generated using this flag cannot be received into an
encrypted dataset.
See
.Xr zpool-features 5
for details on ZFS feature flags and the
19 changes: 19 additions & 0 deletions usr/src/uts/common/fs/zfs/dmu.c
@@ -2018,6 +2018,25 @@ dmu_object_set_compress(objset_t *os, uint64_t object, uint8_t compress,
dnode_rele(dn, FTAG);
}

/*
* Dirty an object and set the dirty record's raw flag. This is used
* when writing raw data to an object that will not affect the
* encryption parameters, specifically during raw receives.
*/
int
dmu_object_dirty_raw(objset_t *os, uint64_t object, dmu_tx_t *tx)
{
dnode_t *dn;
int err;

err = dnode_hold(os, object, FTAG, &dn);
if (err)
return (err);
dmu_buf_will_change_crypt_params((dmu_buf_t *)dn->dn_dbuf, tx);
dnode_rele(dn, FTAG);
return (err);
}

int zfs_mdcomp_disable = 0;

/*
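The new dmu_object_dirty_raw() helper above is how the receive path now gets the raw flag onto its dirty records: before writing raw (already-encrypted) stream data into an object, the recv code dirties that object through this helper so the sync path can treat the data as already encrypted rather than re-encrypting it. The same two-line pattern appears in receive_object(), receive_write(), receive_write_byref() and receive_spill() in the dmu_send.c hunks below; a representative fragment from receive_write() (rwa is the receive_writer_arg, drrw the DRR_WRITE record, tx the assigned transaction):

	if (rwa->raw)
		VERIFY0(dmu_object_dirty_raw(rwa->os, drrw->drr_object, tx));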
85 changes: 50 additions & 35 deletions usr/src/uts/common/fs/zfs/dmu_send.c
@@ -293,12 +293,11 @@ dump_write(dmu_sendarg_t *dsp, dmu_object_type_t type, uint64_t object,
ASSERT(BP_IS_PROTECTED(bp));

/*
* This is a raw protected block so we set the encrypted
* flag. We need to pass along everything the receiving
* side will need to interpret this block, including the
* byteswap, salt, IV, and MAC.
* This is a raw protected block so we need to pass
* along everything the receiving side will need to
* interpret this block, including the byteswap, salt,
* IV, and MAC.
*/
drrw->drr_flags |= DRR_RAW_ENCRYPTED;
if (BP_SHOULD_BYTESWAP(bp))
drrw->drr_flags |= DRR_RAW_BYTESWAP;
zio_crypt_decode_params_bp(bp, drrw->drr_salt,
@@ -401,9 +400,9 @@ dump_spill(dmu_sendarg_t *dsp, const blkptr_t *bp, uint64_t object, void *data)
drrs->drr_toguid = dsp->dsa_toguid;

/* handle raw send fields */
if ((dsp->dsa_featureflags & DMU_BACKUP_FEATURE_RAW) != 0 &&
BP_IS_PROTECTED(bp)) {
drrs->drr_flags |= DRR_RAW_ENCRYPTED;
if (dsp->dsa_featureflags & DMU_BACKUP_FEATURE_RAW) {
ASSERT(BP_IS_PROTECTED(bp));

if (BP_SHOULD_BYTESWAP(bp))
drrs->drr_flags |= DRR_RAW_BYTESWAP;
drrs->drr_compressiontype = BP_GET_COMPRESS(bp);
@@ -508,9 +507,9 @@ dump_dnode(dmu_sendarg_t *dsp, const blkptr_t *bp, uint64_t object,
drro->drr_blksz > SPA_OLD_MAXBLOCKSIZE)
drro->drr_blksz = SPA_OLD_MAXBLOCKSIZE;

if ((dsp->dsa_featureflags & DMU_BACKUP_FEATURE_RAW) &&
BP_IS_PROTECTED(bp)) {
drro->drr_flags |= DRR_RAW_ENCRYPTED;
if ((dsp->dsa_featureflags & DMU_BACKUP_FEATURE_RAW)) {
ASSERT(BP_IS_ENCRYPTED(bp));

if (BP_SHOULD_BYTESWAP(bp))
drro->drr_flags |= DRR_RAW_BYTESWAP;

@@ -567,7 +566,6 @@ dump_object_range(dmu_sendarg_t *dsp, const blkptr_t *bp, uint64_t firstobj,
drror->drr_firstobj = firstobj;
drror->drr_numslots = numslots;
drror->drr_toguid = dsp->dsa_toguid;
drror->drr_flags |= DRR_RAW_ENCRYPTED;
if (BP_SHOULD_BYTESWAP(bp))
drror->drr_flags |= DRR_RAW_BYTESWAP;
zio_crypt_decode_params_bp(bp, drror->drr_salt, drror->drr_iv);
@@ -1684,15 +1682,13 @@ dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
return (error);
}
if (!origin->ds_is_snapshot) {
dsl_dataset_rele_flags(origin,
DS_HOLD_FLAG_DECRYPT, FTAG);
dsl_dataset_rele_flags(origin, dsflags, FTAG);
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (SET_ERROR(EINVAL));
}
if (dsl_dataset_phys(origin)->ds_guid != fromguid &&
fromguid != 0) {
dsl_dataset_rele_flags(origin,
DS_HOLD_FLAG_DECRYPT, FTAG);
dsl_dataset_rele_flags(origin, dsflags, FTAG);
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (SET_ERROR(ENODEV));
}
@@ -2081,6 +2077,7 @@ struct receive_writer_arg {
/* A map from guid to dataset to help handle dedup'd streams. */
avl_tree_t *guid_to_ds_map;
boolean_t resumable;
boolean_t raw;
uint64_t last_object, last_offset;
uint64_t bytes_read; /* bytes read when current record created */
};
@@ -2115,6 +2112,7 @@ struct receive_arg {
zio_cksum_t prev_cksum;
int err;
boolean_t byteswap;
boolean_t raw;
uint64_t featureflags;
/* Sorted list of objects not to issue prefetches for. */
struct objlist ignore_objlist;
@@ -2359,7 +2357,7 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
return (SET_ERROR(EINVAL));
}

if (DRR_IS_RAW_ENCRYPTED(drro->drr_flags)) {
if (rwa->raw) {
if (drro->drr_raw_bonuslen < drro->drr_bonuslen ||
drro->drr_indblkshift > SPA_MAXBLOCKSHIFT ||
drro->drr_nlevels > DN_MAX_LEVELS ||
@@ -2394,13 +2392,12 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
drro->drr_bonuslen);

/* nblkptr will be bounded by the bonus size and type */
if (DRR_IS_RAW_ENCRYPTED(drro->drr_flags) &&
nblkptr != drro->drr_nblkptr)
if (rwa->raw && nblkptr != drro->drr_nblkptr)
return (SET_ERROR(EINVAL));

if (drro->drr_blksz != doi.doi_data_block_size ||
nblkptr < doi.doi_nblkptr ||
(DRR_IS_RAW_ENCRYPTED(drro->drr_flags) &&
(rwa->raw &&
(indblksz != doi.doi_metadata_block_size ||
drro->drr_nlevels < doi.doi_indirection))) {
err = dmu_free_long_range(rwa->os, drro->drr_object,
@@ -2438,13 +2435,16 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
return (SET_ERROR(EINVAL));
}

if (rwa->raw)
VERIFY0(dmu_object_dirty_raw(rwa->os, drro->drr_object, tx));

dmu_object_set_checksum(rwa->os, drro->drr_object,
drro->drr_checksumtype, tx);
dmu_object_set_compress(rwa->os, drro->drr_object,
drro->drr_compress, tx);

/* handle more restrictive dnode structuring for raw recvs */
if (DRR_IS_RAW_ENCRYPTED(drro->drr_flags)) {
if (rwa->raw) {
/*
* Set the indirect block shift and nlevels. This will not fail
* because we ensured all of the blocks were free earlier if
@@ -2460,7 +2460,7 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
dmu_buf_t *db;
uint32_t flags = DMU_READ_NO_PREFETCH;

if (DRR_IS_RAW_ENCRYPTED(drro->drr_flags))
if (rwa->raw)
flags |= DMU_READ_NO_DECRYPT;

VERIFY0(dmu_bonus_hold_impl(rwa->os, drro->drr_object,
@@ -2474,7 +2474,7 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
* Raw bonus buffers have their byteorder determined by the
* DRR_OBJECT_RANGE record.
*/
if (rwa->byteswap && !DRR_IS_RAW_ENCRYPTED(drro->drr_flags)) {
if (rwa->byteswap && !rwa->raw) {
dmu_object_byteswap_t byteswap =
DMU_OT_BYTESWAP(drro->drr_bonustype);
dmu_ot_byteswap[byteswap].ob_func(db->db_data,
@@ -2550,6 +2550,10 @@ receive_write(struct receive_writer_arg *rwa, struct drr_write *drrw,
dmu_tx_abort(tx);
return (err);
}

if (rwa->raw)
VERIFY0(dmu_object_dirty_raw(rwa->os, drrw->drr_object, tx));

if (rwa->byteswap && !arc_is_encrypted(abuf) &&
arc_get_compression(abuf) == ZIO_COMPRESS_OFF) {
dmu_object_byteswap_t byteswap =
@@ -2616,9 +2620,8 @@ receive_write_byref(struct receive_writer_arg *rwa,
ref_os = rwa->os;
}

if (DRR_IS_RAW_ENCRYPTED(drrwbr->drr_flags)) {
if (rwa->raw)
flags |= DMU_READ_NO_DECRYPT;
}

/* may return either a regular db or an encrypted one */
err = dmu_buf_hold(ref_os, drrwbr->drr_refobject,
@@ -2636,7 +2639,8 @@ receive_write_byref(
return (err);
}

if (DRR_IS_RAW_ENCRYPTED(drrwbr->drr_flags)) {
if (rwa->raw) {
VERIFY0(dmu_object_dirty_raw(rwa->os, drrwbr->drr_object, tx));
dmu_copy_from_buf(rwa->os, drrwbr->drr_object,
drrwbr->drr_offset, dbp, tx);
} else {
@@ -2702,7 +2706,7 @@ receive_spill(struct receive_writer_arg *rwa, struct drr_spill *drrs,
drrs->drr_length > spa_maxblocksize(dmu_objset_spa(rwa->os)))
return (SET_ERROR(EINVAL));

if (DRR_IS_RAW_ENCRYPTED(drrs->drr_flags)) {
if (rwa->raw) {
if (!DMU_OT_IS_VALID(drrs->drr_type) ||
drrs->drr_compressiontype >= ZIO_COMPRESS_FUNCTIONS ||
drrs->drr_compressed_size == 0)
@@ -2730,6 +2734,8 @@ receive_spill(struct receive_writer_arg *rwa, struct drr_spill *drrs,
return (err);
}
dmu_buf_will_dirty(db_spill, tx);
if (rwa->raw)
VERIFY0(dmu_object_dirty_raw(rwa->os, drrs->drr_object, tx));

if (db_spill->db_size < drrs->drr_length)
VERIFY(0 == dbuf_spill_set_blksz(db_spill,
@@ -2795,7 +2801,7 @@ receive_object_range(
*/
if (drror->drr_numslots != DNODES_PER_BLOCK ||
P2PHASE(drror->drr_firstobj, DNODES_PER_BLOCK) != 0 ||
!DRR_IS_RAW_ENCRYPTED(drror->drr_flags))
!rwa->raw)
return (SET_ERROR(EINVAL));

offset = drror->drr_firstobj * sizeof (dnode_phys_t);
@@ -3075,7 +3081,7 @@ receive_read_record(struct receive_arg *ra)
arc_buf_t *abuf;
boolean_t is_meta = DMU_OT_IS_METADATA(drrw->drr_type);

if (DRR_IS_RAW_ENCRYPTED(drrw->drr_flags)) {
if (ra->raw) {
boolean_t byteorder = ZFS_HOST_BYTEORDER ^
!!DRR_IS_RAW_BYTESWAPPED(drrw->drr_flags) ^
ra->byteswap;
@@ -3159,7 +3165,7 @@ receive_read_record(struct receive_arg *ra)
int len = DRR_SPILL_PAYLOAD_SIZE(drrs);

/* DRR_SPILL records are either raw or uncompressed */
if (DRR_IS_RAW_ENCRYPTED(drrs->drr_flags)) {
if (ra->raw) {
boolean_t byteorder = ZFS_HOST_BYTEORDER ^
!!DRR_IS_RAW_BYTESWAPPED(drrs->drr_flags) ^
ra->byteswap;
@@ -3360,6 +3366,7 @@ dmu_recv_stream(dmu_recv_cookie_t *drc, vnode_t *vp, offset_t *voffp,
nvlist_t *begin_nvl = NULL;

ra.byteswap = drc->drc_byteswap;
ra.raw = drc->drc_raw;
ra.cksum = drc->drc_cksum;
ra.vp = vp;
ra.voff = *voffp;
@@ -3387,16 +3394,23 @@ dmu_recv_stream(dmu_recv_cookie_t *drc, vnode_t *vp, offset_t *voffp,
featureflags = DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);
ra.featureflags = featureflags;

/* embedded data is incompatible with encrypted datasets */
if (ra.os->os_encrypted &&
(featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)) {
err = SET_ERROR(EINVAL);
goto out;
}

/* if this stream is dedup'ed, set up the avl tree for guid mapping */
if (featureflags & DMU_BACKUP_FEATURE_DEDUP) {
minor_t minor;

if (cleanup_fd == -1) {
ra.err = SET_ERROR(EBADF);
err = SET_ERROR(EBADF);
goto out;
}
ra.err = zfs_onexit_fd_hold(cleanup_fd, &minor);
if (ra.err != 0) {
err = zfs_onexit_fd_hold(cleanup_fd, &minor);
if (err != 0) {
cleanup_fd = -1;
goto out;
}
@@ -3410,12 +3424,12 @@ dmu_recv_stream(dmu_recv_cookie_t *drc, vnode_t *vp, offset_t *voffp,
err = zfs_onexit_add_cb(minor,
free_guid_map_onexit, rwa.guid_to_ds_map,
action_handlep);
if (ra.err != 0)
if (err != 0)
goto out;
} else {
err = zfs_onexit_cb_data(minor, *action_handlep,
(void **)&rwa.guid_to_ds_map);
if (ra.err != 0)
if (err != 0)
goto out;
}

@@ -3471,6 +3485,7 @@ dmu_recv_stream(dmu_recv_cookie_t *drc, vnode_t *vp, offset_t *voffp,
rwa.os = ra.os;
rwa.byteswap = drc->drc_byteswap;
rwa.resumable = drc->drc_resumable;
rwa.raw = drc->drc_raw;

(void) thread_create(NULL, 0, receive_writer_thread, &rwa, 0, curproc,
TS_RUN, minclsyspri);
5 changes: 3 additions & 2 deletions usr/src/uts/common/fs/zfs/sys/dmu.h
@@ -437,8 +437,9 @@ void dmu_object_set_checksum(objset_t *os, uint64_t object, uint8_t checksum,
void dmu_object_set_compress(objset_t *os, uint64_t object, uint8_t compress,
dmu_tx_t *tx);

void
dmu_write_embedded(objset_t *os, uint64_t object, uint64_t offset,
int dmu_object_dirty_raw(objset_t *os, uint64_t object, dmu_tx_t *tx);

void dmu_write_embedded(objset_t *os, uint64_t object, uint64_t offset,
void *data, uint8_t etype, uint8_t comp, int uncompressed_size,
int compressed_size, int byteorder, dmu_tx_t *tx);
