diff --git a/.github/workflows/scripts/qemu-2-start.sh b/.github/workflows/scripts/qemu-2-start.sh index 39ac92107b71..73496d4f3de6 100755 --- a/.github/workflows/scripts/qemu-2-start.sh +++ b/.github/workflows/scripts/qemu-2-start.sh @@ -14,7 +14,7 @@ OSv=$OS # compressed with .zst extension REPO="https://github.com/mcmilk/openzfs-freebsd-images" -FREEBSD="$REPO/releases/download/v2024-10-05" +FREEBSD="$REPO/releases/download/v2024-12-14" URLzs="" # Ubuntu mirrors @@ -40,6 +40,12 @@ case "$OS" in # dns sometimes fails with that url :/ echo "89.187.191.12 geo.mirror.pkgbuild.com" | sudo tee /etc/hosts > /dev/null ;; + centos-stream10) + OSNAME="CentOS Stream 10" + # TODO: #16903 Overwrite OSv to stream9 for virt-install until it's added to osinfo + OSv="centos-stream9" + URL="https://cloud.centos.org/centos/10-stream/x86_64/images/CentOS-Stream-GenericCloud-10-latest.x86_64.qcow2" + ;; centos-stream9) OSNAME="CentOS Stream 9" URL="https://cloud.centos.org/centos/9-stream/x86_64/images/CentOS-Stream-GenericCloud-9-latest.x86_64.qcow2" @@ -76,28 +82,29 @@ case "$OS" in BASH="/usr/local/bin/bash" NIC="rtl8139" ;; - freebsd14-0r) - OSNAME="FreeBSD 14.0-RELEASE" - OSv="freebsd14.0" - URLzs="$FREEBSD/amd64-freebsd-14.0-RELEASE.qcow2.zst" - BASH="/usr/local/bin/bash" - ;; freebsd14-1r) OSNAME="FreeBSD 14.1-RELEASE" OSv="freebsd14.0" URLzs="$FREEBSD/amd64-freebsd-14.1-RELEASE.qcow2.zst" BASH="/usr/local/bin/bash" ;; + freebsd14-2r) + OSNAME="FreeBSD 14.2-RELEASE" + OSv="freebsd14.0" + URLzs="$FREEBSD/amd64-freebsd-14.2-RELEASE.qcow2.zst" + BASH="/usr/local/bin/bash" + ;; freebsd13-4s) OSNAME="FreeBSD 13.4-STABLE" OSv="freebsd13.0" URLzs="$FREEBSD/amd64-freebsd-13.4-STABLE.qcow2.zst" BASH="/usr/local/bin/bash" + NIC="rtl8139" ;; - freebsd14-1s) - OSNAME="FreeBSD 14.1-STABLE" + freebsd14-2s) + OSNAME="FreeBSD 14.2-STABLE" OSv="freebsd14.0" - URLzs="$FREEBSD/amd64-freebsd-14.1-STABLE.qcow2.zst" + URLzs="$FREEBSD/amd64-freebsd-14.2-STABLE.qcow2.zst" BASH="/usr/local/bin/bash" ;; freebsd15-0c) diff --git a/.github/workflows/scripts/qemu-3-deps.sh b/.github/workflows/scripts/qemu-3-deps.sh index 96979cd02e09..9b8957734277 100755 --- a/.github/workflows/scripts/qemu-3-deps.sh +++ b/.github/workflows/scripts/qemu-3-deps.sh @@ -104,7 +104,7 @@ case "$1" in sudo dnf install -y kernel-abi-whitelists echo "##[endgroup]" ;; - almalinux9|centos-stream9) + almalinux9|centos-stream9|centos-stream10) echo "##[group]Enable epel and crb repositories" sudo dnf config-manager -y --set-enabled crb sudo dnf install -y epel-release diff --git a/.github/workflows/zfs-qemu.yml b/.github/workflows/zfs-qemu.yml index e90030f4c02e..af26e135b91f 100644 --- a/.github/workflows/zfs-qemu.yml +++ b/.github/workflows/zfs-qemu.yml @@ -3,6 +3,18 @@ name: zfs-qemu on: push: pull_request: + workflow_dispatch: + inputs: + include_stream9: + type: boolean + required: false + default: false + description: 'Test on CentOS 9 stream' + include_stream10: + type: boolean + required: false + default: false + description: 'Test on CentOS 10 stream' concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} @@ -22,8 +34,8 @@ jobs: - name: Generate OS config and CI type id: os run: | - FULL_OS='["almalinux8", "almalinux9", "centos-stream9", "debian11", "debian12", "fedora40", "fedora41", "freebsd13-4r", "freebsd14-0r", "freebsd14-1s", "ubuntu20", "ubuntu22", "ubuntu24"]' - QUICK_OS='["almalinux8", "almalinux9", "debian12", "fedora41", "freebsd13-3r", "freebsd14-1r", "ubuntu24"]' + FULL_OS='["almalinux8", "almalinux9", 
"debian11", "debian12", "fedora40", "fedora41", "freebsd13-3r", "freebsd13-4s", "freebsd14-1r", "freebsd14-2s", "freebsd15-0c", "ubuntu20", "ubuntu22", "ubuntu24"]' + QUICK_OS='["almalinux8", "almalinux9", "debian12", "fedora41", "freebsd13-3r", "freebsd14-2r", "ubuntu24"]' # determine CI type when running on PR ci_type="full" if ${{ github.event_name == 'pull_request' }}; then @@ -37,9 +49,22 @@ jobs: os_selection="$FULL_OS" fi os_json=$(echo ${os_selection} | jq -c) + + # Add optional runners + if [ "${{ github.event.inputs.include_stream9 }}" == 'true' ]; then + os_json=$(echo $os_json | jq -c '. += ["centos-stream9"]') + fi + if [ "${{ github.event.inputs.include_stream10 }}" == 'true' ]; then + os_json=$(echo $os_json | jq -c '. += ["centos-stream10"]') + fi + + echo $os_json echo "os=$os_json" >> $GITHUB_OUTPUT echo "ci_type=$ci_type" >> $GITHUB_OUTPUT + + + qemu-vm: name: qemu-x86 needs: [ test-config ] @@ -49,8 +74,9 @@ jobs: # rhl: almalinux8, almalinux9, centos-stream9, fedora40, fedora41 # debian: debian11, debian12, ubuntu20, ubuntu22, ubuntu24 # misc: archlinux, tumbleweed - # FreeBSD Release: freebsd13-3r, freebsd13-4r, freebsd14-0r, freebsd14-1r - # FreeBSD Stable: freebsd13-4s, freebsd14-1s + # FreeBSD variants of 2024-12: + # FreeBSD Release: freebsd13-3r, freebsd13-4r, freebsd14-1r, freebsd14-2r + # FreeBSD Stable: freebsd13-4s, freebsd14-2s # FreeBSD Current: freebsd15-0c os: ${{ fromJson(needs.test-config.outputs.test_os) }} runs-on: ubuntu-24.04 diff --git a/cmd/zfs/zfs_main.c b/cmd/zfs/zfs_main.c index 7836f5909f4a..73ccf72d263c 100644 --- a/cmd/zfs/zfs_main.c +++ b/cmd/zfs/zfs_main.c @@ -500,7 +500,7 @@ usage_prop_cb(int prop, void *cb) { FILE *fp = cb; - (void) fprintf(fp, "\t%-15s ", zfs_prop_to_name(prop)); + (void) fprintf(fp, "\t%-22s ", zfs_prop_to_name(prop)); if (zfs_prop_readonly(prop)) (void) fprintf(fp, " NO "); @@ -561,40 +561,40 @@ usage(boolean_t requested) (void) fprintf(fp, "%s", gettext("\nThe following properties are supported:\n")); - (void) fprintf(fp, "\n\t%-14s %s %s %s\n\n", + (void) fprintf(fp, "\n\t%-21s %s %s %s\n\n", "PROPERTY", "EDIT", "INHERIT", "VALUES"); /* Iterate over all properties */ (void) zprop_iter(usage_prop_cb, fp, B_FALSE, B_TRUE, ZFS_TYPE_DATASET); - (void) fprintf(fp, "\t%-15s ", "userused@..."); + (void) fprintf(fp, "\t%-22s ", "userused@..."); (void) fprintf(fp, " NO NO \n"); - (void) fprintf(fp, "\t%-15s ", "groupused@..."); + (void) fprintf(fp, "\t%-22s ", "groupused@..."); (void) fprintf(fp, " NO NO \n"); - (void) fprintf(fp, "\t%-15s ", "projectused@..."); + (void) fprintf(fp, "\t%-22s ", "projectused@..."); (void) fprintf(fp, " NO NO \n"); - (void) fprintf(fp, "\t%-15s ", "userobjused@..."); + (void) fprintf(fp, "\t%-22s ", "userobjused@..."); (void) fprintf(fp, " NO NO \n"); - (void) fprintf(fp, "\t%-15s ", "groupobjused@..."); + (void) fprintf(fp, "\t%-22s ", "groupobjused@..."); (void) fprintf(fp, " NO NO \n"); - (void) fprintf(fp, "\t%-15s ", "projectobjused@..."); + (void) fprintf(fp, "\t%-22s ", "projectobjused@..."); (void) fprintf(fp, " NO NO \n"); - (void) fprintf(fp, "\t%-15s ", "userquota@..."); + (void) fprintf(fp, "\t%-22s ", "userquota@..."); (void) fprintf(fp, "YES NO | none\n"); - (void) fprintf(fp, "\t%-15s ", "groupquota@..."); + (void) fprintf(fp, "\t%-22s ", "groupquota@..."); (void) fprintf(fp, "YES NO | none\n"); - (void) fprintf(fp, "\t%-15s ", "projectquota@..."); + (void) fprintf(fp, "\t%-22s ", "projectquota@..."); (void) fprintf(fp, "YES NO | none\n"); - (void) fprintf(fp, "\t%-15s ", 
"userobjquota@..."); + (void) fprintf(fp, "\t%-22s ", "userobjquota@..."); (void) fprintf(fp, "YES NO | none\n"); - (void) fprintf(fp, "\t%-15s ", "groupobjquota@..."); + (void) fprintf(fp, "\t%-22s ", "groupobjquota@..."); (void) fprintf(fp, "YES NO | none\n"); - (void) fprintf(fp, "\t%-15s ", "projectobjquota@..."); + (void) fprintf(fp, "\t%-22s ", "projectobjquota@..."); (void) fprintf(fp, "YES NO | none\n"); - (void) fprintf(fp, "\t%-15s ", "written@"); + (void) fprintf(fp, "\t%-22s ", "written@"); (void) fprintf(fp, " NO NO \n"); - (void) fprintf(fp, "\t%-15s ", "written#"); + (void) fprintf(fp, "\t%-22s ", "written#"); (void) fprintf(fp, " NO NO \n"); (void) fprintf(fp, gettext("\nSizes are specified in bytes " diff --git a/config/kernel.m4 b/config/kernel.m4 index ae66633907bf..9928ead1b6ce 100644 --- a/config/kernel.m4 +++ b/config/kernel.m4 @@ -681,11 +681,16 @@ AC_DEFUN([ZFS_LINUX_COMPILE], [ building kernel modules]) AC_ARG_VAR([KERNEL_LLVM], [Binary option to build kernel modules with LLVM/CLANG toolchain]) + AC_ARG_VAR([KERNEL_CROSS_COMPILE], [Cross compile prefix + for kernel module builds]) + AC_ARG_VAR([KERNEL_ARCH], [Architecture to build kernel modules for]) AC_TRY_COMMAND([ KBUILD_MODPOST_NOFINAL="$5" KBUILD_MODPOST_WARN="$6" make modules -k -j$TEST_JOBS ${KERNEL_CC:+CC=$KERNEL_CC} ${KERNEL_LD:+LD=$KERNEL_LD} ${KERNEL_LLVM:+LLVM=$KERNEL_LLVM} CONFIG_MODULES=y CFLAGS_MODULE=-DCONFIG_MODULES + ${KERNEL_CROSS_COMPILE:+CROSS_COMPILE=$KERNEL_CROSS_COMPILE} + ${KERNEL_ARCH:+ARCH=$KERNEL_ARCH} -C $LINUX_OBJ $ARCH_UM M=$PWD/$1 >$1/build.log 2>&1]) AS_IF([AC_TRY_COMMAND([$2])], [$3], [$4]) ]) diff --git a/config/user.m4 b/config/user.m4 index 4e31745a2abc..badd920d2b8a 100644 --- a/config/user.m4 +++ b/config/user.m4 @@ -33,7 +33,7 @@ AC_DEFUN([ZFS_AC_CONFIG_USER], [ ZFS_AC_CONFIG_USER_MAKEDEV_IN_MKDEV ZFS_AC_CONFIG_USER_ZFSEXEC - AC_CHECK_FUNCS([execvpe issetugid mlockall strerror_l strlcat strlcpy gettid]) + AC_CHECK_FUNCS([execvpe issetugid mlockall strlcat strlcpy gettid]) AC_SUBST(RM) ]) diff --git a/config/zfs-build.m4 b/config/zfs-build.m4 index c44a893bbb8c..55fc029f0847 100644 --- a/config/zfs-build.m4 +++ b/config/zfs-build.m4 @@ -393,6 +393,8 @@ AC_DEFUN([ZFS_AC_RPM], [ RPM_DEFINE_KMOD=${RPM_DEFINE_KMOD}' --define "kernel_cc KERNEL_CC=$(KERNEL_CC)"' RPM_DEFINE_KMOD=${RPM_DEFINE_KMOD}' --define "kernel_ld KERNEL_LD=$(KERNEL_LD)"' RPM_DEFINE_KMOD=${RPM_DEFINE_KMOD}' --define "kernel_llvm KERNEL_LLVM=$(KERNEL_LLVM)"' + RPM_DEFINE_KMOD=${RPM_DEFINE_KMOD}' --define "kernel_cross_compile KERNEL_CROSS_COMPILE=$(KERNEL_CROSS_COMPILE)"' + RPM_DEFINE_KMOD=${RPM_DEFINE_KMOD}' --define "kernel_arch KERNEL_ARCH=$(KERNEL_ARCH)"' ]) RPM_DEFINE_DKMS='' diff --git a/include/libzutil.h b/include/libzutil.h index f8712340cc5e..bcfe2fcf7960 100644 --- a/include/libzutil.h +++ b/include/libzutil.h @@ -27,7 +27,7 @@ #define _LIBZUTIL_H extern __attribute__((visibility("default"))) #include -#include +#include #include #include @@ -276,11 +276,14 @@ _LIBZUTIL_H void update_vdev_config_dev_sysfs_path(nvlist_t *nv, * Thread-safe strerror() for use in ZFS libraries */ static inline char *zfs_strerror(int errnum) { -#ifdef HAVE_STRERROR_L - return (strerror_l(errnum, uselocale(0))); -#else - return (strerror(errnum)); -#endif + static __thread char errbuf[512]; + static pthread_mutex_t zfs_strerror_lock = PTHREAD_MUTEX_INITIALIZER; + + (void) pthread_mutex_lock(&zfs_strerror_lock); + (void) strlcpy(errbuf, strerror(errnum), sizeof (errbuf)); + (void) pthread_mutex_unlock(&zfs_strerror_lock); + + 
return (errbuf);
 }
 
 #ifdef __cplusplus
diff --git a/include/sys/zvol.h b/include/sys/zvol.h
index e236a4cd18a3..c79fe1d9ad22 100644
--- a/include/sys/zvol.h
+++ b/include/sys/zvol.h
@@ -56,8 +56,6 @@ extern int zvol_set_ro(const char *, boolean_t);
 extern zvol_state_handle_t *zvol_suspend(const char *);
 extern int zvol_resume(zvol_state_handle_t *);
 extern void *zvol_tag(zvol_state_handle_t *);
-extern int zvol_clone_range(zvol_state_handle_t *, uint64_t,
-    zvol_state_handle_t *, uint64_t, uint64_t);
 extern int zvol_init(void);
 extern void zvol_fini(void);
 
diff --git a/include/sys/zvol_impl.h b/include/sys/zvol_impl.h
index 55021a080076..a8168850023a 100644
--- a/include/sys/zvol_impl.h
+++ b/include/sys/zvol_impl.h
@@ -83,14 +83,16 @@ void zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off,
     uint64_t len);
 void zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, uint64_t offset,
     uint64_t size, boolean_t commit);
-void zvol_log_clone_range(zilog_t *zilog, dmu_tx_t *tx, int txtype,
-    uint64_t off, uint64_t len, uint64_t blksz, const blkptr_t *bps,
-    size_t nbps);
 int zvol_get_data(void *arg, uint64_t arg2, lr_write_t *lr, char *buf,
     struct lwb *lwb, zio_t *zio);
 int zvol_init_impl(void);
 void zvol_fini_impl(void);
 void zvol_wait_close(zvol_state_t *zv);
+int zvol_clone_range(zvol_state_handle_t *, uint64_t,
+    zvol_state_handle_t *, uint64_t, uint64_t);
+void zvol_log_clone_range(zilog_t *zilog, dmu_tx_t *tx, int txtype,
+    uint64_t off, uint64_t len, uint64_t blksz, const blkptr_t *bps,
+    size_t nbps);
 
 /*
  * platform dependent functions exported to platform independent code
diff --git a/lib/libzfs/libzfs_changelist.c b/lib/libzfs/libzfs_changelist.c
index 4db1cbce9568..47df8663165e 100644
--- a/lib/libzfs/libzfs_changelist.c
+++ b/lib/libzfs/libzfs_changelist.c
@@ -563,8 +563,15 @@ change_one(zfs_handle_t *zhp, void *data)
 		cn = NULL;
 	}
 
-	if (!clp->cl_alldependents)
-		ret = zfs_iter_children_v2(zhp, 0, change_one, data);
+	if (!clp->cl_alldependents) {
+		if (clp->cl_prop != ZFS_PROP_MOUNTPOINT) {
+			ret = zfs_iter_filesystems_v2(zhp, 0,
+			    change_one, data);
+		} else {
+			ret = zfs_iter_children_v2(zhp, 0, change_one,
+			    data);
+		}
+	}
 
 	/*
 	 * If we added the handle to the changelist, we will re-use it
@@ -738,6 +745,11 @@ changelist_gather(zfs_handle_t *zhp, zfs_prop_t prop, int gather_flags,
 			changelist_free(clp);
 			return (NULL);
 		}
+	} else if (clp->cl_prop != ZFS_PROP_MOUNTPOINT) {
+		if (zfs_iter_filesystems_v2(zhp, 0, change_one, clp) != 0) {
+			changelist_free(clp);
+			return (NULL);
+		}
 	} else if (zfs_iter_children_v2(zhp, 0, change_one, clp) != 0) {
 		changelist_free(clp);
 		return (NULL);
diff --git a/lib/libzfs/libzfs_pool.c b/lib/libzfs/libzfs_pool.c
index 15388ccbbe2b..bb513cbe6c38 100644
--- a/lib/libzfs/libzfs_pool.c
+++ b/lib/libzfs/libzfs_pool.c
@@ -5345,7 +5345,8 @@ zpool_get_vdev_prop_value(nvlist_t *nvprop, vdev_prop_t prop, char *prop_name,
 		strval = fnvlist_lookup_string(nv, ZPROP_VALUE);
 	} else {
 		/* user prop not found */
-		return (-1);
+		src = ZPROP_SRC_DEFAULT;
+		strval = "-";
 	}
 	(void) strlcpy(buf, strval, len);
 	if (srctype)
diff --git a/man/man4/zfs.4 b/man/man4/zfs.4
index da027798f962..7078a5ba8373 100644
--- a/man/man4/zfs.4
+++ b/man/man4/zfs.4
@@ -867,14 +867,14 @@ where that percent may exceed
 This only operates during memory pressure/reclaim.
 .
-.It Sy zfs_arc_shrinker_limit Ns = Ns Sy 10000 Pq int
+.It Sy zfs_arc_shrinker_limit Ns = Ns Sy 0 Pq int
 This is a limit on how many pages the ARC shrinker makes available for
 eviction in response to one page allocation attempt.
 Note that in practice, the kernel's shrinker can ask us to evict
 up to about four times this for one allocation attempt.
 To reduce OOM risk, this limit is applied for kswapd reclaims only.
 .Pp
-The default limit of
+For example a value of
 .Sy 10000 Pq in practice, Em 160 MiB No per allocation attempt with 4 KiB pages
 limits the amount of time spent attempting to reclaim ARC memory to
 less than 100 ms per allocation attempt,
diff --git a/module/Makefile.in b/module/Makefile.in
index d42d9cc1f804..ff980db62d9a 100644
--- a/module/Makefile.in
+++ b/module/Makefile.in
@@ -56,6 +56,8 @@ modules-Linux:
 	@# Build the kernel modules.
 	$(MAKE) -C @LINUX_OBJ@ $(if @KERNEL_CC@,CC=@KERNEL_CC@) \
 		$(if @KERNEL_LD@,LD=@KERNEL_LD@) $(if @KERNEL_LLVM@,LLVM=@KERNEL_LLVM@) \
+		$(if @KERNEL_CROSS_COMPILE@,CROSS_COMPILE=@KERNEL_CROSS_COMPILE@) \
+		$(if @KERNEL_ARCH@,ARCH=@KERNEL_ARCH@) \
 		M="$$PWD" @KERNEL_MAKE@ CONFIG_ZFS=m modules
 
 modules-FreeBSD:
diff --git a/module/os/freebsd/zfs/zfs_vnops_os.c b/module/os/freebsd/zfs/zfs_vnops_os.c
index b8c2c341dace..5edd3fcc76e7 100644
--- a/module/os/freebsd/zfs/zfs_vnops_os.c
+++ b/module/os/freebsd/zfs/zfs_vnops_os.c
@@ -6258,7 +6258,7 @@ struct vop_vector zfs_vnodeops = {
 	.vop_fplookup_vexec = zfs_freebsd_fplookup_vexec,
 	.vop_fplookup_symlink = zfs_freebsd_fplookup_symlink,
 	.vop_access = zfs_freebsd_access,
-	.vop_allocate = VOP_EINVAL,
+	.vop_allocate = VOP_EOPNOTSUPP,
 #if __FreeBSD_version >= 1400032
 	.vop_deallocate = zfs_deallocate,
 #endif
diff --git a/module/os/linux/zfs/arc_os.c b/module/os/linux/zfs/arc_os.c
index b1e45b28743e..3238977af6d1 100644
--- a/module/os/linux/zfs/arc_os.c
+++ b/module/os/linux/zfs/arc_os.c
@@ -63,7 +63,7 @@
  * practice, the kernel's shrinker can ask us to evict up to about 4x this
  * for one allocation attempt.
  *
- * The default limit of 10,000 (in practice, 160MB per allocation attempt
+ * For example a value of 10,000 (in practice, 160MB per allocation attempt
  * with 4K pages) limits the amount of time spent attempting to reclaim ARC
  * memory to less than 100ms per allocation attempt, even with a small
  * average compressed block size of ~8KB.
@@ -71,7 +71,7 @@
  * See also the comment in arc_shrinker_count().
 * Set to 0 to disable limit.
 */
-static int zfs_arc_shrinker_limit = 10000;
+static int zfs_arc_shrinker_limit = 0;
 
 /*
 * Relative cost of ARC eviction, AKA number of seeks needed to restore evicted
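As a quick sanity check of the "160 MiB per allocation attempt" figure quoted in the zfs.4 text and the arc_os.c comment above, the arithmetic as illustrative shell, assuming 4 KiB pages (not part of the patch):

pages=10000                               # zfs_arc_shrinker_limit (former default)
echo "$(( pages * 4 / 1024 )) MiB"        # ~39 MiB made evictable at the limit itself
echo "$(( pages * 4 * 4 / 1024 )) MiB"    # ~156 MiB once the kernel asks ~4x, i.e. the quoted ~160 MiB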
diff --git a/module/os/linux/zfs/zfs_vfsops.c b/module/os/linux/zfs/zfs_vfsops.c
index 782a1ae64094..bc65cde9ba99 100644
--- a/module/os/linux/zfs/zfs_vfsops.c
+++ b/module/os/linux/zfs/zfs_vfsops.c
@@ -1720,13 +1720,14 @@ zfs_vget(struct super_block *sb, struct inode **ipp, fid_t *fidp)
 	/* A zero fid_gen means we are in the .zfs control directories */
 	if (fid_gen == 0 &&
 	    (object == ZFSCTL_INO_ROOT || object == ZFSCTL_INO_SNAPDIR)) {
-		*ipp = zfsvfs->z_ctldir;
-		ASSERT(*ipp != NULL);
-
 		if (zfsvfs->z_show_ctldir == ZFS_SNAPDIR_DISABLED) {
+			zfs_exit(zfsvfs, FTAG);
 			return (SET_ERROR(ENOENT));
 		}
+		*ipp = zfsvfs->z_ctldir;
+		ASSERT(*ipp != NULL);
+
 		if (object == ZFSCTL_INO_SNAPDIR) {
 			VERIFY(zfsctl_root_lookup(*ipp, "snapshot", ipp, 0,
 			    kcred, NULL, NULL) == 0);
diff --git a/module/zcommon/zpool_prop.c b/module/zcommon/zpool_prop.c
index a709679b9032..ea9eda4b316d 100644
--- a/module/zcommon/zpool_prop.c
+++ b/module/zcommon/zpool_prop.c
@@ -105,7 +105,7 @@ zpool_prop_init(void)
 	    PROP_READONLY, ZFS_TYPE_POOL, "", "FRAG", B_FALSE,
 	    sfeatures);
 	zprop_register_number(ZPOOL_PROP_CAPACITY, "capacity", 0, PROP_READONLY,
-	    ZFS_TYPE_POOL, "", "CAP", B_FALSE, sfeatures);
+	    ZFS_TYPE_POOL, "", "CAP", B_FALSE, sfeatures);
 	zprop_register_number(ZPOOL_PROP_GUID, "guid", 0, PROP_READONLY,
 	    ZFS_TYPE_POOL, "", "GUID", B_TRUE, sfeatures);
 	zprop_register_number(ZPOOL_PROP_LOAD_GUID, "load_guid", 0,
diff --git a/module/zfs/spa.c b/module/zfs/spa.c
index b83c982c13fd..c9dfd7ac2e4d 100644
--- a/module/zfs/spa.c
+++ b/module/zfs/spa.c
@@ -8948,16 +8948,26 @@ spa_async_remove(spa_t *spa, vdev_t *vd)
 }
 
 static void
-spa_async_fault_vdev(spa_t *spa, vdev_t *vd)
+spa_async_fault_vdev(vdev_t *vd, boolean_t *suspend)
 {
 	if (vd->vdev_fault_wanted) {
+		vdev_state_t newstate = VDEV_STATE_FAULTED;
 		vd->vdev_fault_wanted = B_FALSE;
-		vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
-		    VDEV_AUX_ERR_EXCEEDED);
-	}
+		/*
+		 * If this device has the only valid copy of the data, then
+		 * back off and simply mark the vdev as degraded instead.
+		 */
+		if (!vd->vdev_top->vdev_islog && vd->vdev_aux == NULL &&
+		    vdev_dtl_required(vd)) {
+			newstate = VDEV_STATE_DEGRADED;
+			/* A required disk is missing so suspend the pool */
+			*suspend = B_TRUE;
+		}
+		vdev_set_state(vd, B_TRUE, newstate, VDEV_AUX_ERR_EXCEEDED);
+	}
 
 	for (int c = 0; c < vd->vdev_children; c++)
-		spa_async_fault_vdev(spa, vd->vdev_child[c]);
+		spa_async_fault_vdev(vd->vdev_child[c], suspend);
 }
 
 static void
@@ -9049,8 +9059,11 @@ spa_async_thread(void *arg)
 	 */
 	if (tasks & SPA_ASYNC_FAULT_VDEV) {
 		spa_vdev_state_enter(spa, SCL_NONE);
-		spa_async_fault_vdev(spa, spa->spa_root_vdev);
+		boolean_t suspend = B_FALSE;
+		spa_async_fault_vdev(spa->spa_root_vdev, &suspend);
 		(void) spa_vdev_state_exit(spa, NULL, 0);
+		if (suspend)
+			zio_suspend(spa, NULL, ZIO_SUSPEND_IOERR);
 	}
 
 	/*
diff --git a/module/zfs/vdev.c b/module/zfs/vdev.c
index 9f0f1dee656c..d9c5871820ca 100644
--- a/module/zfs/vdev.c
+++ b/module/zfs/vdev.c
@@ -2041,6 +2041,7 @@ vdev_open(vdev_t *vd)
 	vd->vdev_cant_read = B_FALSE;
 	vd->vdev_cant_write = B_FALSE;
 	vd->vdev_fault_wanted = B_FALSE;
+	vd->vdev_remove_wanted = B_FALSE;
 	vd->vdev_min_asize = vdev_get_min_asize(vd);
 
 	/*
@@ -5969,7 +5970,7 @@ vdev_prop_set(vdev_t *vd, nvlist_t *innvl, nvlist_t *outnvl)
 			goto end;
 		}
 
-		if (vdev_prop_readonly(prop)) {
+		if (prop != VDEV_PROP_USERPROP && vdev_prop_readonly(prop)) {
 			error = EROFS;
 			goto end;
 		}
diff --git a/module/zfs/zap_micro.c b/module/zfs/zap_micro.c
index 55b60006e58c..a9298d3e940e 100644
--- a/module/zfs/zap_micro.c
+++ b/module/zfs/zap_micro.c
@@ -54,14 +54,25 @@
 * machinery to understand not to try to split a microzap block).
 *
 * If large_microzap is enabled, this value will be clamped to
- * spa_maxblocksize(). If not, it will be clamped to SPA_OLD_MAXBLOCKSIZE.
+ * spa_maxblocksize(), up to 1M. If not, it will be clamped to
+ * SPA_OLD_MAXBLOCKSIZE.
 */
 static int zap_micro_max_size = SPA_OLD_MAXBLOCKSIZE;
 
+/*
+ * The 1M upper limit is necessary because the count of chunks in a microzap
+ * block is stored as a uint16_t (mze_chunkid). Each chunk is 64 bytes, and the
+ * first is used to store a header, so there are 32767 usable chunks, which is
+ * just under 2M. 1M is the largest power-2-rounded block size under 2M, so we
+ * must set the limit there.
+ */
+#define MZAP_MAX_SIZE (1048576)
+
 uint64_t
 zap_get_micro_max_size(spa_t *spa)
 {
-	uint64_t maxsz = P2ROUNDUP(zap_micro_max_size, SPA_MINBLOCKSIZE);
+	uint64_t maxsz = MIN(MZAP_MAX_SIZE,
+	    P2ROUNDUP(zap_micro_max_size, SPA_MINBLOCKSIZE));
 	if (maxsz <= SPA_OLD_MAXBLOCKSIZE)
 		return (maxsz);
 	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_MICROZAP))
@@ -2031,5 +2042,6 @@ EXPORT_SYMBOL(zap_cursor_init_serialized);
 EXPORT_SYMBOL(zap_get_stats);
 
 ZFS_MODULE_PARAM(zfs, , zap_micro_max_size, INT, ZMOD_RW,
-	"Maximum micro ZAP size, before converting to a fat ZAP, in bytes");
+	"Maximum micro ZAP size before converting to a fat ZAP, "
+	"in bytes (max 1M)");
 #endif
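To make the chunk arithmetic in the new zap_micro.c comment concrete, an illustrative shell check (the 32767 and 64 come straight from the comment; this is not part of the patch):

chunk_bytes=64
usable_chunks=32767                       # chunk ids fit in mze_chunkid, minus the header chunk
echo $(( chunk_bytes * usable_chunks ))   # 2097088 bytes, just under 2 MiB
echo $(( 1 << 20 ))                       # 1048576 (1M), the largest power-of-two size that still fits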
diff --git a/module/zfs/zvol.c b/module/zfs/zvol.c
index b5d8d1b71111..d63c0d597db8 100644
--- a/module/zfs/zvol.c
+++ b/module/zfs/zvol.c
@@ -752,6 +752,50 @@ zvol_clone_range(zvol_state_t *zv_src, uint64_t inoff, zvol_state_t *zv_dst,
 	return (SET_ERROR(error));
 }
 
+/*
+ * Handles TX_CLONE_RANGE transactions.
+ */
+void
+zvol_log_clone_range(zilog_t *zilog, dmu_tx_t *tx, int txtype, uint64_t off,
+    uint64_t len, uint64_t blksz, const blkptr_t *bps, size_t nbps)
+{
+	itx_t *itx;
+	lr_clone_range_t *lr;
+	uint64_t partlen, max_log_data;
+	size_t partnbps;
+
+	if (zil_replaying(zilog, tx))
+		return;
+
+	max_log_data = zil_max_log_data(zilog, sizeof (lr_clone_range_t));
+
+	while (nbps > 0) {
+		partnbps = MIN(nbps, max_log_data / sizeof (bps[0]));
+		partlen = partnbps * blksz;
+		ASSERT3U(partlen, <, len + blksz);
+		partlen = MIN(partlen, len);
+
+		itx = zil_itx_create(txtype,
+		    sizeof (*lr) + sizeof (bps[0]) * partnbps);
+		lr = (lr_clone_range_t *)&itx->itx_lr;
+		lr->lr_foid = ZVOL_OBJ;
+		lr->lr_offset = off;
+		lr->lr_length = partlen;
+		lr->lr_blksz = blksz;
+		lr->lr_nbps = partnbps;
+		memcpy(lr->lr_bps, bps, sizeof (bps[0]) * partnbps);
+
+		zil_itx_assign(zilog, itx, tx);
+
+		bps += partnbps;
+		ASSERT3U(nbps, >=, partnbps);
+		nbps -= partnbps;
+		off += partlen;
+		ASSERT3U(len, >=, partlen);
+		len -= partlen;
+	}
+}
+
 static int
 zvol_replay_err(void *arg1, void *arg2, boolean_t byteswap)
 {
@@ -863,50 +907,6 @@ zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, uint64_t offset,
 	}
 }
 
-/*
- * Handles TX_CLONE_RANGE transactions.
- */
-void
-zvol_log_clone_range(zilog_t *zilog, dmu_tx_t *tx, int txtype, uint64_t off,
-    uint64_t len, uint64_t blksz, const blkptr_t *bps, size_t nbps)
-{
-	itx_t *itx;
-	lr_clone_range_t *lr;
-	uint64_t partlen, max_log_data;
-	size_t partnbps;
-
-	if (zil_replaying(zilog, tx))
-		return;
-
-	max_log_data = zil_max_log_data(zilog, sizeof (lr_clone_range_t));
-
-	while (nbps > 0) {
-		partnbps = MIN(nbps, max_log_data / sizeof (bps[0]));
-		partlen = partnbps * blksz;
-		ASSERT3U(partlen, <, len + blksz);
-		partlen = MIN(partlen, len);
-
-		itx = zil_itx_create(txtype,
-		    sizeof (*lr) + sizeof (bps[0]) * partnbps);
-		lr = (lr_clone_range_t *)&itx->itx_lr;
-		lr->lr_foid = ZVOL_OBJ;
-		lr->lr_offset = off;
-		lr->lr_length = partlen;
-		lr->lr_blksz = blksz;
-		lr->lr_nbps = partnbps;
-		memcpy(lr->lr_bps, bps, sizeof (bps[0]) * partnbps);
-
-		zil_itx_assign(zilog, itx, tx);
-
-		bps += partnbps;
-		ASSERT3U(nbps, >=, partnbps);
-		nbps -= partnbps;
-		off += partlen;
-		ASSERT3U(len, >=, partlen);
-		len -= partlen;
-	}
-}
-
 /*
  * Log a DKIOCFREE/free-long-range to the ZIL with TX_TRUNCATE.
  */
diff --git a/rpm/generic/zfs-kmod.spec.in b/rpm/generic/zfs-kmod.spec.in
index 30524474d1ac..7ed828bd0c9c 100644
--- a/rpm/generic/zfs-kmod.spec.in
+++ b/rpm/generic/zfs-kmod.spec.in
@@ -144,7 +144,9 @@ for kernel_version in %{?kernel_versions}; do
 		%{debuginfo} \
 		%{?kernel_cc} \
 		%{?kernel_ld} \
-		%{?kernel_llvm}
+		%{?kernel_llvm} \
+		%{?kernel_cross_compile} \
+		%{?kernel_arch}
 
 	# Pre-6.10 kernel builds didn't need to copy over the source files to the
 	# build directory.
However we do need to do it though post-6.10 due to diff --git a/rpm/redhat/zfs-kmod.spec.in b/rpm/redhat/zfs-kmod.spec.in index 876c198c64de..a95bdf20f873 100644 --- a/rpm/redhat/zfs-kmod.spec.in +++ b/rpm/redhat/zfs-kmod.spec.in @@ -69,7 +69,9 @@ fi %{debuginfo} \ %{?kernel_cc} \ %{?kernel_ld} \ - %{?kernel_llvm} + %{?kernel_llvm} \ + %{?kernel_cross_compile} \ + %{?kernel_arch} make %{?_smp_mflags} # Module signing (modsign) diff --git a/scripts/Makefile.am b/scripts/Makefile.am index 7d9cef83d2c6..ee8fb8717cec 100644 --- a/scripts/Makefile.am +++ b/scripts/Makefile.am @@ -79,7 +79,7 @@ CLEANFILES += %D%/common.sh -$(AM_V_at)echo "$$SCRIPTS_EXTRA_ENVIRONMENT" >>$@ ALL_LOCAL += scripts-all-local -scripts-all-local: %D%/common.sh +scripts-all-local: %D%/common.sh $(PROGRAMS) $(SCRIPTS) $(DATA) -SCRIPT_COMMON=$< $(srcdir)/%D%/zfs-tests.sh -c CLEAN_LOCAL += scripts-clean-local diff --git a/tests/runfiles/common.run b/tests/runfiles/common.run index a69d36df2f98..1d6f6d85200f 100644 --- a/tests/runfiles/common.run +++ b/tests/runfiles/common.run @@ -676,8 +676,8 @@ post = tags = ['functional', 'deadman'] [tests/functional/dedup] -tests = ['dedup_legacy_create', 'dedup_fdt_create', 'dedup_fdt_import', - 'dedup_legacy_create', 'dedup_legacy_import', 'dedup_legacy_fdt_upgrade', +tests = ['dedup_fdt_create', 'dedup_fdt_import', 'dedup_legacy_create', + 'dedup_legacy_import', 'dedup_legacy_fdt_upgrade', 'dedup_legacy_fdt_mixed', 'dedup_quota'] pre = post = diff --git a/tests/runfiles/linux.run b/tests/runfiles/linux.run index 76d07a6cc9c1..e55ec583d2cc 100644 --- a/tests/runfiles/linux.run +++ b/tests/runfiles/linux.run @@ -125,8 +125,8 @@ tests = ['auto_offline_001_pos', 'auto_online_001_pos', 'auto_online_002_pos', 'auto_replace_001_pos', 'auto_replace_002_pos', 'auto_spare_001_pos', 'auto_spare_002_pos', 'auto_spare_multiple', 'auto_spare_ashift', 'auto_spare_shared', 'decrypt_fault', 'decompress_fault', - 'fault_limits', 'scrub_after_resilver', 'suspend_resume_single', - 'zpool_status_-s'] + 'fault_limits', 'scrub_after_resilver', 'suspend_on_probe_errors', + 'suspend_resume_single', 'zpool_status_-s'] tags = ['functional', 'fault'] [tests/functional/features/large_dnode:Linux] diff --git a/tests/zfs-tests/include/blkdev.shlib b/tests/zfs-tests/include/blkdev.shlib index 51eff3023e73..5b505f925286 100644 --- a/tests/zfs-tests/include/blkdev.shlib +++ b/tests/zfs-tests/include/blkdev.shlib @@ -556,27 +556,15 @@ function list_file_blocks # input_file # 512B blocks for ease of use with dd. 
# typeset level vdev path offset length - if awk -n '' 2>/dev/null; then - # gawk needs -n to decode hex - AWK='awk -n' - else - AWK='awk' - fi sync_all_pools true - zdb -dddddd $ds $objnum | $AWK -v pad=$((4<<20)) -v bs=512 ' + zdb -dddddd $ds $objnum | awk ' /^$/ { looking = 0 } looking { level = $2 field = 3 while (split($field, dva, ":") == 3) { - # top level vdev id - vdev = int(dva[1]) - # offset + 4M label/boot pad in 512B blocks - offset = (int("0x"dva[2]) + pad) / bs - # length in 512B blocks - len = int("0x"dva[3]) / bs - print level, vdev, offset, len + print level, int(dva[1]), "0x"dva[2], "0x"dva[3] ++field } @@ -585,7 +573,8 @@ function list_file_blocks # input_file ' | \ while read level vdev offset length; do for path in ${VDEV_MAP[$vdev][@]}; do - echo "$level $path $offset $length" + echo "$level $path $(( ($offset + (4<<20)) / 512 ))" \ + "$(( $length / 512 ))" done done 2>/dev/null } diff --git a/tests/zfs-tests/tests/Makefile.am b/tests/zfs-tests/tests/Makefile.am index 67630cb564ae..bde33843098f 100644 --- a/tests/zfs-tests/tests/Makefile.am +++ b/tests/zfs-tests/tests/Makefile.am @@ -1531,6 +1531,7 @@ nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \ functional/fault/decrypt_fault.ksh \ functional/fault/fault_limits.ksh \ functional/fault/scrub_after_resilver.ksh \ + functional/fault/suspend_on_probe_errors.ksh \ functional/fault/suspend_resume_single.ksh \ functional/fault/setup.ksh \ functional/fault/zpool_status_-s.ksh \ diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_mount/zfs_mount_all_fail.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_mount/zfs_mount_all_fail.ksh index d1103bddccbd..7b6c2ccdf660 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zfs_mount/zfs_mount_all_fail.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zfs_mount/zfs_mount_all_fail.ksh @@ -16,6 +16,7 @@ # # Copyright (c) 2017 by Delphix. All rights reserved. +# Copyright 2024 MNX Cloud, Inc. # . 
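The list_file_blocks() rewrite above has awk print the DVA's vdev, offset, and length verbatim (offset and length still in hex) and moves the unit conversion into the shell loop, avoiding the gawk-specific hex handling that the removed code probed for. A standalone sketch of that conversion, with made-up hex values (illustrative only, not part of the patch):

offset=0x400000                          # raw DVA offset as printed by zdb
length=0x20000                           # raw allocated length as printed by zdb
echo $(( (offset + (4<<20)) / 512 ))     # offset in 512B sectors, past the 4 MiB label/boot pad
echo $(( length / 512 ))                 # length in 512B sectors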
$STF_SUITE/include/libtest.shlib @@ -44,8 +45,9 @@ typeset fscount=10 function setup_all { # Create $fscount filesystems at the top level of $path - for ((i=0; i<$fscount; i++)); do + for ((i=0; i /sys/block/$sd/device/state" + fi + unload_scsi_debug + rm -f $DATA_FILE + for i in {0..$((FILE_VDEV_CNT - 1))}; do + log_must rm -f "$TEST_BASE_DIR/dev-$i" + done + log_must set_tunable32 SCAN_SUSPEND_PROGRESS 0 + zed_start +} + +log_onexit cleanup + +log_assert "VDEV probe errors for more disks than parity should suspend a pool" + +log_note "Stoping ZED process" +zed_stop +zpool events -c + +# Make a debug device that we can "unplug" and lose 4 drives at once +unload_scsi_debug +load_scsi_debug $DEV_SIZE_MB 1 1 1 '512b' +sd=$(get_debug_device) + +# Create 4 partitions that match the FILE_VDEV_SIZ +parted "/dev/${sd}" --script mklabel gpt +parted "/dev/${sd}" --script mkpart primary 0% 25% +parted "/dev/${sd}" --script mkpart primary 25% 50% +parted "/dev/${sd}" --script mkpart primary 50% 75% +parted "/dev/${sd}" --script mkpart primary 75% 100% +block_device_wait "/dev/${sd}" +blkdevs="/dev/${sd}1 /dev/${sd}2 /dev/${sd}3 /dev/${sd}4" + +# Create 8 file vdevs +typeset -a filedevs +for i in {0..$((FILE_VDEV_CNT - 1))}; do + device=$TEST_BASE_DIR/dev-$i + log_must truncate -s $FILE_VDEV_SIZ $device + # Use all but the last one for pool create + if [[ $i -lt "7" ]]; then + filedevs[${#filedevs[*]}+1]=$device + fi +done + +# Create a raidz-3 pool that we can pull 4 disks from +log_must zpool create -f $TESTPOOL raidz3 ${filedevs[@]} $blkdevs +sync_pool $TESTPOOL + +# Add some data to the pool +log_must zfs create $TESTPOOL/fs +MNTPOINT="$(get_prop mountpoint $TESTPOOL/fs)" +SECONDS=0 +log_must fill_fs $MNTPOINT 1 200 4096 10 Z +log_note "fill_fs took $SECONDS seconds" +sync_pool $TESTPOOL + +# Start a replacing vdev, but suspend the resilver +log_must set_tunable32 SCAN_SUSPEND_PROGRESS 1 +log_must zpool replace -f $TESTPOOL /dev/${sd}4 $TEST_BASE_DIR/dev-7 + +# Remove 4 disks all at once +log_must eval "echo offline > /sys/block/${sd}/device/state" + +log_must set_tunable32 SCAN_SUSPEND_PROGRESS 0 + +# Add some writes to drive the vdev probe errors +log_must dd if=/dev/urandom of=$MNTPOINT/writes bs=1M count=1 + +# Wait until sync starts, and the pool suspends +log_note "waiting for pool to suspend" +typeset -i tries=30 +until [[ $(cat /proc/spl/kstat/zfs/$TESTPOOL/state) == "SUSPENDED" ]] ; do + if ((tries-- == 0)); then + zpool status -s + log_fail "UNEXPECTED -- pool did not suspend" + fi + sleep 1 +done +log_note $(cat /proc/spl/kstat/zfs/$TESTPOOL/state) + +# Put the missing disks back into service +log_must eval "echo running > /sys/block/$sd/device/state" + +# Clear the vdev error states, which will reopen the vdevs and resume the pool +log_must zpool clear $TESTPOOL + +# Wait until the pool resumes +log_note "waiting for pool to resume" +tries=30 +until [[ $(cat /proc/spl/kstat/zfs/$TESTPOOL/state) != "SUSPENDED" ]] ; do + if ((tries-- == 0)); then + log_fail "pool did not resume" + fi + sleep 1 +done +log_must zpool wait -t resilver $TESTPOOL +sync_pool $TESTPOOL + +# Make sure a pool scrub comes back clean +log_must zpool scrub -w $TESTPOOL +log_must zpool status -v $TESTPOOL +log_must check_pool_status $TESTPOOL "errors" "No known data errors" + +log_pass "VDEV probe errors for more disks than parity should suspend a pool" diff --git a/tests/zfs-tests/tests/functional/mount/cleanup.ksh b/tests/zfs-tests/tests/functional/mount/cleanup.ksh index bd6b0e435ed1..0e88e2a1fc79 100755 --- 
a/tests/zfs-tests/tests/functional/mount/cleanup.ksh +++ b/tests/zfs-tests/tests/functional/mount/cleanup.ksh @@ -27,12 +27,14 @@ # # Copyright (c) 2013, 2016 by Delphix. All rights reserved. +# Copyright 2025 MNX Cloud, Inc. # . $STF_SUITE/include/libtest.shlib log_must destroy_pool $TESTPOOL -for dir in $TESTDIRS; do +for i in 1 2 3; do + dir=$TESTDIR.$i rm -rf $dir done diff --git a/tests/zfs-tests/tests/functional/pam/cleanup.ksh b/tests/zfs-tests/tests/functional/pam/cleanup.ksh index dbcb175ed069..bfb98cd30707 100755 --- a/tests/zfs-tests/tests/functional/pam/cleanup.ksh +++ b/tests/zfs-tests/tests/functional/pam/cleanup.ksh @@ -27,4 +27,4 @@ destroy_pool $TESTPOOL del_user ${username} del_user ${username}rec del_group pamtestgroup -log_must rm -rf "$runstatedir" $TESTDIRS +log_must rm -rf "$runstatedir" diff --git a/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_discard_busy.ksh b/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_discard_busy.ksh index 087aef9027ea..2bf5ab199e6e 100755 --- a/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_discard_busy.ksh +++ b/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_discard_busy.ksh @@ -42,8 +42,8 @@ log_unsupported "Skipping, issue https://github.com/openzfs/zfs/issues/12053" function test_cleanup { - # reset memory limit to 16M - set_tunable64 SPA_DISCARD_MEMORY_LIMIT 1000000 + # reset to original value + log_must restore_tunable SPA_DISCARD_MEMORY_LIMIT cleanup_nested_pools } @@ -69,6 +69,7 @@ log_onexit test_cleanup # map, we should have even more time to # verify this. # +log_must save_tunable SPA_DISCARD_MEMORY_LIMIT set_tunable64 SPA_DISCARD_MEMORY_LIMIT 128 log_must zpool checkpoint $NESTEDPOOL @@ -101,8 +102,8 @@ log_mustnot zpool checkpoint -d $NESTEDPOOL log_mustnot zpool remove $NESTEDPOOL $FILEDISK1 log_mustnot zpool reguid $NESTEDPOOL -# reset memory limit to 16M -set_tunable64 SPA_DISCARD_MEMORY_LIMIT 16777216 +# reset to original value +log_must restore_tunable SPA_DISCARD_MEMORY_LIMIT nested_wait_discard_finish diff --git a/tests/zfs-tests/tests/functional/raidz/raidz_expand_001_pos.ksh b/tests/zfs-tests/tests/functional/raidz/raidz_expand_001_pos.ksh index d4923fdb67d9..125b0e5411a3 100755 --- a/tests/zfs-tests/tests/functional/raidz/raidz_expand_001_pos.ksh +++ b/tests/zfs-tests/tests/functional/raidz/raidz_expand_001_pos.ksh @@ -200,13 +200,13 @@ log_must zpool create -f -o cachefile=none $TESTPOOL $raid ${disks[@]} log_must zfs set primarycache=metadata $TESTPOOL log_must zfs create $TESTPOOL/fs -log_must fill_fs /$TESTPOOL/fs 1 512 100 1024 R +log_must fill_fs /$TESTPOOL/fs 1 512 102400 1 R log_must zfs create -o compress=on $TESTPOOL/fs2 -log_must fill_fs /$TESTPOOL/fs2 1 512 100 1024 R +log_must fill_fs /$TESTPOOL/fs2 1 512 102400 1 R log_must zfs create -o compress=on -o recordsize=8k $TESTPOOL/fs3 -log_must fill_fs /$TESTPOOL/fs3 1 512 100 1024 R +log_must fill_fs /$TESTPOOL/fs3 1 512 102400 1 R log_must check_pool_status $TESTPOOL "errors" "No known data errors" diff --git a/tests/zfs-tests/tests/functional/raidz/raidz_expand_002_pos.ksh b/tests/zfs-tests/tests/functional/raidz/raidz_expand_002_pos.ksh index 56810aca099f..185316a7cb85 100755 --- a/tests/zfs-tests/tests/functional/raidz/raidz_expand_002_pos.ksh +++ b/tests/zfs-tests/tests/functional/raidz/raidz_expand_002_pos.ksh @@ -78,13 +78,13 @@ log_must zpool create -f $opts $pool $raid ${disks[1..$(($nparity+1))]} log_must zfs set primarycache=metadata $pool log_must zfs create $pool/fs -log_must fill_fs 
/$pool/fs 1 512 100 1024 R +log_must fill_fs /$pool/fs 1 512 102400 1 R log_must zfs create -o compress=on $pool/fs2 -log_must fill_fs /$pool/fs2 1 512 100 1024 R +log_must fill_fs /$pool/fs2 1 512 102400 1 R log_must zfs create -o compress=on -o recordsize=8k $pool/fs3 -log_must fill_fs /$pool/fs3 1 512 100 1024 R +log_must fill_fs /$pool/fs3 1 512 102400 1 R typeset pool_size=$(get_pool_prop size $pool) diff --git a/tests/zfs-tests/tests/functional/raidz/raidz_expand_003_neg.ksh b/tests/zfs-tests/tests/functional/raidz/raidz_expand_003_neg.ksh index 4d85c46897b8..a2eb87b1f722 100755 --- a/tests/zfs-tests/tests/functional/raidz/raidz_expand_003_neg.ksh +++ b/tests/zfs-tests/tests/functional/raidz/raidz_expand_003_neg.ksh @@ -92,7 +92,7 @@ log_must zpool destroy $pool log_must zpool create -f $opts $pool $raid ${disks[1..$(($devs-1))]} log_must zfs set primarycache=metadata $pool log_must zfs create $pool/fs -log_must fill_fs /$pool/fs 1 512 100 1024 R +log_must fill_fs /$pool/fs 1 512 102400 1 R allocated=$(zpool list -Hp -o allocated $pool) log_must set_tunable64 RAIDZ_EXPAND_MAX_REFLOW_BYTES $((allocated / 4)) log_must zpool attach $pool ${raid}-0 ${disks[$devs]} diff --git a/tests/zfs-tests/tests/functional/raidz/raidz_expand_003_pos.ksh b/tests/zfs-tests/tests/functional/raidz/raidz_expand_003_pos.ksh index 712b25261773..6f852c516ca4 100755 --- a/tests/zfs-tests/tests/functional/raidz/raidz_expand_003_pos.ksh +++ b/tests/zfs-tests/tests/functional/raidz/raidz_expand_003_pos.ksh @@ -94,10 +94,10 @@ opts="-o cachefile=none" log_must zpool create -f $opts $pool $raid ${disks[1..$(($nparity+1))]} log_must zfs create -o recordsize=8k $pool/fs -log_must fill_fs /$pool/fs 1 256 100 1024 R +log_must fill_fs /$pool/fs 1 256 102400 1 R log_must zfs create -o recordsize=128k $pool/fs2 -log_must fill_fs /$pool/fs2 1 256 100 1024 R +log_must fill_fs /$pool/fs2 1 256 102400 1 R for disk in ${disks[$(($nparity+2))..$devs]}; do log_must mkfile -n 400m /$pool/fs/file diff --git a/tests/zfs-tests/tests/functional/raidz/raidz_expand_004_pos.ksh b/tests/zfs-tests/tests/functional/raidz/raidz_expand_004_pos.ksh index 2be55dae4254..5056e4e4b1fd 100755 --- a/tests/zfs-tests/tests/functional/raidz/raidz_expand_004_pos.ksh +++ b/tests/zfs-tests/tests/functional/raidz/raidz_expand_004_pos.ksh @@ -81,10 +81,10 @@ log_must set_tunable32 SCRUB_AFTER_EXPAND 0 log_must zpool create -f $opts $pool $raid ${disks[1..$(($nparity+1))]} log_must zfs create -o recordsize=8k $pool/fs -log_must fill_fs /$pool/fs 1 128 100 1024 R +log_must fill_fs /$pool/fs 1 128 102400 1 R log_must zfs create -o recordsize=128k $pool/fs2 -log_must fill_fs /$pool/fs2 1 128 100 1024 R +log_must fill_fs /$pool/fs2 1 128 102400 1 R for disk in ${disks[$(($nparity+2))..$devs]}; do log_must zpool attach $pool ${raid}-0 $disk diff --git a/tests/zfs-tests/tests/functional/raidz/raidz_expand_005_pos.ksh b/tests/zfs-tests/tests/functional/raidz/raidz_expand_005_pos.ksh index 56ee3e9be67c..49b9f6c1d353 100755 --- a/tests/zfs-tests/tests/functional/raidz/raidz_expand_005_pos.ksh +++ b/tests/zfs-tests/tests/functional/raidz/raidz_expand_005_pos.ksh @@ -137,10 +137,10 @@ log_must zpool create -f $opts $pool $raid ${disks[1..$(($nparity+1))]} devices="${disks[1..$(($nparity+1))]}" log_must zfs create -o recordsize=8k $pool/fs -log_must fill_fs /$pool/fs 1 128 100 1024 R +log_must fill_fs /$pool/fs 1 128 102400 1 R log_must zfs create -o recordsize=128k $pool/fs2 -log_must fill_fs /$pool/fs2 1 128 100 1024 R +log_must fill_fs /$pool/fs2 1 128 102400 1 R 
for disk in ${disks[$(($nparity+2))..$devs]}; do # Set pause to some random value near halfway point diff --git a/tests/zfs-tests/tests/functional/redacted_send/redacted_panic.ksh b/tests/zfs-tests/tests/functional/redacted_send/redacted_panic.ksh index 032d1fb91a2e..a2438c2cd731 100755 --- a/tests/zfs-tests/tests/functional/redacted_send/redacted_panic.ksh +++ b/tests/zfs-tests/tests/functional/redacted_send/redacted_panic.ksh @@ -39,7 +39,7 @@ function cleanup log_onexit cleanup log_must zfs create -o recsize=8k $sendfs -log_must dd if=/dev/urandom of=/$sendfs/file bs=1024k count=2048 +log_must dd if=/dev/urandom of=/$sendfs/file bs=1024k count=1024 log_must zfs snapshot $sendfs@init log_must zfs clone $sendfs@init $clone log_must stride_dd -i /dev/urandom -o /$clone/file -b 8192 -s 2 -c 7226 diff --git a/tests/zfs-tests/tests/functional/redundancy/redundancy_draid.ksh b/tests/zfs-tests/tests/functional/redundancy/redundancy_draid.ksh index 8208d2b4a398..df113a98aa3c 100755 --- a/tests/zfs-tests/tests/functional/redundancy/redundancy_draid.ksh +++ b/tests/zfs-tests/tests/functional/redundancy/redundancy_draid.ksh @@ -223,13 +223,13 @@ for nparity in 1 2 3; do log_must zfs set primarycache=metadata $TESTPOOL log_must zfs create $TESTPOOL/fs - log_must fill_fs /$TESTPOOL/fs 1 512 100 1024 R + log_must fill_fs /$TESTPOOL/fs 1 512 102400 1 R log_must zfs create -o compress=on $TESTPOOL/fs2 - log_must fill_fs /$TESTPOOL/fs2 1 512 100 1024 R + log_must fill_fs /$TESTPOOL/fs2 1 512 102400 1 R log_must zfs create -o compress=on -o recordsize=8k $TESTPOOL/fs3 - log_must fill_fs /$TESTPOOL/fs3 1 512 100 1024 R + log_must fill_fs /$TESTPOOL/fs3 1 512 102400 1 R typeset pool_size=$(get_pool_prop size $TESTPOOL) diff --git a/tests/zfs-tests/tests/functional/redundancy/redundancy_draid_damaged1.ksh b/tests/zfs-tests/tests/functional/redundancy/redundancy_draid_damaged1.ksh index 110c69159eb1..50d7358411dc 100755 --- a/tests/zfs-tests/tests/functional/redundancy/redundancy_draid_damaged1.ksh +++ b/tests/zfs-tests/tests/functional/redundancy/redundancy_draid_damaged1.ksh @@ -119,13 +119,13 @@ for nparity in 1 2 3; do log_must zfs set primarycache=metadata $TESTPOOL log_must zfs create $TESTPOOL/fs - log_must fill_fs /$TESTPOOL/fs 1 512 100 1024 R + log_must fill_fs /$TESTPOOL/fs 1 512 102400 1 R log_must zfs create -o compress=on $TESTPOOL/fs2 - log_must fill_fs /$TESTPOOL/fs2 1 512 100 1024 R + log_must fill_fs /$TESTPOOL/fs2 1 512 102400 1 R log_must zfs create -o compress=on -o recordsize=8k $TESTPOOL/fs3 - log_must fill_fs /$TESTPOOL/fs3 1 512 100 1024 R + log_must fill_fs /$TESTPOOL/fs3 1 512 102400 1 R log_must zpool export $TESTPOOL log_must zpool import -o cachefile=none -d $dir $TESTPOOL diff --git a/tests/zfs-tests/tests/functional/redundancy/redundancy_draid_damaged2.ksh b/tests/zfs-tests/tests/functional/redundancy/redundancy_draid_damaged2.ksh index b0bb4ef84129..ad66f8633986 100755 --- a/tests/zfs-tests/tests/functional/redundancy/redundancy_draid_damaged2.ksh +++ b/tests/zfs-tests/tests/functional/redundancy/redundancy_draid_damaged2.ksh @@ -94,13 +94,13 @@ for nparity in 1 2 3; do # log_must zfs set primarycache=metadata $TESTPOOL log_must zfs create $TESTPOOL/fs - log_must fill_fs /$TESTPOOL/fs 1 256 10 1024 R + log_must fill_fs /$TESTPOOL/fs 1 256 10240 1 R log_must zfs create -o compress=on $TESTPOOL/fs2 - log_must fill_fs /$TESTPOOL/fs2 1 256 10 1024 R + log_must fill_fs /$TESTPOOL/fs2 1 256 10240 1 R log_must zfs create -o compress=on -o recordsize=8k $TESTPOOL/fs3 - log_must 
fill_fs /$TESTPOOL/fs3 1 256 10 1024 R + log_must fill_fs /$TESTPOOL/fs3 1 256 10240 1 R log_must zpool export $TESTPOOL log_must zpool import -o cachefile=none -d $dir $TESTPOOL diff --git a/tests/zfs-tests/tests/functional/redundancy/redundancy_raidz.ksh b/tests/zfs-tests/tests/functional/redundancy/redundancy_raidz.ksh index 83cacda84b09..7de35c947fec 100755 --- a/tests/zfs-tests/tests/functional/redundancy/redundancy_raidz.ksh +++ b/tests/zfs-tests/tests/functional/redundancy/redundancy_raidz.ksh @@ -223,13 +223,13 @@ for nparity in 1 2 3; do log_must zfs set primarycache=metadata $TESTPOOL log_must zfs create $TESTPOOL/fs - log_must fill_fs /$TESTPOOL/fs 1 512 100 1024 R + log_must fill_fs /$TESTPOOL/fs 1 512 102400 1 R log_must zfs create -o compress=on $TESTPOOL/fs2 - log_must fill_fs /$TESTPOOL/fs2 1 512 100 1024 R + log_must fill_fs /$TESTPOOL/fs2 1 512 102400 1 R log_must zfs create -o compress=on -o recordsize=8k $TESTPOOL/fs3 - log_must fill_fs /$TESTPOOL/fs3 1 512 100 1024 R + log_must fill_fs /$TESTPOOL/fs3 1 512 102400 1 R typeset pool_size=$(get_pool_prop size $TESTPOOL)
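The fill_fs argument changes above (100 1024 becoming 102400 1, and 10 1024 becoming 10240 1) keep the amount of data written per file the same and only collapse it into a single large write. Assuming the usual libtest.shlib ordering of fill_fs destdir dirnum filenum bytes num_writes data, the per-file totals work out as follows (illustrative shell arithmetic, not part of the patch):

echo $(( 100 * 1024 ))      # old: 1024 writes of 100 bytes  -> 102400 bytes per file
echo $(( 102400 * 1 ))      # new: 1 write of 102400 bytes   -> 102400 bytes per file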