Skip to content

Commit 5014beb

Browse files
committed
Merge tag 'for-6.15/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm
Pull device mapper updates from Mikulas Patocka:

 - dm-crypt: switch to using the crc32 library
 - dm-verity, dm-integrity, dm-crypt: documentation improvement
 - dm-vdo fixes
 - dm-stripe: enable inline crypto passthrough
 - dm-integrity: set ti->error on memory allocation failure
 - dm-bufio: remove unused return value
 - dm-verity: do forward error correction on metadata I/O errors
 - dm: fix unconditional IO throttle caused by REQ_PREFLUSH
 - dm cache: prevent BUG_ON by blocking retries on failed device resumes
 - dm cache: support shrinking the origin device
 - dm: restrict dm device size to 2^63-512 bytes
 - dm-delay: support zoned devices
 - dm-verity: support block number limits for different ioprio classes
 - dm-integrity: fix non-constant-time tag verification (security bug)
 - dm-verity, dm-ebs: fix prefetch-vs-suspend race

* tag 'for-6.15/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm: (27 commits)
  dm-ebs: fix prefetch-vs-suspend race
  dm-verity: fix prefetch-vs-suspend race
  dm-integrity: fix non-constant-time tag verification
  dm-verity: support block number limits for different ioprio classes
  dm-delay: support zoned devices
  dm: restrict dm device size to 2^63-512 bytes
  dm cache: support shrinking the origin device
  dm cache: prevent BUG_ON by blocking retries on failed device resumes
  dm vdo indexer: reorder uds_request to reduce padding
  dm: fix unconditional IO throttle caused by REQ_PREFLUSH
  dm vdo: rework processing of loaded refcount byte arrays
  dm vdo: remove remaining ring references
  dm-verity: do forward error correction on metadata I/O errors
  dm-bufio: remove unused return value
  dm-integrity: set ti->error on memory allocation failure
  dm: Enable inline crypto passthrough for striped target
  dm vdo slab-depot: read refcount blocks in large chunks at load time
  dm vdo vio-pool: allow variable-sized metadata vios
  dm vdo vio-pool: support pools with multiple data blocks per vio
  dm vdo vio-pool: add a pool pointer to pooled_vio
  ...
2 parents 447d2d2 + 9c56542 commit 5014beb

33 files changed: +520 −244 lines changed

Documentation/admin-guide/device-mapper/dm-crypt.rst

+5
Original file line number | Diff line number | Diff line change
@@ -146,6 +146,11 @@ integrity:<bytes>:<type>
146146
integrity for the encrypted device. The additional space is then
147147
used for storing authentication tag (and persistent IV if needed).
148148

149+
integrity_key_size:<bytes>
150+
Optionally set the integrity key size if it differs from the digest size.
151+
It allows the use of wrapped key algorithms where the key size is
152+
independent of the cryptographic key size.
153+
149154
sector_size:<bytes>
150155
Use <bytes> as the encryption unit instead of 512 bytes sectors.
151156
This option can be in range 512 - 4096 bytes and must be power of two.

Documentation/admin-guide/device-mapper/dm-integrity.rst

+5
Original file line number | Diff line number | Diff line change
@@ -92,6 +92,11 @@ Target arguments:
9292
allowed. This mode is useful for data recovery if the
9393
device cannot be activated in any of the other standard
9494
modes.
95+
I - inline mode - in this mode, dm-integrity will store integrity
96+
data directly in the underlying device sectors.
97+
The underlying device must have an integrity profile that
98+
allows storing user integrity data and provides enough
99+
space for the selected integrity tag.
95100

96101
5. the number of additional arguments
97102

Documentation/admin-guide/device-mapper/verity.rst

+18-2
Original file line number | Diff line number | Diff line change
@@ -87,6 +87,15 @@ panic_on_corruption
8787
Panic the device when a corrupted block is discovered. This option is
8888
not compatible with ignore_corruption and restart_on_corruption.
8989

90+
restart_on_error
91+
Restart the system when an I/O error is detected.
92+
This option can be combined with the restart_on_corruption option.
93+
94+
panic_on_error
95+
Panic the device when an I/O error is detected. This option is
96+
not compatible with the restart_on_error option but can be combined
97+
with the panic_on_corruption option.
98+
9099
ignore_zero_blocks
91100
Do not verify blocks that are expected to contain zeroes and always return
92101
zeroes instead. This may be useful if the partition contains unused blocks
@@ -142,8 +151,15 @@ root_hash_sig_key_desc <key_description>
142151
already in the secondary trusted keyring.
143152

144153
try_verify_in_tasklet
145-
If verity hashes are in cache, verify data blocks in kernel tasklet instead
146-
of workqueue. This option can reduce IO latency.
154+
If verity hashes are in cache and the IO size does not exceed the limit,
155+
verify data blocks in bottom half instead of workqueue. This option can
156+
reduce IO latency. The size limits can be configured via
157+
/sys/module/dm_verity/parameters/use_bh_bytes. The four parameters
158+
correspond to limits for IOPRIO_CLASS_NONE, IOPRIO_CLASS_RT,
159+
IOPRIO_CLASS_BE and IOPRIO_CLASS_IDLE in turn.
160+
For example:
161+
<none>,<rt>,<be>,<idle>
162+
4096,4096,4096,4096
147163

148164
Theory of operation
149165
===================

drivers/md/Kconfig

+1
Original file line number | Diff line number | Diff line change
@@ -267,6 +267,7 @@ config DM_CRYPT
267267
depends on BLK_DEV_DM
268268
depends on (ENCRYPTED_KEYS || ENCRYPTED_KEYS=n)
269269
depends on (TRUSTED_KEYS || TRUSTED_KEYS=n)
270+
select CRC32
270271
select CRYPTO
271272
select CRYPTO_CBC
272273
select CRYPTO_ESSIV

drivers/md/dm-bufio.c

+1-3
Original file line number | Diff line number | Diff line change
@@ -2234,7 +2234,7 @@ int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t c
22342234
}
22352235
EXPORT_SYMBOL_GPL(dm_bufio_issue_discard);
22362236

2237-
static bool forget_buffer(struct dm_bufio_client *c, sector_t block)
2237+
static void forget_buffer(struct dm_bufio_client *c, sector_t block)
22382238
{
22392239
struct dm_buffer *b;
22402240

@@ -2249,8 +2249,6 @@ static bool forget_buffer(struct dm_bufio_client *c, sector_t block)
22492249
cache_put_and_wake(c, b);
22502250
}
22512251
}
2252-
2253-
return b ? true : false;
22542252
}
22552253

22562254
/*

drivers/md/dm-cache-target.c

+93-3
Original file line number | Diff line number | Diff line change
@@ -406,6 +406,12 @@ struct cache {
406406
mempool_t migration_pool;
407407

408408
struct bio_set bs;
409+
410+
/*
411+
* Cache_size entries. Set bits indicate blocks mapped beyond the
412+
* target length, which are marked for invalidation.
413+
*/
414+
unsigned long *invalid_bitset;
409415
};
410416

411417
struct per_bio_data {
@@ -1922,6 +1928,9 @@ static void __destroy(struct cache *cache)
19221928
if (cache->discard_bitset)
19231929
free_bitset(cache->discard_bitset);
19241930

1931+
if (cache->invalid_bitset)
1932+
free_bitset(cache->invalid_bitset);
1933+
19251934
if (cache->copier)
19261935
dm_kcopyd_client_destroy(cache->copier);
19271936

@@ -2510,6 +2519,13 @@ static int cache_create(struct cache_args *ca, struct cache **result)
25102519
}
25112520
clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));
25122521

2522+
cache->invalid_bitset = alloc_bitset(from_cblock(cache->cache_size));
2523+
if (!cache->invalid_bitset) {
2524+
*error = "could not allocate bitset for invalid blocks";
2525+
goto bad;
2526+
}
2527+
clear_bitset(cache->invalid_bitset, from_cblock(cache->cache_size));
2528+
25132529
cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
25142530
if (IS_ERR(cache->copier)) {
25152531
*error = "could not create kcopyd client";
@@ -2808,6 +2824,24 @@ static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
28082824
return policy_load_mapping(cache->policy, oblock, cblock, dirty, hint, hint_valid);
28092825
}
28102826

2827+
static int load_filtered_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
2828+
bool dirty, uint32_t hint, bool hint_valid)
2829+
{
2830+
struct cache *cache = context;
2831+
2832+
if (from_oblock(oblock) >= from_oblock(cache->origin_blocks)) {
2833+
if (dirty) {
2834+
DMERR("%s: unable to shrink origin; cache block %u is dirty",
2835+
cache_device_name(cache), from_cblock(cblock));
2836+
return -EFBIG;
2837+
}
2838+
set_bit(from_cblock(cblock), cache->invalid_bitset);
2839+
return 0;
2840+
}
2841+
2842+
return load_mapping(context, oblock, cblock, dirty, hint, hint_valid);
2843+
}
2844+
28112845
/*
28122846
* The discard block size in the on disk metadata is not
28132847
* necessarily the same as we're currently using. So we have to
@@ -2899,6 +2933,27 @@ static dm_cblock_t get_cache_dev_size(struct cache *cache)
28992933
return to_cblock(size);
29002934
}
29012935

2936+
static bool can_resume(struct cache *cache)
2937+
{
2938+
/*
2939+
* Disallow retrying the resume operation for devices that failed the
2940+
* first resume attempt, as the failure leaves the policy object partially
2941+
* initialized. Retrying could trigger BUG_ON when loading cache mappings
2942+
* into the incomplete policy object.
2943+
*/
2944+
if (cache->sized && !cache->loaded_mappings) {
2945+
if (get_cache_mode(cache) != CM_WRITE)
2946+
DMERR("%s: unable to resume a failed-loaded cache, please check metadata.",
2947+
cache_device_name(cache));
2948+
else
2949+
DMERR("%s: unable to resume cache due to missing proper cache table reload",
2950+
cache_device_name(cache));
2951+
return false;
2952+
}
2953+
2954+
return true;
2955+
}
2956+
29022957
static bool can_resize(struct cache *cache, dm_cblock_t new_size)
29032958
{
29042959
if (from_cblock(new_size) > from_cblock(cache->cache_size)) {
@@ -2941,12 +2996,33 @@ static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size)
29412996
return 0;
29422997
}
29432998

2999+
static int truncate_oblocks(struct cache *cache)
3000+
{
3001+
uint32_t nr_blocks = from_cblock(cache->cache_size);
3002+
uint32_t i;
3003+
int r;
3004+
3005+
for_each_set_bit(i, cache->invalid_bitset, nr_blocks) {
3006+
r = dm_cache_remove_mapping(cache->cmd, to_cblock(i));
3007+
if (r) {
3008+
DMERR_LIMIT("%s: invalidation failed; couldn't update on disk metadata",
3009+
cache_device_name(cache));
3010+
return r;
3011+
}
3012+
}
3013+
3014+
return 0;
3015+
}
3016+
29443017
static int cache_preresume(struct dm_target *ti)
29453018
{
29463019
int r = 0;
29473020
struct cache *cache = ti->private;
29483021
dm_cblock_t csize = get_cache_dev_size(cache);
29493022

3023+
if (!can_resume(cache))
3024+
return -EINVAL;
3025+
29503026
/*
29513027
* Check to see if the cache has resized.
29523028
*/
@@ -2962,11 +3038,25 @@ static int cache_preresume(struct dm_target *ti)
29623038
}
29633039

29643040
if (!cache->loaded_mappings) {
3041+
/*
3042+
* The fast device could have been resized since the last
3043+
* failed preresume attempt. To be safe we start by a blank
3044+
* bitset for cache blocks.
3045+
*/
3046+
clear_bitset(cache->invalid_bitset, from_cblock(cache->cache_size));
3047+
29653048
r = dm_cache_load_mappings(cache->cmd, cache->policy,
2966-
load_mapping, cache);
3049+
load_filtered_mapping, cache);
29673050
if (r) {
29683051
DMERR("%s: could not load cache mappings", cache_device_name(cache));
2969-
metadata_operation_failed(cache, "dm_cache_load_mappings", r);
3052+
if (r != -EFBIG)
3053+
metadata_operation_failed(cache, "dm_cache_load_mappings", r);
3054+
return r;
3055+
}
3056+
3057+
r = truncate_oblocks(cache);
3058+
if (r) {
3059+
metadata_operation_failed(cache, "dm_cache_remove_mapping", r);
29703060
return r;
29713061
}
29723062

@@ -3426,7 +3516,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
34263516

34273517
static struct target_type cache_target = {
34283518
.name = "cache",
3429-
.version = {2, 2, 0},
3519+
.version = {2, 3, 0},
34303520
.module = THIS_MODULE,
34313521
.ctr = cache_ctr,
34323522
.dtr = cache_dtr,

drivers/md/dm-crypt.c

+10-31
Original file line number | Diff line number | Diff line change
@@ -17,6 +17,7 @@
1717
#include <linux/bio.h>
1818
#include <linux/blkdev.h>
1919
#include <linux/blk-integrity.h>
20+
#include <linux/crc32.h>
2021
#include <linux/mempool.h>
2122
#include <linux/slab.h>
2223
#include <linux/crypto.h>
@@ -125,7 +126,6 @@ struct iv_lmk_private {
125126

126127
#define TCW_WHITENING_SIZE 16
127128
struct iv_tcw_private {
128-
struct crypto_shash *crc32_tfm;
129129
u8 *iv_seed;
130130
u8 *whitening;
131131
};
@@ -607,10 +607,6 @@ static void crypt_iv_tcw_dtr(struct crypt_config *cc)
607607
tcw->iv_seed = NULL;
608608
kfree_sensitive(tcw->whitening);
609609
tcw->whitening = NULL;
610-
611-
if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm))
612-
crypto_free_shash(tcw->crc32_tfm);
613-
tcw->crc32_tfm = NULL;
614610
}
615611

616612
static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
@@ -628,13 +624,6 @@ static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
628624
return -EINVAL;
629625
}
630626

631-
tcw->crc32_tfm = crypto_alloc_shash("crc32", 0,
632-
CRYPTO_ALG_ALLOCATES_MEMORY);
633-
if (IS_ERR(tcw->crc32_tfm)) {
634-
ti->error = "Error initializing CRC32 in TCW";
635-
return PTR_ERR(tcw->crc32_tfm);
636-
}
637-
638627
tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL);
639628
tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL);
640629
if (!tcw->iv_seed || !tcw->whitening) {
@@ -668,36 +657,28 @@ static int crypt_iv_tcw_wipe(struct crypt_config *cc)
668657
return 0;
669658
}
670659

671-
static int crypt_iv_tcw_whitening(struct crypt_config *cc,
672-
struct dm_crypt_request *dmreq,
673-
u8 *data)
660+
static void crypt_iv_tcw_whitening(struct crypt_config *cc,
661+
struct dm_crypt_request *dmreq, u8 *data)
674662
{
675663
struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
676664
__le64 sector = cpu_to_le64(dmreq->iv_sector);
677665
u8 buf[TCW_WHITENING_SIZE];
678-
SHASH_DESC_ON_STACK(desc, tcw->crc32_tfm);
679-
int i, r;
666+
int i;
680667

681668
/* xor whitening with sector number */
682669
crypto_xor_cpy(buf, tcw->whitening, (u8 *)&sector, 8);
683670
crypto_xor_cpy(&buf[8], tcw->whitening + 8, (u8 *)&sector, 8);
684671

685672
/* calculate crc32 for every 32bit part and xor it */
686-
desc->tfm = tcw->crc32_tfm;
687-
for (i = 0; i < 4; i++) {
688-
r = crypto_shash_digest(desc, &buf[i * 4], 4, &buf[i * 4]);
689-
if (r)
690-
goto out;
691-
}
673+
for (i = 0; i < 4; i++)
674+
put_unaligned_le32(crc32(0, &buf[i * 4], 4), &buf[i * 4]);
692675
crypto_xor(&buf[0], &buf[12], 4);
693676
crypto_xor(&buf[4], &buf[8], 4);
694677

695678
/* apply whitening (8 bytes) to whole sector */
696679
for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
697680
crypto_xor(data + i * 8, buf, 8);
698-
out:
699681
memzero_explicit(buf, sizeof(buf));
700-
return r;
701682
}
702683

703684
static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
@@ -707,13 +688,12 @@ static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
707688
struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
708689
__le64 sector = cpu_to_le64(dmreq->iv_sector);
709690
u8 *src;
710-
int r = 0;
711691

712692
/* Remove whitening from ciphertext */
713693
if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
714694
sg = crypt_get_sg_data(cc, dmreq->sg_in);
715695
src = kmap_local_page(sg_page(sg));
716-
r = crypt_iv_tcw_whitening(cc, dmreq, src + sg->offset);
696+
crypt_iv_tcw_whitening(cc, dmreq, src + sg->offset);
717697
kunmap_local(src);
718698
}
719699

@@ -723,26 +703,25 @@ static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
723703
crypto_xor_cpy(&iv[8], tcw->iv_seed + 8, (u8 *)&sector,
724704
cc->iv_size - 8);
725705

726-
return r;
706+
return 0;
727707
}
728708

729709
static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
730710
struct dm_crypt_request *dmreq)
731711
{
732712
struct scatterlist *sg;
733713
u8 *dst;
734-
int r;
735714

736715
if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
737716
return 0;
738717

739718
/* Apply whitening on ciphertext */
740719
sg = crypt_get_sg_data(cc, dmreq->sg_out);
741720
dst = kmap_local_page(sg_page(sg));
742-
r = crypt_iv_tcw_whitening(cc, dmreq, dst + sg->offset);
721+
crypt_iv_tcw_whitening(cc, dmreq, dst + sg->offset);
743722
kunmap_local(dst);
744723

745-
return r;
724+
return 0;
746725
}
747726

748727
static int crypt_iv_random_gen(struct crypt_config *cc, u8 *iv,

Comments (0)