@@ -406,6 +406,12 @@ struct cache {
 	mempool_t migration_pool;
 
 	struct bio_set bs;
+
+	/*
+	 * Cache_size entries. Set bits indicate blocks mapped beyond the
+	 * target length, which are marked for invalidation.
+	 */
+	unsigned long *invalid_bitset;
 };
 
 struct per_bio_data {
@@ -1922,6 +1928,9 @@ static void __destroy(struct cache *cache)
 	if (cache->discard_bitset)
 		free_bitset(cache->discard_bitset);
 
+	if (cache->invalid_bitset)
+		free_bitset(cache->invalid_bitset);
+
 	if (cache->copier)
 		dm_kcopyd_client_destroy(cache->copier);
 
@@ -2510,6 +2519,13 @@ static int cache_create(struct cache_args *ca, struct cache **result)
 	}
 	clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));
 
+	cache->invalid_bitset = alloc_bitset(from_cblock(cache->cache_size));
+	if (!cache->invalid_bitset) {
+		*error = "could not allocate bitset for invalid blocks";
+		goto bad;
+	}
+	clear_bitset(cache->invalid_bitset, from_cblock(cache->cache_size));
+
 	cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
 	if (IS_ERR(cache->copier)) {
 		*error = "could not create kcopyd client";
@@ -2808,6 +2824,24 @@ static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
 	return policy_load_mapping(cache->policy, oblock, cblock, dirty, hint, hint_valid);
 }
 
+static int load_filtered_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
+				 bool dirty, uint32_t hint, bool hint_valid)
+{
+	struct cache *cache = context;
+
+	if (from_oblock(oblock) >= from_oblock(cache->origin_blocks)) {
+		if (dirty) {
+			DMERR("%s: unable to shrink origin; cache block %u is dirty",
+			      cache_device_name(cache), from_cblock(cblock));
+			return -EFBIG;
+		}
+		set_bit(from_cblock(cblock), cache->invalid_bitset);
+		return 0;
+	}
+
+	return load_mapping(context, oblock, cblock, dirty, hint, hint_valid);
+}
+
 /*
  * The discard block size in the on disk metadata is not
  * necessarily the same as we're currently using. So we have to
@@ -2899,6 +2933,27 @@ static dm_cblock_t get_cache_dev_size(struct cache *cache)
 	return to_cblock(size);
 }
 
+static bool can_resume(struct cache *cache)
+{
+	/*
+	 * Disallow retrying the resume operation for devices that failed the
+	 * first resume attempt, as the failure leaves the policy object partially
+	 * initialized. Retrying could trigger BUG_ON when loading cache mappings
+	 * into the incomplete policy object.
+	 */
+	if (cache->sized && !cache->loaded_mappings) {
+		if (get_cache_mode(cache) != CM_WRITE)
+			DMERR("%s: unable to resume a failed-loaded cache, please check metadata.",
+			      cache_device_name(cache));
+		else
+			DMERR("%s: unable to resume cache due to missing proper cache table reload",
+			      cache_device_name(cache));
+		return false;
+	}
+
+	return true;
+}
+
 static bool can_resize(struct cache *cache, dm_cblock_t new_size)
 {
 	if (from_cblock(new_size) > from_cblock(cache->cache_size)) {
@@ -2941,12 +2996,33 @@ static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size)
 	return 0;
 }
 
+static int truncate_oblocks(struct cache *cache)
+{
+	uint32_t nr_blocks = from_cblock(cache->cache_size);
+	uint32_t i;
+	int r;
+
+	for_each_set_bit(i, cache->invalid_bitset, nr_blocks) {
+		r = dm_cache_remove_mapping(cache->cmd, to_cblock(i));
+		if (r) {
+			DMERR_LIMIT("%s: invalidation failed; couldn't update on disk metadata",
+				    cache_device_name(cache));
+			return r;
+		}
+	}
+
+	return 0;
+}
+
 static int cache_preresume(struct dm_target *ti)
 {
 	int r = 0;
 	struct cache *cache = ti->private;
 	dm_cblock_t csize = get_cache_dev_size(cache);
 
+	if (!can_resume(cache))
+		return -EINVAL;
+
 	/*
 	 * Check to see if the cache has resized.
 	 */
@@ -2962,11 +3038,25 @@ static int cache_preresume(struct dm_target *ti)
 	}
 
 	if (!cache->loaded_mappings) {
+		/*
+		 * The fast device could have been resized since the last
+		 * failed preresume attempt. To be safe we start by a blank
+		 * bitset for cache blocks.
+		 */
+		clear_bitset(cache->invalid_bitset, from_cblock(cache->cache_size));
+
 		r = dm_cache_load_mappings(cache->cmd, cache->policy,
-					   load_mapping, cache);
+					   load_filtered_mapping, cache);
 		if (r) {
 			DMERR("%s: could not load cache mappings", cache_device_name(cache));
-			metadata_operation_failed(cache, "dm_cache_load_mappings", r);
+			if (r != -EFBIG)
+				metadata_operation_failed(cache, "dm_cache_load_mappings", r);
+			return r;
+		}
+
+		r = truncate_oblocks(cache);
+		if (r) {
+			metadata_operation_failed(cache, "dm_cache_remove_mapping", r);
 			return r;
 		}
 
@@ -3426,7 +3516,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
 static struct target_type cache_target = {
 	.name = "cache",
-	.version = {2, 2, 0},
+	.version = {2, 3, 0},
 	.module = THIS_MODULE,
 	.ctr = cache_ctr,
 	.dtr = cache_dtr,