@@ -18,18 +18,18 @@ pub struct ProvenanceMap<Prov = AllocId> {
     /// Provenance in this map only applies to the given single byte.
     /// This map is disjoint from the previous. It will always be empty when
     /// `Prov::OFFSET_IS_ADDR` is false.
-    bytes: SortedMap<Size, Prov>,
+    bytes: Option<Box<SortedMap<Size, Prov>>>,
 }
 
 impl<Prov> ProvenanceMap<Prov> {
     pub fn new() -> Self {
-        ProvenanceMap { ptrs: SortedMap::new(), bytes: SortedMap::new() }
+        ProvenanceMap { ptrs: SortedMap::new(), bytes: None }
     }
 
     /// The caller must guarantee that the given provenance list is already sorted
     /// by address and contain no duplicates.
     pub fn from_presorted_ptrs(r: Vec<(Size, Prov)>) -> Self {
-        ProvenanceMap { ptrs: SortedMap::from_presorted_elements(r), bytes: None }
+        ProvenanceMap { ptrs: SortedMap::from_presorted_elements(r), bytes: None }
     }
 }
 
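For context on the representation change: `SortedMap` is backed by a sorted `Vec`, so an inline `bytes` field costs three words per allocation even though it is almost always empty, while `Option<Box<_>>` collapses the empty case to a single null pointer thanks to the niche optimization. A minimal standalone sketch of that size argument, using `Vec<(u64, u32)>` as a stand-in for `SortedMap<Size, Prov>`:

```rust
use std::mem::size_of;

fn main() {
    // Stand-in for `SortedMap<Size, Prov>`: a Vec-backed sorted association list.
    type ByteProv = Vec<(u64, u32)>;

    // Stored inline, the map costs three words (ptr, cap, len) even when empty.
    assert_eq!(size_of::<ByteProv>(), 3 * size_of::<usize>());

    // `Option<Box<_>>` uses the null-pointer niche: a single word, `None` encoded as null.
    assert_eq!(size_of::<Option<Box<ByteProv>>>(), size_of::<usize>());
}
```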
@@ -40,7 +40,7 @@ impl ProvenanceMap {
     /// Only exposed with `AllocId` provenance, since it panics if there is bytewise provenance.
     #[inline]
     pub fn ptrs(&self) -> &SortedMap<Size, AllocId> {
-        debug_assert!(self.bytes.is_empty()); // `AllocId::OFFSET_IS_ADDR` is false so this cannot fail
+        debug_assert!(self.bytes.is_none()); // `AllocId::OFFSET_IS_ADDR` is false so this cannot fail
         &self.ptrs
     }
 }
@@ -60,7 +60,11 @@ impl<Prov: Provenance> ProvenanceMap<Prov> {
 
     /// Returns all byte-wise provenance in the given range.
     fn range_get_bytes(&self, range: AllocRange) -> &[(Size, Prov)] {
-        self.bytes.range(range.start..range.end())
+        if let Some(bytes) = self.bytes.as_ref() {
+            bytes.range(range.start..range.end())
+        } else {
+            &[]
+        }
     }
 
     /// Get the provenance of a single byte.
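The read paths now treat a missing map as empty. Below is a small self-contained sketch of that accessor shape; `Bytes`, `range_get`, and the `Vec`-of-pairs stand-in are illustrative names, not the real `ProvenanceMap` API:

```rust
// Stand-in for the Vec-backed `SortedMap`: a sorted list of (offset, provenance) pairs.
struct Bytes {
    bytes: Option<Box<Vec<(u64, u32)>>>,
}

impl Bytes {
    // Read path: `None` simply behaves like an empty map.
    fn range_get(&self, start: u64, end: u64) -> &[(u64, u32)] {
        if let Some(bytes) = self.bytes.as_ref() {
            // Entries are sorted by offset, so binary searches give the sub-slice bounds.
            let lo = bytes.partition_point(|&(o, _)| o < start);
            let hi = bytes.partition_point(|&(o, _)| o < end);
            &bytes[lo..hi]
        } else {
            &[]
        }
    }
}

fn main() {
    let empty = Bytes { bytes: None };
    assert!(empty.range_get(0, 100).is_empty());

    let filled = Bytes { bytes: Some(Box::new(vec![(1, 10), (5, 50), (9, 90)])) };
    assert_eq!(filled.range_get(2, 9), [(5, 50)]);
}
```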
@@ -69,11 +73,11 @@ impl<Prov: Provenance> ProvenanceMap<Prov> {
         debug_assert!(prov.len() <= 1);
         if let Some(entry) = prov.first() {
             // If it overlaps with this byte, it is on this byte.
-            debug_assert!(self.bytes.get(&offset).is_none());
+            debug_assert!(self.bytes.as_ref().map_or(true, |b| b.get(&offset).is_none()));
             Some(entry.1)
         } else {
             // Look up per-byte provenance.
-            self.bytes.get(&offset).copied()
+            self.bytes.as_ref().and_then(|b| b.get(&offset).copied())
         }
     }
 
@@ -94,7 +98,8 @@ impl<Prov: Provenance> ProvenanceMap<Prov> {
 
     /// Yields all the provenances stored in this map.
     pub fn provenances(&self) -> impl Iterator<Item = Prov> + '_ {
-        self.ptrs.values().chain(self.bytes.values()).copied()
+        let bytes = self.bytes.iter().flat_map(|b| b.values());
+        self.ptrs.values().chain(bytes).copied()
     }
 
     pub fn insert_ptr(&mut self, offset: Size, prov: Prov, cx: &impl HasDataLayout) {
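`provenances` leans on the fact that iterating an `Option` yields zero or one item, so `flat_map` over the boxed map's values degenerates to an empty iterator when there is no bytewise provenance. A runnable sketch of the same pattern, with `BTreeMap` standing in for `SortedMap` and a made-up function name (`all_values`):

```rust
use std::collections::BTreeMap;

// Chain "pointer" values with optional "bytewise" values, as `provenances` now does.
fn all_values<'a>(
    ptrs: &'a BTreeMap<u64, u32>,
    bytes: &'a Option<Box<BTreeMap<u64, u32>>>,
) -> impl Iterator<Item = u32> + 'a {
    let bytes = bytes.iter().flat_map(|b| b.values());
    ptrs.values().chain(bytes).copied()
}

fn main() {
    let ptrs = BTreeMap::from([(0, 1), (8, 2)]);

    // With no bytewise map at all, the chained iterator is just `ptrs`.
    assert_eq!(all_values(&ptrs, &None).collect::<Vec<_>>(), vec![1, 2]);

    // With a bytewise map present, its values are appended.
    let bytes = Some(Box::new(BTreeMap::from([(3, 7)])));
    assert_eq!(all_values(&ptrs, &bytes).collect::<Vec<_>>(), vec![1, 2, 7]);
}
```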
@@ -109,9 +114,11 @@ impl<Prov: Provenance> ProvenanceMap<Prov> {
         let end = range.end();
         // Clear the bytewise part -- this is easy.
         if Prov::OFFSET_IS_ADDR {
-            self.bytes.remove_range(start..end);
+            if let Some(bytes) = self.bytes.as_mut() {
+                bytes.remove_range(start..end);
+            }
         } else {
-            debug_assert!(self.bytes.is_empty());
+            debug_assert!(self.bytes.is_none());
         }
 
         // For the ptr-sized part, find the first (inclusive) and last (exclusive) byte of
@@ -138,8 +145,9 @@ impl<Prov: Provenance> ProvenanceMap<Prov> {
             }
             // Insert the remaining part in the bytewise provenance.
             let prov = self.ptrs[&first];
+            let bytes = self.bytes.get_or_insert_with(Box::default);
             for offset in first..start {
-                self.bytes.insert(offset, prov);
+                bytes.insert(offset, prov);
             }
         }
         if last > end {
@@ -150,8 +158,9 @@ impl<Prov: Provenance> ProvenanceMap<Prov> {
             }
             // Insert the remaining part in the bytewise provenance.
             let prov = self.ptrs[&begin_of_last];
+            let bytes = self.bytes.get_or_insert_with(Box::default);
             for offset in end..last {
-                self.bytes.insert(offset, prov);
+                bytes.insert(offset, prov);
             }
         }
 
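Both split-pointer paths above now go through `Option::get_or_insert_with(Box::default)`, so the bytewise map is only heap-allocated the first time a pointer actually has to be split. A standalone sketch of that lazy-init pattern (again `BTreeMap` as a stand-in; `Provenance` and `record_byte` are made-up names):

```rust
use std::collections::BTreeMap;

#[derive(Default)]
struct Provenance {
    // Lazily allocated: stays `None` until the first bytewise entry is recorded.
    bytes: Option<Box<BTreeMap<u64, u32>>>,
}

impl Provenance {
    fn record_byte(&mut self, offset: u64, prov: u32) {
        // Allocate the box on first use, then insert into it.
        self.bytes.get_or_insert_with(Box::default).insert(offset, prov);
    }
}

fn main() {
    let mut p = Provenance::default();
    assert!(p.bytes.is_none()); // nothing allocated yet

    p.record_byte(3, 42);
    assert_eq!(p.bytes.as_ref().unwrap().get(&3), Some(&42));
}
```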
@@ -168,8 +177,8 @@ impl<Prov: Provenance> ProvenanceMap<Prov> {
 ///
 /// Offsets are already adjusted to the destination allocation.
 pub struct ProvenanceCopy<Prov> {
-    dest_ptrs: Vec<(Size, Prov)>,
-    dest_bytes: Vec<(Size, Prov)>,
+    dest_ptrs: Option<Box<[(Size, Prov)]>>,
+    dest_bytes: Option<Box<[(Size, Prov)]>>,
 }
 
 impl<Prov: Provenance> ProvenanceMap<Prov> {
@@ -192,96 +201,104 @@ impl<Prov: Provenance> ProvenanceMap<Prov> {
         // Get the provenances that are entirely within this range.
         // (Different from `range_get_ptrs` which asks if they overlap the range.)
         // Only makes sense if we are copying at least one pointer worth of bytes.
-        let mut dest_ptrs = Vec::new();
+        let mut dest_ptrs_box = None;
         if src.size >= ptr_size {
             let adjusted_end = Size::from_bytes(src.end().bytes() - (ptr_size.bytes() - 1));
             let ptrs = self.ptrs.range(src.start..adjusted_end);
-            dest_ptrs.reserve_exact(ptrs.len() * (count as usize));
             // If `count` is large, this is rather wasteful -- we are allocating a big array here, which
             // is mostly filled with redundant information since it's just N copies of the same `Prov`s
             // at slightly adjusted offsets. The reason we do this is so that in `mark_provenance_range`
             // we can use `insert_presorted`. That wouldn't work with an `Iterator` that just produces
             // the right sequence of provenance for all N copies.
             // Basically, this large array would have to be created anyway in the target allocation.
+            let mut dest_ptrs = Vec::with_capacity(ptrs.len() * (count as usize));
             for i in 0..count {
                 dest_ptrs
                     .extend(ptrs.iter().map(|&(offset, reloc)| (shift_offset(i, offset), reloc)));
             }
+            debug_assert_eq!(dest_ptrs.len(), dest_ptrs.capacity());
+            dest_ptrs_box = Some(dest_ptrs.into_boxed_slice());
         };
 
         // # Byte-sized provenances
-        let mut bytes = Vec::new();
-        // First, if there is a part of a pointer at the start, add that.
-        if let Some(entry) = self.range_get_ptrs(alloc_range(src.start, Size::ZERO), cx).first() {
-            if !Prov::OFFSET_IS_ADDR {
-                // We can't split up the provenance into less than a pointer.
+        // This includes the existing bytewise provenance in the range, and ptr provenance
+        // that overlaps with the begin/end of the range.
+        let mut dest_bytes_box = None;
+        let begin_overlap = self.range_get_ptrs(alloc_range(src.start, Size::ZERO), cx).first();
+        let end_overlap = self.range_get_ptrs(alloc_range(src.end(), Size::ZERO), cx).first();
+        if !Prov::OFFSET_IS_ADDR {
+            // There can't be any bytewise provenance, and we cannot split up the begin/end overlap.
+            if let Some(entry) = begin_overlap {
                 return Err(AllocError::PartialPointerCopy(entry.0));
             }
-            trace!("start overlapping entry: {entry:?}");
-            // For really small copies, make sure we don't run off the end of the `src` range.
-            let entry_end = cmp::min(entry.0 + ptr_size, src.end());
-            for offset in src.start..entry_end {
-                bytes.push((offset, entry.1));
-            }
-        } else {
-            trace!("no start overlapping entry");
-        }
-        // Then the main part, bytewise provenance from `self.bytes`.
-        if Prov::OFFSET_IS_ADDR {
-            bytes.extend(self.bytes.range(src.start..src.end()));
-        } else {
-            debug_assert!(self.bytes.is_empty());
-        }
-        // And finally possibly parts of a pointer at the end.
-        if let Some(entry) = self.range_get_ptrs(alloc_range(src.end(), Size::ZERO), cx).first() {
-            if !Prov::OFFSET_IS_ADDR {
-                // We can't split up the provenance into less than a pointer.
+            if let Some(entry) = end_overlap {
                 return Err(AllocError::PartialPointerCopy(entry.0));
             }
-            trace!("end overlapping entry: {entry:?}");
-            // For really small copies, make sure we don't start before `src` does.
-            let entry_start = cmp::max(entry.0, src.start);
-            for offset in entry_start..src.end() {
-                if bytes.last().map_or(true, |bytes_entry| bytes_entry.0 < offset) {
-                    // The last entry, if it exists, has a lower offset than us.
+            debug_assert!(self.bytes.is_none());
+        } else {
+            let mut bytes = Vec::new();
+            // First, if there is a part of a pointer at the start, add that.
+            if let Some(entry) = begin_overlap {
+                trace!("start overlapping entry: {entry:?}");
+                // For really small copies, make sure we don't run off the end of the `src` range.
+                let entry_end = cmp::min(entry.0 + ptr_size, src.end());
+                for offset in src.start..entry_end {
                     bytes.push((offset, entry.1));
-                } else {
-                    // There already is an entry for this offset in there! This can happen when the
-                    // start and end range checks actually end up hitting the same pointer, so we
-                    // already added this in the "pointer at the start" part above.
-                    assert!(entry.0 <= src.start);
                 }
+            } else {
+                trace!("no start overlapping entry");
             }
-        } else {
-            trace!("no end overlapping entry");
-        }
-        trace!("byte provenances: {bytes:?}");
+            // Then the main part, bytewise provenance from `self.bytes`.
+            if let Some(all_bytes) = self.bytes.as_ref() {
+                bytes.extend(all_bytes.range(src.start..src.end()));
+            }
+            // And finally possibly parts of a pointer at the end.
+            if let Some(entry) = end_overlap {
+                trace!("end overlapping entry: {entry:?}");
+                // For really small copies, make sure we don't start before `src` does.
+                let entry_start = cmp::max(entry.0, src.start);
+                for offset in entry_start..src.end() {
+                    if bytes.last().map_or(true, |bytes_entry| bytes_entry.0 < offset) {
+                        // The last entry, if it exists, has a lower offset than us.
+                        bytes.push((offset, entry.1));
+                    } else {
+                        // There already is an entry for this offset in there! This can happen when the
+                        // start and end range checks actually end up hitting the same pointer, so we
+                        // already added this in the "pointer at the start" part above.
+                        assert!(entry.0 <= src.start);
+                    }
+                }
+            } else {
+                trace!("no end overlapping entry");
            }
+            trace!("byte provenances: {bytes:?}");
 
-        // And again a buffer for the new list on the target side.
-        let mut dest_bytes = Vec::new();
-        if Prov::OFFSET_IS_ADDR {
-            dest_bytes.reserve_exact(bytes.len() * (count as usize));
+            // And again a buffer for the new list on the target side.
+            let mut dest_bytes = Vec::with_capacity(bytes.len() * (count as usize));
             for i in 0..count {
                 dest_bytes
                     .extend(bytes.iter().map(|&(offset, reloc)| (shift_offset(i, offset), reloc)));
             }
-        } else {
-            // There can't be any bytewise provenance when OFFSET_IS_ADDR is false.
-            debug_assert!(bytes.is_empty());
+            debug_assert_eq!(dest_bytes.len(), dest_bytes.capacity());
+            dest_bytes_box = Some(dest_bytes.into_boxed_slice());
         }
 
-        Ok(ProvenanceCopy { dest_ptrs, dest_bytes })
+        Ok(ProvenanceCopy { dest_ptrs: dest_ptrs_box, dest_bytes: dest_bytes_box })
     }
 
     /// Applies a provenance copy.
     /// The affected range, as defined in the parameters to `prepare_copy` is expected
     /// to be clear of provenance.
     pub fn apply_copy(&mut self, copy: ProvenanceCopy<Prov>) {
-        self.ptrs.insert_presorted(copy.dest_ptrs);
+        if let Some(dest_ptrs) = copy.dest_ptrs {
+            self.ptrs.insert_presorted(dest_ptrs.into());
+        }
         if Prov::OFFSET_IS_ADDR {
-            self.bytes.insert_presorted(copy.dest_bytes);
+            if let Some(dest_bytes) = copy.dest_bytes && !dest_bytes.is_empty() {
+                self.bytes.get_or_insert_with(Box::default).insert_presorted(dest_bytes.into());
+            }
         } else {
-            debug_assert!(copy.dest_bytes.is_empty());
+            debug_assert!(copy.dest_bytes.is_none());
         }
     }
 }
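The copy buffers are now optional boxed slices, so `apply_copy` skips whole sides that were never prepared, and `dest_ptrs.into()` works because the standard library converts `Box<[T]>` into `Vec<T>` by reusing the same heap buffer. A short demonstration of that conversion (standard-library behavior only, nothing specific to this patch):

```rust
fn main() {
    // `prepare_copy` hands back an optional boxed slice per kind of provenance.
    let dest_ptrs: Option<Box<[(u64, u32)]>> = Some(vec![(0, 1), (8, 2)].into_boxed_slice());
    let dest_bytes: Option<Box<[(u64, u32)]>> = None;

    // `apply_copy` only does work for the sides that were prepared.
    if let Some(ptrs) = dest_ptrs {
        // `Box<[T]>` -> `Vec<T>` is a move of the same heap buffer, no reallocation.
        let as_vec: Vec<(u64, u32)> = ptrs.into();
        assert_eq!(as_vec, vec![(0, 1), (8, 2)]);
    }
    assert!(dest_bytes.is_none()); // the empty side stays untouched
}
```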