@@ -105,7 +105,7 @@ impl<Tag> Allocation<Tag> {
         Allocation::from_bytes(slice, Align::from_bytes(1).unwrap())
     }
 
-    pub fn undef(size: Size, align: Align) -> Self {
+    pub fn uninit(size: Size, align: Align) -> Self {
         Allocation {
             bytes: vec![0; size.bytes_usize()],
             relocations: Relocations::new(),
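The renamed constructor makes the semantics clearer: the backing bytes are zero-filled only as a placeholder, while a separate mask records that none of them count as initialized. A minimal standalone sketch of that idea (toy types, not the compiler's `Allocation`/`InitMask`):

```rust
// Toy model: zeroed bytes plus a per-byte "initialized" flag.
struct ToyAllocation {
    bytes: Vec<u8>,       // zero-filled placeholder storage
    init_mask: Vec<bool>, // false = uninitialized
}

impl ToyAllocation {
    fn uninit(size: usize) -> Self {
        ToyAllocation { bytes: vec![0; size], init_mask: vec![false; size] }
    }

    fn read(&self, offset: usize) -> Option<u8> {
        // A zero byte is not the same as an initialized byte that happens
        // to be zero; reads must consult the mask.
        self.init_mask[offset].then(|| self.bytes[offset])
    }
}

fn main() {
    let alloc = ToyAllocation::uninit(4);
    assert_eq!(alloc.read(0), None); // uninit, even though bytes[0] == 0
}
```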
@@ -153,7 +153,7 @@ impl<Tag, Extra> Allocation<Tag, Extra> {
         self.size.bytes_usize()
     }
 
-    /// Looks at a slice which may describe undefined bytes or describe a relocation. This differs
+    /// Looks at a slice which may describe uninitialized bytes or describe a relocation. This differs
     /// from `get_bytes_with_undef_and_ptr` in that it does no relocation checks (even on the
     /// edges) at all. It further ignores `AllocationExtra` callbacks.
     /// This must not be used for reads affecting the interpreter execution.
@@ -192,7 +192,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
         offset.bytes_usize()..end
     }
 
-    /// The last argument controls whether we error out when there are undefined
+    /// The last argument controls whether we error out when there are uninitialized
     /// or pointer bytes. You should never call this, call `get_bytes` or
     /// `get_bytes_with_undef_and_ptr` instead,
     ///
@@ -206,12 +206,12 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
         cx: &impl HasDataLayout,
         ptr: Pointer<Tag>,
         size: Size,
-        check_defined_and_ptr: bool,
+        check_init_and_ptr: bool,
     ) -> InterpResult<'tcx, &[u8]> {
         let range = self.check_bounds(ptr.offset, size);
 
-        if check_defined_and_ptr {
-            self.check_defined(ptr, size)?;
+        if check_init_and_ptr {
+            self.check_init(ptr, size)?;
             self.check_relocations(cx, ptr, size)?;
         } else {
             // We still don't want relocations on the *edges*.
@@ -239,7 +239,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
         self.get_bytes_internal(cx, ptr, size, true)
     }
 
-    /// It is the caller's responsibility to handle undefined and pointer bytes.
+    /// It is the caller's responsibility to handle uninitialized and pointer bytes.
     /// However, this still checks that there are no relocations on the *edges*.
     ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
@@ -267,7 +267,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
     ) -> InterpResult<'tcx, &mut [u8]> {
         let range = self.check_bounds(ptr.offset, size);
 
-        self.mark_definedness(ptr, size, true);
+        self.mark_init(ptr, size, true);
         self.clear_relocations(cx, ptr, size)?;
 
         AllocationExtra::memory_written(self, ptr, size)?;
@@ -303,7 +303,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
 
     /// Validates that `ptr.offset` and `ptr.offset + size` do not point to the middle of a
     /// relocation. If `allow_ptr_and_undef` is `false`, also enforces that the memory in the
-    /// given range contains neither relocations nor undef bytes.
+    /// given range contains neither relocations nor uninitialized bytes.
     pub fn check_bytes(
         &self,
         cx: &impl HasDataLayout,
@@ -313,9 +313,9 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
     ) -> InterpResult<'tcx> {
         // Check bounds and relocations on the edges.
         self.get_bytes_with_undef_and_ptr(cx, ptr, size)?;
-        // Check undef and ptr.
+        // Check uninit and ptr.
         if !allow_ptr_and_undef {
-            self.check_defined(ptr, size)?;
+            self.check_init(ptr, size)?;
             self.check_relocations(cx, ptr, size)?;
         }
         Ok(())
@@ -364,7 +364,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
         let bytes = self.get_bytes_with_undef_and_ptr(cx, ptr, size)?;
         // Uninit check happens *after* we established that the alignment is correct.
         // We must not return `Ok()` for unaligned pointers!
-        if self.is_defined(ptr, size).is_err() {
+        if self.is_init(ptr, size).is_err() {
             // This inflates uninitialized bytes to the entire scalar, even if only a few
             // bytes are uninitialized.
             return Ok(ScalarMaybeUninit::Uninit);
@@ -416,7 +416,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
         let val = match val {
             ScalarMaybeUninit::Scalar(scalar) => scalar,
             ScalarMaybeUninit::Uninit => {
-                self.mark_definedness(ptr, type_size, false);
+                self.mark_init(ptr, type_size, false);
                 return Ok(());
             }
         };
@@ -512,7 +512,7 @@ impl<'tcx, Tag: Copy, Extra> Allocation<Tag, Extra> {
         let start = ptr.offset;
         let end = start + size; // `Size` addition
 
-        // Mark parts of the outermost relocations as undefined if they partially fall outside the
+        // Mark parts of the outermost relocations as uninitialized if they partially fall outside the
         // given range.
         if first < start {
             self.init_mask.set_range(first, start, false);
@@ -542,20 +542,20 @@ impl<'tcx, Tag: Copy, Extra> Allocation<Tag, Extra> {
     }
 }
 
-/// Undefined bytes.
+/// Uninitialized bytes.
 impl<'tcx, Tag: Copy, Extra> Allocation<Tag, Extra> {
-    /// Checks whether the given range is entirely defined.
+    /// Checks whether the given range is entirely initialized.
     ///
-    /// Returns `Ok(())` if it's defined. Otherwise returns the range of byte
-    /// indexes of the first contiguous undefined access.
-    fn is_defined(&self, ptr: Pointer<Tag>, size: Size) -> Result<(), Range<Size>> {
+    /// Returns `Ok(())` if it's initialized. Otherwise returns the range of byte
+    /// indexes of the first contiguous uninitialized access.
+    fn is_init(&self, ptr: Pointer<Tag>, size: Size) -> Result<(), Range<Size>> {
         self.init_mask.is_range_initialized(ptr.offset, ptr.offset + size) // `Size` addition
     }
 
-    /// Checks that a range of bytes is defined. If not, returns the `InvalidUndefBytes`
-    /// error which will report the first range of bytes which is undefined.
-    fn check_defined(&self, ptr: Pointer<Tag>, size: Size) -> InterpResult<'tcx> {
-        self.is_defined(ptr, size).or_else(|idx_range| {
+    /// Checks that a range of bytes is initialized. If not, returns the `InvalidUninitBytes`
+    /// error which will report the first range of bytes which is uninitialized.
+    fn check_init(&self, ptr: Pointer<Tag>, size: Size) -> InterpResult<'tcx> {
+        self.is_init(ptr, size).or_else(|idx_range| {
             throw_ub!(InvalidUninitBytes(Some(Box::new(UninitBytesAccess {
                 access_ptr: ptr.erase_tag(),
                 access_size: size,
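`is_init` and `check_init` split the work: the first reports the offending range, the second turns it into an `InvalidUninitBytes` error. A standalone sketch of the same contract over a plain `bool` slice (hypothetical helper, not the real `InitMask` API):

```rust
use std::ops::Range;

// `Ok(())` if every byte in `range` is initialized; otherwise the first
// contiguous uninitialized sub-range, which an error report can point at.
fn is_init(mask: &[bool], range: Range<usize>) -> Result<(), Range<usize>> {
    // Find the first uninitialized byte, if any.
    let start = match (range.start..range.end).find(|&i| !mask[i]) {
        None => return Ok(()),
        Some(i) => i,
    };
    // Extend to the end of that contiguous uninitialized run.
    let end = (start..range.end).find(|&i| mask[i]).unwrap_or(range.end);
    Err(start..end)
}

fn main() {
    let mask = [true, false, false, true];
    assert_eq!(is_init(&mask, 0..4), Err(1..3)); // bytes 1..3 are uninit
    assert_eq!(is_init(&mask, 3..4), Ok(()));
}
```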
@@ -565,44 +565,44 @@ impl<'tcx, Tag: Copy, Extra> Allocation<Tag, Extra> {
         })
     }
 
-    pub fn mark_definedness(&mut self, ptr: Pointer<Tag>, size: Size, new_state: bool) {
+    pub fn mark_init(&mut self, ptr: Pointer<Tag>, size: Size, is_init: bool) {
         if size.bytes() == 0 {
             return;
         }
-        self.init_mask.set_range(ptr.offset, ptr.offset + size, new_state);
+        self.init_mask.set_range(ptr.offset, ptr.offset + size, is_init);
     }
 }
 
-/// Run-length encoding of the undef mask.
+/// Run-length encoding of the uninit mask.
 /// Used to copy parts of a mask multiple times to another allocation.
-pub struct AllocationDefinedness {
-    /// The definedness of the first range.
+pub struct InitMaskCompressed {
+    /// Whether the first range is initialized.
     initial: bool,
     /// The lengths of ranges that are run-length encoded.
-    /// The definedness of the ranges alternate starting with `initial`.
+    /// The initialization state of the ranges alternate starting with `initial`.
     ranges: smallvec::SmallVec<[u64; 1]>,
 }
 
-impl AllocationDefinedness {
-    pub fn all_bytes_undef(&self) -> bool {
-        // The `ranges` are run-length encoded and of alternating definedness.
-        // So if `ranges.len() > 1` then the second block is a range of defined.
+impl InitMaskCompressed {
+    pub fn no_bytes_init(&self) -> bool {
+        // The `ranges` are run-length encoded and of alternating initialization state.
+        // So if `ranges.len() > 1` then the second block is an initialized range.
         !self.initial && self.ranges.len() == 1
     }
 }
 
-/// Transferring the definedness mask to other allocations.
+/// Transferring the initialization mask to other allocations.
 impl<Tag, Extra> Allocation<Tag, Extra> {
-    /// Creates a run-length encoding of the undef mask.
-    pub fn compress_undef_range(&self, src: Pointer<Tag>, size: Size) -> AllocationDefinedness {
+    /// Creates a run-length encoding of the initialization mask.
+    pub fn compress_undef_range(&self, src: Pointer<Tag>, size: Size) -> InitMaskCompressed {
         // Since we are copying `size` bytes from `src` to `dest + i * size` (`for i in 0..repeat`),
-        // a naive undef mask copying algorithm would repeatedly have to read the undef mask from
+        // a naive initialization mask copying algorithm would repeatedly have to read the initialization mask from
         // the source and write it to the destination. Even if we optimized the memory accesses,
         // we'd be doing all of this `repeat` times.
-        // Therefore we precompute a compressed version of the undef mask of the source value and
+        // Therefore we precompute a compressed version of the initialization mask of the source value and
         // then write it back `repeat` times without computing any more information from the source.
 
-        // A precomputed cache for ranges of defined/undefined bits
+        // A precomputed cache for ranges of initialized/uninitialized bits
         // 0000010010001110 will become
         // `[5, 1, 2, 1, 3, 3, 1]`,
         // where each element toggles the state.
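The worked example in that comment can be checked with a standalone version of the compression loop (hypothetical `compress` helper; the real code builds an `InitMaskCompressed` from the `InitMask` bits):

```rust
// Record the state of the first bit, then just the lengths of the
// alternating runs: each new run toggles the state. Assumes a nonempty mask.
fn compress(mask: &[bool]) -> (bool, Vec<u64>) {
    let initial = mask[0];
    let mut ranges = Vec::new();
    let mut cur = initial;
    let mut cur_len = 1u64;
    for &bit in &mask[1..] {
        if bit == cur {
            cur_len += 1;
        } else {
            ranges.push(cur_len);
            cur = bit;
            cur_len = 1;
        }
    }
    ranges.push(cur_len);
    (initial, ranges)
}

fn main() {
    // The doc comment's example: 0000010010001110.
    let mask: Vec<bool> = "0000010010001110".chars().map(|c| c == '1').collect();
    let (initial, ranges) = compress(&mask);
    assert!(!initial); // the first run is a run of zeros
    assert_eq!(ranges, vec![5, 1, 2, 1, 3, 3, 1]);
}
```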
@@ -613,7 +613,7 @@ impl<Tag, Extra> Allocation<Tag, Extra> {
         let mut cur = initial;
 
         for i in 1..size.bytes() {
-            // FIXME: optimize to bitshift the current undef block's bits and read the top bit.
+            // FIXME: optimize to bitshift the current uninitialized block's bits and read the top bit.
             if self.init_mask.get(src.offset + Size::from_bytes(i)) == cur {
                 cur_len += 1;
             } else {
@@ -625,13 +625,13 @@ impl<Tag, Extra> Allocation<Tag, Extra> {
 
         ranges.push(cur_len);
 
-        AllocationDefinedness { ranges, initial }
+        InitMaskCompressed { ranges, initial }
     }
 
-    /// Applies multiple instances of the run-length encoding to the undef mask.
-    pub fn mark_compressed_undef_range(
+    /// Applies multiple instances of the run-length encoding to the initialization mask.
+    pub fn mark_compressed_init_range(
         &mut self,
-        defined: &AllocationDefinedness,
+        defined: &InitMaskCompressed,
         dest: Pointer<Tag>,
         size: Size,
         repeat: u64,
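Applying the encoding is the mirror image: walk the alternating run lengths once per copy, toggling the state at each run boundary. A sketch under the same toy representation (hypothetical `apply_compressed`, standing in for `mark_compressed_init_range`):

```rust
// Writes `repeat` copies of the compressed mask into `dest`, one copy of
// `size` flags starting at each multiple of `size`.
fn apply_compressed(dest: &mut [bool], initial: bool, ranges: &[u64], size: usize, repeat: usize) {
    for rep in 0..repeat {
        let mut offset = rep * size; // each copy lands at `dest + rep * size`
        let mut state = initial;
        for &len in ranges {
            for _ in 0..len {
                dest[offset] = state;
                offset += 1;
            }
            state = !state; // runs alternate between init and uninit
        }
    }
}

fn main() {
    let mut dest = vec![false; 8];
    // Two copies of a 4-byte pattern: uninit x1, then init x3.
    apply_compressed(&mut dest, false, &[1, 3], 4, 2);
    assert_eq!(dest, [false, true, true, true, false, true, true, true]);
}
```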
@@ -740,7 +740,7 @@ impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
 }
 
 ////////////////////////////////////////////////////////////////////////////////
-// Undefined byte tracking
+// Uninitialized byte tracking
 ////////////////////////////////////////////////////////////////////////////////
 
 type Block = u64;
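The tracking machinery here stores one flag per byte, packed into `u64` blocks. A sketch of the usual bit addressing for such a mask (standalone helpers under that assumed layout; the real `InitMask` adds growth and range operations on top):

```rust
type Block = u64;
const BLOCK_BITS: usize = 64;

// Flag `i` lives in block `i / 64` at bit position `i % 64`.
fn bit_index(i: usize) -> (usize, usize) {
    (i / BLOCK_BITS, i % BLOCK_BITS)
}

fn get(blocks: &[Block], i: usize) -> bool {
    let (block, bit) = bit_index(i);
    (blocks[block] >> bit) & 1 != 0
}

fn set(blocks: &mut [Block], i: usize, new_state: bool) {
    let (block, bit) = bit_index(i);
    if new_state {
        blocks[block] |= 1 << bit; // mark initialized
    } else {
        blocks[block] &= !(1 << bit); // mark uninitialized
    }
}

fn main() {
    let mut blocks = vec![0u64; 2]; // room for 128 flags, all uninit
    set(&mut blocks, 70, true);
    assert!(get(&blocks, 70));
    assert!(!get(&blocks, 69));
}
```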
@@ -778,11 +778,11 @@ impl InitMask {
 
         match idx {
             Some(idx) => {
-                let undef_end = (idx.bytes()..end.bytes())
+                let uninit_end = (idx.bytes()..end.bytes())
                     .map(Size::from_bytes)
                     .find(|&i| self.get(i))
                     .unwrap_or(end);
-                Err(idx..undef_end)
+                Err(idx..uninit_end)
             }
             None => Ok(()),
         }