@@ -28,13 +28,14 @@ pub use self::value::{Scalar, ConstValue};
 use std::fmt;
 use mir;
 use hir::def_id::DefId;
-use ty::{self, TyCtxt, Instance};
+use ty::{self, TyCtxt, Instance, Ty, ParamEnvAnd};
 use ty::layout::{self, Align, HasDataLayout, Size};
 use middle::region;
 use std::iter;
 use std::io;
 use std::ops::{Deref, DerefMut};
-use std::hash::Hash;
+use std::cmp;
+use std::hash::{Hash, Hasher};
 use syntax::ast::Mutability;
 use rustc_serialize::{Encoder, Decodable, Encodable};
 use rustc_data_structures::sorted_map::SortedMap;
@@ -217,9 +218,56 @@ impl<'tcx, Tag> Pointer<Tag> {
     }
 }
 
-#[derive(Copy, Clone, Eq, Hash, Ord, PartialEq, PartialOrd, Debug)]
+#[derive(Copy, Clone, Debug)]
 pub struct AllocId(pub u64);
 
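+// An `AllocId` is identified by what it points to in the thread-local
+// `tcx.alloc_map`, so `Eq`, `Hash`, `Ord`, and `PartialOrd` are implemented
+// by hand below instead of being derived on the raw `u64`.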
+impl Eq for AllocId {}
+
+impl Hash for AllocId {
+    fn hash<H: Hasher>(&self, state: &mut H) {
+        ty::tls::with(|tcx| {
+            let alloc_type = tcx.alloc_map.lock().get(*self);
+            match alloc_type {
+                Some(alloc_type) => alloc_type.hash(state),
+                None => self.0.hash(state),
+            }
+        })
+    }
+}
+
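+// Equality also goes through the map: two ids are equal when the values they
+// resolve to are equal. Ids missing from the map resolve to `None`, so any
+// two dangling ids compare equal.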
+impl PartialEq for AllocId {
+    fn eq(&self, other: &Self) -> bool {
+        ty::tls::with(|tcx| {
+            let (s, o) = {
+                let map = tcx.alloc_map.lock();
+                (map.get(*self), map.get(*other))
+            };
+            s == o
+        })
+    }
+}
+
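+// Ordering compares the pointed-to values as well. `partial_cmp` yields
+// `None` when either id is dangling; `cmp` then falls back to the raw
+// numeric ids to keep the order total.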
+impl Ord for AllocId {
+    fn cmp(&self, other: &Self) -> cmp::Ordering {
+        match self.partial_cmp(other) {
+            Some(ord) => ord,
+            None => self.0.cmp(&other.0),
+        }
+    }
+}
+
+impl PartialOrd for AllocId {
+    fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
+        ty::tls::with(|tcx| {
+            let (s, o) = {
+                let map = tcx.alloc_map.lock();
+                (map.get(*self)?, map.get(*other)?)
+            };
+            s.partial_cmp(&o)
+        })
+    }
+}
+
 impl ::rustc_serialize::UseSpecializedEncodable for AllocId {}
 impl ::rustc_serialize::UseSpecializedDecodable for AllocId {}
@@ -427,7 +475,7 @@ impl fmt::Display for AllocId {
     }
 }
 
-#[derive(Debug, Clone, Eq, PartialEq, Hash, RustcDecodable, RustcEncodable)]
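+// `Ord`/`PartialOrd` are derived so that the hand-written ordering of
+// `AllocId` has something to delegate to.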
+#[derive(Debug, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, RustcDecodable, RustcEncodable)]
 pub enum AllocType<'tcx, M> {
     /// The alloc id is used as a function pointer
     Function(Instance<'tcx>),
@@ -440,7 +488,7 @@ pub enum AllocType<'tcx, M> {
 
 pub struct AllocMap<'tcx, M> {
     /// Lets you know what an AllocId refers to
-    id_to_type: FxHashMap<AllocId, AllocType<'tcx, M>>,
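+    // Keyed by raw `u64` instead of `AllocId`: hashing or comparing an
+    // `AllocId` now locks `alloc_map`, so id-keyed lookups inside the map
+    // itself would re-enter that lock.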
+    id_to_type: FxHashMap<u64, AllocType<'tcx, M>>,
 
     /// Used to ensure that functions and statics only get one associated AllocId
     type_interner: FxHashMap<AllocType<'tcx, M>, AllocId>,
@@ -479,7 +527,7 @@ impl<'tcx, M: fmt::Debug + Eq + Hash + Clone> AllocMap<'tcx, M> {
         }
         let id = self.reserve();
         debug!("creating alloc_type {:?} with id {}", alloc_type, id);
-        self.id_to_type.insert(id, alloc_type.clone());
+        self.id_to_type.insert(id.0, alloc_type.clone());
         self.type_interner.insert(alloc_type, id);
         id
     }
@@ -492,7 +540,7 @@ impl<'tcx, M: fmt::Debug + Eq + Hash + Clone> AllocMap<'tcx, M> {
     }
 
     pub fn get(&self, id: AllocId) -> Option<AllocType<'tcx, M>> {
-        self.id_to_type.get(&id).cloned()
+        self.id_to_type.get(&id.0).cloned()
     }
 
     pub fn unwrap_memory(&self, id: AllocId) -> M {
@@ -513,13 +561,13 @@ impl<'tcx, M: fmt::Debug + Eq + Hash + Clone> AllocMap<'tcx, M> {
     }
 
     pub fn set_id_memory(&mut self, id: AllocId, mem: M) {
-        if let Some(old) = self.id_to_type.insert(id, AllocType::Memory(mem)) {
+        if let Some(old) = self.id_to_type.insert(id.0, AllocType::Memory(mem)) {
             bug!("tried to set allocation id {}, but it was already existing as {:#?}", id, old);
         }
     }
 
     pub fn set_id_same_memory(&mut self, id: AllocId, mem: M) {
-        self.id_to_type.insert_same(id, AllocType::Memory(mem));
+        self.id_to_type.insert_same(id.0, AllocType::Memory(mem));
     }
 }
@@ -545,7 +593,7 @@ pub struct Allocation<Tag=(),Extra=()> {
     pub extra: Extra,
 }
 
-impl<Tag, Extra: Default> Allocation<Tag, Extra> {
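+// `Tag: Copy` lets `read_scalar` below bind the tag out of a shared slice
+// pattern when turning a relocation back into a `Pointer`.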
+impl<Tag: Copy, Extra: Default> Allocation<Tag, Extra> {
     /// Creates a read-only allocation initialized by the given bytes
     pub fn from_bytes(slice: &[u8], align: Align) -> Self {
         let mut undef_mask = UndefMask::new(Size::ZERO);
@@ -575,6 +623,145 @@ impl<Tag, Extra: Default> Allocation<Tag, Extra> {
             extra: Extra::default(),
         }
     }
+
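+    /// The size of this allocation's byte buffer.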
+    #[inline]
+    pub fn size(&self) -> Size {
+        Size::from_bytes(self.bytes.len() as u64)
+    }
+
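+    /// Checks that the allocation itself is aligned enough for
+    /// `required_align` and that `offset` is a multiple of it.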
+    pub fn check_align<'tcx>(
+        &self,
+        offset: Size,
+        required_align: Align,
+    ) -> EvalResult<'tcx> {
+        if self.align.abi() < required_align.abi() {
+            return err!(AlignmentCheckFailed {
+                has: self.align,
+                required: required_align,
+            });
+        }
+        let offset = offset.bytes();
+        if offset % required_align.abi() == 0 {
+            Ok(())
+        } else {
+            let has = offset % required_align.abi();
+            err!(AlignmentCheckFailed {
+                has: Align::from_bytes(has, has).unwrap(),
+                required: required_align,
+            })
+        }
+    }
+
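+    /// Checks that `offset..offset + size` stays within the allocation;
+    /// the `access` flag only changes how a failure is reported.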
+    pub fn check_bounds<'tcx>(
+        &self,
+        offset: Size,
+        size: Size,
+        access: bool,
+    ) -> EvalResult<'tcx> {
+        let end = offset + size;
+        let allocation_size = self.size();
+        if end > allocation_size {
+            err!(PointerOutOfBounds { offset, access, allocation_size })
+        } else {
+            Ok(())
+        }
+    }
+
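+    /// Checks that all bytes in `offset..offset + size` are defined.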
+    pub fn check_defined<'tcx>(
+        &self,
+        offset: Size,
+        size: Size,
+    ) -> EvalResult<'tcx> {
+        self.undef_mask.is_range_defined(
+            offset,
+            offset + size,
+        ).or_else(|idx| err!(ReadUndefBytes(idx)))
+    }
+
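+    /// Checks that the range contains no relocations, i.e. none of its bytes
+    /// belong to a stored pointer.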
+    pub fn check_relocations<'tcx>(
+        &self,
+        hdl: impl HasDataLayout,
+        offset: Size,
+        size: Size,
+    ) -> EvalResult<'tcx> {
+        if self.relocations(hdl, offset, size).len() != 0 {
+            err!(ReadPointerAsBytes)
+        } else {
+            Ok(())
+        }
+    }
+
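+    /// Returns all relocations overlapping `offset..offset + size`.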
+    fn relocations(
+        &self,
+        hdl: impl HasDataLayout,
+        offset: Size,
+        size: Size,
+    ) -> &[(Size, (Tag, AllocId))] {
+        // We have to go back `pointer_size - 1` bytes, as a relocation starting
+        // there would still overlap with the beginning of this range.
+        let start = offset.bytes().saturating_sub(hdl.data_layout().pointer_size.bytes() - 1);
+        let end = offset + size; // this does overflow checking
+        self.relocations.range(Size::from_bytes(start)..end)
+    }
+
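+    /// Returns raw bytes after running every check: alignment, bounds,
+    /// definedness, and absence of relocations.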
+    pub fn get_bytes<'tcx>(
+        &self,
+        hdl: impl HasDataLayout,
+        offset: Size,
+        size: Size,
+        required_align: Align,
+    ) -> EvalResult<'tcx, &[u8]> {
+        self.check_align(offset, required_align)?;
+        self.check_bounds(offset, size, true)?;
+        self.check_defined(offset, size)?;
+        self.check_relocations(hdl, offset, size)?;
+        Ok(self.bytes_ignoring_relocations_and_undef(offset, size))
+    }
+
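+    /// Reads an integer of type `ty` at `offset`, using the type's layout
+    /// for the size and required alignment of the read.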
+    pub fn read_bits<'tcx>(
+        &self,
+        tcx: TyCtxt<'_, '_, 'tcx>,
+        offset: Size,
+        ty: ParamEnvAnd<'tcx, Ty<'tcx>>,
+    ) -> EvalResult<'tcx, u128> {
+        let ty = tcx.lift_to_global(&ty).unwrap();
+        let layout = tcx.layout_of(ty).unwrap_or_else(|e| {
+            panic!("could not compute layout for {:?}: {:?}", ty, e)
+        });
+        let bytes = self.get_bytes(tcx, offset, layout.size, layout.align)?;
+        Ok(read_target_uint(tcx.data_layout.endian, bytes).unwrap())
+    }
+
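+    /// Reads a `Scalar`: a single relocation covering a pointer-sized read
+    /// becomes a `Pointer`; no relocations become raw `Scalar::Bits`; any
+    /// other overlap is a `ReadPointerAsBytes` error.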
+    pub fn read_scalar<'tcx>(
+        &self,
+        hdl: impl HasDataLayout,
+        offset: Size,
+        size: Size,
+        align: Align,
+    ) -> EvalResult<'tcx, Scalar<Tag>> {
+        self.check_align(offset, align)?;
+        self.check_bounds(offset, size, true)?;
+        self.check_defined(offset, size)?;
+        let bytes = self.bytes_ignoring_relocations_and_undef(offset, size);
+        let int = read_target_uint(hdl.data_layout().endian, &bytes).unwrap();
+        // Read the pointer size before `hdl` is moved into `self.relocations`.
+        let ptr_size = hdl.data_layout().pointer_size;
+        match self.relocations(hdl, offset, size) {
+            &[(_, (tag, alloc_id))] if size == ptr_size => {
+                Ok(Pointer::new_with_tag(alloc_id, Size::from_bytes(int as u64), tag).into())
+            },
+            &[] => Ok(Scalar::Bits {
+                bits: int,
+                size: size.bytes() as u8,
+            }),
+            _ => err!(ReadPointerAsBytes),
+        }
+    }
+
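+    /// Unchecked access to the raw bytes; callers must have done the
+    /// relevant checks (or deliberately ignore relocations and undef bytes).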
+    fn bytes_ignoring_relocations_and_undef(&self, offset: Size, size: Size) -> &[u8] {
+        let end = offset + size;
+        let offset = offset.bytes() as usize;
+        let end = end.bytes() as usize;
+        &self.bytes[offset..end]
+    }
 }
 
 impl<'tcx> ::serialize::UseSpecializedDecodable for &'tcx Allocation {}