@@ -589,6 +589,19 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
         dest_align: Align,
         size: Size,
         nonoverlapping: bool,
+    ) -> EvalResult<'tcx> {
+        self.copy_repeatedly(src, src_align, dest, dest_align, size, 1, nonoverlapping)
+    }
+
+    pub fn copy_repeatedly(
+        &mut self,
+        src: Scalar,
+        src_align: Align,
+        dest: Scalar,
+        dest_align: Align,
+        size: Size,
+        length: u64,
+        nonoverlapping: bool,
     ) -> EvalResult<'tcx> {
         // Empty accesses don't need to be valid pointers, but they should still be aligned
         self.check_align(src, src_align)?;
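With this split, `copy` becomes a thin wrapper that forwards to the new `copy_repeatedly` with a repeat count of 1, and `copy_repeatedly` lays down `length` back-to-back copies of the `size`-byte source block. A minimal stand-alone sketch of that pattern, assuming plain byte slices and hypothetical function names rather than miri's `Scalar`/`Size`/`Align` machinery (the later hunk performs the same stride arithmetic with raw pointers via `dest_bytes.offset((size.bytes() * i) as isize)`):

// Hypothetical sketch only, not miri's API: a single copy expressed as a
// repeated copy with count 1, over plain byte slices.
fn copy(dest: &mut [u8], src: &[u8]) {
    copy_repeatedly(dest, src, 1);
}

fn copy_repeatedly(dest: &mut [u8], src: &[u8], length: usize) {
    // The destination must have room for `length` back-to-back copies of `src`.
    assert!(dest.len() >= src.len() * length);
    for i in 0..length {
        // Repetition `i` starts `i * src.len()` bytes into the destination.
        let start = i * src.len();
        dest[start..start + src.len()].copy_from_slice(src);
    }
}

fn main() {
    let mut dest = [0u8; 6];
    copy_repeatedly(&mut dest, &[1, 2], 3);
    assert_eq!(dest, [1, 2, 1, 2, 1, 2]);
    copy(&mut dest, &[7, 8]);
    assert_eq!(&dest[..2], &[7, 8]);
}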
@@ -603,16 +616,24 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
         // first copy the relocations to a temporary buffer, because
         // `get_bytes_mut` will clear the relocations, which is correct,
         // since we don't want to keep any relocations at the target.
-        let relocations: Vec<_> = self.relocations(src, size)?
-            .iter()
-            .map(|&(offset, alloc_id)| {
-                // Update relocation offsets for the new positions in the destination allocation.
-                (offset + dest.offset - src.offset, alloc_id)
-            })
-            .collect();
+        let relocations = {
+            let relocations = self.relocations(src, size)?;
+            let mut new_relocations = Vec::with_capacity(relocations.len() * (length as usize));
+            for i in 0..length {
+                new_relocations.extend(
+                    relocations
+                        .iter()
+                        .map(|&(offset, alloc_id)| {
+                            (offset + dest.offset - src.offset + (i * size * relocations.len() as u64), alloc_id)
+                        })
+                );
+            }
+
+            new_relocations
+        };

         let src_bytes = self.get_bytes_unchecked(src, size, src_align)?.as_ptr();
-        let dest_bytes = self.get_bytes_mut(dest, size, dest_align)?.as_mut_ptr();
+        let dest_bytes = self.get_bytes_mut(dest, size * length, dest_align)?.as_mut_ptr();

         // SAFE: The above indexing would have panicked if there weren't at least `size` bytes
         // behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and
@@ -629,13 +650,18 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
                         ));
                     }
                 }
-                ptr::copy(src_bytes, dest_bytes, size.bytes() as usize);
+
+                for i in 0..length {
+                    ptr::copy(src_bytes, dest_bytes.offset((size.bytes() * i) as isize), size.bytes() as usize);
+                }
             } else {
-                ptr::copy_nonoverlapping(src_bytes, dest_bytes, size.bytes() as usize);
+                for i in 0..length {
+                    ptr::copy_nonoverlapping(src_bytes, dest_bytes.offset((size.bytes() * i) as isize), size.bytes() as usize);
+                }
             }
         }

-        self.copy_undef_mask(src, dest, size)?;
+        self.copy_undef_mask(src, dest, size, length)?;
         // copy back the relocations
         self.get_mut(dest.alloc_id)?.relocations.insert_presorted(relocations);

@@ -856,21 +882,25 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
         src: Pointer,
         dest: Pointer,
         size: Size,
+        repeat: u64,
     ) -> EvalResult<'tcx> {
         // The bits have to be saved locally before writing to dest in case src and dest overlap.
         assert_eq!(size.bytes() as usize as u64, size.bytes());
-        let mut v = Vec::with_capacity(size.bytes() as usize);
+
+        let undef_mask = self.get(src.alloc_id)?.undef_mask.clone();
+        let dest_allocation = self.get_mut(dest.alloc_id)?;
+
         for i in 0..size.bytes() {
-            let defined = self.get(src.alloc_id)?.undef_mask.get(src.offset + Size::from_bytes(i));
-            v.push(defined);
-        }
-        for (i, defined) in v.into_iter().enumerate() {
-            self.get_mut(dest.alloc_id)?.undef_mask.set(
-                dest.offset +
-                Size::from_bytes(i as u64),
-                defined,
-            );
+            let defined = undef_mask.get(src.offset + Size::from_bytes(i));
+
+            for j in 0..repeat {
+                dest_allocation.undef_mask.set(
+                    dest.offset + Size::from_bytes(i + (size.bytes() * j)),
+                    defined
+                );
+            }
         }
+
         Ok(())
     }

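The last hunk gives `copy_undef_mask` a matching `repeat` count. It clones the source allocation's undef mask up front, which keeps the old overlap-safety guarantee and also avoids holding `get` and `get_mut` borrows at the same time, and then writes each source bit to `repeat` positions in the destination. A minimal sketch of that access pattern, assuming a `Vec<bool>` stands in for miri's bit-packed `UndefMask` and that source and destination ranges live in one shared buffer:

// Minimal sketch under the assumptions above; names are hypothetical.
fn copy_undef_mask(mask: &mut [bool], src_offset: usize, dest_offset: usize, size: usize, repeat: usize) {
    // Snapshot the source bits first (mirroring the clone() in the diff) so an
    // overlapping destination range cannot observe partially updated values.
    let src_bits: Vec<bool> = mask[src_offset..src_offset + size].to_vec();
    for (i, &defined) in src_bits.iter().enumerate() {
        for j in 0..repeat {
            // Repetition `j` starts `size * j` bits into the destination range.
            mask[dest_offset + i + size * j] = defined;
        }
    }
}

fn main() {
    let mut mask = vec![true, false, true, true, true, true, true, true];
    // Copy the 2-bit pattern at offset 0 into offset 2, repeated 3 times.
    copy_undef_mask(&mut mask, 0, 2, 2, 3);
    assert_eq!(mask, vec![true, false, true, false, true, false, true, false]);
}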