
Commit a88613c

Auto merge of #55674 - oli-obk:miri_engine_refactoring, r=RalfJung
Miri engine refactoring

r? @RalfJung

split out the "just moves stuff around" part of #55293
2 parents b76ee83 + 428af73 commit a88613c


10 files changed: +532 -494 lines changed
+233
@@ -0,0 +1,233 @@
// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! The virtual memory representation of the MIR interpreter

use super::{Pointer, EvalResult, AllocId};

use ty::layout::{Size, Align};
use syntax::ast::Mutability;
use std::iter;
use mir;
use std::ops::{Deref, DerefMut};
use rustc_data_structures::sorted_map::SortedMap;

#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)]
pub struct Allocation<Tag=(), Extra=()> {
    /// The actual bytes of the allocation.
    /// Note that the bytes of a pointer represent the offset of the pointer.
    pub bytes: Vec<u8>,
    /// Maps from byte addresses to extra data for each pointer.
    /// Only the first byte of a pointer is inserted into the map; i.e.,
    /// every entry in this map applies to `pointer_size` consecutive bytes starting
    /// at the given offset.
    pub relocations: Relocations<Tag>,
    /// Denotes undefined memory. Reading from undefined memory is forbidden in miri.
    pub undef_mask: UndefMask,
    /// The alignment of the allocation to detect unaligned reads.
    pub align: Align,
    /// Whether the allocation is mutable.
    /// Also used by codegen to determine if a static should be put into mutable memory,
    /// which happens for `static mut` and `static` with interior mutability.
    pub mutability: Mutability,
    /// Extra state for the machine.
    pub extra: Extra,
}

pub trait AllocationExtra<Tag>: ::std::fmt::Debug + Default + Clone {
    /// Hook for performing extra checks on a memory read access.
    ///
    /// Takes read-only access to the allocation so that all the memory read
    /// operations can keep taking `&self`. Use a `RefCell` in `AllocExtra` if you
    /// need to mutate.
    #[inline]
    fn memory_read<'tcx>(
        _alloc: &Allocation<Tag, Self>,
        _ptr: Pointer<Tag>,
        _size: Size,
    ) -> EvalResult<'tcx> {
        Ok(())
    }

    /// Hook for performing extra checks on a memory write access.
    #[inline]
    fn memory_written<'tcx>(
        _alloc: &mut Allocation<Tag, Self>,
        _ptr: Pointer<Tag>,
        _size: Size,
    ) -> EvalResult<'tcx> {
        Ok(())
    }
}

impl AllocationExtra<()> for () {}

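To give a feel for how a machine would use these hooks, here is a rough, hypothetical sketch (not part of this commit): an extra-state type that counts how many bytes have been read. The `ReadCounter` name is made up, and the sketch assumes it sits in a module where the `Allocation`, `Pointer`, `Size`, and `EvalResult` paths imported above are in scope.

use std::cell::Cell;

// Hypothetical machine extra state; `memory_read` only gets `&Allocation`,
// so interior mutability (a `Cell`) is needed to record anything.
#[derive(Clone, Debug, Default)]
pub struct ReadCounter {
    bytes_read: Cell<u64>,
}

impl<Tag> AllocationExtra<Tag> for ReadCounter {
    fn memory_read<'tcx>(
        alloc: &Allocation<Tag, Self>,
        _ptr: Pointer<Tag>,
        size: Size,
    ) -> EvalResult<'tcx> {
        // Accumulate the number of bytes touched by this read.
        let so_far = alloc.extra.bytes_read.get();
        alloc.extra.bytes_read.set(so_far + size.bytes());
        Ok(())
    }
}
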
impl<Tag, Extra: Default> Allocation<Tag, Extra> {
    /// Creates a read-only allocation initialized by the given bytes.
    pub fn from_bytes(slice: &[u8], align: Align) -> Self {
        let mut undef_mask = UndefMask::new(Size::ZERO);
        undef_mask.grow(Size::from_bytes(slice.len() as u64), true);
        Self {
            bytes: slice.to_owned(),
            relocations: Relocations::new(),
            undef_mask,
            align,
            mutability: Mutability::Immutable,
            extra: Extra::default(),
        }
    }

    pub fn from_byte_aligned_bytes(slice: &[u8]) -> Self {
        Allocation::from_bytes(slice, Align::from_bytes(1, 1).unwrap())
    }

    pub fn undef(size: Size, align: Align) -> Self {
        assert_eq!(size.bytes() as usize as u64, size.bytes());
        Allocation {
            bytes: vec![0; size.bytes() as usize],
            relocations: Relocations::new(),
            undef_mask: UndefMask::new(size),
            align,
            mutability: Mutability::Mutable,
            extra: Extra::default(),
        }
    }
}

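For orientation only (not in the diff), the constructors above would be used roughly like this; the byte values are made up, and the sketch assumes the crate-internal `Size` and `Align` imports at the top of the file:

// A read-only allocation holding four bytes, with 1-byte alignment.
let ro: Allocation = Allocation::from_byte_aligned_bytes(&[1, 2, 3, 4]);
assert_eq!(ro.bytes.len(), 4);

// A fresh, fully undefined 16-byte allocation with 8-byte alignment
// (using the two-argument `Align::from_bytes(abi, pref)` API of this era).
let fresh: Allocation = Allocation::undef(
    Size::from_bytes(16),
    Align::from_bytes(8, 8).unwrap(),
);
// Every byte starts out undefined, so the very first byte is reported.
assert_eq!(
    fresh.undef_mask.is_range_defined(Size::ZERO, Size::from_bytes(16)),
    Err(Size::ZERO),
);
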
impl<'tcx> ::serialize::UseSpecializedDecodable for &'tcx Allocation {}

#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)]
pub struct Relocations<Tag=(), Id=AllocId>(SortedMap<Size, (Tag, Id)>);

impl<Tag, Id> Relocations<Tag, Id> {
    pub fn new() -> Self {
        Relocations(SortedMap::new())
    }

    // The caller must guarantee that the given relocations are already sorted
    // by address and contain no duplicates.
    pub fn from_presorted(r: Vec<(Size, (Tag, Id))>) -> Self {
        Relocations(SortedMap::from_presorted_elements(r))
    }
}

impl<Tag> Deref for Relocations<Tag> {
    type Target = SortedMap<Size, (Tag, AllocId)>;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl<Tag> DerefMut for Relocations<Tag> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}

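A quick hedged sketch (also not part of the commit) of how a `Relocations` map is meant to be filled: entries must already be sorted by offset for `from_presorted`, and each offset marks the first byte of a stored pointer. The concrete offsets are made up, and it assumes `AllocId` can be constructed from a raw index here, purely for illustration.

// Two relocations at offsets 0 and 8, tagged with the unit tag `()`.
let relocs: Relocations<()> = Relocations::from_presorted(vec![
    (Size::from_bytes(0), ((), AllocId(0))),
    (Size::from_bytes(8), ((), AllocId(1))),
]);
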
////////////////////////////////////////////////////////////////////////////////
// Undefined byte tracking
////////////////////////////////////////////////////////////////////////////////

type Block = u64;
const BLOCK_SIZE: u64 = 64;

#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)]
pub struct UndefMask {
    blocks: Vec<Block>,
    len: Size,
}

impl_stable_hash_for!(struct mir::interpret::UndefMask{blocks, len});

impl UndefMask {
    pub fn new(size: Size) -> Self {
        let mut m = UndefMask {
            blocks: vec![],
            len: Size::ZERO,
        };
        m.grow(size, false);
        m
    }

    /// Check whether the range `start..end` (end-exclusive) is entirely defined.
    ///
    /// Returns `Ok(())` if it's defined. Otherwise returns the index of the byte
    /// at which the first undefined access begins.
    #[inline]
    pub fn is_range_defined(&self, start: Size, end: Size) -> Result<(), Size> {
        if end > self.len {
            return Err(self.len);
        }

        let idx = (start.bytes()..end.bytes())
            .map(|i| Size::from_bytes(i))
            .find(|&i| !self.get(i));

        match idx {
            Some(idx) => Err(idx),
            None => Ok(())
        }
    }

    pub fn set_range(&mut self, start: Size, end: Size, new_state: bool) {
        let len = self.len;
        if end > len {
            self.grow(end - len, new_state);
        }
        self.set_range_inbounds(start, end, new_state);
    }

    pub fn set_range_inbounds(&mut self, start: Size, end: Size, new_state: bool) {
        for i in start.bytes()..end.bytes() {
            self.set(Size::from_bytes(i), new_state);
        }
    }

    #[inline]
    pub fn get(&self, i: Size) -> bool {
        let (block, bit) = bit_index(i);
        (self.blocks[block] & 1 << bit) != 0
    }

    #[inline]
    pub fn set(&mut self, i: Size, new_state: bool) {
        let (block, bit) = bit_index(i);
        if new_state {
            self.blocks[block] |= 1 << bit;
        } else {
            self.blocks[block] &= !(1 << bit);
        }
    }

    pub fn grow(&mut self, amount: Size, new_state: bool) {
        let unused_trailing_bits = self.blocks.len() as u64 * BLOCK_SIZE - self.len.bytes();
        if amount.bytes() > unused_trailing_bits {
            let additional_blocks = amount.bytes() / BLOCK_SIZE + 1;
            assert_eq!(additional_blocks as usize as u64, additional_blocks);
            self.blocks.extend(
                iter::repeat(0).take(additional_blocks as usize),
            );
        }
        let start = self.len;
        self.len += amount;
        self.set_range_inbounds(start, start + amount, new_state);
    }
}

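To make the mask's behaviour concrete, here is a small sketch (not in the commit) driving it directly; it only assumes the `UndefMask` and `Size` types above:

// Start with 8 bytes, all undefined.
let mut mask = UndefMask::new(Size::from_bytes(8));
assert_eq!(mask.is_range_defined(Size::ZERO, Size::from_bytes(8)), Err(Size::ZERO));

// Mark bytes 2..6 (end-exclusive) as defined.
mask.set_range(Size::from_bytes(2), Size::from_bytes(6), true);
assert!(mask.is_range_defined(Size::from_bytes(2), Size::from_bytes(6)).is_ok());

// Byte 6 is still undefined, so a wider check reports index 6 ...
assert_eq!(
    mask.is_range_defined(Size::from_bytes(2), Size::from_bytes(7)),
    Err(Size::from_bytes(6)),
);
// ... and a range reaching past `len` reports `len` itself.
assert_eq!(
    mask.is_range_defined(Size::ZERO, Size::from_bytes(9)),
    Err(Size::from_bytes(8)),
);
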
#[inline]
fn bit_index(bits: Size) -> (usize, usize) {
    let bits = bits.bytes();
    let a = bits / BLOCK_SIZE;
    let b = bits % BLOCK_SIZE;
    assert_eq!(a as usize as u64, a);
    assert_eq!(b as usize as u64, b);
    (a as usize, b as usize)
}
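
As a worked example (illustrative only): byte index 70 lives in the second 64-bit block, since 70 / 64 == 1 and 70 % 64 == 6, so `UndefMask::get` ends up testing bit 6 of `blocks[1]`.

assert_eq!(bit_index(Size::from_bytes(70)), (1, 6));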
