- //! This is an incomplete implementation of mmap/mremap/munmap which is restricted in order to be
+ //! This is an incomplete implementation of mmap/munmap which is restricted in order to be
  //! implementable on top of the existing memory system. The point of these functions as written is
  //! to allow memory allocators written entirely in Rust to be executed by Miri. This implementation
  //! does not support other uses of mmap such as file mappings.
  //!
- //! mmap/mremap/munmap behave a lot like alloc/realloc/dealloc, and for simple use they are exactly
+ //! mmap/munmap behave a lot like alloc/dealloc, and for simple use they are exactly
  //! equivalent. That is the only part we support: no MAP_FIXED or MAP_SHARED or anything
  //! else that goes beyond a basic allocation API.
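(Editorial aside, not part of the diff: the allocation-style usage the doc comment describes looks roughly like the sketch below. It assumes a Unix target and the libc crate, and is only an illustration of the pattern the shim aims to support, not an excerpt from Miri's test suite.)

// Sketch of the supported pattern: an anonymous, private mapping used as a plain
// allocation and later released with a munmap of the exact same region.
use std::ptr;

fn main() {
    let len = 4096; // one page, assuming a 4 KiB page size
    unsafe {
        let p = libc::mmap(
            ptr::null_mut(), // no address hint; MAP_FIXED is not supported by the shim
            len,
            libc::PROT_READ | libc::PROT_WRITE,
            libc::MAP_PRIVATE | libc::MAP_ANONYMOUS,
            -1, // no backing file; file mappings are not supported
            0,
        );
        assert_ne!(p, libc::MAP_FAILED);
        // Anonymous mappings are zero-initialized and behave like a fresh allocation.
        *(p as *mut u8) = 42;
        assert_eq!(libc::munmap(p, len), 0);
    }
}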
@@ -23,23 +23,23 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
      ) -> InterpResult<'tcx, Scalar<Provenance>> {
          let this = self.eval_context_mut();

-         // We do not support MAP_FIXED, so the addr argument is always ignored
-         let addr = this.read_pointer(addr)?;
+         // We do not support MAP_FIXED, so the addr argument is always ignored (except for the macOS hack)
+         let addr = this.read_target_usize(addr)?;
          let length = this.read_target_usize(length)?;
          let prot = this.read_scalar(prot)?.to_i32()?;
          let flags = this.read_scalar(flags)?.to_i32()?;
          let fd = this.read_scalar(fd)?.to_i32()?;
-         let offset = this.read_scalar(offset)?.to_target_usize(this)?;
+         let offset = this.read_target_usize(offset)?;

          let map_private = this.eval_libc_i32("MAP_PRIVATE");
          let map_anonymous = this.eval_libc_i32("MAP_ANONYMOUS");
          let map_shared = this.eval_libc_i32("MAP_SHARED");
          let map_fixed = this.eval_libc_i32("MAP_FIXED");

-         // This is a horrible hack, but on macos the guard page mechanism uses mmap
+         // This is a horrible hack, but on macOS the guard page mechanism uses mmap
          // in a way we do not support. We just give it the return value it expects.
          if this.frame_in_std() && this.tcx.sess.target.os == "macos" && (flags & map_fixed) != 0 {
-             return Ok(Scalar::from_maybe_pointer(addr, this));
+             return Ok(Scalar::from_maybe_pointer(Pointer::from_addr_invalid(addr), this));
          }

          let prot_read = this.eval_libc_i32("PROT_READ");
@@ -106,74 +106,15 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
          Ok(Scalar::from_pointer(ptr, this))
      }

-     fn mremap(
-         &mut self,
-         old_address: &OpTy<'tcx, Provenance>,
-         old_size: &OpTy<'tcx, Provenance>,
-         new_size: &OpTy<'tcx, Provenance>,
-         flags: &OpTy<'tcx, Provenance>,
-     ) -> InterpResult<'tcx, Scalar<Provenance>> {
-         let this = self.eval_context_mut();
-
-         let old_address = this.read_scalar(old_address)?.to_target_usize(this)?;
-         let old_size = this.read_scalar(old_size)?.to_target_usize(this)?;
-         let new_size = this.read_scalar(new_size)?.to_target_usize(this)?;
-         let flags = this.read_scalar(flags)?.to_i32()?;
-
-         // old_address must be a multiple of the page size
-         #[allow(clippy::arithmetic_side_effects)] // PAGE_SIZE is nonzero
-         if old_address % this.machine.page_size != 0 || new_size == 0 {
-             this.set_last_error(Scalar::from_i32(this.eval_libc_i32("EINVAL")))?;
-             return Ok(this.eval_libc("MAP_FAILED"));
-         }
-
-         if flags & this.eval_libc_i32("MREMAP_FIXED") != 0 {
-             throw_unsup_format!("Miri does not support mremap wth MREMAP_FIXED");
-         }
-
-         if flags & this.eval_libc_i32("MREMAP_DONTUNMAP") != 0 {
-             throw_unsup_format!("Miri does not support mremap wth MREMAP_DONTUNMAP");
-         }
-
-         if flags & this.eval_libc_i32("MREMAP_MAYMOVE") == 0 {
-             // We only support MREMAP_MAYMOVE, so not passing the flag is just a failure
-             this.set_last_error(Scalar::from_i32(this.eval_libc_i32("EINVAL")))?;
-             return Ok(Scalar::from_maybe_pointer(Pointer::null(), this));
-         }
-
-         let old_address = Machine::ptr_from_addr_cast(this, old_address)?;
-         let align = this.machine.page_align();
-         let ptr = this.reallocate_ptr(
-             old_address,
-             Some((Size::from_bytes(old_size), align)),
-             Size::from_bytes(new_size),
-             align,
-             MiriMemoryKind::Mmap.into(),
-         )?;
-         if let Some(increase) = new_size.checked_sub(old_size) {
-             // We just allocated this, the access is definitely in-bounds and fits into our address space.
-             // mmap guarantees new mappings are zero-init.
-             this.write_bytes_ptr(
-                 ptr.offset(Size::from_bytes(old_size), this).unwrap().into(),
-                 std::iter::repeat(0u8).take(usize::try_from(increase).unwrap()),
-             )
-             .unwrap();
-         }
-         // Memory mappings are always exposed
-         Machine::expose_ptr(this, ptr)?;
-
-         Ok(Scalar::from_pointer(ptr, this))
-     }
-
      fn munmap(
          &mut self,
          addr: &OpTy<'tcx, Provenance>,
          length: &OpTy<'tcx, Provenance>,
      ) -> InterpResult<'tcx, Scalar<Provenance>> {
          let this = self.eval_context_mut();

-         let addr = this.read_scalar(addr)?.to_target_usize(this)?;
-         let length = this.read_scalar(length)?.to_target_usize(this)?;
+         let addr = this.read_target_usize(addr)?;
+         let length = this.read_target_usize(length)?;

          // addr must be a multiple of the page size
          #[allow(clippy::arithmetic_side_effects)] // PAGE_SIZE is nonzero
@@ -193,6 +134,11 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
              throw_unsup_format!("Miri only supports munmap on memory allocated directly by mmap");
          };

+         // Elsewhere in this function we are careful to check what we can and throw an unsupported
+         // error instead of Undefined Behavior when use of this function falls outside of the
+         // narrow scope we support. We deliberately do not check the MemoryKind of this allocation,
+         // because we want to report UB on attempting to unmap memory that Rust "understands", such
+         // as the stack, heap, or statics.
          let (_kind, alloc) = this.memory.alloc_map().get(alloc_id).unwrap();
          if offset != Size::ZERO || alloc.len() as u64 != length {
              throw_unsup_format!(
@@ -202,20 +148,11 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {

          let len = Size::from_bytes(alloc.len() as u64);
          this.deallocate_ptr(
-             Pointer::new(Some(Provenance::Wildcard), Size::from_bytes(addr)),
+             ptr.into(),
              Some((len, this.machine.page_align())),
              MemoryKind::Machine(MiriMemoryKind::Mmap),
          )?;

          Ok(Scalar::from_i32(0))
      }
  }
-
- trait RangeExt {
-     fn overlaps(&self, other: &Self) -> bool;
- }
- impl RangeExt for std::ops::Range<Size> {
-     fn overlaps(&self, other: &Self) -> bool {
-         self.start.max(other.start) <= self.end.min(other.end)
-     }
- }
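(Editorial aside, not part of the diff: the sketch below, assuming a Unix target and the libc crate, illustrates the munmap restrictions above. Only a call that unmaps exactly the region returned by mmap is supported; a partial unmap is rejected as unsupported, and unmapping memory that Rust itself manages is the case the new comment wants reported as Undefined Behavior rather than as unsupported.)

// Hypothetical sketch of what the munmap shim accepts and rejects.
fn main() {
    let page = 4096; // assuming a 4 KiB page size
    unsafe {
        let p = libc::mmap(
            std::ptr::null_mut(),
            2 * page,
            libc::PROT_READ | libc::PROT_WRITE,
            libc::MAP_PRIVATE | libc::MAP_ANONYMOUS,
            -1,
            0,
        );
        assert_ne!(p, libc::MAP_FAILED);

        // Unsupported by the shim: unmapping only part of the mapping
        // (offset and length must exactly match the original allocation).
        // libc::munmap(p, page);

        // Supported: unmapping exactly the region that mmap returned.
        assert_eq!(libc::munmap(p, 2 * page), 0);

        // Not mmap memory at all: per the comment added in this commit, Miri deliberately
        // does not special-case this and wants it surfaced as UB, not as "unsupported".
        // let b = Box::into_raw(Box::new(0u8)) as *mut libc::c_void;
        // libc::munmap(b, 1);
    }
}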