diff --git a/rust-version b/rust-version
index 5bdac314d1..0268b07ac4 100644
--- a/rust-version
+++ b/rust-version
@@ -1 +1 @@
-nightly-2018-11-24
+nightly-2018-11-26
diff --git a/src/fn_call.rs b/src/fn_call.rs
index e64d4f6954..e9d3255a5b 100644
--- a/src/fn_call.rs
+++ b/src/fn_call.rs
@@ -114,6 +114,8 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
             None => self.tcx.item_name(def_id).as_str(),
         };
 
+        let tcx = &{self.tcx.tcx};
+
         // All these functions take raw pointers, so if we access memory directly
         // (as opposed to through a place), we have to remember to erase any tag
         // that might still hang around!
@@ -175,7 +177,9 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
                         MiriMemoryKind::Rust.into()
                     )?
                     .with_default_tag();
-                self.memory_mut().write_repeat(ptr.into(), 0, Size::from_bytes(size))?;
+                self.memory_mut()
+                    .get_mut(ptr.alloc_id)?
+                    .write_repeat(tcx, ptr, 0, Size::from_bytes(size))?;
                 self.write_scalar(Scalar::Ptr(ptr), dest)?;
             }
             "__rust_dealloc" => {
@@ -239,7 +243,7 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
             "dlsym" => {
                 let _handle = self.read_scalar(args[0])?;
                 let symbol = self.read_scalar(args[1])?.to_ptr()?;
-                let symbol_name = self.memory().read_c_str(symbol)?;
+                let symbol_name = self.memory().get(symbol.alloc_id)?.read_c_str(tcx, symbol)?;
                 let err = format!("bad c unicode symbol: {:?}", symbol_name);
                 let symbol_name = ::std::str::from_utf8(symbol_name).unwrap_or(&err);
                 return err!(Unimplemented(format!(
@@ -346,7 +350,7 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
             "getenv" => {
                 let result = {
                     let name_ptr = self.read_scalar(args[0])?.to_ptr()?;
-                    let name = self.memory().read_c_str(name_ptr)?;
+                    let name = self.memory().get(name_ptr.alloc_id)?.read_c_str(tcx, name_ptr)?;
                     match self.machine.env_vars.get(name) {
                         Some(&var) => Scalar::Ptr(var),
                         None => Scalar::ptr_null(&*self.tcx),
@@ -360,8 +364,8 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
                 {
                     let name_ptr = self.read_scalar(args[0])?.not_undef()?;
                     if !name_ptr.is_null_ptr(self) {
-                        let name = self.memory().read_c_str(name_ptr.to_ptr()?
-                        )?.to_owned();
+                        let name_ptr = name_ptr.to_ptr()?;
+                        let name = self.memory().get(name_ptr.alloc_id)?.read_c_str(tcx, name_ptr)?.to_owned();
                         if !name.is_empty() && !name.contains(&b'=') {
                             success = Some(self.machine.env_vars.remove(&name));
                         }
@@ -382,9 +386,10 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
                 {
                     let name_ptr = self.read_scalar(args[0])?.not_undef()?;
                     let value_ptr = self.read_scalar(args[1])?.to_ptr()?;
-                    let value = self.memory().read_c_str(value_ptr)?;
+                    let value = self.memory().get(value_ptr.alloc_id)?.read_c_str(tcx, value_ptr)?;
                     if !name_ptr.is_null_ptr(self) {
-                        let name = self.memory().read_c_str(name_ptr.to_ptr()?)?;
+                        let name_ptr = name_ptr.to_ptr()?;
+                        let name = self.memory().get(name_ptr.alloc_id)?.read_c_str(tcx, name_ptr)?;
                         if !name.is_empty() && !name.contains(&b'=') {
                             new = Some((name.to_owned(), value.to_owned()));
                         }
@@ -397,9 +402,12 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
                     Align::from_bytes(1).unwrap(),
                     MiriMemoryKind::Env.into(),
                 )?.with_default_tag();
-                self.memory_mut().write_bytes(value_copy.into(), &value)?;
-                let trailing_zero_ptr = value_copy.offset(Size::from_bytes(value.len() as u64), self)?.into();
-                self.memory_mut().write_bytes(trailing_zero_ptr, &[0])?;
+                {
+                    let alloc = self.memory_mut().get_mut(value_copy.alloc_id)?;
+                    alloc.write_bytes(tcx, value_copy, &value)?;
+                    let trailing_zero_ptr = value_copy.offset(Size::from_bytes(value.len() as u64), tcx)?;
+                    alloc.write_bytes(tcx, trailing_zero_ptr, &[0])?;
+                }
                 if let Some(var) = self.machine.env_vars.insert(
                     name.to_owned(),
                     value_copy,
@@ -444,7 +452,7 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
 
             "strlen" => {
                 let ptr = self.read_scalar(args[0])?.to_ptr()?;
-                let n = self.memory().read_c_str(ptr)?.len();
+                let n = self.memory().get(ptr.alloc_id)?.read_c_str(tcx, ptr)?.len();
                 self.write_scalar(Scalar::from_uint(n as u64, dest.layout.size), dest)?;
             }
 
@@ -507,13 +515,15 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
                 let key_layout = self.layout_of(key_type)?;
 
                 // Create key and write it into the memory where key_ptr wants it
-                let key = self.machine.tls.create_tls_key(dtor, &*self.tcx) as u128;
+                let key = self.machine.tls.create_tls_key(dtor, tcx) as u128;
                 if key_layout.size.bits() < 128 && key >= (1u128 << key_layout.size.bits() as u128) {
                     return err!(OutOfTls);
                 }
-                self.memory_mut().write_scalar(
+
+                self.memory().check_align(key_ptr.into(), key_layout.align.abi)?;
+                self.memory_mut().get_mut(key_ptr.alloc_id)?.write_scalar(
+                    tcx,
                     key_ptr,
-                    key_layout.align.abi,
                     Scalar::from_uint(key, key_layout.size).into(),
                     key_layout.size,
                 )?;
@@ -610,7 +620,7 @@ impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalCo
                 // This just creates a key; Windows does not natively support TLS dtors.
 
                 // Create key and return it
-                let key = self.machine.tls.create_tls_key(None, &*self.tcx) as u128;
+                let key = self.machine.tls.create_tls_key(None, tcx) as u128;
 
                 // Figure out how large a TLS key actually is. This is c::DWORD.
                 if dest.layout.size.bits() < 128 && key >= (1u128 << dest.layout.size.bits() as u128) {
diff --git a/src/intrinsic.rs b/src/intrinsic.rs
index 66dab00e09..c9b16525e5 100644
--- a/src/intrinsic.rs
+++ b/src/intrinsic.rs
@@ -28,7 +28,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
         if self.emulate_intrinsic(instance, args, dest)? {
             return Ok(());
         }
-
+        let tcx = &{self.tcx.tcx};
         let substs = instance.substs;
 
         // All these intrinsics take raw pointers, so if we access memory directly
@@ -248,6 +248,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
                 // FIXME: We do not properly validate in case of ZSTs and when doing it in memory!
                 // However, this only affects direct calls of the intrinsic; calls to the stable
                 // functions wrapping them do get their validation.
+                // FIXME: should we check that the destination pointer is aligned even for ZSTs?
                 if !dest.layout.is_zst() { // nothing to do for ZST
                     match dest.layout.abi {
                         layout::Abi::Scalar(ref s) => {
@@ -263,7 +264,9 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
                             // Do it in memory
                             let mplace = self.force_allocation(dest)?;
                             assert!(mplace.meta.is_none());
-                            self.memory_mut().write_repeat(mplace.ptr, 0, dest.layout.size)?;
+                            // not a zst, must be valid pointer
+                            let ptr = mplace.ptr.to_ptr()?;
+                            self.memory_mut().get_mut(ptr.alloc_id)?.write_repeat(tcx, ptr, 0, dest.layout.size)?;
                         }
                     }
                 }
@@ -412,6 +415,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
                 // FIXME: We do not properly validate in case of ZSTs and when doing it in memory!
                 // However, this only affects direct calls of the intrinsic; calls to the stable
                 // functions wrapping them do get their validation.
+                // FIXME: should we check alignment for ZSTs?
                 if !dest.layout.is_zst() { // nothing to do for ZST
                     match dest.layout.abi {
                         layout::Abi::Scalar(..) => {
@@ -426,7 +430,10 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
                             // Do it in memory
                             let mplace = self.force_allocation(dest)?;
                             assert!(mplace.meta.is_none());
-                            self.memory_mut().mark_definedness(mplace.ptr.to_ptr()?, dest.layout.size, false)?;
+                            let ptr = mplace.ptr.to_ptr()?;
+                            self.memory_mut()
+                                .get_mut(ptr.alloc_id)?
+                                .mark_definedness(ptr, dest.layout.size, false)?;
                         }
                     }
                 }
@@ -439,7 +446,13 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
                 let ptr = self.read_scalar(args[0])?.not_undef()?;
                 let count = self.read_scalar(args[2])?.to_usize(self)?;
                 self.memory().check_align(ptr, ty_layout.align.abi)?;
-                self.memory_mut().write_repeat(ptr, val_byte, ty_layout.size * count)?;
+                let byte_count = ty_layout.size * count;
+                if byte_count.bytes() != 0 {
+                    let ptr = ptr.to_ptr()?;
+                    self.memory_mut()
+                        .get_mut(ptr.alloc_id)?
+                        .write_repeat(tcx, ptr, val_byte, byte_count)?;
+                }
             }
 
             name => return err!(Unimplemented(format!("unimplemented intrinsic: {}", name))),
diff --git a/src/operator.rs b/src/operator.rs
index 2f3a0de999..e1ccdf9199 100644
--- a/src/operator.rs
+++ b/src/operator.rs
@@ -142,10 +142,12 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
                 // allocations sit right next to each other. The C/C++ standards are
                 // somewhat fuzzy about this case, so I think for now this check is
                 // "good enough".
-                // We require liveness, as dead allocations can of course overlap.
-                self.memory().check_bounds_ptr(left, InboundsCheck::Live)?;
-                self.memory().check_bounds_ptr(right, InboundsCheck::Live)?;
-                // Two live in-bounds pointers, we can compare across allocations
+                // Dead allocations in miri cannot overlap with live allocations, but
+                // on real hardware this can easily happen. Thus for comparisons we require
+                // both pointers to be live.
+                self.memory().get(left.alloc_id)?.check_bounds_ptr(left)?;
+                self.memory().get(right.alloc_id)?.check_bounds_ptr(right)?;
+                // Two in-bounds pointers, we can compare across allocations
                 left == right
             }
         }
@@ -158,7 +160,8 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
                 // Case I: Comparing with NULL
                 if bits == 0 {
                     // Test if the ptr is in-bounds. Then it cannot be NULL.
-                    if self.memory().check_bounds_ptr(ptr, InboundsCheck::MaybeDead).is_ok() {
+                    // Even dangling pointers cannot be NULL.
+                    if self.memory().check_bounds_ptr_maybe_dead(ptr).is_ok() {
                         return Ok(false);
                     }
                 }
@@ -298,9 +301,10 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
         if let Scalar::Ptr(ptr) = ptr {
             // Both old and new pointer must be in-bounds of a *live* allocation.
             // (Of the same allocation, but that part is trivial with our representation.)
-            self.memory().check_bounds_ptr(ptr, InboundsCheck::Live)?;
+            let alloc = self.memory().get(ptr.alloc_id)?;
+            alloc.check_bounds_ptr(ptr)?;
             let ptr = ptr.signed_offset(offset, self)?;
-            self.memory().check_bounds_ptr(ptr, InboundsCheck::Live)?;
+            alloc.check_bounds_ptr(ptr)?;
             Ok(Scalar::Ptr(ptr))
         } else {
             // An integer pointer. They can only be offset by 0, and we pretend there
diff --git a/src/stacked_borrows.rs b/src/stacked_borrows.rs
index f26ef40ea9..f292d08363 100644
--- a/src/stacked_borrows.rs
+++ b/src/stacked_borrows.rs
@@ -5,7 +5,7 @@ use rustc::hir::{Mutability, MutMutable, MutImmutable};
 
 use crate::{
     EvalResult, EvalErrorKind, MiriEvalContext, HelpersEvalContextExt, Evaluator, MutValueVisitor,
-    MemoryKind, MiriMemoryKind, RangeMap, AllocId, Allocation, AllocationExtra, InboundsCheck,
+    MemoryKind, MiriMemoryKind, RangeMap, AllocId, Allocation, AllocationExtra,
     Pointer, MemPlace, Scalar, Immediate, ImmTy, PlaceTy, MPlaceTy,
 };
 
@@ -500,8 +500,8 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for MiriEvalContext<'a, 'mir, 'tcx> {
         }
 
         // Get the allocation
-        self.memory().check_bounds(ptr, size, InboundsCheck::Live)?;
-        let alloc = self.memory().get(ptr.alloc_id).expect("We checked that the ptr is fine!");
+        let alloc = self.memory().get(ptr.alloc_id)?;
+        alloc.check_bounds(self, ptr, size)?;
         // If we got here, we do some checking, *but* we leave the tag unchanged.
         if let Borrow::Shr(Some(_)) = ptr.tag {
             assert_eq!(mutability, Some(MutImmutable));
@@ -543,8 +543,8 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for MiriEvalContext<'a, 'mir, 'tcx> {
             ptr, place.layout.ty, new_bor);
 
         // Get the allocation. It might not be mutable, so we cannot use `get_mut`.
-        self.memory().check_bounds(ptr, size, InboundsCheck::Live)?;
-        let alloc = self.memory().get(ptr.alloc_id).expect("We checked that the ptr is fine!");
+        let alloc = self.memory().get(ptr.alloc_id)?;
+        alloc.check_bounds(self, ptr, size)?;
         // Update the stacks.
         if let Borrow::Shr(Some(_)) = new_bor {
            // Reference that cares about freezing. We need a frozen-sensitive reborrow.
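The pattern this rustup adapts to, repeated across all four source files: byte-level operations such as `read_c_str`, `write_bytes`, `write_repeat`, `write_scalar`, and `check_bounds_ptr` moved from `Memory` onto `Allocation`, so callers now resolve `ptr.alloc_id` through `Memory::get`/`get_mut` first and pass the `TyCtxt` explicitly. Below is a minimal self-contained sketch of that API shape using toy stand-in types; the names and signatures are illustrative assumptions, not Miri's or rustc's real ones.

```rust
// Toy model of the post-refactor API shape (NOT Miri's real types; all
// names and signatures here are illustrative). Byte-level operations
// live on `Allocation`; `Memory` only resolves `AllocId`s, and the
// tcx-like context is threaded through explicitly.

use std::collections::HashMap;

type AllocId = u64;
type EvalResult<T> = Result<T, String>;

/// Stand-in for the `TyCtxt` the real methods now take explicitly.
struct Ctx;

struct Allocation {
    bytes: Vec<u8>,
}

impl Allocation {
    /// Mirrors `check_bounds_ptr`: one-past-the-end is still in bounds.
    fn check_bounds_ptr(&self, offset: usize) -> EvalResult<()> {
        if offset <= self.bytes.len() {
            Ok(())
        } else {
            Err("pointer out of bounds".to_string())
        }
    }

    /// Mirrors `write_repeat`: fill `len` bytes at `offset` with `val`.
    fn write_repeat(&mut self, _cx: &Ctx, offset: usize, val: u8, len: usize) -> EvalResult<()> {
        self.check_bounds_ptr(offset + len)?;
        for b in &mut self.bytes[offset..offset + len] {
            *b = val;
        }
        Ok(())
    }

    /// Mirrors `read_c_str`: the bytes up to (excluding) the first NUL.
    fn read_c_str(&self, _cx: &Ctx, offset: usize) -> EvalResult<&[u8]> {
        self.check_bounds_ptr(offset)?;
        match self.bytes[offset..].iter().position(|&b| b == 0) {
            Some(end) => Ok(&self.bytes[offset..offset + end]),
            None => Err("unterminated C string".to_string()),
        }
    }
}

struct Memory {
    allocs: HashMap<AllocId, Allocation>,
}

impl Memory {
    /// Lookup is all that stays on `Memory`; a dangling `AllocId`
    /// fails here, so the lookup doubles as a liveness check.
    fn get(&self, id: AllocId) -> EvalResult<&Allocation> {
        self.allocs.get(&id).ok_or_else(|| "dangling allocation".to_string())
    }

    fn get_mut(&mut self, id: AllocId) -> EvalResult<&mut Allocation> {
        self.allocs.get_mut(&id).ok_or_else(|| "dangling allocation".to_string())
    }
}

fn main() -> EvalResult<()> {
    let cx = Ctx;
    let mut memory = Memory { allocs: HashMap::new() };
    memory.allocs.insert(0, Allocation { bytes: b"MIRI_ENV=1\0".to_vec() });

    // Pre-refactor shape would have been `memory.read_c_str(ptr)`;
    // post-refactor, the allocation is resolved first.
    let name = memory.get(0)?.read_c_str(&cx, 0)?;
    assert_eq!(name, b"MIRI_ENV=1");

    // Same two-step pattern for writes, via `get_mut`.
    memory.get_mut(0)?.write_repeat(&cx, 0, 0, 4)?;
    assert_eq!(&memory.get(0)?.bytes[..4], [0u8; 4].as_slice());
    Ok(())
}
```

A side effect visible in `src/operator.rs` and `src/stacked_borrows.rs`: since `Memory::get` already fails for dead allocations, the separate `InboundsCheck::Live` flag becomes redundant, and the one caller that must tolerate dangling pointers (the NULL comparison) moves to a dedicated `check_bounds_ptr_maybe_dead`.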