Skip to content

Commit ed30152

Browse files
committed
Auto merge of #876 - RalfJung:atomic, r=RalfJung
check that atomics are sufficiently aligned. Fixes #475.
2 parents 843691d + a4cc58e commit ed30152

File tree

4 files changed

+109
-48
lines changed

4 files changed

+109
-48
lines changed

src/shims/intrinsics.rs

+61-26
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
use rustc_apfloat::Float;
22
use rustc::mir;
33
use rustc::mir::interpret::{InterpResult, PointerArithmetic};
4-
use rustc::ty::layout::{self, LayoutOf, Size};
4+
use rustc::ty::layout::{self, LayoutOf, Size, Align};
55
use rustc::ty;
66

77
use crate::{
@@ -48,56 +48,84 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
4848
}
4949
}
5050

51+
"volatile_load" => {
52+
let place = this.deref_operand(args[0])?;
53+
this.copy_op(place.into(), dest)?;
54+
}
55+
56+
"volatile_store" => {
57+
let place = this.deref_operand(args[0])?;
58+
this.copy_op(args[1], place.into())?;
59+
}
60+
5161
"atomic_load" |
5262
"atomic_load_relaxed" |
5363
"atomic_load_acq" => {
54-
let ptr = this.deref_operand(args[0])?;
55-
let val = this.read_scalar(ptr.into())?; // make sure it fits into a scalar; otherwise it cannot be atomic
56-
this.write_scalar(val, dest)?;
57-
}
64+
let place = this.deref_operand(args[0])?;
65+
let val = this.read_scalar(place.into())?; // make sure it fits into a scalar; otherwise it cannot be atomic
5866

59-
"volatile_load" => {
60-
let ptr = this.deref_operand(args[0])?;
61-
this.copy_op(ptr.into(), dest)?;
67+
// Check alignment requirements. Atomics must always be aligned to their size,
68+
// even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
69+
// be 8-aligned).
70+
let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
71+
this.memory().check_ptr_access(place.ptr, place.layout.size, align)?;
72+
73+
this.write_scalar(val, dest)?;
6274
}
6375

6476
"atomic_store" |
6577
"atomic_store_relaxed" |
6678
"atomic_store_rel" => {
67-
let ptr = this.deref_operand(args[0])?;
79+
let place = this.deref_operand(args[0])?;
6880
let val = this.read_scalar(args[1])?; // make sure it fits into a scalar; otherwise it cannot be atomic
69-
this.write_scalar(val, ptr.into())?;
70-
}
7181

72-
"volatile_store" => {
73-
let ptr = this.deref_operand(args[0])?;
74-
this.copy_op(args[1], ptr.into())?;
82+
// Check alignment requirements. Atomics must always be aligned to their size,
83+
// even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
84+
// be 8-aligned).
85+
let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
86+
this.memory().check_ptr_access(place.ptr, place.layout.size, align)?;
87+
88+
this.write_scalar(val, place.into())?;
7589
}
7690

7791
"atomic_fence_acq" => {
7892
// we are inherently singlethreaded and singlecored, this is a nop
7993
}
8094

8195
_ if intrinsic_name.starts_with("atomic_xchg") => {
82-
let ptr = this.deref_operand(args[0])?;
96+
let place = this.deref_operand(args[0])?;
8397
let new = this.read_scalar(args[1])?;
84-
let old = this.read_scalar(ptr.into())?;
98+
let old = this.read_scalar(place.into())?;
99+
100+
// Check alignment requirements. Atomics must always be aligned to their size,
101+
// even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
102+
// be 8-aligned).
103+
let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
104+
this.memory().check_ptr_access(place.ptr, place.layout.size, align)?;
105+
85106
this.write_scalar(old, dest)?; // old value is returned
86-
this.write_scalar(new, ptr.into())?;
107+
this.write_scalar(new, place.into())?;
87108
}
88109

89110
_ if intrinsic_name.starts_with("atomic_cxchg") => {
90-
let ptr = this.deref_operand(args[0])?;
111+
let place = this.deref_operand(args[0])?;
91112
let expect_old = this.read_immediate(args[1])?; // read as immediate for the sake of `binary_op()`
92113
let new = this.read_scalar(args[2])?;
93-
let old = this.read_immediate(ptr.into())?; // read as immediate for the sake of `binary_op()`
114+
let old = this.read_immediate(place.into())?; // read as immediate for the sake of `binary_op()`
115+
116+
// Check alignment requirements. Atomics must always be aligned to their size,
117+
// even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
118+
// be 8-aligned).
119+
let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
120+
this.memory().check_ptr_access(place.ptr, place.layout.size, align)?;
121+
94122
// binary_op will bail if either of them is not a scalar
95123
let (eq, _) = this.binary_op(mir::BinOp::Eq, old, expect_old)?;
96124
let res = Immediate::ScalarPair(old.to_scalar_or_undef(), eq.into());
97125
this.write_immediate(res, dest)?; // old value is returned
98126
// update ptr depending on comparison
99127
if eq.to_bool()? {
100-
this.write_scalar(new, ptr.into())?;
128+
this.write_scalar(new, place.into())?;
101129
}
102130
}
103131

@@ -131,12 +159,19 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
131159
"atomic_xsub_rel" |
132160
"atomic_xsub_acqrel" |
133161
"atomic_xsub_relaxed" => {
134-
let ptr = this.deref_operand(args[0])?;
135-
if !ptr.layout.ty.is_integral() {
162+
let place = this.deref_operand(args[0])?;
163+
if !place.layout.ty.is_integral() {
136164
bug!("Atomic arithmetic operations only work on integer types");
137165
}
138166
let rhs = this.read_immediate(args[1])?;
139-
let old = this.read_immediate(ptr.into())?;
167+
let old = this.read_immediate(place.into())?;
168+
169+
// Check alignment requirements. Atomics must always be aligned to their size,
170+
// even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
171+
// be 8-aligned).
172+
let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
173+
this.memory().check_ptr_access(place.ptr, place.layout.size, align)?;
174+
140175
this.write_immediate(*old, dest)?; // old value is returned
141176
let (op, neg) = match intrinsic_name.split('_').nth(1).unwrap() {
142177
"or" => (mir::BinOp::BitOr, false),
@@ -154,7 +189,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
154189
} else {
155190
val
156191
};
157-
this.write_scalar(val, ptr.into())?;
192+
this.write_scalar(val, place.into())?;
158193
}
159194

160195
"breakpoint" => unimplemented!(), // halt miri
@@ -335,8 +370,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
335370
}
336371

337372
"move_val_init" => {
338-
let ptr = this.deref_operand(args[0])?;
339-
this.copy_op(args[1], ptr.into())?;
373+
let place = this.deref_operand(args[0])?;
374+
this.copy_op(args[1], place.into())?;
340375
}
341376

342377
"offset" => {
+12
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,12 @@
1+
#![feature(core_intrinsics)]
2+
3+
fn main() {
4+
// Do a 4-aligned u64 atomic access. That should be UB on all platforms,
5+
// even if u64 only has alignment 4.
6+
let z = [0u32; 2];
7+
let zptr = &z as *const _ as *const u64;
8+
unsafe {
9+
::std::intrinsics::atomic_load(zptr);
10+
//~^ ERROR tried to access memory with alignment 4, but alignment 8 is required
11+
}
12+
}

tests/run-pass/atomic-access-bool.rs

-19
This file was deleted.

tests/run-pass/atomic-compare_exchange.rs renamed to tests/run-pass/atomic.rs

+36-3
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,32 @@
1-
use std::sync::atomic::{AtomicIsize, Ordering::*};
2-
3-
static ATOMIC: AtomicIsize = AtomicIsize::new(0);
1+
use std::sync::atomic::{AtomicBool, AtomicIsize, AtomicU64, Ordering::*};
42

53
fn main() {
4+
atomic_bool();
5+
atomic_isize();
6+
atomic_u64();
7+
}
8+
9+
fn atomic_bool() {
10+
static mut ATOMIC: AtomicBool = AtomicBool::new(false);
11+
12+
unsafe {
13+
assert_eq!(*ATOMIC.get_mut(), false);
14+
ATOMIC.store(true, SeqCst);
15+
assert_eq!(*ATOMIC.get_mut(), true);
16+
ATOMIC.fetch_or(false, SeqCst);
17+
assert_eq!(*ATOMIC.get_mut(), true);
18+
ATOMIC.fetch_and(false, SeqCst);
19+
assert_eq!(*ATOMIC.get_mut(), false);
20+
ATOMIC.fetch_nand(true, SeqCst);
21+
assert_eq!(*ATOMIC.get_mut(), true);
22+
ATOMIC.fetch_xor(true, SeqCst);
23+
assert_eq!(*ATOMIC.get_mut(), false);
24+
}
25+
}
26+
27+
fn atomic_isize() {
28+
static ATOMIC: AtomicIsize = AtomicIsize::new(0);
29+
630
// Make sure trans can emit all the intrinsics correctly
731
assert_eq!(ATOMIC.compare_exchange(0, 1, Relaxed, Relaxed), Ok(0));
832
assert_eq!(ATOMIC.compare_exchange(0, 2, Acquire, Relaxed), Err(1));
@@ -27,3 +51,12 @@ fn main() {
2751
ATOMIC.compare_exchange_weak(0, 1, SeqCst, Acquire).ok();
2852
ATOMIC.compare_exchange_weak(0, 1, SeqCst, SeqCst).ok();
2953
}
54+
55+
fn atomic_u64() {
56+
static ATOMIC: AtomicU64 = AtomicU64::new(0);
57+
58+
ATOMIC.store(1, SeqCst);
59+
assert_eq!(ATOMIC.compare_exchange(0, 0x100, AcqRel, Acquire), Err(1));
60+
assert_eq!(ATOMIC.compare_exchange_weak(1, 0x100, AcqRel, Acquire), Ok(1));
61+
assert_eq!(ATOMIC.load(Relaxed), 0x100);
62+
}

0 commit comments

Comments
 (0)