diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
index 582c9354041de..2bb0ce68b171e 100644
--- a/compiler/rustc_codegen_llvm/src/builder.rs
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -410,17 +410,17 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         }
     }

-    fn load(&mut self, ptr: &'ll Value, align: Align) -> &'ll Value {
+    fn load(&mut self, ty: &'ll Type, ptr: &'ll Value, align: Align) -> &'ll Value {
         unsafe {
-            let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, UNNAMED);
+            let load = llvm::LLVMBuildLoad2(self.llbuilder, ty, ptr, UNNAMED);
             llvm::LLVMSetAlignment(load, align.bytes() as c_uint);
             load
         }
     }

-    fn volatile_load(&mut self, ptr: &'ll Value) -> &'ll Value {
+    fn volatile_load(&mut self, ty: &'ll Type, ptr: &'ll Value) -> &'ll Value {
         unsafe {
-            let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, UNNAMED);
+            let load = llvm::LLVMBuildLoad2(self.llbuilder, ty, ptr, UNNAMED);
             llvm::LLVMSetVolatile(load, llvm::True);
             load
         }
@@ -428,6 +428,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {

     fn atomic_load(
         &mut self,
+        ty: &'ll Type,
         ptr: &'ll Value,
         order: rustc_codegen_ssa::common::AtomicOrdering,
         size: Size,
@@ -435,6 +436,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         unsafe {
             let load = llvm::LLVMRustBuildAtomicLoad(
                 self.llbuilder,
+                ty,
                 ptr,
                 UNNAMED,
                 AtomicOrdering::from_generic(order),
@@ -486,7 +488,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
             }
         }
         let llval = const_llval.unwrap_or_else(|| {
-            let load = self.load(place.llval, place.align);
+            let load = self.load(place.layout.llvm_type(self), place.llval, place.align);
             if let abi::Abi::Scalar(ref scalar) = place.layout.abi {
                 scalar_load_metadata(self, load, scalar);
             }
@@ -498,7 +500,8 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {

             let mut load = |i, scalar: &abi::Scalar, align| {
                 let llptr = self.struct_gep(place.llval, i as u64);
-                let load = self.load(llptr, align);
+                let llty = place.layout.scalar_pair_element_llvm_type(self, i, false);
+                let load = self.load(llty, llptr, align);
                 scalar_load_metadata(self, load, scalar);
                 self.to_immediate_scalar(load, scalar)
             };
@@ -815,13 +818,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         size: &'ll Value,
         flags: MemFlags,
     ) {
-        if flags.contains(MemFlags::NONTEMPORAL) {
-            // HACK(nox): This is inefficient but there is no nontemporal memcpy.
-            let val = self.load(src, src_align);
-            let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
-            self.store_with_flags(val, ptr, dst_align, flags);
-            return;
-        }
+        assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memcpy not supported");
         let size = self.intcast(size, self.type_isize(), false);
         let is_volatile = flags.contains(MemFlags::VOLATILE);
         let dst = self.pointercast(dst, self.type_i8p());
@@ -848,13 +845,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         size: &'ll Value,
         flags: MemFlags,
     ) {
-        if flags.contains(MemFlags::NONTEMPORAL) {
-            // HACK(nox): This is inefficient but there is no nontemporal memmove.
-            let val = self.load(src, src_align);
-            let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
-            self.store_with_flags(val, ptr, dst_align, flags);
-            return;
-        }
+        assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memmove not supported");
         let size = self.intcast(size, self.type_isize(), false);
         let is_volatile = flags.contains(MemFlags::VOLATILE);
         let dst = self.pointercast(dst, self.type_i8p());
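Background for the builder.rs hunks above: `load`, `volatile_load` and `atomic_load` now take the element type explicitly because `LLVMBuildLoad2` does, and `LLVMBuildLoad2` exists because LLVM is migrating to opaque pointers, where a pointer value no longer tells the builder what it points to. A stand-alone sketch of that constraint, using hypothetical `Ty`/`loaded_ty` names rather than rustc's real types:

#[derive(Clone, Copy, Debug, PartialEq)]
enum Ty {
    I32,
    // Old-style pointer that still carries its pointee type.
    TypedPtr(&'static Ty),
    // Opaque pointer: there is no pointee type left to recover.
    OpaquePtr,
}

// What type does `load ptr` produce?
fn loaded_ty(ptr: Ty, explicit: Option<Ty>) -> Ty {
    match ptr {
        // The old builder could infer the result type from the pointer...
        Ty::TypedPtr(pointee) => *pointee,
        // ...with opaque pointers the caller has to pass it in.
        Ty::OpaquePtr => explicit.expect("caller must supply the loaded type"),
        other => panic!("not a pointer type: {:?}", other),
    }
}

fn main() {
    static I32: Ty = Ty::I32;
    assert_eq!(loaded_ty(Ty::TypedPtr(&I32), None), Ty::I32);
    assert_eq!(loaded_ty(Ty::OpaquePtr, Some(Ty::I32)), Ty::I32);
}

The non-temporal memcpy/memmove special case is dropped here and reappears further down in `OperandValue::store`, which still has the destination layout and can therefore name the loaded type.
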
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs
index 38f50a6d621bb..de3f719b8163c 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs
@@ -20,7 +20,7 @@ pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &mut Builder<'_,
     // LLVM to keep around the reference to the global.
     let indices = [bx.const_i32(0), bx.const_i32(0)];
     let element = bx.inbounds_gep(gdb_debug_scripts_section, &indices);
-    let volative_load_instruction = bx.volatile_load(element);
+    let volative_load_instruction = bx.volatile_load(bx.type_i8(), element);
     unsafe {
         llvm::LLVMSetAlignment(volative_load_instruction, 1);
     }
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index 9a968659e2fe8..a48a694b630f0 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -162,11 +162,14 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {

            sym::volatile_load | sym::unaligned_volatile_load => {
                let tp_ty = substs.type_at(0);
-                let mut ptr = args[0].immediate();
-                if let PassMode::Cast(ty) = fn_abi.ret.mode {
-                    ptr = self.pointercast(ptr, self.type_ptr_to(ty.llvm_type(self)));
-                }
-                let load = self.volatile_load(ptr);
+                let ptr = args[0].immediate();
+                let load = if let PassMode::Cast(ty) = fn_abi.ret.mode {
+                    let llty = ty.llvm_type(self);
+                    let ptr = self.pointercast(ptr, self.type_ptr_to(llty));
+                    self.volatile_load(llty, ptr)
+                } else {
+                    self.volatile_load(self.layout_of(tp_ty).llvm_type(self), ptr)
+                };
                let align = if name == sym::unaligned_volatile_load {
                    1
                } else {
@@ -319,9 +322,9 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
            let integer_ty = self.type_ix(layout.size.bits());
            let ptr_ty = self.type_ptr_to(integer_ty);
            let a_ptr = self.bitcast(a, ptr_ty);
-            let a_val = self.load(a_ptr, layout.align.abi);
+            let a_val = self.load(integer_ty, a_ptr, layout.align.abi);
            let b_ptr = self.bitcast(b, ptr_ty);
-            let b_val = self.load(b_ptr, layout.align.abi);
+            let b_val = self.load(integer_ty, b_ptr, layout.align.abi);
            self.icmp(IntPredicate::IntEQ, a_val, b_val)
        } else {
            let i8p_ty = self.type_i8p();
@@ -540,7 +543,7 @@ fn codegen_msvc_try(
            // Source: MicrosoftCXXABI::getAddrOfCXXCatchHandlerType in clang
            let flags = bx.const_i32(8);
            let funclet = catchpad_rust.catch_pad(cs, &[tydesc, flags, slot]);
-            let ptr = catchpad_rust.load(slot, ptr_align);
+            let ptr = catchpad_rust.load(bx.type_i8p(), slot, ptr_align);
            catchpad_rust.call(catch_func, &[data, ptr], Some(&funclet));
            catchpad_rust.catch_ret(&funclet, caught.llbb());
diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
index 91923251018a8..2ade66ac41eda 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
@@ -1385,7 +1385,12 @@ extern "C" {
        Val: &'a Value,
        Name: *const c_char,
    ) -> &'a Value;
-    pub fn LLVMBuildLoad(B: &Builder<'a>, PointerVal: &'a Value, Name: *const c_char) -> &'a Value;
+    pub fn LLVMBuildLoad2(
+        B: &Builder<'a>,
+        Ty: &'a Type,
+        PointerVal: &'a Value,
+        Name: *const c_char,
+    ) -> &'a Value;

    pub fn LLVMBuildStore(B: &Builder<'a>, Val: &'a Value, Ptr: &'a Value) -> &'a Value;

@@ -1631,6 +1636,7 @@ extern "C" {
    // Atomic Operations
    pub fn LLVMRustBuildAtomicLoad(
        B: &Builder<'a>,
+        ElementType: &'a Type,
        PointerVal: &'a Value,
        Name: *const c_char,
        Order: AtomicOrdering,
diff --git a/compiler/rustc_codegen_llvm/src/va_arg.rs b/compiler/rustc_codegen_llvm/src/va_arg.rs
index 39d08fbee3b7f..9df1bd7d1d9bb 100644
--- a/compiler/rustc_codegen_llvm/src/va_arg.rs
+++ b/compiler/rustc_codegen_llvm/src/va_arg.rs
@@ -32,14 +32,15 @@ fn emit_direct_ptr_va_arg(
     slot_size: Align,
     allow_higher_align: bool,
 ) -> (&'ll Value, Align) {
-    let va_list_ptr_ty = bx.cx().type_ptr_to(bx.cx.type_i8p());
+    let va_list_ty = bx.type_i8p();
+    let va_list_ptr_ty = bx.type_ptr_to(va_list_ty);
     let va_list_addr = if list.layout.llvm_type(bx.cx) != va_list_ptr_ty {
         bx.bitcast(list.immediate(), va_list_ptr_ty)
     } else {
         list.immediate()
     };

-    let ptr = bx.load(va_list_addr, bx.tcx().data_layout.pointer_align.abi);
+    let ptr = bx.load(va_list_ty, va_list_addr, bx.tcx().data_layout.pointer_align.abi);

     let (addr, addr_align) = if allow_higher_align && align > slot_size {
         (round_pointer_up_to_alignment(bx, ptr, align, bx.cx().type_i8p()), align)
@@ -82,10 +83,10 @@ fn emit_ptr_va_arg(
     let (addr, addr_align) =
         emit_direct_ptr_va_arg(bx, list, llty, size, align.abi, slot_size, allow_higher_align);
     if indirect {
-        let tmp_ret = bx.load(addr, addr_align);
-        bx.load(tmp_ret, align.abi)
+        let tmp_ret = bx.load(llty, addr, addr_align);
+        bx.load(bx.cx.layout_of(target_ty).llvm_type(bx.cx), tmp_ret, align.abi)
     } else {
-        bx.load(addr, addr_align)
+        bx.load(llty, addr, addr_align)
     }
 }

@@ -118,7 +119,7 @@ fn emit_aapcs_va_arg(
     };

     // if the offset >= 0 then the value will be on the stack
-    let mut reg_off_v = bx.load(reg_off, offset_align);
+    let mut reg_off_v = bx.load(bx.type_i32(), reg_off, offset_align);
     let use_stack = bx.icmp(IntPredicate::IntSGE, reg_off_v, zero);
     bx.cond_br(use_stack, &on_stack.llbb(), &maybe_reg.llbb());

@@ -139,8 +140,9 @@ fn emit_aapcs_va_arg(
     let use_stack = maybe_reg.icmp(IntPredicate::IntSGT, new_reg_off_v, zero);
     maybe_reg.cond_br(use_stack, &on_stack.llbb(), &in_reg.llbb());

+    let top_type = bx.type_i8p();
     let top = in_reg.struct_gep(va_list_addr, reg_top_index);
-    let top = in_reg.load(top, bx.tcx().data_layout.pointer_align.abi);
+    let top = in_reg.load(top_type, top, bx.tcx().data_layout.pointer_align.abi);

     // reg_value = *(@top + reg_off_v);
     let mut reg_addr = in_reg.gep(top, &[reg_off_v]);
@@ -149,8 +151,9 @@ fn emit_aapcs_va_arg(
         let offset = bx.const_i32((slot_size - layout.size.bytes()) as i32);
         reg_addr = in_reg.gep(reg_addr, &[offset]);
     }
-    let reg_addr = in_reg.bitcast(reg_addr, bx.cx.type_ptr_to(layout.llvm_type(bx)));
-    let reg_value = in_reg.load(reg_addr, layout.align.abi);
+    let reg_type = layout.llvm_type(bx);
+    let reg_addr = in_reg.bitcast(reg_addr, bx.cx.type_ptr_to(reg_type));
+    let reg_value = in_reg.load(reg_type, reg_addr, layout.align.abi);
     in_reg.br(&end.llbb());

     // On Stack block
diff --git a/compiler/rustc_codegen_ssa/src/meth.rs b/compiler/rustc_codegen_ssa/src/meth.rs
index 63245a94c8e3d..b392b2c4ab8ac 100644
--- a/compiler/rustc_codegen_ssa/src/meth.rs
+++ b/compiler/rustc_codegen_ssa/src/meth.rs
@@ -20,10 +20,11 @@ impl<'a, 'tcx> VirtualIndex {
         // Load the data pointer from the object.
         debug!("get_fn({:?}, {:?})", llvtable, self);

-        let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(bx.fn_ptr_backend_type(fn_abi)));
+        let llty = bx.fn_ptr_backend_type(fn_abi);
+        let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(llty));
         let ptr_align = bx.tcx().data_layout.pointer_align.abi;
         let gep = bx.inbounds_gep(llvtable, &[bx.const_usize(self.0)]);
-        let ptr = bx.load(gep, ptr_align);
+        let ptr = bx.load(llty, gep, ptr_align);
         bx.nonnull_metadata(ptr);
         // Vtable loads are invariant.
         bx.set_invariant_load(ptr);
@@ -38,10 +39,11 @@ impl<'a, 'tcx> VirtualIndex {
         // Load the data pointer from the object.
         debug!("get_int({:?}, {:?})", llvtable, self);

-        let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(bx.type_isize()));
+        let llty = bx.type_isize();
+        let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(llty));
         let usize_align = bx.tcx().data_layout.pointer_align.abi;
         let gep = bx.inbounds_gep(llvtable, &[bx.const_usize(self.0)]);
-        let ptr = bx.load(gep, usize_align);
+        let ptr = bx.load(llty, gep, usize_align);
         // Vtable loads are invariant.
         bx.set_invariant_load(ptr);
         ptr
debug!("get_fn({:?}, {:?})", llvtable, self); - let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(bx.fn_ptr_backend_type(fn_abi))); + let llty = bx.fn_ptr_backend_type(fn_abi); + let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(llty)); let ptr_align = bx.tcx().data_layout.pointer_align.abi; let gep = bx.inbounds_gep(llvtable, &[bx.const_usize(self.0)]); - let ptr = bx.load(gep, ptr_align); + let ptr = bx.load(llty, gep, ptr_align); bx.nonnull_metadata(ptr); // Vtable loads are invariant. bx.set_invariant_load(ptr); @@ -38,10 +39,11 @@ impl<'a, 'tcx> VirtualIndex { // Load the data pointer from the object. debug!("get_int({:?}, {:?})", llvtable, self); - let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(bx.type_isize())); + let llty = bx.type_isize(); + let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(llty)); let usize_align = bx.tcx().data_layout.pointer_align.abi; let gep = bx.inbounds_gep(llvtable, &[bx.const_usize(self.0)]); - let ptr = bx.load(gep, usize_align); + let ptr = bx.load(llty, gep, usize_align); // Vtable loads are invariant. bx.set_invariant_load(ptr); ptr diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs index 2cb28d7361c33..b584801a62de2 100644 --- a/compiler/rustc_codegen_ssa/src/mir/block.rs +++ b/compiler/rustc_codegen_ssa/src/mir/block.rs @@ -260,7 +260,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { PassMode::Direct(_) | PassMode::Pair(..) => { let op = self.codegen_consume(&mut bx, mir::Place::return_place().as_ref()); if let Ref(llval, _, align) = op.val { - bx.load(llval, align) + bx.load(bx.backend_type(op.layout), llval, align) } else { op.immediate_or_packed_pair(&mut bx) } @@ -287,8 +287,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { llval } }; - let addr = bx.pointercast(llslot, bx.type_ptr_to(bx.cast_backend_type(&cast_ty))); - bx.load(addr, self.fn_abi.ret.layout.align.abi) + let ty = bx.cast_backend_type(&cast_ty); + let addr = bx.pointercast(llslot, bx.type_ptr_to(ty)); + bx.load(ty, addr, self.fn_abi.ret.layout.align.abi) } }; bx.ret(llval); @@ -1086,15 +1087,16 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { if by_ref && !arg.is_indirect() { // Have to load the argument, maybe while casting it. if let PassMode::Cast(ty) = arg.mode { - let addr = bx.pointercast(llval, bx.type_ptr_to(bx.cast_backend_type(&ty))); - llval = bx.load(addr, align.min(arg.layout.align.abi)); + let llty = bx.cast_backend_type(&ty); + let addr = bx.pointercast(llval, bx.type_ptr_to(llty)); + llval = bx.load(llty, addr, align.min(arg.layout.align.abi)); } else { // We can't use `PlaceRef::load` here because the argument // may have a type we don't treat as immediate, but the ABI // used for this call is passing it by-value. In that case, // the load would just produce `OperandValue::Ref` instead // of the `OperandValue::Immediate` we need for the call. 
diff --git a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
index 6bb20545f07be..c139f915e6cbb 100644
--- a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
@@ -274,7 +274,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             match *elem {
                 mir::ProjectionElem::Deref => {
                     indirect_offsets.push(Size::ZERO);
-                    place = place.project_deref(bx);
+                    place = bx.load_operand(place).deref(bx.cx());
                 }
                 mir::ProjectionElem::Field(field, _) => {
                     let i = field.index();
diff --git a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
index 8502309b90e5a..56ff1b3934c13 100644
--- a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
@@ -448,15 +448,14 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                    if ty.is_unsafe_ptr() {
                        // Some platforms do not support atomic operations on pointers,
                        // so we cast to integer first...
-                        let ptr_llty = bx.type_ptr_to(bx.type_isize());
+                        let llty = bx.type_isize();
+                        let ptr_llty = bx.type_ptr_to(llty);
                        source = bx.pointercast(source, ptr_llty);
-                    }
-                    let result = bx.atomic_load(source, order, size);
-                    if ty.is_unsafe_ptr() {
+                        let result = bx.atomic_load(llty, source, order, size);
                        // ... and then cast the result back to a pointer
                        bx.inttoptr(result, bx.backend_type(layout))
                    } else {
-                        result
+                        bx.atomic_load(bx.backend_type(layout), source, order, size)
                    }
                } else {
                    return invalid_monomorphization(ty);
diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs
index 25e84c38ed315..3c42b2cc2ea7a 100644
--- a/compiler/rustc_codegen_ssa/src/mir/operand.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs
@@ -289,6 +289,14 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
         }
         match self {
             OperandValue::Ref(r, None, source_align) => {
+                if flags.contains(MemFlags::NONTEMPORAL) {
+                    // HACK(nox): This is inefficient but there is no nontemporal memcpy.
+                    let ty = bx.backend_type(dest.layout);
+                    let ptr = bx.pointercast(r, bx.type_ptr_to(ty));
+                    let val = bx.load(ty, ptr, source_align);
+                    bx.store_with_flags(val, dest.llval, dest.align, flags);
+                    return;
+                }
                 base::memcpy_ty(bx, dest.llval, dest.align, r, source_align, dest.layout, flags)
             }
             OperandValue::Ref(_, Some(_), _) => {
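Two notes on the rustc_codegen_ssa hunks above. The non-temporal hack removed from builder.rs lands in `OperandValue::store` because that layer still knows `dest.layout` and can therefore hand `bx.load` an element type. And the rewritten `atomic_load` arm in mir/intrinsic.rs keeps the usual workaround for targets without pointer atomics: round-trip the pointer value through an integer. The same idea expressed with std's atomics, purely as an illustration (not compiler code):

use std::sync::atomic::{AtomicUsize, Ordering};

fn main() {
    let value = 42i32;
    let slot = AtomicUsize::new(0);

    // "Atomic store of a pointer": cast the pointer to an integer first...
    slot.store(&value as *const i32 as usize, Ordering::SeqCst);

    // ...and "atomic load of a pointer": load the integer, then cast back,
    // mirroring the pointercast + atomic_load + inttoptr sequence above.
    let p = slot.load(Ordering::SeqCst) as *const i32;
    assert_eq!(unsafe { *p }, 42);
}
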
diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs
index a9e7ebf6d43f7..66d9d1a1e0c49 100644
--- a/compiler/rustc_codegen_ssa/src/mir/place.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/place.rs
@@ -402,18 +402,6 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
         downcast
     }

-    pub fn project_deref<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) -> Self {
-        let target_ty = self.layout.ty.builtin_deref(true).expect("failed to deref");
-        let layout = bx.layout_of(target_ty.ty);
-
-        PlaceRef {
-            llval: bx.load(self.llval, self.align),
-            llextra: None,
-            layout,
-            align: layout.align.abi,
-        }
-    }
-
     pub fn storage_live<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
         bx.lifetime_start(self.llval, self.layout.size);
     }
diff --git a/compiler/rustc_codegen_ssa/src/traits/builder.rs b/compiler/rustc_codegen_ssa/src/traits/builder.rs
index 29b2db5d4d7af..f0c232a97bc94 100644
--- a/compiler/rustc_codegen_ssa/src/traits/builder.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/builder.rs
@@ -137,9 +137,15 @@ pub trait BuilderMethods<'a, 'tcx>:
     fn dynamic_alloca(&mut self, ty: Self::Type, align: Align) -> Self::Value;
     fn array_alloca(&mut self, ty: Self::Type, len: Self::Value, align: Align) -> Self::Value;

-    fn load(&mut self, ptr: Self::Value, align: Align) -> Self::Value;
-    fn volatile_load(&mut self, ptr: Self::Value) -> Self::Value;
-    fn atomic_load(&mut self, ptr: Self::Value, order: AtomicOrdering, size: Size) -> Self::Value;
+    fn load(&mut self, ty: Self::Type, ptr: Self::Value, align: Align) -> Self::Value;
+    fn volatile_load(&mut self, ty: Self::Type, ptr: Self::Value) -> Self::Value;
+    fn atomic_load(
+        &mut self,
+        ty: Self::Type,
+        ptr: Self::Value,
+        order: AtomicOrdering,
+        size: Size,
+    ) -> Self::Value;
     fn load_operand(&mut self, place: PlaceRef<'tcx, Self::Value>)
         -> OperandRef<'tcx, Self::Value>;

diff --git a/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp b/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp
index 9b757eb40c184..4cdc8a4155bcc 100644
--- a/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp
+++ b/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp
@@ -349,11 +349,10 @@ extern "C" void LLVMRustSetFastMath(LLVMValueRef V) {
 }

 extern "C" LLVMValueRef
-LLVMRustBuildAtomicLoad(LLVMBuilderRef B, LLVMValueRef Source, const char *Name,
-                        LLVMAtomicOrdering Order) {
+LLVMRustBuildAtomicLoad(LLVMBuilderRef B, LLVMTypeRef Ty, LLVMValueRef Source,
+                        const char *Name, LLVMAtomicOrdering Order) {
   Value *Ptr = unwrap(Source);
-  Type *Ty = Ptr->getType()->getPointerElementType();
-  LoadInst *LI = unwrap(B)->CreateLoad(Ty, Ptr, Name);
+  LoadInst *LI = unwrap(B)->CreateLoad(unwrap(Ty), Ptr, Name);
   LI->setAtomic(fromRust(Order));
   return wrap(LI);
 }
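On the C++ side the wrapper stops calling `getPointerElementType()`, exactly the query that opaque pointers remove, and instead receives the type from its Rust caller. Seen from any backend implementing `BuilderMethods`, the whole patch amounts to one signature change: every load names the type it produces, just as the IR instruction `load <ty>, <ty>* %p` does. A toy rendition of that shape, with made-up types that are not rustc's:

use std::collections::HashMap;

#[derive(Clone, Copy)]
enum Ty {
    I8,
    I64,
}

// A fake "SSA value": just the loaded bits.
type Value = u64;

trait LoadBuilder {
    fn load(&mut self, ty: Ty, ptr: usize, align: u64) -> Value;
    fn volatile_load(&mut self, ty: Ty, ptr: usize) -> Value;
}

struct FakeMemory(HashMap<usize, u64>);

impl LoadBuilder for FakeMemory {
    fn load(&mut self, ty: Ty, ptr: usize, _align: u64) -> Value {
        let bits = *self.0.get(&ptr).unwrap_or(&0);
        // The explicit type, not the pointer, decides how wide the read is.
        match ty {
            Ty::I8 => bits & 0xff,
            Ty::I64 => bits,
        }
    }

    fn volatile_load(&mut self, ty: Ty, ptr: usize) -> Value {
        self.load(ty, ptr, 1)
    }
}

fn main() {
    let mut mem = FakeMemory(HashMap::from([(0x10, 0xdead_beef_u64)]));
    assert_eq!(mem.load(Ty::I64, 0x10, 8), 0xdead_beef);
    assert_eq!(mem.volatile_load(Ty::I8, 0x10), 0xef);
}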