
Commit 432e145

Auto merge of #86873 - nikic:opaque-ptrs, r=nagisa
Improve opaque pointers support

Opaque pointers are coming, and rustc is not ready. This adds partial support by passing an explicit load type to LLVM. Two issues I've encountered:

* The necessary type was not available at the point where non-temporal copies were generated. I've pushed the code for that upwards out of the memcpy implementation and moved the position of a cast to make do with the types we have available. (I'm not sure that cast is needed at all, but have retained it in the interest of conservativeness.)
* The `PlaceRef::project_deref()` function used during debuginfo generation seems to be buggy in some way -- though I haven't figured out specifically what it does wrong. Replacing it with `load_operand().deref()` did the trick, but I don't really know what I'm doing here.
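Some background on why every load now needs a type: with typed pointers, `LLVMBuildLoad` can recover the loaded type from the pointer operand's pointee type (e.g. `load i32, i32* %p`); an opaque pointer is just `ptr`, so that information is gone and the frontend has to spell the type out, which is what `LLVMBuildLoad2` and the `ty` parameters threaded through the diffs below provide. A runnable plain-Rust analogy of the same situation (illustrative only, not rustc code):

```rust
// Once a pointer is type-erased, the pointee type must be supplied at
// the read site -- the same reason LLVMBuildLoad2 takes an explicit
// type where LLVMBuildLoad derived one from the (typed) pointer.
fn main() {
    let x: u32 = 0xDEAD_BEEF;
    let erased = &x as *const u32 as *const (); // "opaque pointer"
    // The load has to say what type it is reading:
    let v = unsafe { *erased.cast::<u32>() };
    assert_eq!(v, 0xDEAD_BEEF);
}
```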
2 parents a31431f + 2ce1add

13 files changed: +81 insertions, -74 deletions

compiler/rustc_codegen_llvm/src/builder.rs (+11, -20)

@@ -410,31 +410,33 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         }
     }
 
-    fn load(&mut self, ptr: &'ll Value, align: Align) -> &'ll Value {
+    fn load(&mut self, ty: &'ll Type, ptr: &'ll Value, align: Align) -> &'ll Value {
         unsafe {
-            let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, UNNAMED);
+            let load = llvm::LLVMBuildLoad2(self.llbuilder, ty, ptr, UNNAMED);
             llvm::LLVMSetAlignment(load, align.bytes() as c_uint);
             load
         }
     }
 
-    fn volatile_load(&mut self, ptr: &'ll Value) -> &'ll Value {
+    fn volatile_load(&mut self, ty: &'ll Type, ptr: &'ll Value) -> &'ll Value {
         unsafe {
-            let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, UNNAMED);
+            let load = llvm::LLVMBuildLoad2(self.llbuilder, ty, ptr, UNNAMED);
             llvm::LLVMSetVolatile(load, llvm::True);
             load
         }
     }
 
     fn atomic_load(
         &mut self,
+        ty: &'ll Type,
         ptr: &'ll Value,
         order: rustc_codegen_ssa::common::AtomicOrdering,
         size: Size,
     ) -> &'ll Value {
         unsafe {
             let load = llvm::LLVMRustBuildAtomicLoad(
                 self.llbuilder,
+                ty,
                 ptr,
                 UNNAMED,
                 AtomicOrdering::from_generic(order),

@@ -486,7 +488,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
             }
         }
         let llval = const_llval.unwrap_or_else(|| {
-            let load = self.load(place.llval, place.align);
+            let load = self.load(place.layout.llvm_type(self), place.llval, place.align);
             if let abi::Abi::Scalar(ref scalar) = place.layout.abi {
                 scalar_load_metadata(self, load, scalar);
             }

@@ -498,7 +500,8 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
 
         let mut load = |i, scalar: &abi::Scalar, align| {
             let llptr = self.struct_gep(place.llval, i as u64);
-            let load = self.load(llptr, align);
+            let llty = place.layout.scalar_pair_element_llvm_type(self, i, false);
+            let load = self.load(llty, llptr, align);
             scalar_load_metadata(self, load, scalar);
             self.to_immediate_scalar(load, scalar)
         };

@@ -815,13 +818,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         size: &'ll Value,
         flags: MemFlags,
     ) {
-        if flags.contains(MemFlags::NONTEMPORAL) {
-            // HACK(nox): This is inefficient but there is no nontemporal memcpy.
-            let val = self.load(src, src_align);
-            let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
-            self.store_with_flags(val, ptr, dst_align, flags);
-            return;
-        }
+        assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memcpy not supported");
         let size = self.intcast(size, self.type_isize(), false);
         let is_volatile = flags.contains(MemFlags::VOLATILE);
         let dst = self.pointercast(dst, self.type_i8p());

@@ -848,13 +845,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         size: &'ll Value,
         flags: MemFlags,
     ) {
-        if flags.contains(MemFlags::NONTEMPORAL) {
-            // HACK(nox): This is inefficient but there is no nontemporal memmove.
-            let val = self.load(src, src_align);
-            let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
-            self.store_with_flags(val, ptr, dst_align, flags);
-            return;
-        }
+        assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memmove not supported");
         let size = self.intcast(size, self.type_isize(), false);
         let is_volatile = flags.contains(MemFlags::VOLATILE);
         let dst = self.pointercast(dst, self.type_i8p());
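A note on the last two hunks above: the non-temporal special case cannot stay inside `memcpy`/`memmove`, because at that level only raw `i8*`-style pointers are in scope and there is no layout from which to derive a type for the now-typed `load`. As the commit message says, the hack is reintroduced one level up, in `OperandValue::store_with_flags` in rustc_codegen_ssa/src/mir/operand.rs (further down in this diff), where `dest.layout` supplies the load type.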

compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs (+1, -1)

@@ -20,7 +20,7 @@ pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &mut Builder<'_,
     // LLVM to keep around the reference to the global.
     let indices = [bx.const_i32(0), bx.const_i32(0)];
     let element = bx.inbounds_gep(gdb_debug_scripts_section, &indices);
-    let volative_load_instruction = bx.volatile_load(element);
+    let volative_load_instruction = bx.volatile_load(bx.type_i8(), element);
     unsafe {
         llvm::LLVMSetAlignment(volative_load_instruction, 1);
     }

compiler/rustc_codegen_llvm/src/intrinsic.rs (+11, -8)

@@ -162,11 +162,14 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
 
         sym::volatile_load | sym::unaligned_volatile_load => {
             let tp_ty = substs.type_at(0);
-            let mut ptr = args[0].immediate();
-            if let PassMode::Cast(ty) = fn_abi.ret.mode {
-                ptr = self.pointercast(ptr, self.type_ptr_to(ty.llvm_type(self)));
-            }
-            let load = self.volatile_load(ptr);
+            let ptr = args[0].immediate();
+            let load = if let PassMode::Cast(ty) = fn_abi.ret.mode {
+                let llty = ty.llvm_type(self);
+                let ptr = self.pointercast(ptr, self.type_ptr_to(llty));
+                self.volatile_load(llty, ptr)
+            } else {
+                self.volatile_load(self.layout_of(tp_ty).llvm_type(self), ptr)
+            };
             let align = if name == sym::unaligned_volatile_load {
                 1
             } else {

@@ -319,9 +322,9 @@
             let integer_ty = self.type_ix(layout.size.bits());
             let ptr_ty = self.type_ptr_to(integer_ty);
             let a_ptr = self.bitcast(a, ptr_ty);
-            let a_val = self.load(a_ptr, layout.align.abi);
+            let a_val = self.load(integer_ty, a_ptr, layout.align.abi);
             let b_ptr = self.bitcast(b, ptr_ty);
-            let b_val = self.load(b_ptr, layout.align.abi);
+            let b_val = self.load(integer_ty, b_ptr, layout.align.abi);
             self.icmp(IntPredicate::IntEQ, a_val, b_val)
         } else {
             let i8p_ty = self.type_i8p();

@@ -540,7 +543,7 @@ fn codegen_msvc_try(
         // Source: MicrosoftCXXABI::getAddrOfCXXCatchHandlerType in clang
         let flags = bx.const_i32(8);
         let funclet = catchpad_rust.catch_pad(cs, &[tydesc, flags, slot]);
-        let ptr = catchpad_rust.load(slot, ptr_align);
+        let ptr = catchpad_rust.load(bx.type_i8p(), slot, ptr_align);
         catchpad_rust.call(catch_func, &[data, ptr], Some(&funclet));
         catchpad_rust.catch_ret(&funclet, caught.llbb());
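The middle hunk compares two values by reloading each through an integer of the value's exact bit width and comparing the integers (the context lines show the `i8*`-based `memcmp`-style fallback for other sizes); with opaque pointers, both loads must now name `integer_ty` explicitly. A runnable plain-Rust analogy of the trick, with `u64` standing in for `type_ix(layout.size.bits())`:

```rust
// Compare two 8-byte values by "loading" each through a same-sized
// integer type and comparing the integers, as the hunk does with
// bitcast + load(integer_ty, ...) + icmp eq.
fn main() {
    let a = [1u8, 2, 3, 4, 5, 6, 7, 8];
    let b = [1u8, 2, 3, 4, 5, 6, 7, 8];
    let a_val = u64::from_ne_bytes(a);
    let b_val = u64::from_ne_bytes(b);
    assert_eq!(a_val == b_val, a == b);
}
```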

compiler/rustc_codegen_llvm/src/llvm/ffi.rs (+7, -1)

@@ -1385,7 +1385,12 @@
         Val: &'a Value,
         Name: *const c_char,
     ) -> &'a Value;
-    pub fn LLVMBuildLoad(B: &Builder<'a>, PointerVal: &'a Value, Name: *const c_char) -> &'a Value;
+    pub fn LLVMBuildLoad2(
+        B: &Builder<'a>,
+        Ty: &'a Type,
+        PointerVal: &'a Value,
+        Name: *const c_char,
+    ) -> &'a Value;
 
     pub fn LLVMBuildStore(B: &Builder<'a>, Val: &'a Value, Ptr: &'a Value) -> &'a Value;

@@ -1631,6 +1636,7 @@
     // Atomic Operations
     pub fn LLVMRustBuildAtomicLoad(
         B: &Builder<'a>,
+        ElementType: &'a Type,
         PointerVal: &'a Value,
         Name: *const c_char,
         Order: AtomicOrdering,
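For reference, the C prototype this binding mirrors (from LLVM's llvm-c/Core.h) is `LLVMValueRef LLVMBuildLoad2(LLVMBuilderRef B, LLVMTypeRef Ty, LLVMValueRef PointerVal, const char *Name)`; plain `LLVMBuildLoad` is the older form that reads the type off the pointer. A self-contained sketch of how such a binding can be declared, with hypothetical opaque handle structs standing in for rustc's `Builder`, `Type`, and `Value`:

```rust
use std::os::raw::c_char;

// Hypothetical opaque handles; rustc uses its own `Builder`, `Type`,
// and `Value` references as shown in the diff above.
#[repr(C)]
pub struct LLVMBuilder([u8; 0]);
#[repr(C)]
pub struct LLVMType([u8; 0]);
#[repr(C)]
pub struct LLVMValue([u8; 0]);

extern "C" {
    // LLVMValueRef LLVMBuildLoad2(LLVMBuilderRef, LLVMTypeRef,
    //                             LLVMValueRef, const char *);
    pub fn LLVMBuildLoad2(
        b: *mut LLVMBuilder,
        ty: *mut LLVMType,
        pointer_val: *mut LLVMValue,
        name: *const c_char,
    ) -> *mut LLVMValue;
}
```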

compiler/rustc_codegen_llvm/src/va_arg.rs (+12, -9)

@@ -32,14 +32,15 @@ fn emit_direct_ptr_va_arg(
     slot_size: Align,
     allow_higher_align: bool,
 ) -> (&'ll Value, Align) {
-    let va_list_ptr_ty = bx.cx().type_ptr_to(bx.cx.type_i8p());
+    let va_list_ty = bx.type_i8p();
+    let va_list_ptr_ty = bx.type_ptr_to(va_list_ty);
     let va_list_addr = if list.layout.llvm_type(bx.cx) != va_list_ptr_ty {
         bx.bitcast(list.immediate(), va_list_ptr_ty)
     } else {
         list.immediate()
     };
 
-    let ptr = bx.load(va_list_addr, bx.tcx().data_layout.pointer_align.abi);
+    let ptr = bx.load(va_list_ty, va_list_addr, bx.tcx().data_layout.pointer_align.abi);
 
     let (addr, addr_align) = if allow_higher_align && align > slot_size {
         (round_pointer_up_to_alignment(bx, ptr, align, bx.cx().type_i8p()), align)

@@ -82,10 +83,10 @@ fn emit_ptr_va_arg(
     let (addr, addr_align) =
         emit_direct_ptr_va_arg(bx, list, llty, size, align.abi, slot_size, allow_higher_align);
     if indirect {
-        let tmp_ret = bx.load(addr, addr_align);
-        bx.load(tmp_ret, align.abi)
+        let tmp_ret = bx.load(llty, addr, addr_align);
+        bx.load(bx.cx.layout_of(target_ty).llvm_type(bx.cx), tmp_ret, align.abi)
     } else {
-        bx.load(addr, addr_align)
+        bx.load(llty, addr, addr_align)
     }
 }
 
@@ -118,7 +119,7 @@ fn emit_aapcs_va_arg(
     };
 
     // if the offset >= 0 then the value will be on the stack
-    let mut reg_off_v = bx.load(reg_off, offset_align);
+    let mut reg_off_v = bx.load(bx.type_i32(), reg_off, offset_align);
     let use_stack = bx.icmp(IntPredicate::IntSGE, reg_off_v, zero);
     bx.cond_br(use_stack, &on_stack.llbb(), &maybe_reg.llbb());

@@ -139,8 +140,9 @@
     let use_stack = maybe_reg.icmp(IntPredicate::IntSGT, new_reg_off_v, zero);
     maybe_reg.cond_br(use_stack, &on_stack.llbb(), &in_reg.llbb());
 
+    let top_type = bx.type_i8p();
     let top = in_reg.struct_gep(va_list_addr, reg_top_index);
-    let top = in_reg.load(top, bx.tcx().data_layout.pointer_align.abi);
+    let top = in_reg.load(top_type, top, bx.tcx().data_layout.pointer_align.abi);
 
     // reg_value = *(@top + reg_off_v);
     let mut reg_addr = in_reg.gep(top, &[reg_off_v]);

@@ -149,8 +151,9 @@
         let offset = bx.const_i32((slot_size - layout.size.bytes()) as i32);
         reg_addr = in_reg.gep(reg_addr, &[offset]);
     }
-    let reg_addr = in_reg.bitcast(reg_addr, bx.cx.type_ptr_to(layout.llvm_type(bx)));
-    let reg_value = in_reg.load(reg_addr, layout.align.abi);
+    let reg_type = layout.llvm_type(bx);
+    let reg_addr = in_reg.bitcast(reg_addr, bx.cx.type_ptr_to(reg_type));
+    let reg_value = in_reg.load(reg_type, reg_addr, layout.align.abi);
     in_reg.br(&end.llbb());
 
     // On Stack block

compiler/rustc_codegen_ssa/src/meth.rs (+6, -4)

@@ -20,10 +20,11 @@ impl<'a, 'tcx> VirtualIndex {
         // Load the data pointer from the object.
         debug!("get_fn({:?}, {:?})", llvtable, self);
 
-        let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(bx.fn_ptr_backend_type(fn_abi)));
+        let llty = bx.fn_ptr_backend_type(fn_abi);
+        let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(llty));
         let ptr_align = bx.tcx().data_layout.pointer_align.abi;
         let gep = bx.inbounds_gep(llvtable, &[bx.const_usize(self.0)]);
-        let ptr = bx.load(gep, ptr_align);
+        let ptr = bx.load(llty, gep, ptr_align);
         bx.nonnull_metadata(ptr);
         // Vtable loads are invariant.
         bx.set_invariant_load(ptr);

@@ -38,10 +39,11 @@ impl<'a, 'tcx> VirtualIndex {
         // Load the data pointer from the object.
         debug!("get_int({:?}, {:?})", llvtable, self);
 
-        let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(bx.type_isize()));
+        let llty = bx.type_isize();
+        let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(llty));
         let usize_align = bx.tcx().data_layout.pointer_align.abi;
         let gep = bx.inbounds_gep(llvtable, &[bx.const_usize(self.0)]);
-        let ptr = bx.load(gep, usize_align);
+        let ptr = bx.load(llty, gep, usize_align);
         // Vtable loads are invariant.
         bx.set_invariant_load(ptr);
         ptr

compiler/rustc_codegen_ssa/src/mir/block.rs (+8, -6)

@@ -260,7 +260,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             PassMode::Direct(_) | PassMode::Pair(..) => {
                 let op = self.codegen_consume(&mut bx, mir::Place::return_place().as_ref());
                 if let Ref(llval, _, align) = op.val {
-                    bx.load(llval, align)
+                    bx.load(bx.backend_type(op.layout), llval, align)
                 } else {
                     op.immediate_or_packed_pair(&mut bx)
                 }

@@ -287,8 +287,9 @@
                         llval
                     }
                 };
-                let addr = bx.pointercast(llslot, bx.type_ptr_to(bx.cast_backend_type(&cast_ty)));
-                bx.load(addr, self.fn_abi.ret.layout.align.abi)
+                let ty = bx.cast_backend_type(&cast_ty);
+                let addr = bx.pointercast(llslot, bx.type_ptr_to(ty));
+                bx.load(ty, addr, self.fn_abi.ret.layout.align.abi)
             }
         };
         bx.ret(llval);

@@ -1086,15 +1087,16 @@
         if by_ref && !arg.is_indirect() {
             // Have to load the argument, maybe while casting it.
             if let PassMode::Cast(ty) = arg.mode {
-                let addr = bx.pointercast(llval, bx.type_ptr_to(bx.cast_backend_type(&ty)));
-                llval = bx.load(addr, align.min(arg.layout.align.abi));
+                let llty = bx.cast_backend_type(&ty);
+                let addr = bx.pointercast(llval, bx.type_ptr_to(llty));
+                llval = bx.load(llty, addr, align.min(arg.layout.align.abi));
             } else {
                 // We can't use `PlaceRef::load` here because the argument
                 // may have a type we don't treat as immediate, but the ABI
                 // used for this call is passing it by-value. In that case,
                 // the load would just produce `OperandValue::Ref` instead
                 // of the `OperandValue::Immediate` we need for the call.
-                llval = bx.load(llval, align);
+                llval = bx.load(bx.backend_type(arg.layout), llval, align);
                 if let abi::Abi::Scalar(ref scalar) = arg.layout.abi {
                     if scalar.is_bool() {
                         bx.range_metadata(llval, 0..2);

compiler/rustc_codegen_ssa/src/mir/debuginfo.rs (+1, -1)

@@ -274,7 +274,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             match *elem {
                 mir::ProjectionElem::Deref => {
                     indirect_offsets.push(Size::ZERO);
-                    place = place.project_deref(bx);
+                    place = bx.load_operand(place).deref(bx.cx());
                 }
                 mir::ProjectionElem::Field(field, _) => {
                     let i = field.index();
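The replacement composes two pieces that already exist elsewhere in codegen: `bx.load_operand(place)` performs the (now typed) load of the pointer as an `OperandRef`, and `OperandRef::deref` turns that pointer value back into a `PlaceRef` carrying the pointee's layout and alignment. The hand-rolled `PlaceRef::project_deref`, which issued its own untyped load, becomes dead code and is deleted in rustc_codegen_ssa/src/mir/place.rs below.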

compiler/rustc_codegen_ssa/src/mir/intrinsic.rs (+4, -5)

@@ -448,15 +448,14 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 if ty.is_unsafe_ptr() {
                     // Some platforms do not support atomic operations on pointers,
                     // so we cast to integer first...
-                    let ptr_llty = bx.type_ptr_to(bx.type_isize());
+                    let llty = bx.type_isize();
+                    let ptr_llty = bx.type_ptr_to(llty);
                     source = bx.pointercast(source, ptr_llty);
-                }
-                let result = bx.atomic_load(source, order, size);
-                if ty.is_unsafe_ptr() {
+                    let result = bx.atomic_load(llty, source, order, size);
                     // ... and then cast the result back to a pointer
                     bx.inttoptr(result, bx.backend_type(layout))
                 } else {
-                    result
+                    bx.atomic_load(bx.backend_type(layout), source, order, size)
                 }
             } else {
                 return invalid_monomorphization(ty);
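Restructuring the two `if ty.is_unsafe_ptr()` checks into one `if`/`else` lets the integer type flow straight into `atomic_load`. The underlying technique, doing the atomic access on a pointer-sized integer and casting back for targets without pointer atomics, can be shown as a runnable plain-Rust analogy (not the codegen itself):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

fn main() {
    let x = 42u32;
    // "pointercast to integer": stash the pointer as a usize.
    let slot = AtomicUsize::new(&x as *const u32 as usize);
    // atomic_load on the integer, then "inttoptr" back to a pointer.
    let p = slot.load(Ordering::SeqCst) as *const u32;
    assert_eq!(unsafe { *p }, 42);
}
```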

compiler/rustc_codegen_ssa/src/mir/operand.rs (+8)

@@ -289,6 +289,14 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
         }
         match self {
             OperandValue::Ref(r, None, source_align) => {
+                if flags.contains(MemFlags::NONTEMPORAL) {
+                    // HACK(nox): This is inefficient but there is no nontemporal memcpy.
+                    let ty = bx.backend_type(dest.layout);
+                    let ptr = bx.pointercast(r, bx.type_ptr_to(ty));
+                    let val = bx.load(ty, ptr, source_align);
+                    bx.store_with_flags(val, dest.llval, dest.align, flags);
+                    return;
+                }
                 base::memcpy_ty(bx, dest.llval, dest.align, r, source_align, dest.layout, flags)
             }
             OperandValue::Ref(_, Some(_), _) => {
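This is where the hack removed from builder.rs lands: `dest.layout` is in scope here, so the load type is `bx.backend_type(dest.layout)`, and the cast now happens on the source pointer rather than the destination (the repositioned cast the commit message mentions). For what a non-temporal store means at the machine level, a runnable x86-64 sketch using the real SSE2 intrinsic such stores typically lower to (`movnti`); an analogy, not rustc code:

```rust
// Non-temporal stores bypass the cache hierarchy; on x86-64 they are
// exposed in std::arch as _mm_stream_si32. A single-threaded read of
// the same location still observes the stored value.
#[cfg(target_arch = "x86_64")]
fn main() {
    use std::arch::x86_64::_mm_stream_si32;
    let mut slot: i32 = 0;
    unsafe { _mm_stream_si32(&mut slot, 7) };
    assert_eq!(slot, 7);
}

#[cfg(not(target_arch = "x86_64"))]
fn main() {}
```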

compiler/rustc_codegen_ssa/src/mir/place.rs (-12)

@@ -402,18 +402,6 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
         downcast
     }
 
-    pub fn project_deref<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) -> Self {
-        let target_ty = self.layout.ty.builtin_deref(true).expect("failed to deref");
-        let layout = bx.layout_of(target_ty.ty);
-
-        PlaceRef {
-            llval: bx.load(self.llval, self.align),
-            llextra: None,
-            layout,
-            align: layout.align.abi,
-        }
-    }
-
     pub fn storage_live<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
         bx.lifetime_start(self.llval, self.layout.size);
     }

compiler/rustc_codegen_ssa/src/traits/builder.rs (+9, -3)

@@ -137,9 +137,15 @@ pub trait BuilderMethods<'a, 'tcx>:
     fn dynamic_alloca(&mut self, ty: Self::Type, align: Align) -> Self::Value;
     fn array_alloca(&mut self, ty: Self::Type, len: Self::Value, align: Align) -> Self::Value;
 
-    fn load(&mut self, ptr: Self::Value, align: Align) -> Self::Value;
-    fn volatile_load(&mut self, ptr: Self::Value) -> Self::Value;
-    fn atomic_load(&mut self, ptr: Self::Value, order: AtomicOrdering, size: Size) -> Self::Value;
+    fn load(&mut self, ty: Self::Type, ptr: Self::Value, align: Align) -> Self::Value;
+    fn volatile_load(&mut self, ty: Self::Type, ptr: Self::Value) -> Self::Value;
+    fn atomic_load(
+        &mut self,
+        ty: Self::Type,
+        ptr: Self::Value,
+        order: AtomicOrdering,
+        size: Size,
+    ) -> Self::Value;
     fn load_operand(&mut self, place: PlaceRef<'tcx, Self::Value>)
         -> OperandRef<'tcx, Self::Value>;

compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp (+3, -4)

@@ -349,11 +349,10 @@ extern "C" void LLVMRustSetFastMath(LLVMValueRef V) {
 }
 
 extern "C" LLVMValueRef
-LLVMRustBuildAtomicLoad(LLVMBuilderRef B, LLVMValueRef Source, const char *Name,
-                        LLVMAtomicOrdering Order) {
+LLVMRustBuildAtomicLoad(LLVMBuilderRef B, LLVMTypeRef Ty, LLVMValueRef Source,
+                        const char *Name, LLVMAtomicOrdering Order) {
   Value *Ptr = unwrap(Source);
-  Type *Ty = Ptr->getType()->getPointerElementType();
-  LoadInst *LI = unwrap(B)->CreateLoad(Ty, Ptr, Name);
+  LoadInst *LI = unwrap(B)->CreateLoad(unwrap(Ty), Ptr, Name);
   LI->setAtomic(fromRust(Order));
   return wrap(LI);
 }
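The deleted `getPointerElementType()` call is the crux of the C++ side: asking a pointer for its pointee type is exactly the query that opaque pointers make impossible (LLVM later deprecated that API as part of the migration), so the element type now arrives from the caller, matching `LLVMBuildLoad2`.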
