Commit 68217c9

ignore zst offsets instead
1 parent e9bc3dd commit 68217c9

2 files changed: +76 -87 lines changed

compiler/rustc_codegen_ssa/src/mir/place.rs

+23 -9
@@ -93,15 +93,29 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
         let effective_field_align = self.align.restrict_for_offset(offset);
 
         let mut simple = || {
-            // Unions and newtypes only use an offset of 0.
-            let llval = if offset.bytes() == 0 {
-                self.llval
-            } else if let Abi::ScalarPair(ref a, ref b) = self.layout.abi {
-                // Offsets have to match either first or second field.
-                assert_eq!(offset, a.value.size(bx.cx()).align_to(b.value.align(bx.cx()).abi));
-                bx.struct_gep(self.llval, 1)
-            } else {
-                bx.struct_gep(self.llval, bx.cx().backend_field_index(self.layout, ix))
+            let llval = match self.layout.abi {
+                _ if offset.bytes() == 0 => {
+                    // Unions and newtypes only use an offset of 0.
+                    // Also handles the first field of Scalar and ScalarPair layouts.
+                    self.llval
+                }
+                Abi::ScalarPair(ref a, ref b)
+                    if offset == a.value.size(bx.cx()).align_to(b.value.align(bx.cx()).abi) =>
+                {
+                    // Offset matches second field.
+                    bx.struct_gep(self.llval, 1)
+                }
+                Abi::ScalarPair(..) | Abi::Scalar(_) => {
+                    // ZST fields are not included in Scalar and ScalarPair layouts, so manually offset the pointer.
+                    assert!(
+                        field.is_zst(),
+                        "non-ZST field offset does not match layout: {:?}",
+                        field
+                    );
+                    let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
+                    bx.gep(byte_ptr, &[bx.const_usize(offset.bytes())])
+                }
+                _ => bx.struct_gep(self.llval, bx.cx().backend_field_index(self.layout, ix)),
             };
             PlaceRef {
                 // HACK(eddyb): have to bitcast pointers until LLVM removes pointee types.
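
The new Abi::ScalarPair(..) | Abi::Scalar(_) arm handles ZST fields that sit at a nonzero offset inside a layout whose ABI has no slot for them: the place is computed by byte-offsetting the pointer rather than by a struct GEP. A minimal standalone sketch of such a type (WithTail, head, and tail are hypothetical names, not from this commit):

struct WithTail<T: ?Sized> {
    head: u32,
    tail: T,
}

fn main() {
    // With `T = ()`, only `head` occupies memory, so the whole struct can
    // get a plain `Scalar` ABI, while the ZST `tail` still lives at byte
    // offset 4, right after `head`, under rustc's current layout rules.
    let s = WithTail { head: 1u32, tail: () };
    let head_addr = &s.head as *const u32 as usize;
    let tail_addr = &s.tail as *const () as usize;
    println!("tail offset = {}", tail_addr - head_addr); // expected: 4
}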

compiler/rustc_middle/src/ty/layout.rs

+53 -78
@@ -289,32 +289,25 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
 
         let optimize = !repr.inhibit_struct_field_reordering_opt();
         if optimize {
+            let end =
+                if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
+            let optimizing = &mut inverse_memory_index[..end];
             let field_align = |f: &TyAndLayout<'_>| {
                 if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
             };
             match kind {
-                StructKind::AlwaysSized => {
-                    inverse_memory_index.sort_by_key(|&x| {
+                StructKind::AlwaysSized | StructKind::MaybeUnsized => {
+                    optimizing.sort_by_key(|&x| {
                         // Place ZSTs first to avoid "interesting offsets",
                         // especially with only one or two non-ZST fields.
                         let f = &fields[x as usize];
                         (!f.is_zst(), cmp::Reverse(field_align(f)))
                     });
                 }
-                StructKind::MaybeUnsized => {
-                    // Sort in descending alignment, except for the last field,
-                    // which may be accessed through an unsized type.
-                    inverse_memory_index[..fields.len() - 1]
-                        .sort_by_key(|&x| cmp::Reverse(field_align(&fields[x as usize])));
-                    // Place ZSTs first to avoid "interesting offsets".
-                    // This will reorder the last field if it is a ZST, which is okay because
-                    // there's nothing in memory that could be accessed through an unsized type.
-                    inverse_memory_index.sort_by_key(|&x| !fields[x as usize].is_zst());
-                }
                 StructKind::Prefixed(..) => {
                     // Sort in ascending alignment so that the layout stay optimal
                     // regardless of the prefix
-                    inverse_memory_index.sort_by_key(|&x| field_align(&fields[x as usize]));
+                    optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
                 }
             }
         }
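
As an aside, the sort above can be read in isolation. The following self-contained sketch (FieldInfo and optimize_order are hypothetical stand-ins for the compiler's TyAndLayout and this routine) reproduces the ordering: a possibly-unsized trailing field stays pinned, ZSTs go first, and the remaining fields are sorted by descending alignment:

use std::cmp;

// Hypothetical stand-in for the compiler's `TyAndLayout`.
struct FieldInfo {
    size: u64,
    align: u64,
}

impl FieldInfo {
    fn is_zst(&self) -> bool {
        self.size == 0
    }
}

// Mirrors the sort above: pin the trailing field when it may be unsized,
// then place ZSTs first and sort the rest by descending alignment.
fn optimize_order(fields: &[FieldInfo], maybe_unsized: bool) -> Vec<u32> {
    let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
    let end = if maybe_unsized { fields.len() - 1 } else { fields.len() };
    let optimizing = &mut inverse_memory_index[..end];
    // `false < true`, so `!is_zst()` puts ZSTs first; `Reverse` flips the
    // alignment comparison into descending order.
    optimizing.sort_by_key(|&x| {
        let f = &fields[x as usize];
        (!f.is_zst(), cmp::Reverse(f.align))
    });
    inverse_memory_index
}

fn main() {
    let fields = [
        FieldInfo { size: 1, align: 1 }, // e.g. u8
        FieldInfo { size: 0, align: 4 }, // e.g. [u32; 0], a ZST
        FieldInfo { size: 4, align: 4 }, // e.g. u32
    ];
    // ZST first, then the align-4 field, then the align-1 field.
    assert_eq!(optimize_order(&fields, false), vec![1, 2, 0]);
}

Note that sort_by_key is stable, so equally-aligned fields keep their source order.
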
@@ -397,78 +390,60 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
 
         // Unpack newtype ABIs and find scalar pairs.
         if sized && size.bytes() > 0 {
-            // All other fields must be ZSTs, and we need them to all start at 0.
-            let mut zst_offsets = offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst());
-            if zst_offsets.all(|(_, o)| o.bytes() == 0) {
-                let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
-
-                match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
-                    // We have exactly one non-ZST field.
-                    (Some((i, field)), None, None) => {
-                        // Field fills the struct and it has a scalar or scalar pair ABI.
-                        if offsets[i].bytes() == 0
-                            && align.abi == field.align.abi
-                            && size == field.size
-                        {
-                            match field.abi {
-                                // For plain scalars, or vectors of them, we can't unpack
-                                // newtypes for `#[repr(C)]`, as that affects C ABIs.
-                                Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
-                                    abi = field.abi.clone();
-                                }
-                                // But scalar pairs are Rust-specific and get
-                                // treated as aggregates by C ABIs anyway.
-                                Abi::ScalarPair(..) => {
-                                    abi = field.abi.clone();
-                                }
-                                _ => {}
-                            }
-                        }
-                    }
-
-                    // Two non-ZST fields, and they're both scalars.
-                    (
-                        Some((
-                            i,
-                            &TyAndLayout {
-                                layout: &Layout { abi: Abi::Scalar(ref a), .. }, ..
-                            },
-                        )),
-                        Some((
-                            j,
-                            &TyAndLayout {
-                                layout: &Layout { abi: Abi::Scalar(ref b), .. }, ..
-                            },
-                        )),
-                        None,
-                    ) => {
-                        // Order by the memory placement, not source order.
-                        let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
-                            ((i, a), (j, b))
-                        } else {
-                            ((j, b), (i, a))
-                        };
-                        let pair = self.scalar_pair(a.clone(), b.clone());
-                        let pair_offsets = match pair.fields {
-                            FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
-                                assert_eq!(memory_index, &[0, 1]);
-                                offsets
-                            }
-                            _ => bug!(),
-                        };
-                        if offsets[i] == pair_offsets[0]
-                            && offsets[j] == pair_offsets[1]
-                            && align == pair.align
-                            && size == pair.size
-                        {
-                            // We can use `ScalarPair` only when it matches our
-                            // already computed layout (including `#[repr(C)]`).
-                            abi = pair.abi;
-                        }
-                    }
-
-                    _ => {}
-                }
+            // All other fields must be ZSTs.
+            let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
+
+            match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
+                // We have exactly one non-ZST field.
+                (Some((i, field)), None, None) => {
+                    // Field fills the struct and it has a scalar or scalar pair ABI.
+                    if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
+                    {
+                        match field.abi {
+                            // For plain scalars, or vectors of them, we can't unpack
+                            // newtypes for `#[repr(C)]`, as that affects C ABIs.
+                            Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
+                                abi = field.abi.clone();
+                            }
+                            // But scalar pairs are Rust-specific and get
+                            // treated as aggregates by C ABIs anyway.
+                            Abi::ScalarPair(..) => {
+                                abi = field.abi.clone();
+                            }
+                            _ => {}
+                        }
+                    }
+                }
+
+                // Two non-ZST fields, and they're both scalars.
+                (
+                    Some((i, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref a), .. }, .. })),
+                    Some((j, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(ref b), .. }, .. })),
+                    None,
+                ) => {
+                    // Order by the memory placement, not source order.
+                    let ((i, a), (j, b)) =
+                        if offsets[i] < offsets[j] { ((i, a), (j, b)) } else { ((j, b), (i, a)) };
+                    let pair = self.scalar_pair(a.clone(), b.clone());
+                    let pair_offsets = match pair.fields {
+                        FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
+                            assert_eq!(memory_index, &[0, 1]);
+                            offsets
+                        }
+                        _ => bug!(),
+                    };
+                    if offsets[i] == pair_offsets[0]
+                        && offsets[j] == pair_offsets[1]
+                        && align == pair.align
+                        && size == pair.size
+                    {
+                        // We can use `ScalarPair` only when it matches our
+                        // already computed layout (including `#[repr(C)]`).
+                        abi = pair.abi;
+                    }
+                }
+
+                _ => {}
             }
         }
 
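
For a concrete illustration of what the deleted zst_offsets check used to reject (a hypothetical type, not taken from the commit): under #[repr(C)] the ZST b is pinned at offset 4, so the old code bailed out of unpacking entirely; with ZST offsets ignored, only the two u32 fields decide the ABI, the struct can get Abi::ScalarPair, and the place.rs change above handles a direct access to b.

// Hypothetical example: `b` is a ZST at offset 4 under `#[repr(C)]`.
// Only `a` and `c` occupy memory, at offsets 0 and 4, matching the
// computed scalar-pair layout, so `abi` can become `Abi::ScalarPair`.
#[repr(C)]
struct S {
    a: u32,
    b: (), // ZST at offset 4: previously blocked the optimization
    c: u32,
}

fn main() {
    // 8 bytes total: two u32 scalars; the ZST `b` adds nothing.
    println!("size = {}", std::mem::size_of::<S>());
}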
