Commit f62e43da authored by Eduard-Mihai Burtescu

rustc: track validity ranges for layout::Abi::Scalar values.

Parent 5df25c4a
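
For orientation: every hunk below consumes a scalar layout of roughly the following shape. This is a sketch inferred from the usage in the diff (`scalar.value`, `scalar.valid_range.start`/`.end`, `scalar.is_bool()`), not the verbatim rustc definition; note that the 2017-era std `RangeInclusive` exposed public `start`/`end` fields, which is why the diff accesses them directly.

use std::ops::RangeInclusive;

// Hypothetical minimal mirror of the layout types referenced in the diff.
#[derive(PartialEq)]
enum Primitive {
    Int(u32, bool), // (bit width, signedness); rustc uses layout::Integer here
    F32,
    F64,
    Pointer,
}

struct Scalar {
    value: Primitive,
    // Inclusive range of valid bit patterns: 0..=1 for bool,
    // 0..=0x10FFFF for char, 1..=!0 for non-null pointers, etc.
    valid_range: RangeInclusive<u128>,
}

impl Scalar {
    fn is_bool(&self) -> bool {
        // bool: an unsigned byte whose only valid values are 0 and 1.
        self.value == Primitive::Int(8, false) && self.valid_range == (0..=1)
    }
}

fn main() {
    let b = Scalar { value: Primitive::Int(8, false), valid_range: 0..=1 };
    assert!(b.is_bool());
}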
This diff is collapsed.
@@ -753,8 +753,8 @@ fn check_item(&mut self, cx: &LateContext, it: &hir::Item) {
             bug!("failed to get layout for `{}`: {}", t, e)
         });

-        if let layout::Variants::Tagged { ref variants, discr, .. } = layout.variants {
-            let discr_size = discr.size(cx.tcx).bytes();
+        if let layout::Variants::Tagged { ref variants, ref discr, .. } = layout.variants {
+            let discr_size = discr.value.size(cx.tcx).bytes();

             debug!("enum `{}` is {} bytes large with layout:\n{:#?}",
                    t, layout.size.bytes(), layout);
......
@@ -287,8 +287,8 @@ fn is_aggregate(&self) -> bool {
     fn homogeneous_aggregate<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Option<Reg> {
         match self.abi {
             // The primitive for this algorithm.
-            layout::Abi::Scalar(value) => {
-                let kind = match value {
+            layout::Abi::Scalar(ref scalar) => {
+                let kind = match scalar.value {
                     layout::Int(..) |
                     layout::Pointer => RegKind::Integer,
                     layout::F32 |
@@ -471,8 +471,8 @@ pub fn ignore(&mut self) {
     pub fn extend_integer_width_to(&mut self, bits: u64) {
         // Only integers have signedness
-        match self.layout.abi {
-            layout::Abi::Scalar(layout::Int(i, signed)) => {
+        if let layout::Abi::Scalar(ref scalar) = self.layout.abi {
+            if let layout::Int(i, signed) = scalar.value {
                 if i.size().bits() < bits {
                     self.attrs.set(if signed {
                         ArgAttribute::SExt
@@ -481,8 +481,6 @@ pub fn extend_integer_width_to(&mut self, bits: u64) {
                     });
                 }
             }
-            _ => {}
-        }
     }
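
The decision the rewritten code encodes, pulled out as a standalone sketch (the names here are illustrative, not rustc API): integer arguments narrower than the target register width get an extension attribute, and signedness picks which one.

#[derive(Debug, PartialEq)]
enum Ext { SExt, ZExt }

// Mirror of the decision in extend_integer_width_to (a sketch):
fn extension_for(arg_bits: u64, bits: u64, signed: bool) -> Option<Ext> {
    if arg_bits < bits {
        Some(if signed { Ext::SExt } else { Ext::ZExt })
    } else {
        None // already wide enough; floats and pointers never reach here
    }
}

fn main() {
    assert_eq!(extension_for(16, 32, true), Some(Ext::SExt));  // i16 -> sext
    assert_eq!(extension_for(16, 32, false), Some(Ext::ZExt)); // u16 -> zext
    assert_eq!(extension_for(32, 32, true), None);
}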
@@ -695,9 +693,12 @@ pub fn unadjusted(ccx: &CrateContext<'a, 'tcx>,
         let arg_of = |ty: Ty<'tcx>, is_return: bool| {
             let mut arg = ArgType::new(ccx.layout_of(ty));
-            if let layout::Abi::Scalar(layout::Int(layout::I1, _)) = arg.layout.abi {
-                arg.attrs.set(ArgAttribute::ZExt);
-            } else if arg.layout.is_zst() {
+            if let layout::Abi::Scalar(ref scalar) = arg.layout.abi {
+                if scalar.is_bool() {
+                    arg.attrs.set(ArgAttribute::ZExt);
+                }
+            }
+            if arg.layout.is_zst() {
                 // For some forsaken reason, x86_64-pc-windows-gnu
                 // doesn't ignore zero-sized struct arguments.
                 // The same is true for s390x-unknown-linux-gnu.
......
@@ -375,11 +375,12 @@ pub fn from_immediate(bcx: &Builder, val: ValueRef) -> ValueRef {
 }

 pub fn to_immediate(bcx: &Builder, val: ValueRef, layout: layout::TyLayout) -> ValueRef {
-    if let layout::Abi::Scalar(layout::Int(layout::I1, _)) = layout.abi {
-        bcx.trunc(val, Type::i1(bcx.ccx))
-    } else {
-        val
+    if let layout::Abi::Scalar(ref scalar) = layout.abi {
+        if scalar.is_bool() {
+            return bcx.trunc(val, Type::i1(bcx.ccx));
+        }
     }
+    val
 }

 pub fn call_memcpy(b: &Builder,
......
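
What `to_immediate` (and its dual `from_immediate`) implement for bool, restated at the value level. A sketch of the convention assumed throughout this commit: in memory a bool is an i8 restricted to {0, 1}; as an SSA immediate it is an i1.

// Memory form: one byte restricted to {0, 1}. Immediate form: a 1-bit value.
fn to_immediate_bool(mem: u8) -> bool {
    mem & 1 != 0 // models `bcx.trunc(val, Type::i1(...))`: keep the low bit
}

fn from_immediate_bool(b: bool) -> u8 {
    b as u8 // models the zero-extension used on the store path
}

fn main() {
    assert_eq!(from_immediate_bool(to_immediate_bool(1)), 1);
    assert_eq!(from_immediate_bool(to_immediate_bool(0)), 0);
}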
@@ -27,8 +27,12 @@ fn classify_ret_ty(ret: &mut ArgType) {
 fn is_single_fp_element<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                                   layout: TyLayout<'tcx>) -> bool {
     match layout.abi {
-        layout::Abi::Scalar(layout::F32) |
-        layout::Abi::Scalar(layout::F64) => true,
+        layout::Abi::Scalar(ref scalar) => {
+            match scalar.value {
+                layout::F32 | layout::F64 => true,
+                _ => false
+            }
+        }
         layout::Abi::Aggregate { .. } => {
             if layout.fields.count() == 1 && layout.fields.offset(0).bytes() == 0 {
                 is_single_fp_element(ccx, layout.field(ccx, 0))
......
@@ -22,8 +22,12 @@ pub enum Flavor {
 fn is_single_fp_element<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                                   layout: TyLayout<'tcx>) -> bool {
     match layout.abi {
-        layout::Abi::Scalar(layout::F32) |
-        layout::Abi::Scalar(layout::F64) => true,
+        layout::Abi::Scalar(ref scalar) => {
+            match scalar.value {
+                layout::F32 | layout::F64 => true,
+                _ => false
+            }
+        }
         layout::Abi::Aggregate { .. } => {
             if layout.fields.count() == 1 && layout.fields.offset(0).bytes() == 0 {
                 is_single_fp_element(ccx, layout.field(ccx, 0))
......
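
An illustration of what `is_single_fp_element` is probing for in these two ABI backends: an aggregate whose sole field is a float at offset 0 still counts as a single floating-point element, so it can travel in an FP register. The struct names below are made up for the example.

#[repr(C)]
struct Meters(f64); // one f64 field at offset 0: a "single fp element"

#[repr(C)]
struct Pair(f32, f32); // two fields: not a single fp element

fn main() {
    let (m, p) = (Meters(1.5), Pair(1.0, 2.0));
    let _ = (m.0, p.0, p.1);
}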
@@ -65,8 +65,8 @@ fn classify<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
     }

     match layout.abi {
-        layout::Abi::Scalar(value) => {
-            let reg = match value {
+        layout::Abi::Scalar(ref scalar) => {
+            let reg = match scalar.value {
                 layout::Int(..) |
                 layout::Pointer => Class::Int,
                 layout::F32 |
......
@@ -1429,11 +1429,13 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
     let discriminant_type_metadata = match layout.variants {
         layout::Variants::Single { .. } |
         layout::Variants::NicheFilling { .. } => None,
-        layout::Variants::Tagged { discr, .. } => Some(discriminant_type_metadata(discr)),
+        layout::Variants::Tagged { ref discr, .. } => {
+            Some(discriminant_type_metadata(discr.value))
+        }
     };

-    match (layout.abi, discriminant_type_metadata) {
-        (layout::Abi::Scalar(_), Some(discr)) => return FinalMetadata(discr),
+    match (&layout.abi, discriminant_type_metadata) {
+        (&layout::Abi::Scalar(_), Some(discr)) => return FinalMetadata(discr),
         _ => {}
     }
......
@@ -26,6 +26,7 @@
 #![feature(i128_type)]
 #![feature(i128)]
 #![feature(inclusive_range)]
+#![feature(inclusive_range_syntax)]
 #![feature(libc)]
 #![feature(quote)]
 #![feature(rustc_diagnostic_macros)]
......
@@ -671,10 +671,17 @@ fn trans_argument(&mut self,
                               (align | Alignment::Packed(arg.layout.align))
                                   .non_abi());
                 } else {
+                    // We can't use `LvalueRef::load` here because the argument
+                    // may have a type we don't treat as immediate, but the ABI
+                    // used for this call is passing it by-value. In that case,
+                    // the load would just produce `OperandValue::Ref` instead
+                    // of the `OperandValue::Immediate` we need for the call.
                     llval = bcx.load(llval, align.non_abi());
                 }
-                if let layout::Abi::Scalar(layout::Int(layout::I1, _)) = arg.layout.abi {
-                    bcx.range_metadata(llval, 0..2);
+                if let layout::Abi::Scalar(ref scalar) = arg.layout.abi {
+                    if scalar.is_bool() {
+                        bcx.range_metadata(llval, 0..2);
+                    }
                 }
                 // We store bools as i8 so we need to truncate to i1.
                 llval = base::to_immediate(bcx, llval, arg.layout);
             }
......
@@ -455,9 +455,9 @@ fn const_lvalue(&self, lvalue: &mir::Lvalue<'tcx>, span: Span)
                           Value(base));
             }
             let layout = self.ccx.layout_of(projected_ty);
-            if let layout::Abi::Scalar(layout::Int(layout::I1, _)) = layout.abi {
+            if let layout::Abi::Scalar(ref scalar) = layout.abi {
                 let i1_type = Type::i1(self.ccx);
-                if val_ty(val) != i1_type {
+                if scalar.is_bool() && val_ty(val) != i1_type {
                     unsafe {
                         val = llvm::LLVMConstTrunc(val, i1_type.to_ref());
                     }
@@ -685,10 +685,14 @@ fn const_rvalue(&self, rvalue: &mir::Rvalue<'tcx>,
                         assert!(cast_layout.is_llvm_immediate());
                         let ll_t_out = cast_layout.immediate_llvm_type(self.ccx);
                         let llval = operand.llval;
-                        let signed = match self.ccx.layout_of(operand.ty).abi {
-                            layout::Abi::Scalar(layout::Int(_, signed)) => signed,
-                            _ => false
-                        };
+                        let mut signed = false;
+                        let l = self.ccx.layout_of(operand.ty);
+                        if let layout::Abi::Scalar(ref scalar) = l.abi {
+                            if let layout::Int(_, true) = scalar.value {
+                                signed = true;
+                            }
+                        }

                         unsafe {
                             match (r_t_in, r_t_out) {
......
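
Why the const-eval cast needs the signedness of the *source* layout, shown at the user level: widening follows the source type.

fn main() {
    let x: i8 = -1;
    assert_eq!(x as i32, -1);  // signed source: sign-extend
    let y = x as u8;           // same bits, 0xFF
    assert_eq!(y as i32, 255); // unsigned source: zero-extend
}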
@@ -148,16 +148,29 @@ pub fn load(&self, bcx: &Builder<'a, 'tcx>) -> OperandRef<'tcx> {
                 const_llval
             } else {
                 let load = bcx.load(self.llval, self.alignment.non_abi());
-                if self.layout.ty.is_bool() {
-                    bcx.range_metadata(load, 0..2);
-                } else if self.layout.ty.is_char() {
-                    // a char is a Unicode codepoint, and so takes values from 0
-                    // to 0x10FFFF inclusive only.
-                    bcx.range_metadata(load, 0..0x10FFFF+1);
-                } else if self.layout.ty.is_region_ptr() ||
-                          self.layout.ty.is_box() ||
-                          self.layout.ty.is_fn() {
-                    bcx.nonnull_metadata(load);
+                if let layout::Abi::Scalar(ref scalar) = self.layout.abi {
+                    let (min, max) = (scalar.valid_range.start, scalar.valid_range.end);
+                    let max_next = max.wrapping_add(1);
+                    let bits = scalar.value.size(bcx.ccx).bits();
+                    assert!(bits <= 128);
+                    let mask = !0u128 >> (128 - bits);
+                    // For a (max) value of -1, max will be `-1 as usize`, which overflows.
+                    // However, that is fine here (it would still represent the full range),
+                    // i.e., if the range is everything. The lo==hi case would be
+                    // rejected by the LLVM verifier (it would mean either an
+                    // empty set, which is impossible, or the entire range of the
+                    // type, which is pointless).
+                    match scalar.value {
+                        layout::Int(..) if max_next & mask != min & mask => {
+                            // llvm::ConstantRange can deal with ranges that wrap around,
+                            // so an overflow on (max + 1) is fine.
+                            bcx.range_metadata(load, min..max_next);
+                        }
+                        layout::Pointer if 0 < min && min < max => {
+                            bcx.nonnull_metadata(load);
+                        }
+                        _ => {}
+                    }
                 }
                 load
             };
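
The wrap-around test above, re-derived as a standalone function (`range_metadata_bounds` is a name invented for this note). It returns the half-open `!range` bounds to attach to an integer load, or `None` when the valid range already covers every bit pattern of the type, which LLVM would reject as an empty/full range.

fn range_metadata_bounds(min: u128, max: u128, bits: u32) -> Option<(u128, u128)> {
    assert!(bits >= 1 && bits <= 128);
    let mask = !0u128 >> (128 - bits);
    let max_next = max.wrapping_add(1);
    // If min..=max covers the whole type, max_next wraps to min modulo the
    // type's width and nothing is emitted. Otherwise llvm::ConstantRange
    // accepts wrapping ranges, so an overflowed max_next is fine.
    if max_next & mask != min & mask {
        Some((min, max_next))
    } else {
        None
    }
}

fn main() {
    assert_eq!(range_metadata_bounds(0, 1, 8), Some((0, 2)));                // bool
    assert_eq!(range_metadata_bounds(0, 0x10FFFF, 32), Some((0, 0x110000))); // char
    assert_eq!(range_metadata_bounds(0, 0xFF, 8), None);                     // full u8
}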
@@ -274,48 +287,18 @@ pub fn trans_get_discr(self, bcx: &Builder<'a, 'tcx>, cast_to: Ty<'tcx>) -> ValueRef {
         let cast_to = bcx.ccx.layout_of(cast_to).immediate_llvm_type(bcx.ccx);
         match self.layout.variants {
             layout::Variants::Single { index } => {
-                assert_eq!(index, 0);
-                return C_uint(cast_to, 0);
+                return C_uint(cast_to, index as u64);
             }
             layout::Variants::Tagged { .. } |
             layout::Variants::NicheFilling { .. } => {},
         }

         let discr = self.project_field(bcx, 0);
-        let discr_scalar = match discr.layout.abi {
-            layout::Abi::Scalar(discr) => discr,
-            _ => bug!("discriminant not scalar: {:#?}", discr.layout)
-        };
-        let (min, max) = match self.layout.variants {
-            layout::Variants::Tagged { ref discr_range, .. } => {
-                (discr_range.start, discr_range.end)
-            }
-            _ => (0, !0),
-        };
-        let max_next = max.wrapping_add(1);
-        let bits = discr_scalar.size(bcx.ccx).bits();
-        assert!(bits <= 128);
-        let mask = !0u128 >> (128 - bits);
-        let lldiscr = bcx.load(discr.llval, discr.alignment.non_abi());
-        match discr_scalar {
-            // For a (max) discr of -1, max will be `-1 as usize`, which overflows.
-            // However, that is fine here (it would still represent the full range),
-            layout::Int(..) if max_next & mask != min & mask => {
-                // llvm::ConstantRange can deal with ranges that wrap around,
-                // so an overflow on (max + 1) is fine.
-                bcx.range_metadata(lldiscr, min..max_next);
-            }
-            _ => {
-                // i.e., if the range is everything. The lo==hi case would be
-                // rejected by the LLVM verifier (it would mean either an
-                // empty set, which is impossible, or the entire range of the
-                // type, which is pointless).
-            }
-        };
+        let lldiscr = discr.load(bcx).immediate();
         match self.layout.variants {
             layout::Variants::Single { .. } => bug!(),
-            layout::Variants::Tagged { .. } => {
-                let signed = match discr_scalar {
+            layout::Variants::Tagged { ref discr, .. } => {
+                let signed = match discr.value {
                     layout::Int(_, signed) => signed,
                     _ => false
                 };
......
@@ -119,6 +119,7 @@ pub fn trans_rvalue(&mut self,
                 }

                 // Use llvm.memset.p0i8.* to initialize byte arrays
+                let v = base::from_immediate(&bcx, v);
                 if common::val_ty(v) == Type::i8(bcx.ccx) {
                     base::call_memset(&bcx, start, v, size, align, false);
                     return bcx;
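
The case the added `from_immediate` call covers (an illustration): byte-array initialization takes the `llvm.memset` fast path, and the fill value must be the i8 memory form, while `false` now arrives as an i1 immediate.

fn main() {
    // Candidate for the memset path: the fill byte is the memory form
    // of `false`, i.e. from_immediate(false) == 0u8.
    let flags = [false; 64];
    assert!(flags.iter().all(|&f| !f));
}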
@@ -278,28 +279,25 @@ pub fn trans_rvalue_operand(&mut self,
                         let ll_t_out = cast.immediate_llvm_type(bcx.ccx);
                         let llval = operand.immediate();
-                        match operand.layout.variants {
-                            layout::Variants::Tagged {
-                                ref discr_range, ..
-                            } if discr_range.end > discr_range.start => {
-                                // We want `table[e as usize]` to not
-                                // have bound checks, and this is the most
-                                // convenient place to put the `assume`.
-                                base::call_assume(&bcx, bcx.icmp(
-                                    llvm::IntULE,
-                                    llval,
-                                    C_uint_big(ll_t_in, discr_range.end)
-                                ));
-                            }
-                            _ => {}
-                        }
-                        let signed = match operand.layout.abi {
-                            layout::Abi::Scalar(layout::Int(_, signed)) => signed,
-                            _ => false
-                        };
+                        let mut signed = false;
+                        if let layout::Abi::Scalar(ref scalar) = operand.layout.abi {
+                            if let layout::Int(_, s) = scalar.value {
+                                signed = s;
+                                if scalar.valid_range.end > scalar.valid_range.start {
+                                    // We want `table[e as usize]` to not
+                                    // have bound checks, and this is the most
+                                    // convenient place to put the `assume`.
+                                    base::call_assume(&bcx, bcx.icmp(
+                                        llvm::IntULE,
+                                        llval,
+                                        C_uint_big(ll_t_in, scalar.valid_range.end)
+                                    ));
+                                }
+                            }
+                        }

                         let newval = match (r_t_in, r_t_out) {
                             (CastTy::Int(_), CastTy::Int(_)) => {
                                 bcx.intcast(llval, ll_t_out, signed)
......
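
What the relocated `assume` buys, shown at the user level: casting an enum whose tag scalar has a restricted `valid_range` tells LLVM the index can never exceed the range's end, so the array bounds check can be dropped. The enum and function below are made up for the example.

enum Dir { North, East, South, West } // tag is a Scalar with valid_range 0..=3

fn name(d: Dir) -> &'static str {
    const NAMES: [&str; 4] = ["north", "east", "south", "west"];
    // The `assume(d as usize <= 3)` emitted at the cast lets LLVM drop
    // the bounds check on this index.
    NAMES[d as usize]
}

fn main() {
    assert_eq!(name(Dir::South), "south");
}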
@@ -268,7 +268,6 @@ pub fn int_width(&self) -> u64 {
 pub fn from_integer(cx: &CrateContext, i: layout::Integer) -> Type {
     use rustc::ty::layout::Integer::*;
     match i {
-        I1 => Type::i1(cx),
         I8 => Type::i8(cx),
         I16 => Type::i16(cx),
         I32 => Type::i32(cx),
......
@@ -176,14 +176,13 @@ fn is_llvm_immediate(&self) -> bool {
     /// of that field's type - this is useful for taking the address of
     /// that field and ensuring the struct has the right alignment.
     fn llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type {
-        if let layout::Abi::Scalar(value) = self.abi {
+        if let layout::Abi::Scalar(ref scalar) = self.abi {
             // Use a different cache for scalars because pointers to DSTs
             // can be either fat or thin (data pointers of fat pointers).
             if let Some(&llty) = ccx.scalar_lltypes().borrow().get(&self.ty) {
                 return llty;
             }
-            let llty = match value {
-                layout::Int(layout::I1, _) => Type::i8(ccx),
+            let llty = match scalar.value {
                 layout::Int(i, _) => Type::from_integer(ccx, i),
                 layout::F32 => Type::f32(ccx),
                 layout::F64 => Type::f64(ccx),
@@ -249,11 +248,12 @@ fn llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type {
     }

     fn immediate_llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type {
-        if let layout::Abi::Scalar(layout::Int(layout::I1, _)) = self.abi {
-            Type::i1(ccx)
-        } else {
-            self.llvm_type(ccx)
+        if let layout::Abi::Scalar(ref scalar) = self.abi {
+            if scalar.is_bool() {
+                return Type::i1(ccx);
+            }
         }
+        self.llvm_type(ccx)
     }

     fn over_align(&self) -> Option<Align> {
......
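
Background for the cache comment in `llvm_type` above, demonstrated with facts that hold at the user level: pointers to DSTs are fat (data pointer plus metadata) while pointers to sized types are thin, so the scalar cache is keyed by the Rust type rather than by the scalar alone.

use std::mem::size_of;

fn main() {
    assert_eq!(size_of::<&u8>(), size_of::<usize>());       // thin pointer
    assert_eq!(size_of::<&[u8]>(), 2 * size_of::<usize>()); // fat: data + len
}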