Commit cdeef614 authored by: Ralf Jung

move some Scalar helpers from miri here, and use them where appropriate

Parent 29e6aabc
......@@ -85,9 +85,14 @@ pub struct GlobalId<'tcx> {
pub trait PointerArithmetic: layout::HasDataLayout {
// These are not supposed to be overridden.
#[inline(always)]
fn pointer_size(self) -> Size {
    // Convenience accessor for the target's pointer width.
    self.data_layout().pointer_size
}
/// Truncate the given value to the pointer size; also return whether there was an overflow,
/// i.e. whether `val` did not fit into a pointer-sized unsigned integer.
fn truncate_to_ptr(self, val: u128) -> (u64, bool) {
    // One past the largest representable pointer value; `val % max_ptr_plus_1`
    // is the truncated result, and any `val >= max_ptr_plus_1` overflowed.
    let max_ptr_plus_1 = 1u128 << self.pointer_size().bits();
    ((val % max_ptr_plus_1) as u64, val >= max_ptr_plus_1)
}
......
......@@ -14,7 +14,7 @@
use ty::subst::Substs;
use hir::def_id::DefId;
use super::{EvalResult, Pointer, PointerArithmetic, Allocation, AllocId, sign_extend};
use super::{EvalResult, Pointer, PointerArithmetic, Allocation, AllocId, sign_extend, truncate};
/// Represents a constant value in Rust. Scalar and ScalarPair are optimizations which
/// matches the LocalValue optimizations for easy conversions between Value and ConstValue.
......@@ -58,6 +58,7 @@ pub fn try_to_ptr(&self) -> Option<Pointer> {
self.try_to_scalar()?.to_ptr().ok()
}
#[inline]
pub fn new_slice(
val: Scalar,
len: u64,
......@@ -69,12 +70,14 @@ pub fn new_slice(
}.into())
}
#[inline]
pub fn new_dyn_trait(val: Scalar, vtable: Pointer) -> Self {
ConstValue::ScalarPair(val, Scalar::Ptr(vtable).into())
}
}
impl<'tcx> Scalar {
#[inline]
pub fn ptr_null(cx: impl HasDataLayout) -> Self {
Scalar::Bits {
bits: 0,
......@@ -82,10 +85,12 @@ pub fn ptr_null(cx: impl HasDataLayout) -> Self {
}
}
/// The scalar representing a zero-sized-type value: zero bits of size zero.
#[inline]
pub fn zst() -> Self {
    Scalar::Bits { bits: 0, size: 0 }
}
#[inline]
pub fn ptr_signed_offset(self, i: i64, cx: impl HasDataLayout) -> EvalResult<'tcx, Self> {
let layout = cx.data_layout();
match self {
......@@ -100,6 +105,7 @@ pub fn ptr_signed_offset(self, i: i64, cx: impl HasDataLayout) -> EvalResult<'tc
}
}
#[inline]
pub fn ptr_offset(self, i: Size, cx: impl HasDataLayout) -> EvalResult<'tcx, Self> {
let layout = cx.data_layout();
match self {
......@@ -114,6 +120,7 @@ pub fn ptr_offset(self, i: Size, cx: impl HasDataLayout) -> EvalResult<'tcx, Sel
}
}
#[inline]
pub fn ptr_wrapping_signed_offset(self, i: i64, cx: impl HasDataLayout) -> Self {
let layout = cx.data_layout();
match self {
......@@ -128,6 +135,7 @@ pub fn ptr_wrapping_signed_offset(self, i: i64, cx: impl HasDataLayout) -> Self
}
}
#[inline]
pub fn is_null_ptr(self, cx: impl HasDataLayout) -> bool {
match self {
Scalar::Bits { bits, size } => {
......@@ -138,14 +146,53 @@ pub fn is_null_ptr(self, cx: impl HasDataLayout) -> bool {
}
}
/// Check whether this scalar is the all-zero bit pattern.
/// A `Scalar::Ptr` is never considered null by this check.
#[inline]
pub fn is_null(self) -> bool {
    if let Scalar::Bits { bits, .. } = self {
        bits == 0
    } else {
        false
    }
}
/// Build a one-byte scalar from a `bool` (`false` -> 0, `true` -> 1).
#[inline]
pub fn from_bool(b: bool) -> Self {
    Scalar::Bits { bits: b as u128, size: 1 }
}
/// Build a four-byte scalar from a `char` (its Unicode scalar value, via `as u128`).
#[inline]
pub fn from_char(c: char) -> Self {
    Scalar::Bits { bits: c as u128, size: 4 }
}
/// Build a scalar of the given `size` from an unsigned integer.
/// In debug builds, asserts that the value actually fits in `size` bits
/// (i.e. that truncating to `size` is a no-op).
#[inline]
pub fn from_uint(i: impl Into<u128>, size: Size) -> Self {
    let i = i.into();
    debug_assert_eq!(truncate(i, size), i,
        "Unsigned value {} does not fit in {} bits", i, size.bits());
    Scalar::Bits { bits: i, size: size.bytes() as u8 }
}
/// Build a scalar of the given `size` from a signed integer.
/// The stored bits are the truncated two's-complement representation;
/// in debug builds, asserts the value survives a truncate/sign-extend round-trip.
#[inline]
pub fn from_int(i: impl Into<i128>, size: Size) -> Self {
    let i = i.into();
    // `into` performed sign extension, we have to truncate
    let truncated = truncate(i as u128, size);
    debug_assert_eq!(sign_extend(truncated, size) as i128, i,
        "Signed value {} does not fit in {} bits", i, size.bits());
    Scalar::Bits { bits: truncated, size: size.bytes() as u8 }
}
#[inline]
pub fn from_f32(f: f32) -> Self {
Scalar::Bits { bits: f.to_bits() as u128, size: 4 }
}
#[inline]
pub fn from_f64(f: f64) -> Self {
Scalar::Bits { bits: f.to_bits() as u128, size: 8 }
}
#[inline]
pub fn to_bits(self, target_size: Size) -> EvalResult<'tcx, u128> {
match self {
Scalar::Bits { bits, size } => {
......@@ -157,6 +204,7 @@ pub fn to_bits(self, target_size: Size) -> EvalResult<'tcx, u128> {
}
}
#[inline]
pub fn to_ptr(self) -> EvalResult<'tcx, Pointer> {
match self {
Scalar::Bits { bits: 0, .. } => err!(InvalidNullPointerUsage),
......@@ -165,6 +213,7 @@ pub fn to_ptr(self) -> EvalResult<'tcx, Pointer> {
}
}
#[inline]
pub fn is_bits(self) -> bool {
match self {
Scalar::Bits { .. } => true,
......@@ -172,6 +221,7 @@ pub fn is_bits(self) -> bool {
}
}
#[inline]
pub fn is_ptr(self) -> bool {
match self {
Scalar::Ptr(_) => true,
......@@ -209,6 +259,13 @@ pub fn to_u32(self) -> EvalResult<'static, u32> {
Ok(b as u32)
}
/// Interpret this scalar as a `u64`; fails if it is a pointer or its size is not 64 bits.
pub fn to_u64(self) -> EvalResult<'static, u64> {
    let bits = self.to_bits(Size::from_bits(64))?;
    let narrowed = bits as u64;
    // `to_bits` already checked the size, so widening back must round-trip.
    assert_eq!(narrowed as u128, bits);
    Ok(narrowed)
}
pub fn to_usize(self, cx: impl HasDataLayout) -> EvalResult<'static, u64> {
let b = self.to_bits(cx.data_layout().pointer_size)?;
assert_eq!(b as u64 as u128, b);
......@@ -231,12 +288,30 @@ pub fn to_i32(self) -> EvalResult<'static, i32> {
Ok(b as i32)
}
/// Interpret this scalar as an `i64`, sign-extending the stored 64-bit pattern;
/// fails if it is a pointer or its size is not 64 bits.
pub fn to_i64(self) -> EvalResult<'static, i64> {
    let size = Size::from_bits(64);
    let raw = self.to_bits(size)?;
    let signed = sign_extend(raw, size) as i128;
    // Sanity check: the sign-extended value must fit in an `i64`.
    assert_eq!(signed as i64 as i128, signed);
    Ok(signed as i64)
}
/// Interpret this scalar as a target `isize` (sign-extended from the target's
/// pointer width), returned as an `i64`.
pub fn to_isize(self, cx: impl HasDataLayout) -> EvalResult<'static, i64> {
    let ptr_size = cx.data_layout().pointer_size;
    let raw = self.to_bits(ptr_size)?;
    let signed = sign_extend(raw, ptr_size) as i128;
    // The target's pointer size is at most 64 bits, so this must fit.
    assert_eq!(signed as i64 as i128, signed);
    Ok(signed as i64)
}
/// Interpret this scalar as an `f32` by reinterpreting its 32 bits.
#[inline]
pub fn to_f32(self) -> EvalResult<'static, f32> {
    Ok(f32::from_bits(self.to_u32()?))
}
/// Interpret this scalar as an `f64` by reinterpreting its 64 bits.
#[inline]
pub fn to_f64(self) -> EvalResult<'static, f64> {
    Ok(f64::from_bits(self.to_u64()?))
}
}
impl From<Pointer> for Scalar {
......@@ -308,6 +383,16 @@ pub fn to_char(self) -> EvalResult<'tcx, char> {
self.not_undef()?.to_char()
}
/// Like [`Scalar::to_f32`], but fails on `Undef`.
#[inline(always)]
pub fn to_f32(self) -> EvalResult<'tcx, f32> {
    self.not_undef()?.to_f32()
}
/// Like [`Scalar::to_f64`], but fails on `Undef`.
#[inline(always)]
pub fn to_f64(self) -> EvalResult<'tcx, f64> {
    self.not_undef()?.to_f64()
}
#[inline(always)]
pub fn to_u8(self) -> EvalResult<'tcx, u8> {
self.not_undef()?.to_u8()
......@@ -318,6 +403,11 @@ pub fn to_u32(self) -> EvalResult<'tcx, u32> {
self.not_undef()?.to_u32()
}
/// Like [`Scalar::to_u64`], but fails on `Undef`.
#[inline(always)]
pub fn to_u64(self) -> EvalResult<'tcx, u64> {
    self.not_undef()?.to_u64()
}
#[inline(always)]
pub fn to_usize(self, cx: impl HasDataLayout) -> EvalResult<'tcx, u64> {
self.not_undef()?.to_usize(cx)
......@@ -333,6 +423,11 @@ pub fn to_i32(self) -> EvalResult<'tcx, i32> {
self.not_undef()?.to_i32()
}
/// Like [`Scalar::to_i64`], but fails on `Undef`.
#[inline(always)]
pub fn to_i64(self) -> EvalResult<'tcx, i64> {
    self.not_undef()?.to_i64()
}
#[inline(always)]
pub fn to_isize(self, cx: impl HasDataLayout) -> EvalResult<'tcx, i64> {
self.not_undef()?.to_isize(cx)
......
......@@ -14,8 +14,7 @@
use rustc_apfloat::ieee::{Single, Double};
use rustc::mir::interpret::{
Scalar, EvalResult, Pointer, PointerArithmetic, EvalErrorKind,
truncate, sign_extend
Scalar, EvalResult, Pointer, PointerArithmetic, EvalErrorKind, truncate
};
use rustc::mir::CastKind;
use rustc_apfloat::Float;
......@@ -70,10 +69,7 @@ pub fn cast(
.discriminant_for_variant(*self.tcx, index)
.val;
return self.write_scalar(
Scalar::Bits {
bits: discr_val,
size: dst_layout.size.bytes() as u8,
},
Scalar::from_uint(discr_val, dst_layout.size),
dest);
}
}
......@@ -198,41 +194,39 @@ fn cast_from_int(
match dest_layout.ty.sty {
Int(_) | Uint(_) => {
let v = self.truncate(v, dest_layout);
Ok(Scalar::Bits {
bits: v,
size: dest_layout.size.bytes() as u8,
})
Ok(Scalar::from_uint(v, dest_layout.size))
}
Float(FloatTy::F32) if signed => Ok(Scalar::Bits {
bits: Single::from_i128(v as i128).value.to_bits(),
size: 4,
}),
Float(FloatTy::F64) if signed => Ok(Scalar::Bits {
bits: Double::from_i128(v as i128).value.to_bits(),
size: 8,
}),
Float(FloatTy::F32) => Ok(Scalar::Bits {
bits: Single::from_u128(v).value.to_bits(),
size: 4,
}),
Float(FloatTy::F64) => Ok(Scalar::Bits {
bits: Double::from_u128(v).value.to_bits(),
size: 8,
}),
Float(FloatTy::F32) if signed => Ok(Scalar::from_uint(
Single::from_i128(v as i128).value.to_bits(),
Size::from_bits(32)
)),
Float(FloatTy::F64) if signed => Ok(Scalar::from_uint(
Double::from_i128(v as i128).value.to_bits(),
Size::from_bits(64)
)),
Float(FloatTy::F32) => Ok(Scalar::from_uint(
Single::from_u128(v).value.to_bits(),
Size::from_bits(32)
)),
Float(FloatTy::F64) => Ok(Scalar::from_uint(
Double::from_u128(v).value.to_bits(),
Size::from_bits(64)
)),
Char => {
assert_eq!(v as u8 as u128, v);
Ok(Scalar::Bits { bits: v, size: 4 })
// `u8` to `char` cast
debug_assert_eq!(v as u8 as u128, v);
Ok(Scalar::from_uint(v, Size::from_bytes(4)))
},
// No alignment check needed for raw pointers.
// But we have to truncate to target ptr size.
RawPtr(_) => {
Ok(Scalar::Bits {
bits: self.memory.truncate_to_ptr(v).0 as u128,
size: self.memory.pointer_size().bytes() as u8,
})
Ok(Scalar::from_uint(
self.truncate_to_ptr(v).0,
self.pointer_size(),
))
},
// Casts to bool are not permitted by rustc, no need to handle them here.
......@@ -251,56 +245,40 @@ fn cast_from_float(
match dest_ty.sty {
// float -> uint
Uint(t) => {
let width = t.bit_width().unwrap_or(self.memory.pointer_size().bits() as usize);
let width = t.bit_width().unwrap_or(self.pointer_size().bits() as usize);
let v = match fty {
FloatTy::F32 => Single::from_bits(bits).to_u128(width).value,
FloatTy::F64 => Double::from_bits(bits).to_u128(width).value,
};
// This should already fit the bit width
Ok(Scalar::Bits {
bits: v,
size: (width / 8) as u8,
})
Ok(Scalar::from_uint(v, Size::from_bits(width as u64)))
},
// float -> int
Int(t) => {
let width = t.bit_width().unwrap_or(self.memory.pointer_size().bits() as usize);
let width = t.bit_width().unwrap_or(self.pointer_size().bits() as usize);
let v = match fty {
FloatTy::F32 => Single::from_bits(bits).to_i128(width).value,
FloatTy::F64 => Double::from_bits(bits).to_i128(width).value,
};
// We got an i128, but we may need something smaller. We have to truncate ourselves.
let truncated = truncate(v as u128, Size::from_bits(width as u64));
assert_eq!(sign_extend(truncated, Size::from_bits(width as u64)) as i128, v,
"truncating and extending changed the value?!?");
Ok(Scalar::Bits {
bits: truncated,
size: (width / 8) as u8,
})
Ok(Scalar::from_int(v, Size::from_bits(width as u64)))
},
// f64 -> f32
Float(FloatTy::F32) if fty == FloatTy::F64 => {
Ok(Scalar::Bits {
bits: Single::to_bits(Double::from_bits(bits).convert(&mut false).value),
size: 4,
})
Ok(Scalar::from_uint(
Single::to_bits(Double::from_bits(bits).convert(&mut false).value),
Size::from_bits(32),
))
},
// f32 -> f64
Float(FloatTy::F64) if fty == FloatTy::F32 => {
Ok(Scalar::Bits {
bits: Double::to_bits(Single::from_bits(bits).convert(&mut false).value),
size: 8,
})
Ok(Scalar::from_uint(
Double::to_bits(Single::from_bits(bits).convert(&mut false).value),
Size::from_bits(64),
))
},
// identity cast
Float(FloatTy:: F64) => Ok(Scalar::Bits {
bits,
size: 8,
}),
Float(FloatTy:: F32) => Ok(Scalar::Bits {
bits,
size: 4,
}),
Float(FloatTy:: F64) => Ok(Scalar::from_uint(bits, Size::from_bits(64))),
Float(FloatTy:: F32) => Ok(Scalar::from_uint(bits, Size::from_bits(32))),
_ => err!(Unimplemented(format!("float to {:?} cast", dest_ty))),
}
}
......
......@@ -270,7 +270,8 @@ fn data_layout(&self) -> &layout::TargetDataLayout {
}
impl<'c, 'b, 'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> HasDataLayout
for &'c &'b mut EvalContext<'a, 'mir, 'tcx, M> {
for &'c &'b mut EvalContext<'a, 'mir, 'tcx, M>
{
#[inline]
fn data_layout(&self) -> &layout::TargetDataLayout {
&self.tcx.data_layout
......
......@@ -41,7 +41,7 @@ fn numeric_intrinsic<'tcx>(
"bswap" => (bits << extra).swap_bytes(),
_ => bug!("not a numeric intrinsic: {}", name),
};
Ok(Scalar::Bits { bits: bits_out, size: size.bytes() as u8 })
Ok(Scalar::from_uint(bits_out, size))
}
impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
......@@ -59,30 +59,21 @@ pub fn emulate_intrinsic(
"min_align_of" => {
let elem_ty = substs.type_at(0);
let elem_align = self.layout_of(elem_ty)?.align.abi();
let align_val = Scalar::Bits {
bits: elem_align as u128,
size: dest.layout.size.bytes() as u8,
};
let align_val = Scalar::from_uint(elem_align, dest.layout.size);
self.write_scalar(align_val, dest)?;
}
"size_of" => {
let ty = substs.type_at(0);
let size = self.layout_of(ty)?.size.bytes() as u128;
let size_val = Scalar::Bits {
bits: size,
size: dest.layout.size.bytes() as u8,
};
let size_val = Scalar::from_uint(size, dest.layout.size);
self.write_scalar(size_val, dest)?;
}
"type_id" => {
let ty = substs.type_at(0);
let type_id = self.tcx.type_id_hash(ty) as u128;
let id_val = Scalar::Bits {
bits: type_id,
size: dest.layout.size.bytes() as u8,
};
let id_val = Scalar::from_uint(type_id, dest.layout.size);
self.write_scalar(id_val, dest)?;
}
"ctpop" | "cttz" | "cttz_nonzero" | "ctlz" | "ctlz_nonzero" | "bswap" => {
......
......@@ -23,7 +23,8 @@
use rustc::ty::{self, Instance, query::TyCtxtAt};
use rustc::ty::layout::{self, Align, TargetDataLayout, Size, HasDataLayout};
use rustc::mir::interpret::{Pointer, AllocId, Allocation, ScalarMaybeUndef, GlobalId,
EvalResult, Scalar, EvalErrorKind, AllocType, truncate};
EvalResult, Scalar, EvalErrorKind, AllocType, PointerArithmetic,
truncate};
pub use rustc::mir::interpret::{write_target_uint, read_target_uint};
use rustc_data_structures::fx::{FxHashSet, FxHashMap, FxHasher};
......@@ -60,6 +61,14 @@ fn data_layout(&self) -> &TargetDataLayout {
&self.tcx.data_layout
}
}
// Also provide layout access through a double indirection to `Memory`,
// so code that only holds `&&mut Memory` can use `PointerArithmetic` helpers.
impl<'a, 'b, 'c, 'mir, 'tcx, M: Machine<'mir, 'tcx>> HasDataLayout
    for &'b &'c mut Memory<'a, 'mir, 'tcx, M>
{
    #[inline]
    fn data_layout(&self) -> &TargetDataLayout {
        &self.tcx.data_layout
    }
}
impl<'a, 'mir, 'tcx, M> Eq for Memory<'a, 'mir, 'tcx, M>
where M: Machine<'mir, 'tcx>,
......@@ -277,14 +286,6 @@ pub fn deallocate(
Ok(())
}
/// The target's pointer width.
pub fn pointer_size(&self) -> Size {
    self.tcx.data_layout.pointer_size
}

/// The target's byte order.
pub fn endianness(&self) -> layout::Endian {
    self.tcx.data_layout.endian
}
/// Check that the pointer is aligned AND non-NULL. This supports scalars
/// for the benefit of other parts of miri that need to check alignment even for ZST.
pub fn check_align(&self, ptr: Scalar, required_align: Align) -> EvalResult<'tcx> {
......@@ -773,7 +774,6 @@ pub fn read_scalar(
) -> EvalResult<'tcx, ScalarMaybeUndef> {
// Make sure we don't read part of a pointer as a pointer
self.check_relocation_edges(ptr, size)?;
let endianness = self.endianness();
// get_bytes_unchecked tests alignment
let bytes = self.get_bytes_unchecked(ptr, size, ptr_align.min(self.int_align(size)))?;
// Undef check happens *after* we established that the alignment is correct.
......@@ -784,7 +784,7 @@ pub fn read_scalar(
return Ok(ScalarMaybeUndef::Undef);
}
// Now we do the actual reading
let bits = read_target_uint(endianness, bytes).unwrap();
let bits = read_target_uint(self.tcx.data_layout.endian, bytes).unwrap();
// See if we got a pointer
if size != self.pointer_size() {
if self.relocations(ptr, size)?.len() != 0 {
......@@ -801,10 +801,7 @@ pub fn read_scalar(
}
}
// We don't. Just return the bits.
Ok(ScalarMaybeUndef::Scalar(Scalar::Bits {
bits,
size: size.bytes() as u8,
}))
Ok(ScalarMaybeUndef::Scalar(Scalar::from_uint(bits, size)))
}
pub fn read_ptr_sized(&self, ptr: Pointer, ptr_align: Align)
......@@ -820,8 +817,6 @@ pub fn write_scalar(
val: ScalarMaybeUndef,
type_size: Size,
) -> EvalResult<'tcx> {
let endianness = self.endianness();
let val = match val {
ScalarMaybeUndef::Scalar(scalar) => scalar,
ScalarMaybeUndef::Undef => return self.mark_definedness(ptr, type_size, false),
......@@ -835,7 +830,7 @@ pub fn write_scalar(
Scalar::Bits { bits, size } => {
assert_eq!(size as u64, type_size.bytes());
assert_eq!(truncate(bits, Size::from_bytes(size.into())), bits,
debug_assert_eq!(truncate(bits, Size::from_bytes(size.into())), bits,
"Unexpected value of size {} when writing to memory", size);
bits
},
......@@ -843,8 +838,9 @@ pub fn write_scalar(
{
// get_bytes_mut checks alignment
let endian = self.tcx.data_layout.endian;
let dst = self.get_bytes_mut(ptr, type_size, ptr_align)?;
write_target_uint(endianness, dst, bytes).unwrap();
write_target_uint(endian, dst, bytes).unwrap();
}
// See if we have to also write a relocation
......
......@@ -42,10 +42,7 @@ pub fn new_slice(
len: u64,
cx: impl HasDataLayout
) -> Self {
Value::ScalarPair(val.into(), Scalar::Bits {
bits: len as u128,
size: cx.data_layout().pointer_size.bytes() as u8,
}.into())
Value::ScalarPair(val.into(), Scalar::from_uint(len, cx.data_layout().pointer_size).into())
}
pub fn new_dyn_trait(val: Scalar, vtable: Pointer) -> Self {
......
......@@ -9,7 +9,7 @@
// except according to those terms.
use rustc::mir;
use rustc::ty::{self, layout::TyLayout};
use rustc::ty::{self, layout::{Size, TyLayout}};
use syntax::ast::FloatTy;
use rustc_apfloat::ieee::{Double, Single};
use rustc_apfloat::Float;
......@@ -105,10 +105,8 @@ fn binary_float_op(
($ty:path, $size:expr) => {{
let l = <$ty>::from_bits(l);
let r = <$ty>::from_bits(r);
let bitify = |res: ::rustc_apfloat::StatusAnd<$ty>| Scalar::Bits {
bits: res.value.to_bits(),
size: $size,
};
let bitify = |res: ::rustc_apfloat::StatusAnd<$ty>|
Scalar::from_uint(res.value.to_bits(), Size::from_bytes($size));
let val = match bin_op {
Eq => Scalar::from_bool(l == r),
Ne => Scalar::from_bool(l != r),
......@@ -169,10 +167,7 @@ fn binary_int_op(
}
};
let truncated = self.truncate(result, left_layout);
return Ok((Scalar::Bits {
bits: truncated,
size: size.bytes() as u8,
}, oflo));
return Ok((Scalar::from_uint(truncated, size), oflo));
}
// For the remaining ops, the types must be the same on both sides
......@@ -220,7 +215,7 @@ fn binary_int_op(
Rem | Div => {
// int_min / -1
if r == -1 && l == (1 << (size.bits() - 1)) {
return Ok((Scalar::Bits { bits: l, size: size.bytes() as u8 }, true));
return Ok((Scalar::from_uint(l, size), true));
}
},
_ => {},
......@@ -232,16 +227,14 @@ fn binary_int_op(
let max = 1 << (size.bits() - 1);
oflo = result >= max || result < -max;
}
// this may be out-of-bounds for the result type, so we have to truncate ourselves
let result = result as u128;
let truncated = self.truncate(result, left_layout);
return Ok((Scalar::Bits {
bits: truncated,
size: size.bytes() as u8,
}, oflo));
return Ok((Scalar::from_uint(truncated, size), oflo));
}
}
let size = left_layout.size.bytes() as u8;
let size = left_layout.size;
// only ints left
let val = match bin_op {
......@@ -253,11 +246,12 @@ fn binary_int_op(
Gt => Scalar::from_bool(l > r),
Ge => Scalar::from_bool(l >= r),
BitOr => Scalar::Bits { bits: l | r, size },
BitAnd => Scalar::Bits { bits: l & r, size },
BitXor => Scalar::Bits { bits: l ^ r, size },
BitOr => Scalar::from_uint(l | r, size),
BitAnd => Scalar::from_uint(l & r, size),
BitXor => Scalar::from_uint(l ^ r, size),
Add | Sub | Mul | Rem | Div => {
debug_assert!(!left_layout.abi.is_signed());
let op: fn(u128, u128) -> (u128, bool) = match bin_op {
Add => u128::overflowing_add,
Sub => u128::overflowing_sub,
......@@ -270,10 +264,7 @@ fn binary_int_op(
};
let (result, oflo) = op(l, r);
let truncated = self.truncate(result, left_layout);
return Ok((Scalar::Bits {
bits: truncated,
size,
}, oflo || truncated != result));
return Ok((Scalar::from_uint(truncated, size), oflo || truncated != result));
}
_ => {
......@@ -373,7 +364,7 @@ pub fn unary_op(
(Neg, FloatTy::F64) => Double::to_bits(-Double::from_bits(val)),
_ => bug!("Invalid float op {:?}", un_op)
};
Ok(Scalar::Bits { bits: res, size: layout.size.bytes() as u8 })
Ok(Scalar::from_uint(res, layout.size))
}
_ => {
assert!(layout.ty.is_integral());
......@@ -386,10 +377,7 @@ pub fn unary_op(
}
};
// res needs tuncating
Ok(Scalar::Bits {
bits: self.truncate(res, layout),
size: layout.size.bytes() as u8,
})
Ok(Scalar::from_uint(self.truncate(res, layout), layout.size))
}
}
}
......
......@@ -20,7 +20,7 @@
use rustc_data_structures::indexed_vec::Idx;
use rustc::mir::interpret::{
GlobalId, Scalar, EvalResult, Pointer, ScalarMaybeUndef
GlobalId, Scalar, EvalResult, Pointer, ScalarMaybeUndef, PointerArithmetic
};
use super::{EvalContext, Machine, Value, ValTy, Operand, OpTy, MemoryKind};
......@@ -344,10 +344,7 @@ pub fn mplace_subslice(
ty::Array(inner, _) =>
(None, self.tcx.mk_array(inner, inner_len)),
ty::Slice(..) => {
let len = Scalar::Bits {
bits: inner_len.into(),
size: self.memory.pointer_size().bytes() as u8
};
let len = Scalar::from_uint(inner_len, self.pointer_size());
(Some(len), base.layout.ty)
}
_ =>
......@@ -716,10 +713,7 @@ pub fn write_discriminant_index(
let discr_val = (discr_val << shift) >> shift;
let discr_dest = self.place_field(dest, 0)?;
self.write_scalar(Scalar::Bits {
bits: discr_val,
size: size.bytes() as u8,
}, discr_dest)?;
self.write_scalar(Scalar::from_uint(discr_val, size), discr_dest)?;
}
layout::Variants::NicheFilling {
dataful_variant,
......@@ -733,10 +727,10 @@ pub fn write_discriminant_index(
self.place_field(dest, 0)?;
let niche_value = ((variant_index - niche_variants.start()) as u128)
.wrapping_add(niche_start);
self.write_scalar(Scalar::Bits {
bits: niche_value,
size: niche_dest.layout.size.bytes() as u8,
}, niche_dest)?;
self.write_scalar(
Scalar::from_uint(niche_value, niche_dest.layout.size),
niche_dest
)?;
}
}
}
......@@ -766,11 +760,11 @@ pub(super) fn unpack_dyn_trait(&self, mplace: MPlaceTy<'tcx>)
let layout = self.layout_of(ty)?;
// More sanity checks
let (size, align) = self.read_size_and_align_from_vtable(vtable)?;
assert_eq!(size, layout.size);
assert_eq!(align.abi(), layout.align.abi()); // only ABI alignment is preserved
// FIXME: More checks for the vtable? We could make sure it is exactly
// the one one would expect for this type.
if cfg!(debug_assertions) {
let (size, align) = self.read_size_and_align_from_vtable(vtable)?;
assert_eq!(size, layout.size);
assert_eq!(align.abi(), layout.align.abi()); // only ABI alignment is preserved
}
let mplace = MPlaceTy {
mplace: MemPlace { extra: None, ..*mplace },
......
......@@ -14,7 +14,7 @@
use rustc::mir;
use rustc::ty::layout::LayoutOf;
use rustc::mir::interpret::{EvalResult, Scalar};
use rustc::mir::interpret::{EvalResult, Scalar, PointerArithmetic};
use super::{EvalContext, Machine};
......@@ -269,12 +269,9 @@ fn eval_rvalue_into_place(
let src = self.eval_place(place)?;
let mplace = self.force_allocation(src)?;
let len = mplace.len(&self)?;
let size = self.memory.pointer_size().bytes() as u8;
let size = self.pointer_size();
self.write_scalar(
Scalar::Bits {
bits: len as u128,
size,
},
Scalar::from_uint(len, size),
dest,
)?;
}
......@@ -294,12 +291,9 @@ fn eval_rvalue_into_place(
let layout = self.layout_of(ty)?;
assert!(!layout.is_unsized(),
"SizeOf nullary MIR operator called for unsized type");
let size = self.memory.pointer_size().bytes() as u8;
let size = self.pointer_size();
self.write_scalar(
Scalar::Bits {
bits: layout.size.bytes() as u128,
size,
},
Scalar::from_uint(layout.size.bytes(), size),
dest,
)?;
}
......@@ -313,11 +307,8 @@ fn eval_rvalue_into_place(
Discriminant(ref place) => {
let place = self.eval_place(place)?;
let discr_val = self.read_discriminant(self.place_to_op(place)?)?.0;
let size = dest.layout.size.bytes() as u8;
self.write_scalar(Scalar::Bits {
bits: discr_val,
size,
}, dest)?;
let size = dest.layout.size;
self.write_scalar(Scalar::from_uint(discr_val, size), dest)?;
}
}
......
......@@ -16,7 +16,7 @@
use syntax::source_map::Span;
use rustc_target::spec::abi::Abi;
use rustc::mir::interpret::{EvalResult, Scalar};
use rustc::mir::interpret::{EvalResult, Scalar, PointerArithmetic};
use super::{
EvalContext, Machine, Value, OpTy, Place, PlaceTy, ValTy, Operand, StackPopCleanup
};
......@@ -60,10 +60,7 @@ pub(super) fn eval_terminator(
for (index, &const_int) in values.iter().enumerate() {
// Compare using binary_op
let const_int = Scalar::Bits {
bits: const_int,
size: discr.layout.size.bytes() as u8
};
let const_int = Scalar::from_uint(const_int, discr.layout.size);
let (res, _) = self.binary_op(mir::BinOp::Eq,
discr,
ValTy { value: Value::Scalar(const_int.into()), layout: discr.layout }
......@@ -411,7 +408,7 @@ fn eval_fn_call(
}
// cannot use the shim here, because that will only result in infinite recursion
ty::InstanceDef::Virtual(_, idx) => {
let ptr_size = self.memory.pointer_size();
let ptr_size = self.pointer_size();
let ptr_align = self.tcx.data_layout.pointer_align;
let ptr = self.ref_to_mplace(self.read_value(args[0])?)?;
let vtable = ptr.vtable()?;
......
......@@ -10,7 +10,7 @@
use rustc::ty::{self, Ty};
use rustc::ty::layout::{Size, Align, LayoutOf};
use rustc::mir::interpret::{Scalar, Pointer, EvalResult};
use rustc::mir::interpret::{Scalar, Pointer, EvalResult, PointerArithmetic};
use syntax::ast::Mutability;
......@@ -35,7 +35,7 @@ pub fn get_vtable(
let size = layout.size.bytes();
let align = layout.align.abi();
let ptr_size = self.memory.pointer_size();
let ptr_size = self.pointer_size();
let ptr_align = self.tcx.data_layout.pointer_align;
let methods = self.tcx.vtable_methods(trait_ref);
let vtable = self.memory.allocate(
......@@ -49,15 +49,10 @@ pub fn get_vtable(
self.memory.write_ptr_sized(vtable, ptr_align, Scalar::Ptr(drop).into())?;
let size_ptr = vtable.offset(ptr_size, &self)?;
self.memory.write_ptr_sized(size_ptr, ptr_align, Scalar::Bits {
bits: size as u128,
size: ptr_size.bytes() as u8,
}.into())?;
self.memory.write_ptr_sized(size_ptr, ptr_align, Scalar::from_uint(size, ptr_size).into())?;
let align_ptr = vtable.offset(ptr_size * 2, &self)?;
self.memory.write_ptr_sized(align_ptr, ptr_align, Scalar::Bits {
bits: align as u128,
size: ptr_size.bytes() as u8,
}.into())?;
self.memory.write_ptr_sized(align_ptr, ptr_align,
Scalar::from_uint(align, ptr_size).into())?;
for (i, method) in methods.iter().enumerate() {
if let Some((def_id, substs)) = *method {
......@@ -97,7 +92,7 @@ pub fn read_size_and_align_from_vtable(
&self,
vtable: Pointer,
) -> EvalResult<'tcx, (Size, Align)> {
let pointer_size = self.memory.pointer_size();
let pointer_size = self.pointer_size();
let pointer_align = self.tcx.data_layout.pointer_align;
let size = self.memory.read_ptr_sized(vtable.offset(pointer_size, self)?,pointer_align)?
.to_bits(pointer_size)? as u64;
......
......@@ -15,7 +15,7 @@
use rustc::ty::{self, Ty};
use rustc_data_structures::fx::FxHashSet;
use rustc::mir::interpret::{
Scalar, AllocType, EvalResult, ScalarMaybeUndef, EvalErrorKind
Scalar, AllocType, EvalResult, ScalarMaybeUndef, EvalErrorKind, PointerArithmetic
};
use super::{
......@@ -118,7 +118,7 @@ fn validate_scalar(
bits
},
Scalar::Ptr(_) => {
let ptr_size = self.memory.pointer_size();
let ptr_size = self.pointer_size();
let ptr_max = u128::max_value() >> (128 - ptr_size.bits());
return if lo > hi {
if lo - hi == 1 {
......@@ -376,6 +376,7 @@ pub fn validate_operand(
"non-pointer vtable in fat pointer", path
),
}
// FIXME: More checks for the vtable.
}
ty::Slice(..) | ty::Str => {
match ptr.extra.unwrap().to_usize(self) {
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register to comment