提交 cdeef614 编写于 作者: R Ralf Jung

move some Scalar helpers from miri here, and use them where appropriate

上级 29e6aabc
...@@ -85,9 +85,14 @@ pub struct GlobalId<'tcx> { ...@@ -85,9 +85,14 @@ pub struct GlobalId<'tcx> {
pub trait PointerArithmetic: layout::HasDataLayout { pub trait PointerArithmetic: layout::HasDataLayout {
// These are not supposed to be overridden. // These are not supposed to be overridden.
/// The size of a pointer on the target, as given by the data layout.
#[inline(always)]
fn pointer_size(self) -> Size {
    let dl = self.data_layout();
    dl.pointer_size
}
//// Truncate the given value to the pointer size; also return whether there was an overflow //// Truncate the given value to the pointer size; also return whether there was an overflow
fn truncate_to_ptr(self, val: u128) -> (u64, bool) { fn truncate_to_ptr(self, val: u128) -> (u64, bool) {
let max_ptr_plus_1 = 1u128 << self.data_layout().pointer_size.bits(); let max_ptr_plus_1 = 1u128 << self.pointer_size().bits();
((val % max_ptr_plus_1) as u64, val >= max_ptr_plus_1) ((val % max_ptr_plus_1) as u64, val >= max_ptr_plus_1)
} }
......
...@@ -14,7 +14,7 @@ ...@@ -14,7 +14,7 @@
use ty::subst::Substs; use ty::subst::Substs;
use hir::def_id::DefId; use hir::def_id::DefId;
use super::{EvalResult, Pointer, PointerArithmetic, Allocation, AllocId, sign_extend}; use super::{EvalResult, Pointer, PointerArithmetic, Allocation, AllocId, sign_extend, truncate};
/// Represents a constant value in Rust. Scalar and ScalarPair are optimizations which /// Represents a constant value in Rust. Scalar and ScalarPair are optimizations which
/// matches the LocalValue optimizations for easy conversions between Value and ConstValue. /// matches the LocalValue optimizations for easy conversions between Value and ConstValue.
...@@ -58,6 +58,7 @@ pub fn try_to_ptr(&self) -> Option<Pointer> { ...@@ -58,6 +58,7 @@ pub fn try_to_ptr(&self) -> Option<Pointer> {
self.try_to_scalar()?.to_ptr().ok() self.try_to_scalar()?.to_ptr().ok()
} }
#[inline]
pub fn new_slice( pub fn new_slice(
val: Scalar, val: Scalar,
len: u64, len: u64,
...@@ -69,12 +70,14 @@ pub fn new_slice( ...@@ -69,12 +70,14 @@ pub fn new_slice(
}.into()) }.into())
} }
#[inline]
pub fn new_dyn_trait(val: Scalar, vtable: Pointer) -> Self { pub fn new_dyn_trait(val: Scalar, vtable: Pointer) -> Self {
ConstValue::ScalarPair(val, Scalar::Ptr(vtable).into()) ConstValue::ScalarPair(val, Scalar::Ptr(vtable).into())
} }
} }
impl<'tcx> Scalar { impl<'tcx> Scalar {
#[inline]
pub fn ptr_null(cx: impl HasDataLayout) -> Self { pub fn ptr_null(cx: impl HasDataLayout) -> Self {
Scalar::Bits { Scalar::Bits {
bits: 0, bits: 0,
...@@ -82,10 +85,12 @@ pub fn ptr_null(cx: impl HasDataLayout) -> Self { ...@@ -82,10 +85,12 @@ pub fn ptr_null(cx: impl HasDataLayout) -> Self {
} }
} }
#[inline]
pub fn zst() -> Self { pub fn zst() -> Self {
Scalar::Bits { bits: 0, size: 0 } Scalar::Bits { bits: 0, size: 0 }
} }
#[inline]
pub fn ptr_signed_offset(self, i: i64, cx: impl HasDataLayout) -> EvalResult<'tcx, Self> { pub fn ptr_signed_offset(self, i: i64, cx: impl HasDataLayout) -> EvalResult<'tcx, Self> {
let layout = cx.data_layout(); let layout = cx.data_layout();
match self { match self {
...@@ -100,6 +105,7 @@ pub fn ptr_signed_offset(self, i: i64, cx: impl HasDataLayout) -> EvalResult<'tc ...@@ -100,6 +105,7 @@ pub fn ptr_signed_offset(self, i: i64, cx: impl HasDataLayout) -> EvalResult<'tc
} }
} }
#[inline]
pub fn ptr_offset(self, i: Size, cx: impl HasDataLayout) -> EvalResult<'tcx, Self> { pub fn ptr_offset(self, i: Size, cx: impl HasDataLayout) -> EvalResult<'tcx, Self> {
let layout = cx.data_layout(); let layout = cx.data_layout();
match self { match self {
...@@ -114,6 +120,7 @@ pub fn ptr_offset(self, i: Size, cx: impl HasDataLayout) -> EvalResult<'tcx, Sel ...@@ -114,6 +120,7 @@ pub fn ptr_offset(self, i: Size, cx: impl HasDataLayout) -> EvalResult<'tcx, Sel
} }
} }
#[inline]
pub fn ptr_wrapping_signed_offset(self, i: i64, cx: impl HasDataLayout) -> Self { pub fn ptr_wrapping_signed_offset(self, i: i64, cx: impl HasDataLayout) -> Self {
let layout = cx.data_layout(); let layout = cx.data_layout();
match self { match self {
...@@ -128,6 +135,7 @@ pub fn ptr_wrapping_signed_offset(self, i: i64, cx: impl HasDataLayout) -> Self ...@@ -128,6 +135,7 @@ pub fn ptr_wrapping_signed_offset(self, i: i64, cx: impl HasDataLayout) -> Self
} }
} }
#[inline]
pub fn is_null_ptr(self, cx: impl HasDataLayout) -> bool { pub fn is_null_ptr(self, cx: impl HasDataLayout) -> bool {
match self { match self {
Scalar::Bits { bits, size } => { Scalar::Bits { bits, size } => {
...@@ -138,14 +146,53 @@ pub fn is_null_ptr(self, cx: impl HasDataLayout) -> bool { ...@@ -138,14 +146,53 @@ pub fn is_null_ptr(self, cx: impl HasDataLayout) -> bool {
} }
} }
/// Returns `true` iff this is a `Bits` scalar whose value is zero.
/// A `Scalar::Ptr` never counts as null.
#[inline]
pub fn is_null(self) -> bool {
    if let Scalar::Bits { bits, .. } = self {
        bits == 0
    } else {
        false
    }
}
#[inline]
pub fn from_bool(b: bool) -> Self { pub fn from_bool(b: bool) -> Self {
Scalar::Bits { bits: b as u128, size: 1 } Scalar::Bits { bits: b as u128, size: 1 }
} }
#[inline]
pub fn from_char(c: char) -> Self { pub fn from_char(c: char) -> Self {
Scalar::Bits { bits: c as u128, size: 4 } Scalar::Bits { bits: c as u128, size: 4 }
} }
/// Builds a `Scalar` from an unsigned integer; a debug assertion
/// checks that the value actually fits into `size` bits.
#[inline]
pub fn from_uint(i: impl Into<u128>, size: Size) -> Self {
    let bits = i.into();
    debug_assert_eq!(truncate(bits, size), bits,
        "Unsigned value {} does not fit in {} bits", bits, size.bits());
    Scalar::Bits { bits, size: size.bytes() as u8 }
}
/// Builds a `Scalar` from a signed integer. The stored bits are truncated
/// to `size`; a debug assertion checks that no information is lost.
#[inline]
pub fn from_int(i: impl Into<i128>, size: Size) -> Self {
    let signed = i.into();
    // `into` sign-extended the value to 128 bits; cut it back down to `size`.
    let bits = truncate(signed as u128, size);
    debug_assert_eq!(sign_extend(bits, size) as i128, signed,
        "Signed value {} does not fit in {} bits", signed, size.bits());
    Scalar::Bits { bits, size: size.bytes() as u8 }
}
/// Encodes an `f32` as its raw 4-byte IEEE bit pattern.
#[inline]
pub fn from_f32(f: f32) -> Self {
    let bits = u128::from(f.to_bits());
    Scalar::Bits { bits, size: 4 }
}
/// Encodes an `f64` as its raw 8-byte IEEE bit pattern.
#[inline]
pub fn from_f64(f: f64) -> Self {
    let bits = u128::from(f.to_bits());
    Scalar::Bits { bits, size: 8 }
}
#[inline]
pub fn to_bits(self, target_size: Size) -> EvalResult<'tcx, u128> { pub fn to_bits(self, target_size: Size) -> EvalResult<'tcx, u128> {
match self { match self {
Scalar::Bits { bits, size } => { Scalar::Bits { bits, size } => {
...@@ -157,6 +204,7 @@ pub fn to_bits(self, target_size: Size) -> EvalResult<'tcx, u128> { ...@@ -157,6 +204,7 @@ pub fn to_bits(self, target_size: Size) -> EvalResult<'tcx, u128> {
} }
} }
#[inline]
pub fn to_ptr(self) -> EvalResult<'tcx, Pointer> { pub fn to_ptr(self) -> EvalResult<'tcx, Pointer> {
match self { match self {
Scalar::Bits { bits: 0, .. } => err!(InvalidNullPointerUsage), Scalar::Bits { bits: 0, .. } => err!(InvalidNullPointerUsage),
...@@ -165,6 +213,7 @@ pub fn to_ptr(self) -> EvalResult<'tcx, Pointer> { ...@@ -165,6 +213,7 @@ pub fn to_ptr(self) -> EvalResult<'tcx, Pointer> {
} }
} }
#[inline]
pub fn is_bits(self) -> bool { pub fn is_bits(self) -> bool {
match self { match self {
Scalar::Bits { .. } => true, Scalar::Bits { .. } => true,
...@@ -172,6 +221,7 @@ pub fn is_bits(self) -> bool { ...@@ -172,6 +221,7 @@ pub fn is_bits(self) -> bool {
} }
} }
#[inline]
pub fn is_ptr(self) -> bool { pub fn is_ptr(self) -> bool {
match self { match self {
Scalar::Ptr(_) => true, Scalar::Ptr(_) => true,
...@@ -209,6 +259,13 @@ pub fn to_u32(self) -> EvalResult<'static, u32> { ...@@ -209,6 +259,13 @@ pub fn to_u32(self) -> EvalResult<'static, u32> {
Ok(b as u32) Ok(b as u32)
} }
/// Interprets this scalar as a 64-bit unsigned integer.
pub fn to_u64(self) -> EvalResult<'static, u64> {
    let size = Size::from_bits(64);
    let raw = self.to_bits(size)?;
    assert_eq!(u128::from(raw as u64), raw);
    Ok(raw as u64)
}
pub fn to_usize(self, cx: impl HasDataLayout) -> EvalResult<'static, u64> { pub fn to_usize(self, cx: impl HasDataLayout) -> EvalResult<'static, u64> {
let b = self.to_bits(cx.data_layout().pointer_size)?; let b = self.to_bits(cx.data_layout().pointer_size)?;
assert_eq!(b as u64 as u128, b); assert_eq!(b as u64 as u128, b);
...@@ -231,12 +288,30 @@ pub fn to_i32(self) -> EvalResult<'static, i32> { ...@@ -231,12 +288,30 @@ pub fn to_i32(self) -> EvalResult<'static, i32> {
Ok(b as i32) Ok(b as i32)
} }
/// Interprets this scalar as a 64-bit signed integer.
pub fn to_i64(self) -> EvalResult<'static, i64> {
    let size = Size::from_bits(64);
    let raw = self.to_bits(size)?;
    let value = sign_extend(raw, size) as i128;
    assert_eq!(i128::from(value as i64), value);
    Ok(value as i64)
}
pub fn to_isize(self, cx: impl HasDataLayout) -> EvalResult<'static, i64> { pub fn to_isize(self, cx: impl HasDataLayout) -> EvalResult<'static, i64> {
let b = self.to_bits(cx.data_layout().pointer_size)?; let b = self.to_bits(cx.data_layout().pointer_size)?;
let b = sign_extend(b, cx.data_layout().pointer_size) as i128; let b = sign_extend(b, cx.data_layout().pointer_size) as i128;
assert_eq!(b as i64 as i128, b); assert_eq!(b as i64 as i128, b);
Ok(b as i64) Ok(b as i64)
} }
/// Interprets this scalar's 4-byte bit pattern as an `f32`.
#[inline]
pub fn to_f32(self) -> EvalResult<'static, f32> {
    let raw = self.to_u32()?;
    Ok(f32::from_bits(raw))
}
/// Interprets this scalar's 8-byte bit pattern as an `f64`.
#[inline]
pub fn to_f64(self) -> EvalResult<'static, f64> {
    let raw = self.to_u64()?;
    Ok(f64::from_bits(raw))
}
} }
impl From<Pointer> for Scalar { impl From<Pointer> for Scalar {
...@@ -308,6 +383,16 @@ pub fn to_char(self) -> EvalResult<'tcx, char> { ...@@ -308,6 +383,16 @@ pub fn to_char(self) -> EvalResult<'tcx, char> {
self.not_undef()?.to_char() self.not_undef()?.to_char()
} }
/// Like `Scalar::to_f32`, but errors out on `Undef` first.
#[inline(always)]
pub fn to_f32(self) -> EvalResult<'tcx, f32> {
    let scalar = self.not_undef()?;
    scalar.to_f32()
}
/// Like `Scalar::to_f64`, but errors out on `Undef` first.
#[inline(always)]
pub fn to_f64(self) -> EvalResult<'tcx, f64> {
    let scalar = self.not_undef()?;
    scalar.to_f64()
}
#[inline(always)] #[inline(always)]
pub fn to_u8(self) -> EvalResult<'tcx, u8> { pub fn to_u8(self) -> EvalResult<'tcx, u8> {
self.not_undef()?.to_u8() self.not_undef()?.to_u8()
...@@ -318,6 +403,11 @@ pub fn to_u32(self) -> EvalResult<'tcx, u32> { ...@@ -318,6 +403,11 @@ pub fn to_u32(self) -> EvalResult<'tcx, u32> {
self.not_undef()?.to_u32() self.not_undef()?.to_u32()
} }
/// Like `Scalar::to_u64`, but errors out on `Undef` first.
#[inline(always)]
pub fn to_u64(self) -> EvalResult<'tcx, u64> {
    let scalar = self.not_undef()?;
    scalar.to_u64()
}
#[inline(always)] #[inline(always)]
pub fn to_usize(self, cx: impl HasDataLayout) -> EvalResult<'tcx, u64> { pub fn to_usize(self, cx: impl HasDataLayout) -> EvalResult<'tcx, u64> {
self.not_undef()?.to_usize(cx) self.not_undef()?.to_usize(cx)
...@@ -333,6 +423,11 @@ pub fn to_i32(self) -> EvalResult<'tcx, i32> { ...@@ -333,6 +423,11 @@ pub fn to_i32(self) -> EvalResult<'tcx, i32> {
self.not_undef()?.to_i32() self.not_undef()?.to_i32()
} }
/// Like `Scalar::to_i64`, but errors out on `Undef` first.
#[inline(always)]
pub fn to_i64(self) -> EvalResult<'tcx, i64> {
    let scalar = self.not_undef()?;
    scalar.to_i64()
}
#[inline(always)] #[inline(always)]
pub fn to_isize(self, cx: impl HasDataLayout) -> EvalResult<'tcx, i64> { pub fn to_isize(self, cx: impl HasDataLayout) -> EvalResult<'tcx, i64> {
self.not_undef()?.to_isize(cx) self.not_undef()?.to_isize(cx)
......
...@@ -14,8 +14,7 @@ ...@@ -14,8 +14,7 @@
use rustc_apfloat::ieee::{Single, Double}; use rustc_apfloat::ieee::{Single, Double};
use rustc::mir::interpret::{ use rustc::mir::interpret::{
Scalar, EvalResult, Pointer, PointerArithmetic, EvalErrorKind, Scalar, EvalResult, Pointer, PointerArithmetic, EvalErrorKind, truncate
truncate, sign_extend
}; };
use rustc::mir::CastKind; use rustc::mir::CastKind;
use rustc_apfloat::Float; use rustc_apfloat::Float;
...@@ -70,10 +69,7 @@ pub fn cast( ...@@ -70,10 +69,7 @@ pub fn cast(
.discriminant_for_variant(*self.tcx, index) .discriminant_for_variant(*self.tcx, index)
.val; .val;
return self.write_scalar( return self.write_scalar(
Scalar::Bits { Scalar::from_uint(discr_val, dst_layout.size),
bits: discr_val,
size: dst_layout.size.bytes() as u8,
},
dest); dest);
} }
} }
...@@ -198,41 +194,39 @@ fn cast_from_int( ...@@ -198,41 +194,39 @@ fn cast_from_int(
match dest_layout.ty.sty { match dest_layout.ty.sty {
Int(_) | Uint(_) => { Int(_) | Uint(_) => {
let v = self.truncate(v, dest_layout); let v = self.truncate(v, dest_layout);
Ok(Scalar::Bits { Ok(Scalar::from_uint(v, dest_layout.size))
bits: v,
size: dest_layout.size.bytes() as u8,
})
} }
Float(FloatTy::F32) if signed => Ok(Scalar::Bits { Float(FloatTy::F32) if signed => Ok(Scalar::from_uint(
bits: Single::from_i128(v as i128).value.to_bits(), Single::from_i128(v as i128).value.to_bits(),
size: 4, Size::from_bits(32)
}), )),
Float(FloatTy::F64) if signed => Ok(Scalar::Bits { Float(FloatTy::F64) if signed => Ok(Scalar::from_uint(
bits: Double::from_i128(v as i128).value.to_bits(), Double::from_i128(v as i128).value.to_bits(),
size: 8, Size::from_bits(64)
}), )),
Float(FloatTy::F32) => Ok(Scalar::Bits { Float(FloatTy::F32) => Ok(Scalar::from_uint(
bits: Single::from_u128(v).value.to_bits(), Single::from_u128(v).value.to_bits(),
size: 4, Size::from_bits(32)
}), )),
Float(FloatTy::F64) => Ok(Scalar::Bits { Float(FloatTy::F64) => Ok(Scalar::from_uint(
bits: Double::from_u128(v).value.to_bits(), Double::from_u128(v).value.to_bits(),
size: 8, Size::from_bits(64)
}), )),
Char => { Char => {
assert_eq!(v as u8 as u128, v); // `u8` to `char` cast
Ok(Scalar::Bits { bits: v, size: 4 }) debug_assert_eq!(v as u8 as u128, v);
Ok(Scalar::from_uint(v, Size::from_bytes(4)))
}, },
// No alignment check needed for raw pointers. // No alignment check needed for raw pointers.
// But we have to truncate to target ptr size. // But we have to truncate to target ptr size.
RawPtr(_) => { RawPtr(_) => {
Ok(Scalar::Bits { Ok(Scalar::from_uint(
bits: self.memory.truncate_to_ptr(v).0 as u128, self.truncate_to_ptr(v).0,
size: self.memory.pointer_size().bytes() as u8, self.pointer_size(),
}) ))
}, },
// Casts to bool are not permitted by rustc, no need to handle them here. // Casts to bool are not permitted by rustc, no need to handle them here.
...@@ -251,56 +245,40 @@ fn cast_from_float( ...@@ -251,56 +245,40 @@ fn cast_from_float(
match dest_ty.sty { match dest_ty.sty {
// float -> uint // float -> uint
Uint(t) => { Uint(t) => {
let width = t.bit_width().unwrap_or(self.memory.pointer_size().bits() as usize); let width = t.bit_width().unwrap_or(self.pointer_size().bits() as usize);
let v = match fty { let v = match fty {
FloatTy::F32 => Single::from_bits(bits).to_u128(width).value, FloatTy::F32 => Single::from_bits(bits).to_u128(width).value,
FloatTy::F64 => Double::from_bits(bits).to_u128(width).value, FloatTy::F64 => Double::from_bits(bits).to_u128(width).value,
}; };
// This should already fit the bit width // This should already fit the bit width
Ok(Scalar::Bits { Ok(Scalar::from_uint(v, Size::from_bits(width as u64)))
bits: v,
size: (width / 8) as u8,
})
}, },
// float -> int // float -> int
Int(t) => { Int(t) => {
let width = t.bit_width().unwrap_or(self.memory.pointer_size().bits() as usize); let width = t.bit_width().unwrap_or(self.pointer_size().bits() as usize);
let v = match fty { let v = match fty {
FloatTy::F32 => Single::from_bits(bits).to_i128(width).value, FloatTy::F32 => Single::from_bits(bits).to_i128(width).value,
FloatTy::F64 => Double::from_bits(bits).to_i128(width).value, FloatTy::F64 => Double::from_bits(bits).to_i128(width).value,
}; };
// We got an i128, but we may need something smaller. We have to truncate ourselves. Ok(Scalar::from_int(v, Size::from_bits(width as u64)))
let truncated = truncate(v as u128, Size::from_bits(width as u64));
assert_eq!(sign_extend(truncated, Size::from_bits(width as u64)) as i128, v,
"truncating and extending changed the value?!?");
Ok(Scalar::Bits {
bits: truncated,
size: (width / 8) as u8,
})
}, },
// f64 -> f32 // f64 -> f32
Float(FloatTy::F32) if fty == FloatTy::F64 => { Float(FloatTy::F32) if fty == FloatTy::F64 => {
Ok(Scalar::Bits { Ok(Scalar::from_uint(
bits: Single::to_bits(Double::from_bits(bits).convert(&mut false).value), Single::to_bits(Double::from_bits(bits).convert(&mut false).value),
size: 4, Size::from_bits(32),
}) ))
}, },
// f32 -> f64 // f32 -> f64
Float(FloatTy::F64) if fty == FloatTy::F32 => { Float(FloatTy::F64) if fty == FloatTy::F32 => {
Ok(Scalar::Bits { Ok(Scalar::from_uint(
bits: Double::to_bits(Single::from_bits(bits).convert(&mut false).value), Double::to_bits(Single::from_bits(bits).convert(&mut false).value),
size: 8, Size::from_bits(64),
}) ))
}, },
// identity cast // identity cast
Float(FloatTy:: F64) => Ok(Scalar::Bits { Float(FloatTy:: F64) => Ok(Scalar::from_uint(bits, Size::from_bits(64))),
bits, Float(FloatTy:: F32) => Ok(Scalar::from_uint(bits, Size::from_bits(32))),
size: 8,
}),
Float(FloatTy:: F32) => Ok(Scalar::Bits {
bits,
size: 4,
}),
_ => err!(Unimplemented(format!("float to {:?} cast", dest_ty))), _ => err!(Unimplemented(format!("float to {:?} cast", dest_ty))),
} }
} }
......
...@@ -270,7 +270,8 @@ fn data_layout(&self) -> &layout::TargetDataLayout { ...@@ -270,7 +270,8 @@ fn data_layout(&self) -> &layout::TargetDataLayout {
} }
impl<'c, 'b, 'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> HasDataLayout impl<'c, 'b, 'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> HasDataLayout
for &'c &'b mut EvalContext<'a, 'mir, 'tcx, M> { for &'c &'b mut EvalContext<'a, 'mir, 'tcx, M>
{
#[inline] #[inline]
fn data_layout(&self) -> &layout::TargetDataLayout { fn data_layout(&self) -> &layout::TargetDataLayout {
&self.tcx.data_layout &self.tcx.data_layout
......
...@@ -41,7 +41,7 @@ fn numeric_intrinsic<'tcx>( ...@@ -41,7 +41,7 @@ fn numeric_intrinsic<'tcx>(
"bswap" => (bits << extra).swap_bytes(), "bswap" => (bits << extra).swap_bytes(),
_ => bug!("not a numeric intrinsic: {}", name), _ => bug!("not a numeric intrinsic: {}", name),
}; };
Ok(Scalar::Bits { bits: bits_out, size: size.bytes() as u8 }) Ok(Scalar::from_uint(bits_out, size))
} }
impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
...@@ -59,30 +59,21 @@ pub fn emulate_intrinsic( ...@@ -59,30 +59,21 @@ pub fn emulate_intrinsic(
"min_align_of" => { "min_align_of" => {
let elem_ty = substs.type_at(0); let elem_ty = substs.type_at(0);
let elem_align = self.layout_of(elem_ty)?.align.abi(); let elem_align = self.layout_of(elem_ty)?.align.abi();
let align_val = Scalar::Bits { let align_val = Scalar::from_uint(elem_align, dest.layout.size);
bits: elem_align as u128,
size: dest.layout.size.bytes() as u8,
};
self.write_scalar(align_val, dest)?; self.write_scalar(align_val, dest)?;
} }
"size_of" => { "size_of" => {
let ty = substs.type_at(0); let ty = substs.type_at(0);
let size = self.layout_of(ty)?.size.bytes() as u128; let size = self.layout_of(ty)?.size.bytes() as u128;
let size_val = Scalar::Bits { let size_val = Scalar::from_uint(size, dest.layout.size);
bits: size,
size: dest.layout.size.bytes() as u8,
};
self.write_scalar(size_val, dest)?; self.write_scalar(size_val, dest)?;
} }
"type_id" => { "type_id" => {
let ty = substs.type_at(0); let ty = substs.type_at(0);
let type_id = self.tcx.type_id_hash(ty) as u128; let type_id = self.tcx.type_id_hash(ty) as u128;
let id_val = Scalar::Bits { let id_val = Scalar::from_uint(type_id, dest.layout.size);
bits: type_id,
size: dest.layout.size.bytes() as u8,
};
self.write_scalar(id_val, dest)?; self.write_scalar(id_val, dest)?;
} }
"ctpop" | "cttz" | "cttz_nonzero" | "ctlz" | "ctlz_nonzero" | "bswap" => { "ctpop" | "cttz" | "cttz_nonzero" | "ctlz" | "ctlz_nonzero" | "bswap" => {
......
...@@ -23,7 +23,8 @@ ...@@ -23,7 +23,8 @@
use rustc::ty::{self, Instance, query::TyCtxtAt}; use rustc::ty::{self, Instance, query::TyCtxtAt};
use rustc::ty::layout::{self, Align, TargetDataLayout, Size, HasDataLayout}; use rustc::ty::layout::{self, Align, TargetDataLayout, Size, HasDataLayout};
use rustc::mir::interpret::{Pointer, AllocId, Allocation, ScalarMaybeUndef, GlobalId, use rustc::mir::interpret::{Pointer, AllocId, Allocation, ScalarMaybeUndef, GlobalId,
EvalResult, Scalar, EvalErrorKind, AllocType, truncate}; EvalResult, Scalar, EvalErrorKind, AllocType, PointerArithmetic,
truncate};
pub use rustc::mir::interpret::{write_target_uint, read_target_uint}; pub use rustc::mir::interpret::{write_target_uint, read_target_uint};
use rustc_data_structures::fx::{FxHashSet, FxHashMap, FxHasher}; use rustc_data_structures::fx::{FxHashSet, FxHashMap, FxHasher};
...@@ -60,6 +61,14 @@ fn data_layout(&self) -> &TargetDataLayout { ...@@ -60,6 +61,14 @@ fn data_layout(&self) -> &TargetDataLayout {
&self.tcx.data_layout &self.tcx.data_layout
} }
} }
// `Memory` is sometimes reached through a `&&mut` chain of references;
// forwarding `HasDataLayout` for that receiver shape (mirroring the
// `&&mut EvalContext` impl) lets layout-dependent helpers such as
// `PointerArithmetic` be used on it directly.
impl<'a, 'b, 'c, 'mir, 'tcx, M: Machine<'mir, 'tcx>> HasDataLayout
for &'b &'c mut Memory<'a, 'mir, 'tcx, M>
{
#[inline]
fn data_layout(&self) -> &TargetDataLayout {
&self.tcx.data_layout
}
}
impl<'a, 'mir, 'tcx, M> Eq for Memory<'a, 'mir, 'tcx, M> impl<'a, 'mir, 'tcx, M> Eq for Memory<'a, 'mir, 'tcx, M>
where M: Machine<'mir, 'tcx>, where M: Machine<'mir, 'tcx>,
...@@ -277,14 +286,6 @@ pub fn deallocate( ...@@ -277,14 +286,6 @@ pub fn deallocate(
Ok(()) Ok(())
} }
pub fn pointer_size(&self) -> Size {
self.tcx.data_layout.pointer_size
}
pub fn endianness(&self) -> layout::Endian {
self.tcx.data_layout.endian
}
/// Check that the pointer is aligned AND non-NULL. This supports scalars /// Check that the pointer is aligned AND non-NULL. This supports scalars
/// for the benefit of other parts of miri that need to check alignment even for ZST. /// for the benefit of other parts of miri that need to check alignment even for ZST.
pub fn check_align(&self, ptr: Scalar, required_align: Align) -> EvalResult<'tcx> { pub fn check_align(&self, ptr: Scalar, required_align: Align) -> EvalResult<'tcx> {
...@@ -773,7 +774,6 @@ pub fn read_scalar( ...@@ -773,7 +774,6 @@ pub fn read_scalar(
) -> EvalResult<'tcx, ScalarMaybeUndef> { ) -> EvalResult<'tcx, ScalarMaybeUndef> {
// Make sure we don't read part of a pointer as a pointer // Make sure we don't read part of a pointer as a pointer
self.check_relocation_edges(ptr, size)?; self.check_relocation_edges(ptr, size)?;
let endianness = self.endianness();
// get_bytes_unchecked tests alignment // get_bytes_unchecked tests alignment
let bytes = self.get_bytes_unchecked(ptr, size, ptr_align.min(self.int_align(size)))?; let bytes = self.get_bytes_unchecked(ptr, size, ptr_align.min(self.int_align(size)))?;
// Undef check happens *after* we established that the alignment is correct. // Undef check happens *after* we established that the alignment is correct.
...@@ -784,7 +784,7 @@ pub fn read_scalar( ...@@ -784,7 +784,7 @@ pub fn read_scalar(
return Ok(ScalarMaybeUndef::Undef); return Ok(ScalarMaybeUndef::Undef);
} }
// Now we do the actual reading // Now we do the actual reading
let bits = read_target_uint(endianness, bytes).unwrap(); let bits = read_target_uint(self.tcx.data_layout.endian, bytes).unwrap();
// See if we got a pointer // See if we got a pointer
if size != self.pointer_size() { if size != self.pointer_size() {
if self.relocations(ptr, size)?.len() != 0 { if self.relocations(ptr, size)?.len() != 0 {
...@@ -801,10 +801,7 @@ pub fn read_scalar( ...@@ -801,10 +801,7 @@ pub fn read_scalar(
} }
} }
// We don't. Just return the bits. // We don't. Just return the bits.
Ok(ScalarMaybeUndef::Scalar(Scalar::Bits { Ok(ScalarMaybeUndef::Scalar(Scalar::from_uint(bits, size)))
bits,
size: size.bytes() as u8,
}))
} }
pub fn read_ptr_sized(&self, ptr: Pointer, ptr_align: Align) pub fn read_ptr_sized(&self, ptr: Pointer, ptr_align: Align)
...@@ -820,8 +817,6 @@ pub fn write_scalar( ...@@ -820,8 +817,6 @@ pub fn write_scalar(
val: ScalarMaybeUndef, val: ScalarMaybeUndef,
type_size: Size, type_size: Size,
) -> EvalResult<'tcx> { ) -> EvalResult<'tcx> {
let endianness = self.endianness();
let val = match val { let val = match val {
ScalarMaybeUndef::Scalar(scalar) => scalar, ScalarMaybeUndef::Scalar(scalar) => scalar,
ScalarMaybeUndef::Undef => return self.mark_definedness(ptr, type_size, false), ScalarMaybeUndef::Undef => return self.mark_definedness(ptr, type_size, false),
...@@ -835,7 +830,7 @@ pub fn write_scalar( ...@@ -835,7 +830,7 @@ pub fn write_scalar(
Scalar::Bits { bits, size } => { Scalar::Bits { bits, size } => {
assert_eq!(size as u64, type_size.bytes()); assert_eq!(size as u64, type_size.bytes());
assert_eq!(truncate(bits, Size::from_bytes(size.into())), bits, debug_assert_eq!(truncate(bits, Size::from_bytes(size.into())), bits,
"Unexpected value of size {} when writing to memory", size); "Unexpected value of size {} when writing to memory", size);
bits bits
}, },
...@@ -843,8 +838,9 @@ pub fn write_scalar( ...@@ -843,8 +838,9 @@ pub fn write_scalar(
{ {
// get_bytes_mut checks alignment // get_bytes_mut checks alignment
let endian = self.tcx.data_layout.endian;
let dst = self.get_bytes_mut(ptr, type_size, ptr_align)?; let dst = self.get_bytes_mut(ptr, type_size, ptr_align)?;
write_target_uint(endianness, dst, bytes).unwrap(); write_target_uint(endian, dst, bytes).unwrap();
} }
// See if we have to also write a relocation // See if we have to also write a relocation
......
...@@ -42,10 +42,7 @@ pub fn new_slice( ...@@ -42,10 +42,7 @@ pub fn new_slice(
len: u64, len: u64,
cx: impl HasDataLayout cx: impl HasDataLayout
) -> Self { ) -> Self {
Value::ScalarPair(val.into(), Scalar::Bits { Value::ScalarPair(val.into(), Scalar::from_uint(len, cx.data_layout().pointer_size).into())
bits: len as u128,
size: cx.data_layout().pointer_size.bytes() as u8,
}.into())
} }
pub fn new_dyn_trait(val: Scalar, vtable: Pointer) -> Self { pub fn new_dyn_trait(val: Scalar, vtable: Pointer) -> Self {
......
...@@ -9,7 +9,7 @@ ...@@ -9,7 +9,7 @@
// except according to those terms. // except according to those terms.
use rustc::mir; use rustc::mir;
use rustc::ty::{self, layout::TyLayout}; use rustc::ty::{self, layout::{Size, TyLayout}};
use syntax::ast::FloatTy; use syntax::ast::FloatTy;
use rustc_apfloat::ieee::{Double, Single}; use rustc_apfloat::ieee::{Double, Single};
use rustc_apfloat::Float; use rustc_apfloat::Float;
...@@ -105,10 +105,8 @@ fn binary_float_op( ...@@ -105,10 +105,8 @@ fn binary_float_op(
($ty:path, $size:expr) => {{ ($ty:path, $size:expr) => {{
let l = <$ty>::from_bits(l); let l = <$ty>::from_bits(l);
let r = <$ty>::from_bits(r); let r = <$ty>::from_bits(r);
let bitify = |res: ::rustc_apfloat::StatusAnd<$ty>| Scalar::Bits { let bitify = |res: ::rustc_apfloat::StatusAnd<$ty>|
bits: res.value.to_bits(), Scalar::from_uint(res.value.to_bits(), Size::from_bytes($size));
size: $size,
};
let val = match bin_op { let val = match bin_op {
Eq => Scalar::from_bool(l == r), Eq => Scalar::from_bool(l == r),
Ne => Scalar::from_bool(l != r), Ne => Scalar::from_bool(l != r),
...@@ -169,10 +167,7 @@ fn binary_int_op( ...@@ -169,10 +167,7 @@ fn binary_int_op(
} }
}; };
let truncated = self.truncate(result, left_layout); let truncated = self.truncate(result, left_layout);
return Ok((Scalar::Bits { return Ok((Scalar::from_uint(truncated, size), oflo));
bits: truncated,
size: size.bytes() as u8,
}, oflo));
} }
// For the remaining ops, the types must be the same on both sides // For the remaining ops, the types must be the same on both sides
...@@ -220,7 +215,7 @@ fn binary_int_op( ...@@ -220,7 +215,7 @@ fn binary_int_op(
Rem | Div => { Rem | Div => {
// int_min / -1 // int_min / -1
if r == -1 && l == (1 << (size.bits() - 1)) { if r == -1 && l == (1 << (size.bits() - 1)) {
return Ok((Scalar::Bits { bits: l, size: size.bytes() as u8 }, true)); return Ok((Scalar::from_uint(l, size), true));
} }
}, },
_ => {}, _ => {},
...@@ -232,16 +227,14 @@ fn binary_int_op( ...@@ -232,16 +227,14 @@ fn binary_int_op(
let max = 1 << (size.bits() - 1); let max = 1 << (size.bits() - 1);
oflo = result >= max || result < -max; oflo = result >= max || result < -max;
} }
// this may be out-of-bounds for the result type, so we have to truncate ourselves
let result = result as u128; let result = result as u128;
let truncated = self.truncate(result, left_layout); let truncated = self.truncate(result, left_layout);
return Ok((Scalar::Bits { return Ok((Scalar::from_uint(truncated, size), oflo));
bits: truncated,
size: size.bytes() as u8,
}, oflo));
} }
} }
let size = left_layout.size.bytes() as u8; let size = left_layout.size;
// only ints left // only ints left
let val = match bin_op { let val = match bin_op {
...@@ -253,11 +246,12 @@ fn binary_int_op( ...@@ -253,11 +246,12 @@ fn binary_int_op(
Gt => Scalar::from_bool(l > r), Gt => Scalar::from_bool(l > r),
Ge => Scalar::from_bool(l >= r), Ge => Scalar::from_bool(l >= r),
BitOr => Scalar::Bits { bits: l | r, size }, BitOr => Scalar::from_uint(l | r, size),
BitAnd => Scalar::Bits { bits: l & r, size }, BitAnd => Scalar::from_uint(l & r, size),
BitXor => Scalar::Bits { bits: l ^ r, size }, BitXor => Scalar::from_uint(l ^ r, size),
Add | Sub | Mul | Rem | Div => { Add | Sub | Mul | Rem | Div => {
debug_assert!(!left_layout.abi.is_signed());
let op: fn(u128, u128) -> (u128, bool) = match bin_op { let op: fn(u128, u128) -> (u128, bool) = match bin_op {
Add => u128::overflowing_add, Add => u128::overflowing_add,
Sub => u128::overflowing_sub, Sub => u128::overflowing_sub,
...@@ -270,10 +264,7 @@ fn binary_int_op( ...@@ -270,10 +264,7 @@ fn binary_int_op(
}; };
let (result, oflo) = op(l, r); let (result, oflo) = op(l, r);
let truncated = self.truncate(result, left_layout); let truncated = self.truncate(result, left_layout);
return Ok((Scalar::Bits { return Ok((Scalar::from_uint(truncated, size), oflo || truncated != result));
bits: truncated,
size,
}, oflo || truncated != result));
} }
_ => { _ => {
...@@ -373,7 +364,7 @@ pub fn unary_op( ...@@ -373,7 +364,7 @@ pub fn unary_op(
(Neg, FloatTy::F64) => Double::to_bits(-Double::from_bits(val)), (Neg, FloatTy::F64) => Double::to_bits(-Double::from_bits(val)),
_ => bug!("Invalid float op {:?}", un_op) _ => bug!("Invalid float op {:?}", un_op)
}; };
Ok(Scalar::Bits { bits: res, size: layout.size.bytes() as u8 }) Ok(Scalar::from_uint(res, layout.size))
} }
_ => { _ => {
assert!(layout.ty.is_integral()); assert!(layout.ty.is_integral());
...@@ -386,10 +377,7 @@ pub fn unary_op( ...@@ -386,10 +377,7 @@ pub fn unary_op(
} }
}; };
// res needs tuncating // res needs tuncating
Ok(Scalar::Bits { Ok(Scalar::from_uint(self.truncate(res, layout), layout.size))
bits: self.truncate(res, layout),
size: layout.size.bytes() as u8,
})
} }
} }
} }
......
...@@ -20,7 +20,7 @@ ...@@ -20,7 +20,7 @@
use rustc_data_structures::indexed_vec::Idx; use rustc_data_structures::indexed_vec::Idx;
use rustc::mir::interpret::{ use rustc::mir::interpret::{
GlobalId, Scalar, EvalResult, Pointer, ScalarMaybeUndef GlobalId, Scalar, EvalResult, Pointer, ScalarMaybeUndef, PointerArithmetic
}; };
use super::{EvalContext, Machine, Value, ValTy, Operand, OpTy, MemoryKind}; use super::{EvalContext, Machine, Value, ValTy, Operand, OpTy, MemoryKind};
...@@ -344,10 +344,7 @@ pub fn mplace_subslice( ...@@ -344,10 +344,7 @@ pub fn mplace_subslice(
ty::Array(inner, _) => ty::Array(inner, _) =>
(None, self.tcx.mk_array(inner, inner_len)), (None, self.tcx.mk_array(inner, inner_len)),
ty::Slice(..) => { ty::Slice(..) => {
let len = Scalar::Bits { let len = Scalar::from_uint(inner_len, self.pointer_size());
bits: inner_len.into(),
size: self.memory.pointer_size().bytes() as u8
};
(Some(len), base.layout.ty) (Some(len), base.layout.ty)
} }
_ => _ =>
...@@ -716,10 +713,7 @@ pub fn write_discriminant_index( ...@@ -716,10 +713,7 @@ pub fn write_discriminant_index(
let discr_val = (discr_val << shift) >> shift; let discr_val = (discr_val << shift) >> shift;
let discr_dest = self.place_field(dest, 0)?; let discr_dest = self.place_field(dest, 0)?;
self.write_scalar(Scalar::Bits { self.write_scalar(Scalar::from_uint(discr_val, size), discr_dest)?;
bits: discr_val,
size: size.bytes() as u8,
}, discr_dest)?;
} }
layout::Variants::NicheFilling { layout::Variants::NicheFilling {
dataful_variant, dataful_variant,
...@@ -733,10 +727,10 @@ pub fn write_discriminant_index( ...@@ -733,10 +727,10 @@ pub fn write_discriminant_index(
self.place_field(dest, 0)?; self.place_field(dest, 0)?;
let niche_value = ((variant_index - niche_variants.start()) as u128) let niche_value = ((variant_index - niche_variants.start()) as u128)
.wrapping_add(niche_start); .wrapping_add(niche_start);
self.write_scalar(Scalar::Bits { self.write_scalar(
bits: niche_value, Scalar::from_uint(niche_value, niche_dest.layout.size),
size: niche_dest.layout.size.bytes() as u8, niche_dest
}, niche_dest)?; )?;
} }
} }
} }
...@@ -766,11 +760,11 @@ pub(super) fn unpack_dyn_trait(&self, mplace: MPlaceTy<'tcx>) ...@@ -766,11 +760,11 @@ pub(super) fn unpack_dyn_trait(&self, mplace: MPlaceTy<'tcx>)
let layout = self.layout_of(ty)?; let layout = self.layout_of(ty)?;
// More sanity checks // More sanity checks
let (size, align) = self.read_size_and_align_from_vtable(vtable)?; if cfg!(debug_assertions) {
assert_eq!(size, layout.size); let (size, align) = self.read_size_and_align_from_vtable(vtable)?;
assert_eq!(align.abi(), layout.align.abi()); // only ABI alignment is preserved assert_eq!(size, layout.size);
// FIXME: More checks for the vtable? We could make sure it is exactly assert_eq!(align.abi(), layout.align.abi()); // only ABI alignment is preserved
// the one one would expect for this type. }
let mplace = MPlaceTy { let mplace = MPlaceTy {
mplace: MemPlace { extra: None, ..*mplace }, mplace: MemPlace { extra: None, ..*mplace },
......
...@@ -14,7 +14,7 @@ ...@@ -14,7 +14,7 @@
use rustc::mir; use rustc::mir;
use rustc::ty::layout::LayoutOf; use rustc::ty::layout::LayoutOf;
use rustc::mir::interpret::{EvalResult, Scalar}; use rustc::mir::interpret::{EvalResult, Scalar, PointerArithmetic};
use super::{EvalContext, Machine}; use super::{EvalContext, Machine};
...@@ -269,12 +269,9 @@ fn eval_rvalue_into_place( ...@@ -269,12 +269,9 @@ fn eval_rvalue_into_place(
let src = self.eval_place(place)?; let src = self.eval_place(place)?;
let mplace = self.force_allocation(src)?; let mplace = self.force_allocation(src)?;
let len = mplace.len(&self)?; let len = mplace.len(&self)?;
let size = self.memory.pointer_size().bytes() as u8; let size = self.pointer_size();
self.write_scalar( self.write_scalar(
Scalar::Bits { Scalar::from_uint(len, size),
bits: len as u128,
size,
},
dest, dest,
)?; )?;
} }
...@@ -294,12 +291,9 @@ fn eval_rvalue_into_place( ...@@ -294,12 +291,9 @@ fn eval_rvalue_into_place(
let layout = self.layout_of(ty)?; let layout = self.layout_of(ty)?;
assert!(!layout.is_unsized(), assert!(!layout.is_unsized(),
"SizeOf nullary MIR operator called for unsized type"); "SizeOf nullary MIR operator called for unsized type");
let size = self.memory.pointer_size().bytes() as u8; let size = self.pointer_size();
self.write_scalar( self.write_scalar(
Scalar::Bits { Scalar::from_uint(layout.size.bytes(), size),
bits: layout.size.bytes() as u128,
size,
},
dest, dest,
)?; )?;
} }
...@@ -313,11 +307,8 @@ fn eval_rvalue_into_place( ...@@ -313,11 +307,8 @@ fn eval_rvalue_into_place(
Discriminant(ref place) => { Discriminant(ref place) => {
let place = self.eval_place(place)?; let place = self.eval_place(place)?;
let discr_val = self.read_discriminant(self.place_to_op(place)?)?.0; let discr_val = self.read_discriminant(self.place_to_op(place)?)?.0;
let size = dest.layout.size.bytes() as u8; let size = dest.layout.size;
self.write_scalar(Scalar::Bits { self.write_scalar(Scalar::from_uint(discr_val, size), dest)?;
bits: discr_val,
size,
}, dest)?;
} }
} }
......
...@@ -16,7 +16,7 @@ ...@@ -16,7 +16,7 @@
use syntax::source_map::Span; use syntax::source_map::Span;
use rustc_target::spec::abi::Abi; use rustc_target::spec::abi::Abi;
use rustc::mir::interpret::{EvalResult, Scalar}; use rustc::mir::interpret::{EvalResult, Scalar, PointerArithmetic};
use super::{ use super::{
EvalContext, Machine, Value, OpTy, Place, PlaceTy, ValTy, Operand, StackPopCleanup EvalContext, Machine, Value, OpTy, Place, PlaceTy, ValTy, Operand, StackPopCleanup
}; };
...@@ -60,10 +60,7 @@ pub(super) fn eval_terminator( ...@@ -60,10 +60,7 @@ pub(super) fn eval_terminator(
for (index, &const_int) in values.iter().enumerate() { for (index, &const_int) in values.iter().enumerate() {
// Compare using binary_op // Compare using binary_op
let const_int = Scalar::Bits { let const_int = Scalar::from_uint(const_int, discr.layout.size);
bits: const_int,
size: discr.layout.size.bytes() as u8
};
let (res, _) = self.binary_op(mir::BinOp::Eq, let (res, _) = self.binary_op(mir::BinOp::Eq,
discr, discr,
ValTy { value: Value::Scalar(const_int.into()), layout: discr.layout } ValTy { value: Value::Scalar(const_int.into()), layout: discr.layout }
...@@ -411,7 +408,7 @@ fn eval_fn_call( ...@@ -411,7 +408,7 @@ fn eval_fn_call(
} }
// cannot use the shim here, because that will only result in infinite recursion // cannot use the shim here, because that will only result in infinite recursion
ty::InstanceDef::Virtual(_, idx) => { ty::InstanceDef::Virtual(_, idx) => {
let ptr_size = self.memory.pointer_size(); let ptr_size = self.pointer_size();
let ptr_align = self.tcx.data_layout.pointer_align; let ptr_align = self.tcx.data_layout.pointer_align;
let ptr = self.ref_to_mplace(self.read_value(args[0])?)?; let ptr = self.ref_to_mplace(self.read_value(args[0])?)?;
let vtable = ptr.vtable()?; let vtable = ptr.vtable()?;
......
...@@ -10,7 +10,7 @@ ...@@ -10,7 +10,7 @@
use rustc::ty::{self, Ty}; use rustc::ty::{self, Ty};
use rustc::ty::layout::{Size, Align, LayoutOf}; use rustc::ty::layout::{Size, Align, LayoutOf};
use rustc::mir::interpret::{Scalar, Pointer, EvalResult}; use rustc::mir::interpret::{Scalar, Pointer, EvalResult, PointerArithmetic};
use syntax::ast::Mutability; use syntax::ast::Mutability;
...@@ -35,7 +35,7 @@ pub fn get_vtable( ...@@ -35,7 +35,7 @@ pub fn get_vtable(
let size = layout.size.bytes(); let size = layout.size.bytes();
let align = layout.align.abi(); let align = layout.align.abi();
let ptr_size = self.memory.pointer_size(); let ptr_size = self.pointer_size();
let ptr_align = self.tcx.data_layout.pointer_align; let ptr_align = self.tcx.data_layout.pointer_align;
let methods = self.tcx.vtable_methods(trait_ref); let methods = self.tcx.vtable_methods(trait_ref);
let vtable = self.memory.allocate( let vtable = self.memory.allocate(
...@@ -49,15 +49,10 @@ pub fn get_vtable( ...@@ -49,15 +49,10 @@ pub fn get_vtable(
self.memory.write_ptr_sized(vtable, ptr_align, Scalar::Ptr(drop).into())?; self.memory.write_ptr_sized(vtable, ptr_align, Scalar::Ptr(drop).into())?;
let size_ptr = vtable.offset(ptr_size, &self)?; let size_ptr = vtable.offset(ptr_size, &self)?;
self.memory.write_ptr_sized(size_ptr, ptr_align, Scalar::Bits { self.memory.write_ptr_sized(size_ptr, ptr_align, Scalar::from_uint(size, ptr_size).into())?;
bits: size as u128,
size: ptr_size.bytes() as u8,
}.into())?;
let align_ptr = vtable.offset(ptr_size * 2, &self)?; let align_ptr = vtable.offset(ptr_size * 2, &self)?;
self.memory.write_ptr_sized(align_ptr, ptr_align, Scalar::Bits { self.memory.write_ptr_sized(align_ptr, ptr_align,
bits: align as u128, Scalar::from_uint(align, ptr_size).into())?;
size: ptr_size.bytes() as u8,
}.into())?;
for (i, method) in methods.iter().enumerate() { for (i, method) in methods.iter().enumerate() {
if let Some((def_id, substs)) = *method { if let Some((def_id, substs)) = *method {
...@@ -97,7 +92,7 @@ pub fn read_size_and_align_from_vtable( ...@@ -97,7 +92,7 @@ pub fn read_size_and_align_from_vtable(
&self, &self,
vtable: Pointer, vtable: Pointer,
) -> EvalResult<'tcx, (Size, Align)> { ) -> EvalResult<'tcx, (Size, Align)> {
let pointer_size = self.memory.pointer_size(); let pointer_size = self.pointer_size();
let pointer_align = self.tcx.data_layout.pointer_align; let pointer_align = self.tcx.data_layout.pointer_align;
let size = self.memory.read_ptr_sized(vtable.offset(pointer_size, self)?,pointer_align)? let size = self.memory.read_ptr_sized(vtable.offset(pointer_size, self)?,pointer_align)?
.to_bits(pointer_size)? as u64; .to_bits(pointer_size)? as u64;
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
use rustc::ty::{self, Ty}; use rustc::ty::{self, Ty};
use rustc_data_structures::fx::FxHashSet; use rustc_data_structures::fx::FxHashSet;
use rustc::mir::interpret::{ use rustc::mir::interpret::{
Scalar, AllocType, EvalResult, ScalarMaybeUndef, EvalErrorKind Scalar, AllocType, EvalResult, ScalarMaybeUndef, EvalErrorKind, PointerArithmetic
}; };
use super::{ use super::{
...@@ -118,7 +118,7 @@ fn validate_scalar( ...@@ -118,7 +118,7 @@ fn validate_scalar(
bits bits
}, },
Scalar::Ptr(_) => { Scalar::Ptr(_) => {
let ptr_size = self.memory.pointer_size(); let ptr_size = self.pointer_size();
let ptr_max = u128::max_value() >> (128 - ptr_size.bits()); let ptr_max = u128::max_value() >> (128 - ptr_size.bits());
return if lo > hi { return if lo > hi {
if lo - hi == 1 { if lo - hi == 1 {
...@@ -376,6 +376,7 @@ pub fn validate_operand( ...@@ -376,6 +376,7 @@ pub fn validate_operand(
"non-pointer vtable in fat pointer", path "non-pointer vtable in fat pointer", path
), ),
} }
// FIXME: More checks for the vtable.
} }
ty::Slice(..) | ty::Str => { ty::Slice(..) | ty::Str => {
match ptr.extra.unwrap().to_usize(self) { match ptr.extra.unwrap().to_usize(self) {
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册