提交 ef2177cf 编写于 作者: O Oliver Schneider

Rename ByVal(Pair) to Scalar(Pair)

上级 1606e137
......@@ -394,10 +394,10 @@ fn hash_stable<W: StableHasherResult>(&self,
mem::discriminant(self).hash_stable(hcx, hasher);
match *self {
ByVal(val) => {
Scalar(val) => {
val.hash_stable(hcx, hasher);
}
ByValPair(a, b) => {
ScalarPair(a, b) => {
a.hash_stable(hcx, hasher);
b.hash_stable(hcx, hasher);
}
......@@ -410,8 +410,8 @@ fn hash_stable<W: StableHasherResult>(&self,
}
impl_stable_hash_for!(enum mir::interpret::Value {
ByVal(v),
ByValPair(a, b),
Scalar(v),
ScalarPair(a, b),
ByRef(ptr, align)
});
......
......@@ -5,14 +5,14 @@
use super::{EvalResult, MemoryPointer, PointerArithmetic, Allocation};
/// Represents a constant value in Rust. ByVal and ByValPair are optimizations which
/// Represents a constant value in Rust. Scalar and ScalarPair are optimizations that
/// match Value's optimizations for easy conversions between these two types
#[derive(Clone, Copy, Debug, Eq, PartialEq, PartialOrd, Ord, RustcEncodable, RustcDecodable, Hash)]
pub enum ConstValue<'tcx> {
/// Used only for types with layout::abi::Scalar ABI and ZSTs which use Scalar::Undef
ByVal(Scalar),
Scalar(Scalar),
/// Used only for types with layout::abi::ScalarPair
ByValPair(Scalar, Scalar),
ScalarPair(Scalar, Scalar),
/// Used only for the remaining cases. An allocation + offset into the allocation
ByRef(&'tcx Allocation, Size),
}
......@@ -22,8 +22,8 @@ impl<'tcx> ConstValue<'tcx> {
pub fn from_byval_value(val: Value) -> Self {
match val {
Value::ByRef(..) => bug!(),
Value::ByValPair(a, b) => ConstValue::ByValPair(a, b),
Value::ByVal(val) => ConstValue::ByVal(val),
Value::ScalarPair(a, b) => ConstValue::ScalarPair(a, b),
Value::Scalar(val) => ConstValue::Scalar(val),
}
}
......@@ -31,22 +31,22 @@ pub fn from_byval_value(val: Value) -> Self {
pub fn to_byval_value(&self) -> Option<Value> {
match *self {
ConstValue::ByRef(..) => None,
ConstValue::ByValPair(a, b) => Some(Value::ByValPair(a, b)),
ConstValue::ByVal(val) => Some(Value::ByVal(val)),
ConstValue::ScalarPair(a, b) => Some(Value::ScalarPair(a, b)),
ConstValue::Scalar(val) => Some(Value::Scalar(val)),
}
}
#[inline]
pub fn from_primval(val: Scalar) -> Self {
ConstValue::ByVal(val)
ConstValue::Scalar(val)
}
#[inline]
pub fn to_primval(&self) -> Option<Scalar> {
match *self {
ConstValue::ByRef(..) => None,
ConstValue::ByValPair(..) => None,
ConstValue::ByVal(val) => Some(val),
ConstValue::ScalarPair(..) => None,
ConstValue::Scalar(val) => Some(val),
}
}
......@@ -74,13 +74,13 @@ pub fn to_ptr(&self) -> Option<MemoryPointer> {
/// whether the pointer is supposed to be aligned or not (also see Place).
///
/// For optimization of a few very common cases, there is also a representation for a pair of
/// primitive values (`ByValPair`). It allows Miri to avoid making allocations for checked binary
/// primitive values (`ScalarPair`). It allows Miri to avoid making allocations for checked binary
/// operations and fat pointers. This idea was taken from rustc's codegen.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, RustcEncodable, RustcDecodable, Hash)]
pub enum Value {
ByRef(Pointer, Align),
ByVal(Scalar),
ByValPair(Scalar, Scalar),
Scalar(Scalar),
ScalarPair(Scalar, Scalar),
}
impl<'tcx> ty::TypeFoldable<'tcx> for Value {
......@@ -166,15 +166,15 @@ pub fn is_null(self) -> EvalResult<'tcx, bool> {
}
pub fn to_value_with_len(self, len: u64) -> Value {
Value::ByValPair(self.primval, Scalar::from_u128(len as u128))
Value::ScalarPair(self.primval, Scalar::from_u128(len as u128))
}
pub fn to_value_with_vtable(self, vtable: MemoryPointer) -> Value {
Value::ByValPair(self.primval, Scalar::Ptr(vtable))
Value::ScalarPair(self.primval, Scalar::Ptr(vtable))
}
pub fn to_value(self) -> Value {
Value::ByVal(self.primval)
Value::Scalar(self.primval)
}
}
......
......@@ -1153,7 +1153,7 @@ pub fn fmt_successor_labels(&self) -> Vec<Cow<'static, str>> {
.map(|&u| {
let mut s = String::new();
print_miri_value(
Value::ByVal(Scalar::Bytes(u)),
Value::Scalar(Scalar::Bytes(u)),
switch_ty,
&mut s,
).unwrap();
......@@ -1893,19 +1893,19 @@ pub fn fmt_const_val<W: Write>(fmt: &mut W, const_val: &ty::Const) -> fmt::Resul
pub fn print_miri_value<W: Write>(value: Value, ty: Ty, f: &mut W) -> fmt::Result {
use ty::TypeVariants::*;
match (value, &ty.sty) {
(Value::ByVal(Scalar::Bytes(0)), &TyBool) => write!(f, "false"),
(Value::ByVal(Scalar::Bytes(1)), &TyBool) => write!(f, "true"),
(Value::ByVal(Scalar::Bytes(bits)), &TyFloat(ast::FloatTy::F32)) =>
(Value::Scalar(Scalar::Bytes(0)), &TyBool) => write!(f, "false"),
(Value::Scalar(Scalar::Bytes(1)), &TyBool) => write!(f, "true"),
(Value::Scalar(Scalar::Bytes(bits)), &TyFloat(ast::FloatTy::F32)) =>
write!(f, "{}f32", Single::from_bits(bits)),
(Value::ByVal(Scalar::Bytes(bits)), &TyFloat(ast::FloatTy::F64)) =>
(Value::Scalar(Scalar::Bytes(bits)), &TyFloat(ast::FloatTy::F64)) =>
write!(f, "{}f64", Double::from_bits(bits)),
(Value::ByVal(Scalar::Bytes(n)), &TyUint(ui)) => write!(f, "{:?}{}", n, ui),
(Value::ByVal(Scalar::Bytes(n)), &TyInt(i)) => write!(f, "{:?}{}", n as i128, i),
(Value::ByVal(Scalar::Bytes(n)), &TyChar) =>
(Value::Scalar(Scalar::Bytes(n)), &TyUint(ui)) => write!(f, "{:?}{}", n, ui),
(Value::Scalar(Scalar::Bytes(n)), &TyInt(i)) => write!(f, "{:?}{}", n as i128, i),
(Value::Scalar(Scalar::Bytes(n)), &TyChar) =>
write!(f, "{:?}", ::std::char::from_u32(n as u32).unwrap()),
(Value::ByVal(Scalar::Undef), &TyFnDef(did, _)) =>
(Value::Scalar(Scalar::Undef), &TyFnDef(did, _)) =>
write!(f, "{}", item_path_str(did)),
(Value::ByValPair(Scalar::Ptr(ptr), Scalar::Bytes(len)),
(Value::ScalarPair(Scalar::Ptr(ptr), Scalar::Bytes(len)),
&TyRef(_, &ty::TyS { sty: TyStr, .. }, _)) => {
ty::tls::with(|tcx| {
match tcx.alloc_map.lock().get(ptr.alloc_id) {
......
......@@ -105,7 +105,7 @@ pub fn from_const(bx: &Builder<'a, 'tcx>,
}
let val = match val {
ConstValue::ByVal(x) => {
ConstValue::Scalar(x) => {
let scalar = match layout.abi {
layout::Abi::Scalar(ref x) => x,
_ => bug!("from_const: invalid ByVal layout: {:#?}", layout)
......@@ -118,10 +118,10 @@ pub fn from_const(bx: &Builder<'a, 'tcx>,
);
OperandValue::Immediate(llval)
},
ConstValue::ByValPair(a, b) => {
ConstValue::ScalarPair(a, b) => {
let (a_scalar, b_scalar) = match layout.abi {
layout::Abi::ScalarPair(ref a, ref b) => (a, b),
_ => bug!("from_const: invalid ByValPair layout: {:#?}", layout)
_ => bug!("from_const: invalid ScalarPair layout: {:#?}", layout)
};
let a_llval = primval_to_llvm(
bx.cx,
......
......@@ -183,7 +183,7 @@ pub fn const_eval_literal(
let s = s.as_str();
let id = self.tcx.allocate_bytes(s.as_bytes());
let ptr = MemoryPointer::zero(id);
ConstValue::ByValPair(
ConstValue::ScalarPair(
Scalar::Ptr(ptr),
Scalar::from_u128(s.len() as u128),
)
......@@ -191,16 +191,16 @@ pub fn const_eval_literal(
LitKind::ByteStr(ref data) => {
let id = self.tcx.allocate_bytes(data);
let ptr = MemoryPointer::zero(id);
ConstValue::ByVal(Scalar::Ptr(ptr))
ConstValue::Scalar(Scalar::Ptr(ptr))
},
LitKind::Byte(n) => ConstValue::ByVal(Scalar::Bytes(n as u128)),
LitKind::Byte(n) => ConstValue::Scalar(Scalar::Bytes(n as u128)),
LitKind::Int(n, _) if neg => {
let n = n as i128;
let n = n.overflowing_neg().0;
let n = clamp(n as u128);
ConstValue::ByVal(Scalar::Bytes(n))
ConstValue::Scalar(Scalar::Bytes(n))
},
LitKind::Int(n, _) => ConstValue::ByVal(Scalar::Bytes(clamp(n))),
LitKind::Int(n, _) => ConstValue::Scalar(Scalar::Bytes(clamp(n))),
LitKind::Float(n, fty) => {
parse_float(n, fty)
}
......@@ -211,8 +211,8 @@ pub fn const_eval_literal(
};
parse_float(n, fty)
}
LitKind::Bool(b) => ConstValue::ByVal(Scalar::Bytes(b as u128)),
LitKind::Char(c) => ConstValue::ByVal(Scalar::Bytes(c as u128)),
LitKind::Bool(b) => ConstValue::Scalar(Scalar::Bytes(b as u128)),
LitKind::Char(c) => ConstValue::Scalar(Scalar::Bytes(c as u128)),
};
Literal::Value {
value: ty::Const::from_const_value(self.tcx, lit, ty)
......
......@@ -1083,14 +1083,14 @@ pub fn compare_const_vals<'a, 'tcx>(
if let ty::TyStr = rty.sty {
match (a.to_byval_value(), b.to_byval_value()) {
(
Some(Value::ByValPair(
Some(Value::ScalarPair(
Scalar::Ptr(ptr_a),
Scalar::Bits {
bits: size_a,
defined: tcx.data_layout.pointer_size.bits() as u8,
},
)),
Some(Value::ByValPair(
Some(Value::ScalarPair(
Scalar::Ptr(ptr_b),
Scalar::Bits {
bits: size_b,
......@@ -1129,7 +1129,7 @@ fn lit_to_const<'a, 'tcx>(lit: &'tcx ast::LitKind,
let s = s.as_str();
let id = tcx.allocate_bytes(s.as_bytes());
let ptr = MemoryPointer::zero(id);
ConstValue::ByValPair(
ConstValue::ScalarPair(
Scalar::Ptr(ptr),
Scalar::from_u128(s.len() as u128),
)
......@@ -1137,9 +1137,9 @@ fn lit_to_const<'a, 'tcx>(lit: &'tcx ast::LitKind,
LitKind::ByteStr(ref data) => {
let id = tcx.allocate_bytes(data);
let ptr = MemoryPointer::zero(id);
ConstValue::ByVal(Scalar::Ptr(ptr))
ConstValue::Scalar(Scalar::Ptr(ptr))
},
LitKind::Byte(n) => ConstValue::ByVal(Scalar::Bytes(n as u128)),
LitKind::Byte(n) => ConstValue::Scalar(Scalar::Bytes(n as u128)),
LitKind::Int(n, _) => {
enum Int {
Signed(IntTy),
......@@ -1173,7 +1173,7 @@ enum Int {
Int::Signed(IntTy::I128)| Int::Unsigned(UintTy::U128) => n,
_ => bug!(),
};
ConstValue::ByVal(Scalar::Bytes(n))
ConstValue::Scalar(Scalar::Bytes(n))
},
LitKind::Float(n, fty) => {
parse_float(n, fty, neg)?
......@@ -1185,8 +1185,8 @@ enum Int {
};
parse_float(n, fty, neg)?
}
LitKind::Bool(b) => ConstValue::ByVal(Scalar::Bytes(b as u128)),
LitKind::Char(c) => ConstValue::ByVal(Scalar::Bytes(c as u128)),
LitKind::Bool(b) => ConstValue::Scalar(Scalar::Bytes(b as u128)),
LitKind::Char(c) => ConstValue::Scalar(Scalar::Bytes(c as u128)),
};
Ok(ty::Const::from_const_value(tcx, lit, ty))
}
......@@ -1222,5 +1222,5 @@ pub fn parse_float<'tcx>(
}
};
Ok(ConstValue::ByVal(Scalar::Bytes(bits)))
Ok(ConstValue::Scalar(Scalar::Bytes(bits)))
}
......@@ -100,16 +100,16 @@ pub fn value_to_const_value<'tcx>(
) -> &'tcx ty::Const<'tcx> {
let layout = ecx.tcx.layout_of(ty::ParamEnv::reveal_all().and(ty)).unwrap();
match (val, &layout.abi) {
(Value::ByVal(Scalar::Undef), _) if layout.is_zst() => {},
(Value::Scalar(Scalar::Undef), _) if layout.is_zst() => {},
(Value::ByRef(..), _) |
(Value::ByVal(_), &layout::Abi::Scalar(_)) |
(Value::ByValPair(..), &layout::Abi::ScalarPair(..)) => {},
(Value::Scalar(_), &layout::Abi::Scalar(_)) |
(Value::ScalarPair(..), &layout::Abi::ScalarPair(..)) => {},
_ => bug!("bad value/layout combo: {:#?}, {:#?}", val, layout),
}
let val = (|| {
match val {
Value::ByVal(val) => Ok(ConstValue::ByVal(val)),
Value::ByValPair(a, b) => Ok(ConstValue::ByValPair(a, b)),
Value::Scalar(val) => Ok(ConstValue::Scalar(val)),
Value::ScalarPair(a, b) => Ok(ConstValue::ScalarPair(a, b)),
Value::ByRef(ptr, align) => {
let ptr = ptr.primval.to_ptr().unwrap();
let alloc = ecx.memory.get(ptr.alloc_id)?;
......@@ -419,7 +419,7 @@ pub fn const_val_field<'a, 'tcx>(
let layout = ecx.layout_of(ty)?;
let (ptr, align) = match value {
Value::ByRef(ptr, align) => (ptr, align),
Value::ByValPair(..) | Value::ByVal(_) => {
Value::ScalarPair(..) | Value::Scalar(_) => {
let ptr = ecx.alloc_ptr(ty)?.into();
ecx.write_value_to_ptr(value, ptr, layout.align, ty)?;
(ptr, layout.align)
......@@ -436,9 +436,9 @@ pub fn const_val_field<'a, 'tcx>(
new_value = ecx.try_read_by_ref(new_value, layout.ty)?;
use rustc_data_structures::indexed_vec::Idx;
match (value, new_value) {
(Value::ByVal(_), Value::ByRef(..)) |
(Value::ByValPair(..), Value::ByRef(..)) |
(Value::ByVal(_), Value::ByValPair(..)) => bug!(
(Value::Scalar(_), Value::ByRef(..)) |
(Value::ScalarPair(..), Value::ByRef(..)) |
(Value::Scalar(_), Value::ScalarPair(..)) => bug!(
"field {} of {:?} yielded {:?}",
field.index(),
value,
......@@ -469,7 +469,7 @@ pub fn const_variant_index<'a, 'tcx>(
let mut ecx = mk_eval_cx(tcx, instance, param_env).unwrap();
let value = ecx.const_value_to_value(val, ty)?;
let (ptr, align) = match value {
Value::ByValPair(..) | Value::ByVal(_) => {
Value::ScalarPair(..) | Value::Scalar(_) => {
let layout = ecx.layout_of(ty)?;
let ptr = ecx.memory.allocate(layout.size, layout.align, Some(MemoryKind::Stack))?;
let ptr: Pointer = ptr.into();
......
......@@ -76,7 +76,7 @@ pub struct Frame<'mir, 'tcx: 'mir> {
/// `None` represents a local that is currently dead, while a live local
/// can either directly contain `Scalar` or refer to some part of an `Allocation`.
///
/// Before being initialized, arguments are `Value::ByVal(Scalar::Undef)` and other locals are `None`.
/// Before being initialized, arguments are `Value::Scalar(Scalar::Undef)` and other locals are `None`.
pub locals: IndexVec<mir::Local, Option<Value>>,
////////////////////////////////////////////////////////////////////////////////
......@@ -230,7 +230,7 @@ pub fn cur_frame(&self) -> usize {
pub fn str_to_value(&mut self, s: &str) -> EvalResult<'tcx, Value> {
let ptr = self.memory.allocate_bytes(s.as_bytes());
Ok(Value::ByValPair(
Ok(Value::ScalarPair(
Scalar::Ptr(ptr),
Scalar::from_u128(s.len() as u128),
))
......@@ -247,8 +247,8 @@ pub fn const_value_to_value(
let id = self.memory.allocate_value(alloc.clone(), Some(MemoryKind::Stack))?;
Ok(Value::ByRef(MemoryPointer::new(id, offset).into(), alloc.align))
},
ConstValue::ByValPair(a, b) => Ok(Value::ByValPair(a, b)),
ConstValue::ByVal(val) => Ok(Value::ByVal(val)),
ConstValue::ScalarPair(a, b) => Ok(Value::ScalarPair(a, b)),
ConstValue::Scalar(val) => Ok(Value::Scalar(val)),
}
}
......@@ -408,7 +408,7 @@ pub fn push_stack_frame(
::log_settings::settings().indentation += 1;
let locals = if mir.local_decls.len() > 1 {
let mut locals = IndexVec::from_elem(Some(Value::ByVal(Scalar::Undef)), &mir.local_decls);
let mut locals = IndexVec::from_elem(Some(Value::Scalar(Scalar::Undef)), &mir.local_decls);
match self.tcx.describe_def(instance.def_id()) {
// statics and constants don't have `Storage*` statements, no need to look for them
Some(Def::Static(..)) | Some(Def::Const(..)) | Some(Def::AssociatedConst(..)) => {},
......@@ -668,9 +668,9 @@ pub(super) fn eval_rvalue_into_place(
match (src.value, self.type_is_fat_ptr(dest_ty)) {
(Value::ByRef { .. }, _) |
// pointers to extern types
(Value::ByVal(_),_) |
(Value::Scalar(_),_) |
// slices and trait objects to other slices/trait objects
(Value::ByValPair(..), true) => {
(Value::ScalarPair(..), true) => {
let valty = ValTy {
value: src.value,
ty: dest_ty,
......@@ -678,9 +678,9 @@ pub(super) fn eval_rvalue_into_place(
self.write_value(valty, dest)?;
}
// slices and trait objects to thin pointers (dropping the metadata)
(Value::ByValPair(data, _), false) => {
(Value::ScalarPair(data, _), false) => {
let valty = ValTy {
value: Value::ByVal(data),
value: Value::Scalar(data),
ty: dest_ty,
};
self.write_value(valty, dest)?;
......@@ -707,7 +707,7 @@ pub(super) fn eval_rvalue_into_place(
let src_val = self.value_to_primval(src)?;
let dest_val = self.cast_primval(src_val, src.ty, dest_ty)?;
let valty = ValTy {
value: Value::ByVal(dest_val),
value: Value::Scalar(dest_val),
ty: dest_ty,
};
self.write_value(valty, dest)?;
......@@ -729,7 +729,7 @@ pub(super) fn eval_rvalue_into_place(
).ok_or_else(|| EvalErrorKind::TypeckError.into());
let fn_ptr = self.memory.create_fn_alloc(instance?);
let valty = ValTy {
value: Value::ByVal(Scalar::Ptr(fn_ptr)),
value: Value::Scalar(Scalar::Ptr(fn_ptr)),
ty: dest_ty,
};
self.write_value(valty, dest)?;
......@@ -765,7 +765,7 @@ pub(super) fn eval_rvalue_into_place(
);
let fn_ptr = self.memory.create_fn_alloc(instance);
let valty = ValTy {
value: Value::ByVal(Scalar::Ptr(fn_ptr)),
value: Value::Scalar(Scalar::Ptr(fn_ptr)),
ty: dest_ty,
};
self.write_value(valty, dest)?;
......@@ -1094,13 +1094,13 @@ pub fn value_to_primval(
match self.follow_by_ref_value(value, ty)? {
Value::ByRef { .. } => bug!("follow_by_ref_value can't result in `ByRef`"),
Value::ByVal(primval) => {
Value::Scalar(primval) => {
// TODO: Do we really want insta-UB here?
self.ensure_valid_value(primval, ty)?;
Ok(primval)
}
Value::ByValPair(..) => bug!("value_to_primval can't work with fat pointers"),
Value::ScalarPair(..) => bug!("value_to_primval can't work with fat pointers"),
}
}
......@@ -1119,7 +1119,7 @@ pub fn write_primval(
dest_ty: Ty<'tcx>,
) -> EvalResult<'tcx> {
let valty = ValTy {
value: Value::ByVal(val),
value: Value::Scalar(val),
ty: dest_ty,
};
self.write_value(valty, dest)
......@@ -1132,7 +1132,7 @@ pub fn write_value(
) -> EvalResult<'tcx> {
//trace!("Writing {:?} to {:?} at type {:?}", src_val, dest, dest_ty);
// Note that it is really important that the type here is the right one, and matches the type things are read at.
// In case `src_val` is a `ByValPair`, we don't do any magic here to handle padding properly, which is only
// In case `src_val` is a `ScalarPair`, we don't do any magic here to handle padding properly, which is only
// correct if we never look at this data with the wrong type.
match dest {
......@@ -1211,7 +1211,7 @@ pub fn write_value_to_ptr(
Value::ByRef(ptr, align) => {
self.memory.copy(ptr, align.min(layout.align), dest, dest_align.min(layout.align), layout.size, false)
}
Value::ByVal(primval) => {
Value::Scalar(primval) => {
let signed = match layout.abi {
layout::Abi::Scalar(ref scal) => match scal.value {
layout::Primitive::Int(_, signed) => signed,
......@@ -1222,11 +1222,11 @@ pub fn write_value_to_ptr(
};
self.memory.write_primval(dest, dest_align, primval, layout.size, signed)
}
Value::ByValPair(a_val, b_val) => {
Value::ScalarPair(a_val, b_val) => {
trace!("write_value_to_ptr valpair: {:#?}", layout);
let (a, b) = match layout.abi {
layout::Abi::ScalarPair(ref a, ref b) => (&a.value, &b.value),
_ => bug!("write_value_to_ptr: invalid ByValPair layout: {:#?}", layout)
_ => bug!("write_value_to_ptr: invalid ScalarPair layout: {:#?}", layout)
};
let (a_size, b_size) = (a.size(&self), b.size(&self));
let a_ptr = dest;
......@@ -1405,7 +1405,7 @@ pub fn validate_ptr_target(
}
pub fn try_read_by_ref(&self, mut val: Value, ty: Ty<'tcx>) -> EvalResult<'tcx, Value> {
// Convert to ByVal or ByValPair if possible
// Convert to Scalar or ScalarPair if possible
if let Value::ByRef(ptr, align) = val {
if let Some(read_val) = self.try_read_value(ptr, align, ty)? {
val = read_val;
......@@ -1419,7 +1419,7 @@ pub fn try_read_value(&self, ptr: Pointer, ptr_align: Align, ty: Ty<'tcx>) -> Ev
self.memory.check_align(ptr, ptr_align)?;
if layout.size.bytes() == 0 {
return Ok(Some(Value::ByVal(Scalar::Undef)));
return Ok(Some(Value::Scalar(Scalar::Undef)));
}
let ptr = ptr.to_ptr()?;
......@@ -1430,7 +1430,7 @@ pub fn try_read_value(&self, ptr: Pointer, ptr_align: Align, ty: Ty<'tcx>) -> Ev
match layout.abi {
layout::Abi::Scalar(..) => {
let primval = self.memory.read_primval(ptr, ptr_align, layout.size)?;
Ok(Some(Value::ByVal(primval)))
Ok(Some(Value::Scalar(primval)))
}
layout::Abi::ScalarPair(ref a, ref b) => {
let (a, b) = (&a.value, &b.value);
......@@ -1440,7 +1440,7 @@ pub fn try_read_value(&self, ptr: Pointer, ptr_align: Align, ty: Ty<'tcx>) -> Ev
let b_ptr = ptr.offset(b_offset, self)?.into();
let a_val = self.memory.read_primval(a_ptr, ptr_align, a_size)?;
let b_val = self.memory.read_primval(b_ptr, ptr_align, b_size)?;
Ok(Some(Value::ByValPair(a_val, b_val)))
Ok(Some(Value::ScalarPair(a_val, b_val)))
}
_ => Ok(None),
}
......@@ -1563,7 +1563,7 @@ fn unsize_into(
self.place_field(src_place, mir::Field::new(i), src_layout)?;
(self.read_place(src_f_place)?, src_field)
}
Value::ByVal(_) | Value::ByValPair(..) => {
Value::Scalar(_) | Value::ScalarPair(..) => {
let src_field = src_layout.field(&self, i)?;
assert_eq!(src_layout.fields.offset(i).bytes(), 0);
assert_eq!(src_field.size, src_layout.size);
......@@ -1622,13 +1622,13 @@ pub fn dump_local(&self, place: Place) {
ptr => write!(msg, " integral by ref: {:?}", ptr).unwrap(),
}
}
Ok(Value::ByVal(val)) => {
Ok(Value::Scalar(val)) => {
write!(msg, " {:?}", val).unwrap();
if let Scalar::Ptr(ptr) = val {
allocs.push(ptr.alloc_id);
}
}
Ok(Value::ByValPair(val1, val2)) => {
Ok(Value::ScalarPair(val1, val2)) => {
write!(msg, " ({:?}, {:?})", val1, val2).unwrap();
if let Scalar::Ptr(ptr) = val1 {
allocs.push(ptr.alloc_id);
......@@ -1797,7 +1797,7 @@ pub fn storage_live(&mut self, local: mir::Local) -> Option<Value> {
trace!("{:?} is now live", local);
// StorageLive *always* kills the value that's currently stored
mem::replace(&mut self.locals[local], Some(Value::ByVal(Scalar::Undef)))
mem::replace(&mut self.locals[local], Some(Value::Scalar(Scalar::Undef)))
}
/// Returns the old value of the local
......
......@@ -932,8 +932,8 @@ fn into_ptr(
Value::ByRef(ptr, align) => {
self.memory().read_ptr_sized(ptr.to_ptr()?, align)?
}
Value::ByVal(ptr) |
Value::ByValPair(ptr, _) => ptr,
Value::Scalar(ptr) |
Value::ScalarPair(ptr, _) => ptr,
}.into())
}
......@@ -952,9 +952,9 @@ fn into_ptr_vtable_pair(
Ok((ptr, vtable))
}
Value::ByValPair(ptr, vtable) => Ok((ptr.into(), vtable.to_ptr()?)),
Value::ScalarPair(ptr, vtable) => Ok((ptr.into(), vtable.to_ptr()?)),
Value::ByVal(Scalar::Undef) => err!(ReadUndefBytes),
Value::Scalar(Scalar::Undef) => err!(ReadUndefBytes),
_ => bug!("expected ptr and vtable, got {:?}", value),
}
}
......@@ -973,13 +973,13 @@ fn into_slice(
)?.to_bytes()? as u64;
Ok((ptr, len))
}
Value::ByValPair(ptr, val) => {
Value::ScalarPair(ptr, val) => {
let len = val.to_u128()?;
assert_eq!(len as u64 as u128, len);
Ok((ptr.into(), len as u64))
}
Value::ByVal(Scalar::Undef) => err!(ReadUndefBytes),
Value::ByVal(_) => bug!("expected ptr and length, got {:?}", value),
Value::Scalar(Scalar::Undef) => err!(ReadUndefBytes),
Value::Scalar(_) => bug!("expected ptr and length, got {:?}", value),
}
}
}
......
......@@ -32,7 +32,7 @@ pub fn intrinsic_with_overflow(
dest_ty: Ty<'tcx>,
) -> EvalResult<'tcx> {
let (val, overflowed) = self.binop_with_overflow(op, left, right)?;
let val = Value::ByValPair(val, Scalar::from_bool(overflowed));
let val = Value::ScalarPair(val, Scalar::from_bool(overflowed));
let valty = ValTy {
value: val,
ty: dest_ty,
......
......@@ -128,17 +128,17 @@ pub fn read_field(
let field_index = field.index();
let field = base_layout.field(self, field_index)?;
if field.size.bytes() == 0 {
return Ok(Some((Value::ByVal(Scalar::Undef), field.ty)))
return Ok(Some((Value::Scalar(Scalar::Undef), field.ty)))
}
let offset = base_layout.fields.offset(field_index);
match base {
// the field covers the entire type
Value::ByValPair(..) |
Value::ByVal(_) if offset.bytes() == 0 && field.size == base_layout.size => Ok(Some((base, field.ty))),
Value::ScalarPair(..) |
Value::Scalar(_) if offset.bytes() == 0 && field.size == base_layout.size => Ok(Some((base, field.ty))),
// split fat pointers, 2 element tuples, ...
Value::ByValPair(a, b) if base_layout.fields.count() == 2 => {
Value::ScalarPair(a, b) if base_layout.fields.count() == 2 => {
let val = [a, b][field_index];
Ok(Some((Value::ByVal(val), field.ty)))
Ok(Some((Value::Scalar(val), field.ty)))
},
// FIXME(oli-obk): figure out whether we should be calling `try_read_value` here
_ => Ok(None),
......@@ -173,7 +173,7 @@ pub(super) fn eval_and_read_place(
place: &mir::Place<'tcx>,
) -> EvalResult<'tcx, Value> {
// Shortcut for things like accessing a fat pointer's field,
// which would otherwise (in the `eval_place` path) require moving a `ByValPair` to memory
// which would otherwise (in the `eval_place` path) require moving a `ScalarPair` to memory
// and returning an `Place::Ptr` to it
if let Some(val) = self.try_read_place(place)? {
return Ok(val);
......@@ -250,8 +250,8 @@ pub fn place_field(
Place::Local { frame, local } => {
match (&self.stack[frame].get_local(local)?, &base_layout.abi) {
// in case the field covers the entire type, just return the value
(&Value::ByVal(_), &layout::Abi::Scalar(_)) |
(&Value::ByValPair(..), &layout::Abi::ScalarPair(..))
(&Value::Scalar(_), &layout::Abi::Scalar(_)) |
(&Value::ScalarPair(..), &layout::Abi::ScalarPair(..))
if offset.bytes() == 0 && field.size == base_layout.size =>
{
return Ok((base, field));
......
......@@ -52,7 +52,7 @@ fn drop(
let instance = match ty.sty {
ty::TyDynamic(..) => {
let vtable = match arg {
Value::ByValPair(_, Scalar::Ptr(vtable)) => vtable,
Value::ScalarPair(_, Scalar::Ptr(vtable)) => vtable,
_ => bug!("expected fat ptr, got {:?}", arg),
};
match self.read_drop_type_from_vtable(vtable)? {
......
......@@ -359,7 +359,7 @@ fn eval_fn_call(
self.write_value(valty, dest)?;
}
}
Value::ByVal(Scalar::Undef) => {}
Value::Scalar(Scalar::Undef) => {}
other => {
trace!("{:#?}, {:#?}", other, layout);
let mut layout = layout;
......
......@@ -67,8 +67,8 @@ pub fn read_drop_type_from_vtable(
let pointer_align = self.tcx.data_layout.pointer_align;
match self.read_ptr(vtable, pointer_align, self.tcx.mk_nil_ptr())? {
// some values don't need to call a drop impl, so the value is null
Value::ByVal(Scalar::Bytes(0)) => Ok(None),
Value::ByVal(Scalar::Ptr(drop_fn)) => self.memory.get_fn(drop_fn).map(Some),
Value::Scalar(Scalar::Bytes(0)) => Ok(None),
Value::Scalar(Scalar::Ptr(drop_fn)) => self.memory.get_fn(drop_fn).map(Some),
_ => err!(ReadBytesAsPointer),
}
}
......
......@@ -1245,13 +1245,13 @@ fn collect_const<'a, 'tcx>(
};
match val {
ConstVal::Unevaluated(..) => bug!("const eval yielded unevaluated const"),
ConstVal::Value(ConstValue::ByValPair(Scalar::Ptr(a), Scalar::Ptr(b))) => {
ConstVal::Value(ConstValue::ScalarPair(Scalar::Ptr(a), Scalar::Ptr(b))) => {
collect_miri(tcx, a.alloc_id, output);
collect_miri(tcx, b.alloc_id, output);
}
ConstVal::Value(ConstValue::ByValPair(_, Scalar::Ptr(ptr))) |
ConstVal::Value(ConstValue::ByValPair(Scalar::Ptr(ptr), _)) |
ConstVal::Value(ConstValue::ByVal(Scalar::Ptr(ptr))) =>
ConstVal::Value(ConstValue::ScalarPair(_, Scalar::Ptr(ptr))) |
ConstVal::Value(ConstValue::ScalarPair(Scalar::Ptr(ptr), _)) |
ConstVal::Value(ConstValue::Scalar(Scalar::Ptr(ptr))) =>
collect_miri(tcx, ptr.alloc_id, output),
ConstVal::Value(ConstValue::ByRef(alloc, _offset)) => {
for &id in alloc.relocations.values() {
......
......@@ -215,7 +215,7 @@ fn eval_place(&mut self, place: &Place<'tcx>) -> Option<Const<'tcx>> {
trace!("field proj on {:?}", proj.base);
let (base, ty, span) = self.eval_place(&proj.base)?;
match base {
Value::ByValPair(a, b) => {
Value::ScalarPair(a, b) => {
trace!("by val pair: {:?}, {:?}", a, b);
let base_layout = self.tcx.layout_of(self.param_env.and(ty)).ok()?;
trace!("layout computed");
......@@ -228,7 +228,7 @@ fn eval_place(&mut self, place: &Place<'tcx>) -> Option<Const<'tcx>> {
};
let field = base_layout.field(cx, field_index).ok()?;
trace!("projection resulted in: {:?}", val);
Some((Value::ByVal(val), field.ty, span))
Some((Value::Scalar(val), field.ty, span))
},
_ => None,
}
......@@ -283,7 +283,7 @@ fn const_prop(
Rvalue::NullaryOp(NullOp::SizeOf, ty) => {
let param_env = self.tcx.param_env(self.source.def_id);
type_size_of(self.tcx, param_env, ty).map(|n| (
Value::ByVal(Scalar::Bytes(n as u128)),
Value::Scalar(Scalar::Bytes(n as u128)),
self.tcx.types.usize,
span,
))
......@@ -305,7 +305,7 @@ fn const_prop(
this.ecx.value_to_primval(ValTy { value: val.0, ty: val.1 })
})?;
let val = self.use_ecx(span, |this| this.ecx.unary_op(op, prim, val.1))?;
Some((Value::ByVal(val), place_ty, span))
Some((Value::Scalar(val), place_ty, span))
}
Rvalue::CheckedBinaryOp(op, ref left, ref right) |
Rvalue::BinaryOp(op, ref left, ref right) => {
......@@ -357,7 +357,7 @@ fn const_prop(
this.ecx.binary_op(op, l, left.1, r, right.1)
})?;
let val = if let Rvalue::CheckedBinaryOp(..) = *rvalue {
Value::ByValPair(
Value::ScalarPair(
val,
Scalar::from_bool(overflow),
)
......@@ -371,7 +371,7 @@ fn const_prop(
});
return None;
}
Value::ByVal(val)
Value::Scalar(val)
};
Some((val, place_ty, span))
},
......@@ -485,7 +485,7 @@ fn visit_terminator_kind(
if let TerminatorKind::Assert { expected, msg, cond, .. } = kind {
if let Some(value) = self.eval_operand(cond) {
trace!("assertion on {:?} should be {:?}", value, expected);
if Value::ByVal(Scalar::from_bool(*expected)) != value.0 {
if Value::Scalar(Scalar::from_bool(*expected)) != value.0 {
// poison all places this operand references so that further code
// doesn't use the invalid value
match cond {
......@@ -520,14 +520,14 @@ fn visit_terminator_kind(
BoundsCheck { ref len, ref index } => {
let len = self.eval_operand(len).expect("len must be const");
let len = match len.0 {
Value::ByVal(Scalar::Bytes(n)) => n,
Value::Scalar(Scalar::Bytes(n)) => n,
_ => bug!("const len not primitive: {:?}", len),
};
let index = self
.eval_operand(index)
.expect("index must be const");
let index = match index.0 {
Value::ByVal(Scalar::Bytes(n)) => n,
Value::Scalar(Scalar::Bytes(n)) => n,
_ => bug!("const index not primitive: {:?}", index),
};
format!(
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册