Fixup long code lines

Parent 35f25bfe
......@@ -240,7 +240,12 @@ fn cast_from_int(
}
}
fn cast_from_float(&self, bits: u128, fty: FloatTy, dest_ty: Ty<'tcx>) -> EvalResult<'tcx, Scalar> {
fn cast_from_float(
&self,
bits: u128,
fty: FloatTy,
dest_ty: Ty<'tcx>
) -> EvalResult<'tcx, Scalar> {
use rustc::ty::TyKind::*;
use rustc_apfloat::FloatConvert;
match dest_ty.sty {
......
......@@ -280,7 +280,9 @@ fn data_layout(&self) -> &layout::TargetDataLayout {
}
}
impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> layout::HasTyCtxt<'tcx> for &'a EvalContext<'a, 'mir, 'tcx, M> {
impl<'a, 'mir, 'tcx, M> layout::HasTyCtxt<'tcx> for &'a EvalContext<'a, 'mir, 'tcx, M>
where M: Machine<'mir, 'tcx>
{
#[inline]
fn tcx<'b>(&'b self) -> TyCtxt<'b, 'tcx, 'tcx> {
*self.tcx
......@@ -340,7 +342,8 @@ pub fn new(
pub(crate) fn with_fresh_body<F: FnOnce(&mut Self) -> R, R>(&mut self, f: F) -> R {
let stack = mem::replace(&mut self.stack, Vec::new());
let steps = mem::replace(&mut self.steps_since_detector_enabled, -STEPS_UNTIL_DETECTOR_ENABLED);
let steps = mem::replace(&mut self.steps_since_detector_enabled,
-STEPS_UNTIL_DETECTOR_ENABLED);
let r = f(self);
self.stack = stack;
self.steps_since_detector_enabled = steps;
......@@ -389,7 +392,8 @@ pub fn str_to_value(&mut self, s: &str) -> EvalResult<'tcx, Value> {
Ok(Value::new_slice(Scalar::Ptr(ptr), s.len() as u64, self.tcx.tcx))
}
pub(super) fn resolve(&self, def_id: DefId, substs: &'tcx Substs<'tcx>) -> EvalResult<'tcx, ty::Instance<'tcx>> {
pub(super) fn resolve(&self, def_id: DefId, substs: &'tcx Substs<'tcx>)
-> EvalResult<'tcx, ty::Instance<'tcx>> {
trace!("resolve: {:?}, {:#?}", def_id, substs);
trace!("substs: {:#?}", self.substs());
trace!("param_env: {:#?}", self.param_env);
......@@ -416,7 +420,9 @@ pub fn load_mir(
) -> EvalResult<'tcx, &'tcx mir::Mir<'tcx>> {
// do not continue if typeck errors occurred (can only occur in local crate)
let did = instance.def_id();
if did.is_local() && self.tcx.has_typeck_tables(did) && self.tcx.typeck_tables_of(did).tainted_by_errors {
if did.is_local()
&& self.tcx.has_typeck_tables(did)
&& self.tcx.typeck_tables_of(did).tainted_by_errors {
return err!(TypeckError);
}
trace!("load mir {:?}", instance);
......@@ -663,7 +669,8 @@ pub fn const_eval(&self, gid: GlobalId<'tcx>) -> EvalResult<'tcx, &'tcx ty::Cons
} else {
self.param_env
};
self.tcx.const_eval(param_env.and(gid)).map_err(|err| EvalErrorKind::ReferencedConstant(err).into())
self.tcx.const_eval(param_env.and(gid))
.map_err(|err| EvalErrorKind::ReferencedConstant(err).into())
}
#[inline(always)]
......@@ -769,7 +776,8 @@ pub fn generate_stacktrace(&self, explicit_span: Option<Span>) -> (Vec<FrameInfo
} else {
last_span = Some(span);
}
let location = if self.tcx.def_key(instance.def_id()).disambiguated_data.data == DefPathData::ClosureExpr {
let location = if self.tcx.def_key(instance.def_id()).disambiguated_data.data
== DefPathData::ClosureExpr {
"closure".to_owned()
} else {
instance.to_string()
......
......@@ -241,7 +241,9 @@ pub fn deallocate(
}
};
let alloc_kind = self.alloc_kind.remove(&ptr.alloc_id).expect("alloc_map out of sync with alloc_kind");
let alloc_kind = self.alloc_kind
.remove(&ptr.alloc_id)
.expect("alloc_map out of sync with alloc_kind");
// It is okay for us to still hold locks on deallocation -- for example, we could store
// data we own in a local, and the local could be deallocated (from StorageDead) before the
......@@ -259,7 +261,11 @@ pub fn deallocate(
}
if let Some((size, align)) = size_and_align {
if size.bytes() != alloc.bytes.len() as u64 || align != alloc.align {
return err!(IncorrectAllocationInformation(size, Size::from_bytes(alloc.bytes.len() as u64), align, alloc.align));
let bytes = Size::from_bytes(alloc.bytes.len() as u64);
return err!(IncorrectAllocationInformation(size,
bytes,
align,
alloc.align));
}
}
......@@ -678,7 +684,8 @@ pub fn copy_repeatedly(
relocations
.iter()
.map(|&(offset, alloc_id)| {
(offset + dest.offset - src.offset + (i * size * relocations.len() as u64), alloc_id)
(offset + dest.offset - src.offset + (i * size * relocations.len() as u64),
alloc_id)
})
);
}
......@@ -707,11 +714,15 @@ pub fn copy_repeatedly(
}
for i in 0..length {
ptr::copy(src_bytes, dest_bytes.offset((size.bytes() * i) as isize), size.bytes() as usize);
ptr::copy(src_bytes,
dest_bytes.offset((size.bytes() * i) as isize),
size.bytes() as usize);
}
} else {
for i in 0..length {
ptr::copy_nonoverlapping(src_bytes, dest_bytes.offset((size.bytes() * i) as isize), size.bytes() as usize);
ptr::copy_nonoverlapping(src_bytes,
dest_bytes.offset((size.bytes() * i) as isize),
size.bytes() as usize);
}
}
}
......@@ -778,7 +789,8 @@ pub fn write_repeat(&mut self, ptr: Scalar, val: u8, count: Size) -> EvalResult<
}
/// Read a *non-ZST* scalar
pub fn read_scalar(&self, ptr: Pointer, ptr_align: Align, size: Size) -> EvalResult<'tcx, ScalarMaybeUndef> {
pub fn read_scalar(&self, ptr: Pointer, ptr_align: Align, size: Size)
-> EvalResult<'tcx, ScalarMaybeUndef> {
// Make sure we don't read part of a pointer as a pointer
self.check_relocation_edges(ptr, size)?;
let endianness = self.endianness();
......@@ -801,7 +813,10 @@ pub fn read_scalar(&self, ptr: Pointer, ptr_align: Align, size: Size) -> EvalRes
} else {
let alloc = self.get(ptr.alloc_id)?;
match alloc.relocations.get(&ptr.offset) {
Some(&alloc_id) => return Ok(ScalarMaybeUndef::Scalar(Pointer::new(alloc_id, Size::from_bytes(bits as u64)).into())),
Some(&alloc_id) => {
let ptr = Pointer::new(alloc_id, Size::from_bytes(bits as u64));
return Ok(ScalarMaybeUndef::Scalar(ptr.into()))
}
None => {},
}
}
......@@ -812,7 +827,8 @@ pub fn read_scalar(&self, ptr: Pointer, ptr_align: Align, size: Size) -> EvalRes
}))
}
pub fn read_ptr_sized(&self, ptr: Pointer, ptr_align: Align) -> EvalResult<'tcx, ScalarMaybeUndef> {
pub fn read_ptr_sized(&self, ptr: Pointer, ptr_align: Align)
-> EvalResult<'tcx, ScalarMaybeUndef> {
self.read_scalar(ptr, ptr_align, self.pointer_size())
}
......@@ -865,7 +881,8 @@ pub fn write_scalar(
Ok(())
}
pub fn write_ptr_sized(&mut self, ptr: Pointer, ptr_align: Align, val: ScalarMaybeUndef) -> EvalResult<'tcx> {
pub fn write_ptr_sized(&mut self, ptr: Pointer, ptr_align: Align, val: ScalarMaybeUndef)
-> EvalResult<'tcx> {
let ptr_size = self.pointer_size();
self.write_scalar(ptr.into(), ptr_align, val, ptr_size)
}
......@@ -1009,7 +1026,9 @@ pub trait HasMemory<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> {
fn memory(&self) -> &Memory<'a, 'mir, 'tcx, M>;
}
impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> HasMemory<'a, 'mir, 'tcx, M> for Memory<'a, 'mir, 'tcx, M> {
impl<'a, 'mir, 'tcx, M> HasMemory<'a, 'mir, 'tcx, M> for Memory<'a, 'mir, 'tcx, M>
where M: Machine<'mir, 'tcx>
{
#[inline]
fn memory_mut(&mut self) -> &mut Memory<'a, 'mir, 'tcx, M> {
self
......@@ -1021,7 +1040,9 @@ fn memory(&self) -> &Memory<'a, 'mir, 'tcx, M> {
}
}
impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> HasMemory<'a, 'mir, 'tcx, M> for EvalContext<'a, 'mir, 'tcx, M> {
impl<'a, 'mir, 'tcx, M> HasMemory<'a, 'mir, 'tcx, M> for EvalContext<'a, 'mir, 'tcx, M>
where M: Machine<'mir, 'tcx>
{
#[inline]
fn memory_mut(&mut self) -> &mut Memory<'a, 'mir, 'tcx, M> {
&mut self.memory
......@@ -1033,7 +1054,9 @@ fn memory(&self) -> &Memory<'a, 'mir, 'tcx, M> {
}
}
impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> layout::HasDataLayout for &'a Memory<'a, 'mir, 'tcx, M> {
impl<'a, 'mir, 'tcx, M> layout::HasDataLayout for &'a Memory<'a, 'mir, 'tcx, M>
where M: Machine<'mir, 'tcx>
{
#[inline]
fn data_layout(&self) -> &TargetDataLayout {
&self.tcx.data_layout
......
......@@ -68,7 +68,8 @@ pub fn binary_op(
layout::Abi::Scalar(ref scalar) => scalar.value,
_ => return err!(TypeNotPrimitive(right_layout.ty)),
};
trace!("Running binary op {:?}: {:?} ({:?}), {:?} ({:?})", bin_op, left, left_kind, right, right_kind);
trace!("Running binary op {:?}: {:?} ({:?}), {:?} ({:?})",
bin_op, left, left_kind, right, right_kind);
// I: Handle operations that support pointers
if !left_kind.is_float() && !right_kind.is_float() {
......
......@@ -287,11 +287,13 @@ pub fn mplace_field(
offsets[usize::try_from(field).unwrap()],
layout::FieldPlacement::Array { stride, .. } => {
let len = base.len();
assert!(field < len, "Tried to access element {} of array/slice with length {}", field, len);
assert!(field < len,
"Tried to access element {} of array/slice with length {}", field, len);
stride * field
}
layout::FieldPlacement::Union(count) => {
assert!(field < count as u64, "Tried to access field {} of union with {} fields", field, count);
assert!(field < count as u64,
"Tried to access field {} of union with {} fields", field, count);
// Offset is always 0
Size::from_bytes(0)
}
......@@ -604,7 +606,8 @@ fn write_value_to_mplace(
Value::ScalarPair(a_val, b_val) => {
let (a, b) = match dest.layout.abi {
layout::Abi::ScalarPair(ref a, ref b) => (&a.value, &b.value),
_ => bug!("write_value_to_mplace: invalid ScalarPair layout: {:#?}", dest.layout)
_ => bug!("write_value_to_mplace: invalid ScalarPair layout: {:#?}",
dest.layout)
};
let (a_size, b_size) = (a.size(&self), b.size(&self));
let (a_align, b_align) = (a.align(&self), b.align(&self));
......@@ -770,7 +773,8 @@ pub fn place_to_op(&self, place: PlaceTy<'tcx>) -> EvalResult<'tcx, OpTy<'tcx>>
/// Turn a place that is a dyn trait (i.e., PlaceExtra::Vtable and the appropriate layout)
/// or a slice into the specific fixed-size place and layout that is given by the vtable/len.
/// This "unpacks" the existential quantifier, so to speak.
pub fn unpack_unsized_mplace(&self, mplace: MPlaceTy<'tcx>) -> EvalResult<'tcx, MPlaceTy<'tcx>> {
pub fn unpack_unsized_mplace(&self, mplace: MPlaceTy<'tcx>)
-> EvalResult<'tcx, MPlaceTy<'tcx>> {
trace!("Unpacking {:?} ({:?})", *mplace, mplace.layout.ty);
let layout = match mplace.extra {
PlaceExtra::Vtable(vtable) => {
......
......@@ -55,7 +55,10 @@ pub(super) fn eval_terminator(
for (index, &const_int) in values.iter().enumerate() {
// Compare using binary_op
let const_int = Scalar::Bits { bits: const_int, size: discr.layout.size.bytes() as u8 };
let const_int = Scalar::Bits {
bits: const_int,
size: discr.layout.size.bytes() as u8
};
let (res, _) = self.binary_op(mir::BinOp::Eq,
discr,
ValTy { value: Value::Scalar(const_int.into()), layout: discr.layout }
......@@ -154,7 +157,9 @@ pub(super) fn eval_terminator(
target,
..
} => {
let cond_val = self.eval_operand_and_read_value(cond, None)?.to_scalar()?.to_bool()?;
let cond_val = self.eval_operand_and_read_value(cond, None)?
.to_scalar()?
.to_bool()?;
if expected == cond_val {
self.goto_block(target);
} else {
......@@ -239,15 +244,24 @@ fn check_ty_compat<'tcx>(ty: Ty<'tcx>, real_ty: Ty<'tcx>) -> bool {
// We need to allow what comes up when a non-capturing closure is cast to a fn().
match (sig.abi, real_sig.abi) {
(Abi::Rust, Abi::RustCall) // check the ABIs. This makes the test here non-symmetric.
if check_ty_compat(sig.output(), real_sig.output()) && real_sig.inputs_and_output.len() == 3 => {
if check_ty_compat(sig.output(), real_sig.output())
&& real_sig.inputs_and_output.len() == 3 => {
// First argument of real_sig must be a ZST
let fst_ty = real_sig.inputs_and_output[0];
if self.layout_of(fst_ty)?.is_zst() {
// Second argument must be a tuple matching the argument list of sig
let snd_ty = real_sig.inputs_and_output[1];
match snd_ty.sty {
ty::Tuple(tys) if sig.inputs().len() == tys.len() =>
if sig.inputs().iter().zip(tys).all(|(ty, real_ty)| check_ty_compat(ty, real_ty)) {
if sig.inputs()
.iter()
.zip(tys)
.all(|(ty, real_ty)| check_ty_compat(ty, real_ty)) {
return Ok(true)
},
_ => {}
......@@ -304,7 +318,8 @@ fn eval_fn_call(
trace!(
"args: {:#?}",
self.frame().mir.args_iter().zip(args.iter())
.map(|(local, arg)| (local, **arg, arg.layout.ty)).collect::<Vec<_>>()
.map(|(local, arg)| (local, **arg, arg.layout.ty))
.collect::<Vec<_>>()
);
let local = arg_locals.nth(1).unwrap();
for (i, &op) in args.into_iter().enumerate() {
......@@ -325,7 +340,7 @@ fn eval_fn_call(
ty::InstanceDef::Item(_) => {
// Push the stack frame, and potentially be entirely done if the call got hooked
if M::eval_fn_call(self, instance, destination, args, span)? {
// TODO: Can we make it return the frame to push, instead
// FIXME: Can we make it return the frame to push, instead
// of the hook doing half of the work and us doing the argument
// initialization?
return Ok(());
......
......@@ -92,7 +92,8 @@ pub fn read_size_and_align_from_vtable(
) -> EvalResult<'tcx, (Size, Align)> {
let pointer_size = self.memory.pointer_size();
let pointer_align = self.tcx.data_layout.pointer_align;
let size = self.memory.read_ptr_sized(vtable.offset(pointer_size, self)?, pointer_align)?.to_bits(pointer_size)? as u64;
let size = self.memory.read_ptr_sized(vtable.offset(pointer_size, self)?, pointer_align)?
.to_bits(pointer_size)? as u64;
let align = self.memory.read_ptr_sized(
vtable.offset(pointer_size * 2, self)?,
pointer_align
......
......@@ -221,7 +221,10 @@ pub fn validate_mplace(
let variant = self.read_discriminant_as_variant_index(dest.into())?;
let inner_dest = self.mplace_downcast(dest, variant)?;
// Put the variant projection onto the path, as a field
path.push(PathElem::Field(dest.layout.ty.ty_adt_def().unwrap().variants[variant].name));
path.push(PathElem::Field(dest.layout.ty
.ty_adt_def()
.unwrap()
.variants[variant].name));
trace!("variant layout: {:#?}", dest.layout);
(variant, inner_dest)
},
......
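For reference, the following is a minimal standalone sketch (not taken from the repository; the function and names are invented for illustration) of the wrapping conventions this commit applies: a `where` clause instead of a long inline bound, one parameter per line in a multi-line signature, and a method chain split with one call per line.

```rust
use std::fmt::Debug;

// Before: fn describe_all<T: Debug>(items: &[T], prefix: &str) -> Vec<String> { ... }
// After: the bound moves to a `where` clause and the signature spans several lines.
fn describe_all<T>(
    items: &[T],
    prefix: &str,
) -> Vec<String>
where
    T: Debug,
{
    // A long chained expression is split with one method call per line.
    items
        .iter()
        .map(|item| format!("{}: {:?}", prefix, item))
        .collect()
}

fn main() {
    let described = describe_all(&[1, 2, 3], "value");
    assert_eq!(described[0], "value: 1");
}
```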