Commit c488d59a authored by Masaki Hara

Integrate OperandValue::UnsizedRef into OperandValue::Ref.

Parent 6e15e7c1
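In short, this change folds the old `UnsizedRef` variant into `Ref` by giving `Ref` an optional second value that carries the unsized extra data (a vtable pointer or a length). A condensed before/after sketch of the variant shapes, distilled from the enum hunk in the diff below; the `OperandValueOld` name and the `Value`/`Align` stubs are illustrative stand-ins, not rustc's real definitions:

```rust
// Minimal stand-ins for the rustc-internal LLVM `Value` and layout `Align`
// types, so the sketch is self-contained.
pub struct Value;
pub struct Align;

// Before: sized and unsized by-ref operands were separate variants.
pub enum OperandValueOld<'ll> {
    Ref(&'ll Value, Align),
    UnsizedRef(&'ll Value, &'ll Value), // pointer + extra (vtable or length)
    Immediate(&'ll Value),
    Pair(&'ll Value, &'ll Value),
}

// After: a single `Ref` variant; `None` marks a sized by-ref operand,
// `Some(extra)` an unsized rvalue carrying its vtable or length.
pub enum OperandValue<'ll> {
    Ref(&'ll Value, Option<&'ll Value>, Align),
    Immediate(&'ll Value),
    Pair(&'ll Value, &'ll Value),
}
```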
@@ -188,7 +188,7 @@ fn store(&self, bx: &Builder<'_, 'll, 'tcx>, val: &'ll Value, dst: PlaceRef<'ll,
         }
         let cx = bx.cx;
         if self.is_sized_indirect() {
-            OperandValue::Ref(val, self.layout.align).store(bx, dst)
+            OperandValue::Ref(val, None, self.layout.align).store(bx, dst)
         } else if self.is_unsized_indirect() {
             bug!("unsized ArgType must be handled through store_fn_arg");
         } else if let PassMode::Cast(cast) = self.mode {
@@ -249,7 +249,7 @@ fn store_fn_arg(&self, bx: &Builder<'a, 'll, 'tcx>, idx: &mut usize, dst: PlaceR
                 OperandValue::Pair(next(), next()).store(bx, dst);
             }
             PassMode::Indirect(_, Some(_)) => {
-                OperandValue::UnsizedRef(next(), next()).store(bx, dst);
+                OperandValue::Ref(next(), Some(next()), self.layout.align).store(bx, dst);
             }
             PassMode::Direct(_) | PassMode::Indirect(_, None) | PassMode::Cast(_) => {
                 self.store(bx, next(), dst);
......
@@ -295,7 +295,7 @@ pub fn coerce_unsized_into(
                 OperandValue::Immediate(base) => {
                     unsize_thin_ptr(bx, base, src_ty, dst_ty)
                 }
-                OperandValue::Ref(..) | OperandValue::UnsizedRef(..) => bug!()
+                OperandValue::Ref(..) => bug!()
             };
             OperandValue::Pair(base, info).store(bx, dst);
         };
......
@@ -605,7 +605,7 @@ fn modify_as_needed(
             // etc.
             assert!(!bx.cx.type_needs_drop(arg.layout.ty));
             let (ptr, align) = match arg.val {
-                OperandValue::Ref(ptr, align) => (ptr, align),
+                OperandValue::Ref(ptr, None, align) => (ptr, align),
                 _ => bug!()
             };
             let arg = PlaceRef::new_sized(ptr, arg.layout, align);
......
@@ -32,7 +32,7 @@
 use super::{FunctionCx, LocalRef};
 use super::place::PlaceRef;
 use super::operand::OperandRef;
-use super::operand::OperandValue::{Pair, Ref, UnsizedRef, Immediate};
+use super::operand::OperandValue::{Pair, Ref, Immediate};

 impl FunctionCx<'a, 'll, 'tcx> {
     pub fn codegen_block(&mut self, bb: mir::BasicBlock) {
@@ -232,10 +232,8 @@ fn codegen_terminator(&mut self,
                     PassMode::Direct(_) | PassMode::Pair(..) => {
                         let op = self.codegen_consume(&bx, &mir::Place::Local(mir::RETURN_PLACE));
-                        if let Ref(llval, align) = op.val {
+                        if let Ref(llval, _, align) = op.val {
                             bx.load(llval, align)
-                        } else if let UnsizedRef(..) = op.val {
-                            bug!("return type must be sized");
                         } else {
                             op.immediate_or_packed_pair(&bx)
                         }
@@ -247,7 +245,7 @@ fn codegen_terminator(&mut self,
                     LocalRef::Operand(None) => bug!("use of return before def"),
                     LocalRef::Place(cg_place) => {
                         OperandRef {
-                            val: Ref(cg_place.llval, cg_place.align),
+                            val: Ref(cg_place.llval, None, cg_place.align),
                             layout: cg_place.layout
                         }
                     }
@@ -259,12 +257,11 @@ fn codegen_terminator(&mut self,
                         op.val.store(&bx, scratch);
                         scratch.llval
                     }
-                    Ref(llval, align) => {
+                    Ref(llval, _, align) => {
                         assert_eq!(align.abi(), op.layout.align.abi(),
                                    "return place is unaligned!");
                         llval
                     }
-                    UnsizedRef(..) => bug!("return type must be sized"),
                 };
                 bx.load(
                     bx.pointercast(llslot, cast_ty.llvm_type(bx.cx).ptr_to()),
@@ -605,15 +602,11 @@ fn codegen_terminator(&mut self,
            // The callee needs to own the argument memory if we pass it
            // by-ref, so make a local copy of non-immediate constants.
            match (arg, op.val) {
-               (&mir::Operand::Copy(_), Ref(..)) |
-               (&mir::Operand::Constant(_), Ref(..)) => {
+               (&mir::Operand::Copy(_), Ref(_, None, _)) |
+               (&mir::Operand::Constant(_), Ref(_, None, _)) => {
                    let tmp = PlaceRef::alloca(&bx, op.layout, "const");
                    op.val.store(&bx, tmp);
-                   op.val = Ref(tmp.llval, tmp.align);
-               }
-               (&mir::Operand::Copy(_), UnsizedRef(..)) |
-               (&mir::Operand::Constant(_), UnsizedRef(..)) => {
-                   bug!("tried to pass an unsized argument by copy or constant")
+                   op.val = Ref(tmp.llval, None, tmp.align);
                }
                _ => {}
            }
@@ -667,7 +660,7 @@ fn codegen_argument(&mut self,
                }
            } else if arg.is_unsized_indirect() {
                match op.val {
-                   UnsizedRef(a, b) => {
+                   Ref(a, Some(b), _) => {
                        llargs.push(a);
                        llargs.push(b);
                        return;
@@ -690,7 +683,7 @@ fn codegen_argument(&mut self,
                    }
                }
            }
-           Ref(llval, align) => {
+           Ref(llval, _, align) => {
                if arg.is_indirect() && align.abi() < arg.layout.align.abi() {
                    // `foo(packed.large_field)`. We can't pass the (unaligned) field directly. I
                    // think that ATM (Rust 1.16) we only pass temporaries, but we shouldn't
@@ -703,8 +696,6 @@ fn codegen_argument(&mut self,
                    (llval, align, true)
                }
            }
-           UnsizedRef(..) =>
-               bug!("codegen_argument: tried to pass unsized operand to sized argument"),
        };

        if by_ref && !arg.is_indirect() {
@@ -740,13 +731,13 @@ fn codegen_arguments_untupled(&mut self,
        let tuple = self.codegen_operand(bx, operand);

        // Handle both by-ref and immediate tuples.
-       if let Ref(llval, align) = tuple.val {
+       if let Ref(llval, None, align) = tuple.val {
            let tuple_ptr = PlaceRef::new_sized(llval, tuple.layout, align);
            for i in 0..tuple.layout.fields.count() {
                let field_ptr = tuple_ptr.project_field(bx, i);
                self.codegen_argument(bx, field_ptr.load(bx), llargs, &args[i]);
            }
-       } else if let UnsizedRef(..) = tuple.val {
+       } else if let Ref(_, Some(_), _) = tuple.val {
            bug!("closure arguments must be sized")
        } else {
            // If the tuple is immediate, the elements are as well.
......
@@ -37,11 +37,9 @@
 pub enum OperandValue<'ll> {
     /// A reference to the actual operand. The data is guaranteed
     /// to be valid for the operand's lifetime.
-    Ref(&'ll Value, Align),
-    /// A reference to the unsized operand. The data is guaranteed
-    /// to be valid for the operand's lifetime.
-    /// The second field is the extra.
-    UnsizedRef(&'ll Value, &'ll Value),
+    /// The second value, if any, is the extra data (vtable or length)
+    /// which indicates that it refers to an unsized rvalue.
+    Ref(&'ll Value, Option<&'ll Value>, Align),
     /// A single LLVM value.
     Immediate(&'ll Value),
     /// A pair of immediate LLVM values. Used by fat pointers too.
@@ -154,8 +152,7 @@ pub fn deref(self, cx: &CodegenCx<'ll, 'tcx>) -> PlaceRef<'ll, 'tcx> {
        let (llptr, llextra) = match self.val {
            OperandValue::Immediate(llptr) => (llptr, None),
            OperandValue::Pair(llptr, llextra) => (llptr, Some(llextra)),
-           OperandValue::Ref(..) |
-           OperandValue::UnsizedRef(..) => bug!("Deref of by-Ref operand {:?}", self)
+           OperandValue::Ref(..) => bug!("Deref of by-Ref operand {:?}", self)
        };
        let layout = cx.layout_of(projected_ty);
        PlaceRef {
@@ -250,8 +247,7 @@ pub fn extract_field(&self, bx: &Builder<'a, 'll, 'tcx>, i: usize) -> OperandRef
                *a = bx.bitcast(*a, field.scalar_pair_element_llvm_type(bx.cx, 0, true));
                *b = bx.bitcast(*b, field.scalar_pair_element_llvm_type(bx.cx, 1, true));
            }
-           OperandValue::Ref(..) |
-           OperandValue::UnsizedRef(..) => bug!()
+           OperandValue::Ref(..) => bug!()
        }

        OperandRef {
@@ -291,11 +287,11 @@ fn store_with_flags(
            return;
        }
        match self {
-           OperandValue::Ref(r, source_align) => {
+           OperandValue::Ref(r, None, source_align) => {
                base::memcpy_ty(bx, dest.llval, r, dest.layout,
                                source_align.min(dest.align), flags)
            }
-           OperandValue::UnsizedRef(..) => {
+           OperandValue::Ref(_, Some(_), _) => {
                bug!("cannot directly store unsized values");
            }
            OperandValue::Immediate(s) => {
@@ -321,7 +317,7 @@ pub fn store_unsized(self, bx: &Builder<'a, 'll, 'tcx>, indirect_dest: PlaceRef<
            .unwrap_or_else(|| bug!("indirect_dest has non-pointer type: {:?}", indirect_dest)).ty;

        let (llptr, llextra) =
-           if let OperandValue::UnsizedRef(llptr, llextra) = self {
+           if let OperandValue::Ref(llptr, Some(llextra), _) = self {
                (llptr, llextra)
            } else {
                bug!("store_unsized called with a sized value")
......
@@ -132,7 +132,7 @@ pub fn load(&self, bx: &Builder<'a, 'll, 'tcx>) -> OperandRef<'ll, 'tcx> {
        };

        let val = if let Some(llextra) = self.llextra {
-           OperandValue::UnsizedRef(self.llval, llextra)
+           OperandValue::Ref(self.llval, Some(llextra), self.align)
        } else if self.layout.is_llvm_immediate() {
            let mut const_llval = None;
            unsafe {
@@ -163,7 +163,7 @@ pub fn load(&self, bx: &Builder<'a, 'll, 'tcx>) -> OperandRef<'ll, 'tcx> {
            };
            OperandValue::Pair(load(0, a), load(1, b))
        } else {
-           OperandValue::Ref(self.llval, self.align)
+           OperandValue::Ref(self.llval, None, self.align)
        };

        OperandRef { val, layout: self.layout }
......
@@ -83,11 +83,11 @@ pub fn codegen_rvalue(&mut self,
                    base::coerce_unsized_into(&bx, scratch, dest);
                    scratch.storage_dead(&bx);
                }
-               OperandValue::Ref(llref, align) => {
+               OperandValue::Ref(llref, None, align) => {
                    let source = PlaceRef::new_sized(llref, operand.layout, align);
                    base::coerce_unsized_into(&bx, source, dest);
                }
-               OperandValue::UnsizedRef(..) => {
+               OperandValue::Ref(_, Some(_), _) => {
                    bug!("unsized coercion on an unsized rvalue")
                }
            }
@@ -268,9 +268,6 @@ pub fn codegen_rvalue_operand(&mut self,
                        bug!("by-ref operand {:?} in codegen_rvalue_operand",
                             operand);
                    }
-                   OperandValue::UnsizedRef(..) => {
-                       bug!("unsized coercion on an unsized rvalue")
-                   }
                }
            }
            mir::CastKind::Misc if operand.layout.is_llvm_scalar_pair() => {
......
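The call sites above all follow the same convention once the variants are merged: match on the middle `Option` field to recover the old sized/unsized distinction. A hedged illustration using the post-commit enum shape from the sketch at the top; the `describe` helper is hypothetical and not part of this commit:

```rust
// Hypothetical helper: the `Option` field now encodes what the two
// variants used to say, so every former `UnsizedRef` arm becomes a
// `Ref(_, Some(_), _)` arm and every former `Ref` arm a `Ref(_, None, _)` arm.
fn describe(val: &OperandValue<'_>) -> &'static str {
    match val {
        OperandValue::Ref(_, None, _) => "sized operand, passed by reference",
        OperandValue::Ref(_, Some(_), _) => "unsized operand: pointer plus vtable/length",
        OperandValue::Immediate(_) => "a single immediate LLVM value",
        OperandValue::Pair(..) => "a pair of immediates (e.g. a fat pointer)",
    }
}
```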