Commit 37dd9f6c authored by: M Mark Simulacrum

Add Builder::sess and Builder::tcx methods

Parent: f67e7d6b
......@@ -359,7 +359,7 @@ pub fn trans_get_discr<'a, 'tcx>(
layout::RawNullablePointer { nndiscr, .. } => {
let cmp = if nndiscr == 0 { IntEQ } else { IntNE };
let llptrty = type_of::sizing_type_of(bcx.ccx,
monomorphize::field_ty(bcx.ccx.tcx(), substs,
monomorphize::field_ty(bcx.tcx(), substs,
&def.variants[nndiscr as usize].fields[0]));
bcx.icmp(cmp, bcx.load(scrutinee), C_null(llptrty))
}
......@@ -486,7 +486,7 @@ pub fn trans_set_discr<'a, 'tcx>(
}
fn target_sets_discr_via_memset<'a, 'tcx>(bcx: &Builder<'a, 'tcx>) -> bool {
bcx.ccx.sess().target.target.arch == "arm" || bcx.ccx.sess().target.target.arch == "aarch64"
bcx.sess().target.target.arch == "arm" || bcx.sess().target.target.arch == "aarch64"
}
fn assert_discr_in_range(min: Disr, max: Disr, discr: Disr) {
......@@ -524,7 +524,7 @@ pub fn trans_field_ptr<'a, 'tcx>(
}
layout::General { discr: d, ref variants, .. } => {
let mut fields = compute_fields(bcx.ccx, t, discr.0 as usize, false);
fields.insert(0, d.to_ty(&bcx.ccx.tcx(), false));
fields.insert(0, d.to_ty(&bcx.tcx(), false));
struct_field_ptr(bcx, &variants[discr.0 as usize],
&fields,
val, ix + 1, true)
......
......@@ -62,7 +62,7 @@ pub fn trans_inline_asm<'a, 'tcx>(
// Default per-arch clobbers
// Basically what clang does
let arch_clobbers = match &bcx.ccx.sess().target.target.arch[..] {
let arch_clobbers = match &bcx.sess().target.target.arch[..] {
"x86" | "x86_64" => vec!["~{dirflag}", "~{fpsr}", "~{flags}"],
_ => Vec::new()
};
......
......@@ -272,10 +272,10 @@ pub fn coerce_unsized_into<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
assert_eq!(def_a, def_b);
let src_fields = def_a.variants[0].fields.iter().map(|f| {
monomorphize::field_ty(bcx.ccx.tcx(), substs_a, f)
monomorphize::field_ty(bcx.tcx(), substs_a, f)
});
let dst_fields = def_b.variants[0].fields.iter().map(|f| {
monomorphize::field_ty(bcx.ccx.tcx(), substs_b, f)
monomorphize::field_ty(bcx.tcx(), substs_b, f)
});
let src = adt::MaybeSizedValue::sized(src);
......
......@@ -19,7 +19,8 @@
use type_::Type;
use value::Value;
use libc::{c_uint, c_char};
use rustc::ty::{Ty, TypeFoldable};
use rustc::ty::{Ty, TyCtxt, TypeFoldable};
use rustc::session::Session;
use type_of;
use std::borrow::Cow;
......@@ -93,6 +94,14 @@ pub fn build_new_block<'b>(&self, name: &'b str) -> Builder<'a, 'tcx> {
builder
}
/// Shorthand for `self.ccx.sess()`: returns the compilation
/// `Session` owned by this builder's crate context.
pub fn sess(&self) -> &Session {
    self.ccx.sess()
}
/// Shorthand for `self.ccx.tcx()`: returns the type context
/// (`TyCtxt`) of this builder's crate context.
pub fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx> {
    self.ccx.tcx()
}
pub fn llfn(&self) -> ValueRef {
unsafe {
llvm::LLVMGetBasicBlockParent(self.llbb())
......
......@@ -59,7 +59,7 @@ fn get_landing_pad<'a>(&self, bcx: &Builder<'a, 'tcx>) -> BasicBlockRef {
let llpersonality = bcx.ccx.eh_personality();
bcx.set_personality_fn(llpersonality);
if base::wants_msvc_seh(bcx.ccx.sess()) {
if base::wants_msvc_seh(bcx.sess()) {
let pad = bcx.cleanup_pad(None, &[]);
let funclet = Some(Funclet::new(pad));
self.trans(funclet.as_ref(), &bcx);
......@@ -80,7 +80,7 @@ fn get_landing_pad<'a>(&self, bcx: &Builder<'a, 'tcx>) -> BasicBlockRef {
// Insert cleanup instructions into the cleanup block
self.trans(None, &bcx);
if !bcx.ccx.sess().target.target.options.custom_unwind_resume {
if !bcx.sess().target.target.options.custom_unwind_resume {
bcx.resume(llretval);
} else {
let exc_ptr = bcx.extract_value(llretval, 0);
......@@ -132,7 +132,7 @@ pub fn schedule_drop_adt_contents(
fn new(bcx: &Builder<'a, 'tcx>, drop_val: DropValue<'tcx>) -> CleanupScope<'tcx> {
CleanupScope {
cleanup: Some(drop_val),
landing_pad: if !bcx.ccx.sess().no_landing_pads() {
landing_pad: if !bcx.sess().no_landing_pads() {
Some(drop_val.get_landing_pad(bcx))
} else {
None
......
......@@ -38,7 +38,7 @@ pub fn set_source_location(
};
let dbg_loc = if function_debug_context.source_locations_enabled.get() {
debug!("set_source_location: {}", builder.ccx.sess().codemap().span_to_string(span));
debug!("set_source_location: {}", builder.sess().codemap().span_to_string(span));
let loc = span_start(builder.ccx, span);
InternalDebugLocation::new(scope, loc.line, loc.col.to_usize())
} else {
......
......@@ -44,8 +44,8 @@ pub fn trans_exchange_free_ty<'a, 'tcx>(
ptr: MaybeSizedValue,
content_ty: Ty<'tcx>
) {
let def_id = langcall(bcx.ccx.tcx(), None, "", BoxFreeFnLangItem);
let substs = bcx.ccx.tcx().mk_substs(iter::once(Kind::from(content_ty)));
let def_id = langcall(bcx.tcx(), None, "", BoxFreeFnLangItem);
let substs = bcx.tcx().mk_substs(iter::once(Kind::from(content_ty)));
let callee = Callee::def(bcx.ccx, def_id, substs);
let fn_ty = callee.direct_fn_type(bcx.ccx, &[]);
......@@ -232,7 +232,7 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi
}
ty::TyAdt(def, ..) if def.dtor_kind().is_present() && !skip_dtor => {
let shallow_drop = def.is_union();
let tcx = bcx.ccx.tcx();
let tcx = bcx.tcx();
let def = t.ty_adt_def().unwrap();
......@@ -330,7 +330,7 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf
// Recurse to get the size of the dynamically sized field (must be
// the last field).
let last_field = def.struct_variant().fields.last().unwrap();
let field_ty = monomorphize::field_ty(bcx.ccx.tcx(), substs, last_field);
let field_ty = monomorphize::field_ty(bcx.tcx(), substs, last_field);
let (unsized_size, unsized_align) = size_and_align_of_dst(bcx, field_ty, info);
// FIXME (#26403, #27023): We should be adding padding
......@@ -382,7 +382,7 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf
(bcx.load(size_ptr), bcx.load(align_ptr))
}
ty::TySlice(_) | ty::TyStr => {
let unit_ty = t.sequence_element_type(bcx.ccx.tcx());
let unit_ty = t.sequence_element_type(bcx.tcx());
// The info in this case is the length of the str, so the size is that
// times the unit size.
let llunit_ty = sizing_type_of(bcx.ccx, unit_ty);
......@@ -405,7 +405,7 @@ fn iter_variant<'a, 'tcx>(cx: &Builder<'a, 'tcx>,
av: adt::MaybeSizedValue,
variant: &'tcx ty::VariantDef,
substs: &Substs<'tcx>) {
let tcx = cx.ccx.tcx();
let tcx = cx.tcx();
for (i, field) in variant.fields.iter().enumerate() {
let arg = monomorphize::field_ty(tcx, substs, field);
let field_ptr = adt::trans_field_ptr(&cx, t, av, Disr::from(variant.disr_val), i);
......@@ -416,7 +416,7 @@ fn iter_variant<'a, 'tcx>(cx: &Builder<'a, 'tcx>,
let mut cx = cx;
match t.sty {
ty::TyClosure(def_id, substs) => {
for (i, upvar_ty) in substs.upvar_tys(def_id, cx.ccx.tcx()).enumerate() {
for (i, upvar_ty) in substs.upvar_tys(def_id, cx.tcx()).enumerate() {
let llupvar = adt::trans_field_ptr(&cx, t, ptr, Disr(0), i);
drop_ty(&cx, MaybeSizedValue::sized(llupvar), upvar_ty);
}
......@@ -424,12 +424,12 @@ fn iter_variant<'a, 'tcx>(cx: &Builder<'a, 'tcx>,
ty::TyArray(_, n) => {
let base = get_dataptr(&cx, ptr.value);
let len = C_uint(cx.ccx, n);
let unit_ty = t.sequence_element_type(cx.ccx.tcx());
let unit_ty = t.sequence_element_type(cx.tcx());
cx = tvec::slice_for_each(&cx, base, unit_ty, len,
|bb, vv| drop_ty(bb, MaybeSizedValue::sized(vv), unit_ty));
}
ty::TySlice(_) | ty::TyStr => {
let unit_ty = t.sequence_element_type(cx.ccx.tcx());
let unit_ty = t.sequence_element_type(cx.tcx());
cx = tvec::slice_for_each(&cx, ptr.value, unit_ty, ptr.meta,
|bb, vv| drop_ty(bb, MaybeSizedValue::sized(vv), unit_ty));
}
......@@ -441,7 +441,7 @@ fn iter_variant<'a, 'tcx>(cx: &Builder<'a, 'tcx>,
}
ty::TyAdt(adt, substs) => match adt.adt_kind() {
AdtKind::Struct => {
let VariantInfo { fields, discr } = VariantInfo::from_ty(cx.ccx.tcx(), t, None);
let VariantInfo { fields, discr } = VariantInfo::from_ty(cx.tcx(), t, None);
for (i, &Field(_, field_ty)) in fields.iter().enumerate() {
let llfld_a = adt::trans_field_ptr(&cx, t, ptr, Disr::from(discr), i);
let ptr = if cx.ccx.shared().type_is_sized(field_ty) {
......@@ -469,7 +469,7 @@ fn iter_variant<'a, 'tcx>(cx: &Builder<'a, 'tcx>,
}
}
(adt::BranchKind::Switch, Some(lldiscrim_a)) => {
let tcx = cx.ccx.tcx();
let tcx = cx.tcx();
drop_ty(&cx, MaybeSizedValue::sized(lldiscrim_a), tcx.types.isize);
// Create a fall-through basic block for the "else" case of
......@@ -501,13 +501,13 @@ fn iter_variant<'a, 'tcx>(cx: &Builder<'a, 'tcx>,
}
cx = next_cx;
}
_ => cx.ccx.sess().unimpl("value from adt::trans_switch in drop_structural_ty"),
_ => cx.sess().unimpl("value from adt::trans_switch in drop_structural_ty"),
}
}
},
_ => {
cx.ccx.sess().unimpl(&format!("type in drop_structural_ty: {}", t))
cx.sess().unimpl(&format!("type in drop_structural_ty: {}", t))
}
}
return cx;
......
......@@ -694,7 +694,7 @@ fn try_intrinsic<'a, 'tcx>(
local_ptr: ValueRef,
dest: ValueRef,
) {
if bcx.ccx.sess().no_landing_pads() {
if bcx.sess().no_landing_pads() {
bcx.call(func, &[data], None);
bcx.store(C_null(Type::i8p(&bcx.ccx)), dest, None);
} else if wants_msvc_seh(bcx.sess()) {
......@@ -937,7 +937,7 @@ fn generic_simd_intrinsic<'a, 'tcx>(
};
($msg: tt, $($fmt: tt)*) => {
span_invalid_monomorphization_error(
bcx.ccx.sess(), span,
bcx.sess(), span,
&format!(concat!("invalid monomorphization of `{}` intrinsic: ",
$msg),
name, $($fmt)*));
......@@ -959,7 +959,7 @@ fn generic_simd_intrinsic<'a, 'tcx>(
let tcx = bcx.ccx.tcx();
let tcx = bcx.tcx();
let sig = tcx.erase_late_bound_regions_and_normalize(callee_ty.fn_sig());
let arg_tys = sig.inputs();
......
......@@ -122,7 +122,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock,
let ps = self.get_personality_slot(&bcx);
let lp = bcx.load(ps);
Lifetime::End.call(&bcx, ps);
if !bcx.ccx.sess().target.target.options.custom_unwind_resume {
if !bcx.sess().target.target.options.custom_unwind_resume {
bcx.resume(lp);
} else {
let exc_ptr = bcx.extract_value(lp, 0);
......@@ -146,7 +146,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock,
mir::TerminatorKind::Switch { ref discr, ref adt_def, ref targets } => {
let discr_lvalue = self.trans_lvalue(&bcx, discr);
let ty = discr_lvalue.ty.to_ty(bcx.ccx.tcx());
let ty = discr_lvalue.ty.to_ty(bcx.tcx());
let discr = adt::trans_get_discr(&bcx, ty, discr_lvalue.llval, None, true);
let mut bb_hist = FxHashMap();
......@@ -203,7 +203,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock,
LocalRef::Lvalue(tr_lvalue) => {
OperandRef {
val: Ref(tr_lvalue.llval),
ty: tr_lvalue.ty.to_ty(bcx.ccx.tcx())
ty: tr_lvalue.ty.to_ty(bcx.tcx())
}
}
};
......@@ -233,7 +233,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock,
}
mir::TerminatorKind::Drop { ref location, target, unwind } => {
let ty = location.ty(&self.mir, bcx.ccx.tcx()).to_ty(bcx.ccx.tcx());
let ty = location.ty(&self.mir, bcx.tcx()).to_ty(bcx.tcx());
let ty = self.monomorphize(&ty);
// Double check for necessity to drop
......@@ -314,7 +314,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock,
self.set_debug_loc(&bcx, terminator.source_info);
// Get the location information.
let loc = bcx.ccx.sess().codemap().lookup_char_pos(span.lo);
let loc = bcx.sess().codemap().lookup_char_pos(span.lo);
let filename = Symbol::intern(&loc.file.name).as_str();
let filename = C_str_slice(bcx.ccx, filename);
let line = C_u32(bcx.ccx, loc.line as u32);
......@@ -364,15 +364,15 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock,
if const_cond == Some(!expected) {
if let Some(err) = const_err {
let err = ConstEvalErr{ span: span, kind: err };
let mut diag = bcx.ccx.tcx().sess.struct_span_warn(
let mut diag = bcx.tcx().sess.struct_span_warn(
span, "this expression will panic at run-time");
note_const_eval_err(bcx.ccx.tcx(), &err, span, "expression", &mut diag);
note_const_eval_err(bcx.tcx(), &err, span, "expression", &mut diag);
diag.emit();
}
}
// Obtain the panic entry point.
let def_id = common::langcall(bcx.ccx.tcx(), Some(span), "", lang_item);
let def_id = common::langcall(bcx.tcx(), Some(span), "", lang_item);
let callee = Callee::def(bcx.ccx, def_id,
bcx.ccx.empty_substs_for_def_id(def_id));
let llfn = callee.reify(bcx.ccx);
......@@ -411,12 +411,12 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock,
_ => bug!("{} is not callable", callee.ty)
};
let sig = bcx.ccx.tcx().erase_late_bound_regions_and_normalize(sig);
let sig = bcx.tcx().erase_late_bound_regions_and_normalize(sig);
// Handle intrinsics old trans wants Expr's for, ourselves.
let intrinsic = match (&callee.ty.sty, &callee.data) {
(&ty::TyFnDef(def_id, ..), &Intrinsic) => {
Some(bcx.ccx.tcx().item_name(def_id).as_str())
Some(bcx.tcx().item_name(def_id).as_str())
}
_ => None
};
......@@ -444,7 +444,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock,
let extra_args = &args[sig.inputs().len()..];
let extra_args = extra_args.iter().map(|op_arg| {
let op_ty = op_arg.ty(&self.mir, bcx.ccx.tcx());
let op_ty = op_arg.ty(&self.mir, bcx.tcx());
self.monomorphize(&op_ty)
}).collect::<Vec<_>>();
let fn_ty = callee.direct_fn_type(bcx.ccx, &extra_args);
......@@ -635,7 +635,7 @@ fn trans_argument(&mut self,
let imm_op = |x| OperandRef {
val: Immediate(x),
// We won't be checking the type again.
ty: bcx.ccx.tcx().types.err
ty: bcx.tcx().types.err
};
self.trans_argument(bcx, imm_op(ptr), llargs, fn_ty, next_idx, callee);
self.trans_argument(bcx, imm_op(meta), llargs, fn_ty, next_idx, callee);
......@@ -875,13 +875,13 @@ fn trans_transmute(&mut self, bcx: &Builder<'a, 'tcx>,
src: &mir::Operand<'tcx>, dst: LvalueRef<'tcx>) {
let mut val = self.trans_operand(bcx, src);
if let ty::TyFnDef(def_id, substs, _) = val.ty.sty {
let llouttype = type_of::type_of(bcx.ccx, dst.ty.to_ty(bcx.ccx.tcx()));
let llouttype = type_of::type_of(bcx.ccx, dst.ty.to_ty(bcx.tcx()));
let out_type_size = llbitsize_of_real(bcx.ccx, llouttype);
if out_type_size != 0 {
// FIXME #19925 Remove this hack after a release cycle.
let f = Callee::def(bcx.ccx, def_id, substs);
let ty = match f.ty.sty {
ty::TyFnDef(.., f) => bcx.ccx.tcx().mk_fn_ptr(f),
ty::TyFnDef(.., f) => bcx.tcx().mk_fn_ptr(f),
_ => f.ty
};
val = OperandRef {
......
......@@ -167,7 +167,7 @@ pub fn trans_lvalue(&mut self,
let llindex = C_uint(bcx.ccx, from);
let llbase = project_index(llindex);
let base_ty = tr_base.ty.to_ty(bcx.ccx.tcx());
let base_ty = tr_base.ty.to_ty(bcx.tcx());
match base_ty.sty {
ty::TyArray(..) => {
// must cast the lvalue pointer type to the new
......
......@@ -268,7 +268,7 @@ pub fn trans_mir<'a, 'tcx: 'a>(
// User variable
let source_info = decl.source_info.unwrap();
let debug_scope = mircx.scopes[source_info.scope];
let dbg = debug_scope.is_valid() && bcx.ccx.sess().opts.debuginfo == FullDebugInfo;
let dbg = debug_scope.is_valid() && bcx.sess().opts.debuginfo == FullDebugInfo;
if !lvalue_locals.contains(local.index()) && !dbg {
debug!("alloc: {:?} ({}) -> operand", local, name);
......@@ -367,13 +367,13 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
lvalue_locals: &BitVector)
-> Vec<LocalRef<'tcx>> {
let mir = mircx.mir;
let tcx = bcx.ccx.tcx();
let tcx = bcx.tcx();
let mut idx = 0;
let mut llarg_idx = mircx.fn_ty.ret.is_indirect() as usize;
// Get the argument scope, if it exists and if we need it.
let arg_scope = scopes[mir::ARGUMENT_VISIBILITY_SCOPE];
let arg_scope = if arg_scope.is_valid() && bcx.ccx.sess().opts.debuginfo == FullDebugInfo {
let arg_scope = if arg_scope.is_valid() && bcx.sess().opts.debuginfo == FullDebugInfo {
Some(arg_scope.scope_metadata)
} else {
None
......@@ -433,7 +433,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
let arg = &mircx.fn_ty.args[idx];
idx += 1;
let llval = if arg.is_indirect() && bcx.ccx.sess().opts.debuginfo != FullDebugInfo {
let llval = if arg.is_indirect() && bcx.sess().opts.debuginfo != FullDebugInfo {
// Don't copy an indirect argument to an alloca, the caller
// already put it in a temporary alloca and gave it up, unless
// we emit extra-debug-info, which requires local allocas :(.
......
......@@ -211,7 +211,7 @@ pub fn trans_consume(&mut self,
// for most lvalues, to consume them we just load them
// out from their home
let tr_lvalue = self.trans_lvalue(bcx, lvalue);
let ty = tr_lvalue.ty.to_ty(bcx.ccx.tcx());
let ty = tr_lvalue.ty.to_ty(bcx.tcx());
self.trans_load(bcx, tr_lvalue.llval, ty)
}
......
......@@ -92,7 +92,7 @@ pub fn trans_rvalue(&mut self,
mir::Rvalue::Repeat(ref elem, ref count) => {
let tr_elem = self.trans_operand(&bcx, elem);
let size = count.value.as_u64(bcx.ccx.tcx().sess.target.uint_type);
let size = count.value.as_u64(bcx.tcx().sess.target.uint_type);
let size = C_uint(bcx.ccx, size);
let base = base::get_dataptr(&bcx, dest.llval);
tvec::slice_for_each(&bcx, base, tr_elem.ty, size, |bcx, llslot| {
......@@ -104,7 +104,7 @@ pub fn trans_rvalue(&mut self,
match *kind {
mir::AggregateKind::Adt(adt_def, variant_index, _, active_field_index) => {
let disr = Disr::from(adt_def.variants[variant_index].disr_val);
let dest_ty = dest.ty.to_ty(bcx.ccx.tcx());
let dest_ty = dest.ty.to_ty(bcx.tcx());
adt::trans_set_discr(&bcx, dest_ty, dest.llval, Disr::from(disr));
for (i, operand) in operands.iter().enumerate() {
let op = self.trans_operand(&bcx, operand);
......@@ -120,7 +120,7 @@ pub fn trans_rvalue(&mut self,
},
_ => {
// If this is a tuple or closure, we need to translate GEP indices.
let layout = bcx.ccx.layout_of(dest.ty.to_ty(bcx.ccx.tcx()));
let layout = bcx.ccx.layout_of(dest.ty.to_ty(bcx.tcx()));
let translation = if let Layout::Univariant { ref variant, .. } = *layout {
Some(&variant.memory_index)
} else {
......@@ -150,7 +150,7 @@ pub fn trans_rvalue(&mut self,
mir::Rvalue::InlineAsm { ref asm, ref outputs, ref inputs } => {
let outputs = outputs.iter().map(|output| {
let lvalue = self.trans_lvalue(&bcx, output);
(lvalue.llval, lvalue.ty.to_ty(bcx.ccx.tcx()))
(lvalue.llval, lvalue.ty.to_ty(bcx.tcx()))
}).collect();
let input_vals = inputs.iter().map(|input| {
......@@ -345,9 +345,9 @@ pub fn trans_rvalue_operand(&mut self,
mir::Rvalue::Ref(_, bk, ref lvalue) => {
let tr_lvalue = self.trans_lvalue(&bcx, lvalue);
let ty = tr_lvalue.ty.to_ty(bcx.ccx.tcx());
let ref_ty = bcx.ccx.tcx().mk_ref(
bcx.ccx.tcx().mk_region(ty::ReErased),
let ty = tr_lvalue.ty.to_ty(bcx.tcx());
let ref_ty = bcx.tcx().mk_ref(
bcx.tcx().mk_region(ty::ReErased),
ty::TypeAndMut { ty: ty, mutbl: bk.to_mutbl_lossy() }
);
......@@ -372,7 +372,7 @@ pub fn trans_rvalue_operand(&mut self,
let tr_lvalue = self.trans_lvalue(&bcx, lvalue);
let operand = OperandRef {
val: OperandValue::Immediate(tr_lvalue.len(bcx.ccx)),
ty: bcx.ccx.tcx().types.usize,
ty: bcx.tcx().types.usize,
};
(bcx, operand)
}
......@@ -399,7 +399,7 @@ pub fn trans_rvalue_operand(&mut self,
};
let operand = OperandRef {
val: OperandValue::Immediate(llresult),
ty: op.ty(bcx.ccx.tcx(), lhs.ty, rhs.ty),
ty: op.ty(bcx.tcx(), lhs.ty, rhs.ty),
};
(bcx, operand)
}
......@@ -409,8 +409,8 @@ pub fn trans_rvalue_operand(&mut self,
let result = self.trans_scalar_checked_binop(&bcx, op,
lhs.immediate(), rhs.immediate(),
lhs.ty);
let val_ty = op.ty(bcx.ccx.tcx(), lhs.ty, rhs.ty);
let operand_ty = bcx.ccx.tcx().intern_tup(&[val_ty, bcx.ccx.tcx().types.bool]);
let val_ty = op.ty(bcx.tcx(), lhs.ty, rhs.ty);
let operand_ty = bcx.tcx().intern_tup(&[val_ty, bcx.tcx().types.bool]);
let operand = OperandRef {
val: result,
ty: operand_ty
......@@ -444,16 +444,16 @@ pub fn trans_rvalue_operand(&mut self,
let align = type_of::align_of(bcx.ccx, content_ty);
let llalign = C_uint(bcx.ccx, align);
let llty_ptr = llty.ptr_to();
let box_ty = bcx.ccx.tcx().mk_box(content_ty);
let box_ty = bcx.tcx().mk_box(content_ty);
// Allocate space:
let def_id = match bcx.ccx.tcx().lang_items.require(ExchangeMallocFnLangItem) {
let def_id = match bcx.tcx().lang_items.require(ExchangeMallocFnLangItem) {
Ok(id) => id,
Err(s) => {
bcx.ccx.sess().fatal(&format!("allocation of `{}` {}", box_ty, s));
bcx.sess().fatal(&format!("allocation of `{}` {}", box_ty, s));
}
};
let r = Callee::def(bcx.ccx, def_id, bcx.ccx.tcx().intern_substs(&[]))
let r = Callee::def(bcx.ccx, def_id, bcx.tcx().intern_substs(&[]))
.reify(bcx.ccx);
let val = bcx.pointercast(bcx.call(r, &[llsize, llalign], None), llty_ptr);
......@@ -618,7 +618,7 @@ pub fn trans_scalar_checked_binop(&mut self,
// will only succeed if both operands are constant.
// This is necessary to determine when an overflow Assert
// will always panic at runtime, and produce a warning.
if let Some((val, of)) = const_scalar_checked_binop(bcx.ccx.tcx(), op, lhs, rhs, input_ty) {
if let Some((val, of)) = const_scalar_checked_binop(bcx.tcx(), op, lhs, rhs, input_ty) {
return OperandValue::Pair(val, C_bool(bcx.ccx, of));
}
......@@ -687,7 +687,7 @@ fn get_overflow_intrinsic(oop: OverflowOp, bcx: &Builder, ty: Ty) -> ValueRef {
use syntax::ast::UintTy::*;
use rustc::ty::{TyInt, TyUint};
let tcx = bcx.ccx.tcx();
let tcx = bcx.tcx();
let new_sty = match ty.sty {
TyInt(Is) => match &tcx.sess.target.target.target_pointer_width[..] {
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Register to post a comment