Commit 6584f374 authored by: B bors

auto merge of #13170 : eddyb/rust/syntax-cleanup, r=alexcrichton

Removes all Cells/RefCells from the lexer::Reader implementations, and a couple of @-pointers.
......@@ -23,6 +23,7 @@
use metadata::loader::Os;
use std::cell::RefCell;
use std::rc::Rc;
use collections::HashMap;
use syntax::ast;
use syntax::abi;
......@@ -41,7 +42,7 @@
pub fn read_crates(sess: &Session,
krate: &ast::Crate,
os: loader::Os,
intr: @IdentInterner) {
intr: Rc<IdentInterner>) {
let mut e = Env {
sess: sess,
os: os,
......@@ -114,7 +115,7 @@ struct Env<'a> {
os: loader::Os,
crate_cache: @RefCell<Vec<cache_entry>>,
next_crate_num: ast::CrateNum,
intr: @IdentInterner
intr: Rc<IdentInterner>
}
fn visit_crate(e: &Env, c: &ast::Crate) {
......@@ -295,7 +296,7 @@ fn resolve_crate(e: &mut Env,
id_hash: id_hash,
hash: hash.map(|a| &*a),
os: e.os,
intr: e.intr,
intr: e.intr.clone(),
rejected_via_hash: false,
};
let loader::Library {
......
......@@ -63,7 +63,7 @@ pub fn each_child_of_item(cstore: &cstore::CStore,
let get_crate_data: decoder::GetCrateDataCb = |cnum| {
cstore.get_crate_data(cnum)
};
decoder::each_child_of_item(cstore.intr,
decoder::each_child_of_item(cstore.intr.clone(),
crate_data,
def_id.node,
get_crate_data,
......@@ -80,7 +80,7 @@ pub fn each_top_level_item_of_crate(cstore: &cstore::CStore,
let get_crate_data: decoder::GetCrateDataCb = |cnum| {
cstore.get_crate_data(cnum)
};
decoder::each_top_level_item_of_crate(cstore.intr,
decoder::each_top_level_item_of_crate(cstore.intr.clone(),
crate_data,
get_crate_data,
callback)
......@@ -118,19 +118,19 @@ pub fn get_enum_variants(tcx: &ty::ctxt, def: ast::DefId)
-> Vec<@ty::VariantInfo> {
let cstore = &tcx.sess.cstore;
let cdata = cstore.get_crate_data(def.krate);
return decoder::get_enum_variants(cstore.intr, cdata, def.node, tcx)
return decoder::get_enum_variants(cstore.intr.clone(), cdata, def.node, tcx)
}
/// Returns information about the given implementation.
pub fn get_impl(tcx: &ty::ctxt, impl_def_id: ast::DefId)
-> ty::Impl {
let cdata = tcx.sess.cstore.get_crate_data(impl_def_id.krate);
decoder::get_impl(tcx.sess.cstore.intr, cdata, impl_def_id.node, tcx)
decoder::get_impl(tcx.sess.cstore.intr.clone(), cdata, impl_def_id.node, tcx)
}
pub fn get_method(tcx: &ty::ctxt, def: ast::DefId) -> ty::Method {
let cdata = tcx.sess.cstore.get_crate_data(def.krate);
decoder::get_method(tcx.sess.cstore.intr, cdata, def.node, tcx)
decoder::get_method(tcx.sess.cstore.intr.clone(), cdata, def.node, tcx)
}
pub fn get_method_name_and_explicit_self(cstore: &cstore::CStore,
......@@ -138,7 +138,7 @@ pub fn get_method_name_and_explicit_self(cstore: &cstore::CStore,
-> (ast::Ident, ast::ExplicitSelf_)
{
let cdata = cstore.get_crate_data(def.krate);
decoder::get_method_name_and_explicit_self(cstore.intr, cdata, def.node)
decoder::get_method_name_and_explicit_self(cstore.intr.clone(), cdata, def.node)
}
pub fn get_trait_method_def_ids(cstore: &cstore::CStore,
......@@ -158,7 +158,7 @@ pub fn get_provided_trait_methods(tcx: &ty::ctxt,
-> Vec<@ty::Method> {
let cstore = &tcx.sess.cstore;
let cdata = cstore.get_crate_data(def.krate);
decoder::get_provided_trait_methods(cstore.intr, cdata, def.node, tcx)
decoder::get_provided_trait_methods(cstore.intr.clone(), cdata, def.node, tcx)
}
pub fn get_supertraits(tcx: &ty::ctxt, def: ast::DefId) -> Vec<@ty::TraitRef> {
......@@ -177,7 +177,7 @@ pub fn get_static_methods_if_impl(cstore: &cstore::CStore,
def: ast::DefId)
-> Option<Vec<StaticMethodInfo> > {
let cdata = cstore.get_crate_data(def.krate);
decoder::get_static_methods_if_impl(cstore.intr, cdata, def.node)
decoder::get_static_methods_if_impl(cstore.intr.clone(), cdata, def.node)
}
pub fn get_item_attrs(cstore: &cstore::CStore,
......@@ -191,7 +191,7 @@ pub fn get_struct_fields(cstore: &cstore::CStore,
def: ast::DefId)
-> Vec<ty::field_ty> {
let cdata = cstore.get_crate_data(def.krate);
decoder::get_struct_fields(cstore.intr, cdata, def.node)
decoder::get_struct_fields(cstore.intr.clone(), cdata, def.node)
}
pub fn get_type(tcx: &ty::ctxt,
......@@ -251,7 +251,7 @@ pub fn get_impl_method(cstore: &cstore::CStore,
mname: ast::Ident)
-> Option<ast::DefId> {
let cdata = cstore.get_crate_data(def.krate);
decoder::get_impl_method(cstore.intr, cdata, def.node, mname)
decoder::get_impl_method(cstore.intr.clone(), cdata, def.node, mname)
}
pub fn get_item_visibility(cstore: &cstore::CStore,
......
......@@ -19,6 +19,7 @@
use std::cell::RefCell;
use std::c_vec::CVec;
use std::rc::Rc;
use collections::HashMap;
use syntax::ast;
use syntax::parse::token::IdentInterner;
......@@ -70,14 +71,14 @@ pub struct CStore {
priv used_crate_sources: RefCell<Vec<CrateSource> >,
priv used_libraries: RefCell<Vec<(~str, NativeLibaryKind)> >,
priv used_link_args: RefCell<Vec<~str> >,
intr: @IdentInterner
intr: Rc<IdentInterner>
}
// Map from NodeId's of local extern crate statements to crate numbers
type extern_mod_crate_map = HashMap<ast::NodeId, ast::CrateNum>;
impl CStore {
pub fn new(intr: @IdentInterner) -> CStore {
pub fn new(intr: Rc<IdentInterner>) -> CStore {
CStore {
metas: RefCell::new(HashMap::new()),
extern_mod_crate_map: RefCell::new(HashMap::new()),
......
......@@ -278,7 +278,7 @@ fn item_region_param_defs(item_doc: ebml::Doc, cdata: Cmd)
reader::tagged_docs(item_doc, tag_region_param_def, |rp_doc| {
let ident_str_doc = reader::get_doc(rp_doc,
tag_region_param_def_ident);
let ident = item_name(token::get_ident_interner(), ident_str_doc);
let ident = item_name(&*token::get_ident_interner(), ident_str_doc);
let def_id_doc = reader::get_doc(rp_doc,
tag_region_param_def_def_id);
let def_id = reader::with_doc_data(def_id_doc, parse_def_id);
......@@ -460,13 +460,13 @@ pub fn get_impl_vtables(cdata: Cmd,
}
pub fn get_impl_method(intr: @IdentInterner, cdata: Cmd, id: ast::NodeId,
pub fn get_impl_method(intr: Rc<IdentInterner>, cdata: Cmd, id: ast::NodeId,
name: ast::Ident) -> Option<ast::DefId> {
let items = reader::get_doc(reader::Doc(cdata.data()), tag_items);
let mut found = None;
reader::tagged_docs(find_item(id, items), tag_item_impl_method, |mid| {
let m_did = reader::with_doc_data(mid, parse_def_id);
if item_name(intr, find_item(m_did.node, items)) == name {
if item_name(&*intr, find_item(m_did.node, items)) == name {
found = Some(translate_def_id(cdata, m_did));
}
true
......@@ -509,7 +509,7 @@ pub fn each_lang_item(cdata: Cmd, f: |ast::NodeId, uint| -> bool) -> bool {
})
}
fn each_child_of_item_or_crate(intr: @IdentInterner,
fn each_child_of_item_or_crate(intr: Rc<IdentInterner>,
cdata: Cmd,
item_doc: ebml::Doc,
get_crate_data: GetCrateDataCb,
......@@ -536,7 +536,7 @@ fn each_child_of_item_or_crate(intr: @IdentInterner,
None => {}
Some(child_item_doc) => {
// Hand off the item to the callback.
let child_name = item_name(intr, child_item_doc);
let child_name = item_name(&*intr, child_item_doc);
let def_like = item_to_def_like(child_item_doc,
child_def_id,
cdata.cnum);
......@@ -577,7 +577,7 @@ fn each_child_of_item_or_crate(intr: @IdentInterner,
// Hand off the static method
// to the callback.
let static_method_name =
item_name(intr, impl_method_doc);
item_name(&*intr, impl_method_doc);
let static_method_def_like =
item_to_def_like(impl_method_doc,
impl_method_def_id,
......@@ -638,7 +638,7 @@ fn each_child_of_item_or_crate(intr: @IdentInterner,
}
/// Iterates over each child of the given item.
pub fn each_child_of_item(intr: @IdentInterner,
pub fn each_child_of_item(intr: Rc<IdentInterner>,
cdata: Cmd,
id: ast::NodeId,
get_crate_data: GetCrateDataCb,
......@@ -659,7 +659,7 @@ pub fn each_child_of_item(intr: @IdentInterner,
}
/// Iterates over all the top-level crate items.
pub fn each_top_level_item_of_crate(intr: @IdentInterner,
pub fn each_top_level_item_of_crate(intr: Rc<IdentInterner>,
cdata: Cmd,
get_crate_data: GetCrateDataCb,
callback: |DefLike,
......@@ -711,7 +711,7 @@ pub fn maybe_get_item_ast(cdata: Cmd, tcx: &ty::ctxt, id: ast::NodeId,
}
}
pub fn get_enum_variants(intr: @IdentInterner, cdata: Cmd, id: ast::NodeId,
pub fn get_enum_variants(intr: Rc<IdentInterner>, cdata: Cmd, id: ast::NodeId,
tcx: &ty::ctxt) -> Vec<@ty::VariantInfo> {
let data = cdata.data();
let items = reader::get_doc(reader::Doc(data), tag_items);
......@@ -723,7 +723,7 @@ pub fn get_enum_variants(intr: @IdentInterner, cdata: Cmd, id: ast::NodeId,
let item = find_item(did.node, items);
let ctor_ty = item_type(ast::DefId { krate: cdata.cnum, node: id},
item, tcx, cdata);
let name = item_name(intr, item);
let name = item_name(&*intr, item);
let arg_tys = match ty::get(ctor_ty).sty {
ty::ty_bare_fn(ref f) => f.sig.inputs.clone(),
_ => Vec::new(), // Nullary enum variant.
......@@ -770,12 +770,12 @@ fn get_mutability(ch: u8) -> ast::Mutability {
}
}
fn item_impl_methods(intr: @IdentInterner, cdata: Cmd, item: ebml::Doc,
fn item_impl_methods(intr: Rc<IdentInterner>, cdata: Cmd, item: ebml::Doc,
tcx: &ty::ctxt) -> Vec<@ty::Method> {
let mut rslt = Vec::new();
reader::tagged_docs(item, tag_item_impl_method, |doc| {
let m_did = reader::with_doc_data(doc, parse_def_id);
rslt.push(@get_method(intr, cdata, m_did.node, tcx));
rslt.push(@get_method(intr.clone(), cdata, m_did.node, tcx));
true
});
......@@ -783,7 +783,7 @@ fn item_impl_methods(intr: @IdentInterner, cdata: Cmd, item: ebml::Doc,
}
/// Returns information about the given implementation.
pub fn get_impl(intr: @IdentInterner, cdata: Cmd, impl_id: ast::NodeId,
pub fn get_impl(intr: Rc<IdentInterner>, cdata: Cmd, impl_id: ast::NodeId,
tcx: &ty::ctxt)
-> ty::Impl {
let data = cdata.data();
......@@ -793,23 +793,23 @@ pub fn get_impl(intr: @IdentInterner, cdata: Cmd, impl_id: ast::NodeId,
krate: cdata.cnum,
node: impl_id,
},
ident: item_name(intr, impl_item),
ident: item_name(&*intr, impl_item),
methods: item_impl_methods(intr, cdata, impl_item, tcx),
}
}
pub fn get_method_name_and_explicit_self(
intr: @IdentInterner,
intr: Rc<IdentInterner>,
cdata: Cmd,
id: ast::NodeId) -> (ast::Ident, ast::ExplicitSelf_)
{
let method_doc = lookup_item(id, cdata.data());
let name = item_name(intr, method_doc);
let name = item_name(&*intr, method_doc);
let explicit_self = get_explicit_self(method_doc);
(name, explicit_self)
}
pub fn get_method(intr: @IdentInterner, cdata: Cmd, id: ast::NodeId,
pub fn get_method(intr: Rc<IdentInterner>, cdata: Cmd, id: ast::NodeId,
tcx: &ty::ctxt) -> ty::Method
{
let method_doc = lookup_item(id, cdata.data());
......@@ -823,7 +823,7 @@ pub fn get_method(intr: @IdentInterner, cdata: Cmd, id: ast::NodeId,
_ => ImplContainer(container_id),
};
let name = item_name(intr, method_doc);
let name = item_name(&*intr, method_doc);
let type_param_defs = item_ty_param_defs(method_doc, tcx, cdata,
tag_item_method_tps);
let rp_defs = item_region_param_defs(method_doc, cdata);
......@@ -867,7 +867,7 @@ pub fn get_item_variances(cdata: Cmd, id: ast::NodeId) -> ty::ItemVariances {
unwrap_(Decodable::decode(&mut decoder))
}
pub fn get_provided_trait_methods(intr: @IdentInterner, cdata: Cmd,
pub fn get_provided_trait_methods(intr: Rc<IdentInterner>, cdata: Cmd,
id: ast::NodeId, tcx: &ty::ctxt) ->
Vec<@ty::Method> {
let data = cdata.data();
......@@ -879,7 +879,7 @@ pub fn get_provided_trait_methods(intr: @IdentInterner, cdata: Cmd,
let mth = lookup_item(did.node, data);
if item_method_sort(mth) == 'p' {
result.push(@get_method(intr, cdata, did.node, tcx));
result.push(@get_method(intr.clone(), cdata, did.node, tcx));
}
true
});
......@@ -921,7 +921,7 @@ pub fn get_type_name_if_impl(cdata: Cmd,
ret
}
pub fn get_static_methods_if_impl(intr: @IdentInterner,
pub fn get_static_methods_if_impl(intr: Rc<IdentInterner>,
cdata: Cmd,
node_id: ast::NodeId)
-> Option<Vec<StaticMethodInfo> > {
......@@ -957,7 +957,7 @@ pub fn get_static_methods_if_impl(intr: @IdentInterner,
}
static_impl_methods.push(StaticMethodInfo {
ident: item_name(intr, impl_method_doc),
ident: item_name(&*intr, impl_method_doc),
def_id: item_def_id(impl_method_doc, cdata),
purity: purity,
vis: item_visibility(impl_method_doc),
......@@ -1009,7 +1009,7 @@ fn struct_field_family_to_visibility(family: Family) -> ast::Visibility {
}
}
pub fn get_struct_fields(intr: @IdentInterner, cdata: Cmd, id: ast::NodeId)
pub fn get_struct_fields(intr: Rc<IdentInterner>, cdata: Cmd, id: ast::NodeId)
-> Vec<ty::field_ty> {
let data = cdata.data();
let item = lookup_item(id, data);
......@@ -1018,7 +1018,7 @@ pub fn get_struct_fields(intr: @IdentInterner, cdata: Cmd, id: ast::NodeId)
let f = item_family(an_item);
if f == PublicField || f == PrivateField || f == InheritedField {
// FIXME #6993: name should be of type Name, not Ident
let name = item_name(intr, an_item);
let name = item_name(&*intr, an_item);
let did = item_def_id(an_item, cdata);
result.push(ty::field_ty {
name: name.name,
......
......@@ -29,6 +29,7 @@
use std::cmp;
use std::io;
use std::os::consts::{macos, freebsd, linux, android, win32};
use std::rc::Rc;
use std::str;
use std::slice;
......@@ -52,7 +53,7 @@ pub struct Context<'a> {
id_hash: &'a str,
hash: Option<&'a Svh>,
os: Os,
intr: @IdentInterner,
intr: Rc<IdentInterner>,
rejected_via_hash: bool,
}
......
......@@ -44,7 +44,7 @@ pub fn highlight(src: &str, class: Option<&str>) -> ~str {
/// it's used. All source code emission is done as slices from the source map,
/// not from the tokens themselves, in order to stay true to the original
/// source.
fn doit(sess: &parse::ParseSess, lexer: lexer::StringReader, class: Option<&str>,
fn doit(sess: &parse::ParseSess, mut lexer: lexer::StringReader, class: Option<&str>,
out: &mut Writer) -> io::IoResult<()> {
use syntax::parse::lexer::Reader;
......@@ -55,7 +55,7 @@ fn doit(sess: &parse::ParseSess, lexer: lexer::StringReader, class: Option<&str>
let mut is_macro_nonterminal = false;
loop {
let next = lexer.next_token();
let test = if next.tok == t::EOF {lexer.pos.get()} else {next.sp.lo};
let test = if next.tok == t::EOF {lexer.pos} else {next.sp.lo};
// The lexer consumes all whitespace and non-doc-comments when iterating
// between tokens. If this token isn't directly adjacent to our last
......
......@@ -581,14 +581,16 @@ pub enum TokenTree {
TTTok(Span, ::parse::token::Token),
// a delimited sequence (the delimiters appear as the first
// and last elements of the vector)
TTDelim(@Vec<TokenTree> ),
// FIXME(eddyb) #6308 Use Rc<[TokenTree]> after DST.
TTDelim(Rc<Vec<TokenTree>>),
// These only make sense for right-hand-sides of MBE macros:
// a kleene-style repetition sequence with a span, a TTForest,
// an optional separator, and a boolean where true indicates
// zero or more (..), and false indicates one or more (+).
TTSeq(Span, @Vec<TokenTree> , Option<::parse::token::Token>, bool),
// FIXME(eddyb) #6308 Use Rc<[TokenTree]> after DST.
TTSeq(Span, Rc<Vec<TokenTree>>, Option<::parse::token::Token>, bool),
// a syntactic variable that will be filled in by macro expansion.
TTNonterminal(Span, Ident)
......
......@@ -13,6 +13,8 @@
use ext::base;
use print;
use std::rc::Rc;
pub fn expand_syntax_ext(cx: &mut base::ExtCtxt,
sp: codemap::Span,
tt: &[ast::TokenTree])
......@@ -20,7 +22,7 @@ pub fn expand_syntax_ext(cx: &mut base::ExtCtxt,
cx.print_backtrace();
println!("{}", print::pprust::tt_to_str(&ast::TTDelim(
@tt.iter().map(|x| (*x).clone()).collect())));
Rc::new(tt.iter().map(|x| (*x).clone()).collect()))));
// any so that `log_syntax` can be invoked as an expression and item.
base::MacResult::dummy_any(sp)
......
......@@ -21,6 +21,7 @@
use parse::token::{Token, EOF, Nonterminal};
use parse::token;
use std::rc::Rc;
use collections::HashMap;
/* This is an Earley-like parser, without support for in-grammar nonterminals,
......@@ -102,7 +103,7 @@ pub struct MatcherPos {
sep: Option<Token>,
idx: uint,
up: Option<~MatcherPos>,
matches: Vec<Vec<@NamedMatch>>,
matches: Vec<Vec<Rc<NamedMatch>>>,
match_lo: uint, match_hi: uint,
sp_lo: BytePos,
}
......@@ -165,14 +166,14 @@ pub fn initial_matcher_pos(ms: Vec<Matcher> , sep: Option<Token>, lo: BytePos)
// ast::Matcher it was derived from.
pub enum NamedMatch {
MatchedSeq(Vec<@NamedMatch> , codemap::Span),
MatchedSeq(Vec<Rc<NamedMatch>>, codemap::Span),
MatchedNonterminal(Nonterminal)
}
pub fn nameize(p_s: &ParseSess, ms: &[Matcher], res: &[@NamedMatch])
-> HashMap<Ident, @NamedMatch> {
fn n_rec(p_s: &ParseSess, m: &Matcher, res: &[@NamedMatch],
ret_val: &mut HashMap<Ident, @NamedMatch>) {
pub fn nameize(p_s: &ParseSess, ms: &[Matcher], res: &[Rc<NamedMatch>])
-> HashMap<Ident, Rc<NamedMatch>> {
fn n_rec(p_s: &ParseSess, m: &Matcher, res: &[Rc<NamedMatch>],
ret_val: &mut HashMap<Ident, Rc<NamedMatch>>) {
match *m {
codemap::Spanned {node: MatchTok(_), .. } => (),
codemap::Spanned {node: MatchSeq(ref more_ms, _, _, _, _), .. } => {
......@@ -189,7 +190,7 @@ fn n_rec(p_s: &ParseSess, m: &Matcher, res: &[@NamedMatch],
p_s.span_diagnostic
.span_fatal(span, "duplicated bind name: " + string.get())
}
ret_val.insert(bind_name, res[idx]);
ret_val.insert(bind_name, res[idx].clone());
}
}
}
......@@ -199,16 +200,16 @@ fn n_rec(p_s: &ParseSess, m: &Matcher, res: &[@NamedMatch],
}
pub enum ParseResult {
Success(HashMap<Ident, @NamedMatch>),
Success(HashMap<Ident, Rc<NamedMatch>>),
Failure(codemap::Span, ~str),
Error(codemap::Span, ~str)
}
pub fn parse_or_else<R: Reader>(sess: &ParseSess,
cfg: ast::CrateConfig,
rdr: R,
ms: Vec<Matcher> )
-> HashMap<Ident, @NamedMatch> {
pub fn parse_or_else(sess: &ParseSess,
cfg: ast::CrateConfig,
rdr: TtReader,
ms: Vec<Matcher> )
-> HashMap<Ident, Rc<NamedMatch>> {
match parse(sess, cfg, rdr, ms.as_slice()) {
Success(m) => m,
Failure(sp, str) => sess.span_diagnostic.span_fatal(sp, str),
......@@ -226,11 +227,11 @@ pub fn token_name_eq(t1 : &Token, t2 : &Token) -> bool {
}
}
pub fn parse<R: Reader>(sess: &ParseSess,
cfg: ast::CrateConfig,
rdr: R,
ms: &[Matcher])
-> ParseResult {
pub fn parse(sess: &ParseSess,
cfg: ast::CrateConfig,
mut rdr: TtReader,
ms: &[Matcher])
-> ParseResult {
let mut cur_eis = Vec::new();
cur_eis.push(initial_matcher_pos(ms.iter()
.map(|x| (*x).clone())
......@@ -282,8 +283,8 @@ pub fn parse<R: Reader>(sess: &ParseSess,
let sub = (*ei.matches.get(idx)).clone();
new_pos.matches
.get_mut(idx)
.push(@MatchedSeq(sub, mk_sp(ei.sp_lo,
sp.hi)));
.push(Rc::new(MatchedSeq(sub, mk_sp(ei.sp_lo,
sp.hi))));
}
new_pos.idx += 1;
......@@ -325,7 +326,7 @@ pub fn parse<R: Reader>(sess: &ParseSess,
for idx in range(match_idx_lo, match_idx_hi) {
new_ei.matches
.get_mut(idx)
.push(@MatchedSeq(Vec::new(), sp));
.push(Rc::new(MatchedSeq(Vec::new(), sp)));
}
cur_eis.push(new_ei);
......@@ -395,14 +396,14 @@ pub fn parse<R: Reader>(sess: &ParseSess,
}
rdr.next_token();
} else /* bb_eis.len() == 1 */ {
let mut rust_parser = Parser(sess, cfg.clone(), rdr.dup());
let mut rust_parser = Parser(sess, cfg.clone(), ~rdr.clone());
let mut ei = bb_eis.pop().unwrap();
match ei.elts.get(ei.idx).node {
MatchNonterminal(_, name, idx) => {
let name_string = token::get_ident(name);
ei.matches.get_mut(idx).push(@MatchedNonterminal(
parse_nt(&mut rust_parser, name_string.get())));
ei.matches.get_mut(idx).push(Rc::new(MatchedNonterminal(
parse_nt(&mut rust_parser, name_string.get()))));
ei.idx += 1u;
}
_ => fail!()
......
......@@ -28,6 +28,7 @@
use util::small_vector::SmallVector;
use std::cell::RefCell;
use std::rc::Rc;
struct ParserAnyMacro<'a> {
parser: RefCell<Parser<'a>>,
......@@ -85,8 +86,8 @@ fn make_stmt(&self) -> @ast::Stmt {
struct MacroRulesMacroExpander {
name: Ident,
lhses: @Vec<@NamedMatch> ,
rhses: @Vec<@NamedMatch> ,
lhses: Vec<Rc<NamedMatch>>,
rhses: Vec<Rc<NamedMatch>>,
}
impl MacroExpander for MacroRulesMacroExpander {
......@@ -109,15 +110,15 @@ fn generic_extension(cx: &ExtCtxt,
sp: Span,
name: Ident,
arg: &[ast::TokenTree],
lhses: &[@NamedMatch],
rhses: &[@NamedMatch])
lhses: &[Rc<NamedMatch>],
rhses: &[Rc<NamedMatch>])
-> MacResult {
if cx.trace_macros() {
println!("{}! \\{ {} \\}",
token::get_ident(name),
print::pprust::tt_to_str(&TTDelim(@arg.iter()
.map(|x| (*x).clone())
.collect())));
print::pprust::tt_to_str(&TTDelim(Rc::new(arg.iter()
.map(|x| (*x).clone())
.collect()))));
}
// Which arm's failure should we report? (the one furthest along)
......@@ -220,12 +221,12 @@ fn ms(m: Matcher_) -> Matcher {
// Extract the arguments:
let lhses = match **argument_map.get(&lhs_nm) {
MatchedSeq(ref s, _) => /* FIXME (#2543) */ @(*s).clone(),
MatchedSeq(ref s, _) => /* FIXME (#2543) */ (*s).clone(),
_ => cx.span_bug(sp, "wrong-structured lhs")
};
let rhses = match **argument_map.get(&rhs_nm) {
MatchedSeq(ref s, _) => /* FIXME (#2543) */ @(*s).clone(),
MatchedSeq(ref s, _) => /* FIXME (#2543) */ (*s).clone(),
_ => cx.span_bug(sp, "wrong-structured rhs")
};
......
......@@ -17,107 +17,79 @@
use parse::token;
use parse::lexer::TokenAndSpan;
use std::cell::{Cell, RefCell};
use std::rc::Rc;
use collections::HashMap;
///an unzipping of `TokenTree`s
#[deriving(Clone)]
struct TtFrame {
forest: @Vec<ast::TokenTree> ,
idx: Cell<uint>,
forest: Rc<Vec<ast::TokenTree>>,
idx: uint,
dotdotdoted: bool,
sep: Option<Token>,
up: Option<@TtFrame>,
}
#[deriving(Clone)]
pub struct TtReader<'a> {
sp_diag: &'a SpanHandler,
// the unzipped tree:
priv stack: RefCell<@TtFrame>,
priv stack: Vec<TtFrame>,
/* for MBE-style macro transcription */
priv interpolations: RefCell<HashMap<Ident, @NamedMatch>>,
priv repeat_idx: RefCell<Vec<uint> >,
priv repeat_len: RefCell<Vec<uint> >,
priv interpolations: HashMap<Ident, Rc<NamedMatch>>,
priv repeat_idx: Vec<uint>,
priv repeat_len: Vec<uint>,
/* cached: */
cur_tok: RefCell<Token>,
cur_span: RefCell<Span>,
cur_tok: Token,
cur_span: Span,
}
/** This can do Macro-By-Example transcription. On the other hand, if
* `src` contains no `TTSeq`s and `TTNonterminal`s, `interp` can (and
* should) be none. */
pub fn new_tt_reader<'a>(sp_diag: &'a SpanHandler,
interp: Option<HashMap<Ident, @NamedMatch>>,
interp: Option<HashMap<Ident, Rc<NamedMatch>>>,
src: Vec<ast::TokenTree> )
-> TtReader<'a> {
let r = TtReader {
let mut r = TtReader {
sp_diag: sp_diag,
stack: RefCell::new(@TtFrame {
forest: @src,
idx: Cell::new(0u),
stack: vec!(TtFrame {
forest: Rc::new(src),
idx: 0,
dotdotdoted: false,
sep: None,
up: None
}),
interpolations: match interp { /* just a convienience */
None => RefCell::new(HashMap::new()),
Some(x) => RefCell::new(x),
None => HashMap::new(),
Some(x) => x,
},
repeat_idx: RefCell::new(Vec::new()),
repeat_len: RefCell::new(Vec::new()),
repeat_idx: Vec::new(),
repeat_len: Vec::new(),
/* dummy values, never read: */
cur_tok: RefCell::new(EOF),
cur_span: RefCell::new(DUMMY_SP),
cur_tok: EOF,
cur_span: DUMMY_SP,
};
tt_next_token(&r); /* get cur_tok and cur_span set up */
tt_next_token(&mut r); /* get cur_tok and cur_span set up */
r
}
fn dup_tt_frame(f: @TtFrame) -> @TtFrame {
@TtFrame {
forest: @(*f.forest).clone(),
idx: f.idx.clone(),
dotdotdoted: f.dotdotdoted,
sep: f.sep.clone(),
up: match f.up {
Some(up_frame) => Some(dup_tt_frame(up_frame)),
None => None
}
}
}
pub fn dup_tt_reader<'a>(r: &TtReader<'a>) -> TtReader<'a> {
TtReader {
sp_diag: r.sp_diag,
stack: RefCell::new(dup_tt_frame(r.stack.get())),
repeat_idx: r.repeat_idx.clone(),
repeat_len: r.repeat_len.clone(),
cur_tok: r.cur_tok.clone(),
cur_span: r.cur_span.clone(),
interpolations: r.interpolations.clone(),
}
}
fn lookup_cur_matched_by_matched(r: &TtReader, start: @NamedMatch)
-> @NamedMatch {
fn red(ad: @NamedMatch, idx: &uint) -> @NamedMatch {
fn lookup_cur_matched_by_matched(r: &TtReader, start: Rc<NamedMatch>) -> Rc<NamedMatch> {
r.repeat_idx.iter().fold(start, |ad, idx| {
match *ad {
MatchedNonterminal(_) => {
// end of the line; duplicate henceforth
ad
ad.clone()
}
MatchedSeq(ref ads, _) => *ads.get(*idx)
MatchedSeq(ref ads, _) => ads.get(*idx).clone()
}
}
r.repeat_idx.borrow().iter().fold(start, red)
})
}
fn lookup_cur_matched(r: &TtReader, name: Ident) -> @NamedMatch {
let matched_opt = r.interpolations.borrow().find_copy(&name);
fn lookup_cur_matched(r: &TtReader, name: Ident) -> Rc<NamedMatch> {
let matched_opt = r.interpolations.find_copy(&name);
match matched_opt {
Some(s) => lookup_cur_matched_by_matched(r, s),
None => {
r.sp_diag.span_fatal(r.cur_span.get(),
r.sp_diag.span_fatal(r.cur_span,
format!("unknown macro variable `{}`",
token::get_ident(name)));
}
......@@ -167,143 +139,140 @@ fn lockstep_iter_size(t: &TokenTree, r: &TtReader) -> LockstepIterSize {
// return the next token from the TtReader.
// EFFECT: advances the reader's token field
pub fn tt_next_token(r: &TtReader) -> TokenAndSpan {
pub fn tt_next_token(r: &mut TtReader) -> TokenAndSpan {
// FIXME(pcwalton): Bad copy?
let ret_val = TokenAndSpan {
tok: r.cur_tok.get(),
sp: r.cur_span.get(),
tok: r.cur_tok.clone(),
sp: r.cur_span.clone(),
};
loop {
if r.stack.borrow().idx.get() < r.stack.borrow().forest.len() {
break;
}
/* done with this set; pop or repeat? */
if !r.stack.get().dotdotdoted || {
*r.repeat_idx.borrow().last().unwrap() ==
*r.repeat_len.borrow().last().unwrap() - 1
} {
match r.stack.get().up {
None => {
r.cur_tok.set(EOF);
let should_pop = match r.stack.last() {
None => {
assert_eq!(ret_val.tok, EOF);
return ret_val;
}
Some(tt_f) => {
if r.stack.get().dotdotdoted {
r.repeat_idx.borrow_mut().pop().unwrap();
r.repeat_len.borrow_mut().pop().unwrap();
}
Some(frame) => {
if frame.idx < frame.forest.len() {
break;
}
r.stack.set(tt_f);
r.stack.get().idx.set(r.stack.get().idx.get() + 1u);
}
!frame.dotdotdoted ||
*r.repeat_idx.last().unwrap() == *r.repeat_len.last().unwrap() - 1
}
};
} else { /* repeat */
r.stack.get().idx.set(0u);
{
let mut repeat_idx = r.repeat_idx.borrow_mut();
let last_repeat_idx = repeat_idx.len() - 1u;
*repeat_idx.get_mut(last_repeat_idx) += 1u;
/* done with this set; pop or repeat? */
if should_pop {
let prev = r.stack.pop().unwrap();
match r.stack.mut_last() {
None => {
r.cur_tok = EOF;
return ret_val;
}
Some(frame) => {
frame.idx += 1;
}
}
match r.stack.get().sep.clone() {
Some(tk) => {
r.cur_tok.set(tk); /* repeat same span, I guess */
return ret_val;
}
None => ()
if prev.dotdotdoted {
r.repeat_idx.pop();
r.repeat_len.pop();
}
} else { /* repeat */
*r.repeat_idx.mut_last().unwrap() += 1u;
r.stack.mut_last().unwrap().idx = 0;
match r.stack.last().unwrap().sep.clone() {
Some(tk) => {
r.cur_tok = tk; /* repeat same span, I guess */
return ret_val;
}
None => {}
}
}
}
loop { /* because it's easiest, this handles `TTDelim` not starting
with a `TTTok`, even though it won't happen */
// FIXME(pcwalton): Bad copy.
match (*r.stack.get().forest.get(r.stack.get().idx.get())).clone() {
TTDelim(tts) => {
r.stack.set(@TtFrame {
forest: tts,
idx: Cell::new(0u),
dotdotdoted: false,
sep: None,
up: Some(r.stack.get())
});
// if this could be 0-length, we'd need to potentially recur here
}
TTTok(sp, tok) => {
r.cur_span.set(sp);
r.cur_tok.set(tok);
r.stack.get().idx.set(r.stack.get().idx.get() + 1u);
return ret_val;
}
TTSeq(sp, tts, sep, zerok) => {
with a `TTTok`, even though it won't happen */
let t = {
let frame = r.stack.last().unwrap();
// FIXME(pcwalton): Bad copy.
let t = TTSeq(sp, tts, sep.clone(), zerok);
match lockstep_iter_size(&t, r) {
LisUnconstrained => {
r.sp_diag.span_fatal(
sp, /* blame macro writer */
"attempted to repeat an expression \
containing no syntax \
variables matched as repeating at this depth");
}
LisContradiction(ref msg) => {
/* FIXME #2887 blame macro invoker instead*/
r.sp_diag.span_fatal(sp, (*msg));
}
LisConstraint(len, _) => {
if len == 0 {
if !zerok {
r.sp_diag.span_fatal(sp, /* FIXME #2887 blame invoker
*/
"this must repeat at least \
once");
}
r.stack.get().idx.set(r.stack.get().idx.get() + 1u);
return tt_next_token(r);
} else {
r.repeat_len.borrow_mut().push(len);
r.repeat_idx.borrow_mut().push(0u);
r.stack.set(@TtFrame {
forest: tts,
idx: Cell::new(0u),
dotdotdoted: true,
sep: sep,
up: Some(r.stack.get())
});
}
}
(*frame.forest.get(frame.idx)).clone()
};
match t {
TTDelim(tts) => {
r.stack.push(TtFrame {
forest: tts,
idx: 0,
dotdotdoted: false,
sep: None
});
// if this could be 0-length, we'd need to potentially recur here
}
}
// FIXME #2887: think about span stuff here
TTNonterminal(sp, ident) => {
match *lookup_cur_matched(r, ident) {
/* sidestep the interpolation tricks for ident because
(a) idents can be in lots of places, so it'd be a pain
(b) we actually can, since it's a token. */
MatchedNonterminal(NtIdent(~sn,b)) => {
r.cur_span.set(sp);
r.cur_tok.set(IDENT(sn,b));
r.stack.get().idx.set(r.stack.get().idx.get() + 1u);
TTTok(sp, tok) => {
r.cur_span = sp;
r.cur_tok = tok;
r.stack.mut_last().unwrap().idx += 1;
return ret_val;
}
MatchedNonterminal(ref other_whole_nt) => {
}
TTSeq(sp, tts, sep, zerok) => {
// FIXME(pcwalton): Bad copy.
r.cur_span.set(sp);
r.cur_tok.set(INTERPOLATED((*other_whole_nt).clone()));
r.stack.get().idx.set(r.stack.get().idx.get() + 1u);
return ret_val;
}
MatchedSeq(..) => {
r.sp_diag.span_fatal(
r.cur_span.get(), /* blame the macro writer */
format!("variable '{}' is still repeating at this depth",
token::get_ident(ident)));
}
match lockstep_iter_size(&TTSeq(sp, tts.clone(), sep.clone(), zerok), r) {
LisUnconstrained => {
r.sp_diag.span_fatal(
sp.clone(), /* blame macro writer */
"attempted to repeat an expression \
containing no syntax \
variables matched as repeating at this depth");
}
LisContradiction(ref msg) => {
// FIXME #2887 blame macro invoker instead
r.sp_diag.span_fatal(sp.clone(), *msg);
}
LisConstraint(len, _) => {
if len == 0 {
if !zerok {
// FIXME #2887 blame invoker
r.sp_diag.span_fatal(sp.clone(),
"this must repeat at least once");
}
r.stack.mut_last().unwrap().idx += 1;
return tt_next_token(r);
}
r.repeat_len.push(len);
r.repeat_idx.push(0);
r.stack.push(TtFrame {
forest: tts,
idx: 0,
dotdotdoted: true,
sep: sep.clone()
});
}
}
}
// FIXME #2887: think about span stuff here
TTNonterminal(sp, ident) => {
r.stack.mut_last().unwrap().idx += 1;
match *lookup_cur_matched(r, ident) {
/* sidestep the interpolation tricks for ident because
(a) idents can be in lots of places, so it'd be a pain
(b) we actually can, since it's a token. */
MatchedNonterminal(NtIdent(~sn,b)) => {
r.cur_span = sp;
r.cur_tok = IDENT(sn,b);
return ret_val;
}
MatchedNonterminal(ref other_whole_nt) => {
// FIXME(pcwalton): Bad copy.
r.cur_span = sp;
r.cur_tok = INTERPOLATED((*other_whole_nt).clone());
return ret_val;
}
MatchedSeq(..) => {
r.sp_diag.span_fatal(
r.cur_span, /* blame the macro writer */
format!("variable '{}' is still repeating at this depth",
token::get_ident(ident)));
}
}
}
}
}
}
}
......@@ -16,6 +16,8 @@
use owned_slice::OwnedSlice;
use util::small_vector::SmallVector;
use std::rc::Rc;
// We may eventually want to be able to fold over type parameters, too.
pub trait Folder {
fn fold_crate(&mut self, c: Crate) -> Crate {
......@@ -375,10 +377,10 @@ pub fn fold_tts<T: Folder>(tts: &[TokenTree], fld: &mut T) -> Vec<TokenTree> {
match *tt {
TTTok(span, ref tok) =>
TTTok(span,maybe_fold_ident(tok,fld)),
TTDelim(tts) => TTDelim(@fold_tts(tts.as_slice(), fld)),
TTSeq(span, pattern, ref sep, is_optional) =>
TTDelim(ref tts) => TTDelim(Rc::new(fold_tts(tts.as_slice(), fld))),
TTSeq(span, ref pattern, ref sep, is_optional) =>
TTSeq(span,
@fold_tts(pattern.as_slice(), fld),
Rc::new(fold_tts(pattern.as_slice(), fld)),
sep.as_ref().map(|tok|maybe_fold_ident(tok,fld)),
is_optional),
TTNonterminal(sp,ref ident) =>
......
......@@ -133,44 +133,42 @@ fn horizontal_trim(lines: Vec<~str> ) -> Vec<~str> {
fail!("not a doc-comment: {}", comment);
}
fn read_to_eol(rdr: &StringReader) -> ~str {
fn read_to_eol(rdr: &mut StringReader) -> ~str {
let mut val = ~"";
while !rdr.curr_is('\n') && !is_eof(rdr) {
val.push_char(rdr.curr.get().unwrap());
val.push_char(rdr.curr.unwrap());
bump(rdr);
}
if rdr.curr_is('\n') { bump(rdr); }
return val;
}
fn read_one_line_comment(rdr: &StringReader) -> ~str {
fn read_one_line_comment(rdr: &mut StringReader) -> ~str {
let val = read_to_eol(rdr);
assert!((val[0] == '/' as u8 && val[1] == '/' as u8) ||
(val[0] == '#' as u8 && val[1] == '!' as u8));
return val;
}
fn consume_non_eol_whitespace(rdr: &StringReader) {
while is_whitespace(rdr.curr.get()) && !rdr.curr_is('\n') &&
!is_eof(rdr) {
fn consume_non_eol_whitespace(rdr: &mut StringReader) {
while is_whitespace(rdr.curr) && !rdr.curr_is('\n') && !is_eof(rdr) {
bump(rdr);
}
}
fn push_blank_line_comment(rdr: &StringReader, comments: &mut Vec<Comment> ) {
fn push_blank_line_comment(rdr: &StringReader, comments: &mut Vec<Comment>) {
debug!(">>> blank-line comment");
let v: Vec<~str> = Vec::new();
comments.push(Comment {
style: BlankLine,
lines: v,
pos: rdr.last_pos.get(),
lines: Vec::new(),
pos: rdr.last_pos,
});
}
fn consume_whitespace_counting_blank_lines(rdr: &StringReader,
comments: &mut Vec<Comment> ) {
while is_whitespace(rdr.curr.get()) && !is_eof(rdr) {
if rdr.col.get() == CharPos(0u) && rdr.curr_is('\n') {
fn consume_whitespace_counting_blank_lines(rdr: &mut StringReader,
comments: &mut Vec<Comment>) {
while is_whitespace(rdr.curr) && !is_eof(rdr) {
if rdr.col == CharPos(0u) && rdr.curr_is('\n') {
push_blank_line_comment(rdr, &mut *comments);
}
bump(rdr);
......@@ -178,10 +176,10 @@ fn consume_whitespace_counting_blank_lines(rdr: &StringReader,
}
fn read_shebang_comment(rdr: &StringReader, code_to_the_left: bool,
comments: &mut Vec<Comment> ) {
fn read_shebang_comment(rdr: &mut StringReader, code_to_the_left: bool,
comments: &mut Vec<Comment>) {
debug!(">>> shebang comment");
let p = rdr.last_pos.get();
let p = rdr.last_pos;
debug!("<<< shebang comment");
comments.push(Comment {
style: if code_to_the_left { Trailing } else { Isolated },
......@@ -190,10 +188,10 @@ fn read_shebang_comment(rdr: &StringReader, code_to_the_left: bool,
});
}
fn read_line_comments(rdr: &StringReader, code_to_the_left: bool,
comments: &mut Vec<Comment> ) {
fn read_line_comments(rdr: &mut StringReader, code_to_the_left: bool,
comments: &mut Vec<Comment>) {
debug!(">>> line comments");
let p = rdr.last_pos.get();
let p = rdr.last_pos;
let mut lines: Vec<~str> = Vec::new();
while rdr.curr_is('/') && nextch_is(rdr, '/') {
let line = read_one_line_comment(rdr);
......@@ -247,13 +245,13 @@ fn trim_whitespace_prefix_and_push_line(lines: &mut Vec<~str> ,
lines.push(s1);
}
fn read_block_comment(rdr: &StringReader,
fn read_block_comment(rdr: &mut StringReader,
code_to_the_left: bool,
comments: &mut Vec<Comment> ) {
debug!(">>> block comment");
let p = rdr.last_pos.get();
let p = rdr.last_pos;
let mut lines: Vec<~str> = Vec::new();
let col: CharPos = rdr.col.get();
let col = rdr.col;
bump(rdr);
bump(rdr);
......@@ -262,7 +260,7 @@ fn read_block_comment(rdr: &StringReader,
// doc-comments are not really comments, they are attributes
if (rdr.curr_is('*') && !nextch_is(rdr, '*')) || rdr.curr_is('!') {
while !(rdr.curr_is('*') && nextch_is(rdr, '/')) && !is_eof(rdr) {
curr_line.push_char(rdr.curr.get().unwrap());
curr_line.push_char(rdr.curr.unwrap());
bump(rdr);
}
if !is_eof(rdr) {
......@@ -286,7 +284,7 @@ fn read_block_comment(rdr: &StringReader,
curr_line = ~"";
bump(rdr);
} else {
curr_line.push_char(rdr.curr.get().unwrap());
curr_line.push_char(rdr.curr.unwrap());
if rdr.curr_is('/') && nextch_is(rdr, '*') {
bump(rdr);
bump(rdr);
......@@ -324,7 +322,7 @@ fn peeking_at_comment(rdr: &StringReader) -> bool {
!lexer::nextnextch_is(rdr, '['));
}
fn consume_comment(rdr: &StringReader,
fn consume_comment(rdr: &mut StringReader,
code_to_the_left: bool,
comments: &mut Vec<Comment> ) {
debug!(">>> consume comment");
......@@ -355,7 +353,7 @@ pub fn gather_comments_and_literals(span_diagnostic:
let src = str::from_utf8_owned(src).unwrap();
let cm = CodeMap::new();
let filemap = cm.new_filemap(path, src);
let rdr = lexer::new_low_level_string_reader(span_diagnostic, filemap);
let mut rdr = lexer::new_low_level_string_reader(span_diagnostic, filemap);
let mut comments: Vec<Comment> = Vec::new();
let mut literals: Vec<Literal> = Vec::new();
......@@ -363,20 +361,20 @@ pub fn gather_comments_and_literals(span_diagnostic:
while !is_eof(&rdr) {
loop {
let mut code_to_the_left = !first_read;
consume_non_eol_whitespace(&rdr);
consume_non_eol_whitespace(&mut rdr);
if rdr.curr_is('\n') {
code_to_the_left = false;
consume_whitespace_counting_blank_lines(&rdr, &mut comments);
consume_whitespace_counting_blank_lines(&mut rdr, &mut comments);
}
while peeking_at_comment(&rdr) {
consume_comment(&rdr, code_to_the_left, &mut comments);
consume_whitespace_counting_blank_lines(&rdr, &mut comments);
consume_comment(&mut rdr, code_to_the_left, &mut comments);
consume_whitespace_counting_blank_lines(&mut rdr, &mut comments);
}
break;
}
let bstart = rdr.last_pos.get();
let bstart = rdr.last_pos;
rdr.next_token();
//discard, and look ahead; we're working with internal state
let TokenAndSpan {tok: tok, sp: sp} = rdr.peek();
......
此差异已折叠。
......@@ -366,13 +366,13 @@ fn sp(a: u32, b: u32) -> Span {
[ast::TTTok(_,_),
ast::TTTok(_,token::NOT),
ast::TTTok(_,_),
ast::TTDelim(delim_elts)] => {
ast::TTDelim(ref delim_elts)] => {
let delim_elts: &[ast::TokenTree] = delim_elts.as_slice();
match delim_elts {
[ast::TTTok(_,token::LPAREN),
ast::TTDelim(first_set),
ast::TTDelim(ref first_set),
ast::TTTok(_,token::FAT_ARROW),
ast::TTDelim(second_set),
ast::TTDelim(ref second_set),
ast::TTTok(_,token::RPAREN)] => {
let first_set: &[ast::TokenTree] =
first_set.as_slice();
......
......@@ -80,6 +80,7 @@
use collections::HashSet;
use std::kinds::marker;
use std::mem::replace;
use std::rc::Rc;
use std::vec;
#[allow(non_camel_case_types)]
......@@ -274,7 +275,7 @@ struct ParsedItemsAndViewItems {
/* ident is handled by common.rs */
pub fn Parser<'a>(sess: &'a ParseSess, cfg: ast::CrateConfig, rdr: ~Reader:)
pub fn Parser<'a>(sess: &'a ParseSess, cfg: ast::CrateConfig, mut rdr: ~Reader:)
-> Parser<'a> {
let tok0 = rdr.next_token();
let span = tok0.sp;
......@@ -328,7 +329,7 @@ pub struct Parser<'a> {
restriction: restriction,
quote_depth: uint, // not (yet) related to the quasiquoter
reader: ~Reader:,
interner: @token::IdentInterner,
interner: Rc<token::IdentInterner>,
/// The set of seen errors about obsolete syntax. Used to suppress
/// extra detail when the same error is seen twice
obsolete_set: HashSet<ObsoleteSyntax>,
......@@ -2104,7 +2105,7 @@ fn parse_non_delim_tt_tok(p: &mut Parser) -> TokenTree {
let seq = match seq {
Spanned { node, .. } => node,
};
TTSeq(mk_sp(sp.lo, p.span.hi), @seq, s, z)
TTSeq(mk_sp(sp.lo, p.span.hi), Rc::new(seq), s, z)
} else {
TTNonterminal(sp, p.parse_ident())
}
......@@ -2147,7 +2148,7 @@ fn parse_any_tt_tok(p: &mut Parser) -> TokenTree {
result.push(parse_any_tt_tok(self));
self.open_braces.pop().unwrap();
TTDelim(@result)
TTDelim(Rc::new(result))
}
_ => parse_non_delim_tt_tok(self)
}
......
......@@ -22,6 +22,7 @@
use std::fmt;
use std::local_data;
use std::path::BytesContainer;
use std::rc::Rc;
#[allow(non_camel_case_types)]
#[deriving(Clone, Encodable, Decodable, Eq, TotalEq, Hash, Show)]
......@@ -531,13 +532,14 @@ pub fn token_to_binop(tok: &Token) -> Option<ast::BinOp> {
// if an interner exists in TLS, return it. Otherwise, prepare a
// fresh one.
pub fn get_ident_interner() -> @IdentInterner {
local_data_key!(key: @::parse::token::IdentInterner)
match local_data::get(key, |k| k.map(|k| *k)) {
// FIXME(eddyb) #8726 This should probably use a task-local reference.
pub fn get_ident_interner() -> Rc<IdentInterner> {
local_data_key!(key: Rc<::parse::token::IdentInterner>)
match local_data::get(key, |k| k.map(|k| k.clone())) {
Some(interner) => interner,
None => {
let interner = @mk_fresh_ident_interner();
local_data::set(key, interner);
let interner = Rc::new(mk_fresh_ident_interner());
local_data::set(key, interner.clone());
interner
}
}
......
......@@ -26,13 +26,12 @@
use print::pp;
use std::cast;
use std::cell::RefCell;
use std::char;
use std::str;
use std::io;
use std::io::{IoResult, MemWriter};
use std::rc::Rc;
// The &mut State is stored here to prevent recursive type.
pub enum AnnNode<'a> {
NodeBlock(&'a ast::Block),
NodeItem(&'a ast::Item),
......@@ -57,11 +56,11 @@ pub struct CurrentCommentAndLiteral {
pub struct State<'a> {
s: pp::Printer,
cm: Option<&'a CodeMap>,
intr: @token::IdentInterner,
intr: Rc<token::IdentInterner>,
comments: Option<Vec<comments::Comment> >,
literals: Option<Vec<comments::Literal> >,
cur_cmnt_and_lit: CurrentCommentAndLiteral,
boxes: RefCell<Vec<pp::Breaks> >,
boxes: Vec<pp::Breaks>,
ann: &'a PpAnn
}
......@@ -82,7 +81,7 @@ pub fn rust_printer_annotated<'a>(writer: ~io::Writer,
cur_cmnt: 0,
cur_lit: 0
},
boxes: RefCell::new(Vec::new()),
boxes: Vec::new(),
ann: ann
}
}
......@@ -124,7 +123,7 @@ pub fn print_crate<'a>(cm: &'a CodeMap,
cur_cmnt: 0,
cur_lit: 0
},
boxes: RefCell::new(Vec::new()),
boxes: Vec::new(),
ann: ann
};
try!(s.print_mod(&krate.module, krate.attrs.as_slice()));
......@@ -238,23 +237,23 @@ pub fn visibility_qualified(vis: ast::Visibility, s: &str) -> ~str {
impl<'a> State<'a> {
pub fn ibox(&mut self, u: uint) -> IoResult<()> {
self.boxes.borrow_mut().push(pp::Inconsistent);
self.boxes.push(pp::Inconsistent);
pp::ibox(&mut self.s, u)
}
pub fn end(&mut self) -> IoResult<()> {
self.boxes.borrow_mut().pop().unwrap();
self.boxes.pop().unwrap();
pp::end(&mut self.s)
}
pub fn cbox(&mut self, u: uint) -> IoResult<()> {
self.boxes.borrow_mut().push(pp::Consistent);
self.boxes.push(pp::Consistent);
pp::cbox(&mut self.s, u)
}
// "raw box"
pub fn rbox(&mut self, u: uint, b: pp::Breaks) -> IoResult<()> {
self.boxes.borrow_mut().push(b);
self.boxes.push(b);
pp::rbox(&mut self.s, u, b)
}
......@@ -321,8 +320,8 @@ pub fn is_bol(&mut self) -> bool {
self.s.last_token().is_eof() || self.s.last_token().is_hardbreak_tok()
}
pub fn in_cbox(&mut self) -> bool {
match self.boxes.borrow().last() {
pub fn in_cbox(&self) -> bool {
match self.boxes.last() {
Some(&last_box) => last_box == pp::Consistent,
None => false
}
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册