提交 d8b1fa0a 编写于 作者: B Brendan Zabarauskas

Use PascalCase for token variants

上级 bd7138dd
......@@ -55,7 +55,7 @@ extern crate syntax;
extern crate rustc;
use syntax::codemap::Span;
use syntax::parse::token::{IDENT, get_ident};
use syntax::parse::token;
use syntax::ast::{TokenTree, TtToken};
use syntax::ext::base::{ExtCtxt, MacResult, DummyResult, MacExpr};
use syntax::ext::build::AstBuilder; // trait for expr_uint
......@@ -71,7 +71,7 @@ fn expand_rn(cx: &mut ExtCtxt, sp: Span, args: &[TokenTree])
("I", 1)];
let text = match args {
[TtToken(_, IDENT(s, _))] => get_ident(s).to_string(),
[TtToken(_, token::Ident(s, _))] => token::get_ident(s).to_string(),
_ => {
cx.span_err(sp, "argument should be a single identifier");
return DummyResult::any(sp);
......
......@@ -30,12 +30,12 @@
use syntax::ast;
use syntax::ast::Name;
use syntax::parse::token::*;
use syntax::parse::token;
use syntax::parse::lexer::TokenAndSpan;
fn parse_token_list(file: &str) -> HashMap<String, Token> {
/// Builds a placeholder identifier token. The interner index `Name(0)` and
/// syntax context `0` are dummies — callers in `parse_token_list` compare
/// only the token *variant*, never the identifier's contents.
fn id() -> Token {
    token::Ident(ast::Ident { name: Name(0), ctxt: 0, }, false)
}
let mut res = HashMap::new();
......@@ -52,64 +52,64 @@ fn id() -> Token {
let num = line.slice_from(eq + 1);
let tok = match val {
"SHR" => BINOP(SHR),
"DOLLAR" => DOLLAR,
"LT" => LT,
"STAR" => BINOP(STAR),
"FLOAT_SUFFIX" => id(),
"INT_SUFFIX" => id(),
"SHL" => BINOP(SHL),
"LBRACE" => LBRACE,
"RARROW" => RARROW,
"LIT_STR" => LIT_STR(Name(0)),
"DOTDOT" => DOTDOT,
"MOD_SEP" => MOD_SEP,
"DOTDOTDOT" => DOTDOTDOT,
"NOT" => NOT,
"AND" => BINOP(AND),
"LPAREN" => LPAREN,
"ANDAND" => ANDAND,
"AT" => AT,
"LBRACKET" => LBRACKET,
"LIT_STR_RAW" => LIT_STR_RAW(Name(0), 0),
"RPAREN" => RPAREN,
"SLASH" => BINOP(SLASH),
"COMMA" => COMMA,
"LIFETIME" => LIFETIME(ast::Ident { name: Name(0), ctxt: 0 }),
"CARET" => BINOP(CARET),
"TILDE" => TILDE,
"IDENT" => id(),
"PLUS" => BINOP(PLUS),
"LIT_CHAR" => LIT_CHAR(Name(0)),
"LIT_BYTE" => LIT_BYTE(Name(0)),
"EQ" => EQ,
"RBRACKET" => RBRACKET,
"COMMENT" => COMMENT,
"DOC_COMMENT" => DOC_COMMENT(Name(0)),
"DOT" => DOT,
"EQEQ" => EQEQ,
"NE" => NE,
"GE" => GE,
"PERCENT" => BINOP(PERCENT),
"RBRACE" => RBRACE,
"BINOP" => BINOP(PLUS),
"POUND" => POUND,
"OROR" => OROR,
"LIT_INTEGER" => LIT_INTEGER(Name(0)),
"BINOPEQ" => BINOPEQ(PLUS),
"LIT_FLOAT" => LIT_FLOAT(Name(0)),
"WHITESPACE" => WS,
"UNDERSCORE" => UNDERSCORE,
"MINUS" => BINOP(MINUS),
"SEMI" => SEMI,
"COLON" => COLON,
"FAT_ARROW" => FAT_ARROW,
"OR" => BINOP(OR),
"GT" => GT,
"LE" => LE,
"LIT_BINARY" => LIT_BINARY(Name(0)),
"LIT_BINARY_RAW" => LIT_BINARY_RAW(Name(0), 0),
_ => continue
"SHR" => token::BinOp(token::Shr),
"DOLLAR" => token::Dollar,
"LT" => token::Lt,
"STAR" => token::BinOp(token::Star),
"FLOAT_SUFFIX" => id(),
"INT_SUFFIX" => id(),
"SHL" => token::BinOp(token::Shl),
"LBRACE" => token::LBrace,
"RARROW" => token::Rarrow,
"LIT_STR" => token::LitStr(Name(0)),
"DOTDOT" => token::DotDot,
"MOD_SEP" => token::ModSep,
"DOTDOTDOT" => token::DotDotDot,
"NOT" => token::Not,
"AND" => token::BinOp(token::And),
"LPAREN" => token::LParen,
"ANDAND" => token::AndAnd,
"AT" => token::At,
"LBRACKET" => token::LBracket,
"LIT_STR_RAW" => token::LitStrRaw(Name(0), 0),
"RPAREN" => token::RParen,
"SLASH" => token::BinOp(token::Slash),
"COMMA" => token::Comma,
"LIFETIME" => token::Lifetime(ast::Ident { name: Name(0), ctxt: 0 }),
"CARET" => token::BinOp(token::Caret),
"TILDE" => token::Tilde,
"IDENT" => token::Id(),
"PLUS" => token::BinOp(token::Plus),
"LIT_CHAR" => token::LitChar(Name(0)),
"LIT_BYTE" => token::LitByte(Name(0)),
"EQ" => token::Eq,
"RBRACKET" => token::RBracket,
"COMMENT" => token::Comment,
"DOC_COMMENT" => token::DocComment(Name(0)),
"DOT" => token::Dot,
"EQEQ" => token::EqEq,
"NE" => token::Ne,
"GE" => token::Ge,
"PERCENT" => token::BinOp(token::Percent),
"RBRACE" => token::RBrace,
"BINOP" => token::BinOp(token::Plus),
"POUND" => token::Pound,
"OROR" => token::OrOr,
"LIT_INTEGER" => token::LitInteger(Name(0)),
"BINOPEQ" => token::BinOpEq(token::Plus),
"LIT_FLOAT" => token::LitFloat(Name(0)),
"WHITESPACE" => token::Whitespace,
"UNDERSCORE" => token::Underscore,
"MINUS" => token::BinOp(token::Minus),
"SEMI" => token::Semi,
"COLON" => token::Colon,
"FAT_ARROW" => token::FatArrow,
"OR" => token::BinOp(token::Or),
"GT" => token::Gt,
"LE" => token::Le,
"LIT_BINARY" => token::LitBinary(Name(0)),
"LIT_BINARY_RAW" => token::LitBinaryRaw(Name(0), 0),
_ => continue,
};
res.insert(num.to_string(), tok);
......@@ -119,19 +119,19 @@ fn id() -> Token {
res
}
fn str_to_binop(s: &str) -> BinOp {
fn str_to_binop(s: &str) -> BinOpToken {
match s {
"+" => PLUS,
"/" => SLASH,
"-" => MINUS,
"*" => STAR,
"%" => PERCENT,
"^" => CARET,
"&" => AND,
"|" => OR,
"<<" => SHL,
">>" => SHR,
_ => fail!("Bad binop str `{}`", s)
"+" => token::Plus,
"/" => token::Slash,
"-" => token::Minus,
"*" => token::Star,
"%" => token::Percent,
"^" => token::Caret,
"&" => token::And,
"|" => token::Or,
"<<" => token::Shl,
">>" => token::Shr,
_ => fail!("Bad binop str `{}`", s),
}
}
......@@ -186,19 +186,20 @@ fn parse_antlr_token(s: &str, tokens: &HashMap<String, Token>) -> TokenAndSpan {
debug!("What we got: content (`{}`), proto: {}", content, proto_tok);
let real_tok = match *proto_tok {
BINOP(..) => BINOP(str_to_binop(content)),
BINOPEQ(..) => BINOPEQ(str_to_binop(content.slice_to(content.len() - 1))),
LIT_STR(..) => LIT_STR(fix(content)),
LIT_STR_RAW(..) => LIT_STR_RAW(fix(content), count(content)),
LIT_CHAR(..) => LIT_CHAR(fixchar(content)),
LIT_BYTE(..) => LIT_BYTE(fixchar(content)),
DOC_COMMENT(..) => DOC_COMMENT(nm),
LIT_INTEGER(..) => LIT_INTEGER(nm),
LIT_FLOAT(..) => LIT_FLOAT(nm),
LIT_BINARY(..) => LIT_BINARY(nm),
LIT_BINARY_RAW(..) => LIT_BINARY_RAW(fix(content), count(content)),
IDENT(..) => IDENT(ast::Ident { name: nm, ctxt: 0 }, true),
LIFETIME(..) => LIFETIME(ast::Ident { name: nm, ctxt: 0 }),
token::BinOp(..) => token::BinOp(str_to_binop(content)),
token::BinOpEq(..) => token::BinOpEq(str_to_binop(content.slice_to(
content.len() - 1))),
token::LitStr(..) => token::LitStr(fix(content)),
token::LitStrRaw(..) => token::LitStrRaw(fix(content), count(content)),
token::LitChar(..) => token::LitChar(fixchar(content)),
token::LitByte(..) => token::LitByte(fixchar(content)),
token::DocComment(..) => token::DocComment(nm),
token::LitInteger(..) => token::LitInteger(nm),
token::LitFloat(..) => token::LitFloat(nm),
token::LitBinary(..) => token::LitBinary(nm),
token::LitBinaryRaw(..) => token::LitBinaryRaw(fix(content), count(content)),
token::Ident(..) => token::Ident(ast::Ident { name: nm, ctxt: 0 }, true),
token::Lifetime(..) => token::Lifetime(ast::Ident { name: nm, ctxt: 0 }),
ref t => t.clone()
};
......@@ -222,8 +223,8 @@ fn parse_antlr_token(s: &str, tokens: &HashMap<String, Token>) -> TokenAndSpan {
fn tok_cmp(a: &Token, b: &Token) -> bool {
match a {
&IDENT(id, _) => match b {
&IDENT(id2, _) => id == id2,
&token::Ident(id, _) => match b {
&token::Ident(id2, _) => id == id2,
_ => false
},
_ => a == b
......@@ -281,19 +282,20 @@ fn next(r: &mut lexer::StringReader) -> TokenAndSpan {
)
)
matches!(LIT_BYTE(..),
LIT_CHAR(..),
LIT_INTEGER(..),
LIT_FLOAT(..),
LIT_STR(..),
LIT_STR_RAW(..),
LIT_BINARY(..),
LIT_BINARY_RAW(..),
IDENT(..),
LIFETIME(..),
INTERPOLATED(..),
DOC_COMMENT(..),
SHEBANG(..)
matches!(
LitByte(..),
LitChar(..),
LitInteger(..),
LitFloat(..),
LitStr(..),
LitStrRaw(..),
LitBinary(..),
LitBinaryRaw(..),
Ident(..),
Lifetime(..),
Interpolated(..),
DocComment(..),
Shebang(..)
);
}
}
......@@ -634,7 +634,7 @@ fn parse(cx: &mut ExtCtxt, tts: &[ast::TokenTree]) -> Option<String> {
return None
}
};
if !parser.eat(&token::EOF) {
if !parser.eat(&token::Eof) {
cx.span_err(parser.span, "only one string literal allowed");
return None;
}
......
......@@ -428,7 +428,7 @@ fn process_struct_field_def(&mut self,
let qualname = format!("{}::{}", qualname, name);
let typ = ppaux::ty_to_string(&self.analysis.ty_cx,
(*self.analysis.ty_cx.node_types.borrow())[field.node.id as uint]);
match self.span.sub_span_before_token(field.span, token::COLON) {
match self.span.sub_span_before_token(field.span, token::Colon) {
Some(sub_span) => self.fmt.field_str(field.span,
Some(sub_span),
field.node.id,
......@@ -1175,7 +1175,7 @@ fn visit_view_item(&mut self, i: &ast::ViewItem) {
// 'use' always introduces an alias, if there is not an explicit
// one, there is an implicit one.
let sub_span =
match self.span.sub_span_before_token(path.span, token::EQ) {
match self.span.sub_span_before_token(path.span, token::Eq) {
Some(sub_span) => Some(sub_span),
None => sub_span,
};
......
......@@ -93,7 +93,7 @@ pub fn span_for_last_ident(&self, span: Span) -> Option<Span> {
let mut bracket_count = 0u;
loop {
let ts = toks.next_token();
if ts.tok == token::EOF {
if ts.tok == token::Eof {
return self.make_sub_span(span, result)
}
if bracket_count == 0 &&
......@@ -102,9 +102,9 @@ pub fn span_for_last_ident(&self, span: Span) -> Option<Span> {
}
bracket_count += match ts.tok {
token::LT => 1,
token::GT => -1,
token::BINOP(token::SHR) => -2,
token::Lt => 1,
token::Gt => -1,
token::BinOp(token::Shr) => -2,
_ => 0
}
}
......@@ -116,7 +116,7 @@ pub fn span_for_first_ident(&self, span: Span) -> Option<Span> {
let mut bracket_count = 0u;
loop {
let ts = toks.next_token();
if ts.tok == token::EOF {
if ts.tok == token::Eof {
return None;
}
if bracket_count == 0 &&
......@@ -125,9 +125,9 @@ pub fn span_for_first_ident(&self, span: Span) -> Option<Span> {
}
bracket_count += match ts.tok {
token::LT => 1,
token::GT => -1,
token::BINOP(token::SHR) => -2,
token::Lt => 1,
token::Gt => -1,
token::BinOp(token::Shr) => -2,
_ => 0
}
}
......@@ -141,32 +141,32 @@ pub fn sub_span_for_meth_name(&self, span: Span) -> Option<Span> {
let mut result = None;
let mut bracket_count = 0u;
let mut last_span = None;
while prev.tok != token::EOF {
while prev.tok != token::Eof {
last_span = None;
let mut next = toks.next_token();
if (next.tok == token::LPAREN ||
next.tok == token::LT) &&
if (next.tok == token::LParen ||
next.tok == token::Lt) &&
bracket_count == 0 &&
is_ident(&prev.tok) {
result = Some(prev.sp);
}
if bracket_count == 0 &&
next.tok == token::MOD_SEP {
next.tok == token::ModSep {
let old = prev;
prev = next;
next = toks.next_token();
if next.tok == token::LT &&
if next.tok == token::Lt &&
is_ident(&old.tok) {
result = Some(old.sp);
}
}
bracket_count += match prev.tok {
token::LPAREN | token::LT => 1,
token::RPAREN | token::GT => -1,
token::BINOP(token::SHR) => -2,
token::LParen | token::Lt => 1,
token::RParen | token::Gt => -1,
token::BinOp(token::Shr) => -2,
_ => 0
};
......@@ -191,21 +191,21 @@ pub fn sub_span_for_type_name(&self, span: Span) -> Option<Span> {
loop {
let next = toks.next_token();
if (next.tok == token::LT ||
next.tok == token::COLON) &&
if (next.tok == token::Lt ||
next.tok == token::Colon) &&
bracket_count == 0 &&
is_ident(&prev.tok) {
result = Some(prev.sp);
}
bracket_count += match prev.tok {
token::LT => 1,
token::GT => -1,
token::BINOP(token::SHR) => -2,
token::Lt => 1,
token::Gt => -1,
token::BinOp(token::Shr) => -2,
_ => 0
};
if next.tok == token::EOF {
if next.tok == token::Eof {
break;
}
prev = next;
......@@ -235,7 +235,7 @@ pub fn spans_with_brackets(&self, span: Span, nesting: int, limit: int) -> Vec<S
let mut bracket_count = 0i;
loop {
let ts = toks.next_token();
if ts.tok == token::EOF {
if ts.tok == token::Eof {
if bracket_count != 0 {
let loc = self.sess.codemap().lookup_char_pos(span.lo);
self.sess.span_bug(span, format!(
......@@ -248,10 +248,10 @@ pub fn spans_with_brackets(&self, span: Span, nesting: int, limit: int) -> Vec<S
return result;
}
bracket_count += match ts.tok {
token::LT => 1,
token::GT => -1,
token::BINOP(token::SHL) => 2,
token::BINOP(token::SHR) => -2,
token::Lt => 1,
token::Gt => -1,
token::BinOp(token::Shl) => 2,
token::BinOp(token::Shr) => -2,
_ => 0
};
if is_ident(&ts.tok) &&
......@@ -265,7 +265,7 @@ pub fn sub_span_before_token(&self, span: Span, tok: Token) -> Option<Span> {
let mut toks = self.retokenise_span(span);
let mut prev = toks.next_token();
loop {
if prev.tok == token::EOF {
if prev.tok == token::Eof {
return None;
}
let next = toks.next_token();
......@@ -282,12 +282,12 @@ pub fn sub_span_after_keyword(&self,
let mut toks = self.retokenise_span(span);
loop {
let ts = toks.next_token();
if ts.tok == token::EOF {
if ts.tok == token::Eof {
return None;
}
if is_keyword(keyword, &ts.tok) {
let ts = toks.next_token();
if ts.tok == token::EOF {
if ts.tok == token::Eof {
return None
} else {
return self.make_sub_span(span, Some(ts.sp));
......
......@@ -17,7 +17,7 @@
use std::io;
use syntax::parse::lexer;
use syntax::parse::token as t;
use syntax::parse::token;
use syntax::parse;
/// Highlights some source code, returning the HTML output.
......@@ -63,19 +63,19 @@ fn doit(sess: &parse::ParseSess, mut lexer: lexer::StringReader,
let snip = |sp| sess.span_diagnostic.cm.span_to_snippet(sp).unwrap();
if next.tok == t::EOF { break }
if next.tok == token::Eof { break }
let klass = match next.tok {
t::WS => {
token::Whitespace => {
try!(write!(out, "{}", Escape(snip(next.sp).as_slice())));
continue
},
t::COMMENT => {
token::Comment => {
try!(write!(out, "<span class='comment'>{}</span>",
Escape(snip(next.sp).as_slice())));
continue
},
t::SHEBANG(s) => {
token::Shebang(s) => {
try!(write!(out, "{}", Escape(s.as_str())));
continue
},
......@@ -83,24 +83,25 @@ fn doit(sess: &parse::ParseSess, mut lexer: lexer::StringReader,
// that it's the address-of operator instead of the and-operator.
// This allows us to give all pointers their own class (`Box` and
// `@` are below).
t::BINOP(t::AND) if lexer.peek().sp.lo == next.sp.hi => "kw-2",
t::AT | t::TILDE => "kw-2",
token::BinOp(token::And) if lexer.peek().sp.lo == next.sp.hi => "kw-2",
token::At | token::Tilde => "kw-2",
// consider this as part of a macro invocation if there was a
// leading identifier
t::NOT if is_macro => { is_macro = false; "macro" }
token::Not if is_macro => { is_macro = false; "macro" }
// operators
t::EQ | t::LT | t::LE | t::EQEQ | t::NE | t::GE | t::GT |
t::ANDAND | t::OROR | t::NOT | t::BINOP(..) | t::RARROW |
t::BINOPEQ(..) | t::FAT_ARROW => "op",
token::Eq | token::Lt | token::Le | token::EqEq | token::Ne | token::Ge | token::Gt |
token::AndAnd | token::OrOr | token::Not | token::BinOp(..) | token::RArrow |
token::BinOpEq(..) | token::FatArrow => "op",
// miscellaneous, no highlighting
t::DOT | t::DOTDOT | t::DOTDOTDOT | t::COMMA | t::SEMI |
t::COLON | t::MOD_SEP | t::LARROW | t::LPAREN |
t::RPAREN | t::LBRACKET | t::LBRACE | t::RBRACE | t::QUESTION => "",
t::DOLLAR => {
if t::is_ident(&lexer.peek().tok) {
token::Dot | token::DotDot | token::DotDotDot | token::Comma | token::Semi |
token::Colon | token::ModSep | token::LArrow | token::LParen |
token::RParen | token::LBracket | token::LBrace | token::RBrace |
token::Question => "",
token::Dollar => {
if token::is_ident(&lexer.peek().tok) {
is_macro_nonterminal = true;
"macro-nonterminal"
} else {
......@@ -112,12 +113,12 @@ fn doit(sess: &parse::ParseSess, mut lexer: lexer::StringReader,
// continue highlighting it as an attribute until the ending ']' is
// seen, so skip out early. Down below we terminate the attribute
// span when we see the ']'.
t::POUND => {
token::Pound => {
is_attribute = true;
try!(write!(out, r"<span class='attribute'>#"));
continue
}
t::RBRACKET => {
token::RBracket => {
if is_attribute {
is_attribute = false;
try!(write!(out, "]</span>"));
......@@ -128,15 +129,15 @@ fn doit(sess: &parse::ParseSess, mut lexer: lexer::StringReader,
}
// text literals
t::LIT_BYTE(..) | t::LIT_BINARY(..) | t::LIT_BINARY_RAW(..) |
t::LIT_CHAR(..) | t::LIT_STR(..) | t::LIT_STR_RAW(..) => "string",
token::LitByte(..) | token::LitBinary(..) | token::LitBinaryRaw(..) |
token::LitChar(..) | token::LitStr(..) | token::LitStrRaw(..) => "string",
// number literals
t::LIT_INTEGER(..) | t::LIT_FLOAT(..) => "number",
token::LitInteger(..) | token::LitFloat(..) => "number",
// keywords are also included in the identifier set
t::IDENT(ident, _is_mod_sep) => {
match t::get_ident(ident).get() {
token::Ident(ident, _is_mod_sep) => {
match token::get_ident(ident).get() {
"ref" | "mut" => "kw-2",
"self" => "self",
......@@ -145,12 +146,12 @@ fn doit(sess: &parse::ParseSess, mut lexer: lexer::StringReader,
"Option" | "Result" => "prelude-ty",
"Some" | "None" | "Ok" | "Err" => "prelude-val",
_ if t::is_any_keyword(&next.tok) => "kw",
_ if token::is_any_keyword(&next.tok) => "kw",
_ => {
if is_macro_nonterminal {
is_macro_nonterminal = false;
"macro-nonterminal"
} else if lexer.peek().tok == t::NOT {
} else if lexer.peek().tok == token::Not {
is_macro = true;
"macro"
} else {
......@@ -160,9 +161,9 @@ fn doit(sess: &parse::ParseSess, mut lexer: lexer::StringReader,
}
}
t::LIFETIME(..) => "lifetime",
t::DOC_COMMENT(..) => "doccomment",
t::UNDERSCORE | t::EOF | t::INTERPOLATED(..) => "",
token::Lifetime(..) => "lifetime",
token::DocComment(..) => "doccomment",
token::Underscore | token::Eof | token::Interpolated(..) => "",
};
// as mentioned above, use the original source code instead of
......
......@@ -50,7 +50,7 @@ pub fn expand_diagnostic_used<'cx>(ecx: &'cx mut ExtCtxt,
token_tree: &[TokenTree])
-> Box<MacResult+'cx> {
let code = match token_tree {
[ast::TtToken(_, token::IDENT(code, _))] => code,
[ast::TtToken(_, token::Ident(code, _))] => code,
_ => unreachable!()
};
with_registered_diagnostics(|diagnostics| {
......@@ -82,12 +82,12 @@ pub fn expand_register_diagnostic<'cx>(ecx: &'cx mut ExtCtxt,
token_tree: &[TokenTree])
-> Box<MacResult+'cx> {
let (code, description) = match token_tree {
[ast::TtToken(_, token::IDENT(ref code, _))] => {
[ast::TtToken(_, token::Ident(ref code, _))] => {
(code, None)
},
[ast::TtToken(_, token::IDENT(ref code, _)),
ast::TtToken(_, token::COMMA),
ast::TtToken(_, token::LIT_STR_RAW(description, _))] => {
[ast::TtToken(_, token::Ident(ref code, _)),
ast::TtToken(_, token::Comma),
ast::TtToken(_, token::LitStrRaw(description, _))] => {
(code, Some(description))
}
_ => unreachable!()
......@@ -110,7 +110,7 @@ pub fn expand_build_diagnostic_array<'cx>(ecx: &'cx mut ExtCtxt,
token_tree: &[TokenTree])
-> Box<MacResult+'cx> {
let name = match token_tree {
[ast::TtToken(_, token::IDENT(ref name, _))] => name,
[ast::TtToken(_, token::Ident(ref name, _))] => name,
_ => unreachable!()
};
......
......@@ -72,21 +72,21 @@ pub fn expand_asm<'cx>(cx: &'cx mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
asm_str_style = Some(style);
}
Outputs => {
while p.token != token::EOF &&
p.token != token::COLON &&
p.token != token::MOD_SEP {
while p.token != token::Eof &&
p.token != token::Colon &&
p.token != token::ModSep {
if outputs.len() != 0 {
p.eat(&token::COMMA);
p.eat(&token::Comma);
}
let (constraint, _str_style) = p.parse_str();
let span = p.last_span;
p.expect(&token::LPAREN);
p.expect(&token::LParen);
let out = p.parse_expr();
p.expect(&token::RPAREN);
p.expect(&token::RParen);
// Expands a read+write operand into two operands.
//
......@@ -113,12 +113,12 @@ pub fn expand_asm<'cx>(cx: &'cx mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
}
}
Inputs => {
while p.token != token::EOF &&
p.token != token::COLON &&
p.token != token::MOD_SEP {
while p.token != token::Eof &&
p.token != token::Colon &&
p.token != token::ModSep {
if inputs.len() != 0 {
p.eat(&token::COMMA);
p.eat(&token::Comma);
}
let (constraint, _str_style) = p.parse_str();
......@@ -129,21 +129,21 @@ pub fn expand_asm<'cx>(cx: &'cx mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
cx.span_err(p.last_span, "input operand constraint contains '+'");
}
p.expect(&token::LPAREN);
p.expect(&token::LParen);
let input = p.parse_expr();
p.expect(&token::RPAREN);
p.expect(&token::RParen);
inputs.push((constraint, input));
}
}
Clobbers => {
let mut clobs = Vec::new();
while p.token != token::EOF &&
p.token != token::COLON &&
p.token != token::MOD_SEP {
while p.token != token::Eof &&
p.token != token::Colon &&
p.token != token::ModSep {
if clobs.len() != 0 {
p.eat(&token::COMMA);
p.eat(&token::Comma);
}
let (s, _str_style) = p.parse_str();
......@@ -172,8 +172,8 @@ pub fn expand_asm<'cx>(cx: &'cx mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
cx.span_warn(p.last_span, "unrecognized option");
}
if p.token == token::COMMA {
p.eat(&token::COMMA);
if p.token == token::Comma {
p.eat(&token::Comma);
}
}
StateNone => ()
......@@ -183,17 +183,17 @@ pub fn expand_asm<'cx>(cx: &'cx mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
// MOD_SEP is a double colon '::' without space in between.
// When encountered, the state must be advanced twice.
match (&p.token, state.next(), state.next().next()) {
(&token::COLON, StateNone, _) |
(&token::MOD_SEP, _, StateNone) => {
(&token::Colon, StateNone, _) |
(&token::ModSep, _, StateNone) => {
p.bump();
break 'statement;
}
(&token::COLON, st, _) |
(&token::MOD_SEP, _, st) => {
(&token::Colon, st, _) |
(&token::ModSep, _, st) => {
p.bump();
state = st;
}
(&token::EOF, _, _) => break 'statement,
(&token::Eof, _, _) => break 'statement,
_ => break
}
}
......
......@@ -684,8 +684,8 @@ pub fn get_single_str_from_tts(cx: &ExtCtxt,
cx.span_err(sp, format!("{} takes 1 argument.", name).as_slice());
} else {
match tts[0] {
ast::TtToken(_, token::LIT_STR(ident)) => return Some(parse::str_lit(ident.as_str())),
ast::TtToken(_, token::LIT_STR_RAW(ident, _)) => {
ast::TtToken(_, token::LitStr(ident)) => return Some(parse::str_lit(ident.as_str())),
ast::TtToken(_, token::LitStrRaw(ident, _)) => {
return Some(parse::raw_str_lit(ident.as_str()))
}
_ => {
......@@ -704,12 +704,12 @@ pub fn get_exprs_from_tts(cx: &mut ExtCtxt,
tts: &[ast::TokenTree]) -> Option<Vec<P<ast::Expr>>> {
let mut p = cx.new_parser_from_tts(tts);
let mut es = Vec::new();
while p.token != token::EOF {
while p.token != token::Eof {
es.push(cx.expander().fold_expr(p.parse_expr()));
if p.eat(&token::COMMA) {
if p.eat(&token::Comma) {
continue;
}
if p.token != token::EOF {
if p.token != token::Eof {
cx.span_err(sp, "expected token: `,`");
return None;
}
......
......@@ -29,7 +29,7 @@ pub fn expand_cfg<'cx>(cx: &mut ExtCtxt,
let mut p = cx.new_parser_from_tts(tts);
let cfg = p.parse_meta_item();
if !p.eat(&token::EOF) {
if !p.eat(&token::Eof) {
cx.span_err(sp, "expected 1 cfg-pattern");
return DummyResult::expr(sp);
}
......
......@@ -23,21 +23,21 @@ pub fn expand_syntax_ext<'cx>(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree]
for (i, e) in tts.iter().enumerate() {
if i & 1 == 1 {
match *e {
ast::TtToken(_, token::COMMA) => (),
ast::TtToken(_, token::Comma) => {},
_ => {
cx.span_err(sp, "concat_idents! expecting comma.");
return DummyResult::expr(sp);
}
},
}
} else {
match *e {
ast::TtToken(_, token::IDENT(ident,_)) => {
ast::TtToken(_, token::Ident(ident, _)) => {
res_str.push_str(token::get_ident(ident).get())
}
},
_ => {
cx.span_err(sp, "concat_idents! requires ident args.");
return DummyResult::expr(sp);
}
},
}
}
}
......
......@@ -91,7 +91,7 @@ fn parse_args(ecx: &mut ExtCtxt, sp: Span, allow_method: bool,
// Parse the leading function expression (maybe a block, maybe a path)
let invocation = if allow_method {
let e = p.parse_expr();
if !p.eat(&token::COMMA) {
if !p.eat(&token::Comma) {
ecx.span_err(sp, "expected token: `,`");
return (Call(e), None);
}
......@@ -99,28 +99,28 @@ fn parse_args(ecx: &mut ExtCtxt, sp: Span, allow_method: bool,
} else {
Call(p.parse_expr())
};
if !p.eat(&token::COMMA) {
if !p.eat(&token::Comma) {
ecx.span_err(sp, "expected token: `,`");
return (invocation, None);
}
if p.token == token::EOF {
if p.token == token::Eof {
ecx.span_err(sp, "requires at least a format string argument");
return (invocation, None);
}
let fmtstr = p.parse_expr();
let mut named = false;
while p.token != token::EOF {
if !p.eat(&token::COMMA) {
while p.token != token::Eof {
if !p.eat(&token::Comma) {
ecx.span_err(sp, "expected token: `,`");
return (invocation, None);
}
if p.token == token::EOF { break } // accept trailing commas
if p.token == token::Eof { break } // accept trailing commas
if named || (token::is_ident(&p.token) &&
p.look_ahead(1, |t| *t == token::EQ)) {
p.look_ahead(1, |t| *t == token::Eq)) {
named = true;
let ident = match p.token {
token::IDENT(i, _) => {
token::Ident(i, _) => {
p.bump();
i
}
......@@ -139,7 +139,7 @@ fn parse_args(ecx: &mut ExtCtxt, sp: Span, allow_method: bool,
};
let interned_name = token::get_ident(ident);
let name = interned_name.get();
p.expect(&token::EQ);
p.expect(&token::Eq);
let e = p.parse_expr();
match names.find_equiv(&name) {
None => {}
......
......@@ -515,123 +515,122 @@ fn mk_token_path(cx: &ExtCtxt, sp: Span, name: &str) -> P<ast::Expr> {
cx.expr_path(cx.path_global(sp, idents))
}
fn mk_binop(cx: &ExtCtxt, sp: Span, bop: token::BinOp) -> P<ast::Expr> {
fn mk_binop(cx: &ExtCtxt, sp: Span, bop: token::BinOpToken) -> P<ast::Expr> {
let name = match bop {
PLUS => "PLUS",
MINUS => "MINUS",
STAR => "STAR",
SLASH => "SLASH",
PERCENT => "PERCENT",
CARET => "CARET",
AND => "AND",
OR => "OR",
SHL => "SHL",
SHR => "SHR"
token::Plus => "Plus",
token::Minus => "Minus",
token::Star => "Star",
token::Slash => "Slash",
token::Percent => "Percent",
token::Caret => "Caret",
token::And => "And",
token::Or => "Or",
token::Shl => "Shl",
token::Shr => "Shr"
};
mk_token_path(cx, sp, name)
}
fn mk_token(cx: &ExtCtxt, sp: Span, tok: &token::Token) -> P<ast::Expr> {
match *tok {
BINOP(binop) => {
return cx.expr_call(sp, mk_token_path(cx, sp, "BINOP"), vec!(mk_binop(cx, sp, binop)));
token::BinOp(binop) => {
return cx.expr_call(sp, mk_token_path(cx, sp, "BinOp"), vec!(mk_binop(cx, sp, binop)));
}
BINOPEQ(binop) => {
return cx.expr_call(sp, mk_token_path(cx, sp, "BINOPEQ"),
token::BinOpEq(binop) => {
return cx.expr_call(sp, mk_token_path(cx, sp, "BinOpEq"),
vec!(mk_binop(cx, sp, binop)));
}
LIT_BYTE(i) => {
token::LitByte(i) => {
let e_byte = mk_name(cx, sp, i.ident());
return cx.expr_call(sp, mk_token_path(cx, sp, "LIT_BYTE"), vec!(e_byte));
return cx.expr_call(sp, mk_token_path(cx, sp, "LitByte"), vec!(e_byte));
}
LIT_CHAR(i) => {
token::LitChar(i) => {
let e_char = mk_name(cx, sp, i.ident());
return cx.expr_call(sp, mk_token_path(cx, sp, "LIT_CHAR"), vec!(e_char));
return cx.expr_call(sp, mk_token_path(cx, sp, "LitChar"), vec!(e_char));
}
LIT_INTEGER(i) => {
token::LitInteger(i) => {
let e_int = mk_name(cx, sp, i.ident());
return cx.expr_call(sp, mk_token_path(cx, sp, "LIT_INTEGER"), vec!(e_int));
return cx.expr_call(sp, mk_token_path(cx, sp, "LitInteger"), vec!(e_int));
}
LIT_FLOAT(fident) => {
token::LitFloat(fident) => {
let e_fident = mk_name(cx, sp, fident.ident());
return cx.expr_call(sp, mk_token_path(cx, sp, "LIT_FLOAT"), vec!(e_fident));
return cx.expr_call(sp, mk_token_path(cx, sp, "LitFloat"), vec!(e_fident));
}
LIT_STR(ident) => {
token::LitStr(ident) => {
return cx.expr_call(sp,
mk_token_path(cx, sp, "LIT_STR"),
mk_token_path(cx, sp, "LitStr"),
vec!(mk_name(cx, sp, ident.ident())));
}
LIT_STR_RAW(ident, n) => {
token::LitStrRaw(ident, n) => {
return cx.expr_call(sp,
mk_token_path(cx, sp, "LIT_STR_RAW"),
mk_token_path(cx, sp, "LitStrRaw"),
vec!(mk_name(cx, sp, ident.ident()), cx.expr_uint(sp, n)));
}
IDENT(ident, b) => {
token::Ident(ident, b) => {
return cx.expr_call(sp,
mk_token_path(cx, sp, "IDENT"),
mk_token_path(cx, sp, "Ident"),
vec!(mk_ident(cx, sp, ident), cx.expr_bool(sp, b)));
}
LIFETIME(ident) => {
token::Lifetime(ident) => {
return cx.expr_call(sp,
mk_token_path(cx, sp, "LIFETIME"),
mk_token_path(cx, sp, "Lifetime"),
vec!(mk_ident(cx, sp, ident)));
}
DOC_COMMENT(ident) => {
token::DocComment(ident) => {
return cx.expr_call(sp,
mk_token_path(cx, sp, "DOC_COMMENT"),
mk_token_path(cx, sp, "DocComment"),
vec!(mk_name(cx, sp, ident.ident())));
}
INTERPOLATED(_) => fail!("quote! with interpolated token"),
token::Interpolated(_) => fail!("quote! with interpolated token"),
_ => ()
}
let name = match *tok {
EQ => "EQ",
LT => "LT",
LE => "LE",
EQEQ => "EQEQ",
NE => "NE",
GE => "GE",
GT => "GT",
ANDAND => "ANDAND",
OROR => "OROR",
NOT => "NOT",
TILDE => "TILDE",
AT => "AT",
DOT => "DOT",
DOTDOT => "DOTDOT",
COMMA => "COMMA",
SEMI => "SEMI",
COLON => "COLON",
MOD_SEP => "MOD_SEP",
RARROW => "RARROW",
LARROW => "LARROW",
FAT_ARROW => "FAT_ARROW",
LPAREN => "LPAREN",
RPAREN => "RPAREN",
LBRACKET => "LBRACKET",
RBRACKET => "RBRACKET",
LBRACE => "LBRACE",
RBRACE => "RBRACE",
POUND => "POUND",
DOLLAR => "DOLLAR",
UNDERSCORE => "UNDERSCORE",
EOF => "EOF",
_ => fail!()
token::Eq => "Eq",
token::Lt => "Lt",
token::Le => "Le",
token::EqEq => "EqEq",
token::Ne => "Ne",
token::Ge => "Ge",
token::Gt => "Gt",
token::AndAnd => "AndAnd",
token::OrOr => "OrOr",
token::Not => "Not",
token::Tilde => "Tilde",
token::At => "At",
token::Dot => "Dot",
token::DotDot => "DotDot",
token::Comma => "Comma",
token::Semi => "Semi",
token::Colon => "Colon",
token::ModSep => "ModSep",
token::RArrow => "RArrow",
token::LArrow => "LArrow",
token::FatArrow => "FatArrow",
token::LParen => "LParen",
token::RParen => "RParen",
token::LBracket => "LBracket",
token::RBracket => "RBracket",
token::LBrace => "LBrace",
token::RBrace => "RBrace",
token::Pound => "Pound",
token::Dollar => "Dollar",
token::Underscore => "Underscore",
token::Eof => "Eof",
_ => fail!(),
};
mk_token_path(cx, sp, name)
}
......@@ -702,7 +701,7 @@ fn expand_tts(cx: &ExtCtxt, sp: Span, tts: &[ast::TokenTree])
p.quote_depth += 1u;
let cx_expr = p.parse_expr();
if !p.eat(&token::COMMA) {
if !p.eat(&token::Comma) {
p.fatal("expected token `,`");
}
......
......@@ -85,7 +85,7 @@
use parse::ParseSess;
use parse::attr::ParserAttr;
use parse::parser::{LifetimeAndTypesWithoutColons, Parser};
use parse::token::{Token, EOF, Nonterminal};
use parse::token::{Token, Nonterminal};
use parse::token;
use ptr::P;
......@@ -226,8 +226,8 @@ pub fn parse_or_else(sess: &ParseSess,
/// unhygienic comparison)
pub fn token_name_eq(t1 : &Token, t2 : &Token) -> bool {
match (t1,t2) {
(&token::IDENT(id1,_),&token::IDENT(id2,_))
| (&token::LIFETIME(id1),&token::LIFETIME(id2)) =>
(&token::Ident(id1,_),&token::Ident(id2,_))
| (&token::Lifetime(id1),&token::Lifetime(id2)) =>
id1.name == id2.name,
_ => *t1 == *t2
}
......@@ -354,9 +354,9 @@ pub fn parse(sess: &ParseSess,
// Built-in nonterminals never start with these tokens,
// so we can eliminate them from consideration.
match tok {
token::RPAREN |
token::RBRACE |
token::RBRACKET => {},
token::RParen |
token::RBrace |
token::RBracket => {},
_ => bb_eis.push(ei)
}
}
......@@ -372,7 +372,7 @@ pub fn parse(sess: &ParseSess,
}
/* error messages here could be improved with links to orig. rules */
if token_name_eq(&tok, &EOF) {
if token_name_eq(&tok, &token::Eof) {
if eof_eis.len() == 1u {
let mut v = Vec::new();
for dv in eof_eis.get_mut(0).matches.iter_mut() {
......@@ -447,7 +447,7 @@ pub fn parse_nt(p: &mut Parser, name: &str) -> Nonterminal {
"ty" => token::NtTy(p.parse_ty(false /* no need to disambiguate*/)),
// this could be handled like a token, since it is one
"ident" => match p.token {
token::IDENT(sn,b) => { p.bump(); token::NtIdent(box sn,b) }
token::Ident(sn,b) => { p.bump(); token::NtIdent(box sn,b) }
_ => {
let token_str = token::to_string(&p.token);
p.fatal((format!("expected ident, found {}",
......
......@@ -20,7 +20,7 @@
use parse::parser::Parser;
use parse::attr::ParserAttr;
use parse::token::{special_idents, gensym_ident};
use parse::token::{FAT_ARROW, SEMI, NtMatchers, NtTT, EOF};
use parse::token::{NtMatchers, NtTT};
use parse::token;
use print;
use ptr::P;
......@@ -43,10 +43,10 @@ impl<'a> ParserAnyMacro<'a> {
/// allowed to be there.
fn ensure_complete_parse(&self, allow_semi: bool) {
let mut parser = self.parser.borrow_mut();
if allow_semi && parser.token == SEMI {
if allow_semi && parser.token == token::Semi {
parser.bump()
}
if parser.token != EOF {
if parser.token != token::Eof {
let token_str = parser.this_token_to_string();
let msg = format!("macro expansion ignores token `{}` and any \
following",
......@@ -89,7 +89,7 @@ fn make_methods(self: Box<ParserAnyMacro<'a>>) -> Option<SmallVector<P<ast::Meth
loop {
let mut parser = self.parser.borrow_mut();
match parser.token {
EOF => break,
token::Eof => break,
_ => {
let attrs = parser.parse_outer_attributes();
ret.push(parser.parse_method(attrs, ast::Inherited))
......@@ -231,12 +231,13 @@ fn ms(m: Matcher_) -> Matcher {
let argument_gram = vec!(
ms(MatchSeq(vec!(
ms(MatchNonterminal(lhs_nm, special_idents::matchers, 0u)),
ms(MatchTok(FAT_ARROW)),
ms(MatchNonterminal(rhs_nm, special_idents::tt, 1u))), Some(SEMI),
ast::OneOrMore, 0u, 2u)),
ms(MatchTok(token::FatArrow)),
ms(MatchNonterminal(rhs_nm, special_idents::tt, 1u))),
Some(token::Semi), ast::OneOrMore, 0u, 2u)),
//to phase into semicolon-termination instead of
//semicolon-separation
ms(MatchSeq(vec!(ms(MatchTok(SEMI))), None, ast::ZeroOrMore, 2u, 2u)));
ms(MatchSeq(vec!(ms(MatchTok(token::Semi))), None,
ast::ZeroOrMore, 2u, 2u)));
// Parse the macro_rules! invocation (`none` is for no interpolations):
......
......@@ -13,7 +13,7 @@
use codemap::{Span, DUMMY_SP};
use diagnostic::SpanHandler;
use ext::tt::macro_parser::{NamedMatch, MatchedSeq, MatchedNonterminal};
use parse::token::{EOF, INTERPOLATED, IDENT, Token, NtIdent};
use parse::token::{Token, NtIdent};
use parse::token;
use parse::lexer::TokenAndSpan;
......@@ -66,7 +66,7 @@ pub fn new_tt_reader<'a>(sp_diag: &'a SpanHandler,
repeat_idx: Vec::new(),
repeat_len: Vec::new(),
/* dummy values, never read: */
cur_tok: EOF,
cur_tok: token::Eof,
cur_span: DUMMY_SP,
};
tt_next_token(&mut r); /* get cur_tok and cur_span set up */
......@@ -158,7 +158,7 @@ pub fn tt_next_token(r: &mut TtReader) -> TokenAndSpan {
loop {
let should_pop = match r.stack.last() {
None => {
assert_eq!(ret_val.tok, EOF);
assert_eq!(ret_val.tok, token::Eof);
return ret_val;
}
Some(frame) => {
......@@ -175,7 +175,7 @@ pub fn tt_next_token(r: &mut TtReader) -> TokenAndSpan {
let prev = r.stack.pop().unwrap();
match r.stack.last_mut() {
None => {
r.cur_tok = EOF;
r.cur_tok = token::Eof;
return ret_val;
}
Some(frame) => {
......@@ -272,13 +272,13 @@ pub fn tt_next_token(r: &mut TtReader) -> TokenAndSpan {
(b) we actually can, since it's a token. */
MatchedNonterminal(NtIdent(box sn, b)) => {
r.cur_span = sp;
r.cur_tok = IDENT(sn,b);
r.cur_tok = token::Ident(sn,b);
return ret_val;
}
MatchedNonterminal(ref other_whole_nt) => {
// FIXME(pcwalton): Bad copy.
r.cur_span = sp;
r.cur_tok = INTERPOLATED((*other_whole_nt).clone());
r.cur_tok = token::Interpolated((*other_whole_nt).clone());
return ret_val;
}
MatchedSeq(..) => {
......
......@@ -602,11 +602,11 @@ pub fn noop_fold_tts<T: Folder>(tts: &[TokenTree], fld: &mut T) -> Vec<TokenTree
// apply ident folder if it's an ident, apply other folds to interpolated nodes
pub fn noop_fold_token<T: Folder>(t: token::Token, fld: &mut T) -> token::Token {
match t {
token::IDENT(id, followed_by_colons) => {
token::IDENT(fld.fold_ident(id), followed_by_colons)
token::Ident(id, followed_by_colons) => {
token::Ident(fld.fold_ident(id), followed_by_colons)
}
token::LIFETIME(id) => token::LIFETIME(fld.fold_ident(id)),
token::INTERPOLATED(nt) => token::INTERPOLATED(fld.fold_interpolated(nt)),
token::Lifetime(id) => token::Lifetime(fld.fold_ident(id)),
token::Interpolated(nt) => token::Interpolated(fld.fold_interpolated(nt)),
_ => t
}
}
......
......@@ -14,7 +14,6 @@
use parse::common::*; //resolve bug?
use parse::token;
use parse::parser::Parser;
use parse::token::INTERPOLATED;
use ptr::P;
/// A parser that can parse attributes.
......@@ -36,10 +35,10 @@ fn parse_outer_attributes(&mut self) -> Vec<ast::Attribute> {
debug!("parse_outer_attributes: self.token={}",
self.token);
match self.token {
token::POUND => {
token::Pound => {
attrs.push(self.parse_attribute(false));
}
token::DOC_COMMENT(s) => {
token::DocComment(s) => {
let attr = ::attr::mk_sugared_doc_attr(
attr::mk_attr_id(),
self.id_to_interned_str(s.ident()),
......@@ -66,11 +65,11 @@ fn parse_attribute(&mut self, permit_inner: bool) -> ast::Attribute {
debug!("parse_attributes: permit_inner={} self.token={}",
permit_inner, self.token);
let (span, value, mut style) = match self.token {
token::POUND => {
token::Pound => {
let lo = self.span.lo;
self.bump();
let style = if self.eat(&token::NOT) {
let style = if self.eat(&token::Not) {
if !permit_inner {
let span = self.span;
self.span_err(span,
......@@ -82,10 +81,10 @@ fn parse_attribute(&mut self, permit_inner: bool) -> ast::Attribute {
ast::AttrOuter
};
self.expect(&token::LBRACKET);
self.expect(&token::LBracket);
let meta_item = self.parse_meta_item();
let hi = self.span.hi;
self.expect(&token::RBRACKET);
self.expect(&token::RBracket);
(mk_sp(lo, hi), meta_item, style)
}
......@@ -96,7 +95,7 @@ fn parse_attribute(&mut self, permit_inner: bool) -> ast::Attribute {
}
};
if permit_inner && self.eat(&token::SEMI) {
if permit_inner && self.eat(&token::Semi) {
self.span_warn(span, "this inner attribute syntax is deprecated. \
The new syntax is `#![foo]`, with a bang and no semicolon.");
style = ast::AttrInner;
......@@ -130,10 +129,10 @@ fn parse_inner_attrs_and_next(&mut self)
let mut next_outer_attrs: Vec<ast::Attribute> = Vec::new();
loop {
let attr = match self.token {
token::POUND => {
token::Pound => {
self.parse_attribute(true)
}
token::DOC_COMMENT(s) => {
token::DocComment(s) => {
// we need to get the position of this token before we bump.
let Span { lo, hi, .. } = self.span;
self.bump();
......@@ -161,7 +160,7 @@ fn parse_inner_attrs_and_next(&mut self)
/// | IDENT meta_seq
fn parse_meta_item(&mut self) -> P<ast::MetaItem> {
let nt_meta = match self.token {
token::INTERPOLATED(token::NtMeta(ref e)) => {
token::Interpolated(token::NtMeta(ref e)) => {
Some(e.clone())
}
_ => None
......@@ -179,7 +178,7 @@ fn parse_meta_item(&mut self) -> P<ast::MetaItem> {
let ident = self.parse_ident();
let name = self.id_to_interned_str(ident);
match self.token {
token::EQ => {
token::Eq => {
self.bump();
let lit = self.parse_lit();
// FIXME #623 Non-string meta items are not serialized correctly;
......@@ -195,7 +194,7 @@ fn parse_meta_item(&mut self) -> P<ast::MetaItem> {
let hi = self.span.hi;
P(spanned(lo, hi, ast::MetaNameValue(name, lit)))
}
token::LPAREN => {
token::LParen => {
let inner_items = self.parse_meta_seq();
let hi = self.span.hi;
P(spanned(lo, hi, ast::MetaList(name, inner_items)))
......@@ -209,15 +208,15 @@ fn parse_meta_item(&mut self) -> P<ast::MetaItem> {
/// matches meta_seq = ( COMMASEP(meta_item) )
fn parse_meta_seq(&mut self) -> Vec<P<ast::MetaItem>> {
self.parse_seq(&token::LPAREN,
&token::RPAREN,
seq_sep_trailing_disallowed(token::COMMA),
self.parse_seq(&token::LParen,
&token::RParen,
seq_sep_trailing_disallowed(token::Comma),
|p| p.parse_meta_item()).node
}
fn parse_optional_meta(&mut self) -> Vec<P<ast::MetaItem>> {
match self.token {
token::LPAREN => self.parse_meta_seq(),
token::LParen => self.parse_meta_seq(),
_ => Vec::new()
}
}
......
此差异已折叠。
......@@ -793,34 +793,34 @@ fn string_to_tts_macro () {
let tts = string_to_tts("macro_rules! zip (($a)=>($a))".to_string());
let tts: &[ast::TokenTree] = tts.as_slice();
match tts {
[ast::TtToken(_, token::IDENT(name_macro_rules, false)),
ast::TtToken(_, token::NOT),
ast::TtToken(_, token::IDENT(name_zip, false)),
[ast::TtToken(_, token::Ident(name_macro_rules, false)),
ast::TtToken(_, token::Not),
ast::TtToken(_, token::Ident(name_zip, false)),
ast::TtDelimited(_, ref macro_delimed)]
if name_macro_rules.as_str() == "macro_rules"
&& name_zip.as_str() == "zip" => {
let (ref macro_open, ref macro_tts, ref macro_close) = **macro_delimed;
match (macro_open, macro_tts.as_slice(), macro_close) {
(&ast::Delimiter { token: token::LPAREN, .. },
(&ast::Delimiter { token: token::LParen, .. },
[ast::TtDelimited(_, ref first_delimed),
ast::TtToken(_, token::FAT_ARROW),
ast::TtToken(_, token::FatArrow),
ast::TtDelimited(_, ref second_delimed)],
&ast::Delimiter { token: token::RPAREN, .. }) => {
&ast::Delimiter { token: token::RParen, .. }) => {
let (ref first_open, ref first_tts, ref first_close) = **first_delimed;
match (first_open, first_tts.as_slice(), first_close) {
(&ast::Delimiter { token: token::LPAREN, .. },
[ast::TtToken(_, token::DOLLAR),
ast::TtToken(_, token::IDENT(name, false))],
&ast::Delimiter { token: token::RPAREN, .. })
(&ast::Delimiter { token: token::LParen, .. },
[ast::TtToken(_, token::Dollar),
ast::TtToken(_, token::Ident(name, false))],
&ast::Delimiter { token: token::RParen, .. })
if name.as_str() == "a" => {},
_ => fail!("value 3: {}", **first_delimed),
}
let (ref second_open, ref second_tts, ref second_close) = **second_delimed;
match (second_open, second_tts.as_slice(), second_close) {
(&ast::Delimiter { token: token::LPAREN, .. },
[ast::TtToken(_, token::DOLLAR),
ast::TtToken(_, token::IDENT(name, false))],
&ast::Delimiter { token: token::RPAREN, .. })
(&ast::Delimiter { token: token::LParen, .. },
[ast::TtToken(_, token::Dollar),
ast::TtToken(_, token::Ident(name, false))],
&ast::Delimiter { token: token::RParen, .. })
if name.as_str() == "a" => {},
_ => fail!("value 4: {}", **second_delimed),
}
......@@ -842,7 +842,7 @@ fn string_to_tts_1 () {
\"fields\":[\
null,\
{\
\"variant\":\"IDENT\",\
\"variant\":\"Ident\",\
\"fields\":[\
\"fn\",\
false\
......@@ -855,7 +855,7 @@ fn string_to_tts_1 () {
\"fields\":[\
null,\
{\
\"variant\":\"IDENT\",\
\"variant\":\"Ident\",\
\"fields\":[\
\"a\",\
false\
......@@ -870,7 +870,7 @@ fn string_to_tts_1 () {
[\
{\
\"span\":null,\
\"token\":\"LPAREN\"\
\"token\":\"LParen\"\
},\
[\
{\
......@@ -878,7 +878,7 @@ fn string_to_tts_1 () {
\"fields\":[\
null,\
{\
\"variant\":\"IDENT\",\
\"variant\":\"Ident\",\
\"fields\":[\
\"b\",\
false\
......@@ -890,7 +890,7 @@ fn string_to_tts_1 () {
\"variant\":\"TtToken\",\
\"fields\":[\
null,\
\"COLON\"\
\"Colon\"\
]\
},\
{\
......@@ -898,7 +898,7 @@ fn string_to_tts_1 () {
\"fields\":[\
null,\
{\
\"variant\":\"IDENT\",\
\"variant\":\"Ident\",\
\"fields\":[\
\"int\",\
false\
......@@ -909,7 +909,7 @@ fn string_to_tts_1 () {
],\
{\
\"span\":null,\
\"token\":\"RPAREN\"\
\"token\":\"RParen\"\
}\
]\
]\
......@@ -921,7 +921,7 @@ fn string_to_tts_1 () {
[\
{\
\"span\":null,\
\"token\":\"LBRACE\"\
\"token\":\"LBrace\"\
},\
[\
{\
......@@ -929,7 +929,7 @@ fn string_to_tts_1 () {
\"fields\":[\
null,\
{\
\"variant\":\"IDENT\",\
\"variant\":\"Ident\",\
\"fields\":[\
\"b\",\
false\
......@@ -941,13 +941,13 @@ fn string_to_tts_1 () {
\"variant\":\"TtToken\",\
\"fields\":[\
null,\
\"SEMI\"\
\"Semi\"\
]\
}\
],\
{\
\"span\":null,\
\"token\":\"RBRACE\"\
\"token\":\"RBrace\"\
}\
]\
]\
......@@ -1002,7 +1002,7 @@ fn string_to_tts_1 () {
}
fn parser_done(p: Parser){
assert_eq!(p.token.clone(), token::EOF);
assert_eq!(p.token.clone(), token::Eof);
}
#[test] fn parse_ident_pat () {
......
......@@ -118,7 +118,7 @@ fn report(&mut self,
fn is_obsolete_ident(&mut self, ident: &str) -> bool {
match self.token {
token::IDENT(sid, _) => {
token::Ident(sid, _) => {
token::get_ident(sid).equiv(&ident)
}
_ => false
......
此差异已折叠。
此差异已折叠。
......@@ -1035,7 +1035,7 @@ pub fn print_tt(&mut self, tt: &ast::TokenTree) -> IoResult<()> {
ast::TtToken(_, ref tk) => {
try!(word(&mut self.s, parse::token::to_string(tk).as_slice()));
match *tk {
parse::token::DOC_COMMENT(..) => {
parse::token::DocComment(..) => {
hardbreak(&mut self.s)
}
_ => Ok(())
......
......@@ -17,7 +17,7 @@
extern crate rustc;
use syntax::codemap::Span;
use syntax::parse::token::{IDENT, get_ident};
use syntax::parse::token;
use syntax::ast::{TokenTree, TtToken};
use syntax::ext::base::{ExtCtxt, MacResult, DummyResult, MacExpr};
use syntax::ext::build::AstBuilder; // trait for expr_uint
......@@ -39,7 +39,7 @@ fn expand_rn(cx: &mut ExtCtxt, sp: Span, args: &[TokenTree])
("I", 1)];
let text = match args {
[TtToken(_, IDENT(s, _))] => get_ident(s).to_string(),
[TtToken(_, token::Ident(s, _))] => token::get_ident(s).to_string(),
_ => {
cx.span_err(sp, "argument should be a single identifier");
return DummyResult::any(sp);
......
......@@ -8,4 +8,4 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
type t = { f: () }; //~ ERROR expected type, found token LBRACE
type t = { f: () }; //~ ERROR expected type, found token LBrace
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册