提交 fefdb63c 编写于 作者: G Graydon Hoare

Begin shift over to using pandoc, markdown and llnextgen for reference manual....

Begin shift over to using pandoc, markdown and llnextgen for reference manual. Fix man page URL while at it.
上级 565ea068
......@@ -132,17 +132,17 @@ ifdef CFG_BAD_VALGRIND
endif
DOCS :=
ifeq ($(CFG_MAKEINFO),)
$(info cfg: no makeinfo found, omitting doc/rust.html)
ifeq ($(CFG_PANDOC),)
$(info cfg: no pandoc found, omitting doc/rust.html)
else
DOCS += doc/rust.html
endif
ifeq ($(CFG_TEXI2PDF),)
$(info cfg: no texi2pdf found, omitting doc/rust.pdf)
ifeq ($(CFG_PANDOC),)
$(info cfg: no pandoc found, omitting doc/rust.pdf)
else
ifeq ($(CFG_TEX),)
$(info cfg: no tex found, omitting doc/rust.pdf)
ifeq ($(CFG_PDFLATEX),)
$(info cfg: no pdflatex found, omitting doc/rust.pdf)
else
DOCS += doc/rust.pdf
endif
......
......@@ -284,11 +284,11 @@ probe CFG_GCC gcc
probe CFG_LLVM_CONFIG llvm-config
probe CFG_VALGRIND valgrind
probe CFG_PERF perf
probe CFG_MAKEINFO makeinfo
probe CFG_TEXI2PDF texi2pdf
probe CFG_TEX tex
probe CFG_MAKENSIS makensis
probe CFG_NATURALDOCS naturaldocs
probe CFG_LLNEXTGEN LLnextgen
probe CFG_PANDOC pandoc
probe CFG_PDFLATEX pdflatex
if [ -z "$CFG_ENABLE_CLANG" -a -z "$CFG_GCC" ]
then
......
此差异已折叠。
......@@ -10,7 +10,7 @@ Only the most commonly-used options are listed here. All options are listed and
described below.
.SH DESCRIPTION
This program is a compiler for the Rust language, available at
<\fBhttps://github.com/graydon/rust\fR>.
<\fBhttps://www.rust-lang.org\fR>.
.SH OPTIONS
.TP
\fB-h, --help\fR:
......@@ -124,7 +124,7 @@ Build a test harness.
\fB--warn-unused-imports\fR:
Warn about unnecessary imports.
.SH "BUGS"
See \fBhttps://github.com/graydon/rust/issues\fR for a list of known bugs.
See \fBhttps://github.com/mozilla/rust/issues\fR for a list of known bugs.
.SH "AUTHOR"
See \fBAUTHORS.txt\fR in the rust source distribution. Graydon Hoare
<\fIgraydon@mozilla.com\fR> is the project leader.
......
......@@ -52,8 +52,8 @@ clean-misc:
$(wildcard doc/*.$(ext) \
doc/*/*.$(ext) \
doc/*/*/*.$(ext)))
$(Q)rm -Rf doc/keywords.texi
$(Q)rm -Rf doc/version.texi
$(Q)rm -Rf doc/keywords.md
$(Q)rm -Rf doc/version.md
$(Q)rm -Rf $(foreach sub, index styles files search javascript, \
$(wildcard doc/*/$(sub)))
$(Q)rm -rf libuv
......
......@@ -2,28 +2,61 @@
# Doc variables and rules
######################################################################
doc/keywords.texi: $(S)doc/keywords.txt $(S)src/etc/gen-keywords-table.py
@$(call E, gen-keywords-table: $@)
$(Q)$(S)src/etc/gen-keywords-table.py
doc/version.texi: $(MKFILE_DEPS) rust.texi
doc/version.md: $(MKFILE_DEPS) rust.md
@$(call E, version-stamp: $@)
$(Q)echo "@macro gitversion" >$@
$(Q)echo "$(CFG_VERSION)" >>$@
$(Q)echo "@end macro" >>$@
GENERATED += doc/keywords.texi doc/version.texi
doc/keywords.md: $(MKFILE_DEPS) rust.md
@$(call E, grep -v: $$@)
$(Q)grep -v '^#' $< >$@
ifdef CFG_PANDOC
doc/rust.html: rust.md doc/version.md doc/keywords.md
@$(call E, pandoc: $@)
$(Q)$(CFG_PANDOC) \
--standalone --toc \
--section-divs \
--number-sections \
--from=markdown --to=html \
--output=$@ \
$<
ifdef CFG_PDFLATEX
doc/rust.tex: rust.md doc/version.md doc/keywords.md
@$(call E, pandoc: $@)
$(Q)$(CFG_PANDOC) \
--standalone --toc \
--number-sections \
--from=markdown --to=latex \
--output=$@ \
$<
doc/rust.pdf: doc/rust.tex
@$(call E, pdflatex: $@)
$(Q)$(CFG_PDFLATEX) \
-interaction=batchmode \
-output-directory=doc \
$<
endif
endif
ifdef CFG_LLNEXTGEN
doc/rust.g: rust.md $(S)src/etc/extract_grammar.py
@$(call E, extract_grammar: $@)
$(Q)$(S)src/etc/extract_grammar.py $< >$@
verify-grammar: doc/rust.g
@$(call E, LLnextgen: $<)
$(Q)$(CFG_LLNEXTGEN) --generate-lexer-wrapper=no $< >$@
$(Q)rm -f doc/rust.c doc/rust.h
endif
doc/%.pdf: %.texi doc/version.texi doc/keywords.texi
@$(call E, texi2pdf: $@)
@# LC_COLLATE=C works around a bug in texi2dvi; see
@# https://bugzilla.redhat.com/show_bug.cgi?id=583011 and
@# https://github.com/graydon/rust/issues/1134
$(Q)LC_COLLATE=C texi2pdf --silent --batch -I doc -o $@ --clean $<
doc/%.html: %.texi doc/version.texi doc/keywords.texi
@$(call E, makeinfo: $@)
$(Q)makeinfo -I doc --html --ifhtml --force --no-split --output=$@ $<
GENERATED += doc/keywords.md doc/version.md
docsnap: doc/rust.pdf
@$(call E, snap: doc/rust-$(shell date +"%Y-%m-%d")-snap.pdf)
......
#!/usr/bin/env python
# This script is for extracting the grammar from the rust docs.
import fileinput
# Accumulators for each flavor of fenced ("~~~~") block scraped out of
# the reference manual.  "gram" keeps raw grammar lines; the word-list
# collections keep de-duplicated whitespace-separated words.
collections = { "gram": [],
                "keyword": [],
                "reserved": [],
                "binop": [],
                "unop": [] }

active = None  # name of the collection we are currently inside, or None

for line in fileinput.input(openhook=fileinput.hook_encoded("utf-8")):
    if active is not None:
        if line.startswith("~~~~"):
            # Closing fence: leave the current block.
            active = None
        elif active in ["keyword", "reserved", "binop", "unop"]:
            # Word lists are split on whitespace and de-duplicated.
            for word in line.split():
                if word not in collections[active]:
                    collections[active].append(word)
        else:
            # Grammar text is kept verbatim, line by line.
            collections[active].append(line)
    elif line.startswith("~~~~"):
        # Opening fence: its info string (".gram", ".keyword", ...)
        # selects which collection the block feeds.  A fence that names
        # no known collection is ignored entirely.
        for cname in collections:
            if ("." + cname) in line:
                active = cname
                break
# Define operator symbol-names here
# Terminal names that may appear in the grammar as bare identifiers
# without being declared keywords (character-class placeholders plus
# the generic identifier token).
tokens = ["non_star", "non_slash", "non_eol",
          "non_single_quote", "non_double_quote", "ident" ]
# Map from operator/punctuation literals (as quoted in the grammar) to
# the symbolic token names emitted in the generated LLnextgen file.
# NOTE(review): under Python 2 the iteration order of symnames.keys()
# affects the order tokens are declared in the output — do not reorder
# entries casually.
symnames = {
".": "dot",
"+": "plus",
"-": "minus",
"/": "slash",
"*": "star",
"%": "percent",
"~": "tilde",
"@": "at",
"!": "not",
"&": "and",
"|": "or",
"^": "xor",
"<<": "lsl",
">>": "lsr",
">>>": "asr",
"&&": "andand",
"||": "oror",
"<" : "lt",
"<=" : "le",
"==" : "eqeq",
">=" : "ge",
">" : "gt",
"=": "eq",
"+=": "plusequal",
"-=": "minusequal",
"/=": "divequal",
"*=": "starequal",
"%=": "percentequal",
"&=": "andequal",
"|=": "orequal",
"^=": "xorequal",
">>=": "lsrequal",
">>>=": "asrequal",
"<<=": "lslequal",
"::": "coloncolon",
"//": "linecomment",
"/*": "openblockcomment",
"*/": "closeblockcomment"
}
# Translate the scraped grammar into LLnextgen input: rewrite quoted
# terminals into symbolic token names, accumulate the full token set,
# and print the generated .g file on stdout.
lines = []
for line in collections["gram"]:
    line2 = ""
    for word in line.split():
        # Replace quoted strings with keyword-names or symbol-names
        # from the tables above.
        if word.startswith("\""):
            word = word[1:-1]
            if word in symnames:
                word = symnames[word]
            else:
                # Anything quoted that is not an operator must be a
                # purely alphabetic keyword.
                for ch in word:
                    if not ch.isalpha():
                        raise Exception("non-alpha apparent keyword: "
                                        + word)
                if word not in tokens:
                    if (word in collections["keyword"] or
                        word in collections["reserved"]):
                        tokens.append(word)
                    else:
                        raise Exception("unknown keyword/reserved word: "
                                        + word)
        line2 += " " + word
    lines.append(line2)

# Declare every keyword/reserved word and every operator symbol as a
# token, even those that never appeared quoted inside the grammar.
for word in collections["keyword"] + collections["reserved"]:
    if word not in tokens:
        tokens.append(word)
# FIX: wrap .keys() in list() — under Python 3 dict.keys() is a view
# object and cannot be concatenated to a list (TypeError); list() keeps
# the Python 2 behavior intact.
for sym in collections["unop"] + collections["binop"] + list(symnames.keys()):
    word = symnames[sym]
    if word not in tokens:
        tokens.append(word)

# Emit the LLnextgen grammar: start symbol, token declarations, the
# keyword/operator alternation rules, then the rewritten grammar body.
print("%start parser, token;")
print("%%token %s ;" % ("\n\t, ".join(tokens)))
for coll in ["keyword", "reserved"]:
    print("%s: %s ; " % (coll, "\n\t| ".join(collections[coll])))
for coll in ["binop", "unop"]:
    print("%s: %s ; " % (coll, "\n\t| ".join([symnames[x]
                                              for x in collections[coll]])))
print("\n".join(lines))
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册