...
 
Commits (18)
    https://gitcode.net/wa-lang/wa/-/commit/783d26fc35f05422fceec4779515a90d1ef39be6 本地 play 支持凹中文 2023-06-25T20:13:58+08:00 chai2010 chaishushan@gmail.com https://gitcode.net/wa-lang/wa/-/commit/be17597bdba7f0655f913ac3158e7f2cce4a5dd9 添加手册-2.1 2023-06-25T21:29:43+08:00 3dgen 476582@qq.com https://gitcode.net/wa-lang/wa/-/commit/61571ac7b059bfb44f25f550d9be20434dc7fbf7 Merge branch 'master' of https://gitee.com/wa-lang/wa 2023-06-25T21:29:57+08:00 3dgen 476582@qq.com https://gitcode.net/wa-lang/wa/-/commit/8201374e1700dbcded9abb2ba43f29c244182979 zz 2023-06-26T11:03:11+08:00 3dgen 476582@qq.com https://gitcode.net/wa-lang/wa/-/commit/0a31dc803b74cf81d746fc2592dac562702593f8 增加代码文件类型识别语法 2023-06-26T22:40:51+08:00 chai2010 chaishushan@gmail.com https://gitcode.net/wa-lang/wa/-/commit/52f3c77d3707559dc8c446cc304536c1f494281f 改进 wz 语法支持 2023-06-26T23:32:52+08:00 chai2010 chaishushan@gmail.com https://gitcode.net/wa-lang/wa/-/commit/bcadb4704879f4f3fe2aa6026db454dd142ddccb 删除 playground 中输出的调试信息 2023-06-26T23:45:02+08:00 chai2010 chaishushan@gmail.com https://gitcode.net/wa-lang/wa/-/commit/db9d81ad4df3f59404a0a357187ee77d35d19d3a 完善 wat 导出的中文名字处理 2023-06-27T00:58:24+08:00 chai2010 chaishushan@gmail.com https://gitcode.net/wa-lang/wa/-/commit/a61c9d839c4bbae362872d97e4eba3a85da43923 修复并完善 fmt 命令 2023-06-27T07:22:06+08:00 chai2010 chaishushan@gmail.com https://gitcode.net/wa-lang/wa/-/commit/6cf3f999e09fd487ef488e8fa6e8fdcc861861fd 增加mvp os 2023-06-27T21:40:40+08:00 3dgen 476582@qq.com https://gitcode.net/wa-lang/wa/-/commit/14d79a0b71979b6897c0d27b97d050b74f4b718b Merge branch 'master' of https://gitee.com/wa-lang/wa 2023-06-27T21:44:58+08:00 3dgen 476582@qq.com https://gitcode.net/wa-lang/wa/-/commit/08da0802698f8e1f97a50796ccd19a96ceb9146f 完善 MVP 目标定义 2023-06-27T23:20:25+08:00 chai2010 chaishushan@gmail.com https://gitcode.net/wa-lang/wa/-/commit/9b9870b2f94ad498989b754eae2a00e9218e359d 完善 run 命令对相对路径文件的支持 2023-06-28T01:12:44+08:00 chai2010 chaishushan@gmail.com 
https://gitcode.net/wa-lang/wa/-/commit/edaa2dd38098a58760eb2cfa655d04ae9bebdc87 完善 syntax 语法格式 2023-06-28T01:17:26+08:00 chai2010 chaishushan@gmail.com https://gitcode.net/wa-lang/wa/-/commit/86a784e0198d96284bf806580db92845bb6eb791 fmt 支持 global 关键字 2023-06-28T02:07:11+08:00 chai2010 chaishushan@gmail.com https://gitcode.net/wa-lang/wa/-/commit/0a5a671f5da3d7829d8ff7d2a37111beb86bc900 CI 添加 fmt 并包含更多测试 2023-06-28T19:28:16+08:00 chai2010 chaishushan@gmail.com https://gitcode.net/wa-lang/wa/-/commit/dfa6b44319be468122f54020aea4e2cf32f36884 屏蔽 parser 失败的测试 2023-06-28T19:32:25+08:00 chai2010 chaishushan@gmail.com https://gitcode.net/wa-lang/wa/-/commit/b0839d7a2c0df4b52d4d0e937e54d6cb1205493b 增加 版本号 文件 2023-06-28T22:20:25+08:00 chai2010 chaishushan@gmail.com
......@@ -38,6 +38,7 @@ ci-test-all:
go run main.go test unicode/utf8
@echo "== std ok =="
go run main.go ./waroot/hello.wa
cd waroot && go run ../main.go hello.wa
make -C ./waroot/examples ci-test-all
......
......@@ -11,6 +11,7 @@ import (
"wa-lang.org/wa/internal/format"
"wa-lang.org/wa/internal/loader"
"wa-lang.org/wa/internal/logger"
"wa-lang.org/wa/internal/wamime"
)
// 调试参数
......@@ -102,3 +103,8 @@ func FormatCode(filename, code string) (string, error) {
}
return string(data), nil
}
// 获取代码语法类型
func GetCodeSyntax(filename string, code []byte) string {
return wamime.GetCodeMime(filename, code)
}
......@@ -59,3 +59,28 @@ func ExampleRunCode_args() {
// 0 : aa
// 1 : bb
}
func ExampleRunCode_wz() {
const code = `
#wa:syntax=wz
引于 "书"
【启】:
书·说:"你好,凹语言中文版!"
`
output, err := api.RunCode(api.DefaultConfig(), "hello.wa", code)
if err != nil {
if len(output) != 0 {
log.Println(string(output))
}
log.Fatal(err)
}
fmt.Print(string(output))
// Output:
// 你好,凹语言中文版!
}
......@@ -8,7 +8,7 @@ import (
"wa-lang.org/wa/api"
)
func _ExampleFormatCode() {
func ExampleFormatCode() {
s, err := api.FormatCode("hello.wa", "func add(a:i32, b:i32)=>i32 {return a+b}")
if err != nil {
panic(err)
......
......@@ -14,19 +14,32 @@ import (
func Fmt(path string) error {
if path == "" {
path, _ = os.Getwd()
path = "."
}
var waFileList []string
if strings.HasSuffix(path, "...") {
waFileList = getDirWaFileList(strings.TrimSuffix(path, "..."))
switch {
case strings.HasSuffix(path, ".wa"):
waFileList = append(waFileList, path)
case strings.HasSuffix(path, ".wz"):
waFileList = append(waFileList, path)
case strings.HasSuffix(path, "..."):
waFileList = getDirWaFileList(
strings.TrimSuffix(path, "..."),
true, ".wa", ".wz", // 包含子目录
)
default:
// 不包含子目录
waFileList = getDirWaFileList(
path, false, ".wa", ".wz",
)
}
var changedFileList []string
for _, s := range waFileList {
changed, err := fmtFile(s)
if err != nil {
return err
return fmt.Errorf("%s: %w", s, err)
}
if changed {
changedFileList = append(changedFileList, s)
......@@ -51,8 +64,28 @@ func fmtFile(path string) (changed bool, err error) {
return true, nil
}
func getDirWaFileList(dir string) []string {
func getDirWaFileList(dir string, walkSubDir bool, extList ...string) []string {
var waFileList []string
if !walkSubDir {
files, err := os.ReadDir(".")
if err != nil {
return nil
}
for _, file := range files {
if file.IsDir() {
continue
}
for _, ext := range extList {
if strings.HasSuffix(file.Name(), ext) {
waFileList = append(waFileList, filepath.Join(dir, file.Name()))
}
}
}
sort.Strings(waFileList)
return waFileList
}
filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
......@@ -60,8 +93,11 @@ func getDirWaFileList(dir string) []string {
if info.IsDir() {
return nil
}
if strings.HasSuffix(path, ".wa") {
waFileList = append(waFileList, path)
for _, ext := range extList {
if strings.HasSuffix(path, ext) {
waFileList = append(waFileList, path)
return nil
}
}
return nil
})
......
......@@ -8,6 +8,7 @@ import (
"net/http"
"wa-lang.org/wa/api"
"wa-lang.org/wa/internal/wamime"
)
func (p *WebServer) fmtHandler(w http.ResponseWriter, r *http.Request) {
......@@ -28,7 +29,12 @@ func (p *WebServer) fmtHandler(w http.ResponseWriter, r *http.Request) {
}
func (p *WebServer) fmtCode(code []byte) (*fmtResponse, error) {
output, err := api.FormatCode("prog.wa", string(code))
filename := "prog.wa"
if wamime.GetCodeMime(filename, code) == "wz" {
filename = "prog.wz"
}
output, err := api.FormatCode(filename, string(code))
if err != nil {
resp := &fmtResponse{
Error: err.Error(),
......
......@@ -10,6 +10,7 @@ import (
"os"
"wa-lang.org/wa/api"
"wa-lang.org/wa/internal/wamime"
)
func (p *WebServer) runHandler(w http.ResponseWriter, r *http.Request) {
......@@ -41,7 +42,12 @@ func (p *WebServer) compileAndRun(req *Request) (*Response, error) {
}
defer os.RemoveAll(tmpDir)
result, err := api.RunCode(api.DefaultConfig(), "prog.wa", req.Body)
filename := "prog.wa"
if wamime.GetCodeMime(filename, []byte(req.Body)) == "wz" {
filename = "prog.wz"
}
result, err := api.RunCode(api.DefaultConfig(), filename, req.Body)
if err != nil {
resp := &Response{Errors: err.Error()}
return resp, nil
......
......@@ -9,9 +9,7 @@ package app
import (
"fmt"
"os"
"runtime/debug"
"strings"
"time"
"wa-lang.org/wa/api"
"wa-lang.org/wa/internal/3rdparty/cli"
......@@ -20,25 +18,19 @@ import (
"wa-lang.org/wa/internal/lsp"
"wa-lang.org/wa/internal/wabt"
"wa-lang.org/wa/internal/wazero"
"wa-lang.org/wa/waroot"
)
func Main() {
cliApp := cli.NewApp()
cliApp.Name = "Wa"
cliApp.Usage = "Wa is a tool for managing Wa source code."
cliApp.Version = func() string {
if info, ok := debug.ReadBuildInfo(); ok {
if info.Main.Version != "" {
return info.Main.Version
}
}
return "devel:" + time.Now().Format("2006-01-02+15:04:05")
}()
cliApp.Version = waroot.GetVersion()
cliApp.Flags = []cli.Flag{
&cli.StringFlag{
Name: "target",
Usage: "set target os (arduino|chrome|wasi)",
Usage: fmt.Sprintf("set target os (%s)", strings.Join(config.WaOS_List, "|")),
Value: config.WaOS_Default,
},
&cli.BoolFlag{
......@@ -54,10 +46,7 @@ func Main() {
}
cliApp.Before = func(c *cli.Context) error {
switch c.String("target") {
case "wasi", "arduino", "chrome":
// OK
default:
if !config.CheckWaOS(c.String("target")) {
fmt.Printf("unknown target: %s\n", c.String("target"))
os.Exit(1)
}
......@@ -141,7 +130,7 @@ func Main() {
Flags: []cli.Flag{
&cli.StringFlag{
Name: "target",
Usage: "set target os (wasi|arduino|chrome)",
Usage: fmt.Sprintf("set target os (%s)", strings.Join(config.WaOS_List, "|")),
Value: config.WaOS_Default,
},
&cli.StringFlag{
......@@ -166,7 +155,7 @@ func Main() {
},
&cli.StringFlag{
Name: "target",
Usage: "set target os (wasi|arduino|chrome)",
Usage: fmt.Sprintf("set target os (%s)", strings.Join(config.WaOS_List, "|")),
Value: config.WaOS_Default,
},
&cli.StringFlag{
......
......@@ -3,8 +3,6 @@
package main
import (
"fmt"
"wa-lang.org/wa/api"
"wa-lang.org/wa/internal/backends/compiler_wat"
"wa-lang.org/wa/internal/config"
......@@ -65,8 +63,6 @@ func waBuildFile(cfg *config.Config, filename string, src interface{}) (wat []by
return nil, err
}
fmt.Println(prog.DebugString())
watOut, err := compiler_wat.New().Compile(prog, "main")
return []byte(watOut), err
}
......@@ -2143,7 +2143,7 @@ func output() {
if !lflag {
fmt.Fprintf(ftable, "\n//line yacctab:1")
}
fmt.Fprintf(ftable, "\nvar %sExca = [...]int{\n", prefix)
fmt.Fprintf(ftable, "\nglobal %sExca = [...]int{\n", prefix)
if len(errors) > 0 {
stateTable = make([]Row, nstate)
......
......@@ -868,7 +868,7 @@ type (
GenDecl struct {
Doc *CommentGroup // associated documentation; or nil
TokPos token.Pos // position of Tok
Tok token.Token // IMPORT, CONST, TYPE, VAR
Tok token.Token // IMPORT, CONST, TYPE, VAR, GLOBAL
Lparen token.Pos // position of '(', if any
Specs []Spec
Rparen token.Pos // position of ')', if any
......
......@@ -536,7 +536,7 @@ func NodeDescription(n ast.Node) string {
return "constant declaration"
case token.TYPE:
return "type declaration"
case token.VAR:
case token.VAR, token.GLOBAL:
return "variable declaration"
}
case *ast.Ident:
......
......@@ -184,7 +184,7 @@ func GetFnMangleName(v interface{}) (internal string, external string) {
switch rt := recv.Type().(type) {
case *types.Named:
internal += GenSymbolName(rt.Obj().Name())
external += rt.Obj().Name()
external += GenSymbolName(rt.Obj().Name())
case *types.Pointer:
btype, ok := rt.Elem().(*types.Named)
......@@ -192,7 +192,7 @@ func GetFnMangleName(v interface{}) (internal string, external string) {
panic("Unreachable")
}
internal += GenSymbolName(btype.Obj().Name())
external += btype.Obj().Name()
external += GenSymbolName(btype.Obj().Name())
default:
panic("Unreachable")
......@@ -205,7 +205,7 @@ func GetFnMangleName(v interface{}) (internal string, external string) {
internal += "."
external += "."
internal += GenSymbolName(f.Name())
external += f.Name()
external += GenSymbolName(f.Name())
case *types.Func:
internal, external = GetPkgMangleName(f.Pkg().Path())
......@@ -216,7 +216,7 @@ func GetFnMangleName(v interface{}) (internal string, external string) {
switch rt := recv.Type().(type) {
case *types.Named:
internal += GenSymbolName(rt.Obj().Name())
external += rt.Obj().Name()
external += GenSymbolName(rt.Obj().Name())
case *types.Pointer:
btype, ok := rt.Elem().(*types.Named)
......@@ -224,7 +224,7 @@ func GetFnMangleName(v interface{}) (internal string, external string) {
panic("Unreachable")
}
internal += GenSymbolName(btype.Obj().Name())
external += btype.Obj().Name()
external += GenSymbolName(btype.Obj().Name())
default:
panic("Unreachable")
......@@ -233,7 +233,7 @@ func GetFnMangleName(v interface{}) (internal string, external string) {
internal += "."
external += "."
internal += GenSymbolName(f.Name())
external += f.Name()
external += GenSymbolName(f.Name())
}
return internal, external
......@@ -251,7 +251,7 @@ func GetPkgMangleName(pkg_path string) (string, string) {
symbol_name += GenSymbolName(p)
symbol_name += "$"
}
exp_name += pkg_path
exp_name += GenSymbolName(pkg_path)
symbol_name += GenSymbolName(pkg_path)
return symbol_name, exp_name
}
......
......@@ -36,6 +36,7 @@ const (
WaOS_arduino = "arduino" // Arduino 平台
WaOS_chrome = "chrome" // Chrome 浏览器
WaOS_wasi = "wasi" // WASI 接口
WaOS_mvp = "mvp" // MVP 接口, 最小可用
)
// 体系结构类型
......@@ -61,6 +62,7 @@ var WaOS_List = []string{
WaOS_arduino,
WaOS_chrome,
WaOS_wasi,
WaOS_mvp,
}
// CPU 列表
......@@ -71,3 +73,13 @@ var WaArch_List = []string{
WaArch_riscv64,
WaArch_wasm,
}
// 检查 OS 值是否 OK
func CheckWaOS(os string) bool {
for _, x := range WaOS_List {
if x == os {
return true
}
}
return false
}
......@@ -27,6 +27,7 @@ import (
"wa-lang.org/wa/internal/parser"
"wa-lang.org/wa/internal/printer"
"wa-lang.org/wa/internal/token"
"wa-lang.org/wa/internal/wamime"
)
var config = printer.Config{Mode: printer.UseSpaces | printer.TabIndent, Tabwidth: 8}
......@@ -65,11 +66,15 @@ func File(vfs fs.FS, filename string, src interface{}) (text []byte, changed boo
if err != nil {
return nil, false, err
}
golden, err := SourceFile(text)
if !bytes.Equal(text, golden) {
return text, false, nil
// TODO: 支持中文格式化
if wamime.GetCodeMime(filename, text) == "wa" {
golden, err := SourceFile(text)
if bytes.Equal(text, golden) {
return text, false, nil
}
return golden, true, err
}
return golden, true, nil
return text, false, nil
}
// Node formats node in canonical gofmt style and writes the result to dst.
......
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package format
import (
"bytes"
"io/ioutil"
"strings"
"testing"
"wa-lang.org/wa/internal/parser"
"wa-lang.org/wa/internal/token"
)
// testfile is the formatter's own source (in Wa syntax), used as the
// round-trip input by TestNode and TestSource below.
const testfile = "format_test.go.wa"
// diff reports (via t.Errorf) the first line on which dst and src
// disagree, or a length mismatch when one is a strict prefix of the
// other. Identical inputs produce no test failures.
func diff(t *testing.T, dst, src []byte) {
	curLine := 1
	lineStart := 0 // byte offset where the current line begins
	limit := len(dst)
	if len(src) < limit {
		limit = len(src)
	}
	for i := 0; i < limit; i++ {
		if dst[i] != src[i] {
			t.Errorf("dst:%d: %s\n", curLine, dst[lineStart:i+1])
			t.Errorf("src:%d: %s\n", curLine, src[lineStart:i+1])
			return
		}
		if src[i] == '\n' {
			curLine++
			lineStart = i + 1
		}
	}
	if len(dst) != len(src) {
		t.Errorf("len(dst) = %d, len(src) = %d\nsrc = %q", len(dst), len(src), src)
	}
}
// TestNode parses testfile and formats the resulting AST with Node,
// checking (via diff) that the output is byte-identical to the source,
// i.e. that testfile is already in canonical format.
func TestNode(t *testing.T) {
	src, err := ioutil.ReadFile(testfile)
	if err != nil {
		t.Fatal(err)
	}

	fset := token.NewFileSet()
	file, err := parser.ParseFile(nil, fset, testfile, src, parser.ParseComments)
	if err != nil {
		t.Fatal(err)
	}

	var buf bytes.Buffer
	if err = Node(&buf, fset, file); err != nil {
		t.Fatal("Node failed:", err)
	}

	diff(t, buf.Bytes(), src)
}
// TestSource checks that Source is idempotent on testfile: formatting
// an already-formatted file must not change a single byte.
func TestSource(t *testing.T) {
	src, err := ioutil.ReadFile(testfile)
	if err != nil {
		t.Fatal(err)
	}

	res, err := Source(src)
	if err != nil {
		t.Fatal("Source failed:", err)
	}

	diff(t, res, src)
}
// Test cases that are expected to fail are marked by the prefix "ERROR".
// The formatted result must look the same as the input for successful tests.
// Each entry is fed to String by TestPartial.
var tests = []string{
	// declaration lists
	`import "wa-lang.org/wa/internal/format"`,
	"var x int",
	"var x int\n\ntype T struct{}",

	// statement lists
	"x := 0",
	"f(a, b, c)\nvar x int = f(1, 2, 3)",

	// indentation, leading and trailing space
	"\n\t\t\n\n\t\t\tx := 0\n\t\t\tconst s = `\nfoo\n`\n\n\n",     // no indentation added inside raw strings
	"\n\t\t\n\n\t\t\tx := 0\n\t\t\tconst s = `\n\t\tfoo\n`\n\n\n", // no indentation removed inside raw strings

	// comments
	"/* Comment */",
	"\t/* Comment */ ",
	"\n/* Comment */ ",
	"i := 5 /* Comment */",         // issue #5551
	"\ta()\n//line :1",             // issue #11276
	"\t//xxx\n\ta()\n//line :2",    // issue #11276
	"\ta() //line :1\n\tb()\n",     // issue #11276
	"x := 0\n//line :1\n//line :2", // issue #11276

	// whitespace
	"",     // issue #11275
	" ",    // issue #11275
	"\t",   // issue #11275
	"\t\t", // issue #11275
	"\n",   // issue #11275
	"\n\n", // issue #11275
	"\t\n", // issue #11275

	// erroneous programs
	"ERROR1 + 2 +",
	"ERRORx := 0",
}
// String formats the source fragment s and returns the result as a
// string. It is a thin convenience wrapper around Source for the
// table-driven tests in this file.
func String(s string) (string, error) {
	formatted, err := Source([]byte(s))
	if err != nil {
		return "", err
	}
	return string(formatted), nil
}
// TestPartial runs every entry of tests through String. Entries marked
// with the "ERROR" prefix must fail to round-trip; all others must
// format to exactly their own text.
func TestPartial(t *testing.T) {
	for _, src := range tests {
		if strings.HasPrefix(src, "ERROR") {
			// test expected to fail
			src = strings.TrimPrefix(src, "ERROR")
			res, err := String(src)
			if err == nil && res == src {
				t.Errorf("formatting succeeded but was expected to fail:\n%q", src)
			}
			continue
		}
		// test expected to succeed
		res, err := String(src)
		if err != nil {
			t.Errorf("formatting failed (%s):\n%q", err, src)
			continue
		}
		if res != src {
			t.Errorf("formatting incorrect:\nsource: %q\nresult: %q", src, res)
		}
	}
}
......@@ -20,6 +20,7 @@ import (
"wa-lang.org/wa/internal/ssa"
"wa-lang.org/wa/internal/token"
"wa-lang.org/wa/internal/types"
"wa-lang.org/wa/internal/wamime"
"wa-lang.org/wa/waroot"
)
......@@ -462,7 +463,7 @@ func (p *_Loader) ParseDir(pkgpath string) (filenames []string, files []*ast.Fil
for i, filename := range filenames {
var f *ast.File
if p.hasExt(filename, ".wz") {
if wamime.GetCodeMime(filename, datas[i]) == "wz" {
f, err = wzparser.ParseFile(nil, p.prog.Fset, filename, datas[i], wzparser.AllErrors|wzparser.ParseComments)
} else {
f, err = parser.ParseFile(nil, p.prog.Fset, filename, datas[i], parser.AllErrors|parser.ParseComments)
......
......@@ -43,7 +43,7 @@ func loadProgramFileMeta(cfg *config.Config, filename string, src interface{}) (
Root: "__main__",
MainPkg: "__main__",
Pkg: config.Manifest_package{
Name: filename,
Name: filepath.Base(filename),
Pkgpath: "__main__",
},
}
......@@ -55,7 +55,7 @@ func loadProgramFileMeta(cfg *config.Config, filename string, src interface{}) (
vfs = new(config.PkgVFS)
if vfs.App == nil {
vfs.App = fstest.MapFS{
filename: &fstest.MapFile{
filepath.Base(filename): &fstest.MapFile{
Data: srcData,
},
}
......
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file implements a parser test harness. The files in the testdata
// directory are parsed and the errors reported are compared against the
// error messages expected in the test files. The test files must end in
// .src rather than .go so that they are not disturbed by gofmt runs.
//
// Expected errors are indicated in the test files by putting a comment
// of the form /* ERROR "rx" */ immediately following an offending token.
// The harness will verify that an error matching the regular expression
// rx is reported at that source position.
//
// For instance, the following test file indicates that a "not declared"
// error should be reported for the undeclared variable x:
//
// package p
// func f() {
// _ = x /* ERROR "not declared" */ + 1
// }
package parser
import (
"io/fs"
"io/ioutil"
"path/filepath"
"regexp"
"strings"
"testing"
"wa-lang.org/wa/internal/scanner"
"wa-lang.org/wa/internal/token"
)
// testdata is the directory containing the .src files scanned by TestErrors.
const testdata = "testdata"
// getFile returns the token.File registered in fset under filename.
// getFile assumes that each filename occurs at most once in the set
// and panics otherwise; it returns nil when the file is not found.
func getFile(fset *token.FileSet, filename string) (file *token.File) {
	fset.Iterate(func(f *token.File) bool {
		if f.Name() != filename {
			return true // keep iterating
		}
		if file != nil {
			panic(filename + " used multiple times")
		}
		file = f
		return true
	})
	return file
}
// getPos translates a byte offset within filename into a token.Pos,
// or token.NoPos when the file is not registered in fset.
func getPos(fset *token.FileSet, filename string, offset int) token.Pos {
	f := getFile(fset, filename)
	if f == nil {
		return token.NoPos
	}
	return f.Pos(offset)
}
// ERROR comments must be of the form /* ERROR "rx" */ and rx is
// a regular expression that matches the expected error message.
// The special form /* ERROR HERE "rx" */ must be used for error
// messages that appear immediately after a token, rather than at
// a token's position.
//
// Submatch 1 captures the optional HERE marker, submatch 2 the quoted
// message pattern.
var errRx = regexp.MustCompile(`^/\* *ERROR *(HERE)? *"([^"]*)" *\*/$`)
// expectedErrors collects the regular expressions of ERROR comments found
// in files and returns them as a map of error positions to error messages.
//
func expectedErrors(fset *token.FileSet, filename string, src []byte) map[token.Pos]string {
	errors := make(map[token.Pos]string)

	var s scanner.Scanner
	// file was parsed already - do not add it again to the file
	// set otherwise the position information returned here will
	// not match the position information collected by the parser
	s.Init(getFile(fset, filename), src, nil, scanner.ScanComments)
	var prev token.Pos // position of last non-comment, non-semicolon token
	var here token.Pos // position immediately after the token at position prev

	for {
		pos, tok, lit := s.Scan()
		switch tok {
		case token.EOF:
			return errors
		case token.COMMENT:
			s := errRx.FindStringSubmatch(lit)
			if len(s) == 3 {
				// s[1] is the optional HERE marker, s[2] the expected
				// message pattern; anchor at the previous token (or just
				// after it, for HERE)
				pos := prev
				if s[1] == "HERE" {
					pos = here
				}
				errors[pos] = string(s[2])
			}
		case token.SEMICOLON:
			// don't use the position of auto-inserted (invisible) semicolons
			if lit != ";" {
				break
			}
			fallthrough
		default:
			prev = pos
			var l int // token length
			if tok.IsLiteral() {
				l = len(lit)
			} else {
				l = len(tok.String())
			}
			here = prev + token.Pos(l)
		}
	}
}
// compareErrors compares the map of expected error messages with the list
// of found errors and reports discrepancies. Matched entries are removed
// from expected; anything left over at the end is reported as missing.
//
func compareErrors(t *testing.T, fset *token.FileSet, expected map[token.Pos]string, found scanner.ErrorList) {
	// NOTE: the loop variable is named "error", shadowing the builtin
	// type inside the loop body.
	for _, error := range found {
		// error.Pos is a token.Position, but we want
		// a token.Pos so we can do a map lookup
		pos := getPos(fset, error.Pos.Filename, error.Pos.Offset)
		if msg, found := expected[pos]; found {
			// we expect a message at pos; check if it matches
			rx, err := regexp.Compile(msg)
			if err != nil {
				t.Errorf("%s: %v", error.Pos, err)
				continue
			}
			if match := rx.MatchString(error.Msg); !match {
				t.Errorf("%s: %q does not match %q", error.Pos, error.Msg, msg)
				continue
			}
			// we have a match - eliminate this error
			delete(expected, pos)
		} else {
			// To keep in mind when analyzing failed test output:
			// If the same error position occurs multiple times in errors,
			// this message will be triggered (because the first error at
			// the position removes this position from the expected errors).
			t.Errorf("%s: unexpected error: %s", error.Pos, error.Msg)
		}
	}

	// there should be no expected errors left
	if len(expected) > 0 {
		t.Errorf("%d errors not reported:", len(expected))
		for pos, msg := range expected {
			t.Errorf("%s: %s\n", fset.Position(pos), msg)
		}
	}
}
// checkErrors parses filename (or input, if non-nil) and verifies that
// the errors reported by the parser match the ERROR comments embedded
// in the source.
func checkErrors(vfs fs.FS, t *testing.T, filename string, input interface{}) {
	src, err := readSource(vfs, filename, input)
	if err != nil {
		t.Error(err)
		return
	}

	fset := token.NewFileSet()
	_, err = ParseFile(vfs, fset, filename, src, DeclarationErrors|AllErrors)
	// an error that is not a scanner.ErrorList is something other than
	// a syntax problem and fails the test directly
	found, ok := err.(scanner.ErrorList)
	if err != nil && !ok {
		t.Error(err)
		return
	}
	found.RemoveMultiples()

	// we are expecting the following errors
	// (collect these after parsing a file so that it is found in the file set)
	expected := expectedErrors(fset, filename, src)

	// verify errors returned by the parser
	compareErrors(t, fset, expected, found)
}
// TestErrors runs checkErrors over every non-hidden *.src file in the
// testdata directory.
func TestErrors(t *testing.T) {
	entries, err := ioutil.ReadDir(testdata)
	if err != nil {
		t.Fatal(err)
	}
	for _, entry := range entries {
		name := entry.Name()
		if entry.IsDir() || strings.HasPrefix(name, ".") || !strings.HasSuffix(name, ".src") {
			continue
		}
		checkErrors(nil, t, filepath.Join(testdata, name), nil)
	}
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file contains the exported entry points for invoking the parser.
package parser
import (
"bytes"
"errors"
"io"
"io/fs"
"io/ioutil"
"os"
"path/filepath"
"strings"
"wa-lang.org/wa/internal/ast"
"wa-lang.org/wa/internal/token"
)
// If src != nil, readSource converts src to a []byte if possible;
// otherwise it returns an error. If src == nil, readSource returns
// the result of reading the file specified by filename.
//
func readSource(vfs fs.FS, filename string, src interface{}) ([]byte, error) {
if src != nil {
switch s := src.(type) {
case string:
return []byte(s), nil
case []byte:
return s, nil
case *bytes.Buffer:
// is io.Reader, but src is already available in []byte form
if s != nil {
return s.Bytes(), nil
}
case io.Reader:
return ioutil.ReadAll(s)
}
return nil, errors.New("invalid source")
}
if vfs != nil {
return fs.ReadFile(vfs, filename)
}
return os.ReadFile(filename)
}
// A Mode value is a set of flags (or 0).
// They control the amount of source code parsed and other optional
// parser functionality. Flags may be or'ed together.
//
type Mode uint

const (
	PackageClauseOnly Mode = 1 << iota // stop parsing after package clause
	ImportsOnly                        // stop parsing after import declarations
	ParseComments                      // parse comments and add them to AST
	Trace                              // print a trace of parsed productions
	DeclarationErrors                  // report declaration errors
	SpuriousErrors                     // same as AllErrors, for backward-compatibility
	AllErrors = SpuriousErrors // report all errors (not just the first 10 on different lines)
)
// ParseFile parses the source code of a single Go source file and returns
// the corresponding ast.File node. The source code may be provided via
// the filename of the source file, or via the src parameter.
//
// If src != nil, ParseFile parses the source from src and the filename is
// only used when recording position information. The type of the argument
// for the src parameter must be string, []byte, or io.Reader.
// If src == nil, ParseFile parses the file specified by filename.
//
// The mode parameter controls the amount of source text parsed and other
// optional parser functionality. Position information is recorded in the
// file set fset, which must not be nil.
//
// If the source couldn't be read, the returned AST is nil and the error
// indicates the specific failure. If the source was read but syntax
// errors were found, the result is a partial AST (with ast.Bad* nodes
// representing the fragments of erroneous source code). Multiple errors
// are returned via a scanner.ErrorList which is sorted by file position.
//
func ParseFile(vfs fs.FS, fset *token.FileSet, filename string, src interface{}, mode Mode) (f *ast.File, err error) {
	if fset == nil {
		panic("parser.ParseFile: no token.FileSet provided (fset == nil)")
	}

	// get source
	text, err := readSource(vfs, filename, src)
	if err != nil {
		return nil, err
	}

	var p parser
	defer func() {
		if e := recover(); e != nil {
			// resume same panic if it's not a bailout
			// (a bailout is the parser's internal signal that it gave up
			// after too many errors; anything else is a real panic)
			if _, ok := e.(bailout); !ok {
				panic(e)
			}
		}

		// set result values (this runs on both normal return and bailout;
		// f and err are named results, so the assignments take effect)
		if f == nil {
			// source is not a valid Go source file - satisfy
			// ParseFile API and return a valid (but) empty
			// *ast.File
			f = &ast.File{
				Name:  new(ast.Ident),
				Scope: ast.NewScope(nil),
			}
		}

		p.errors.Sort()
		err = p.errors.Err()
	}()

	// parse source
	p.init(fset, filename, text, mode)
	f = p.parseFile()

	return
}
// ParseDir calls ParseFile for all files with names ending in ".wa" in the
// directory specified by path and returns a map of package name -> package
// AST with all the packages found.
//
// If filter != nil, only the files with os.FileInfo entries passing through
// the filter (and ending in ".wa") are considered. The mode bits are passed
// to ParseFile unchanged. Position information is recorded in fset, which
// must not be nil.
//
// If the directory couldn't be read, a nil map and the respective error are
// returned. If a parse error occurred, a non-nil but incomplete map and the
// first error encountered are returned.
//
func ParseDir(vfs fs.FS, fset *token.FileSet, path string, filter func(os.FileInfo) bool, mode Mode) (pkgs map[string]*ast.Package, first error) {
	fd, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer fd.Close()

	list, err := fd.Readdir(-1)
	if err != nil {
		return nil, err
	}

	pkgs = make(map[string]*ast.Package)
	for _, d := range list {
		if strHasSuffix(d.Name(), ".wa") {
			if filter == nil || filter(d) {
				filename := filepath.Join(path, d.Name())
				if src, err := ParseFile(vfs, fset, filename, nil, mode); err == nil {
					name := src.Name.Name
					pkg, found := pkgs[name]
					if !found {
						// first file seen for this package: create its entry
						pkg = &ast.Package{
							Name:  name,
							Files: make(map[string]*ast.File),
						}
						pkgs[name] = pkg
					}
					pkg.Files[filename] = src
				} else if first == nil {
					// remember only the first parse error encountered
					first = err
				}
			}
		}
	}

	return
}
// strHasSuffix reports whether s ends in any of the given extensions.
// With no extensions it reports false.
func strHasSuffix(s string, ext ...string) bool {
	matched := false
	for i := 0; i < len(ext) && !matched; i++ {
		matched = strings.HasSuffix(s, ext[i])
	}
	return matched
}
// ParseExprFrom is a convenience function for parsing an expression.
// The arguments have the same meaning as for ParseFile, but the source must
// be a valid Go (type or value) expression. Specifically, fset must not
// be nil.
//
// The results are named so that the deferred error collection below can
// take effect: previously, with unnamed results, a parser bailout
// (too many errors) was recovered but the function still returned
// (nil, nil), silently discarding every accumulated error.
func ParseExprFrom(fset *token.FileSet, filename string, src interface{}, mode Mode) (expr ast.Expr, err error) {
	if fset == nil {
		panic("parser.ParseExprFrom: no token.FileSet provided (fset == nil)")
	}

	// get source
	text, err := readSource(nil, filename, src)
	if err != nil {
		return nil, err
	}

	var p parser
	defer func() {
		if e := recover(); e != nil {
			// resume same panic if it's not a bailout
			if _, ok := e.(bailout); !ok {
				panic(e)
			}
		}
		// report collected errors; assigning to the named result makes
		// this visible to the caller even after a bailout
		p.errors.Sort()
		err = p.errors.Err()
	}()

	// parse expr
	p.init(fset, filename, text, mode)
	// Set up pkg-level scopes to avoid nil-pointer errors.
	// This is not needed for a correct expression x as the
	// parser will be ok with a nil topScope, but be cautious
	// in case of an erroneous x.
	p.openScope()
	p.pkgScope = p.topScope
	e := p.parseRhsOrType()
	p.closeScope()
	assert(p.topScope == nil, "unbalanced scopes")

	// If a semicolon was inserted, consume it;
	// report an error if there's more tokens.
	if p.tok == token.SEMICOLON && p.lit == "\n" {
		p.next()
	}
	p.expect(token.EOF)

	if p.errors.Len() > 0 {
		p.errors.Sort()
		return nil, p.errors.Err()
	}

	return e, nil
}
// ParseExpr is a convenience function for obtaining the AST of an expression x.
// The position information recorded in the AST is undefined. The filename used
// in error messages is the empty string.
//
func ParseExpr(x string) (ast.Expr, error) {
	// a fresh, throwaway FileSet is used since positions are undefined anyway
	return ParseExprFrom(token.NewFileSet(), "", []byte(x), 0)
}
......@@ -2394,7 +2394,7 @@ func (p *parser) parseValueSpec(doc *ast.CommentGroup, keyword token.Token, iota
p.expectSemi() // call before accessing p.linecomment
switch keyword {
case token.VAR:
case token.VAR, token.GLOBAL:
if typ == nil && values == nil {
p.error(pos, "missing variable type or initialization")
}
......@@ -2417,7 +2417,7 @@ func (p *parser) parseValueSpec(doc *ast.CommentGroup, keyword token.Token, iota
Comment: p.lineComment,
}
kind := ast.Con
if keyword == token.VAR {
if keyword == token.VAR || keyword == token.GLOBAL {
kind = ast.Var
}
p.declare(spec, iota, p.topScope, kind, idents...)
......@@ -2460,10 +2460,6 @@ func (p *parser) parseGenDecl(keyword token.Token, f parseSpecFunction) *ast.Gen
doc := p.leadComment
pos := p.expect(keyword)
if keyword == token.GLOBAL {
keyword = token.VAR // TODO(chai2010): AST 支持 global
}
var lparen, rparen token.Pos
var list []ast.Spec
if p.tok == token.LPAREN {
......
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package parser implements a parser for Go source files. Input may be
// provided in a variety of forms (see the various Parse* functions); the
// output is an abstract syntax tree (AST) representing the Go source. The
// parser is invoked through one of the Parse* functions.
//
// The parser accepts a larger language than is syntactically permitted by
// the Go spec, for simplicity, and for improved robustness in the presence
// of syntax errors. For instance, in method declarations, the receiver is
// treated like an ordinary parameter list and thus may contain multiple
// entries where the spec permits exactly one. Consequently, the corresponding
// field in the AST (ast.FuncDecl.Recv) field is not restricted to one entry.
//
package parser
import (
"fmt"
"strconv"
"strings"
"unicode"
"wa-lang.org/wa/internal/ast"
"wa-lang.org/wa/internal/scanner"
"wa-lang.org/wa/internal/token"
)
// The parser structure holds the parser's internal state.
type parser struct {
	file    *token.File       // the file being parsed (positions are relative to it)
	errors  scanner.ErrorList // errors accumulated during scanning and parsing
	scanner scanner.Scanner   // tokenizer feeding the parser

	// Tracing/debugging
	mode   Mode // parsing mode
	trace  bool // == (mode & Trace != 0)
	indent int  // indentation used for tracing output

	// Comments
	comments    []*ast.CommentGroup
	leadComment *ast.CommentGroup // last lead comment
	lineComment *ast.CommentGroup // last line comment

	// Next token
	pos token.Pos   // token position
	tok token.Token // one token look-ahead
	lit string      // token literal

	// Error recovery
	// (used to limit the number of calls to parser.advance
	// w/o making scanning progress - avoids potential endless
	// loops across multiple parser functions during error recovery)
	syncPos token.Pos // last synchronization position
	syncCnt int       // number of parser.advance calls without progress

	// Non-syntactic parser control
	exprLev int  // < 0: in control clause, >= 0: in expression
	inRhs   bool // if set, the parser is parsing a rhs expression

	// Ordinary identifier scopes
	pkgScope   *ast.Scope        // pkgScope.Outer == nil
	topScope   *ast.Scope        // top-most scope; may be pkgScope
	unresolved []*ast.Ident      // unresolved identifiers
	imports    []*ast.ImportSpec // list of imports

	// Label scopes
	// (maintained by open/close LabelScope)
	labelScope  *ast.Scope     // label scope for current function
	targetStack [][]*ast.Ident // stack of unresolved labels
}
// init prepares the parser to scan the given source: it registers the
// file with fset, configures the scanner (forwarding scan errors into
// p.errors), records the mode flags, and loads the first token.
func (p *parser) init(fset *token.FileSet, filename string, src []byte, mode Mode) {
	p.file = fset.AddFile(filename, -1, len(src))

	var scanMode scanner.Mode
	if mode&ParseComments != 0 {
		scanMode = scanner.ScanComments
	}
	errHandler := func(pos token.Position, msg string) { p.errors.Add(pos, msg) }
	p.scanner.Init(p.file, src, errHandler, scanMode)

	p.mode = mode
	p.trace = mode&Trace != 0 // for convenience (p.trace is used frequently)
	p.next()
}
// ----------------------------------------------------------------------------
// Scoping support
// openScope pushes a new ordinary-identifier scope nested in the current one.
func (p *parser) openScope() {
	p.topScope = ast.NewScope(p.topScope)
}
// closeScope pops the current ordinary-identifier scope.
func (p *parser) closeScope() {
	p.topScope = p.topScope.Outer
}
// openLabelScope pushes a new label scope and an empty slot for
// unresolved label targets (branch statements seen before the label).
func (p *parser) openLabelScope() {
	p.labelScope = ast.NewScope(p.labelScope)
	p.targetStack = append(p.targetStack, nil)
}
// closeLabelScope resolves the labels collected for the current function
// against the current label scope, then pops both the target slot and
// the scope. Undefined labels are reported when DeclarationErrors is set.
func (p *parser) closeLabelScope() {
	// resolve labels
	n := len(p.targetStack) - 1
	scope := p.labelScope
	for _, ident := range p.targetStack[n] {
		ident.Obj = scope.Lookup(ident.Name)
		if ident.Obj == nil && p.mode&DeclarationErrors != 0 {
			p.error(ident.Pos(), fmt.Sprintf("label %s undefined", ident.Name))
		}
	}
	// pop label scope
	p.targetStack = p.targetStack[0:n]
	p.labelScope = p.labelScope.Outer
}
// declare records decl/data as the declaration of each identifier in
// idents, inserting the resulting objects into scope. Blank identifiers
// get an object but are never inserted. With DeclarationErrors set, a
// redeclaration in the same scope is reported (with the previous
// declaration's position when it is known).
func (p *parser) declare(decl, data interface{}, scope *ast.Scope, kind ast.ObjKind, idents ...*ast.Ident) {
	for _, id := range idents {
		assert(id.Obj == nil, "identifier already declared or resolved")
		obj := ast.NewObj(kind, id.Name)
		// remember the corresponding declaration for redeclaration
		// errors and global variable resolution/typechecking phase
		obj.Decl = decl
		obj.Data = data
		id.Obj = obj
		if id.Name == "_" {
			continue
		}
		alt := scope.Insert(obj)
		if alt == nil || p.mode&DeclarationErrors == 0 {
			continue
		}
		prev := ""
		if pos := alt.Pos(); pos.IsValid() {
			prev = fmt.Sprintf("\n\tprevious declaration at %s", p.file.Position(pos))
		}
		p.error(id.Pos(), fmt.Sprintf("%s redeclared in this block%s", id.Name, prev))
	}
}
// shortVarDecl declares the identifiers on the left side of a ':='.
// Redeclared identifiers are rebound to the existing object; at least
// one non-blank identifier must be new, otherwise an error is reported
// (when DeclarationErrors is set).
func (p *parser) shortVarDecl(decl *ast.AssignStmt, list []ast.Expr) {
	// Go spec: A short variable declaration may redeclare variables
	// provided they were originally declared in the same block with
	// the same type, and at least one of the non-blank variables is new.
	n := 0 // number of new variables
	for _, x := range list {
		if ident, isIdent := x.(*ast.Ident); isIdent {
			assert(ident.Obj == nil, "identifier already declared or resolved")
			obj := ast.NewObj(ast.Var, ident.Name)
			// remember corresponding assignment for other tools
			obj.Decl = decl
			ident.Obj = obj
			if ident.Name != "_" {
				if alt := p.topScope.Insert(obj); alt != nil {
					ident.Obj = alt // redeclaration
				} else {
					n++ // new declaration
				}
			}
		} else {
			p.errorExpected(x.Pos(), "identifier on left side of :=")
		}
	}
	if n == 0 && p.mode&DeclarationErrors != 0 {
		p.error(list[0].Pos(), "no new variables on left side of :=")
	}
}
// The unresolved object is a sentinel to mark identifiers that have been added
// to the list of unresolved identifiers. The sentinel is only used for verifying
// internal consistency.
var unresolved = new(ast.Object)
// If x is an identifier, tryResolve attempts to resolve x by looking up
// the object it denotes. If no object is found and collectUnresolved is
// set, x is marked as unresolved and collected in the list of unresolved
// identifiers. Non-identifiers and the blank identifier are ignored.
func (p *parser) tryResolve(x ast.Expr, collectUnresolved bool) {
	// nothing to do if x is not an identifier or the blank identifier
	ident, _ := x.(*ast.Ident)
	if ident == nil {
		return
	}
	assert(ident.Obj == nil, "identifier already declared or resolved")
	if ident.Name == "_" {
		return
	}
	// try to resolve the identifier, innermost scope first
	for s := p.topScope; s != nil; s = s.Outer {
		if obj := s.Lookup(ident.Name); obj != nil {
			ident.Obj = obj
			return
		}
	}
	// all local scopes are known, so any unresolved identifier
	// must be found either in the file scope, package scope
	// (perhaps in another file), or universe scope --- collect
	// them so that they can be resolved later
	if collectUnresolved {
		ident.Obj = unresolved
		p.unresolved = append(p.unresolved, ident)
	}
}
// resolve is tryResolve with unresolved-identifier collection enabled.
func (p *parser) resolve(x ast.Expr) {
	p.tryResolve(x, true)
}
// ----------------------------------------------------------------------------
// Parsing support
// printTrace emits one line of trace output, prefixed by the current
// source position and indented by two dots per nesting level.
func (p *parser) printTrace(args ...interface{}) {
	const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
	const width = len(dots)
	position := p.file.Position(p.pos)
	fmt.Printf("%5d:%3d: ", position.Line, position.Column)
	indent := 2 * p.indent
	for indent > width {
		fmt.Print(dots)
		indent -= width
	}
	// indent <= width
	fmt.Print(dots[0:indent])
	fmt.Println(args...)
}
// trace prints an opening trace line and increases the nesting level.
// It returns p so it can be composed with un: defer un(trace(p, "...")).
func trace(p *parser, msg string) *parser {
	p.printTrace(msg, "(")
	p.indent++
	return p
}
// un decreases the nesting level and prints a closing trace line.
// Usage pattern: defer un(trace(p, "..."))
func un(p *parser) {
	p.indent--
	p.printTrace(")")
}
// next0 advances to the next token (including comments), optionally
// tracing the token just consumed.
func (p *parser) next0() {
	// Because of one-token look-ahead, print the previous token
	// when tracing as it provides a more readable output. The
	// very first token (!p.pos.IsValid()) is not initialized
	// (it is token.ILLEGAL), so don't print it .
	if p.trace && p.pos.IsValid() {
		s := p.tok.String()
		switch {
		case p.tok.IsLiteral():
			p.printTrace(s, p.lit)
		case p.tok.IsOperator(), p.tok.IsKeyword():
			p.printTrace("\"" + s + "\"")
		default:
			p.printTrace(s)
		}
	}
	p.pos, p.tok, p.lit = p.scanner.Scan()
}
// consumeComment consumes the current comment token and returns the
// comment node together with the line on which the comment ends.
func (p *parser) consumeComment() (comment *ast.Comment, endline int) {
	// /*-style comments may end on a different line than where they
	// start; count the contained newlines to find the end line.
	// (//-style comments never contain a newline.)
	endline = p.file.Line(p.pos)
	if p.lit[1] == '*' {
		endline += strings.Count(p.lit, "\n")
	}
	comment = &ast.Comment{Slash: p.pos, Text: p.lit}
	p.next0()
	return
}
// Consume a group of adjacent comments, add it to the parser's
// comments list, and return it together with the line at which
// the last comment in the group ends. A non-comment token or n
// empty lines terminate a comment group.
//
func (p *parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) {
	var list []*ast.Comment
	endline = p.file.Line(p.pos)
	// adjacent means: within n lines of where the previous comment ended
	for p.tok == token.COMMENT && p.file.Line(p.pos) <= endline+n {
		var comment *ast.Comment
		comment, endline = p.consumeComment()
		list = append(list, comment)
	}
	// add comment group to the comments list
	comments = &ast.CommentGroup{List: list}
	p.comments = append(p.comments, comments)
	return
}
// Advance to the next non-comment token. In the process, collect
// any comment groups encountered, and remember the last lead and
// line comments.
//
// A lead comment is a comment group that starts and ends in a
// line without any other tokens and that is followed by a non-comment
// token on the line immediately after the comment group.
//
// A line comment is a comment group that follows a non-comment
// token on the same line, and that has no tokens after it on the line
// where it ends.
//
// Lead and line comments may be considered documentation that is
// stored in the AST.
//
func (p *parser) next() {
	p.leadComment = nil
	p.lineComment = nil
	prev := p.pos
	p.next0()

	if p.tok == token.COMMENT {
		var comment *ast.CommentGroup
		var endline int

		if p.file.Line(p.pos) == p.file.Line(prev) {
			// The comment is on same line as the previous token; it
			// cannot be a lead comment but may be a line comment.
			comment, endline = p.consumeCommentGroup(0)
			if p.file.Line(p.pos) != endline || p.tok == token.EOF {
				// The next token is on a different line, thus
				// the last comment group is a line comment.
				p.lineComment = comment
			}
		}

		// consume successor comments, if any
		endline = -1
		for p.tok == token.COMMENT {
			comment, endline = p.consumeCommentGroup(1)
		}

		if endline+1 == p.file.Line(p.pos) {
			// The next token is following on the line immediately after the
			// comment group, thus the last comment group is a lead comment.
			p.leadComment = comment
		}
	}
}
// A bailout panic is raised to indicate early termination
// (too many errors; recovered by the top-level parse driver).
type bailout struct{}
// error records a parse error at pos. Unless AllErrors is set, errors
// on the same line as the previous one are dropped, and more than 10
// recorded errors abort parsing via a bailout panic.
func (p *parser) error(pos token.Pos, msg string) {
	epos := p.file.Position(pos)

	// If AllErrors is not set, discard errors reported on the same line
	// as the last recorded error and stop parsing if there are more than
	// 10 errors.
	if p.mode&AllErrors == 0 {
		n := len(p.errors)
		if n > 0 && p.errors[n-1].Pos.Line == epos.Line {
			return // discard - likely a spurious error
		}
		if n > 10 {
			panic(bailout{})
		}
	}

	p.errors.Add(epos, msg)
}
// errorExpected reports an "expected ..." error at pos. When the error
// is at the current token, the message also describes what was found.
func (p *parser) errorExpected(pos token.Pos, msg string) {
	msg = "expected " + msg
	if pos == p.pos {
		// the error happened at the current position;
		// make the error message more specific
		if p.tok == token.SEMICOLON && p.lit == "\n" {
			msg += ", found newline"
		} else if p.tok.IsLiteral() {
			// print 123 rather than 'INT', etc.
			msg += ", found " + p.lit
		} else {
			msg += ", found '" + p.tok.String() + "'"
		}
	}
	p.error(pos, msg)
}
// expect consumes the current token, reporting an error first if it is
// not tok. It always advances and returns the position of the token
// that was (expected to be) consumed.
func (p *parser) expect(tok token.Token) token.Pos {
	pos := p.pos
	if p.tok != tok {
		p.errorExpected(pos, "'"+tok.String()+"'")
	}
	p.next() // make progress
	return pos
}
// expectClosing is like expect but provides a better error message
// for the common case of a missing comma before a newline.
//
func (p *parser) expectClosing(tok token.Token, context string) token.Pos {
	if p.tok != tok && p.tok == token.SEMICOLON && p.lit == "\n" {
		// an automatically inserted semicolon: the user forgot a comma
		p.error(p.pos, "missing ',' before newline in "+context)
		p.next()
	}
	return p.expect(tok)
}
// expectSemi consumes a statement-terminating semicolon. It tolerates a
// ',' (consumed with a complaint, via fallthrough), accepts the optional
// omission before ')' or '}', and otherwise reports an error and
// advances to the next statement start.
func (p *parser) expectSemi() {
	// semicolon is optional before a closing ')' or '}'
	if p.tok != token.RPAREN && p.tok != token.RBRACE {
		switch p.tok {
		case token.COMMA:
			// permit a ',' instead of a ';' but complain
			p.errorExpected(p.pos, "';'")
			fallthrough
		case token.SEMICOLON:
			p.next()
		default:
			p.errorExpected(p.pos, "';'")
			p.advance(stmtStart)
		}
	}
}
// atComma reports whether the parser should proceed as if a comma were
// present between list elements. A real comma yields true; the follow
// token yields false; anything else is reported as a missing comma and
// treated as if one were inserted (true).
func (p *parser) atComma(context string, follow token.Token) bool {
	if p.tok == token.COMMA {
		return true
	}
	if p.tok == follow {
		return false
	}
	msg := "missing ','"
	if p.tok == token.SEMICOLON && p.lit == "\n" {
		msg += " before newline"
	}
	p.error(p.pos, msg+" in "+context)
	return true // "insert" comma and continue
}
// assert panics with an internal-error message when cond is false.
// It guards parser invariants; a failure indicates a parser bug.
func assert(cond bool, msg string) {
	if cond {
		return
	}
	panic("wa-lang.org/wa/internal/parser internal error: " + msg)
}
// advance consumes tokens until the current token p.tok
// is in the 'to' set, or token.EOF. For error recovery.
func (p *parser) advance(to map[token.Token]bool) {
	for ; p.tok != token.EOF; p.next() {
		if to[p.tok] {
			// Return only if parser made some progress since last
			// sync or if it has not reached 10 advance calls without
			// progress. Otherwise consume at least one token to
			// avoid an endless parser loop (it is possible that
			// both parseOperand and parseStmt call advance and
			// correctly do not advance, thus the need for the
			// invocation limit p.syncCnt).
			if p.pos == p.syncPos && p.syncCnt < 10 {
				p.syncCnt++
				return
			}
			if p.pos > p.syncPos {
				p.syncPos = p.pos
				p.syncCnt = 0
				return
			}
			// Reaching here indicates a parser bug, likely an
			// incorrect token list in this function, but it only
			// leads to skipping of possibly correct code if a
			// previous error is present, and thus is preferred
			// over a non-terminating parse.
		}
	}
}
// stmtStart is the recovery set of tokens that may begin a statement.
var stmtStart = map[token.Token]bool{
	token.BREAK:    true,
	token.CONST:    true,
	token.CONTINUE: true,
	token.DEFER:    true,
	token.FOR:      true,
	token.IF:       true,
	token.RETURN:   true,
	token.SWITCH:   true,
	token.TYPE:     true,
	token.VAR:      true,
}
// declStart is the recovery set of tokens that may begin a declaration.
var declStart = map[token.Token]bool{
	token.CONST: true,
	token.TYPE:  true,
	token.VAR:   true,
}
// exprEnd is the recovery set of tokens that may follow an expression.
var exprEnd = map[token.Token]bool{
	token.COMMA:     true,
	token.COLON:     true,
	token.SEMICOLON: true,
	token.RPAREN:    true,
	token.RBRACK:    true,
	token.RBRACE:    true,
}
// safePos returns a valid file position for a given position: If pos
// is valid to begin with, safePos returns pos. If pos is out-of-range,
// safePos returns the EOF position.
//
// This is hack to work around "artificial" end positions in the AST which
// are computed by adding 1 to (presumably valid) token positions. If the
// token positions are invalid due to parse errors, the resulting end position
// may be past the file's EOF position, which would lead to panics if used
// later on.
//
func (p *parser) safePos(pos token.Pos) (res token.Pos) {
	defer func() {
		// Offset panics on out-of-range positions; substitute EOF.
		if recover() != nil {
			res = token.Pos(p.file.Base() + p.file.Size()) // EOF position
		}
	}()
	_ = p.file.Offset(pos) // trigger a panic if position is out-of-range
	return pos
}
// ----------------------------------------------------------------------------
// Identifiers
// parseIdent parses an identifier. On a non-identifier token it reports
// the error via expect (which also advances) and substitutes "_".
func (p *parser) parseIdent() *ast.Ident {
	pos := p.pos
	if p.tok != token.IDENT {
		p.expect(token.IDENT) // use expect() error handling
		return &ast.Ident{NamePos: pos, Name: "_"}
	}
	name := p.lit
	p.next()
	return &ast.Ident{NamePos: pos, Name: name}
}
// parseIdentList parses a comma-separated list of identifiers.
func (p *parser) parseIdentList() (list []*ast.Ident) {
	if p.trace {
		defer un(trace(p, "IdentList"))
	}
	for {
		list = append(list, p.parseIdent())
		if p.tok != token.COMMA {
			return
		}
		p.next()
	}
}
// ----------------------------------------------------------------------------
// Common productions
// parseExprList parses a comma-separated list of expressions.
// If lhs is set, result list elements which are identifiers are not resolved.
func (p *parser) parseExprList(lhs bool) (list []ast.Expr) {
	if p.trace {
		defer un(trace(p, "ExpressionList"))
	}
	for {
		list = append(list, p.checkExpr(p.parseExpr(lhs)))
		if p.tok != token.COMMA {
			return
		}
		p.next()
	}
}
// parseLhsList parses an expression list appearing on the left-hand
// side of an assignment or similar position. Identifier resolution is
// deferred for ':=' and ':' (see the case comments below).
func (p *parser) parseLhsList() []ast.Expr {
	old := p.inRhs
	p.inRhs = false
	list := p.parseExprList(true)
	switch p.tok {
	case token.DEFINE:
		// lhs of a short variable declaration
		// but doesn't enter scope until later:
		// caller must call p.shortVarDecl(p.makeIdentList(list))
		// at appropriate time.
	case token.COLON:
		// lhs of a label declaration or a communication clause of a select
		// statement (parseLhsList is not called when parsing the case clause
		// of a switch statement):
		// - labels are declared by the caller of parseLhsList
		// - for communication clauses, if there is a stand-alone identifier
		//   followed by a colon, we have a syntax error; there is no need
		//   to resolve the identifier in that case
	default:
		// identifiers must be declared elsewhere
		for _, x := range list {
			p.resolve(x)
		}
	}
	p.inRhs = old
	return list
}
// parseRhsList parses a right-hand-side expression list, with the
// parser temporarily marked as being in rhs position.
func (p *parser) parseRhsList() []ast.Expr {
	saved := p.inRhs
	p.inRhs = true
	defer func() { p.inRhs = saved }()
	return p.parseExprList(false)
}
// ----------------------------------------------------------------------------
// Types
// parseType parses a type. If no type is present, it reports the error,
// advances past the offending tokens, and returns a BadExpr.
func (p *parser) parseType() ast.Expr {
	if p.trace {
		defer un(trace(p, "Type"))
	}
	if typ := p.tryType(); typ != nil {
		return typ
	}
	pos := p.pos
	p.errorExpected(pos, "type")
	p.advance(exprEnd)
	return &ast.BadExpr{From: pos, To: p.pos}
}
// parseTypeName parses a (possibly qualified) type name.
// If the result is an identifier, it is not resolved.
func (p *parser) parseTypeName() ast.Expr {
	if p.trace {
		defer un(trace(p, "TypeName"))
	}
	ident := p.parseIdent()
	// don't resolve ident yet - it may be a parameter or field name
	if p.tok == token.PERIOD {
		// ident is a package name
		p.next()
		p.resolve(ident)
		sel := p.parseIdent()
		return &ast.SelectorExpr{X: ident, Sel: sel}
	}
	return ident
}
// parseArrayType parses an array or slice type starting at '['. Inside
// the brackets it accepts '...' (for [...]T composite literals), an
// expression (fixed array length), or nothing (slice type).
//
// Idiom fix: the local previously named 'len' shadowed the builtin;
// renamed to 'length' (no behavior change).
func (p *parser) parseArrayType() ast.Expr {
	if p.trace {
		defer un(trace(p, "ArrayType"))
	}

	lbrack := p.expect(token.LBRACK)
	p.exprLev++
	var length ast.Expr // nil for slice types
	// always permit ellipsis for more fault-tolerant parsing
	if p.tok == token.ELLIPSIS {
		length = &ast.Ellipsis{Ellipsis: p.pos}
		p.next()
	} else if p.tok != token.RBRACK {
		length = p.parseRhs()
	}
	p.exprLev--
	p.expect(token.RBRACK)
	elt := p.parseType()

	return &ast.ArrayType{Lbrack: lbrack, Len: length, Elt: elt}
}
// makeIdentList converts a list of expressions into a list of
// identifiers. Non-identifiers are reported (unless already a BadExpr)
// and replaced by the blank identifier at the same position.
func (p *parser) makeIdentList(list []ast.Expr) []*ast.Ident {
	idents := make([]*ast.Ident, len(list))
	for i, expr := range list {
		if id, ok := expr.(*ast.Ident); ok {
			idents[i] = id
			continue
		}
		if _, isBad := expr.(*ast.BadExpr); !isBad {
			// only report error if it's a new one
			p.errorExpected(expr.Pos(), "identifier")
		}
		idents[i] = &ast.Ident{NamePos: expr.Pos(), Name: "_"}
	}
	return idents
}
// parseFieldDecl parses one struct field declaration: either an
// identifier list followed by a type, or a single anonymous (embedded)
// field. Declared field names are inserted into the struct scope.
func (p *parser) parseFieldDecl(scope *ast.Scope) *ast.Field {
	if p.trace {
		defer un(trace(p, "FieldDecl"))
	}

	doc := p.leadComment

	// 1st FieldDecl
	// A type name used as an anonymous field looks like a field identifier.
	var list []ast.Expr
	for {
		list = append(list, p.parseVarType(false))
		if p.tok != token.COMMA {
			break
		}
		p.next()
	}

	typ := p.tryVarType(false)

	// analyze case
	var idents []*ast.Ident
	if typ != nil {
		// IdentifierList Type
		idents = p.makeIdentList(list)
	} else {
		// ["*"] TypeName (AnonymousField)
		typ = list[0] // we always have at least one element
		if n := len(list); n > 1 {
			p.errorExpected(p.pos, "type")
			typ = &ast.BadExpr{From: p.pos, To: p.pos}
		} else if !isTypeName(deref(typ)) {
			p.errorExpected(typ.Pos(), "anonymous field")
			typ = &ast.BadExpr{From: typ.Pos(), To: p.safePos(typ.End())}
		}
	}

	// Tag
	var tag *ast.BasicLit
	if p.tok == token.STRING {
		tag = &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
		p.next()
	}

	p.expectSemi() // call before accessing p.linecomment

	field := &ast.Field{Doc: doc, Names: idents, Type: typ, Tag: tag, Comment: p.lineComment}
	p.declare(field, nil, scope, ast.Var, idents...)
	p.resolve(typ)

	return field
}
// parseStructType parses a struct type starting at the 'struct' keyword.
func (p *parser) parseStructType() *ast.StructType {
	if p.trace {
		defer un(trace(p, "StructType"))
	}

	structPos := p.expect(token.STRUCT)
	opening := p.expect(token.LBRACE)
	fieldScope := ast.NewScope(nil) // struct scope
	var fields []*ast.Field
	for p.tok == token.IDENT || p.tok == token.MUL || p.tok == token.LPAREN {
		// a field declaration cannot start with a '(' but we accept
		// it here for more robust parsing and better error messages
		// (parseFieldDecl will check and complain if necessary)
		fields = append(fields, p.parseFieldDecl(fieldScope))
	}
	closing := p.expect(token.RBRACE)

	return &ast.StructType{
		Struct: structPos,
		Fields: &ast.FieldList{
			Opening: opening,
			List:    fields,
			Closing: closing,
		},
	}
}
// parsePointerType parses a pointer type starting at '*'.
func (p *parser) parsePointerType() *ast.StarExpr {
	if p.trace {
		defer un(trace(p, "PointerType"))
	}
	starPos := p.expect(token.MUL)
	elem := p.parseType()
	return &ast.StarExpr{Star: starPos, X: elem}
}
// tryVarType parses a type, additionally accepting '...T' (variadic)
// when isParam is set. It returns nil if no type is present.
// If the result is an identifier, it is not resolved.
func (p *parser) tryVarType(isParam bool) ast.Expr {
	if isParam && p.tok == token.ELLIPSIS {
		pos := p.pos
		p.next()
		typ := p.tryIdentOrType() // don't use parseType so we can provide better error message
		if typ != nil {
			p.resolve(typ)
		} else {
			p.error(pos, "'...' parameter is missing type")
			typ = &ast.BadExpr{From: pos, To: p.pos}
		}
		return &ast.Ellipsis{Ellipsis: pos, Elt: typ}
	}
	return p.tryIdentOrType()
}
// parseVarType is tryVarType with mandatory presence: when no type is
// found it reports the error, consumes one token, and returns a BadExpr.
// If the result is an identifier, it is not resolved.
func (p *parser) parseVarType(isParam bool) ast.Expr {
	typ := p.tryVarType(isParam)
	if typ != nil {
		return typ
	}
	pos := p.pos
	p.errorExpected(pos, "type")
	p.next() // make progress
	return &ast.BadExpr{From: pos, To: p.pos}
}
// parseParameterList parses the contents of a parameter list: either
// named groups ("a, b T, c U") or a bare list of types ("T, U").
// Because identifiers and type names are indistinguishable up front, it
// first collects a list of expressions, then disambiguates based on
// whether a trailing type follows. Named parameters are declared in scope.
func (p *parser) parseParameterList(scope *ast.Scope, ellipsisOk bool) (params []*ast.Field) {
	if p.trace {
		defer un(trace(p, "ParameterList"))
	}

	// 1st ParameterDecl
	// A list of identifiers looks like a list of type names.
	var list []ast.Expr
	for {
		list = append(list, p.parseVarType(ellipsisOk))
		if p.tok != token.COMMA {
			break
		}
		p.next()
		if p.tok == token.RPAREN {
			break
		}
	}

	// analyze case
	if typ := p.tryVarType(ellipsisOk); typ != nil {
		// IdentifierList Type
		idents := p.makeIdentList(list)
		field := &ast.Field{Names: idents, Type: typ}
		params = append(params, field)
		// Go spec: The scope of an identifier denoting a function
		// parameter or result variable is the function body.
		p.declare(field, nil, scope, ast.Var, idents...)
		p.resolve(typ)
		if !p.atComma("parameter list", token.RPAREN) {
			return
		}
		p.next()
		for p.tok != token.RPAREN && p.tok != token.EOF {
			idents := p.parseIdentList()
			typ := p.parseVarType(ellipsisOk)
			field := &ast.Field{Names: idents, Type: typ}
			params = append(params, field)
			// Go spec: The scope of an identifier denoting a function
			// parameter or result variable is the function body.
			p.declare(field, nil, scope, ast.Var, idents...)
			p.resolve(typ)
			if !p.atComma("parameter list", token.RPAREN) {
				break
			}
			p.next()
		}
		return
	}

	// Type { "," Type } (anonymous parameters)
	params = make([]*ast.Field, len(list))
	for i, typ := range list {
		p.resolve(typ)
		params[i] = &ast.Field{Type: typ}
	}
	return
}
// parseParameters parses a parenthesized (possibly empty) parameter list.
func (p *parser) parseParameters(scope *ast.Scope, ellipsisOk bool) *ast.FieldList {
	if p.trace {
		defer un(trace(p, "Parameters"))
	}

	opening := p.expect(token.LPAREN)
	var fields []*ast.Field
	if p.tok != token.RPAREN {
		fields = p.parseParameterList(scope, ellipsisOk)
	}
	closing := p.expect(token.RPAREN)

	return &ast.FieldList{Opening: opening, List: fields, Closing: closing}
}
// parseResult parses a function result: either a parenthesized
// parameter list, a single bare type, or nothing (nil).
func (p *parser) parseResult(scope *ast.Scope) *ast.FieldList {
	if p.trace {
		defer un(trace(p, "Result"))
	}

	if p.tok == token.LPAREN {
		return p.parseParameters(scope, false)
	}
	if typ := p.tryType(); typ != nil {
		return &ast.FieldList{List: []*ast.Field{{Type: typ}}}
	}
	return nil
}
// parseSignature parses a function signature (parameters and optional
// results), declaring parameter names in scope.
func (p *parser) parseSignature(scope *ast.Scope) (params, results *ast.FieldList) {
	if p.trace {
		defer un(trace(p, "Signature"))
	}

	params = p.parseParameters(scope, true)
	results = p.parseResult(scope)

	return
}
// parseFuncType parses a function type starting at the 'func' keyword.
// It also returns the freshly created function scope so the caller can
// parse a following body in it (see parseFuncTypeOrLit).
func (p *parser) parseFuncType() (*ast.FuncType, *ast.Scope) {
	if p.trace {
		defer un(trace(p, "FuncType"))
	}

	pos := p.expect(token.FUNC)
	scope := ast.NewScope(p.topScope) // function scope
	params, results := p.parseSignature(scope)

	return &ast.FuncType{Func: pos, Params: params, Results: results}, scope
}
// parseMethodSpec parses one entry of an interface body: either a
// method (name followed by a signature) or an embedded interface type.
// Method names are declared in the enclosing interface scope (the
// 'scope' parameter); note the inner 'scope' deliberately shadows it
// for the method's own parameter scope.
func (p *parser) parseMethodSpec(scope *ast.Scope) *ast.Field {
	if p.trace {
		defer un(trace(p, "MethodSpec"))
	}

	doc := p.leadComment
	var idents []*ast.Ident
	var typ ast.Expr
	x := p.parseTypeName()
	if ident, isIdent := x.(*ast.Ident); isIdent && p.tok == token.LPAREN {
		// method
		idents = []*ast.Ident{ident}
		scope := ast.NewScope(nil) // method scope
		params, results := p.parseSignature(scope)
		typ = &ast.FuncType{Func: token.NoPos, Params: params, Results: results}
	} else {
		// embedded interface
		typ = x
		p.resolve(typ)
	}
	p.expectSemi() // call before accessing p.linecomment

	spec := &ast.Field{Doc: doc, Names: idents, Type: typ, Comment: p.lineComment}
	p.declare(spec, nil, scope, ast.Fun, idents...)

	return spec
}
// parseInterfaceType parses an interface type starting at the
// 'interface' keyword.
func (p *parser) parseInterfaceType() *ast.InterfaceType {
	if p.trace {
		defer un(trace(p, "InterfaceType"))
	}

	ifacePos := p.expect(token.INTERFACE)
	opening := p.expect(token.LBRACE)
	methodScope := ast.NewScope(nil) // interface scope
	var methods []*ast.Field
	for p.tok == token.IDENT {
		methods = append(methods, p.parseMethodSpec(methodScope))
	}
	closing := p.expect(token.RBRACE)

	return &ast.InterfaceType{
		Interface: ifacePos,
		Methods: &ast.FieldList{
			Opening: opening,
			List:    methods,
			Closing: closing,
		},
	}
}
// parseMapType parses a map type starting at the 'map' keyword.
func (p *parser) parseMapType() *ast.MapType {
	if p.trace {
		defer un(trace(p, "MapType"))
	}

	mapPos := p.expect(token.MAP)
	p.expect(token.LBRACK)
	keyType := p.parseType()
	p.expect(token.RBRACK)
	valueType := p.parseType()

	return &ast.MapType{Map: mapPos, Key: keyType, Value: valueType}
}
// tryIdentOrType dispatches on the current token to the appropriate
// type parser, or returns nil if the token cannot start a type.
// If the result is an identifier, it is not resolved.
func (p *parser) tryIdentOrType() ast.Expr {
	switch p.tok {
	case token.IDENT:
		return p.parseTypeName()
	case token.LBRACK:
		return p.parseArrayType()
	case token.STRUCT:
		return p.parseStructType()
	case token.MUL:
		return p.parsePointerType()
	case token.FUNC:
		typ, _ := p.parseFuncType()
		return typ
	case token.INTERFACE:
		return p.parseInterfaceType()
	case token.MAP:
		return p.parseMapType()
	case token.LPAREN:
		lparen := p.pos
		p.next()
		typ := p.parseType()
		rparen := p.expect(token.RPAREN)
		return &ast.ParenExpr{Lparen: lparen, X: typ, Rparen: rparen}
	}

	// no type found
	return nil
}
// tryType is tryIdentOrType plus identifier resolution; it returns nil
// when the current token cannot start a type.
func (p *parser) tryType() ast.Expr {
	if typ := p.tryIdentOrType(); typ != nil {
		p.resolve(typ)
		return typ
	}
	return nil
}
// ----------------------------------------------------------------------------
// Blocks
// parseStmtList parses statements until a token that terminates a
// statement list (case/default label, closing brace, or EOF).
func (p *parser) parseStmtList() (list []ast.Stmt) {
	if p.trace {
		defer un(trace(p, "StatementList"))
	}

	for {
		switch p.tok {
		case token.CASE, token.DEFAULT, token.RBRACE, token.EOF:
			return
		}
		list = append(list, p.parseStmt())
	}
}
// parseBody parses a function body using the given (pre-created)
// function scope. Note the asymmetry: the scope is installed directly
// (not via openScope) because it was created by parseFuncType, but it
// is torn down with the regular closeScope.
func (p *parser) parseBody(scope *ast.Scope) *ast.BlockStmt {
	if p.trace {
		defer un(trace(p, "Body"))
	}

	lbrace := p.expect(token.LBRACE)
	p.topScope = scope // open function scope
	p.openLabelScope()
	list := p.parseStmtList()
	p.closeLabelScope()
	p.closeScope()
	rbrace := p.expect(token.RBRACE)

	return &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
}
// parseBlockStmt parses a brace-delimited statement block in a fresh scope.
func (p *parser) parseBlockStmt() *ast.BlockStmt {
	if p.trace {
		defer un(trace(p, "BlockStmt"))
	}

	opening := p.expect(token.LBRACE)
	p.openScope()
	stmts := p.parseStmtList()
	p.closeScope()
	closing := p.expect(token.RBRACE)

	return &ast.BlockStmt{Lbrace: opening, List: stmts, Rbrace: closing}
}
// ----------------------------------------------------------------------------
// Expressions
// parseFuncTypeOrLit parses a function type, and, if a body follows,
// extends it into a function literal.
func (p *parser) parseFuncTypeOrLit() ast.Expr {
	if p.trace {
		defer un(trace(p, "FuncTypeOrLit"))
	}

	typ, scope := p.parseFuncType()
	if p.tok != token.LBRACE {
		return typ // function type only
	}

	p.exprLev++
	body := p.parseBody(scope)
	p.exprLev--

	return &ast.FuncLit{Type: typ, Body: body}
}
// parseOperand may return an expression or a raw type (incl. array
// types of the form [...]T. Callers must verify the result.
// If lhs is set and the result is an identifier, it is not resolved.
//
func (p *parser) parseOperand(lhs bool) ast.Expr {
	if p.trace {
		defer un(trace(p, "Operand"))
	}

	switch p.tok {
	case token.IDENT:
		x := p.parseIdent()
		if !lhs {
			p.resolve(x)
		}
		return x

	case token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING:
		x := &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
		p.next()
		return x

	case token.LPAREN:
		lparen := p.pos
		p.next()
		p.exprLev++
		x := p.parseRhsOrType() // types may be parenthesized: (some type)
		p.exprLev--
		rparen := p.expect(token.RPAREN)
		return &ast.ParenExpr{Lparen: lparen, X: x, Rparen: rparen}

	case token.FUNC:
		return p.parseFuncTypeOrLit()
	}

	if typ := p.tryIdentOrType(); typ != nil {
		// could be type for composite literal or conversion
		// (the IDENT case above already handled plain identifiers)
		_, isIdent := typ.(*ast.Ident)
		assert(!isIdent, "type cannot be identifier")
		return typ
	}

	// we have an error
	pos := p.pos
	p.errorExpected(pos, "operand")
	p.advance(stmtStart)
	return &ast.BadExpr{From: pos, To: p.pos}
}
// parseSelector parses the selector part of x.Sel; the '.' has already
// been consumed.
func (p *parser) parseSelector(x ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "Selector"))
	}
	return &ast.SelectorExpr{X: x, Sel: p.parseIdent()}
}
// parseTypeAssertion parses x.(T) or x.(type); the '.' has already been
// consumed. For the x.(type) form (type switch) the Type field is nil.
func (p *parser) parseTypeAssertion(x ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "TypeAssertion"))
	}

	opening := p.expect(token.LPAREN)
	var typ ast.Expr
	if p.tok == token.TYPE {
		// type switch: typ == nil
		p.next()
	} else {
		typ = p.parseType()
	}
	closing := p.expect(token.RPAREN)

	return &ast.TypeAssertExpr{X: x, Type: typ, Lparen: opening, Rparen: closing}
}
// parseIndexOrSlice parses x[i], x[lo:hi], or x[lo:hi:max] starting at
// '['. The number of colons seen decides between IndexExpr and
// SliceExpr; missing 2nd/3rd indices of a 3-index slice are reported
// here and replaced by BadExprs.
func (p *parser) parseIndexOrSlice(x ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "IndexOrSlice"))
	}

	const N = 3 // change the 3 to 2 to disable 3-index slices
	lbrack := p.expect(token.LBRACK)
	p.exprLev++
	var index [N]ast.Expr
	var colons [N - 1]token.Pos
	if p.tok != token.COLON {
		index[0] = p.parseRhs()
	}
	ncolons := 0
	for p.tok == token.COLON && ncolons < len(colons) {
		colons[ncolons] = p.pos
		ncolons++
		p.next()
		if p.tok != token.COLON && p.tok != token.RBRACK && p.tok != token.EOF {
			index[ncolons] = p.parseRhs()
		}
	}
	p.exprLev--
	rbrack := p.expect(token.RBRACK)

	if ncolons > 0 {
		// slice expression
		slice3 := false
		if ncolons == 2 {
			slice3 = true
			// Check presence of 2nd and 3rd index here rather than during type-checking
			// to prevent erroneous programs from passing through gofmt (was issue 7305).
			if index[1] == nil {
				p.error(colons[0], "2nd index required in 3-index slice")
				index[1] = &ast.BadExpr{From: colons[0] + 1, To: colons[1]}
			}
			if index[2] == nil {
				p.error(colons[1], "3rd index required in 3-index slice")
				index[2] = &ast.BadExpr{From: colons[1] + 1, To: rbrack}
			}
		}
		return &ast.SliceExpr{X: x, Lbrack: lbrack, Low: index[0], High: index[1], Max: index[2], Slice3: slice3, Rbrack: rbrack}
	}

	return &ast.IndexExpr{X: x, Lbrack: lbrack, Index: index[0], Rbrack: rbrack}
}
// parseCallOrConversion parses the argument list of a call or
// conversion starting at '('. An argument list may end in '...'
// (variadic call); nothing may follow the ellipsis.
func (p *parser) parseCallOrConversion(fun ast.Expr) *ast.CallExpr {
	if p.trace {
		defer un(trace(p, "CallOrConversion"))
	}

	lparen := p.expect(token.LPAREN)
	p.exprLev++
	var list []ast.Expr
	var ellipsis token.Pos
	for p.tok != token.RPAREN && p.tok != token.EOF && !ellipsis.IsValid() {
		list = append(list, p.parseRhsOrType()) // builtins may expect a type: make(some type, ...)
		if p.tok == token.ELLIPSIS {
			ellipsis = p.pos
			p.next()
		}
		if !p.atComma("argument list", token.RPAREN) {
			break
		}
		p.next()
	}
	p.exprLev--
	rparen := p.expectClosing(token.RPAREN, "argument list")

	return &ast.CallExpr{Fun: fun, Lparen: lparen, Args: list, Ellipsis: ellipsis, Rparen: rparen}
}
// parseValue parses a composite-literal element value (or key, when
// keyOk is set). Keys get special resolution treatment, see below.
// (The trace label "Element" is inherited from the upstream parser.)
func (p *parser) parseValue(keyOk bool) ast.Expr {
	if p.trace {
		defer un(trace(p, "Element"))
	}

	if p.tok == token.LBRACE {
		return p.parseLiteralValue(nil)
	}

	// Because the parser doesn't know the composite literal type, it cannot
	// know if a key that's an identifier is a struct field name or a name
	// denoting a value. The former is not resolved by the parser or the
	// resolver.
	//
	// Instead, _try_ to resolve such a key if possible. If it resolves,
	// it a) has correctly resolved, or b) incorrectly resolved because
	// the key is a struct field with a name matching another identifier.
	// In the former case we are done, and in the latter case we don't
	// care because the type checker will do a separate field lookup.
	//
	// If the key does not resolve, it a) must be defined at the top
	// level in another file of the same package, the universe scope, or be
	// undeclared; or b) it is a struct field. In the former case, the type
	// checker can do a top-level lookup, and in the latter case it will do
	// a separate field lookup.
	x := p.checkExpr(p.parseExpr(keyOk))
	if keyOk {
		if p.tok == token.COLON {
			// Try to resolve the key but don't collect it
			// as unresolved identifier if it fails so that
			// we don't get (possibly false) errors about
			// undeclared names.
			p.tryResolve(x, false)
		} else {
			// not a key
			p.resolve(x)
		}
	}

	return x
}
// parseElement parses one composite-literal element, optionally of the
// form key: value.
func (p *parser) parseElement() ast.Expr {
	if p.trace {
		defer un(trace(p, "Element"))
	}

	key := p.parseValue(true)
	if p.tok != token.COLON {
		return key
	}
	colonPos := p.pos
	p.next()
	return &ast.KeyValueExpr{Key: key, Colon: colonPos, Value: p.parseValue(false)}
}
// parseElementList parses the comma-separated elements of a composite
// literal, up to (but not including) the closing brace.
func (p *parser) parseElementList() (list []ast.Expr) {
	if p.trace {
		defer un(trace(p, "ElementList"))
	}

	for {
		if p.tok == token.RBRACE || p.tok == token.EOF {
			return
		}
		list = append(list, p.parseElement())
		if !p.atComma("composite literal", token.RBRACE) {
			return
		}
		p.next()
	}
}
// parseLiteralValue parses the braced element list of a composite
// literal; typ is the (possibly nil) literal type parsed by the caller.
func (p *parser) parseLiteralValue(typ ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "LiteralValue"))
	}

	opening := p.expect(token.LBRACE)
	p.exprLev++
	var elements []ast.Expr
	if p.tok != token.RBRACE {
		elements = p.parseElementList()
	}
	p.exprLev--
	closing := p.expectClosing(token.RBRACE, "composite literal")

	return &ast.CompositeLit{Type: typ, Lbrace: opening, Elts: elements, Rbrace: closing}
}
// checkExpr checks that x is an expression (and not a type).
// The empty cases enumerate the node kinds accepted as-is; anything
// else is reported and replaced by a BadExpr.
func (p *parser) checkExpr(x ast.Expr) ast.Expr {
	switch unparen(x).(type) {
	case *ast.BadExpr:
	case *ast.Ident:
	case *ast.BasicLit:
	case *ast.FuncLit:
	case *ast.CompositeLit:
	case *ast.ParenExpr:
		// unparen has removed all parentheses
		panic("unreachable")
	case *ast.SelectorExpr:
	case *ast.IndexExpr:
	case *ast.SliceExpr:
	case *ast.TypeAssertExpr:
		// If t.Type == nil we have a type assertion of the form
		// y.(type), which is only allowed in type switch expressions.
		// It's hard to exclude those but for the case where we are in
		// a type switch. Instead be lenient and test this in the type
		// checker.
	case *ast.CallExpr:
	case *ast.StarExpr:
	case *ast.UnaryExpr:
	case *ast.BinaryExpr:
	default:
		// all other nodes are not proper expressions
		p.errorExpected(x.Pos(), "expression")
		x = &ast.BadExpr{From: x.Pos(), To: p.safePos(x.End())}
	}
	return x
}
// isTypeName reports whether x is a (qualified) TypeName.
func isTypeName(x ast.Expr) bool {
	switch t := x.(type) {
	case *ast.BadExpr, *ast.Ident:
		return true
	case *ast.SelectorExpr:
		// qualified name: selector base must itself be an identifier
		_, ok := t.X.(*ast.Ident)
		return ok
	}
	return false // all other nodes are not type names
}
// isLiteralType reports whether x is a legal composite literal type.
func isLiteralType(x ast.Expr) bool {
	switch t := x.(type) {
	case *ast.BadExpr, *ast.Ident, *ast.ArrayType, *ast.StructType, *ast.MapType:
		return true
	case *ast.SelectorExpr:
		// qualified type name: base must be an identifier
		_, ok := t.X.(*ast.Ident)
		return ok
	}
	return false // all other nodes are not legal composite literal types
}
// If x is of the form *T, deref returns T, otherwise it returns x.
func deref(x ast.Expr) ast.Expr {
	if star, ok := x.(*ast.StarExpr); ok {
		return star.X
	}
	return x
}
// If x is of the form (T), unparen returns the innermost non-parenthesized
// expression T, otherwise it returns x. (Iterative form of the usual
// recursive definition.)
func unparen(x ast.Expr) ast.Expr {
	for {
		paren, ok := x.(*ast.ParenExpr)
		if !ok {
			return x
		}
		x = paren.X
	}
}
// checkExprOrType checks that x is an expression or a type
// (and not a raw type such as [...]T).
func (p *parser) checkExprOrType(x ast.Expr) ast.Expr {
	switch t := unparen(x).(type) {
	case *ast.ParenExpr:
		// unparen strips all parentheses
		panic("unreachable")
	case *ast.UnaryExpr:
		// acceptable as-is
	case *ast.ArrayType:
		// [...]T is only legal inside a composite literal
		if ellipsis, ok := t.Len.(*ast.Ellipsis); ok {
			p.error(ellipsis.Pos(), "expected array length, found '...'")
			x = &ast.BadExpr{From: x.Pos(), To: p.safePos(x.End())}
		}
	}
	// all other nodes are expressions or types
	return x
}
// If lhs is set and the result is an identifier, it is not resolved.
//
// parsePrimaryExpr parses an operand followed by any number of suffixes:
// selectors/type assertions (.), index/slice ([), calls ((), and composite
// literal values ({). Once a suffix is consumed, x is used as a value, so a
// pending lhs identifier is resolved at that point and lhs is cleared.
func (p *parser) parsePrimaryExpr(lhs bool) ast.Expr {
	if p.trace {
		defer un(trace(p, "PrimaryExpr"))
	}
	x := p.parseOperand(lhs)
L:
	for {
		switch p.tok {
		case token.PERIOD:
			p.next()
			if lhs {
				p.resolve(x)
			}
			switch p.tok {
			case token.IDENT:
				x = p.parseSelector(p.checkExprOrType(x))
			case token.LPAREN:
				x = p.parseTypeAssertion(p.checkExpr(x))
			default:
				// synthesize a blank selector so parsing can continue
				pos := p.pos
				p.errorExpected(pos, "selector or type assertion")
				p.next() // make progress
				sel := &ast.Ident{NamePos: pos, Name: "_"}
				x = &ast.SelectorExpr{X: x, Sel: sel}
			}
		case token.LBRACK:
			if lhs {
				p.resolve(x)
			}
			x = p.parseIndexOrSlice(p.checkExpr(x))
		case token.LPAREN:
			if lhs {
				p.resolve(x)
			}
			x = p.parseCallOrConversion(p.checkExprOrType(x))
		case token.LBRACE:
			// Only treat { as a composite literal when x can be a literal
			// type and we are not in a context (exprLev < 0) where a bare
			// type name would be ambiguous with a following block.
			if isLiteralType(x) && (p.exprLev >= 0 || !isTypeName(x)) {
				if lhs {
					p.resolve(x)
				}
				x = p.parseLiteralValue(x)
			} else {
				break L
			}
		default:
			break L
		}
		lhs = false // no need to try to resolve again
	}
	return x
}
// If lhs is set and the result is an identifier, it is not resolved.
// parseUnaryExpr parses a prefix unary operator (or pointer star)
// applied to a recursively parsed operand.
func (p *parser) parseUnaryExpr(lhs bool) ast.Expr {
	if p.trace {
		defer un(trace(p, "UnaryExpr"))
	}
	switch p.tok {
	case token.ADD, token.SUB, token.NOT, token.XOR, token.AND:
		opPos, op := p.pos, p.tok
		p.next()
		operand := p.parseUnaryExpr(false)
		return &ast.UnaryExpr{OpPos: opPos, Op: op, X: p.checkExpr(operand)}
	case token.MUL:
		// pointer type or unary "*" expression
		starPos := p.pos
		p.next()
		operand := p.parseUnaryExpr(false)
		return &ast.StarExpr{Star: starPos, X: p.checkExprOrType(operand)}
	}
	return p.parsePrimaryExpr(lhs)
}
// tokPrec returns the current token and its binary-operator precedence.
// Inside a right-hand side, '=' is treated as '==' for better error
// recovery of a mistyped comparison.
func (p *parser) tokPrec() (token.Token, int) {
	tok := p.tok
	if tok == token.ASSIGN && p.inRhs {
		tok = token.EQL
	}
	return tok, tok.Precedence()
}
// If lhs is set and the result is an identifier, it is not resolved.
//
// parseBinaryExpr parses binary expressions by precedence climbing:
// it keeps consuming operators whose precedence is at least prec1,
// recursing with oprec+1 so operators of equal precedence associate
// to the left.
func (p *parser) parseBinaryExpr(lhs bool, prec1 int) ast.Expr {
	if p.trace {
		defer un(trace(p, "BinaryExpr"))
	}
	x := p.parseUnaryExpr(lhs)
	for {
		op, oprec := p.tokPrec()
		if oprec < prec1 {
			// next operator binds less tightly; let the caller handle it
			return x
		}
		pos := p.expect(op)
		if lhs {
			// x is now used as an operand, so resolve it (once)
			p.resolve(x)
			lhs = false
		}
		y := p.parseBinaryExpr(false, oprec+1)
		x = &ast.BinaryExpr{X: p.checkExpr(x), OpPos: pos, Op: op, Y: p.checkExpr(y)}
	}
}
// If lhs is set and the result is an identifier, it is not resolved.
// The result may be a type or even a raw type ([...]int). Callers must
// check the result (using checkExpr or checkExprOrType), depending on
// context.
//
// parseExpr is the top-level expression entry point: it parses a binary
// expression starting at the lowest binding precedence.
func (p *parser) parseExpr(lhs bool) ast.Expr {
	if p.trace {
		defer un(trace(p, "Expression"))
	}
	return p.parseBinaryExpr(lhs, token.LowestPrec+1)
}
// parseRhs parses a right-hand-side expression with p.inRhs set,
// restoring the previous value afterwards.
func (p *parser) parseRhs() ast.Expr {
	saved := p.inRhs
	p.inRhs = true
	defer func() { p.inRhs = saved }()
	return p.checkExpr(p.parseExpr(false))
}
// parseRhsOrType parses a right-hand-side expression that may also be a
// type (e.g. in a conversion), with p.inRhs set for the duration.
func (p *parser) parseRhsOrType() ast.Expr {
	saved := p.inRhs
	p.inRhs = true
	defer func() { p.inRhs = saved }()
	return p.checkExprOrType(p.parseExpr(false))
}
// ----------------------------------------------------------------------------
// Statements
// Parsing modes for parseSimpleStmt.
const (
basic = iota // plain simple statement; no labels, no range clause
labelOk // a label declaration ("name:") is permitted
rangeOk // a range clause ("x := range y") is permitted
)
// parseSimpleStmt returns true as 2nd result if it parsed the assignment
// of a range clause (with mode == rangeOk). The returned statement is an
// assignment with a right-hand side that is a single unary expression of
// the form "range x". No guarantees are given for the left-hand side.
//
// Depending on mode, the statement may be a label declaration (labelOk),
// part of a range clause (rangeOk), an inc/dec statement, an assignment,
// or a bare expression statement.
func (p *parser) parseSimpleStmt(mode int) (ast.Stmt, bool) {
	if p.trace {
		defer un(trace(p, "SimpleStmt"))
	}
	x := p.parseLhsList()
	switch p.tok {
	case
		token.DEFINE, token.ASSIGN, token.ADD_ASSIGN,
		token.SUB_ASSIGN, token.MUL_ASSIGN, token.QUO_ASSIGN,
		token.REM_ASSIGN, token.AND_ASSIGN, token.OR_ASSIGN,
		token.XOR_ASSIGN, token.SHL_ASSIGN, token.SHR_ASSIGN, token.AND_NOT_ASSIGN:
		// assignment statement, possibly part of a range clause
		pos, tok := p.pos, p.tok
		p.next()
		var y []ast.Expr
		isRange := false
		if mode == rangeOk && p.tok == token.RANGE && (tok == token.DEFINE || tok == token.ASSIGN) {
			// encode "range x" as a unary RANGE expression on the rhs
			pos := p.pos
			p.next()
			y = []ast.Expr{&ast.UnaryExpr{OpPos: pos, Op: token.RANGE, X: p.parseRhs()}}
			isRange = true
		} else {
			y = p.parseRhsList()
		}
		as := &ast.AssignStmt{Lhs: x, TokPos: pos, Tok: tok, Rhs: y}
		if tok == token.DEFINE {
			// := declares the lhs identifiers in the current scope
			p.shortVarDecl(as, x)
		}
		return as, isRange
	}
	if len(x) > 1 {
		p.errorExpected(x[0].Pos(), "1 expression")
		// continue with first expression
	}
	switch p.tok {
	case token.COLON:
		// labeled statement
		colon := p.pos
		p.next()
		if label, isIdent := x[0].(*ast.Ident); mode == labelOk && isIdent {
			// Go spec: The scope of a label is the body of the function
			// in which it is declared and excludes the body of any nested
			// function.
			stmt := &ast.LabeledStmt{Label: label, Colon: colon, Stmt: p.parseStmt()}
			p.declare(stmt, nil, p.labelScope, ast.Lbl, label)
			return stmt, false
		}
		// The label declaration typically starts at x[0].Pos(), but the label
		// declaration may be erroneous due to a token after that position (and
		// before the ':'). If SpuriousErrors is not set, the (only) error
		// reported for the line is the illegal label error instead of the token
		// before the ':' that caused the problem. Thus, use the (latest) colon
		// position for error reporting.
		p.error(colon, "illegal label declaration")
		return &ast.BadStmt{From: x[0].Pos(), To: colon + 1}, false
	case token.INC, token.DEC:
		// increment or decrement
		s := &ast.IncDecStmt{X: x[0], TokPos: p.pos, Tok: p.tok}
		p.next()
		return s, false
	}
	// expression
	return &ast.ExprStmt{X: x[0]}, false
}
// parseCallExpr parses the expression following a defer (or similar)
// keyword and reports an error unless it is a call; callType names the
// statement kind for the error message. Returns nil when no call was found.
func (p *parser) parseCallExpr(callType string) *ast.CallExpr {
	expr := p.parseRhsOrType() // could be a conversion: (some type)(x)
	switch e := expr.(type) {
	case *ast.CallExpr:
		return e
	case *ast.BadExpr:
		// an error was already reported for this expression
	default:
		p.error(p.safePos(expr.End()), fmt.Sprintf("function must be invoked in %s statement", callType))
	}
	return nil
}
// parseDeferStmt parses a defer statement; if no valid call follows the
// keyword, a BadStmt spanning the keyword is returned instead.
func (p *parser) parseDeferStmt() ast.Stmt {
	if p.trace {
		defer un(trace(p, "DeferStmt"))
	}
	deferPos := p.expect(token.DEFER)
	callExpr := p.parseCallExpr("defer")
	p.expectSemi()
	if callExpr == nil {
		return &ast.BadStmt{From: deferPos, To: deferPos + 5} // len("defer")
	}
	return &ast.DeferStmt{Defer: deferPos, Call: callExpr}
}
// parseReturnStmt parses a return statement with an optional result list.
func (p *parser) parseReturnStmt() *ast.ReturnStmt {
	if p.trace {
		defer un(trace(p, "ReturnStmt"))
	}
	retPos := p.pos
	p.expect(token.RETURN)
	var results []ast.Expr
	// results are present unless the statement ends immediately
	if p.tok != token.SEMICOLON && p.tok != token.RBRACE {
		results = p.parseRhsList()
	}
	p.expectSemi()
	return &ast.ReturnStmt{Return: retPos, Results: results}
}
// parseBranchStmt parses a break or continue statement with an
// optional label; the label is recorded as an unresolved target.
func (p *parser) parseBranchStmt(tok token.Token) *ast.BranchStmt {
	if p.trace {
		defer un(trace(p, "BranchStmt"))
	}
	pos := p.expect(tok)
	var label *ast.Ident
	if p.tok == token.IDENT {
		label = p.parseIdent()
		// add to list of unresolved targets
		top := len(p.targetStack) - 1
		p.targetStack[top] = append(p.targetStack[top], label)
	}
	p.expectSemi()
	return &ast.BranchStmt{TokPos: pos, Tok: tok, Label: label}
}
// makeExpr converts a simple statement into the expression it wraps
// (e.g. an if/switch condition). A nil statement yields nil; anything
// other than an expression statement produces an error and a BadExpr.
func (p *parser) makeExpr(s ast.Stmt, want string) ast.Expr {
	if s == nil {
		return nil
	}
	found := "simple statement"
	switch st := s.(type) {
	case *ast.ExprStmt:
		return p.checkExpr(st.X)
	case *ast.AssignStmt:
		found = "assignment"
	}
	p.error(s.Pos(), fmt.Sprintf("expected %s, found %s (missing parentheses around composite literal?)", want, found))
	return &ast.BadExpr{From: s.Pos(), To: p.safePos(s.End())}
}
// parseIfHeader is an adjusted version of parser.header
// in cmd/compile/internal/syntax/parser.go, which has
// been tuned for better error handling.
//
// It parses the optional init statement and the condition of an if
// statement, reporting targeted errors for a missing condition, a stray
// var declaration, or an unexpected newline before the '{'.
func (p *parser) parseIfHeader() (init ast.Stmt, cond ast.Expr) {
	if p.tok == token.LBRACE {
		p.error(p.pos, "missing condition in if statement")
		cond = &ast.BadExpr{From: p.pos, To: p.pos}
		return
	}
	// p.tok != token.LBRACE
	outer := p.exprLev
	p.exprLev = -1 // disallow bare composite literals in the header
	if p.tok != token.SEMICOLON {
		// accept potential variable declaration but complain
		if p.tok == token.VAR {
			p.next()
			// The message contains no formatting verbs, so wrapping it in
			// fmt.Sprintf was redundant (go vet / staticcheck S1039).
			p.error(p.pos, "var declaration not allowed in 'IF' initializer")
		}
		init, _ = p.parseSimpleStmt(basic)
	}
	var condStmt ast.Stmt
	var semi struct {
		pos token.Pos
		lit string // ";" or "\n"; valid if pos.IsValid()
	}
	if p.tok != token.LBRACE {
		if p.tok == token.SEMICOLON {
			semi.pos = p.pos
			semi.lit = p.lit
			p.next()
		} else {
			p.expect(token.SEMICOLON)
		}
		if p.tok != token.LBRACE {
			condStmt, _ = p.parseSimpleStmt(basic)
		}
	} else {
		// no semicolon: the single simple statement is the condition
		condStmt = init
		init = nil
	}
	if condStmt != nil {
		cond = p.makeExpr(condStmt, "boolean expression")
	} else if semi.pos.IsValid() {
		if semi.lit == "\n" {
			p.error(semi.pos, "unexpected newline, expecting { after if clause")
		} else {
			p.error(semi.pos, "missing condition in if statement")
		}
	}
	// make sure we have a valid AST
	if cond == nil {
		cond = &ast.BadExpr{From: p.pos, To: p.pos}
	}
	p.exprLev = outer
	return
}
// parseIfStmt parses an if statement, including any chained
// "else if" / "else { ... }" branch.
func (p *parser) parseIfStmt() *ast.IfStmt {
	if p.trace {
		defer un(trace(p, "IfStmt"))
	}
	ifPos := p.expect(token.IF)
	p.openScope()
	defer p.closeScope()
	init, cond := p.parseIfHeader()
	body := p.parseBlockStmt()
	if p.tok != token.ELSE {
		p.expectSemi()
		return &ast.IfStmt{If: ifPos, Init: init, Cond: cond, Body: body}
	}
	p.next()
	var elseBranch ast.Stmt
	switch p.tok {
	case token.IF:
		elseBranch = p.parseIfStmt()
	case token.LBRACE:
		elseBranch = p.parseBlockStmt()
		p.expectSemi()
	default:
		p.errorExpected(p.pos, "if statement or block")
		elseBranch = &ast.BadStmt{From: p.pos, To: p.pos}
	}
	return &ast.IfStmt{If: ifPos, Init: init, Cond: cond, Body: body, Else: elseBranch}
}
// parseTypeList parses a non-empty, comma-separated list of types
// (used in type switch case clauses).
func (p *parser) parseTypeList() (list []ast.Expr) {
	if p.trace {
		defer un(trace(p, "TypeList"))
	}
	for {
		list = append(list, p.parseType())
		if p.tok != token.COMMA {
			return
		}
		p.next()
	}
}
// parseCaseClause parses one "case ...:" or "default:" clause of a switch;
// typeSwitch selects between a type list and an expression list.
func (p *parser) parseCaseClause(typeSwitch bool) *ast.CaseClause {
	if p.trace {
		defer un(trace(p, "CaseClause"))
	}
	casePos := p.pos
	var list []ast.Expr
	if p.tok != token.CASE {
		p.expect(token.DEFAULT)
	} else {
		p.next()
		if typeSwitch {
			list = p.parseTypeList()
		} else {
			list = p.parseRhsList()
		}
	}
	colon := p.expect(token.COLON)
	// clause body gets its own scope
	p.openScope()
	body := p.parseStmtList()
	p.closeScope()
	return &ast.CaseClause{Case: casePos, List: list, Colon: colon, Body: body}
}
// isTypeSwitchAssert reports whether x has the form x.(type),
// i.e. a type assertion with a nil Type.
func isTypeSwitchAssert(x ast.Expr) bool {
	assert, ok := x.(*ast.TypeAssertExpr)
	if !ok {
		return false
	}
	return assert.Type == nil
}
// isTypeSwitchGuard reports whether s is a type switch guard:
// either "x.(type)" or "v := x.(type)". The erroneous form
// "v = x.(type)" is reported but still treated as a guard.
func (p *parser) isTypeSwitchGuard(s ast.Stmt) bool {
	switch t := s.(type) {
	case *ast.ExprStmt:
		// x.(type)
		return isTypeSwitchAssert(t.X)
	case *ast.AssignStmt:
		// v := x.(type)
		if len(t.Lhs) != 1 || len(t.Rhs) != 1 || !isTypeSwitchAssert(t.Rhs[0]) {
			return false
		}
		if t.Tok == token.ASSIGN {
			// permit v = x.(type) but complain
			p.error(t.TokPos, "expected ':=', found '='")
			return true
		}
		return t.Tok == token.DEFINE
	}
	return false
}
// parseSwitchStmt parses either an expression switch or a type switch.
// The header may contain an optional init statement and an optional
// tag/guard; the guard form ("v := x.(type)") selects a TypeSwitchStmt.
func (p *parser) parseSwitchStmt() ast.Stmt {
	if p.trace {
		defer un(trace(p, "SwitchStmt"))
	}
	pos := p.expect(token.SWITCH)
	p.openScope()
	defer p.closeScope()
	var s1, s2 ast.Stmt
	if p.tok != token.LBRACE {
		prevLev := p.exprLev
		p.exprLev = -1 // disallow bare composite literals in the header
		if p.tok != token.SEMICOLON {
			s2, _ = p.parseSimpleStmt(basic)
		}
		if p.tok == token.SEMICOLON {
			// the first statement was actually the init; shift it
			p.next()
			s1 = s2
			s2 = nil
			if p.tok != token.LBRACE {
				// A TypeSwitchGuard may declare a variable in addition
				// to the variable declared in the initial SimpleStmt.
				// Introduce extra scope to avoid redeclaration errors:
				//
				// switch t := 0; t := x.(T) { ... }
				//
				// (this code is not valid Go because the first t
				// cannot be accessed and thus is never used, the extra
				// scope is needed for the correct error message).
				//
				// If we don't have a type switch, s2 must be an expression.
				// Having the extra nested but empty scope won't affect it.
				p.openScope()
				defer p.closeScope()
				s2, _ = p.parseSimpleStmt(basic)
			}
		}
		p.exprLev = prevLev
	}
	typeSwitch := p.isTypeSwitchGuard(s2)
	lbrace := p.expect(token.LBRACE)
	var list []ast.Stmt
	for p.tok == token.CASE || p.tok == token.DEFAULT {
		list = append(list, p.parseCaseClause(typeSwitch))
	}
	rbrace := p.expect(token.RBRACE)
	p.expectSemi()
	body := &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
	if typeSwitch {
		return &ast.TypeSwitchStmt{Switch: pos, Init: s1, Assign: s2, Body: body}
	}
	return &ast.SwitchStmt{Switch: pos, Init: s1, Tag: p.makeExpr(s2, "switch expression"), Body: body}
}
// parseCommClause parses one communication clause of a select statement:
// "case <send or receive>:" or "default:", followed by its statement list.
func (p *parser) parseCommClause() *ast.CommClause {
	if p.trace {
		defer un(trace(p, "CommClause"))
	}
	p.openScope()
	pos := p.pos
	var comm ast.Stmt
	if p.tok == token.CASE {
		p.next()
		lhs := p.parseLhsList()
		// RecvStmt
		if tok := p.tok; tok == token.ASSIGN || tok == token.DEFINE {
			// RecvStmt with assignment
			if len(lhs) > 2 {
				p.errorExpected(lhs[0].Pos(), "1 or 2 expressions")
				// continue with first two expressions
				lhs = lhs[0:2]
			}
			pos := p.pos
			p.next()
			rhs := p.parseRhs()
			as := &ast.AssignStmt{Lhs: lhs, TokPos: pos, Tok: tok, Rhs: []ast.Expr{rhs}}
			if tok == token.DEFINE {
				// := declares the receive variables in the clause scope
				p.shortVarDecl(as, lhs)
			}
			comm = as
		} else {
			// lhs must be single receive operation
			if len(lhs) > 1 {
				p.errorExpected(lhs[0].Pos(), "1 expression")
				// continue with first expression
			}
			comm = &ast.ExprStmt{X: lhs[0]}
		}
	} else {
		p.expect(token.DEFAULT)
	}
	colon := p.expect(token.COLON)
	body := p.parseStmtList()
	p.closeScope()
	return &ast.CommClause{Case: pos, Comm: comm, Colon: colon, Body: body}
}
// parseForStmt parses all for-statement forms: the bare "for {}", the
// condition-only form, the three-clause form, and range loops. Range
// loops arrive from parseSimpleStmt as an assignment whose rhs is a
// single unary RANGE expression and are rewritten into an ast.RangeStmt.
func (p *parser) parseForStmt() ast.Stmt {
	if p.trace {
		defer un(trace(p, "ForStmt"))
	}
	pos := p.expect(token.FOR)
	p.openScope()
	defer p.closeScope()
	var s1, s2, s3 ast.Stmt
	var isRange bool
	if p.tok != token.LBRACE {
		prevLev := p.exprLev
		p.exprLev = -1 // disallow bare composite literals in the header
		if p.tok != token.SEMICOLON {
			if p.tok == token.RANGE {
				// "for range x" (nil lhs in assignment)
				pos := p.pos
				p.next()
				y := []ast.Expr{&ast.UnaryExpr{OpPos: pos, Op: token.RANGE, X: p.parseRhs()}}
				s2 = &ast.AssignStmt{Rhs: y}
				isRange = true
			} else {
				s2, isRange = p.parseSimpleStmt(rangeOk)
			}
		}
		if !isRange && p.tok == token.SEMICOLON {
			// three-clause form: what we parsed was the init statement
			p.next()
			s1 = s2
			s2 = nil
			if p.tok != token.SEMICOLON {
				s2, _ = p.parseSimpleStmt(basic)
			}
			p.expectSemi()
			if p.tok != token.LBRACE {
				s3, _ = p.parseSimpleStmt(basic)
			}
		}
		p.exprLev = prevLev
	}
	body := p.parseBlockStmt()
	p.expectSemi()
	if isRange {
		as := s2.(*ast.AssignStmt)
		// check lhs
		var key, value ast.Expr
		switch len(as.Lhs) {
		case 0:
			// nothing to do
		case 1:
			key = as.Lhs[0]
		case 2:
			key, value = as.Lhs[0], as.Lhs[1]
		default:
			p.errorExpected(as.Lhs[len(as.Lhs)-1].Pos(), "at most 2 expressions")
			return &ast.BadStmt{From: pos, To: p.safePos(body.End())}
		}
		// parseSimpleStmt returned a right-hand side that
		// is a single unary expression of the form "range x"
		x := as.Rhs[0].(*ast.UnaryExpr).X
		return &ast.RangeStmt{
			For: pos,
			Key: key,
			Value: value,
			TokPos: as.TokPos,
			Tok: as.Tok,
			X: x,
			Body: body,
		}
	}
	// regular for statement
	return &ast.ForStmt{
		For: pos,
		Init: s1,
		Cond: p.makeExpr(s2, "boolean or range expression"),
		Post: s3,
		Body: body,
	}
}
// parseStmt parses a single statement, dispatching on the current token.
// On failure it reports an error, advances to a likely statement start,
// and returns a BadStmt so parsing can continue.
func (p *parser) parseStmt() (s ast.Stmt) {
	if p.trace {
		defer un(trace(p, "Statement"))
	}
	switch p.tok {
	case token.CONST, token.TYPE, token.VAR:
		s = &ast.DeclStmt{Decl: p.parseDecl(stmtStart)}
	case
		// tokens that may start an expression
		token.IDENT, token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING, token.FUNC, token.LPAREN, // operands
		token.LBRACK, token.STRUCT, token.MAP, token.INTERFACE, // composite types
		token.ADD, token.SUB, token.MUL, token.AND, token.XOR, token.NOT: // unary operators
		s, _ = p.parseSimpleStmt(labelOk)
		// because of the required look-ahead, labeled statements are
		// parsed by parseSimpleStmt - don't expect a semicolon after
		// them
		if _, isLabeledStmt := s.(*ast.LabeledStmt); !isLabeledStmt {
			p.expectSemi()
		}
	case token.DEFER:
		s = p.parseDeferStmt()
	case token.RETURN:
		s = p.parseReturnStmt()
	case token.BREAK, token.CONTINUE:
		s = p.parseBranchStmt(p.tok)
	case token.LBRACE:
		s = p.parseBlockStmt()
		p.expectSemi()
	case token.IF:
		s = p.parseIfStmt()
	case token.SWITCH:
		s = p.parseSwitchStmt()
	case token.FOR:
		s = p.parseForStmt()
	case token.SEMICOLON:
		// Is it ever possible to have an implicit semicolon
		// producing an empty statement in a valid program?
		// (handle correctly anyway)
		s = &ast.EmptyStmt{Semicolon: p.pos, Implicit: p.lit == "\n"}
		p.next()
	case token.RBRACE:
		// a semicolon may be omitted before a closing "}"
		s = &ast.EmptyStmt{Semicolon: p.pos, Implicit: true}
	default:
		// no statement found
		pos := p.pos
		p.errorExpected(pos, "statement")
		p.advance(stmtStart)
		s = &ast.BadStmt{From: pos, To: p.pos}
	}
	return
}
// ----------------------------------------------------------------------------
// Declarations
// A parseSpecFunction parses one declaration spec (import, const, type, or
// var). doc is the spec's leading comment group, keyword the declaring
// token, and iota the spec's index within a parenthesized group.
type parseSpecFunction func(doc *ast.CommentGroup, keyword token.Token, iota int) ast.Spec
// isValidImport reports whether lit (a string literal, including quotes)
// is a legal, non-empty import path: every rune must be graphic,
// non-space, and outside the set of characters forbidden by the spec.
func isValidImport(lit string) bool {
	const illegalChars = `!"#$%&'()*,:;<=>?[\]^{|}` + "`\uFFFD"
	s, _ := strconv.Unquote(lit) // the scanner guarantees a legal string literal
	if s == "" {
		return false
	}
	for _, r := range s {
		switch {
		case !unicode.IsGraphic(r), unicode.IsSpace(r), strings.ContainsRune(illegalChars, r):
			return false
		}
	}
	return true
}
// parseImportSpec parses a single import spec: an optional local name
// ("." or an identifier) followed by a quoted path. The spec is also
// appended to p.imports for later use.
func (p *parser) parseImportSpec(doc *ast.CommentGroup, _ token.Token, _ int) ast.Spec {
	if p.trace {
		defer un(trace(p, "ImportSpec"))
	}
	var name *ast.Ident
	switch p.tok {
	case token.PERIOD:
		name = &ast.Ident{NamePos: p.pos, Name: "."}
		p.next()
	case token.IDENT:
		name = p.parseIdent()
	}
	pathPos := p.pos
	var path string
	if p.tok != token.STRING {
		p.expect(token.STRING) // use expect() error handling
	} else {
		path = p.lit
		if !isValidImport(path) {
			p.error(pathPos, "invalid import path: "+path)
		}
		p.next()
	}
	p.expectSemi() // call before accessing p.linecomment
	// collect imports
	spec := &ast.ImportSpec{
		Doc:     doc,
		Name:    name,
		Path:    &ast.BasicLit{ValuePos: pathPos, Kind: token.STRING, Value: path},
		Comment: p.lineComment,
	}
	p.imports = append(p.imports, spec)
	return spec
}
// parseValueSpec parses one const or var spec: an identifier list with an
// optional type and optional "= value list". keyword distinguishes const
// from var for validation and for the declared object kind; iota is the
// spec's index within a parenthesized group (const specs after the first
// may legally omit values).
func (p *parser) parseValueSpec(doc *ast.CommentGroup, keyword token.Token, iota int) ast.Spec {
	if p.trace {
		defer un(trace(p, keyword.String()+"Spec"))
	}
	pos := p.pos
	idents := p.parseIdentList()
	typ := p.tryType()
	var values []ast.Expr
	// always permit optional initialization for more tolerant parsing
	if p.tok == token.ASSIGN {
		p.next()
		values = p.parseRhsList()
	}
	p.expectSemi() // call before accessing p.linecomment
	switch keyword {
	case token.VAR:
		if typ == nil && values == nil {
			p.error(pos, "missing variable type or initialization")
		}
	case token.CONST:
		if values == nil && (iota == 0 || typ != nil) {
			p.error(pos, "missing constant value")
		}
	}
	// Go spec: The scope of a constant or variable identifier declared inside
	// a function begins at the end of the ConstSpec or VarSpec and ends at
	// the end of the innermost containing block.
	// (Global identifiers are resolved in a separate phase after parsing.)
	spec := &ast.ValueSpec{
		Doc:     doc,
		Names:   idents,
		Type:    typ,
		Values:  values,
		Comment: p.lineComment,
	}
	kind := ast.Con
	if keyword == token.VAR {
		kind = ast.Var
	}
	p.declare(spec, iota, p.topScope, kind, idents...)
	return spec
}
// parseTypeSpec parses one type spec: a name, an optional '=' (alias
// declaration), and the type. The name is declared before the type is
// parsed so the type may refer to itself recursively.
func (p *parser) parseTypeSpec(doc *ast.CommentGroup, _ token.Token, _ int) ast.Spec {
	if p.trace {
		defer un(trace(p, "TypeSpec"))
	}
	ident := p.parseIdent()
	// Go spec: The scope of a type identifier declared inside a function begins
	// at the identifier in the TypeSpec and ends at the end of the innermost
	// containing block.
	// (Global identifiers are resolved in a separate phase after parsing.)
	spec := &ast.TypeSpec{Doc: doc, Name: ident}
	p.declare(spec, nil, p.topScope, ast.Typ, ident)
	if p.tok == token.ASSIGN {
		// alias declaration: type T = U
		spec.Assign = p.pos
		p.next()
	}
	spec.Type = p.parseType()
	p.expectSemi() // call before accessing p.linecomment
	spec.Comment = p.lineComment
	return spec
}
// parseGenDecl parses a general declaration (import/const/type/var),
// either a single spec or a parenthesized group, using f to parse each
// spec. The spec index (iota) counts specs within a group.
func (p *parser) parseGenDecl(keyword token.Token, f parseSpecFunction) *ast.GenDecl {
	if p.trace {
		defer un(trace(p, "GenDecl("+keyword.String()+")"))
	}
	doc := p.leadComment
	declPos := p.expect(keyword)
	var lparen, rparen token.Pos
	var specs []ast.Spec
	if p.tok != token.LPAREN {
		// single, unparenthesized spec
		specs = append(specs, f(nil, keyword, 0))
	} else {
		lparen = p.pos
		p.next()
		for iota := 0; p.tok != token.RPAREN && p.tok != token.EOF; iota++ {
			specs = append(specs, f(p.leadComment, keyword, iota))
		}
		rparen = p.expect(token.RPAREN)
		p.expectSemi()
	}
	return &ast.GenDecl{
		Doc:    doc,
		TokPos: declPos,
		Tok:    keyword,
		Lparen: lparen,
		Specs:  specs,
		Rparen: rparen,
	}
}
// parseFuncDecl parses a function or method declaration: optional
// receiver, name, signature, and optional body. Top-level functions
// (except init) are declared in the package scope; methods are not.
func (p *parser) parseFuncDecl() *ast.FuncDecl {
	if p.trace {
		defer un(trace(p, "FunctionDecl"))
	}
	doc := p.leadComment
	pos := p.expect(token.FUNC)
	scope := ast.NewScope(p.topScope) // function scope
	var recv *ast.FieldList
	if p.tok == token.LPAREN {
		// a parenthesized list here is a method receiver
		recv = p.parseParameters(scope, false)
	}
	ident := p.parseIdent()
	params, results := p.parseSignature(scope)
	var body *ast.BlockStmt
	if p.tok == token.LBRACE {
		body = p.parseBody(scope)
	}
	p.expectSemi()
	decl := &ast.FuncDecl{
		Doc: doc,
		Recv: recv,
		Name: ident,
		Type: &ast.FuncType{
			Func: pos,
			Params: params,
			Results: results,
		},
		Body: body,
	}
	if recv == nil {
		// Go spec: The scope of an identifier denoting a constant, type,
		// variable, or function (but not method) declared at top level
		// (outside any function) is the package block.
		//
		// init() functions cannot be referred to and there may
		// be more than one - don't put them in the pkgScope
		if ident.Name != "init" {
			p.declare(decl, nil, p.pkgScope, ast.Fun, ident)
		}
	}
	return decl
}
// parseDecl parses a top-level declaration, dispatching on the current
// token. On failure it reports an error, advances to a token in sync,
// and returns a BadDecl.
func (p *parser) parseDecl(sync map[token.Token]bool) ast.Decl {
	if p.trace {
		defer un(trace(p, "Declaration"))
	}
	switch p.tok {
	case token.FUNC:
		return p.parseFuncDecl()
	case token.CONST, token.VAR:
		return p.parseGenDecl(p.tok, p.parseValueSpec)
	case token.TYPE:
		return p.parseGenDecl(p.tok, p.parseTypeSpec)
	}
	pos := p.pos
	p.errorExpected(pos, "declaration")
	p.advance(sync)
	return &ast.BadDecl{From: pos, To: p.pos}
}
// ----------------------------------------------------------------------------
// Source files
// parseFile parses a complete source file: package clause, import
// declarations, and remaining top-level declarations. After parsing it
// resolves file-local identifiers against the package scope; identifiers
// that remain unresolved are kept for cross-file resolution.
// It returns nil if scanning or the package clause already produced errors.
func (p *parser) parseFile() *ast.File {
	if p.trace {
		defer un(trace(p, "File"))
	}
	// Don't bother parsing the rest if we had errors scanning the first token.
	// Likely not a Go source file at all.
	if p.errors.Len() != 0 {
		return nil
	}
	// package clause
	doc := p.leadComment
	pos := p.expect(token.PACKAGE)
	// Go spec: The package clause is not a declaration;
	// the package name does not appear in any scope.
	ident := p.parseIdent()
	if ident.Name == "_" && p.mode&DeclarationErrors != 0 {
		p.error(p.pos, "invalid package name _")
	}
	p.expectSemi()
	// Don't bother parsing the rest if we had errors parsing the package clause.
	// Likely not a Go source file at all.
	if p.errors.Len() != 0 {
		return nil
	}
	p.openScope()
	p.pkgScope = p.topScope
	var decls []ast.Decl
	if p.mode&PackageClauseOnly == 0 {
		// import decls
		for p.tok == token.IMPORT {
			decls = append(decls, p.parseGenDecl(token.IMPORT, p.parseImportSpec))
		}
		if p.mode&ImportsOnly == 0 {
			// rest of package body
			for p.tok != token.EOF {
				decls = append(decls, p.parseDecl(declStart))
			}
		}
	}
	p.closeScope()
	assert(p.topScope == nil, "unbalanced scopes")
	assert(p.labelScope == nil, "unbalanced label scopes")
	// resolve global identifiers within the same file;
	// compact p.unresolved in place, keeping only those still unresolved
	i := 0
	for _, ident := range p.unresolved {
		// i <= index for current ident
		assert(ident.Obj == unresolved, "object already resolved")
		ident.Obj = p.pkgScope.Lookup(ident.Name) // also removes unresolved sentinel
		if ident.Obj == nil {
			p.unresolved[i] = ident
			i++
		}
	}
	return &ast.File{
		Doc: doc,
		Package: pos,
		Name: ident,
		Decls: decls,
		Scope: p.pkgScope,
		Imports: p.imports,
		Unresolved: p.unresolved[0:i],
		Comments: p.comments,
	}
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package parser
import (
"bytes"
"fmt"
"os"
"strings"
"testing"
"wa-lang.org/wa/internal/ast"
"wa-lang.org/wa/internal/token"
)
// validFiles lists source files that TestParse expects to parse
// without any error.
var validFiles = []string{
"parser.go.wa",
"parser_test.go.wa",
"error_test.go.wa",
"short_test.go.wa",
}
// TestParse verifies that every file in validFiles parses cleanly.
func TestParse(t *testing.T) {
	for _, name := range validFiles {
		if _, err := ParseFile(nil, token.NewFileSet(), name, nil, DeclarationErrors); err != nil {
			t.Fatalf("ParseFile(%s): %v", name, err)
		}
	}
}
// nameFilter reports whether filename belongs to the fixed set of files
// the ParseDir tests operate on.
func nameFilter(filename string) bool {
	switch filename {
	case "parser.go",
		"interface.go",
		"parser_test.go",
		"parser.go.orig": // permit but should be ignored by ParseDir
		return true
	}
	return false
}
// dirFilter adapts nameFilter to the os.FileInfo filter signature used by ParseDir.
func dirFilter(f os.FileInfo) bool { return nameFilter(f.Name()) }
// TestParseDir parses the current directory with a name filter and checks
// that exactly the expected package and files are reported.
func TestParseDir(t *testing.T) {
	const path = "."
	pkgs, err := ParseDir(nil, token.NewFileSet(), path, dirFilter, 0)
	if err != nil {
		t.Fatalf("ParseDir(%s): %v", path, err)
	}
	if n := len(pkgs); n != 1 {
		t.Errorf("got %d packages; want 1", n)
	}
	pkg, ok := pkgs["parser"]
	if !ok || pkg == nil {
		t.Errorf(`package "parser" not found`)
		return
	}
	if n := len(pkg.Files); n != 3 {
		t.Errorf("got %d package files; want 3", n)
	}
	// every reported file must have passed the filter
	for filename := range pkg.Files {
		if !nameFilter(filename) {
			t.Errorf("unexpected package file: %s", filename)
		}
	}
}
// TestParseExpr exercises ParseExpr with valid expressions, valid types,
// malformed input, and trailing-token cases; it also runs every entry of
// the package-level valids table to check ParseExpr does not crash.
func TestParseExpr(t *testing.T) {
	// just kicking the tires:
	// a valid arithmetic expression
	src := "a + b"
	x, err := ParseExpr(src)
	if err != nil {
		t.Errorf("ParseExpr(%q): %v", src, err)
	}
	// sanity check
	if _, ok := x.(*ast.BinaryExpr); !ok {
		t.Errorf("ParseExpr(%q): got %T, want *ast.BinaryExpr", src, x)
	}
	// a valid type expression
	src = "struct{x *int}"
	x, err = ParseExpr(src)
	if err != nil {
		t.Errorf("ParseExpr(%q): %v", src, err)
	}
	// sanity check
	if _, ok := x.(*ast.StructType); !ok {
		t.Errorf("ParseExpr(%q): got %T, want *ast.StructType", src, x)
	}
	// an invalid expression
	src = "a + *"
	if _, err := ParseExpr(src); err == nil {
		t.Errorf("ParseExpr(%q): got no error", src)
	}
	// a valid expression followed by extra tokens is invalid
	src = "a[i] := x"
	if _, err := ParseExpr(src); err == nil {
		t.Errorf("ParseExpr(%q): got no error", src)
	}
	// a semicolon is not permitted unless automatically inserted
	src = "a + b\n"
	if _, err := ParseExpr(src); err != nil {
		t.Errorf("ParseExpr(%q): got error %s", src, err)
	}
	src = "a + b;"
	if _, err := ParseExpr(src); err == nil {
		t.Errorf("ParseExpr(%q): got no error", src)
	}
	// various other stuff following a valid expression
	const validExpr = "a + b"
	const anything = "dh3*#D)#_"
	for _, c := range "!)]};," {
		src := validExpr + string(c) + anything
		if _, err := ParseExpr(src); err == nil {
			t.Errorf("ParseExpr(%q): got no error", src)
		}
	}
	// ParseExpr must not crash
	for _, src := range valids {
		ParseExpr(src)
	}
}
// TestColonEqualsScope checks that in "x, y, z := x, y, z" the rhs
// identifiers stay unresolved (undefined globals) while the lhs
// identifiers are bound to the objects declared by :=.
func TestColonEqualsScope(t *testing.T) {
	f, err := ParseFile(nil, token.NewFileSet(), "", `package p; func f() { x, y, z := x, y, z }`, 0)
	if err != nil {
		t.Fatal(err)
	}
	assign := f.Decls[0].(*ast.FuncDecl).Body.List[0].(*ast.AssignStmt)
	// RHS refers to undefined globals; must not carry objects.
	for _, expr := range assign.Rhs {
		if id := expr.(*ast.Ident); id.Obj != nil {
			t.Errorf("rhs %s has Obj, should not", id.Name)
		}
	}
	// LHS identifiers are declared by := and must carry objects.
	for _, expr := range assign.Lhs {
		if id := expr.(*ast.Ident); id.Obj == nil {
			t.Errorf("lhs %s does not have Obj, should", id.Name)
		}
	}
}
// TestVarScope checks that in "var x, y, z = x, y, z" the initializer
// identifiers stay unresolved while the declared names carry objects.
func TestVarScope(t *testing.T) {
	f, err := ParseFile(nil, token.NewFileSet(), "", `package p; func f() { var x, y, z = x, y, z }`, 0)
	if err != nil {
		t.Fatal(err)
	}
	decl := f.Decls[0].(*ast.FuncDecl).Body.List[0].(*ast.DeclStmt).Decl.(*ast.GenDecl)
	spec := decl.Specs[0].(*ast.ValueSpec)
	// Initializers refer to undefined globals; must not carry objects.
	for _, value := range spec.Values {
		if id := value.(*ast.Ident); id.Obj != nil {
			t.Errorf("rhs %s has Obj, should not", id.Name)
		}
	}
	// Declared names must carry objects.
	for _, id := range spec.Names {
		if id.Obj == nil {
			t.Errorf("lhs %s does not have Obj, should", id.Name)
		}
	}
}
// TestObjects parses a small program and walks its AST, checking that
// every identifier's attached object has the expected kind (or no object
// at all for names that resolution leaves untouched).
func TestObjects(t *testing.T) {
const src = `
package p
import fmt "fmt"
const pi = 3.14
type T struct{}
var x int
func f() { L: }
`
f, err := ParseFile(nil, token.NewFileSet(), "", src, 0)
if err != nil {
t.Fatal(err)
}
// expected object kind per identifier name; ast.Bad means "no object"
objects := map[string]ast.ObjKind{
"p": ast.Bad, // not in a scope
"fmt": ast.Bad, // not resolved yet
"pi": ast.Con,
"T": ast.Typ,
"x": ast.Var,
"int": ast.Bad, // not resolved yet
"f": ast.Fun,
"L": ast.Lbl,
}
ast.Inspect(f, func(n ast.Node) bool {
if ident, ok := n.(*ast.Ident); ok {
obj := ident.Obj
if obj == nil {
if objects[ident.Name] != ast.Bad {
t.Errorf("no object for %s", ident.Name)
}
return true
}
if obj.Name != ident.Name {
t.Errorf("names don't match: obj.Name = %s, ident.Name = %s", obj.Name, ident.Name)
}
kind := objects[ident.Name]
if obj.Kind != kind {
t.Errorf("%s: obj.Kind = %s; want %s", ident.Name, obj.Kind, kind)
}
}
return true
})
}
// TestUnresolved parses declarations that reference undeclared type names
// and checks that f.Unresolved lists exactly those identifiers, in source
// order (each name recorded once per occurrence).
func TestUnresolved(t *testing.T) {
f, err := ParseFile(nil, token.NewFileSet(), "", `
package p
//
func f1a(int)
func f2a(byte, int, float)
func f3a(a, b int, c float)
func f4a(...complex)
func f5a(a s1a, b ...complex)
//
func f1b(*int)
func f2b([]byte, (int), *float)
func f3b(a, b *int, c []float)
func f4b(...*complex)
func f5b(a s1a, b ...[]complex)
//
type s1a struct { int }
type s2a struct { byte; int; s1a }
type s3a struct { a, b int; c float }
//
type s1b struct { *int }
type s2b struct { byte; int; *float }
type s3b struct { a, b *s3b; c []float }
`, 0)
if err != nil {
t.Fatal(err)
}
// expected unresolved names, one space-terminated entry per occurrence
want := "int " + // f1a
"byte int float " + // f2a
"int float " + // f3a
"complex " + // f4a
"complex " + // f5a
//
"int " + // f1b
"byte int float " + // f2b
"int float " + // f3b
"complex " + // f4b
"complex " + // f5b
//
"int " + // s1a
"byte int " + // s2a
"int float " + // s3a
//
"int " + // s1a
"byte int float " + // s2a
"float " // s3a
// collect unresolved identifiers
var buf bytes.Buffer
for _, u := range f.Unresolved {
buf.WriteString(u.Name)
buf.WriteByte(' ')
}
got := buf.String()
if got != want {
t.Errorf("\ngot: %s\nwant: %s", got, want)
}
}
// imports maps import path literals (quotes included) to whether
// TestImports expects them to be accepted as valid import paths.
var imports = map[string]bool{
`"a"`: true,
"`a`": true,
`"a/b"`: true,
`"a.b"`: true,
`"m\x61th"`: true,
`"greek/αβ"`: true,
`""`: false,
// Each of these pairs tests both `` vs "" strings
// and also use of invalid characters spelled out as
// escape sequences and written directly.
// For example `"\x00"` tests import "\x00"
// while "`\x00`" tests import `<actual-NUL-byte>`.
`"\x00"`: false,
"`\x00`": false,
`"\x7f"`: false,
"`\x7f`": false,
`"a!"`: false,
"`a!`": false,
`"a b"`: false,
"`a b`": false,
`"a\\b"`: false,
"`a\\b`": false,
"\"`a`\"": false,
"`\"a\"`": false,
`"\x80\x80"`: false,
"`\x80\x80`": false,
`"\xFFFD"`: false,
"`\xFFFD`": false,
}
// TestImports parses a one-line program for each entry of the imports
// table and checks that valid paths parse and invalid ones do not.
func TestImports(t *testing.T) {
	for path, isValid := range imports {
		src := fmt.Sprintf("package p; import %s", path)
		_, err := ParseFile(nil, token.NewFileSet(), "", src, 0)
		if isValid && err != nil {
			t.Errorf("ParseFile(%s): got %v; expected no error", src, err)
		} else if !isValid && err == nil {
			t.Errorf("ParseFile(%s): got no error; expected one", src)
		}
	}
}
// TestCommentGroups parses a source file with ParseComments and verifies
// that comments are grouped exactly as expected: both the number of
// groups and the exact text of every comment within each group.
func TestCommentGroups(t *testing.T) {
f, err := ParseFile(nil, token.NewFileSet(), "", `
package p /* 1a */ /* 1b */ /* 1c */ // 1d
/* 2a
*/
// 2b
const pi = 3.1415
/* 3a */ // 3b
/* 3c */ const e = 2.7182
// Example from issue 3139
func ExampleCount() {
fmt.Println(strings.Count("cheese", "e"))
fmt.Println(strings.Count("five", "")) // before & after each rune
// Output:
// 3
// 5
}
`, ParseComments)
if err != nil {
t.Fatal(err)
}
// one inner slice per expected comment group, in source order
expected := [][]string{
{"/* 1a */", "/* 1b */", "/* 1c */", "// 1d"},
{"/* 2a\n*/", "// 2b"},
{"/* 3a */", "// 3b", "/* 3c */"},
{"// Example from issue 3139"},
{"// before & after each rune"},
{"// Output:", "// 3", "// 5"},
}
if len(f.Comments) != len(expected) {
t.Fatalf("got %d comment groups; expected %d", len(f.Comments), len(expected))
}
for i, exp := range expected {
got := f.Comments[i].List
if len(got) != len(exp) {
t.Errorf("got %d comments in group %d; expected %d", len(got), i, len(exp))
continue
}
for j, exp := range exp {
got := got[j].Text
if got != exp {
t.Errorf("got %q in group %d; expected %q", got, i, exp)
}
}
}
}
// getField looks up a struct field by a "Type.field" name (e.g. "T.F1")
// among the type declarations of file; it returns nil if not found.
func getField(file *ast.File, fieldname string) *ast.Field {
	parts := strings.Split(fieldname, ".")
	for _, decl := range file.Decls {
		gen, ok := decl.(*ast.GenDecl)
		if !ok || gen.Tok != token.TYPE {
			continue
		}
		for _, spec := range gen.Specs {
			ts, ok := spec.(*ast.TypeSpec)
			if !ok || ts.Name.Name != parts[0] {
				continue
			}
			st, ok := ts.Type.(*ast.StructType)
			if !ok {
				continue
			}
			for _, field := range st.Fields.List {
				for _, name := range field.Names {
					if name.Name == parts[1] {
						return field
					}
				}
			}
		}
	}
	return nil
}
// commentText concatenates the raw text of all comments in c.
// Don't use ast.CommentGroup.Text() - we want to see exact comment text.
func commentText(c *ast.CommentGroup) string {
	if c == nil {
		return ""
	}
	var out bytes.Buffer
	for _, comment := range c.List {
		out.WriteString(comment.Text)
	}
	return out.String()
}
// checkFieldComments asserts that the named struct field carries the
// given lead (doc) and line comments.
func checkFieldComments(t *testing.T, file *ast.File, fieldname, lead, line string) {
	f := getField(file, fieldname)
	if f == nil {
		t.Fatalf("field not found: %s", fieldname)
	}
	gotLead := commentText(f.Doc)
	if gotLead != lead {
		t.Errorf("got lead comment %q; expected %q", gotLead, lead)
	}
	gotLine := commentText(f.Comment)
	if gotLine != line {
		t.Errorf("got line comment %q; expected %q", gotLine, line)
	}
}
// TestLeadAndLineComments verifies that doc (lead) and trailing line
// comments attach to the correct struct fields, and that comments
// survive ast.FileExports (which removes unexported declarations).
func TestLeadAndLineComments(t *testing.T) {
	f, err := ParseFile(nil, token.NewFileSet(), "", `
package p
type T struct {
/* F1 lead comment */
//
F1 int /* F1 */ // line comment
// F2 lead
// comment
F2 int // F2 line comment
// f3 lead comment
f3 int // f3 line comment
}
`, ParseComments)
	if err != nil {
		t.Fatal(err)
	}
	checkFieldComments(t, f, "T.F1", "/* F1 lead comment *///", "/* F1 */// line comment")
	checkFieldComments(t, f, "T.F2", "// F2 lead// comment", "// F2 line comment")
	checkFieldComments(t, f, "T.f3", "// f3 lead comment", "// f3 line comment")
	// FileExports trims unexported declarations in place;
	// the exported fields must keep their comments ...
	ast.FileExports(f)
	checkFieldComments(t, f, "T.F1", "/* F1 lead comment *///", "/* F1 */// line comment")
	checkFieldComments(t, f, "T.F2", "// F2 lead// comment", "// F2 line comment")
	// ... while the unexported field f3 must be gone.
	if getField(f, "T.f3") != nil {
		t.Error("not expected to find T.f3")
	}
}
// TestIssue9979 verifies that empty statements are contained within their enclosing blocks.
func TestIssue9979(t *testing.T) {
	for _, src := range []string{
		"package p; func f() {;}",
		"package p; func f() {L:}",
		"package p; func f() {L:;}",
		"package p; func f() {L:\n}",
		"package p; func f() {L:\n;}",
		"package p; func f() { ; }",
		"package p; func f() { L: }",
		"package p; func f() { L: ; }",
		"package p; func f() { L: \n}",
		"package p; func f() { L: \n; }",
	} {
		fset := token.NewFileSet()
		f, err := ParseFile(nil, fset, "", src, 0)
		if err != nil {
			t.Fatal(err)
		}
		// pos/end bracket the innermost enclosing construct seen so far;
		// Inspect visits parents before children, so by the time an
		// EmptyStmt is reached they describe its enclosing block/label.
		var pos, end token.Pos
		ast.Inspect(f, func(x ast.Node) bool {
			switch s := x.(type) {
			case *ast.BlockStmt:
				pos, end = s.Pos()+1, s.End()-1 // exclude "{", "}"
			case *ast.LabeledStmt:
				pos, end = s.Pos()+2, s.End() // exclude "L:"
			case *ast.EmptyStmt:
				// check containment
				if s.Pos() < pos || s.End() > end {
					t.Errorf("%s: %T[%d, %d] not inside [%d, %d]", src, s, s.Pos(), s.End(), pos, end)
				}
				// check semicolon: reads as (ch != ';') != s.Implicit —
				// an explicit empty statement must sit exactly on a ';'
				// in the source, an implicit one must not.
				offs := fset.Position(s.Pos()).Offset
				if ch := src[offs]; ch != ';' != s.Implicit {
					want := "want ';'"
					if s.Implicit {
						want = "but ';' is implicit"
					}
					t.Errorf("%s: found %q at offset %d; %s", src, ch, offs, want)
				}
			}
			return true
		})
	}
}
// TestIncompleteSelection ensures that an incomplete selector
// expression is parsed as a (blank) *ast.SelectorExpr, not a
// *ast.BadExpr.
func TestIncompleteSelection(t *testing.T) {
	sources := []string{
		"package p; var _ = fmt.",             // at EOF
		"package p; var _ = fmt.\ntype X int", // not at EOF
	}
	for _, src := range sources {
		fset := token.NewFileSet()
		f, err := ParseFile(nil, fset, "", src, 0)
		if err == nil {
			t.Errorf("ParseFile(%s) succeeded unexpectedly", src)
			continue
		}
		const wantErr = "expected selector or type assertion"
		if !strings.Contains(err.Error(), wantErr) {
			t.Errorf("ParseFile returned wrong error %q, want %q", err, wantErr)
		}
		// Even on error, the partial AST must contain a SelectorExpr
		// with a blank Sel identifier.
		var sel *ast.SelectorExpr
		ast.Inspect(f, func(n ast.Node) bool {
			if s, ok := n.(*ast.SelectorExpr); ok {
				sel = s
			}
			return true
		})
		if sel == nil {
			t.Error("found no *ast.SelectorExpr")
			continue
		}
		const wantSel = "&{fmt _}"
		if fmt.Sprint(sel) != wantSel {
			t.Errorf("found selector %s, want %s", sel, wantSel)
		}
	}
}
func TestLastLineComment(t *testing.T) {
const src = `package main
type x int // comment
`
fset := token.NewFileSet()
f, err := ParseFile(nil, fset, "", src, ParseComments)
if err != nil {
t.Fatal(err)
}
comment := f.Decls[0].(*ast.GenDecl).Specs[0].(*ast.TypeSpec).Comment.List[0].Text
if comment != "// comment" {
t.Errorf("got %q, want %q", comment, "// comment")
}
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file contains test cases for short valid and invalid programs.
package parser
import "testing"
// valids is a list of short programs that must parse without errors.
// Each entry exercises a corner of the grammar (trailing commas,
// composite literals in conditions, slice expressions, type aliases, ...).
var valids = []string{
"package p\n",
`package p;`,
`package p; import "fmt"; func f() { fmt.Println("Hello, World!") };`,
`package p; func f() { if f(T{}) {} };`,
`package p; func f(func() func() func());`,
`package p; func f(...T);`,
`package p; func f(float, ...int);`,
`package p; func f(x int, a ...int) { f(0, a...); f(1, a...,) };`,
`package p; func f(int,) {};`,
`package p; func f(...int,) {};`,
`package p; func f(x ...int,) {};`,
`package p; type T []int; var a []bool; func f() { if a[T{42}[0]] {} };`,
`package p; type T []int; func g(int) bool { return true }; func f() { if g(T{42}[0]) {} };`,
`package p; type T []int; func f() { for _ = range []int{T{42}[0]} {} };`,
`package p; var a = T{{1, 2}, {3, 4}}`,
`package p; func f() { if ; true {} };`,
`package p; func f() { switch ; {} };`,
`package p; func f() { for _ = range "foo" + "bar" {} };`,
`package p; func f() { var s []int; g(s[:], s[i:], s[:j], s[i:j], s[i:j:k], s[:j:k]) };`,
`package p; var ( _ = (struct {*T}).m; _ = (interface {T}).m )`,
`package p; func ((T),) m() {}`,
`package p; func ((*T),) m() {}`,
`package p; func (*(T),) m() {}`,
`package p; func _(x []int) { for range x {} }`,
`package p; func _() { if [T{}.n]int{} {} }`,
`package p; func _() { map[int]int{}[0]++; map[int]int{}[0] += 1 }`,
`package p; func _(x interface{f()}) { interface{f()}(x).f() }`,
`package p; const (x = 0; y; z)`, // issue 9639
`package p; var _ = map[P]int{P{}:0, {}:1}`,
`package p; var _ = map[*P]int{&P{}:0, {}:1}`,
`package p; type T = int`,
`package p; type (T = p.T; _ = struct{}; x = *T)`,
}
// TestValid checks that every entry in valids parses without errors.
func TestValid(t *testing.T) {
	for i := range valids {
		checkErrors(nil, t, valids[i], valids[i])
	}
}
// invalids is a list of short programs that must produce parse errors.
// Each /* ERROR "..." */ comment marks the position and message of the
// expected error, checked by checkErrors.
var invalids = []string{
// `foo /* ERROR "expected 'package'" */ !`,
`package p; func f() { if { /* ERROR "missing condition" */ } };`,
`package p; func f() { if ; /* ERROR "missing condition" */ {} };`,
`package p; func f() { if f(); /* ERROR "missing condition" */ {} };`,
`package p; func f() { if _ = range /* ERROR "expected operand" */ x; true {} };`,
`package p; func f() { switch _ /* ERROR "expected switch expression" */ = range x; true {} };`,
`package p; func f() { for _ = range x ; /* ERROR "expected '{'" */ ; {} };`,
`package p; func f() { for ; ; _ = range /* ERROR "expected operand" */ x {} };`,
`package p; func f() { for ; _ /* ERROR "expected boolean or range expression" */ = range x ; {} };`,
`package p; func f() { switch t = /* ERROR "expected ':=', found '='" */ t.(type) {} };`,
`package p; func f() { switch t /* ERROR "expected switch expression" */ , t = t.(type) {} };`,
`package p; func f() { switch t /* ERROR "expected switch expression" */ = t.(type), t {} };`,
`package p; var a = [ /* ERROR "expected expression" */ 1]int;`,
`package p; var a = [ /* ERROR "expected expression" */ ...]int;`,
`package p; var a = struct /* ERROR "expected expression" */ {}`,
`package p; var a = func /* ERROR "expected expression" */ ();`,
`package p; var a = interface /* ERROR "expected expression" */ {}`,
`package p; var a = [ /* ERROR "expected expression" */ ]int`,
`package p; var a = map /* ERROR "expected expression" */ [int]int`,
`package p; var a = []int{[ /* ERROR "expected expression" */ ]int};`,
`package p; var a = ( /* ERROR "expected expression" */ []int);`,
`package p; var a = a[[ /* ERROR "expected expression" */ ]int:[]int];`,
`package p; func f() { var t []int; t /* ERROR "expected identifier on left side of :=" */ [0] := 0 };`,
`package p; func f() { if x := g(); x /* ERROR "expected boolean expression" */ = 0 {}};`,
`package p; func f() { _ = x = /* ERROR "expected '=='" */ 0 {}};`,
`package p; func f() { _ = 1 == func()int { var x bool; x = x = /* ERROR "expected '=='" */ true; return x }() };`,
`package p; func f() { var s []int; _ = s[] /* ERROR "expected operand" */ };`,
`package p; func f() { var s []int; _ = s[i:j: /* ERROR "3rd index required" */ ] };`,
`package p; func f() { var s []int; _ = s[i: /* ERROR "2nd index required" */ :k] };`,
`package p; func f() { var s []int; _ = s[i: /* ERROR "2nd index required" */ :] };`,
`package p; func f() { var s []int; _ = s[: /* ERROR "2nd index required" */ :] };`,
`package p; func f() { var s []int; _ = s[: /* ERROR "2nd index required" */ ::] };`,
`package p; func f() { var s []int; _ = s[i:j:k: /* ERROR "expected ']'" */ l] };`,
`package p; func f() { for x /* ERROR "boolean or range expression" */ = []string {} }`,
`package p; func f() { for x /* ERROR "boolean or range expression" */ := []string {} }`,
`package p; func f() { for i /* ERROR "boolean or range expression" */ , x = []string {} }`,
`package p; func f() { for i /* ERROR "boolean or range expression" */ , x := []string {} }`,
`package p; func f() { defer func() {} /* ERROR HERE "function must be invoked" */ }`,
`package p; func f(x func(), u v func /* ERROR "missing ','" */ ()){}`,
// issue 8656
`package p; func f() (a b string /* ERROR "missing ','" */ , ok bool)`,
// issue 9639
`package p; var x /* ERROR "missing variable type or initialization" */ , y, z;`,
`package p; const x /* ERROR "missing constant value" */ ;`,
`package p; const x /* ERROR "missing constant value" */ int;`,
`package p; const (x = 0; y; z /* ERROR "missing constant value" */ int);`,
// issue 12437
`package p; var _ = struct { x int, /* ERROR "expected ';', found ','" */ }{};`,
`package p; var _ = struct { x int, /* ERROR "expected ';', found ','" */ y float }{};`,
// issue 11611
`package p; type _ struct { int, } /* ERROR "expected type, found '}'" */ ;`,
`package p; type _ struct { int, float } /* ERROR "expected type, found '}'" */ ;`,
`package p; type _ struct { ( /* ERROR "expected anonymous field" */ int) };`,
`package p; func _()(x, y, z ... /* ERROR "expected '\)', found '...'" */ int){}`,
`package p; func _()(... /* ERROR "expected type, found '...'" */ int){}`,
// issue 13475
`package p; func f() { if true {} else ; /* ERROR "expected if statement or block" */ }`,
`package p; func f() { if true {} else defer /* ERROR "expected if statement or block" */ f() }`,
}
// TestInvalid checks that every entry in invalids produces exactly
// the errors marked by its ERROR comments.
func TestInvalid(t *testing.T) {
	for i := range invalids {
		checkErrors(nil, t, invalids[i], invalids[i])
	}
}
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package printer_test
import (
"bytes"
"fmt"
"strings"
"testing"
"wa-lang.org/wa/internal/ast"
"wa-lang.org/wa/internal/parser"
"wa-lang.org/wa/internal/printer"
"wa-lang.org/wa/internal/token"
)
// Test is a dummy test function; its presence keeps godoc from using
// the entire file as a single example.
func Test(*testing.T) {}
// parseFunc parses filename and returns the declaration of the named
// function together with the file set used. It panics if the file
// cannot be parsed or the function is not found.
func parseFunc(filename, functionname string) (fun *ast.FuncDecl, fset *token.FileSet) {
	fset = token.NewFileSet()
	file, err := parser.ParseFile(nil, fset, filename, nil, 0)
	if err == nil {
		for _, decl := range file.Decls {
			if f, ok := decl.(*ast.FuncDecl); ok && f.Name.Name == functionname {
				return f, fset
			}
		}
	}
	panic("function not found")
}
// ExampleFprint demonstrates printer.Fprint by printing the body of
// this very function. NOTE: the example parses its own source file, so
// editing the statements (or the blank lines between them) changes the
// expected output below.
func ExampleFprint() {
	// Parse source file and extract the AST without comments for
	// this function, with position information referring to the
	// file set fset.
	funcAST, fset := parseFunc("example_test.go", "ExampleFprint")
	// Print the function body into buffer buf.
	// The file set is provided to the printer so that it knows
	// about the original source formatting and can add additional
	// line breaks where they were present in the source.
	var buf bytes.Buffer
	printer.Fprint(&buf, fset, funcAST.Body)
	// Remove braces {} enclosing the function body, unindent,
	// and trim leading and trailing white space.
	s := buf.String()
	s = s[1 : len(s)-1]
	s = strings.TrimSpace(strings.ReplaceAll(s, "\n\t", "\n"))
	// Print the cleaned-up body text to stdout.
	fmt.Println(s)
	// output:
	// funcAST, fset := parseFunc("example_test.go", "ExampleFprint")
	//
	// var buf bytes.Buffer
	// printer.Fprint(&buf, fset, funcAST.Body)
	//
	// s := buf.String()
	// s = s[1 : len(s)-1]
	// s = strings.TrimSpace(strings.ReplaceAll(s, "\n\t", "\n"))
	//
	// fmt.Println(s)
}
......@@ -469,10 +469,10 @@ func (p *printer) fieldList(fields *ast.FieldList, isStruct, isIncomplete bool)
}
p.expr(x)
}
p.print(token.COLON)
if len(f.Names) > 0 {
p.print(blank)
}
p.print(token.COLON)
p.expr(f.Type)
} else { // interface
if ftyp, isFtyp := f.Type.(*ast.FuncType); isFtyp {
......@@ -512,8 +512,8 @@ func (p *printer) fieldList(fields *ast.FieldList, isStruct, isIncomplete bool)
if len(f.Names) > 0 {
// named fields
p.identList(f.Names, false)
p.print(sep)
p.print(token.COLON)
p.print(sep)
p.expr(f.Type)
extraTabs = 1
} else {
......@@ -1401,6 +1401,7 @@ func (p *printer) valueSpec(s *ast.ValueSpec, keepType bool) {
extraTabs--
}
if s.Type != nil {
p.print(token.COLON)
p.expr(s.Type)
}
if s.Values != nil {
......@@ -1523,7 +1524,7 @@ func (p *printer) genDecl(d *ast.GenDecl) {
p.print(d.Lparen, token.LPAREN)
if n := len(d.Specs); n > 0 {
p.print(indent, formfeed)
if n > 1 && (d.Tok == token.CONST || d.Tok == token.VAR) {
if n > 1 && (d.Tok == token.CONST || d.Tok == token.VAR || d.Tok == token.GLOBAL) {
// two or more grouped const/var declarations:
// determine if the type column must be kept
keepType := keepTypeColumn(d.Specs)
......
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package printer implements printing of AST nodes.
package printer
import (
"fmt"
"io"
"os"
"strings"
"text/tabwriter"
"unicode"
"wa-lang.org/wa/internal/ast"
"wa-lang.org/wa/internal/token"
)
const (
	maxNewlines = 2 // max. number of newlines between source text
	debug = false // enable for debugging
	infinity = 1 << 30 // larger than any source offset; marks "no comment pending"
)
// A whiteSpace value encodes a single pending whitespace or
// (un)indent operation buffered in printer.wsbuf.
type whiteSpace byte
const (
	ignore = whiteSpace(0) // dropped; never written
	blank = whiteSpace(' ')
	vtab = whiteSpace('\v')
	newline = whiteSpace('\n')
	formfeed = whiteSpace('\f')
	indent = whiteSpace('>') // increase indentation
	unindent = whiteSpace('<') // decrease indentation
)
// A pmode value represents the current printer mode (a bit set).
type pmode int
const (
	noExtraBlank pmode = 1 << iota // disables extra blank after /*-style comment
	noExtraLinebreak // disables extra line break after /*-style comment
)
// commentInfo tracks the printer's progress through the list of
// source comments (printer.comments); updated by nextComment.
type commentInfo struct {
	cindex int // current comment index
	comment *ast.CommentGroup // = printer.comments[cindex]; or nil
	commentOffset int // = printer.posFor(printer.comments[cindex].List[0].Pos()).Offset; or infinity
	commentNewline bool // true if the comment group contains newlines
}
// A printer holds all state needed while printing an AST: the
// configuration, position tracking in both source and output space,
// buffered whitespace, and comment bookkeeping. The formatted result
// accumulates in output.
type printer struct {
	// Configuration (does not change after initialization)
	Config
	fset *token.FileSet
	// Current state
	output []byte // raw printer result
	indent int // current indentation
	level int // level == 0: outside composite literal; level > 0: inside composite literal
	mode pmode // current printer mode
	endAlignment bool // if set, terminate alignment immediately
	impliedSemi bool // if set, a linebreak implies a semicolon
	lastTok token.Token // last token printed (token.ILLEGAL if it's whitespace)
	prevOpen token.Token // previous non-brace "open" token (, [, or token.ILLEGAL
	wsbuf []whiteSpace // delayed white space
	// Positions
	// The out position differs from the pos position when the result
	// formatting differs from the source formatting (in the amount of
	// white space). If there's a difference and SourcePos is set in
	// ConfigMode, //line directives are used in the output to restore
	// original source positions for a reader.
	pos token.Position // current position in AST (source) space
	out token.Position // current position in output space
	last token.Position // value of pos after calling writeString
	linePtr *int // if set, record out.Line for the next token in *linePtr
	// The list of all source comments, in order of appearance.
	comments []*ast.CommentGroup // may be nil
	useNodeComments bool // if not set, ignore lead and line comments of nodes
	// Information about p.comments[p.cindex]; set up by nextComment.
	commentInfo
	// Cache of already computed node sizes.
	nodeSizes map[ast.Node]int
	// Cache of most recently computed line position.
	cachedPos token.Pos
	cachedLine int // line corresponding to cachedPos
}
// init prepares p for printing with the given configuration, file set,
// and (shared) node-size cache.
func (p *printer) init(cfg *Config, fset *token.FileSet, nodeSizes map[ast.Node]int) {
	p.Config = *cfg
	p.fset = fset
	p.nodeSizes = nodeSizes
	p.cachedPos = -1
	p.pos = token.Position{Line: 1, Column: 1}
	p.out = token.Position{Line: 1, Column: 1}
	p.wsbuf = make([]whiteSpace, 0, 16) // whitespace sequences are short
}
// internalError reports an internal inconsistency and panics,
// but only when the debug flag is enabled.
func (p *printer) internalError(msg ...interface{}) {
	if !debug {
		return
	}
	fmt.Print(p.pos.String() + ": ")
	fmt.Println(msg...)
	panic("wa-lang.org/wa/internal/printer")
}
// commentsHaveNewline reports whether a list of comments belonging to
// an *ast.CommentGroup contains newlines. Because the position information
// may only be partially correct, we also have to read the comment text.
func (p *printer) commentsHaveNewline(list []*ast.Comment) bool {
	// len(list) > 0
	line := p.lineFor(list[0].Pos())
	for i, c := range list {
		if i > 0 && p.lineFor(list[i].Pos()) != line {
			// not all comments on the same line
			return true
		}
		// a //-style comment or a /*-style comment containing a line
		// break forces a newline when printed
		if t := c.Text; len(t) >= 2 && (t[1] == '/' || strings.Contains(t, "\n")) {
			return true
		}
	}
	// NOTE: removed a leftover no-op "_ = line" blank assignment;
	// line is already used inside the loop above.
	return false
}
// nextComment advances to the next non-empty comment group and updates
// p.comment, p.commentOffset, and p.commentNewline. When no comment
// group remains, p.commentOffset is set to infinity.
func (p *printer) nextComment() {
	for p.cindex < len(p.comments) {
		group := p.comments[p.cindex]
		p.cindex++
		list := group.List
		if len(list) == 0 {
			// we should not reach here (correct ASTs don't have empty
			// ast.CommentGroup nodes), but be conservative and try again
			continue
		}
		p.comment = group
		p.commentOffset = p.posFor(list[0].Pos()).Offset
		p.commentNewline = p.commentsHaveNewline(list)
		return
	}
	// no more comments
	p.commentOffset = infinity
}
// commentBefore reports whether the current comment group occurs
// before the next position in the source code and printing it does
// not introduce implicit semicolons.
func (p *printer) commentBefore(next token.Position) bool {
	if p.commentOffset >= next.Offset {
		return false
	}
	return !p.impliedSemi || !p.commentNewline
}
// commentSizeBefore returns the estimated size (in bytes of comment
// text) of the comments on the same line before the next position.
func (p *printer) commentSizeBefore(next token.Position) int {
	// save/restore current p.commentInfo (p.nextComment() modifies it)
	saved := p.commentInfo
	defer func() { p.commentInfo = saved }()
	total := 0
	for p.commentBefore(next) {
		for _, c := range p.comment.List {
			total += len(c.Text)
		}
		p.nextComment()
	}
	return total
}
// recordLine records the output line number for the next non-whitespace
// token in *linePtr. It is used to compute an accurate line number for a
// formatted construct, independent of pending (not yet emitted) whitespace
// or comments.
//
func (p *printer) recordLine(linePtr *int) {
	// the pointer is consumed (and cleared) when the next token is written
	p.linePtr = linePtr
}
// linesFrom returns the number of output lines between the current
// output line and the line argument, ignoring any pending (not yet
// emitted) whitespace or comments. It is used to compute an accurate
// size (in number of lines) for a formatted construct.
//
func (p *printer) linesFrom(line int) int {
	return p.out.Line - line
}
// posFor converts a token.Pos to a relative token.Position.
func (p *printer) posFor(pos token.Pos) token.Position {
	// not used frequently enough to cache entire token.Position
	return p.fset.PositionFor(pos, false /* absolute position */)
}
// lineFor returns the (relative) line number for pos, caching the most
// recent lookup to speed up repeated queries for the same position.
func (p *printer) lineFor(pos token.Pos) int {
	if pos == p.cachedPos {
		return p.cachedLine
	}
	p.cachedPos = pos
	p.cachedLine = p.fset.PositionFor(pos, false /* absolute position */).Line
	return p.cachedLine
}
// writeLineDirective writes a //line directive if necessary, i.e. when
// pos is valid and the output position no longer matches it.
func (p *printer) writeLineDirective(pos token.Position) {
	if !pos.IsValid() {
		return
	}
	if p.out.Line == pos.Line && p.out.Filename == pos.Filename {
		return
	}
	// protect '\n' in //line from tabwriter interpretation
	p.output = append(p.output, tabwriter.Escape)
	p.output = append(p.output, fmt.Sprintf("//line %s:%d\n", pos.Filename, pos.Line)...)
	p.output = append(p.output, tabwriter.Escape)
	// p.out must match the //line directive
	p.out.Filename = pos.Filename
	p.out.Line = pos.Line
}
// writeIndent writes the current indentation as hard tabs and updates
// the position bookkeeping accordingly.
func (p *printer) writeIndent() {
	// use "hard" htabs - indentation columns
	// must not be discarded by the tabwriter
	depth := p.Config.Indent + p.indent // include base indentation
	for i := depth; i > 0; i-- {
		p.output = append(p.output, '\t')
	}
	// update positions
	p.pos.Offset += depth
	p.pos.Column += depth
	p.out.Column += depth
}
// writeByte writes ch n times to p.output and updates p.pos.
// Only used to write formatting (white space) characters.
func (p *printer) writeByte(ch byte, n int) {
	if p.endAlignment {
		// Ignore any alignment control character;
		// and at the end of the line, break with
		// a formfeed to indicate termination of
		// existing columns.
		switch ch {
		case '\t', '\v':
			ch = ' '
		case '\n', '\f':
			ch = '\f'
			p.endAlignment = false
		}
	}
	if p.out.Column == 1 {
		// no need to write line directives before white space
		p.writeIndent()
	}
	for i := 0; i < n; i++ {
		p.output = append(p.output, ch)
	}
	// update positions
	p.pos.Offset += n
	if ch == '\n' || ch == '\f' {
		// a line break advances the line count and resets the column
		p.pos.Line += n
		p.out.Line += n
		p.pos.Column = 1
		p.out.Column = 1
		return
	}
	p.pos.Column += n
	p.out.Column += n
}
// writeString writes the string s to p.output and updates p.pos, p.out,
// and p.last. If isLit is set, s is escaped w/ tabwriter.Escape characters
// to protect s from being interpreted by the tabwriter.
//
// Note: writeString is only used to write Go tokens, literals, and
// comments, all of which must be written literally. Thus, it is correct
// to always set isLit = true. However, setting it explicitly only when
// needed (i.e., when we don't know that s contains no tabs or line breaks)
// avoids processing extra escape characters and reduces run time of the
// printer benchmark by up to 10%.
//
func (p *printer) writeString(pos token.Position, s string, isLit bool) {
	if p.out.Column == 1 {
		// at the start of a line: emit a //line directive if requested,
		// then the current indentation
		if p.Config.Mode&SourcePos != 0 {
			p.writeLineDirective(pos)
		}
		p.writeIndent()
	}
	if pos.IsValid() {
		// update p.pos (if pos is invalid, continue with existing p.pos)
		// Note: Must do this after handling line beginnings because
		// writeIndent updates p.pos if there's indentation, but p.pos
		// is the position of s.
		p.pos = pos
	}
	if isLit {
		// Protect s such that is passes through the tabwriter
		// unchanged. Note that valid Go programs cannot contain
		// tabwriter.Escape bytes since they do not appear in legal
		// UTF-8 sequences.
		p.output = append(p.output, tabwriter.Escape)
	}
	if debug {
		p.output = append(p.output, fmt.Sprintf("/*%s*/", pos)...) // do not update p.pos!
	}
	p.output = append(p.output, s...)
	// update positions
	nlines := 0
	var li int // index of last newline; valid if nlines > 0
	for i := 0; i < len(s); i++ {
		// Raw string literals may contain any character except back quote (`).
		if ch := s[i]; ch == '\n' || ch == '\f' {
			// account for line break
			nlines++
			li = i
			// A line break inside a literal will break whatever column
			// formatting is in place; ignore any further alignment through
			// the end of the line.
			p.endAlignment = true
		}
	}
	p.pos.Offset += len(s)
	if nlines > 0 {
		p.pos.Line += nlines
		p.out.Line += nlines
		// column is measured from the character after the last newline
		c := len(s) - li
		p.pos.Column = c
		p.out.Column = c
	} else {
		p.pos.Column += len(s)
		p.out.Column += len(s)
	}
	if isLit {
		// closing escape: end of protected region
		p.output = append(p.output, tabwriter.Escape)
	}
	p.last = p.pos
}
// writeCommentPrefix writes the whitespace before a comment.
// If there is any pending whitespace, it consumes as much of
// it as is likely to help position the comment nicely.
// pos is the comment position, next the position of the item
// after all pending comments, prev is the previous comment in
// a group of comments (or nil), and tok is the next token.
//
func (p *printer) writeCommentPrefix(pos, next token.Position, prev *ast.Comment, tok token.Token) {
	if len(p.output) == 0 {
		// the comment is the first item to be printed - don't write any whitespace
		return
	}
	if pos.IsValid() && pos.Filename != p.last.Filename {
		// comment in a different file - separate with newlines
		p.writeByte('\f', maxNewlines)
		return
	}
	if pos.Line == p.last.Line && (prev == nil || prev.Text[1] != '/') {
		// comment on the same line as last item:
		// separate with at least one separator
		hasSep := false
		if prev == nil {
			// first comment of a comment group
			j := 0
			// scan pending whitespace and decide what to keep
			for i, ch := range p.wsbuf {
				switch ch {
				case blank:
					// ignore any blanks before a comment
					p.wsbuf[i] = ignore
					continue
				case vtab:
					// respect existing tabs - important
					// for proper formatting of commented structs
					hasSep = true
					continue
				case indent:
					// apply pending indentation
					continue
				}
				// stop at the first whitespace entry not handled above
				j = i
				break
			}
			p.writeWhitespace(j)
		}
		// make sure there is at least one separator
		if !hasSep {
			sep := byte('\t')
			if pos.Line == next.Line {
				// next item is on the same line as the comment
				// (which must be a /*-style comment): separate
				// with a blank instead of a tab
				sep = ' '
			}
			p.writeByte(sep, 1)
		}
	} else {
		// comment on a different line:
		// separate with at least one line break
		droppedLinebreak := false
		j := 0
		for i, ch := range p.wsbuf {
			switch ch {
			case blank, vtab:
				// ignore any horizontal whitespace before line breaks
				p.wsbuf[i] = ignore
				continue
			case indent:
				// apply pending indentation
				continue
			case unindent:
				// if this is not the last unindent, apply it
				// as it is (likely) belonging to the last
				// construct (e.g., a multi-line expression list)
				// and is not part of closing a block
				if i+1 < len(p.wsbuf) && p.wsbuf[i+1] == unindent {
					continue
				}
				// if the next token is not a closing }, apply the unindent
				// if it appears that the comment is aligned with the
				// token; otherwise assume the unindent is part of a
				// closing block and stop (this scenario appears with
				// comments before a case label where the comments
				// apply to the next case instead of the current one)
				if tok != token.RBRACE && pos.Column == next.Column {
					continue
				}
			case newline, formfeed:
				p.wsbuf[i] = ignore
				droppedLinebreak = prev == nil // record only if first comment of a group
			}
			j = i
			break
		}
		p.writeWhitespace(j)
		// determine number of linebreaks before the comment
		n := 0
		if pos.IsValid() && p.last.IsValid() {
			n = pos.Line - p.last.Line
			if n < 0 { // should never happen
				n = 0
			}
		}
		// at the package scope level only (p.indent == 0),
		// add an extra newline if we dropped one before:
		// this preserves a blank line before documentation
		// comments at the package scope level (issue 2570)
		if p.indent == 0 && droppedLinebreak {
			n++
		}
		// make sure there is at least one line break
		// if the previous comment was a line comment
		if n == 0 && prev != nil && prev.Text[1] == '/' {
			n = 1
		}
		if n > 0 {
			// use formfeeds to break columns before a comment;
			// this is analogous to using formfeeds to separate
			// individual lines of /*-style comments
			p.writeByte('\f', nlimit(n))
		}
	}
}
// isBlank reports whether s contains only white space
// (only tabs and blanks can appear in the printer's context).
func isBlank(s string) bool {
	for _, ch := range []byte(s) {
		if ch > ' ' {
			return false
		}
	}
	return true
}
// commonPrefix returns the common prefix of a and b, restricted to
// white space and '*' characters (the only characters that may form
// a comment-line prefix).
func commonPrefix(a, b string) string {
	n := 0
	for n < len(a) && n < len(b) {
		if a[n] != b[n] || (a[n] > ' ' && a[n] != '*') {
			break
		}
		n++
	}
	return a[:n]
}
// trimRight returns s with trailing whitespace removed
// (any rune for which unicode.IsSpace holds).
func trimRight(s string) string {
	return strings.TrimRightFunc(s, unicode.IsSpace)
}
// stripCommonPrefix removes a common prefix from /*-style comment lines (unless no
// comment line is indented, all but the first line have some form of space prefix).
// The prefix is computed using heuristics such that is likely that the comment
// contents are nicely laid out after re-printing each line using the printer's
// current indentation.
//
// lines is modified in place.
func stripCommonPrefix(lines []string) {
	if len(lines) <= 1 {
		return // at most one line - nothing to do
	}
	// len(lines) > 1
	// The heuristic in this function tries to handle a few
	// common patterns of /*-style comments: Comments where
	// the opening /* and closing */ are aligned and the
	// rest of the comment text is aligned and indented with
	// blanks or tabs, cases with a vertical "line of stars"
	// on the left, and cases where the closing */ is on the
	// same line as the last comment text.
	// Compute maximum common white prefix of all but the first,
	// last, and blank lines, and replace blank lines with empty
	// lines (the first line starts with /* and has no prefix).
	// In cases where only the first and last lines are not blank,
	// such as two-line comments, or comments where all inner lines
	// are blank, consider the last line for the prefix computation
	// since otherwise the prefix would be empty.
	//
	// Note that the first and last line are never empty (they
	// contain the opening /* and closing */ respectively) and
	// thus they can be ignored by the blank line check.
	prefix := ""
	prefixSet := false
	if len(lines) > 2 {
		for i, line := range lines[1 : len(lines)-1] {
			if isBlank(line) {
				lines[1+i] = "" // range starts with lines[1]
			} else {
				if !prefixSet {
					prefix = line
					prefixSet = true
				}
				prefix = commonPrefix(prefix, line)
			}
		}
	}
	// If we don't have a prefix yet, consider the last line.
	if !prefixSet {
		line := lines[len(lines)-1]
		// commonPrefix(line, line) yields line's own leading run of
		// white space and '*' characters
		prefix = commonPrefix(line, line)
	}
	/*
	 * Check for vertical "line of stars" and correct prefix accordingly.
	 */
	lineOfStars := false
	if i := strings.Index(prefix, "*"); i >= 0 {
		// Line of stars present.
		if i > 0 && prefix[i-1] == ' ' {
			i-- // remove trailing blank from prefix so stars remain aligned
		}
		prefix = prefix[0:i]
		lineOfStars = true
	} else {
		// No line of stars present.
		// Determine the white space on the first line after the /*
		// and before the beginning of the comment text, assume two
		// blanks instead of the /* unless the first character after
		// the /* is a tab. If the first comment line is empty but
		// for the opening /*, assume up to 3 blanks or a tab. This
		// whitespace may be found as suffix in the common prefix.
		first := lines[0]
		if isBlank(first[2:]) {
			// no comment text on the first line:
			// reduce prefix by up to 3 blanks or a tab
			// if present - this keeps comment text indented
			// relative to the /* and */'s if it was indented
			// in the first place
			i := len(prefix)
			for n := 0; n < 3 && i > 0 && prefix[i-1] == ' '; n++ {
				i--
			}
			if i == len(prefix) && i > 0 && prefix[i-1] == '\t' {
				i--
			}
			prefix = prefix[0:i]
		} else {
			// comment text on the first line
			suffix := make([]byte, len(first))
			n := 2 // start after opening /*
			for n < len(first) && first[n] <= ' ' {
				suffix[n] = first[n]
				n++
			}
			if n > 2 && suffix[2] == '\t' {
				// assume the '\t' compensates for the /*
				suffix = suffix[2:n]
			} else {
				// otherwise assume two blanks
				suffix[0], suffix[1] = ' ', ' '
				suffix = suffix[0:n]
			}
			// Shorten the computed common prefix by the length of
			// suffix, if it is found as suffix of the prefix.
			prefix = strings.TrimSuffix(prefix, string(suffix))
		}
	}
	// Handle last line: If it only contains a closing */, align it
	// with the opening /*, otherwise align the text with the other
	// lines.
	last := lines[len(lines)-1]
	closing := "*/"
	i := strings.Index(last, closing) // i >= 0 (closing is always present)
	if isBlank(last[0:i]) {
		// last line only contains closing */
		if lineOfStars {
			closing = " */" // add blank to align final star
		}
		lines[len(lines)-1] = prefix + closing
	} else {
		// last line contains more comment text - assume
		// it is aligned like the other lines and include
		// in prefix computation
		prefix = commonPrefix(prefix, last)
	}
	// Remove the common prefix from all but the first and empty lines.
	for i, line := range lines {
		if i > 0 && line != "" {
			lines[i] = line[len(prefix):]
		}
	}
}
// writeComment writes the single comment to the output. //-style
// comments are written as one string; /*-style comments are split
// into lines, their common indentation prefix is normalized via
// stripCommonPrefix, and the lines are written separated by formfeeds.
// A //line directive starting in column 1 is written with indentation
// suspended so that it remains a valid directive.
func (p *printer) writeComment(comment *ast.Comment) {
	text := comment.Text
	pos := p.posFor(comment.Pos())

	const linePrefix = "//line "
	if strings.HasPrefix(text, linePrefix) && (!pos.IsValid() || pos.Column == 1) {
		// Possibly a //-style line directive.
		// Suspend indentation temporarily to keep line directive valid.
		defer func(indent int) { p.indent = indent }(p.indent)
		p.indent = 0
	}

	// shortcut common case of //-style comments
	if text[1] == '/' {
		p.writeString(pos, trimRight(text), true)
		return
	}

	// for /*-style comments, print line by line and let the
	// write function take care of the proper indentation
	lines := strings.Split(text, "\n")

	// The comment started in the first column but is going
	// to be indented. For an idempotent result, add indentation
	// to all lines such that they look like they were indented
	// before - this will make sure the common prefix computation
	// is the same independent of how many times formatting is
	// applied (was issue 1835).
	// NOTE(review): upstream go/printer prepends three blanks here to
	// mirror stripCommonPrefix's "up to 3 blanks" rule — verify this
	// single blank is intentional.
	if pos.IsValid() && pos.Column == 1 && p.indent > 0 {
		for i, line := range lines[1:] {
			lines[1+i] = " " + line
		}
	}

	stripCommonPrefix(lines)

	// write comment lines, separated by formfeed,
	// without a line break after the last line
	for i, line := range lines {
		if i > 0 {
			p.writeByte('\f', 1)
			pos = p.pos
		}
		if len(line) > 0 {
			p.writeString(pos, trimRight(line), true)
		}
	}
}
// writeCommentSuffix writes a line break after a comment if indicated
// and processes any leftover indentation information. If a line break
// is needed, the kind of break (newline vs formfeed) depends on the
// pending whitespace. The writeCommentSuffix result indicates if a
// newline was written or if a formfeed was dropped from the whitespace
// buffer.
func (p *printer) writeCommentSuffix(needsLinebreak bool) (wroteNewline, droppedFF bool) {
	for i := range p.wsbuf {
		switch ws := p.wsbuf[i]; ws {
		case blank, vtab:
			// trailing horizontal whitespace is never needed here
			p.wsbuf[i] = ignore
		case indent, unindent:
			// indentation information must be preserved
		case newline, formfeed:
			if !needsLinebreak {
				// surplus break: drop it, but remember dropped formfeeds
				if ws == formfeed {
					droppedFF = true
				}
				p.wsbuf[i] = ignore
				break
			}
			// keep exactly one pending break to satisfy the request
			needsLinebreak = false
			wroteNewline = true
		}
	}
	p.writeWhitespace(len(p.wsbuf))

	// if no pending break satisfied the request, emit one now
	if needsLinebreak {
		p.writeByte('\n', 1)
		wroteNewline = true
	}
	return
}
// containsLinebreak reports whether the whitespace buffer contains any line breaks.
func (p *printer) containsLinebreak() bool {
	for _, ws := range p.wsbuf {
		switch ws {
		case newline, formfeed:
			return true
		}
	}
	return false
}
// intersperseComments consumes all comments that appear before the next token
// tok and prints it together with the buffered whitespace (i.e., the whitespace
// that needs to be written before the next token). A heuristic is used to mix
// the comments and whitespace. The intersperseComments result indicates if a
// newline was written or if a formfeed was dropped from the whitespace buffer.
func (p *printer) intersperseComments(next token.Position, tok token.Token) (wroteNewline, droppedFF bool) {
	var last *ast.Comment
	// drain all comment groups positioned before the next token
	for p.commentBefore(next) {
		for _, c := range p.comment.List {
			p.writeCommentPrefix(p.posFor(c.Pos()), next, last, tok)
			p.writeComment(c)
			last = c
		}
		p.nextComment()
	}

	if last != nil {
		// If the last comment is a /*-style comment and the next item
		// follows on the same line but is not a comma, and not a "closing"
		// token immediately following its corresponding "opening" token,
		// add an extra separator unless explicitly disabled. Use a blank
		// as separator unless we have pending linebreaks, they are not
		// disabled, and we are outside a composite literal, in which case
		// we want a linebreak (issue 15137).
		// TODO(gri) This has become overly complicated. We should be able
		// to track whether we're inside an expression or statement and
		// use that information to decide more directly.
		needsLinebreak := false
		if p.mode&noExtraBlank == 0 &&
			last.Text[1] == '*' && p.lineFor(last.Pos()) == next.Line &&
			tok != token.COMMA &&
			(tok != token.RPAREN || p.prevOpen == token.LPAREN) &&
			(tok != token.RBRACK || p.prevOpen == token.LBRACK) {
			if p.containsLinebreak() && p.mode&noExtraLinebreak == 0 && p.level == 0 {
				needsLinebreak = true
			} else {
				p.writeByte(' ', 1)
			}
		}
		// Ensure that there is a line break after a //-style comment,
		// before EOF, and before a closing '}' unless explicitly disabled.
		if last.Text[1] == '/' ||
			tok == token.EOF ||
			tok == token.RBRACE && p.mode&noExtraLinebreak == 0 {
			needsLinebreak = true
		}
		return p.writeCommentSuffix(needsLinebreak)
	}

	// no comment was written - we should never reach here since
	// intersperseComments should not be called in that case
	p.internalError("intersperseComments called without pending comments")
	return
}
// writeWhitespace writes the first n entries of the whitespace buffer
// and shifts the remaining entries down. Indent/unindent entries only
// adjust p.indent; line breaks immediately followed by an unindent are
// swapped so labels are positioned correctly.
func (p *printer) writeWhitespace(n int) {
	// write entries
	for i := 0; i < n; i++ {
		switch ch := p.wsbuf[i]; ch {
		case ignore:
			// ignore!
		case indent:
			p.indent++
		case unindent:
			p.indent--
			if p.indent < 0 {
				p.internalError("negative indentation:", p.indent)
				p.indent = 0
			}
		case newline, formfeed:
			// A line break immediately followed by a "correcting"
			// unindent is swapped with the unindent - this permits
			// proper label positioning. If a comment is between
			// the line break and the label, the unindent is not
			// part of the comment whitespace prefix and the comment
			// will be positioned correctly indented.
			if i+1 < n && p.wsbuf[i+1] == unindent {
				// Use a formfeed to terminate the current section.
				// Otherwise, a long label name on the next line leading
				// to a wide column may increase the indentation column
				// of lines before the label; effectively leading to wrong
				// indentation.
				p.wsbuf[i], p.wsbuf[i+1] = unindent, formfeed
				i-- // do it again
				continue
			}
			fallthrough
		default:
			p.writeByte(byte(ch), 1)
		}
	}

	// shift remaining entries down
	l := copy(p.wsbuf, p.wsbuf[n:])
	p.wsbuf = p.wsbuf[:l]
}
// ----------------------------------------------------------------------------
// Printing interface
// nlimit caps n at maxNewlines.
func nlimit(n int) int {
	if n > maxNewlines {
		return maxNewlines
	}
	return n
}
func mayCombine(prev token.Token, next byte) (b bool) {
switch prev {
case token.INT:
b = next == '.' // 1.
case token.ADD:
b = next == '+' // ++
case token.SUB:
b = next == '-' // --
case token.QUO:
b = next == '*' // /*
case token.LSS:
b = next == '-' || next == '<' // <- or <<
case token.AND:
b = next == '&' || next == '^' // && or &^
}
return
}
// print prints a list of "items" (roughly corresponding to syntactic
// tokens, but also including whitespace and formatting information).
// It is the only print function that should be called directly from
// any of the AST printing functions in nodes.go.
//
// Whitespace is accumulated until a non-whitespace token appears. Any
// comments that need to appear before that token are printed first,
// taking into account the amount and structure of any pending white-
// space for best comment placement. Then, any leftover whitespace is
// printed, followed by the actual token.
func (p *printer) print(args ...interface{}) {
	for _, arg := range args {
		// information about the current arg
		var data string
		var isLit bool
		var impliedSemi bool // value for p.impliedSemi after this arg

		// record previous opening token, if any
		switch p.lastTok {
		case token.ILLEGAL:
			// ignore (white space)
		case token.LPAREN, token.LBRACK:
			p.prevOpen = p.lastTok
		default:
			// other tokens followed any opening token
			p.prevOpen = token.ILLEGAL
		}

		switch x := arg.(type) {
		case pmode:
			// toggle printer mode
			p.mode ^= x
			continue

		case whiteSpace:
			if x == ignore {
				// don't add ignore's to the buffer; they
				// may screw up "correcting" unindents (see
				// LabeledStmt)
				continue
			}
			i := len(p.wsbuf)
			if i == cap(p.wsbuf) {
				// Whitespace sequences are very short so this should
				// never happen. Handle gracefully (but possibly with
				// bad comment placement) if it does happen.
				p.writeWhitespace(i)
				i = 0
			}
			p.wsbuf = p.wsbuf[0 : i+1]
			p.wsbuf[i] = x
			if x == newline || x == formfeed {
				// newlines affect the current state (p.impliedSemi)
				// and not the state after printing arg (impliedSemi)
				// because comments can be interspersed before the arg
				// in this case
				p.impliedSemi = false
			}
			p.lastTok = token.ILLEGAL
			continue

		case *ast.Ident:
			data = x.Name
			impliedSemi = true
			p.lastTok = token.IDENT

		case *ast.BasicLit:
			data = x.Value
			isLit = true
			impliedSemi = true
			p.lastTok = x.Kind

		case token.Token:
			s := x.String()
			if mayCombine(p.lastTok, s[0]) {
				// the previous and the current token must be
				// separated by a blank otherwise they combine
				// into a different incorrect token sequence
				// (except for token.INT followed by a '.' this
				// should never happen because it is taken care
				// of via binary expression formatting)
				if len(p.wsbuf) != 0 {
					p.internalError("whitespace buffer not empty")
				}
				p.wsbuf = p.wsbuf[0:1]
				p.wsbuf[0] = ' '
			}
			data = s
			// some keywords followed by a newline imply a semicolon
			switch x {
			case token.BREAK, token.CONTINUE, token.RETURN,
				token.INC, token.DEC, token.RPAREN, token.RBRACK, token.RBRACE:
				impliedSemi = true
			}
			p.lastTok = x

		case token.Pos:
			if x.IsValid() {
				p.pos = p.posFor(x) // accurate position of next item
			}
			continue

		case string:
			// incorrect AST - print error message
			data = x
			isLit = true
			impliedSemi = true
			p.lastTok = token.STRING

		default:
			fmt.Fprintf(os.Stderr, "print: unsupported argument %v (%T)\n", arg, arg)
			panic("wa-lang.org/wa/internal/printer type")
		}
		// data != ""

		next := p.pos // estimated/accurate position of next item
		wroteNewline, droppedFF := p.flush(next, p.lastTok)

		// intersperse extra newlines if present in the source and
		// if they don't cause extra semicolons (don't do this in
		// flush as it will cause extra newlines at the end of a file)
		if !p.impliedSemi {
			n := nlimit(next.Line - p.pos.Line)
			// don't exceed maxNewlines if we already wrote one
			if wroteNewline && n == maxNewlines {
				n = maxNewlines - 1
			}
			if n > 0 {
				ch := byte('\n')
				if droppedFF {
					ch = '\f' // use formfeed since we dropped one before
				}
				p.writeByte(ch, n)
				impliedSemi = false
			}
		}

		// the next token starts now - record its line number if requested
		if p.linePtr != nil {
			*p.linePtr = p.out.Line
			p.linePtr = nil
		}

		p.writeString(next, data, isLit)
		p.impliedSemi = impliedSemi
	}
}
// flush prints any pending comments and whitespace occurring textually
// before the position of the next token tok. The flush result indicates
// if a newline was written or if a formfeed was dropped from the whitespace
// buffer.
func (p *printer) flush(next token.Position, tok token.Token) (wroteNewline, droppedFF bool) {
	if !p.commentBefore(next) {
		// no comments are due - just write any leftover whitespace
		p.writeWhitespace(len(p.wsbuf))
		return
	}
	// comments appear before the next item: intersperse them
	return p.intersperseComments(next, tok)
}
// getNode returns the ast.CommentGroup associated with n, if any.
func getDoc(n ast.Node) *ast.CommentGroup {
switch n := n.(type) {
case *ast.Field:
return n.Doc
case *ast.ImportSpec:
return n.Doc
case *ast.ValueSpec:
return n.Doc
case *ast.TypeSpec:
return n.Doc
case *ast.GenDecl:
return n.Doc
case *ast.FuncDecl:
return n.Doc
case *ast.File:
return n.Doc
}
return nil
}
func getLastComment(n ast.Node) *ast.CommentGroup {
switch n := n.(type) {
case *ast.Field:
return n.Comment
case *ast.ImportSpec:
return n.Comment
case *ast.ValueSpec:
return n.Comment
case *ast.TypeSpec:
return n.Comment
case *ast.GenDecl:
if len(n.Specs) > 0 {
return getLastComment(n.Specs[len(n.Specs)-1])
}
case *ast.File:
if len(n.Comments) > 0 {
return n.Comments[len(n.Comments)-1]
}
}
return nil
}
// printNode formats the given node, which must be an *ast.File,
// *CommentedNode, []ast.Decl, []ast.Stmt, or assignment-compatible
// to ast.Expr, ast.Decl, ast.Spec, or ast.Stmt. It returns an error
// for unsupported node types.
//
// Fix: the two "goto unsupported" statements had been commented out,
// which (a) leaves the "unsupported:" label defined but unused — a
// compile error in Go — and (b) makes the unsupported-type error path
// unreachable, so a *CommentedNode wrapping a non-ast.Node value would
// fall through and call n.Pos() on a nil interface. The gotos are
// restored (matching upstream go/printer).
func (p *printer) printNode(node interface{}) error {
	// unpack *CommentedNode, if any
	var comments []*ast.CommentGroup
	if cnode, ok := node.(*CommentedNode); ok {
		node = cnode.Node
		comments = cnode.Comments
	}

	if comments != nil {
		// commented node - restrict comment list to relevant range
		n, ok := node.(ast.Node)
		if !ok {
			goto unsupported
		}
		beg := n.Pos()
		end := n.End()
		// if the node has associated documentation,
		// include that commentgroup in the range
		// (the comment list is sorted in the order
		// of the comment appearance in the source code)
		if doc := getDoc(n); doc != nil {
			beg = doc.Pos()
		}
		if com := getLastComment(n); com != nil {
			if e := com.End(); e > end {
				end = e
			}
		}
		// token.Pos values are global offsets, we can
		// compare them directly
		i := 0
		for i < len(comments) && comments[i].End() < beg {
			i++
		}
		j := i
		for j < len(comments) && comments[j].Pos() < end {
			j++
		}
		if i < j {
			p.comments = comments[i:j]
		}
	} else if n, ok := node.(*ast.File); ok {
		// use ast.File comments, if any
		p.comments = n.Comments
	}

	// if there are no comments, use node comments
	p.useNodeComments = p.comments == nil

	// get comments ready for use
	p.nextComment()

	// format node
	switch n := node.(type) {
	case ast.Expr:
		p.expr(n)
	case ast.Stmt:
		// A labeled statement will un-indent to position the label.
		// Set p.indent to 1 so we don't get indent "underflow".
		if _, ok := n.(*ast.LabeledStmt); ok {
			p.indent = 1
		}
		p.stmt(n, false)
	case ast.Decl:
		p.decl(n)
	case ast.Spec:
		p.spec(n, 1, false)
	case []ast.Stmt:
		// A labeled statement will un-indent to position the label.
		// Set p.indent to 1 so we don't get indent "underflow".
		for _, s := range n {
			if _, ok := s.(*ast.LabeledStmt); ok {
				p.indent = 1
			}
		}
		p.stmtList(n, 0, false)
	case []ast.Decl:
		p.declList(n)
	case *ast.File:
		p.file(n)
	default:
		goto unsupported
	}

	return nil

unsupported:
	return fmt.Errorf("wa-lang.org/wa/internal/printer: unsupported node type %T", node)
}
// ----------------------------------------------------------------------------
// Trimmer
// A trimmer is an io.Writer filter for stripping tabwriter.Escape
// characters, trailing blanks and tabs, and for converting formfeed
// and vtab characters into newlines and htabs (in case no tabwriter
// is used). Text bracketed by tabwriter.Escape characters is passed
// through unchanged.
//
type trimmer struct {
output io.Writer
state int
space []byte
}
// trimmer is implemented as a state machine.
// It can be in one of the following states:
const (
inSpace = iota // inside space
inEscape // inside text bracketed by tabwriter.Escapes
inText // inside text
)
func (p *trimmer) resetSpace() {
p.state = inSpace
p.space = p.space[0:0]
}
// Design note: It is tempting to eliminate extra blanks occurring in
// whitespace in this function as it could simplify some
// of the blanks logic in the node printing functions.
// However, this would mess up any formatting done by
// the tabwriter.
var aNewline = []byte("\n")
func (p *trimmer) Write(data []byte) (n int, err error) {
// invariants:
// p.state == inSpace:
// p.space is unwritten
// p.state == inEscape, inText:
// data[m:n] is unwritten
m := 0
var b byte
for n, b = range data {
if b == '\v' {
b = '\t' // convert to htab
}
switch p.state {
case inSpace:
switch b {
case '\t', ' ':
p.space = append(p.space, b)
case '\n', '\f':
p.resetSpace() // discard trailing space
_, err = p.output.Write(aNewline)
case tabwriter.Escape:
_, err = p.output.Write(p.space)
p.state = inEscape
m = n + 1 // +1: skip tabwriter.Escape
default:
_, err = p.output.Write(p.space)
p.state = inText
m = n
}
case inEscape:
if b == tabwriter.Escape {
_, err = p.output.Write(data[m:n])
p.resetSpace()
}
case inText:
switch b {
case '\t', ' ':
_, err = p.output.Write(data[m:n])
p.resetSpace()
p.space = append(p.space, b)
case '\n', '\f':
_, err = p.output.Write(data[m:n])
p.resetSpace()
if err == nil {
_, err = p.output.Write(aNewline)
}
case tabwriter.Escape:
_, err = p.output.Write(data[m:n])
p.state = inEscape
m = n + 1 // +1: skip tabwriter.Escape
}
default:
panic("unreachable")
}
if err != nil {
return
}
}
n = len(data)
switch p.state {
case inEscape, inText:
_, err = p.output.Write(data[m:n])
p.resetSpace()
}
return
}
// ----------------------------------------------------------------------------
// Public interface
// A Mode value is a set of flags (or 0). They control printing.
// Flags may be combined with bitwise OR.
type Mode uint

const (
	RawFormat Mode = 1 << iota // do not use a tabwriter; if set, UseSpaces is ignored
	TabIndent                  // use tabs for indentation independent of UseSpaces
	UseSpaces                  // use spaces instead of tabs for alignment
	SourcePos                  // emit //line directives to preserve original source positions
)

// A Config node controls the output of Fprint.
type Config struct {
	Mode     Mode // default: 0
	Tabwidth int  // default: 8
	Indent   int  // default: 0 (all code is indented at least by this much)
}
// fprint implements Fprint and takes a nodeSizes map for setting up the printer state.
// It prints the node into an internal buffer, then pipes that buffer through a
// trimmer (and, unless RawFormat is set, a tabwriter) into output.
func (cfg *Config) fprint(output io.Writer, fset *token.FileSet, node interface{}, nodeSizes map[ast.Node]int) (err error) {
	// print node
	var p printer
	p.init(cfg, fset, nodeSizes)
	if err = p.printNode(node); err != nil {
		return
	}
	// print outstanding comments
	p.impliedSemi = false // EOF acts like a newline
	p.flush(token.Position{Offset: infinity, Line: infinity}, token.EOF)

	// redirect output through a trimmer to eliminate trailing whitespace
	// (Input to a tabwriter must be untrimmed since trailing tabs provide
	// formatting information. The tabwriter could provide trimming
	// functionality but no tabwriter is used when RawFormat is set.)
	output = &trimmer{output: output}

	// redirect output through a tabwriter if necessary
	if cfg.Mode&RawFormat == 0 {
		minwidth := cfg.Tabwidth

		padchar := byte('\t')
		if cfg.Mode&UseSpaces != 0 {
			padchar = ' '
		}

		twmode := tabwriter.DiscardEmptyColumns
		if cfg.Mode&TabIndent != 0 {
			minwidth = 0
			twmode |= tabwriter.TabIndent
		}

		output = tabwriter.NewWriter(output, minwidth, cfg.Tabwidth, 1, padchar, twmode)
	}

	// write printer result via tabwriter/trimmer to output
	if _, err = output.Write(p.output); err != nil {
		return
	}

	// flush tabwriter, if any
	if tw, _ := output.(*tabwriter.Writer); tw != nil {
		err = tw.Flush()
	}
	return
}
// A CommentedNode bundles an AST node and corresponding comments.
// It may be provided as argument to any of the Fprint functions.
type CommentedNode struct {
	Node     interface{}          // *ast.File, or ast.Expr, ast.Decl, ast.Spec, or ast.Stmt
	Comments []*ast.CommentGroup  // comments to consider; restricted to Node's source range when printing
}
// Fprint "pretty-prints" an AST node to output for a given configuration cfg.
// Position information is interpreted relative to the file set fset.
// The node type must be *ast.File, *CommentedNode, []ast.Decl, []ast.Stmt,
// or assignment-compatible to ast.Expr, ast.Decl, ast.Spec, or ast.Stmt.
func (cfg *Config) Fprint(output io.Writer, fset *token.FileSet, node interface{}) error {
	// each top-level call gets a fresh nodeSizes cache
	return cfg.fprint(output, fset, node, make(map[ast.Node]int))
}
// Fprint "pretty-prints" an AST node to output.
// It calls Config.Fprint with default settings (tab width 8, no modes).
// Note that gofmt uses tabs for indentation but spaces for alignment;
// use format.Node (package github.com/wa-lang/wa/internal/format) for output that matches gofmt.
func Fprint(output io.Writer, fset *token.FileSet, node interface{}) error {
	return (&Config{Tabwidth: 8}).Fprint(output, fset, node)
}
......@@ -164,6 +164,7 @@ var tokens = [...]elt{
{token.FOR, "for", keyword},
{token.FUNC, "func", keyword},
{token.GLOBAL, "global", keyword},
{token.IF, "if", keyword},
{token.IMPORT, "import", keyword},
......
......@@ -1722,7 +1722,7 @@ start:
case *ast.DeclStmt: // Con, Var or Typ
d := s.Decl.(*ast.GenDecl)
if d.Tok == token.VAR {
if d.Tok == token.VAR || d.Tok == token.GLOBAL {
for _, spec := range d.Specs {
if vs, ok := spec.(*ast.ValueSpec); ok {
b.localValueSpec(fn, vs)
......
......@@ -126,7 +126,7 @@ func membersFromDecl(pkg *Package, decl ast.Decl) {
}
}
case token.VAR:
case token.VAR, token.GLOBAL:
for _, spec := range decl.Specs {
for _, id := range spec.(*ast.ValueSpec).Names {
if !isBlankIdent(id) {
......
......@@ -77,7 +77,7 @@ func findEnclosingPackageLevelFunction(pkg *Package, path []ast.Node) *Function
if n := len(path); n >= 2 { // [... {Gen,Func}Decl File]
switch decl := path[n-2].(type) {
case *ast.GenDecl:
if decl.Tok == token.VAR && n >= 3 {
if (decl.Tok == token.VAR || decl.Tok == token.GLOBAL) && n >= 3 {
// Package-level 'var' initializer.
return pkg.init
}
......
......@@ -615,7 +615,7 @@ func (check *Checker) declStmt(decl ast.Decl) {
check.declare(check.scope, name, lhs[i], scopePos)
}
case token.VAR:
case token.VAR, token.GLOBAL:
top := len(check.delayed)
lhs0 := make([]*Var, len(s.Names))
......
......@@ -124,7 +124,7 @@ func (check *Checker) blockBranches(all *Scope, parent *block, lstmt *ast.Labele
stmtBranches = func(s ast.Stmt) {
switch s := s.(type) {
case *ast.DeclStmt:
if d, _ := s.Decl.(*ast.GenDecl); d != nil && d.Tok == token.VAR {
if d, _ := s.Decl.(*ast.GenDecl); d != nil && (d.Tok == token.VAR || d.Tok == token.GLOBAL) {
recordVarDecl(d.Pos())
}
......
......@@ -356,7 +356,7 @@ func (check *Checker) collectObjects() {
check.arityMatch(s, last)
case token.VAR:
case token.VAR, token.GLOBAL:
lhs := make([]*Var, len(s.Names))
// If there's exactly one rhs initializer, use
// the same declInfo d1 for all lhs variables
......
// 版权 @2023 凹语言 作者。保留所有权利。
package wamime
import (
"strings"
"wa-lang.org/wa/internal/scanner"
"wa-lang.org/wa/internal/token"
)
const mimePrefix0 = "#syntax="    // legacy syntax, kept for backward compatibility
const mimePrefix = "#wa:syntax=" // #wa:syntax=wa, #wa:syntax=wz

// GetCodeMime reports the syntax dialect ("wa", "wz", ...) of the given
// source code. Leading comments are scanned for a "#wa:syntax=" (or the
// legacy "#syntax=") directive first; if no directive is found, the
// filename extension is used. It returns "" when the dialect cannot be
// determined.
//
// Fix: the two prefix branches were duplicated and are now a single
// loop; the extension fallback used strings.LastIndex over the whole
// path, so a dot in a directory name (e.g. "dir.x/file") was mistaken
// for an extension — the scan now stops at the last path separator.
func GetCodeMime(filename string, code []byte) string {
	var s scanner.Scanner
	fset := token.NewFileSet()
	file := fset.AddFile(filename, fset.Base(), len(code))
	s.Init(file, code, nil, scanner.ScanComments)

	// scan the leading comments for a syntax directive; the first
	// non-comment token ends the search
	for {
		_, tok, lit := s.Scan()
		if tok != token.COMMENT {
			break
		}
		// check the new form first, then the legacy one; the prefixes
		// are mutually exclusive, and an empty value means "unknown"
		for _, prefix := range []string{mimePrefix, mimePrefix0} {
			if strings.HasPrefix(lit, prefix) {
				return lit[len(prefix):]
			}
		}
	}

	// fall back to the filename extension; look only at the base name
	// so a dot in a directory name is not treated as an extension
	// (i > 0 keeps the original behavior of ignoring a leading dot)
	for i := len(filename) - 1; i > 0; i-- {
		switch filename[i] {
		case '.':
			return filename[i+1:]
		case '/', '\\':
			return ""
		}
	}

	// unknown type
	return ""
}
// 版权 @2023 凹语言 作者。保留所有权利。
package wamime
import (
"testing"
)
// TestGetCodeMime runs GetCodeMime over the table-driven cases in tests.
func TestGetCodeMime(t *testing.T) {
	for i, tt := range tests {
		if got := GetCodeMime(tt.filename, []byte(tt.code)); got != tt.mime {
			t.Fatalf("%d: expect =%q, got = %q", i, tt.mime, got)
		}
	}
}
// tests lists (filename, source code, expected mime) cases covering
// both directive-based detection (#wa:syntax=) and the filename
// extension fallback.
var tests = []struct {
	filename string
	code     string
	mime     string
}{
	{"-", "", ""},
	{"prog.wa", "", "wa"},
	{"prog.wz", "", "wz"},
	{"x.wa", "#", "wa"},
	{"", "#wa:syntax=wx", "wx"},
	{
		"",
		`// 版权 @2019 凹语言 作者。保留所有权利。

#wa:syntax=wa

import "fmt"
import "runtime"

global year: i32 = 2023

func main {
	println("你好,凹语言!", runtime.WAOS)
	println(add(40, 2), year)
	fmt.Println("1+1 =", 1+1)
}

func add(a: i32, b: i32) => i32 {
	return a+b
}
`,
		"wa",
	},
	{
		"",
		`// 版权 @2019 凹语言 作者。保留所有权利。

#wa:syntax=wz

引于 "书"

【启】:
  书·说:"你好,凹语言中文版!"
`,
		"wz",
	},
}
......@@ -156,6 +156,14 @@ func (p *Module) buildModule() error {
p.wazeroInitErr = err
return err
}
case config.WaOS_mvp:
if _, err = MvpInstantiate(p.wazeroCtx, p.wazeroRuntime); err != nil {
p.wazeroInitErr = err
return err
}
default:
return fmt.Errorf("unknown waos: %q", p.cfg.WaOS)
}
return nil
......
// 版权 @2022 凹语言 作者。保留所有权利。
package wazero
import (
"context"
"fmt"
"github.com/tetratelabs/wazero"
"github.com/tetratelabs/wazero/api"
"wa-lang.org/wa/internal/config"
)
// MvpInstantiate registers the host print functions required by the
// "mvp" target OS (waPrintI32/U32/I64/U64/F32/F64/Rune and waPuts)
// into a host module named config.WaOS_mvp and instantiates it on the
// given wazero runtime.
func MvpInstantiate(ctx context.Context, rt wazero.Runtime) (api.Closer, error) {
	b := rt.NewHostModuleBuilder(config.WaOS_mvp)

	// func waPrintI32(v: i32)
	b = b.NewFunctionBuilder().
		WithFunc(func(ctx context.Context, v int32) { fmt.Print(v) }).
		WithParameterNames("v").
		Export("waPrintI32")

	// func waPrintU32(v: u32)
	b = b.NewFunctionBuilder().
		WithFunc(func(ctx context.Context, v uint32) { fmt.Print(v) }).
		WithParameterNames("v").
		Export("waPrintU32")

	// func waPrintI64(v: i64)
	b = b.NewFunctionBuilder().
		WithFunc(func(ctx context.Context, v int64) { fmt.Print(v) }).
		WithParameterNames("v").
		Export("waPrintI64")

	// func waPrintU64(v: u64)
	b = b.NewFunctionBuilder().
		WithFunc(func(ctx context.Context, v uint64) { fmt.Print(v) }).
		WithParameterNames("v").
		Export("waPrintU64")

	// func waPrintF32(v: f32)
	b = b.NewFunctionBuilder().
		WithFunc(func(ctx context.Context, v float32) { fmt.Print(v) }).
		WithParameterNames("v").
		Export("waPrintF32")

	// func waPrintF64(v: f64)
	b = b.NewFunctionBuilder().
		WithFunc(func(ctx context.Context, v float64) { fmt.Print(v) }).
		WithParameterNames("v").
		Export("waPrintF64")

	// func waPrintRune(ch: i32)
	b = b.NewFunctionBuilder().
		WithFunc(func(ctx context.Context, ch uint32) { fmt.Printf("%c", rune(ch)) }).
		WithParameterNames("ch").
		Export("waPrintRune")

	// func waPuts(ptr: i32, len: i32)
	b = b.NewFunctionBuilder().
		WithFunc(func(ctx context.Context, m api.Module, ptr, len uint32) {
			bytes, _ := m.Memory().Read(ctx, ptr, len)
			fmt.Print(string(bytes))
		}).
		WithParameterNames("ptr", "len").
		Export("waPuts")

	// Done
	return b.Instantiate(ctx, rt)
}
v0.8.0-dev
\ No newline at end of file
......@@ -3,6 +3,9 @@
default:
ci-test-all:
@echo "== fmt examples/... =="
go run ../../main.go fmt ./...
@echo "== examples test begin =="
# loop forever
......@@ -12,6 +15,7 @@ ci-test-all:
cd ./brainfuck && make
cd ./expr && make
cd ./hello && make
cd ./misc && make
cd ./pkg && make
cd ./prime && make
cd ./reftoptr && make
......
......@@ -8,10 +8,10 @@ func main() {
}
type BrainFuck struct {
mem :[30000]byte
code :string
pos :int
pc :int
mem: [30000]byte
code: string
pos: int
pc: int
}
func NewBrainFuck(code: string) => *BrainFuck {
......
......@@ -2,10 +2,10 @@
// BF 虚拟机
type BrainFuck struct {
mem :[30000]byte
code :string
pos :int
pc :int
mem: [30000]byte
code: string
pos: int
pc: int
}
func NewBrainFuck(code: string) => *BrainFuck {
......
// 版权 @2021 凹语言 作者。保留所有权利。
var s = []int{1, 2, 3, 4, 5, 6}
func main() {
d := make([]int, 3)
copy(d, s)
for i, v := range d {
println("d[", i, "]=", v)
}
a := s[0:4]
a := s[0:4]
b := s[1:5]
copy(a, b)
for i, v := range s {
println("s[", i, "]=", v)
}
copy(b, a)
for i, v := range s {
println("s[", i, "]=", v)
}
}
\ No newline at end of file
}
......@@ -7,9 +7,9 @@ func main {
println("你好,凹语言!", runtime.WAOS)
println(add(40, 2))
fmt.Println(1+1)
fmt.Println(1 + 1)
}
func add(a: i32, b: i32) => i32 {
return a+b
return a + b
}
......@@ -5,7 +5,7 @@ type T1 struct {
b: string
}
func T1.print(){
func T1.print() {
println("a: ", this.a)
}
......@@ -18,8 +18,8 @@ type T2 struct {
}
func main() {
v1, v2: T1
var v1, v2: T1
v1.a = 13
v2.a = 42
if v1 == v2 {
......@@ -27,21 +27,21 @@ func main() {
} else {
println("ne")
}
v2.a = 13
if v1 == v2 {
println("eq")
} else {
println("ne")
}
v1.b = "abc"
if v1 == v2 {
println("eq")
} else {
println("ne")
}
v2.b = "abc"
if v1 == v2 {
println("eq")
......@@ -49,20 +49,20 @@ func main() {
println("ne")
}
i1, i2: interface{}
var i1, i2: interface{}
i1 = "abc"
if i1 == nil{
if i1 == nil {
println("i1 == nil:eq")
} else {
println("i1 == nil:ne")
}
i1 = nil
if i1 == nil{
if i1 == nil {
println("i1 == nil:eq")
} else {
println("i1 == nil:ne")
}
}
i1 = i32(13)
i2 = i32(42)
if i1 == i2 {
......@@ -76,20 +76,20 @@ func main() {
} else {
println("ne")
}
i2 = "abc"
if i1 == i2 {
println("eq")
} else {
println("ne")
}
i1 = "abc"
i1 = "abc"
if i1 == i2 {
println("eq")
} else {
println("ne")
}
i1 = v1
if i1 == i2 {
println("eq")
......@@ -102,35 +102,33 @@ func main() {
} else {
println("ne")
}
i3: I
var i3: I
i3 = &v1
if i1 == i3 {
if i1 == i3 {
println("eq")
} else {
println("ne")
}
i1 = &v1
if i1 == i3 {
if i1 == i3 {
println("eq")
} else {
println("ne")
}
v3, v4: T2
var v3, v4: T2
//if v3 == v4 {
// println("eq")
//} else {
// println("ne")
//}
i1 = v3
i2 = v4
if i1 == i2 { //panic
if i1 == i2 { //panic
println("eq")
} else {
println("ne")
}
}
......@@ -7,13 +7,13 @@
import "strconv" => __yystrconv__
type exprSymType struct {
yys :int
num :int
yys: int
num: int
}
const NUM = 57346
var exprToknames = [...]string{
global exprToknames = [...]string{
"$end",
"error",
"$unk",
......@@ -26,7 +26,7 @@ var exprToknames = [...]string{
"NUM",
}
var exprStatenames = [...]string{}
global exprStatenames = [...]string{}
const exprEofCode = 1
const exprErrCode = 2
......@@ -36,13 +36,13 @@ const exprInitialStackSize = 16
const eof = 0
type exprToken struct {
Kind :int
Value :int
Kind: int
Value: int
}
type exprLexer struct {
tokens :[]exprToken
pos :int
tokens: []exprToken
pos: int
}
func exprLexer.Lex(yylval: *exprSymType) => int {
......@@ -79,7 +79,7 @@ func main {
})
}
var exprExca = [...]int{
global exprExca = [...]int{
-1, 1,
1, -1,
-2, 0,
......@@ -89,45 +89,45 @@ const exprPrivate = 57344
const exprLast = 23
var exprAct = [...]int{
global exprAct = [...]int{
7, 4, 5, 2, 21, 9, 6, 8, 12, 13,
9, 1, 8, 16, 3, 19, 20, 17, 18, 14,
15, 10, 11,
}
var exprPact = [...]int{
global exprPact = [...]int{
-3, -1000, -1000, 17, -3, -3, 13, -1000, -1000, -3,
2, 2, -1000, -1000, 2, 2, -5, 13, 13, -1000,
-1000, -1000,
}
var exprPgo = [...]int{
global exprPgo = [...]int{
0, 3, 14, 6, 0, 11,
}
var exprR1 = [...]int{
global exprR1 = [...]int{
0, 5, 1, 1, 1, 2, 2, 2, 3, 3,
3, 4, 4,
}
var exprR2 = [...]int{
global exprR2 = [...]int{
0, 1, 1, 2, 2, 1, 3, 3, 1, 3,
3, 1, 3,
}
var exprChk = [...]int{
global exprChk = [...]int{
-1000, -5, -1, -2, 4, 5, -3, -4, 10, 8,
4, 5, -1, -1, 6, 7, -1, -3, -3, -4,
-4, 9,
}
var exprDef = [...]int{
global exprDef = [...]int{
0, -2, 1, 2, 0, 0, 5, 8, 11, 0,
0, 0, 3, 4, 0, 0, 0, 6, 7, 9,
10, 12,
}
var exprTok1 = [...]int{
global exprTok1 = [...]int{
1, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
......@@ -135,33 +135,33 @@ var exprTok1 = [...]int{
8, 9, 6, 4, 3, 5, 3, 7,
}
var exprTok2 = [...]int{
global exprTok2 = [...]int{
2, 3, 10,
}
var exprTok3 = [...]int{
global exprTok3 = [...]int{
0,
}
type exprErrorMessageInfo struct {
state :int
token :int
msg :string
state: int
token: int
msg: string
}
var exprErrorMessages = [...]exprErrorMessageInfo{}
global exprErrorMessages = [...]exprErrorMessageInfo{}
/* parser for yacc output */
var (
global (
exprDebug = 0
exprErrorVerbose = false
)
type exprParser struct {
lval :exprSymType
stack :[exprInitialStackSize]exprSymType
char :int
lval: exprSymType
stack: [exprInitialStackSize]exprSymType
char: int
}
func exprParser.Lookahead => int {
......
......@@ -5,8 +5,8 @@ import "3rdparty/pkg"
import "myapp/mymath"
func main {
fmt.Println(40+2)
pkg.Println(100+2)
fmt.Println(40 + 2)
pkg.Println(100 + 2)
println(mymath.I8Max)
println(sum(100))
......@@ -14,7 +14,7 @@ func main {
}
func sum(n: int) => int {
v: int
var v: int
for i := 1; i <= n; i++ {
v += i
}
......
......@@ -20,44 +20,44 @@ func T1.f() {
println("This is T1, this.a==", this.a)
}
func T2.f(){
func T2.f() {
println("This is T2, this.b==", this.b)
}
func main() {
v1 := T1{a: 13}
i1: I1 = &v1 //具体类型到具名接口
v1.f() //直接调用
i1.f() //接口调用
var i1: I1 = &v1 //具体类型到具名接口
v1.f() //直接调用
i1.f() //接口调用
doConcreteType(i1)
i1.f()
v2 := T2{b: 42}
i1 = &v2 //具体类型到具名接口
i1 = &v2 //具体类型到具名接口
i1.f()
doConcreteType(i1)
i1.f()
ni: interface{} = &v1 //具体类型到空接口
i1 = ni.(I1) //接口动态互转
var ni: interface{} = &v1 //具体类型到空接口
i1 = ni.(I1) //接口动态互转
i1.f()
ni = &v2 //具体类型到空接口
i1 = ni.(I1) //接口动态互转
ni = &v2 //具体类型到空接口
i1 = ni.(I1) //接口动态互转
i1.f()
ival: i32 = 777
var ival: i32 = 777
ni = ival
doConcreteType(ni)
doConcreteType(v1)
doConcreteType(v2)
doConcreteType("你好凹语言")
//i2 := ni.(I2) //接口互转,由于v2未实现I2,这会触发异常
//i2.f2()
anoni: interface{ f() } = &v1 //具体类型到匿名接口
var anoni: interface{ f() } = &v1 //具体类型到匿名接口
anoni.f()
i1 = anoni //匿名接口向具名接口转换
i1.f()
......@@ -65,25 +65,25 @@ func main() {
func doConcreteType(i: interface{}) {
//接口到具体类型断言
switch c := i.(type){
case *T1:
println("*T1")
c.a *= 2
case *T2:
println("*T2")
c.b *= 2
case i32:
println("i32: ", c)
case string:
println("string: ", c)
case T1:
println("T1, T1.a==", c.a)
case T2:
println("T2, T2.b==", c.b)
switch c := i.(type) {
case *T1:
println("*T1")
c.a *= 2
case *T2:
println("*T2")
c.b *= 2
case i32:
println("i32: ", c)
case string:
println("string: ", c)
case T1:
println("T1, T1.a==", c.a)
case T2:
println("T2, T2.b==", c.b)
}
}
......@@ -7,7 +7,7 @@ func main() {
}
func test_array(a: int, b: int) {
arr: [10][10]int
var arr: [10][10]int
for i := 0; i < 10; i++ {
for j := 0; j < 10; j++ {
arr[i][j] = i * j
......
......@@ -8,7 +8,7 @@ type fff32 f32
var arr1: [size]fff32
func makearr0a(i: int) => [size]fff32 {
r: [size]fff32
var r: [size]fff32
for j := 0; j < size; j++ {
r[j] = fff32(i * j)
}
......
......@@ -2,8 +2,8 @@
// Test read/write global variables.
type ty0 struct {
v0 :int
v1 :f64
v0: int
v1: f64
}
var gv0: ty0 = ty0{5555, 3.1415926}
......
......@@ -2,8 +2,8 @@
// Test read/write global variables.
type ty0 struct {
v0 :int
v1 :f64
v0: int
v1: f64
}
var gv0: ty0 = ty0{5555, 3.1415926}
......
......@@ -2,8 +2,8 @@
// Test anonymous functions and closure functions.
type pair struct {
i :f32
j :f32
i: f32
j: f32
}
func main() {
......@@ -12,9 +12,9 @@ func main() {
println("World!")
}()
i: int = 31
j: [4]f32 = [4]f32{1, 2.2, 5.5, 9.8}
k: pair = pair{3.14, 2.718}
var i: int = 31
var j: [4]f32 = [4]f32{1, 2.2, 5.5, 9.8}
var k: pair = pair{3.14, 2.718}
show := func(q: int) {
println(i, " + ", q, " = ", i+q)
println("{", j[0], ", ", j[1], ", ", j[2], ", ", j[3], "}")
......
......@@ -2,8 +2,8 @@
// Test multiple return values.
type pair struct {
a :f32
b :f32
a: f32
b: f32
}
func calc() => ([4]f32, pair, f32) {
......
......@@ -2,7 +2,7 @@
func main() {
for n := 2; n <= 30; n = n + 1 {
isPrime: int = 1
var isPrime: int = 1
for i := 2; i*i <= n; i = i + 1 {
if x := n % i; x == 0 {
isPrime = 0
......
......@@ -10,10 +10,10 @@ func main() {
}
type t0 struct {
a :i64
b :i32
c :[4]f64
d :u32
a: i64
b: i32
c: [4]f64
d: u32
}
func test_struct(a: int) {
......
......@@ -10,10 +10,10 @@ func main() {
}
type t0 struct {
a :i64
b :i32
c :[4]f64
d :u32
a: i64
b: i32
c: [4]f64
d: u32
}
func test_struct1(a: int, b: int, c: int) {
......
......@@ -6,8 +6,8 @@ type fff32 f32
const size = 16
type struct_t0 struct {
arr0 :[size]int
arr1 :[size]fff32
arr0: [size]int
arr1: [size]fff32
}
var st0: struct_t0
......
......@@ -4,14 +4,14 @@
const size = 16
type struct_t0 struct {
arr0 :[size]int
arr1 :[size]f32
arr0: [size]int
arr1: [size]f32
}
var st1: struct_t0
func maket0a(i: int) => struct_t0 {
r: struct_t0
var r: struct_t0
for j := 0; j < 16; j++ {
r.arr0[j] = 100 - j - i
r.arr1[j] = 200.0 - f32(j+i)
......
# 版权 @2023 凹语言 作者。保留所有权利。
default:
go run ../../../main.go array.wa
go run ../../../main.go array_1.wa
go run ../../../main.go closure.wa
go run ../../../main.go global.wa
go run ../../../main.go heart.wa
go run ../../../main.go multi_ret.wa
#go run ../../../main.go native_test.wa
go run ../../../main.go prime.wa
go run ../../../main.go ref.wa
go run ../../../main.go slice.wa
go run ../../../main.go string.wa
go run ../../../main.go struct.wa
clean:
// 版权 @2021 凹语言 作者。保留所有权利。
package main
var I: [8]i32
func Gen_arr(param: [8]i32) => [8]i32 {
......@@ -10,7 +8,7 @@ func Gen_arr(param: [8]i32) => [8]i32 {
}
func main() {
j: [8]i32
var j: [8]i32
j[3] = 40
I = Gen_arr(j)
......
// 版权 @2022 凹语言 作者。保留所有权利。
func main {
test_array(2, 4)
test_array(7, 9)
test_array(2, 4)
test_array(7, 9)
}
func test_array(a: int, b: int) {
arr: [10][10]int
for i := 0; i < 10; i++ {
for j := 0; j < 10; j++ {
arr[i][j] = i * j
}
}
var arr: [10][10]int
for i := 0; i < 10; i++ {
for j := 0; j < 10; j++ {
arr[i][j] = i * j
}
}
println("arr[", a, "][", b, "] = ", arr[a][b])
println("arr[", b, "][", a, "] = ", arr[b][a])
println("arr[", a, "][", b, "] = ", arr[a][b])
println("arr[", b, "][", a, "] = ", arr[b][a])
}
// 版权 @2021 凹语言 作者。保留所有权利。
type FP func (i: i32) => i32
type FP func(i: i32) => i32
type ST struct {
i: i32
}
func (t *ST) meth_p(p: i32) => i32 {
t.i += p
return t.i
func ST.meth_p(p: i32) => i32 {
this.i += p
return this.i
}
func (t ST) meth_v(p: i32) => i32 {
t.i += p
return t.i
func ST.meth_v(p: i32) => i32 {
this.i += p
return this.i
}
var g_f: FP
global g_f: FP
func main() {
o: ST
func main() {
var o: ST
o.i = 11
g_f = o.meth_p
println(g_f(11))
......@@ -27,20 +27,20 @@ func main() {
g_f = o.meth_v
println(g_f(11))
println(o.i)
n := i32(21)
g_f = func(i: i32) => i32{
g_f = func(i: i32) => i32 {
n += i
return n
}
println(g_f(22))
println(n)
func(i: i32) {
n += i
}(22)
println(n)
g_f = Double
println(g_f(13))
}
......
// 版权 @2021 凹语言 作者。保留所有权利。
package main
var counter: i32 = 13
var G: *i32
func gen_i() => *i32{
i: i32
func gen_i() => *i32 {
var i: i32
i = counter
counter++
return &i
......
// 版权 @2021 凹语言 作者。保留所有权利。
func main() {
i := ST{i: 1, j:2}
j := ST{i: 3, j:4}
i := ST{i: 1, j: 2}
j := ST{i: 3, j: 4}
i, j = Swap(i, j)
println(i.i)
println(i.j)
......@@ -10,10 +10,10 @@ func main() {
println(j.j)
}
func Swap(i, j: ST) => (ST, ST){
func Swap(i, j: ST) => (ST, ST) {
return j, i
}
type ST struct{
type ST struct {
i, j: i32
}
\ No newline at end of file
}
// 版权 @2022 凹语言 作者。保留所有权利。
func main {
println("您好,凹语言!\nHello, wa-lang!")
println("幸运整数:", test_calc1(6, 9, 8), ", ", 5, ", ", test_calc5(6, 9, 8))
println("幸运浮点数:", 5.44, ", ", test_calc3(6, 9, 8))
test_print(-100, 100, -100, 100, 5)
test_cmp0(20, 19)
test_cmp0(20, 20)
test_cmp1(-3, -3)
test_cmp1(-3, -4)
test_cmp2(10, 10)
test_cmp2(10, 10.0001)
test_jmp(4)
test_loop0(10)
test_loop1(10, 10)
test_array(2, 4)
test_array(7, 9)
test_struct0(10)
test_struct0(11)
test_struct0(2)
test_struct0(13)
test_struct0(0)
test_struct1(10, 0, 0)
test_struct1(11, 1, 0)
test_struct1(2, 0, 1)
test_struct1(13, 1, 0)
test_struct1(2, 1, 0)
test_convert0()
test_convert1()
test_convert2()
test_convert3()
test_global_consts()
test_global_variables_0()
test_global_variables_1()
test_pointer(1)
test_pointer(-1)
test_f32()
test_struct2()
test_struct3()
test_struct4()
test_array2()
test_array3()
test_array4()
bye()
}
func bye() {
println("再见!\nbye!")
}
func test_calc0(a i32, b i32, c i32) i32 {
return (-a) * 5 / (b - c + 2)
}
func test_calc1(a i32, b i32, c i32) i32 {
return test_calc0(a, b, c)
}
func test_calc2(a f64, b f64, c f64) f64 {
return (-a) * 5 / (b - c + 2)
}
func test_calc3(a f64, b f64, c f64) f64 {
return test_calc2(a, b, c)
}
func test_calc4(a u16, b u16, c u16) u16 {
return (-a) * 5 / (b - c + 2)
}
func test_calc5(a u16, b u16, c u16) u16 {
return test_calc4(-a, b, c)
}
func test_cmp0(a u16, b u16) {
if a > b {
println(a, " > ", b)
} else {
println(a, " <= ", b)
}
}
func test_cmp1(a i64, b i64) {
if a <= b {
println(a, " <= ", b)
} else {
println(a, " > ", b)
}
}
func test_cmp2(a f64, b f64) {
if a != b {
println(a, " != ", b)
} else {
println(a, " == ", b)
}
}
func test_jmp(a i32) {
for {
if a > 10 {
return
}
println(a)
a++
}
}
func test_loop0(a i16) {
for i := i16(0); i < a; i++ {
println(i, " * ", i, " = ", i * i)
}
}
func test_loop1(a u64, b u64) {
for i := u64(0); i < a; i++ {
for j := u64(0); j < b; j++ {
println(i, " * ", j, " = ", i * j)
}
}
}
func test_array(a int, b int) {
arr: [10][10]int
for i := 0; i < 10; i++ {
for j := 0; j < 10; j++ {
arr[i][j] = i * j
}
}
println("arr[", a, "][", b, "] = ", arr[a][b])
println("arr[", b, "][", a, "] = ", arr[b][a])
}
type t0 struct {
a i64
b i32
c [4]f64
d u32
}
func test_struct0(a int) {
s0 := t0{17, -668, [4]f64{2.71828, 0.717, 3.14159}, 234}
switch a {
case 10:
println(s0.a)
case 11:
println(s0.b)
case 13:
println(s0.d)
default:
println(s0.c[a])
}
}
func test_struct1(a int, b int, c int) {
s0 := [2]t0{{11, -12, [4]f64{0.1, -0.2}, 13}, {21, -22, [4]f64{1.1, -1.2}, 24}}
switch a {
case 10:
println(s0[b].a)
case 11:
println(s0[b].b)
case 13:
println(s0[b].d)
default:
println(s0[b].c[c])
}
}
type struct_t0 struct {
arr0 [16]int
arr1 [16]f32
}
var st0: struct_t0
var st1: struct_t0
func test_struct2() {
for i := 0; i < 16; i++ {
st0.arr0[i] = i * i
st0.arr1[i] = f32(i) * 2.0
}
}
func getst0int(st0 struct_t0, i int) int {
return st0.arr0[i]
}
func getst0f32(st0 *struct_t0, i int) f32 {
return st0.arr1[i]
}
func test_struct3() {
for i := 6; i < 14; i += 2 {
println(getst0int(st0, i), ", ", getst0f32(&st0, i + 1))
}
}
func maket0a(i int) struct_t0 {
r: struct_t0
for j := 0; j < 16; j++ {
r.arr0[j] = 100 - j - i
r.arr1[j] = 200.0 - f32(j + i)
}
return r
}
func maket0b(i int) *struct_t0 {
for j := 0; j < 16; j++ {
st1.arr0[j] = 100 - j - i
st1.arr1[j] = 200.0 - f32(j + i)
}
return &st1
}
func test_struct4() {
r0, r1 := maket0a(20), maket0b(30)
for i := 0; i < 16; i++ {
println(r0.arr0[i], ", ", r0.arr1[i], " : ", r1.arr0[i], ", ", r1.arr1[i])
}
}
type fff32 f32
var arr0: [32]fff32
var arr1: [32]fff32
func test_array2() {
for i := 0; i < 32; i++ {
arr0[i] = fff32(i * i)
}
}
func getarr0a(arr [32]fff32, i int) fff32 {
return arr[i]
}
func getarr0b(arr *[32]fff32, i int) fff32 {
return arr[i]
}
func makearr0a(i int) [32]fff32 {
r: [32]fff32
for j := 0; j < 32; j++ {
r[j] = fff32(i * j)
}
return r
}
func makearr0b(i int) *[32]fff32 {
for j := 0; j < 32; j++ {
arr1[j] = 2000 - fff32(i * j)
}
return &arr1
}
func test_array3() {
for i := 6; i < 20; i += 2 {
println(getarr0a(arr0, i), ", ", getarr0b(&arr0, i + 1))
}
}
func test_array4() {
r0, r1 := makearr0a(10), makearr0b(20)
for i := 6; i < 20; i += 2 {
println(r0[i], ", ", r1[i])
}
}
func convert_i32_to_i16(a i32) i16 {
return i16(a)
}
func convert_i32_to_u16(a i32) u16 {
return u16(a)
}
func convert_i16_to_i32(a i16) i32 {
return i32(a)
}
func convert_u16_to_i32(a u16) i32 {
return i32(a)
}
func test_convert0() {
println(i32(convert_i32_to_i16(65537)))
println(u32(convert_i32_to_u16(65537)))
println(convert_i16_to_i32(100))
println(convert_i16_to_i32(-100))
println(convert_u16_to_i32(100))
println(convert_u16_to_i32(65530))
}
func convert_f64_to_f32(a f64) f32 {
return f32(a)
}
func test_convert1() {
println(f64(convert_f64_to_f32(3.1415926535)))
}
func convert_i16_to_f64(a i16) f64 {
return f64(a)
}
func convert_u16_to_f64(a u16) f64 {
return f64(a)
}
func test_convert2() {
println(convert_i16_to_f64(100))
println(convert_i16_to_f64(-100))
println(convert_u16_to_f64(100))
println(convert_u16_to_f64(65530))
}
func convert_f64_to_i32(a f64) i32 {
return i32(a)
}
func convert_f64_to_u32(a f64) u32 {
return u32(a)
}
func test_convert3() {
println(convert_f64_to_i32(100.001))
println(convert_f64_to_u32(100.001))
println(convert_f64_to_i32(99.99))
println(convert_f64_to_u32(99.99))
println(convert_f64_to_i32(-100.001))
println(convert_f64_to_u32(-100.001))
}
func test_print(a i16, b u16, c i8, d u8, e f32) {
println(a, ", ", b, ", ", c, ", ", d, ", ", e/2)
}
const gbegin = 10
const gend = 20
func test_global_consts() {
for i := gbegin; i < gend; i++ {
println(i, " - ", gbegin, " = ", i - gbegin)
}
}
type ty0 struct {
v0 int
v1 f64
}
var gv0: ty0 = ty0{5555, 3.1415926}
var gv1: [4]int = [4]int{81, 82, 17, 76}
func test_global_variables_0() {
println("gv0: {", gv0.v0, ", ", gv0.v1, "}")
println("gv1: {", gv1[0], ", ", gv1[1], ", ", gv1[2], ", ", gv1[3], "}")
}
func test_global_variables_1() {
gv0.v0 = 8888
gv0.v1 = 2.71828
gv1[0] += 2
gv1[1] += 3
gv1[2] += 4
gv1[3] += 5
println("gv0: {", gv0.v0, ", ", gv0.v1, "}")
println("gv1: {", gv1[0], ", ", gv1[1], ", ", gv1[2], ", ", gv1[3], "}")
}
var gint0: int = 100
var gint1: int = 200
func get_int_addr(a int) *int {
if a > 0 {
return &gint0
} else {
return &gint1
}
}
func set_int(a *int) {
*a += 10
}
func test_pointer(a int) {
println(gint0, ", ", gint1)
p := get_int_addr(a)
set_int(p)
println(gint0, ", ", gint1)
}
func getPi() f32 {
return 3.1415926535
}
func getE() f32 {
return 2.71828
}
func minusf32(a f32) f32 {
return 0 - a
}
func circle_area(r f32) f32 {
return r * r * 3.1415926
}
func test_f32() {
println(getPi())
println(getE())
println(minusf32(getPi()))
println(minusf32(-1.3))
println(circle_area(1.1))
println(circle_area(getE()))
}
// 版权 @2022 凹语言 作者。保留所有权利。
func main {
println("您好,凹语言!\nHello, wa-lang!")
println("幸运整数:", test_calc1(6, 9, 8), ", ", 5, ", ", test_calc5(6, 9, 8))
println("幸运浮点数:", 5.44, ", ", test_calc3(6, 9, 8))
test_print(100, 100, 5)
test_cmp0(20, 19)
test_cmp0(20, 20)
//test_cmp1(-3, -3)
//test_cmp1(-3, -4)
test_cmp2(10, 10)
test_cmp2(10, 10.0001)
test_jmp(4)
test_loop0(10)
test_loop1(10, 10)
test_array(2, 4)
test_array(7, 9)
test_struct0(10)
test_struct0(11)
test_struct0(2)
test_struct0(13)
test_struct0(0)
test_struct1(10, 0, 0)
test_struct1(11, 1, 0)
test_struct1(2, 0, 1)
test_struct1(13, 1, 0)
test_struct1(2, 1, 0)
test_convert0()
test_convert1()
test_convert2()
test_convert3()
test_global_consts()
test_global_variables_0()
test_global_variables_1()
test_pointer(1)
test_pointer(-1)
test_f32()
test_struct2()
test_struct3()
test_struct4()
test_array2()
test_array3()
test_array4()
bye()
}
func bye {
println("再见!\nbye!")
}
func test_calc0(a: i32, b: i32, c: i32) => i32 {
return (-a) * 5 / (b - c + 2)
}
func test_calc1(a: i32, b: i32, c: i32) => i32 {
return test_calc0(a, b, c)
}
func test_calc2(a: f64, b: f64, c: f64) => f64 {
return (-a) * 5 / (b - c + 2)
}
func test_calc3(a: f64, b: f64, c: f64) => f64 {
return test_calc2(a, b, c)
}
func test_calc4(a: u16, b: u16, c: u16) => u16 {
return (-a) * 5 / (b - c + 2)
}
func test_calc5(a: u16, b: u16, c: u16) => u16 {
return test_calc4(-a, b, c)
}
func test_cmp0(a: u16, b: u16) {
if a > b {
println(a, " > ", b)
} else {
println(a, " <= ", b)
}
}
//func test_cmp1(a: i64, b: i64) {
// if a <= b {
// println(a, " <= ", b)
// } else {
// println(a, " > ", b)
// }
//}
func test_cmp2(a: f64, b: f64) {
if a != b {
println(a, " != ", b)
} else {
println(a, " == ", b)
}
}
func test_jmp(a: i32) {
for {
if a > 10 {
return
}
println(a)
a++
}
}
func test_loop0(a: u16) {
for i := u16(0); i < a; i++ {
println(i, " * ", i, " = ", i*i)
}
}
func test_loop1(a: u64, b: u64) {
for i := u64(0); i < a; i++ {
for j := u64(0); j < b; j++ {
println(i, " * ", j, " = ", i*j)
}
}
}
func test_array(a: int, b: int) {
var arr: [10][10]int
for i := 0; i < 10; i++ {
for j := 0; j < 10; j++ {
arr[i][j] = i * j
}
}
println("arr[", a, "][", b, "] = ", arr[a][b])
println("arr[", b, "][", a, "] = ", arr[b][a])
}
type t0 struct {
a: i64
b: i32
c: [4]f64
d: u32
}
func test_struct0(a: int) {
s0 := t0{17, -668, [4]f64{2.71828, 0.717, 3.14159}, 234}
switch a {
case 10:
println(s0.a)
case 11:
println(s0.b)
case 13:
println(s0.d)
default:
println(s0.c[a])
}
}
func test_struct1(a: int, b: int, c: int) {
s0 := [2]t0{{11, -12, [4]f64{0.1, -0.2}, 13}, {21, -22, [4]f64{1.1, -1.2}, 24}}
switch a {
case 10:
println(s0[b].a)
case 11:
println(s0[b].b)
case 13:
println(s0[b].d)
default:
println(s0[b].c[c])
}
}
type struct_t0 struct {
arr0: [16]int
arr1: [16]f32
}
global st0: struct_t0
global st1: struct_t0
func test_struct2 {
for i := 0; i < 16; i++ {
st0.arr0[i] = i * i
st0.arr1[i] = f32(i) * 2.0
}
}
func getst0int(st0: struct_t0, i: int) => int {
return st0.arr0[i]
}
func getst0f32(st0: *struct_t0, i: int) => f32 {
return st0.arr1[i]
}
func test_struct3 {
for i := 6; i < 14; i += 2 {
println(getst0int(st0, i), ", ", getst0f32(&st0, i+1))
}
}
func maket0a(i: int) => struct_t0 {
var r: struct_t0
for j := 0; j < 16; j++ {
r.arr0[j] = 100 - j - i
r.arr1[j] = 200.0 - f32(j+i)
}
return r
}
func maket0b(i: int) => *struct_t0 {
for j := 0; j < 16; j++ {
st1.arr0[j] = 100 - j - i
st1.arr1[j] = 200.0 - f32(j+i)
}
return &st1
}
func test_struct4 {
r0, r1 := maket0a(20), maket0b(30)
for i := 0; i < 16; i++ {
println(r0.arr0[i], ", ", r0.arr1[i], " : ", r1.arr0[i], ", ", r1.arr1[i])
}
}
type fff32 f32
global arr0: [32]fff32
global arr1: [32]fff32
func test_array2() {
for i := 0; i < 32; i++ {
arr0[i] = fff32(i * i)
}
}
func getarr0a(arr: [32]fff32, i: int) => fff32 {
return arr[i]
}
func getarr0b(arr: *[32]fff32, i: int) => fff32 {
return arr[i]
}
func makearr0a(i: int) => [32]fff32 {
var r: [32]fff32
for j := 0; j < 32; j++ {
r[j] = fff32(i * j)
}
return r
}
func makearr0b(i: int) => *[32]fff32 {
for j := 0; j < 32; j++ {
arr1[j] = 2000 - fff32(i*j)
}
return &arr1
}
func test_array3 {
for i := 6; i < 20; i += 2 {
println(getarr0a(arr0, i), ", ", getarr0b(&arr0, i+1))
}
}
func test_array4 {
r0, r1 := makearr0a(10), makearr0b(20)
for i := 6; i < 20; i += 2 {
println(r0[i], ", ", r1[i])
}
}
//func convert_i32_to_i16(a: i32) => i16 {
// return i16(a)
//}
func convert_i32_to_u16(a: i32) => u16 {
return u16(a)
}
//func convert_i16_to_i32(a: i16) => i32 {
// return i32(a)
//}
func convert_u16_to_i32(a: u16) => i32 {
return i32(a)
}
func test_convert0 {
//println(i32(convert_i32_to_i16(65537)))
println(u32(convert_i32_to_u16(65537)))
//println(convert_i16_to_i32(100))
//println(convert_i16_to_i32(-100))
println(convert_u16_to_i32(100))
println(convert_u16_to_i32(65530))
}
func convert_f64_to_f32(a: f64) => f32 {
return f32(a)
}
func test_convert1() {
println(f64(convert_f64_to_f32(3.1415926535)))
}
//func convert_i16_to_f64(a: i16) => f64 {
// return f64(a)
//}
func convert_u16_to_f64(a: u16) => f64 {
return f64(a)
}
func test_convert2() {
//println(convert_i16_to_f64(100))
//println(convert_i16_to_f64(-100))
println(convert_u16_to_f64(100))
println(convert_u16_to_f64(65530))
}
func convert_f64_to_i32(a: f64) => i32 {
return i32(a)
}
func convert_f64_to_u32(a: f64) => u32 {
return u32(a)
}
func test_convert3() {
println(convert_f64_to_i32(100.001))
println(convert_f64_to_u32(100.001))
println(convert_f64_to_i32(99.99))
println(convert_f64_to_u32(99.99))
println(convert_f64_to_i32(-100.001))
println(convert_f64_to_u32(-100.001))
}
func test_print(b: u16, d: u8, e: f32) {
println(b, ", ", d, ", ", e/2)
}
const gbegin = 10
const gend = 20
func test_global_consts() {
for i := gbegin; i < gend; i++ {
println(i, " - ", gbegin, " = ", i-gbegin)
}
}
type ty0 struct {
v0: int
v1: f64
}
global gv0: ty0 = ty0{5555, 3.1415926}
global gv1: [4]int = [4]int{81, 82, 17, 76}
func test_global_variables_0() {
println("gv0: {", gv0.v0, ", ", gv0.v1, "}")
println("gv1: {", gv1[0], ", ", gv1[1], ", ", gv1[2], ", ", gv1[3], "}")
}
func test_global_variables_1 {
gv0.v0 = 8888
gv0.v1 = 2.71828
gv1[0] += 2
gv1[1] += 3
gv1[2] += 4
gv1[3] += 5
println("gv0: {", gv0.v0, ", ", gv0.v1, "}")
println("gv1: {", gv1[0], ", ", gv1[1], ", ", gv1[2], ", ", gv1[3], "}")
}
global gint0: int = 100
global gint1: int = 200
func get_int_addr(a: int) => *int {
if a > 0 {
return &gint0
} else {
return &gint1
}
}
func set_int(a: *int) {
*a += 10
}
func test_pointer(a: int) {
println(gint0, ", ", gint1)
p := get_int_addr(a)
set_int(p)
println(gint0, ", ", gint1)
}
func getPi => f32 {
return 3.1415926535
}
func getE => f32 {
return 2.71828
}
func minusf32(a: f32) => f32 {
return 0 - a
}
func circle_area(r: f32) => f32 {
return r * r * 3.1415926
}
func test_f32() {
println(getPi())
println(getE())
println(minusf32(getPi()))
println(minusf32(-1.3))
println(circle_area(1.1))
println(circle_area(getE()))
}
......@@ -2,7 +2,7 @@
func main() {
for n := 2; n <= 30; n = n + 1 {
isPrime: int = 1
var isPrime: int = 1
for i := 2; i*i <= n; i = i + 1 {
if x := n % i; x == 0 {
isPrime = 0
......
......@@ -8,7 +8,7 @@ func main() {
}
func new_int() => *i32 {
i: i32
var i: i32
i = 42
return &i
}
......
......@@ -9,7 +9,7 @@ func main() {
println(a[1]) # 13
println(len(s)) # 2
k: []i32
var k: []i32
k = append(k, 99, 81, 170)
println(k[0]) # 99
......@@ -22,4 +22,4 @@ func main() {
l[0] = 111
println(l[0]) #111
println(k[0]) #99
}
\ No newline at end of file
}
func gen_str() => string {
return "123456"
}
func main() {
println("你好,凹语言!")
s1 := gen_str()
s2 := s1[0:3]
s2 = s2 + "abc"
println(s1)
println(s2)
println(s2)
}
......@@ -20,8 +20,8 @@ type sc struct {
}
func gen_scref() => *sc {
v: sc
i: i32
var v: sc
var i: i32
i = 13
v.a = &i
v.b = 42
......@@ -29,8 +29,8 @@ func gen_scref() => *sc {
}
func gen_sc() => sc {
i: i32 = 26
v: sc
var i: i32 = 26
var v: sc
v.a = &i
v.b = 58
return v
......
......@@ -24,7 +24,7 @@ func main {
Do()
println(G.A)
println(G.B)
println(mypkg.G.A)
println(mypkg.G.B)
}
// 版权 @2022 _examples/hello 作者。保留所有权利。
func Println(x int) {
func Println(x: int) {
println(x)
}
......@@ -7,8 +7,8 @@ func refToPtr_i32(p: *i32) => i32
func refToPtr_byteSlice(t: []byte) => i32
func main() {
i: i32
j: []byte
var i: i32
var j: []byte
println(refToPtr_i32(&i))
println(refToPtr_byteSlice(j))
j = append(j, 33)
......
// 版权 @2023 凹语言 作者。保留所有权利。
func main {
L0: bool
L: int = 123
var L0: bool
var L: int = 123
Loop:
for 1 > 0 {
......
......@@ -22,24 +22,24 @@ type OnKey func(key: u32)
//画布对象
type Canvas struct {
device_id :u32 //画布对象对应的网页DOM对象id
width :u32 //画布宽度,以像素为单位
height :u32 //画布高度,以像素为单位
frame_buf :[]u32 //画布帧缓存,容量为Width * Height
device_id: u32 //画布对象对应的网页DOM对象id
width: u32 //画布宽度,以像素为单位
height: u32 //画布高度,以像素为单位
frame_buf: []u32 //画布帧缓存,容量为Width * Height
}
//画布事件
type CanvasEvents struct {
Device_id :u32 //画布设备ID
OnMouseDown :OnTouch //鼠标按下时的回调处理函数
OnMouseUp :OnTouch //鼠标松开时的回调处理函数
OnKeyDown :OnKey //键盘按下时的回调处理函数
OnKeyUp :OnKey //键盘弹起时的回调处理函数
Device_id: u32 //画布设备ID
OnMouseDown: OnTouch //鼠标按下时的回调处理函数
OnMouseUp: OnTouch //鼠标松开时的回调处理函数
OnKeyDown: OnKey //键盘按下时的回调处理函数
OnKeyUp: OnKey //键盘弹起时的回调处理函数
}
//创建一个宽度为w像素、高度为h像素的画布对象
func NewCanvas(w, h: u32) => *Canvas {
canvas: Canvas
var canvas: Canvas
canvas.device_id = newCanvas_JS(w, h)
canvas.width = w
canvas.height = h
......
......@@ -5,7 +5,7 @@ import "snake/canvas"
var ca: *canvas.Canvas
type Position struct {
x, y :i32
x, y: i32
}
const DefaultColor = 0x00000000
......@@ -13,13 +13,13 @@ const BodyColor = 0xFF202020
const FoodColor = 0xFF00FF00
const (
GridNull: u8 = iota
GridNull :u8 = iota
GridBody
GridFood
)
const (
DirNull: i32 = iota
DirNull :i32 = iota
DirLeft
DirUp
DirRight
......@@ -29,13 +29,13 @@ const (
var Dirs: [5]Position
type GameState struct {
w, h :i32
scale :i32
grid :[]u8
body :[]Position
dir :i32
w, h: i32
scale: i32
grid: []u8
body: []Position
dir: i32
ca :*canvas.Canvas
ca: *canvas.Canvas
}
var gameState: GameState
......@@ -47,7 +47,7 @@ func GameState.Init(w, h: i32, scale: i32) {
this.grid = make([]u8, u32(w*h))
this.ca = canvas.NewCanvas(u32(w*scale), u32(h*scale))
caev: canvas.CanvasEvents
var caev: canvas.CanvasEvents
caev.Device_id = this.ca.GetDeviceID()
caev.OnMouseDown = func(x, y: u32) {}
caev.OnMouseUp = func(x, y: u32) {}
......@@ -80,7 +80,7 @@ func GameState.Start() {
func GameState.SetGridType(p: Position, t: u8) {
this.grid[p.y*this.w+p.x] = t
color: u32
var color: u32
switch t {
case GridBody:
color = BodyColor
......@@ -92,7 +92,7 @@ func GameState.SetGridType(p: Position, t: u8) {
color = DefaultColor
}
x, y: i32
var x, y: i32
for y = 0; y < this.scale; y++ {
for x = 0; x < this.scale; x++ {
this.ca.SetPixel(u32(p.x*this.scale+x), u32(p.y*this.scale+y), color)
......@@ -103,7 +103,7 @@ func GameState.SetGridType(p: Position, t: u8) {
#wa:import wa_js_env rand
func rand_JS(_: i32) => i32
func GameState.GenFood() => Position {
p: Position
var p: Position
for {
p = Position{x: rand_JS(this.w), y: rand_JS(this.h)}
if this.grid[p.y*this.w+p.x] == GridNull {
......
......@@ -8,7 +8,7 @@ func main() {
println(c)
}
bytes[0] = 113
str = string(bytes)
println(str)
}
// 版权 @2023 凹语言 作者。保留所有权利。
var Info: struct{
var Info: struct {
name: string
age: i32
age: i32
}
func main() {
i := gen_scref()
println(*i.a) //13
println(*i.a) //13
println(i.b) //42
j := gen_sc()
println(*j.a) //26
println(*j.a) //26
println(j.b) //58
//全局匿名结构体变量
Info.name = "张三"
Info.age = 88
println(Info.name, " ", Info.age) //张三 88
println(Info.name, " ", Info.age) //张三 88
//局部匿名结构体变量
k := struct {name: string; age: i32}{"李四", 66}
println(k.name, " ", k.age) //李四 66
k := struct {
name: string
age: i32
}{"李四", 66}
println(k.name, " ", k.age) //李四 66
Info = k
println(Info.name, " ", Info.age) //李四 66
println(Info.name, " ", Info.age) //李四 66
}
type sp struct {
......@@ -37,8 +40,8 @@ type sc struct {
}
func gen_scref() => *sc {
v: sc
i: i32
var v: sc
var i: i32
i = 13
v.a = &i
v.b = 42
......@@ -46,8 +49,8 @@ func gen_scref() => *sc {
}
func gen_sc() => sc {
i: i32 = 26
v: sc
var i: i32 = 26
var v: sc
v.a = &i
v.b = 58
return v
......
// 版权 @2022 凹语言 作者。保留所有权利。
import "syscall/mvp"
var WAOS = "mvp"
#wa:linkname $runtime.argsSizesGet
func argsSizesGet(result_argc: i32, result_argv_len: i32) => (errno: i32) {
return
}
#wa:linkname $runtime.argsGet
func argsGet(result_argv: i32, result_argv_buf: i32) => (errno: i32) {
return
}
#wa:linkname $runtime.environSizesGet
func environSizesGet(result_environc: i32, result_environv_len: i32) => (errno: i32) {
return
}
#wa:linkname $runtime.environGet
func environGet(result_environv: i32, result_environv_buf: i32) => (errno: i32) {
return
}
#wa:linkname $runtime.fdWrite
func fdWrite(fd: i32, io: i32, iovs_len: i32, nwritten: i32) => (written: i32) {
return
}
#wa:linkname $runtime.procExit
func procExit(code: i32) {}
#wa:linkname $runtime.assert
func assert(ok: i32, pos_msg_ptr: i32, pos_msg_len: i32) {}
#wa:linkname $runtime.assertMessage
func assertMessage(ok: i32, msg_ptr: i32, msg_len: i32, pos_msg_ptr: i32, pos_msg_len: i32) {}
#wa:linkname $runtime.waPrintI32
func waPrintI32(i: i32) {
mvp.PrintI32(i)
}
#wa:linkname $runtime.waPrintU32
func waPrintU32(i: u32) {
mvp.PrintU32(i)
}
#wa:linkname $runtime.waPrintI64
func waPrintI64(i: i64) {
mvp.PrintI64(i)
}
#wa:linkname $runtime.waPrintU64
func waPrintU64(i: u64) {
mvp.PrintU64(i)
}
#wa:linkname $runtime.waPrintF32
func waPrintF32(i: f32) {
mvp.PrintF32(i)
}
#wa:linkname $runtime.waPrintF64
func waPrintF64(i: f64) {
mvp.PrintF64(i)
}
#wa:linkname $runtime.waPrintRune
func waPrintRune(ch: i32) {
mvp.PrintRune(ch)
}
#wa:linkname $runtime.waPuts
func waPuts(ptr: i32, len: i32) {
mvp.Puts(ptr, len)
}
// 版权 @2023 凹语言 作者。保留所有权利。
#wa:import mvp waPrintI32
func PrintI32(i: i32)
#wa:import mvp waPrintU32
func PrintU32(i: u32)
#wa:import mvp waPrintI64
func PrintI64(i: i64)
#wa:import mvp waPrintU64
func PrintU64(i: u64)
#wa:import mvp waPrintF32
func PrintF32(i: f32)
#wa:import mvp waPrintF64
func PrintF64(i: f64)
#wa:import mvp waPrintRune
func PrintRune(ch: i32)
#wa:import mvp waPuts
func Puts(ptr: i32, len: i32)
......@@ -10,12 +10,20 @@ import (
"wa-lang.org/wa/internal/config"
)
//go:embed VERSION
var _VERSION string
//go:embed misc/_example_app
var _exampleAppFS embed.FS
//go:embed misc/_example_vendor
var _exampleVendorFS embed.FS
// 版本号(dev后缀表示开发版)
func GetVersion() string {
return _VERSION
}
func GetExampleAppFS() fs.FS {
fs, err := fs.Sub(_exampleAppFS, "_example_app")
if err != nil {
......