diff --git a/.travis.yml b/.travis.yml index b79a35b319a37ca85c447065772fccb375963add..7f9ae2b553900386b3df11d7301f5986bc9e0316 100644 --- a/.travis.yml +++ b/.travis.yml @@ -29,6 +29,3 @@ script: - make docker - make cover - make test-cli - -after_success: - - bash <(curl -s https://codecov.io/bash) diff --git a/CHANGES.md b/CHANGES.md index d4b1ad59d4bea69fa5a6e81ae8eac56767104acc..b6f25e1e0a729ab4705812fbee2490e609d5675e 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,8 +1,17 @@ # CHANGELOG -## 2018-12 +## 2019-01 - DOING: english translation +- add JSONFind function, which support JSON iterate +- add new test database `world_x` +- SplitStatement support optimizer hint `/*+xxx */` +- include [bats](https://github.com/bats-core/bats-core) bash auto test framework +- fix explain result with multi rows error +- fix #178 JSON datatype only support utf8mb4 + +## 2018-12 + - replace mysql database driver mymysql with go-sql-driver - add new -report-type [ast-json, tiast-json] - command line dsn args support '@', '/', ':' in password diff --git a/Makefile b/Makefile index 95d88c8bdefc5512b15b0f323ca76304924bd2be..ac06587eb864a62904398c227709c29ecf56a28c 100644 --- a/Makefile +++ b/Makefile @@ -187,7 +187,7 @@ release: build .PHONY: docker docker: - @echo "$(CGREEN)Build mysql test enviorment ...$(CEND)" + @echo "$(CGREEN)Build mysql test environment ...$(CEND)" @docker stop soar-mysql 2>/dev/null || true @docker wait soar-mysql 2>/dev/null >/dev/null || true @echo "docker run --name soar-mysql $(MYSQL_RELEASE):$(MYSQL_VERSION)" @@ -204,7 +204,7 @@ docker: timeout=`expr $$timeout - 1`; \ printf '.' ; sleep 1 ; \ else \ - echo "." ; echo "mysql test enviorment is ready!" ; break ; \ + echo "." ; echo "mysql test environment is ready!" ; break ; \ fi ; \ if [ $$timeout = 0 ] ; then \ echo "." ; echo "$(CRED)docker soar-mysql start timeout(180 s)!$(CEND)" ; exit 1 ; \ diff --git a/advisor/heuristic.go b/advisor/heuristic.go index 3485c07ebffbd25c352256000348d4a7a9cdfcaf..a1e61527b78e8aca4605f0b2ef56b8b48d6e3d4b 100644 --- a/advisor/heuristic.go +++ b/advisor/heuristic.go @@ -31,6 +31,7 @@ import ( "github.com/percona/go-mysql/query" tidb "github.com/pingcap/parser/ast" "github.com/pingcap/parser/mysql" + "github.com/tidwall/gjson" "vitess.io/vitess/go/vt/sqlparser" ) @@ -1312,37 +1313,16 @@ func (q *Query4Audit) RuleLoadFile() Rule { func (q *Query4Audit) RuleMultiCompare() Rule { var rule = q.RuleOK() if q.TiStmt != nil { - for _, tiStmt := range q.TiStmt { - switch node := tiStmt.(type) { - case *tidb.SelectStmt: - switch where := node.Where.(type) { - case *tidb.BinaryOperationExpr: - switch where.L.(type) { - case *tidb.BinaryOperationExpr: - if where.Op.String() == "eq" { - rule = HeuristicRules["RES.009"] - } - } - } - case *tidb.UpdateStmt: - switch where := node.Where.(type) { - case *tidb.BinaryOperationExpr: - switch where.L.(type) { - case *tidb.BinaryOperationExpr: - if where.Op.String() == "eq" { - rule = HeuristicRules["RES.009"] - } - } - } - case *tidb.DeleteStmt: - switch where := node.Where.(type) { - case *tidb.BinaryOperationExpr: - switch where.L.(type) { - case *tidb.BinaryOperationExpr: - if where.Op.String() == "eq" { - rule = HeuristicRules["RES.009"] - } - } + json := ast.StmtNode2JSON(q.Query, "", "") + whereJSON := common.JSONFind(json, "Where") + for _, where := range whereJSON { + conds := []string{where} + conds = append(conds, common.JSONFind(where, "L")...) + conds = append(conds, common.JSONFind(where, "R")...) 
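The RES.009 hunk above (it continues just below) swaps the nested per-statement type switches for a generic JSON walk over the parsed AST. A minimal standalone sketch of the same idea, assuming `Op` 7 is the parser's equality operator (as the `TestJSONFind` fixture later in this diff suggests) and that `ast.StmtNode2JSON` returns the statement dump as a JSON string, exactly as its use in the hunk implies:

```go
// Sketch only: mirrors the JSON-walk technique of the new RuleMultiCompare,
// not the shipped function itself.
package main

import (
	"fmt"

	"github.com/XiaoMi/soar/ast"
	"github.com/XiaoMi/soar/common"
	"github.com/tidwall/gjson"
)

func hasMultiCompare(sql string) bool {
	stmtJSON := ast.StmtNode2JSON(sql, "", "") // same call the hunk uses
	for _, where := range common.JSONFind(stmtJSON, "Where") {
		// A multi-compare can hide on either side of AND/OR, so check the
		// Where node itself plus every L/R subtree found underneath it.
		conds := append([]string{where}, common.JSONFind(where, "L")...)
		conds = append(conds, common.JSONFind(where, "R")...)
		for _, cond := range conds {
			// Op 7 with a left operand that is itself an equality
			// means something like `col = col = 'abc'`.
			if gjson.Get(cond, "Op").Int() == 7 && gjson.Get(cond, "L.Op").Int() == 7 {
				return true
			}
		}
	}
	return false
}

func main() {
	fmt.Println(hasMultiCompare("SELECT * FROM tbl WHERE col = col = 'abc'")) // true
	fmt.Println(hasMultiCompare("SELECT * FROM tbl WHERE col = 'abc'"))       // false
}
```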
+ for _, cond := range conds { + if gjson.Get(cond, "Op").Int() == 7 && gjson.Get(cond, "L.Op").Int() == 7 { + rule = HeuristicRules["RES.009"] + return rule } } } diff --git a/advisor/heuristic_test.go b/advisor/heuristic_test.go index e163f237f2d9956998c5351de56fdd29632dcd7c..be3359010ab674c48b0ce50da070158256672e95 100644 --- a/advisor/heuristic_test.go +++ b/advisor/heuristic_test.go @@ -946,6 +946,9 @@ func TestRuleMultiCompare(t *testing.T) { sqls := [][]string{ { "SELECT * FROM tbl WHERE col = col = 'abc'", + "SELECT * FROM tbl WHERE col = 'def' and col = col = 'abc'", + "SELECT * FROM tbl WHERE col = 'def' or col = col = 'abc'", + "SELECT * FROM tbl WHERE col = col = 'abc' and col = 'def'", "UPDATE tbl set col = 1 WHERE col = col = 'abc'", "DELETE FROM tbl WHERE col = col = 'abc'", }, diff --git a/ast/tidb.go b/ast/tidb.go index 1dac85c0e9306b6be6a52a72800e125164063263..063c79769bff05d4d19510eadac6aed5e876b401 100644 --- a/ast/tidb.go +++ b/ast/tidb.go @@ -17,14 +17,14 @@ package ast import ( - "encoding/json" - "github.com/XiaoMi/soar/common" "github.com/kr/pretty" "github.com/pingcap/parser" "github.com/pingcap/parser/ast" + json "github.com/CorgiMan/json2" + // for pingcap parser _ "github.com/pingcap/tidb/types/parser_driver" ) diff --git a/cmd/soar/tool.go b/cmd/soar/tool.go index ced15f6bc470c71e9455930ed121066b88a819a8..5d753cbf32e1f282ede24c2cb24557ab91815eff 100644 --- a/cmd/soar/tool.go +++ b/cmd/soar/tool.go @@ -210,6 +210,7 @@ func initQuery(query string) string { if err != nil { common.Log.Critical("ioutil.ReadAll Error: %v", err) } + common.Log.Debug("initQuery get query from os.Stdin") return string(data) } @@ -219,6 +220,7 @@ func initQuery(query string) string { if err != nil { common.Log.Critical("ioutil.ReadFile Error: %v", err) } + common.Log.Debug("initQuery get query from file: %s", query) return string(data) } diff --git a/common/config.go b/common/config.go index 9370f10e3988872c2012d181289fbfdc1390fbc1..786d150d11d14a04b41efc9296493bee030b28fc 100644 --- a/common/config.go +++ b/common/config.go @@ -93,7 +93,7 @@ type Configuration struct { MaxDistinctCount int `yaml:"max-distinct-count"` // 单条 SQL 中 Distinct 的最大数量 MaxIdxColsCount int `yaml:"max-index-cols-count"` // 复合索引中包含列的最大数量 MaxTextColsCount int `yaml:"max-text-cols-count"` // 表中含有的 text/blob 列的最大数量 - MaxTotalRows int64 `yaml:"max-total-rows"` // 计算散粒度时,当数据行数大于 MaxTotalRows 即开启数据库保护模式,散粒度返回结果可信度下降 + MaxTotalRows uint64 `yaml:"max-total-rows"` // 计算散粒度时,当数据行数大于 MaxTotalRows 即开启数据库保护模式,散粒度返回结果可信度下降 MaxQueryCost int64 `yaml:"max-query-cost"` // last_query_cost 超过该值时将给予警告 SpaghettiQueryLength int `yaml:"spaghetti-query-length"` // SQL最大长度警告,超过该长度会给警告 AllowDropIndex bool `yaml:"allow-drop-index"` // 允许输出删除重复索引的建议 @@ -426,7 +426,7 @@ func parseDSN(odbc string, d *Dsn) *Dsn { func ParseDSN(odbc string, d *Dsn) *Dsn { cfg, err := mysql.ParseDSN(odbc) if err != nil { - Log.Warn("go-sql-driver/mysql.ParseDSN Error: %s, DSN: %s, try to use old version parseDSN", err.Error(), odbc) + Log.Debug("go-sql-driver/mysql.ParseDSN Error: %s, DSN: %s, try to use old version parseDSN", err.Error(), odbc) return parseDSN(odbc, d) } return newDSN(cfg) @@ -596,7 +596,7 @@ func readCmdFlags() error { maxDistinctCount := flag.Int("max-distinct-count", Config.MaxDistinctCount, "MaxDistinctCount, 单条 SQL 中 Distinct 的最大数量") maxIdxColsCount := flag.Int("max-index-cols-count", Config.MaxIdxColsCount, "MaxIdxColsCount, 复合索引中包含列的最大数量") maxTextColsCount := flag.Int("max-text-cols-count", Config.MaxTextColsCount, "MaxTextColsCount, 表中含有的 
text/blob 列的最大数量") - maxTotalRows := flag.Int64("max-total-rows", Config.MaxTotalRows, "MaxTotalRows, 计算散粒度时,当数据行数大于MaxTotalRows即开启数据库保护模式,不计算散粒度") + maxTotalRows := flag.Uint64("max-total-rows", Config.MaxTotalRows, "MaxTotalRows, 计算散粒度时,当数据行数大于MaxTotalRows即开启数据库保护模式,不计算散粒度") maxQueryCost := flag.Int64("max-query-cost", Config.MaxQueryCost, "MaxQueryCost, last_query_cost 超过该值时将给予警告") spaghettiQueryLength := flag.Int("spaghetti-query-length", Config.SpaghettiQueryLength, "SpaghettiQueryLength, SQL最大长度警告,超过该长度会给警告") allowDropIdx := flag.Bool("allow-drop-index", Config.AllowDropIndex, "AllowDropIndex, 允许输出删除重复索引的建议") diff --git a/common/testdata/TestJSONFind.golden b/common/testdata/TestJSONFind.golden new file mode 100644 index 0000000000000000000000000000000000000000..d08de39314fbd456539cd8df303f7497825005ed --- /dev/null +++ b/common/testdata/TestJSONFind.golden @@ -0,0 +1,8 @@ +[McLaughlin Hunter Harold] +[{ + "title": "Sample Konfabulator Widget", + "name": "main_window", + "width": 500, + "height": 500 + }] +[ binary binary utf8mb4_bin ] diff --git a/common/tricks.go b/common/tricks.go index 53360caa703cbf4ba104e3f80943ebf6d1bd0d5d..efe85ccf8a382e2c1b976966e59efb61bb79ca6c 100644 --- a/common/tricks.go +++ b/common/tricks.go @@ -25,6 +25,8 @@ import ( "path/filepath" "reflect" "sort" + + "github.com/tidwall/gjson" ) // GoldenDiff 从 gofmt 学来的测试方法 @@ -104,3 +106,35 @@ func SortedKey(m interface{}) []string { sort.Strings(keys) return keys } + +// jsonFind internal function +func jsonFind(json string, name string, find *[]string) (next []string) { + res := gjson.Parse(json) + res.ForEach(func(key, value gjson.Result) bool { + if key.String() == name { + *find = append(*find, value.String()) + } else { + switch value.Type { + case gjson.Number, gjson.True, gjson.False, gjson.Null: + default: + next = append(next, value.String()) + } + } + return true // keep iterating + }) + return next +} + +// JSONFind iterate find name in json +func JSONFind(json string, name string) []string { + var find []string + next := []string{json} + for len(next) > 0 { + var tmpNext []string + for _, subJSON := range next { + tmpNext = append(tmpNext, jsonFind(subJSON, name, &find)...) 
+ } + next = tmpNext + } + return find +} diff --git a/common/tricks_test.go b/common/tricks_test.go index 063750ee2a183403251e72790762034862d6bbdf..5f4dab8b9f8b8dfbe93c5e0f418a13b0490ccb71 100644 --- a/common/tricks_test.go +++ b/common/tricks_test.go @@ -24,6 +24,7 @@ import ( ) func TestCaptureOutput(t *testing.T) { + Log.Debug("Entering function: %s", GetFunctionName()) c1 := make(chan string, 1) // test output buf large than 65535 length := 1<<16 + 1 @@ -48,4 +49,343 @@ func TestCaptureOutput(t *testing.T) { case <-time.After(1 * time.Second): t.Error("capture timeout, pipe read hangup") } + Log.Debug("Exiting function: %s", GetFunctionName()) +} + +func TestJSONFind(t *testing.T) { + Log.Debug("Entering function: %s", GetFunctionName()) + jsons := []string{ + `{ + "programmers": [ + { + "firstName": "Janet", + "Collate": "McLaughlin", + }, { + "firstName": "Elliotte", + "Collate": "Hunter", + }, { + "firstName": "Jason", + "Collate": "Harold", + } + ] +}`, + ` +{ + "widget": { + "debug": "on", + "Collate": { + "title": "Sample Konfabulator Widget", + "name": "main_window", + "width": 500, + "height": 500 + }, + "image": { + "src": "Images/Sun.png", + "hOffset": 250, + "vOffset": 250, + "alignment": "center" + }, + "text": { + "data": "Click Here", + "size": 36, + "style": "bold", + "vOffset": 100, + "alignment": "center", + "onMouseUp": "sun1.opacity = (sun1.opacity / 100) * 90;" + } + } +} +`, + ` +[ + { + "SQLCache": true, + "CalcFoundRows": false, + "StraightJoin": false, + "Priority": 0, + "Distinct": false, + "From": { + "TableRefs": { + "Left": { + "Source": { + "Schema": { + "O": "", + "L": "" + }, + "Name": { + "O": "tb", + "L": "tb" + }, + "DBInfo": null, + "TableInfo": null, + "IndexHints": null + }, + "AsName": { + "O": "", + "L": "" + } + }, + "Right": null, + "Tp": 0, + "On": null, + "Using": null, + "NaturalJoin": false, + "StraightJoin": false + } + }, + "Where": { + "Type": { + "Tp": 0, + "Flag": 0, + "Flen": 0, + "Decimal": 0, + "Charset": "", + "Collate": "", + "Elems": null + }, + "Op": 4, + "L": { + "Type": { + "Tp": 0, + "Flag": 0, + "Flen": 0, + "Decimal": 0, + "Charset": "", + "Collate": "", + "Elems": null + }, + "Op": 7, + "L": { + "Type": { + "Tp": 0, + "Flag": 0, + "Flen": 0, + "Decimal": 0, + "Charset": "", + "Collate": "", + "Elems": null + }, + "Name": { + "Schema": { + "O": "", + "L": "" + }, + "Table": { + "O": "", + "L": "" + }, + "Name": { + "O": "col3", + "L": "col3" + } + }, + "Refer": null + }, + "R": { + "Type": { + "Tp": 8, + "Flag": 128, + "Flen": 1, + "Decimal": 0, + "Charset": "binary", + "Collate": "binary", + "Elems": null + } + } + }, + "R": { + "Type": { + "Tp": 0, + "Flag": 0, + "Flen": 0, + "Decimal": 0, + "Charset": "", + "Collate": "", + "Elems": null + }, + "Op": 1, + "L": { + "Type": { + "Tp": 0, + "Flag": 0, + "Flen": 0, + "Decimal": 0, + "Charset": "", + "Collate": "", + "Elems": null + }, + "Op": 7, + "L": { + "Type": { + "Tp": 0, + "Flag": 0, + "Flen": 0, + "Decimal": 0, + "Charset": "", + "Collate": "", + "Elems": null + }, + "Name": { + "Schema": { + "O": "", + "L": "" + }, + "Table": { + "O": "", + "L": "" + }, + "Name": { + "O": "col3", + "L": "col3" + } + }, + "Refer": null + }, + "R": { + "Type": { + "Tp": 8, + "Flag": 128, + "Flen": 1, + "Decimal": 0, + "Charset": "binary", + "Collate": "binary", + "Elems": null + } + } + }, + "R": { + "Type": { + "Tp": 0, + "Flag": 0, + "Flen": 0, + "Decimal": 0, + "Charset": "", + "Collate": "", + "Elems": null + }, + "Op": 7, + "L": { + "Type": { + "Tp": 0, + "Flag": 0, + "Flen": 0, + 
"Decimal": 0, + "Charset": "", + "Collate": "", + "Elems": null + }, + "Op": 7, + "L": { + "Type": { + "Tp": 0, + "Flag": 0, + "Flen": 0, + "Decimal": 0, + "Charset": "", + "Collate": "", + "Elems": null + }, + "Name": { + "Schema": { + "O": "", + "L": "" + }, + "Table": { + "O": "", + "L": "" + }, + "Name": { + "O": "col1", + "L": "col1" + } + }, + "Refer": null + }, + "R": { + "Type": { + "Tp": 0, + "Flag": 0, + "Flen": 0, + "Decimal": 0, + "Charset": "", + "Collate": "", + "Elems": null + }, + "Name": { + "Schema": { + "O": "", + "L": "" + }, + "Table": { + "O": "", + "L": "" + }, + "Name": { + "O": "col2", + "L": "col2" + } + }, + "Refer": null + } + }, + "R": { + "Type": { + "Tp": 253, + "Flag": 0, + "Flen": 3, + "Decimal": -1, + "Charset": "utf8mb4", + "Collate": "utf8mb4_bin", + "Elems": null + } + } + } + } + }, + "Fields": { + "Fields": [ + { + "Offset": 7, + "WildCard": { + "Table": { + "O": "", + "L": "" + }, + "Schema": { + "O": "", + "L": "" + } + }, + "Expr": null, + "AsName": { + "O": "", + "L": "" + }, + "Auxiliary": false + } + ] + }, + "GroupBy": null, + "Having": null, + "WindowSpecs": null, + "OrderBy": null, + "Limit": null, + "LockTp": 0, + "TableHints": null, + "IsAfterUnionDistinct": false, + "IsInBraces": false + } +] +`, + } + err := GoldenDiff(func() { + for _, json := range jsons { + result := JSONFind(json, "Collate") + fmt.Println(result) + } + }, t.Name(), update) + if err != nil { + t.Error(err) + } + Log.Debug("Exiting function: %s", GetFunctionName()) } diff --git a/database/profiling.go b/database/profiling.go index e08e7372675c901d2d1f7dd0641c625e8fd0ba17..0f46d37bbca26e719c15e9d802d844b3e3f802f9 100644 --- a/database/profiling.go +++ b/database/profiling.go @@ -26,7 +26,7 @@ import ( "vitess.io/vitess/go/vt/sqlparser" ) -// Profiling show profile输出的结果 +// Profiling show profile 输出的结果 type Profiling struct { Rows []ProfilingRow } @@ -35,7 +35,7 @@ type Profiling struct { type ProfilingRow struct { Status string Duration float64 - // TODO: 支持show profile all,不过目前看all的信息过多有点眼花缭乱 + // TODO: 支持show profile all, 不过目前看所有的信息过多有点眼花缭乱 } // Profiling 执行SQL,并对其 Profile @@ -48,14 +48,14 @@ func (db *Connector) Profiling(sql string, params ...interface{}) ([]ProfilingRo return rows, errors.New("no need profiling") } - // 测试环境如果检查是关闭的,则SQL不会被执行 + // 测试环境如果检查是关闭的,则 SQL 不会被执行 if common.Config.TestDSN.Disable { return rows, errors.New("dsn is disable") } // 数据库安全性检查:如果 Connector 的 IP 端口与 TEST 环境不一致,则启用 SQL 白名单 // 不在白名单中的 SQL 不允许执行 - // 执行环境与test环境不相同 + // 执行环境与 test 环境不相同 if db.Addr != common.Config.TestDSN.Addr && db.dangerousQuery(sql) { return rows, fmt.Errorf("query execution deny: Execute SQL with DSN(%s/%s) '%s'", db.Addr, db.Database, fmt.Sprintf(sql, params...)) @@ -114,7 +114,7 @@ func (db *Connector) Profiling(sql string, params ...interface{}) ([]ProfilingRo return rows, err } -// FormatProfiling 格式化输出Profiling信息 +// FormatProfiling 格式化输出 Profiling 信息 func FormatProfiling(rows []ProfilingRow) string { str := []string{"| Status | Duration |"} str = append(str, "| --- | --- |") diff --git a/database/profiling_test.go b/database/profiling_test.go index d1948ab1ef55439ad74c36d87b5991eda1bf25c0..2b49da9474c7e64273e647b9195b8dddb738b423 100644 --- a/database/profiling_test.go +++ b/database/profiling_test.go @@ -20,6 +20,7 @@ import ( "testing" "github.com/XiaoMi/soar/common" + "github.com/kr/pretty" ) diff --git a/database/show.go b/database/show.go index 905b0697b450e768c766ffcb6bcad6c75cf13391..57ff6b378a0eab11640b9579c88ccb589fa5a8ab 100644 --- 
a/database/show.go +++ b/database/show.go @@ -41,25 +41,25 @@ type tableStatusRow struct { Engine []byte // 该表使用的存储引擎 Version []byte // 该表的 .frm 文件版本号 RowFormat []byte // 该表使用的行存储格式 - Rows int64 // 表行数, InnoDB 引擎中为预估值,甚至可能会有40%~50%的数值偏差 - AvgRowLength int // 平均行长度 + Rows uint64 // 表行数, InnoDB 引擎中为预估值,甚至可能会有40%~50%的数值偏差 + AvgRowLength uint64 // 平均行长度 // MyISAM: Data_length 为数据文件的大小,单位为 bytes // InnoDB: Data_length 为聚簇索引分配的近似内存量,单位为 bytes, 计算方式为聚簇索引数量乘以 InnoDB 页面大小 // 其他不同的存储引擎中该值的意义可能不尽相同 - DataLength int + DataLength uint64 // MyISAM: Max_data_length 为数据文件长度的最大值。这是在给定使用的数据指针大小的情况下,可以存储在表中的数据的最大字节数 // InnoDB: 未使用 // 其他不同的存储引擎中该值的意义可能不尽相同 - MaxDataLength int + MaxDataLength uint64 // MyISAM: Index_length 为 index 文件的大小,单位为 bytes // InnoDB: Index_length 为非聚簇索引分配的近似内存量,单位为 bytes,计算方式为非聚簇索引数量乘以 InnoDB 页面大小 // 其他不同的存储引擎中该值的意义可能不尽相同 - IndexLength int + IndexLength uint64 - DataFree int // 已分配但未使用的字节数 + DataFree uint64 // 已分配但未使用的字节数 AutoIncrement []byte // 下一个自增值 CreateTime []byte // 创建时间 UpdateTime []byte // 最近一次更新时间,该值不准确 diff --git a/env/env.go b/env/env.go index a8c1e55e40d0645e139591b923da6524ca02ccf3..6dcb046d59523e2ca8af120e4ad6ba482d5390b7 100644 --- a/env/env.go +++ b/env/env.go @@ -90,7 +90,7 @@ func BuildEnv() (*VirtualEnv, *database.Connector) { common.Config.TestDSN.Disable = true } - // 检查是否允许Online和Test一致,防止误操作 + // 检查是否允许 Online 和 Test 一致,防止误操作 if common.FormatDSN(common.Config.OnlineDSN) == common.FormatDSN(common.Config.TestDSN) && !common.Config.AllowOnlineAsTest { common.Log.Warn("BuildEnv AllowOnlineAsTest: %s:********@%s/%s OnlineDSN can't config as TestDSN", @@ -108,7 +108,7 @@ func BuildEnv() (*VirtualEnv, *database.Connector) { return vEnv, connOnline } -// RealDB 从测试环境中获取通过hash后的DB +// RealDB 从测试环境中获取通过 hash 后的 DB func (vEnv *VirtualEnv) RealDB(hash string) string { if _, ok := vEnv.Hash2DB[hash]; ok { return vEnv.Hash2DB[hash] @@ -120,7 +120,7 @@ func (vEnv *VirtualEnv) RealDB(hash string) string { return hash } -// DBHash 从测试环境中根据DB找到对应的hash值 +// DBHash 从测试环境中根据 DB 找到对应的 hash 值 func (vEnv *VirtualEnv) DBHash(db string) string { if _, ok := vEnv.DBRef[db]; ok { return vEnv.DBRef[db] @@ -194,15 +194,15 @@ func (vEnv *VirtualEnv) CleanupTestDatabase() { common.Log.Debug("CleanupTestDatabase done") } -// BuildVirtualEnv rEnv为SQL源环境,DB使用的信息从接口获取 -// 注意:如果是USE,DDL等语句,执行完第一条就会返回,后面的SQL不会执行 +// BuildVirtualEnv rEnv 为 SQL 源环境,DB 使用的信息从接口获取 +// 注意:如果是 USE, DDL 等语句,执行完第一条就会返回,后面的 SQL 不会执行 func (vEnv *VirtualEnv) BuildVirtualEnv(rEnv *database.Connector, SQLs ...string) bool { var stmt sqlparser.Statement var err error // 置空错误信息 vEnv.Error = nil - // 检测是否已经创建初始数据库,如果未创建则创建一个名称hash过的映射数据库 + // 检测是否已经创建初始数据库,如果未创建则创建一个名称 hash 过的映射数据库 err = vEnv.createDatabase(rEnv) common.LogIfWarn(err, "") @@ -212,7 +212,7 @@ func (vEnv *VirtualEnv) BuildVirtualEnv(rEnv *database.Connector, SQLs ...string return true } - // 判断rEnv中是否指定了DB + // 判断 rEnv 中是否指定了 DB if rEnv.Database == "" { common.Log.Error("BuildVirtualEnv no database specified, TestDSN init failed") return false @@ -221,9 +221,7 @@ func (vEnv *VirtualEnv) BuildVirtualEnv(rEnv *database.Connector, SQLs ...string // 库表提取 meta := make(map[string]*common.DB) for _, sql := range SQLs { - common.Log.Debug("BuildVirtualEnv Database&TableName Mapping, SQL: %s", sql) - stmt, err = sqlparser.Parse(sql) if err != nil { common.Log.Error("BuildVirtualEnv Error : %v", err) @@ -249,7 +247,7 @@ func (vEnv *VirtualEnv) BuildVirtualEnv(rEnv *database.Connector, SQLs ...string // 为不影响其他SQL操作,复制一个Connector对象,将数据库切换到对应的DB上直接执行 vEnv.Database = 
vEnv.DBRef[rEnv.Database] - // 为了支持并发,需要将DB进行映射,但db.table这种形式无法保证DB的映射是正确的 + // 为了支持并发,需要将DB进行映射,但 db.table 这种形式无法保证 DB 的映射是正确的 // TODO:暂不支持 create db.tableName (id int) 形式的建表语句 if stmt.Table.Qualifier.String() != "" { common.Log.Error("BuildVirtualEnv DDL Not support db.tb format") @@ -300,7 +298,7 @@ func (vEnv *VirtualEnv) BuildVirtualEnv(rEnv *database.Connector, SQLs ...string meta := ast.GetMeta(stmt, nil) - // 由于DB环境可能是变的,所以需要每一次都单独的提取库表结构,整体随着rEnv的变动而发生变化 + // 由于 DB 环境可能是变的,所以需要每一次都单独的提取库表结构,整体随着 rEnv 的变动而发生变化 for db, table := range meta { if db == "" { db = rEnv.Database @@ -361,7 +359,7 @@ func (vEnv *VirtualEnv) createDatabase(rEnv *database.Connector) error { // optimizer_YYMMDDHHmmss_xxxx dbHash := fmt.Sprintf("optimizer_%s_%s", // Total 39 bytes - time.Now().Format("060102150405"), // 12 Bytes 20180102030405 + time.Now().Format("060102150405"), // 12 Bytes 180102030405 strings.ToLower(uniuri.New())) // 16 Bytes random string common.Log.Debug("createDatabase, mapping `%s` :`%s`-->`%s`", rEnv.Database, rEnv.Database, dbHash) ddl, err := rEnv.ShowCreateDatabase(rEnv.Database) @@ -496,7 +494,7 @@ func (vEnv *VirtualEnv) GenTableColumns(meta common.Meta) common.TableColumns { } if len(tb.Column) == 0 { - // tb.column为空说明SQL里这个表是用的*来查询 + // tb.column 为空说明 SQL 里这个表是用的*来查询 if err != nil { common.Log.Error("ast.Rewrite ShowColumns, Error: %v", err) break diff --git a/test/env.bats b/test/env.bats new file mode 100644 index 0000000000000000000000000000000000000000..f14758eac5452b3b46ee887febda7bf13b84ed28 --- /dev/null +++ b/test/env.bats @@ -0,0 +1,16 @@ +#!/usr/bin/env bats + +load test_helper + +@test "Simple Query Optimizer" { + ${SOAR_BIN_ENV} -query "select * from film where length > 120" | grep -v "散粒度" > ${BATS_TMP_DIRNAME}/${BATS_TEST_NAME}.golden + run golden_diff + [ $status -eq 0 ] +} + +@test "Run all test cases" { + ${SOAR_BIN} -list-test-sqls | ${SOAR_BIN_ENV} | grep -v "散粒度" > ${BATS_TMP_DIRNAME}/${BATS_TEST_NAME}.golden + run golden_diff + [ $status -eq 0 ] +} + diff --git a/test/fixture/test_Run_default_printconfig_cases.golden b/test/fixture/test_Run_default_printconfig_cases.golden new file mode 100644 index 0000000000000000000000000000000000000000..4624675e3fb3f9aad9d6a9132d95756d5d1d9cae --- /dev/null +++ b/test/fixture/test_Run_default_printconfig_cases.golden @@ -0,0 +1,125 @@ +online-dsn: + user: "" + password: '********' + net: tcp + addr: 127.0.0.1:3306 + schema: information_schema + charset: utf8 + collation: utf8_general_ci + loc: UTC + tls: "" + server-public-key: "" + maxallowedpacket: 4194304 + params: + charset: utf8 + timeout: 0 + read-timeout: 0 + write-timeout: 0 + allow-native-passwords: true + allow-old-passwords: false + disable: false +test-dsn: + user: "" + password: '********' + net: tcp + addr: 127.0.0.1:3306 + schema: information_schema + charset: utf8 + collation: utf8_general_ci + loc: UTC + tls: "" + server-public-key: "" + maxallowedpacket: 4194304 + params: + charset: utf8 + timeout: 0 + read-timeout: 0 + write-timeout: 0 + allow-native-passwords: true + allow-old-passwords: false + disable: false +allow-online-as-test: false +drop-test-temporary: true +cleanup-test-database: false +only-syntax-check: false +sampling-statistic-target: 100 +sampling: false +sampling-condition: "" +profiling: false +trace: false +explain: true +delimiter: ; +log-level: 3 +log-output: /dev/null +report-type: markdown +report-css: "" +report-javascript: "" +report-title: SQL优化分析报告 +markdown-extensions: 94 +markdown-html-flags: 0 +ignore-rules: +- COL.011 
+rewrite-rules: +- delimiter +- orderbynull +- groupbyconst +- dmlorderby +- having +- star2columns +- insertcolumns +- distinctstar +blacklist: "" +max-join-table-count: 5 +max-group-by-cols-count: 5 +max-distinct-count: 5 +max-index-cols-count: 5 +max-text-cols-count: 2 +max-total-rows: 9999999 +max-query-cost: 9999 +spaghetti-query-length: 2048 +allow-drop-index: false +max-in-count: 10 +max-index-bytes-percolumn: 767 +max-index-bytes: 3072 +allow-charsets: +- utf8 +- utf8mb4 +allow-collates: [] +allow-engines: +- innodb +max-index-count: 10 +max-column-count: 40 +max-value-count: 100 +index-prefix: idx_ +unique-key-prefix: uk_ +max-subquery-depth: 5 +max-varchar-length: 1024 +column-not-allow-type: +- boolean +min-cardinality: 0 +explain-sql-report-type: pretty +explain-type: extended +explain-format: traditional +explain-warn-select-type: +- "" +explain-warn-access-type: +- ALL +explain-max-keys: 3 +explain-min-keys: 0 +explain-max-rows: 10000 +explain-warn-extra: +- Using temporary +- Using filesort +explain-max-filtered: 100 +explain-warn-scalability: +- O(n) +show-warnings: false +show-last-query-cost: false +query: "" +list-heuristic-rules: false +list-rewrite-rules: false +list-test-sqls: false +list-report-types: false +verbose: false +dry-run: true +max-pretty-sql-length: 1024 diff --git a/test/main.bats b/test/main.bats old mode 100755 new mode 100644 index ac67bb06125d0c8e54ca2d32bbdada8597eb4179..38fbfd2e167f73204d83ec68dc8156090e427a0e --- a/test/main.bats +++ b/test/main.bats @@ -2,19 +2,41 @@ load test_helper -@test "Simple Query Optimizer" { - ${SOAR_BIN_ENV} -query "select * from film where length > 120" | grep -v "散粒度" > ${BATS_TMP_DIRNAME}/${BATS_TEST_NAME}.golden - run golden_diff ${BATS_TEST_NAME} - [ $status -eq 0 ] +@test "Test soar version" { + run ${SOAR_BIN} -version + [ "$status" -eq 0 ] + [ "${lines[0]%% *}" == "Version:" ] + [ "${lines[1]%% *}" == "Branch:" ] + [ "${lines[2]%% *}" == "Compile:" ] + [ $(expr "${lines[2]}" : "Compile: $(date +'%Y-%m-%d').*") -ne 0 ] } -@test "Syntax Check" { - run ${SOAR_BIN} -query "select * frm film" -only-syntax-check +@test "No arguments prints message" { + run ${SOAR_BIN} [ $status -eq 1 ] + [ "${lines[0]}" == 'Args format error, use --help see how to use it!' 
] +} + +@test "Run default printconfig cases" { + ${SOAR_BIN} -print-config -log-output=/dev/null > ${BATS_TMP_DIRNAME}/${BATS_TEST_NAME}.golden + run golden_diff + [ $status -eq 0 ] +} + +@test "Check config cases" { + run ${SOAR_BIN_ENV} -check-config + [ $status -eq 0 ] + [ -z ${output} ] } -@test "Run all test cases" { - ${SOAR_BIN} -list-test-sqls | ${SOAR_BIN_ENV} | grep -v "散粒度" > ${BATS_TMP_DIRNAME}/${BATS_TEST_NAME}.golden - run golden_diff ${BATS_TEST_NAME} +@test "Syntax Check OK" { + run ${SOAR_BIN} -query "select * from film" -only-syntax-check [ $status -eq 0 ] + [ -z $ouput ] +} + +@test "Syntax Check Error" { + run ${SOAR_BIN} -query "select * frm film" -only-syntax-check + [ $status -eq 1 ] + [ -n $ouput ] } diff --git a/test/other.bats b/test/other.bats new file mode 100644 index 0000000000000000000000000000000000000000..fdc7da46e1a364c106e5d2cded7c7a63e468e6e4 --- /dev/null +++ b/test/other.bats @@ -0,0 +1,3 @@ +#!/usr/bin/env bats + +load test_helper diff --git a/test/query.bats b/test/query.bats new file mode 100644 index 0000000000000000000000000000000000000000..debb93536db69a4d5178ac4f082ad85d2caa80b7 --- /dev/null +++ b/test/query.bats @@ -0,0 +1,9 @@ +#!/usr/bin/env bats + +load test_helper + +@test "Check Query Optimizer" { + run ${SOAR_BIN} -query "select * from film where length > 120" + [ $status -eq 0 ] +} + diff --git a/test/test_helper.bash b/test/test_helper.bash index 1af72cc378fad6e4b977e0a0897ab1b19b41b0e9..c3db244d073a5cb8e86621f53acc655b58eb4db9 100644 --- a/test/test_helper.bash +++ b/test/test_helper.bash @@ -7,8 +7,7 @@ setup() { mkdir -p "${BATS_TMP_DIRNAME}" } +# golden_diff like gofmt golden file check method, use this function check output different with template golden_diff() { - FUNC_NAME=$1 - diff "${BATS_TMP_DIRNAME}/${FUNC_NAME}.golden" "${BATS_FIXTURE_DIRNAME}/${FUNC_NAME}.golden" >/dev/null - return $? + diff "${BATS_TMP_DIRNAME}/${BATS_TEST_NAME}.golden" "${BATS_FIXTURE_DIRNAME}/${BATS_TEST_NAME}.golden" >/dev/null } diff --git a/vendor/github.com/CorgiMan/json2/README.md b/vendor/github.com/CorgiMan/json2/README.md new file mode 100644 index 0000000000000000000000000000000000000000..dc55617756cbde81afdbd40486806ff109dcd3a9 Binary files /dev/null and b/vendor/github.com/CorgiMan/json2/README.md differ diff --git a/vendor/github.com/CorgiMan/json2/decode.go b/vendor/github.com/CorgiMan/json2/decode.go new file mode 100644 index 0000000000000000000000000000000000000000..23e06e4126c93b57e8a93472d33ec43a777c04c3 --- /dev/null +++ b/vendor/github.com/CorgiMan/json2/decode.go @@ -0,0 +1,1047 @@ +// I stole this from golang, only changed package name +// and commented out line 1018-1020 of encode.go + +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Represents JSON data structure using native Go types: booleans, floats, +// strings, arrays, and maps. + +package json2 + +import ( + "encoding" + "encoding/base64" + "errors" + "fmt" + "reflect" + "runtime" + "strconv" + "strings" + "unicode" + "unicode/utf16" + "unicode/utf8" +) + +// Unmarshal parses the JSON-encoded data and stores the result +// in the value pointed to by v. +// +// Unmarshal uses the inverse of the encodings that +// Marshal uses, allocating maps, slices, and pointers as necessary, +// with the following additional rules: +// +// To unmarshal JSON into a pointer, Unmarshal first handles the case of +// the JSON being the JSON literal null. 
In that case, Unmarshal sets +// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into +// the value pointed at by the pointer. If the pointer is nil, Unmarshal +// allocates a new value for it to point to. +// +// To unmarshal JSON into a struct, Unmarshal matches incoming object +// keys to the keys used by Marshal (either the struct field name or its tag), +// preferring an exact match but also accepting a case-insensitive match. +// +// To unmarshal JSON into an interface value, +// Unmarshal stores one of these in the interface value: +// +// bool, for JSON booleans +// float64, for JSON numbers +// string, for JSON strings +// []interface{}, for JSON arrays +// map[string]interface{}, for JSON objects +// nil for JSON null +// +// If a JSON value is not appropriate for a given target type, +// or if a JSON number overflows the target type, Unmarshal +// skips that field and completes the unmarshalling as best it can. +// If no more serious errors are encountered, Unmarshal returns +// an UnmarshalTypeError describing the earliest such error. +// +// When unmarshaling quoted strings, invalid UTF-8 or +// invalid UTF-16 surrogate pairs are not treated as an error. +// Instead, they are replaced by the Unicode replacement +// character U+FFFD. +// +func Unmarshal(data []byte, v interface{}) error { + // Check for well-formedness. + // Avoids filling out half a data structure + // before discovering a JSON syntax error. + var d decodeState + err := checkValid(data, &d.scan) + if err != nil { + return err + } + + d.init(data) + return d.unmarshal(v) +} + +// Unmarshaler is the interface implemented by objects +// that can unmarshal a JSON description of themselves. +// The input can be assumed to be a valid encoding of +// a JSON value. UnmarshalJSON must copy the JSON data +// if it wishes to retain the data after returning. +type Unmarshaler interface { + UnmarshalJSON([]byte) error +} + +// An UnmarshalTypeError describes a JSON value that was +// not appropriate for a value of a specific Go type. +type UnmarshalTypeError struct { + Value string // description of JSON value - "bool", "array", "number -5" + Type reflect.Type // type of Go value it could not be assigned to +} + +func (e *UnmarshalTypeError) Error() string { + return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String() +} + +// An UnmarshalFieldError describes a JSON object key that +// led to an unexported (and therefore unwritable) struct field. +// (No longer used; kept for compatibility.) +type UnmarshalFieldError struct { + Key string + Type reflect.Type + Field reflect.StructField +} + +func (e *UnmarshalFieldError) Error() string { + return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String() +} + +// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal. +// (The argument to Unmarshal must be a non-nil pointer.) 
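A small sketch of the decode behaviour documented above, using the vendored `json2` package the same way `ast/tidb.go` now imports it; the `TableStatus` struct and its input are hypothetical:

```go
// A field whose JSON value has the wrong type is skipped, the rest of the
// struct is still filled, and the earliest *UnmarshalTypeError is returned.
package main

import (
	"fmt"

	json "github.com/CorgiMan/json2"
)

type TableStatus struct {
	Name string
	Rows uint64
}

func main() {
	// "Rows" is a string here and cannot be stored into a uint64 field.
	data := []byte(`{"Name": "film", "Rows": "not-a-number"}`)

	var ts TableStatus
	err := json.Unmarshal(data, &ts)
	fmt.Printf("%+v\n", ts) // {Name:film Rows:0} — the valid field survives
	fmt.Println(err)        // json: cannot unmarshal string into Go value of type uint64
}
```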
+type InvalidUnmarshalError struct { + Type reflect.Type +} + +func (e *InvalidUnmarshalError) Error() string { + if e.Type == nil { + return "json: Unmarshal(nil)" + } + + if e.Type.Kind() != reflect.Ptr { + return "json: Unmarshal(non-pointer " + e.Type.String() + ")" + } + return "json: Unmarshal(nil " + e.Type.String() + ")" +} + +func (d *decodeState) unmarshal(v interface{}) (err error) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } + err = r.(error) + } + }() + + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr || rv.IsNil() { + return &InvalidUnmarshalError{reflect.TypeOf(v)} + } + + d.scan.reset() + // We decode rv not rv.Elem because the Unmarshaler interface + // test must be applied at the top level of the value. + d.value(rv) + return d.savedError +} + +// A Number represents a JSON number literal. +type Number string + +// String returns the literal text of the number. +func (n Number) String() string { return string(n) } + +// Float64 returns the number as a float64. +func (n Number) Float64() (float64, error) { + return strconv.ParseFloat(string(n), 64) +} + +// Int64 returns the number as an int64. +func (n Number) Int64() (int64, error) { + return strconv.ParseInt(string(n), 10, 64) +} + +// decodeState represents the state while decoding a JSON value. +type decodeState struct { + data []byte + off int // read offset in data + scan scanner + nextscan scanner // for calls to nextValue + savedError error + tempstr string // scratch space to avoid some allocations + useNumber bool +} + +// errPhase is used for errors that should not happen unless +// there is a bug in the JSON decoder or something is editing +// the data slice while the decoder executes. +var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?") + +func (d *decodeState) init(data []byte) *decodeState { + d.data = data + d.off = 0 + d.savedError = nil + return d +} + +// error aborts the decoding by panicking with err. +func (d *decodeState) error(err error) { + panic(err) +} + +// saveError saves the first err it is called with, +// for reporting at the end of the unmarshal. +func (d *decodeState) saveError(err error) { + if d.savedError == nil { + d.savedError = err + } +} + +// next cuts off and returns the next full JSON value in d.data[d.off:]. +// The next value is known to be an object or array, not a literal. +func (d *decodeState) next() []byte { + c := d.data[d.off] + item, rest, err := nextValue(d.data[d.off:], &d.nextscan) + if err != nil { + d.error(err) + } + d.off = len(d.data) - len(rest) + + // Our scanner has seen the opening brace/bracket + // and thinks we're still in the middle of the object. + // invent a closing brace/bracket to get it out. + if c == '{' { + d.scan.step(&d.scan, '}') + } else { + d.scan.step(&d.scan, ']') + } + + return item +} + +// scanWhile processes bytes in d.data[d.off:] until it +// receives a scan code not equal to op. +// It updates d.off and returns the new scan code. +func (d *decodeState) scanWhile(op int) int { + var newOp int + for { + if d.off >= len(d.data) { + newOp = d.scan.eof() + d.off = len(d.data) + 1 // mark processed EOF with len+1 + } else { + c := int(d.data[d.off]) + d.off++ + newOp = d.scan.step(&d.scan, c) + } + if newOp != op { + break + } + } + return newOp +} + +// value decodes a JSON value from d.data[d.off:] into the value. +// it updates d.off to point past the decoded value. 
+func (d *decodeState) value(v reflect.Value) { + if !v.IsValid() { + _, rest, err := nextValue(d.data[d.off:], &d.nextscan) + if err != nil { + d.error(err) + } + d.off = len(d.data) - len(rest) + + // d.scan thinks we're still at the beginning of the item. + // Feed in an empty string - the shortest, simplest value - + // so that it knows we got to the end of the value. + if d.scan.redo { + // rewind. + d.scan.redo = false + d.scan.step = stateBeginValue + } + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '"') + + n := len(d.scan.parseState) + if n > 0 && d.scan.parseState[n-1] == parseObjectKey { + // d.scan thinks we just read an object key; finish the object + d.scan.step(&d.scan, ':') + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '}') + } + + return + } + + switch op := d.scanWhile(scanSkipSpace); op { + default: + d.error(errPhase) + + case scanBeginArray: + d.array(v) + + case scanBeginObject: + d.object(v) + + case scanBeginLiteral: + d.literal(v) + } +} + +// indirect walks down v allocating pointers as needed, +// until it gets to a non-pointer. +// if it encounters an Unmarshaler, indirect stops and returns that. +// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. +func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { + // If v is a named type and is addressable, + // start with its address, so that if the type has pointer methods, + // we find them. + if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { + v = v.Addr() + } + for { + // Load value from interface, but only if the result will be + // usefully addressable. + if v.Kind() == reflect.Interface && !v.IsNil() { + e := v.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + v = e + continue + } + } + + if v.Kind() != reflect.Ptr { + break + } + + if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { + break + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + if v.Type().NumMethod() > 0 { + if u, ok := v.Interface().(Unmarshaler); ok { + return u, nil, reflect.Value{} + } + if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + return nil, u, reflect.Value{} + } + } + v = v.Elem() + } + return nil, nil, v +} + +// array consumes an array from d.data[d.off-1:], decoding into the value v. +// the first byte of the array ('[') has been read already. +func (d *decodeState) array(v reflect.Value) { + // Check for unmarshaler. + u, ut, pv := d.indirect(v, false) + if u != nil { + d.off-- + err := u.UnmarshalJSON(d.next()) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + d.saveError(&UnmarshalTypeError{"array", v.Type()}) + d.off-- + d.next() + return + } + + v = pv + + // Check type of target. + switch v.Kind() { + case reflect.Interface: + if v.NumMethod() == 0 { + // Decoding into nil interface? Switch to non-reflect code. + v.Set(reflect.ValueOf(d.arrayInterface())) + return + } + // Otherwise it's invalid. + fallthrough + default: + d.saveError(&UnmarshalTypeError{"array", v.Type()}) + d.off-- + d.next() + return + case reflect.Array: + case reflect.Slice: + break + } + + i := 0 + for { + // Look ahead for ] - can only happen on first iteration. + op := d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + + // Back up so d.value can have the byte we just read. + d.off-- + d.scan.undo(op) + + // Get element of array, growing if necessary. 
+ if v.Kind() == reflect.Slice { + // Grow slice if necessary + if i >= v.Cap() { + newcap := v.Cap() + v.Cap()/2 + if newcap < 4 { + newcap = 4 + } + newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) + reflect.Copy(newv, v) + v.Set(newv) + } + if i >= v.Len() { + v.SetLen(i + 1) + } + } + + if i < v.Len() { + // Decode into element. + d.value(v.Index(i)) + } else { + // Ran out of fixed array: skip. + d.value(reflect.Value{}) + } + i++ + + // Next token must be , or ]. + op = d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + if op != scanArrayValue { + d.error(errPhase) + } + } + + if i < v.Len() { + if v.Kind() == reflect.Array { + // Array. Zero the rest. + z := reflect.Zero(v.Type().Elem()) + for ; i < v.Len(); i++ { + v.Index(i).Set(z) + } + } else { + v.SetLen(i) + } + } + if i == 0 && v.Kind() == reflect.Slice { + v.Set(reflect.MakeSlice(v.Type(), 0, 0)) + } +} + +// object consumes an object from d.data[d.off-1:], decoding into the value v. +// the first byte of the object ('{') has been read already. +func (d *decodeState) object(v reflect.Value) { + // Check for unmarshaler. + u, ut, pv := d.indirect(v, false) + if u != nil { + d.off-- + err := u.UnmarshalJSON(d.next()) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + d.saveError(&UnmarshalTypeError{"object", v.Type()}) + d.off-- + d.next() // skip over { } in input + return + } + v = pv + + // Decoding into nil interface? Switch to non-reflect code. + if v.Kind() == reflect.Interface && v.NumMethod() == 0 { + v.Set(reflect.ValueOf(d.objectInterface())) + return + } + + // Check type of target: struct or map[string]T + switch v.Kind() { + case reflect.Map: + // map must have string kind + t := v.Type() + if t.Key().Kind() != reflect.String { + d.saveError(&UnmarshalTypeError{"object", v.Type()}) + break + } + if v.IsNil() { + v.Set(reflect.MakeMap(t)) + } + case reflect.Struct: + + default: + d.saveError(&UnmarshalTypeError{"object", v.Type()}) + d.off-- + d.next() // skip over { } in input + return + } + + var mapElem reflect.Value + + for { + // Read opening " of string key or closing }. + op := d.scanWhile(scanSkipSpace) + if op == scanEndObject { + // closing } - can only happen on first iteration. + break + } + if op != scanBeginLiteral { + d.error(errPhase) + } + + // Read string key. + start := d.off - 1 + op = d.scanWhile(scanContinue) + item := d.data[start : d.off-1] + key, ok := unquote(item) + if !ok { + d.error(errPhase) + } + + // Figure out field corresponding to key. + var subv reflect.Value + destring := false // whether the value is wrapped in a string to be decoded first + + if v.Kind() == reflect.Map { + elemType := v.Type().Elem() + if !mapElem.IsValid() { + mapElem = reflect.New(elemType).Elem() + } else { + mapElem.Set(reflect.Zero(elemType)) + } + subv = mapElem + } else { + var f *field + fields := cachedTypeFields(v.Type()) + for i := range fields { + ff := &fields[i] + if ff.name == key { + f = ff + break + } + if f == nil && strings.EqualFold(ff.name, key) { + f = ff + } + } + if f != nil { + subv = v + destring = f.quoted + for _, i := range f.index { + if subv.Kind() == reflect.Ptr { + if subv.IsNil() { + subv.Set(reflect.New(subv.Type().Elem())) + } + subv = subv.Elem() + } + subv = subv.Field(i) + } + } + } + + // Read : before value. + if op == scanSkipSpace { + op = d.scanWhile(scanSkipSpace) + } + if op != scanObjectKey { + d.error(errPhase) + } + + // Read value. 
+ if destring { + d.value(reflect.ValueOf(&d.tempstr)) + d.literalStore([]byte(d.tempstr), subv, true) + } else { + d.value(subv) + } + + // Write value back to map; + // if using struct, subv points into struct already. + if v.Kind() == reflect.Map { + kv := reflect.ValueOf(key).Convert(v.Type().Key()) + v.SetMapIndex(kv, subv) + } + + // Next token must be , or }. + op = d.scanWhile(scanSkipSpace) + if op == scanEndObject { + break + } + if op != scanObjectValue { + d.error(errPhase) + } + } +} + +// literal consumes a literal from d.data[d.off-1:], decoding into the value v. +// The first byte of the literal has been read already +// (that's how the caller knows it's a literal). +func (d *decodeState) literal(v reflect.Value) { + // All bytes inside literal return scanContinue op code. + start := d.off - 1 + op := d.scanWhile(scanContinue) + + // Scan read one byte too far; back up. + d.off-- + d.scan.undo(op) + + d.literalStore(d.data[start:d.off], v, false) +} + +// convertNumber converts the number literal s to a float64 or a Number +// depending on the setting of d.useNumber. +func (d *decodeState) convertNumber(s string) (interface{}, error) { + if d.useNumber { + return Number(s), nil + } + f, err := strconv.ParseFloat(s, 64) + if err != nil { + return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0)} + } + return f, nil +} + +var numberType = reflect.TypeOf(Number("")) + +// literalStore decodes a literal stored in item into v. +// +// fromQuoted indicates whether this literal came from unwrapping a +// string from the ",string" struct tag option. this is used only to +// produce more helpful error messages. +func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) { + // Check for unmarshaler. + if len(item) == 0 { + //Empty string given + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + return + } + wantptr := item[0] == 'n' // null + u, ut, pv := d.indirect(v, wantptr) + if u != nil { + err := u.UnmarshalJSON(item) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + if item[0] != '"' { + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.saveError(&UnmarshalTypeError{"string", v.Type()}) + } + } + s, ok := unquoteBytes(item) + if !ok { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(errPhase) + } + } + err := ut.UnmarshalText(s) + if err != nil { + d.error(err) + } + return + } + + v = pv + + switch c := item[0]; c { + case 'n': // null + switch v.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + v.Set(reflect.Zero(v.Type())) + // otherwise, ignore null for primitives/string + } + case 't', 'f': // true, false + value := c == 't' + switch v.Kind() { + default: + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.saveError(&UnmarshalTypeError{"bool", v.Type()}) + } + case reflect.Bool: + v.SetBool(value) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(value)) + } else { + d.saveError(&UnmarshalTypeError{"bool", v.Type()}) + } + } + + case '"': // string + s, ok := unquoteBytes(item) + if !ok { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, 
v.Type())) + } else { + d.error(errPhase) + } + } + switch v.Kind() { + default: + d.saveError(&UnmarshalTypeError{"string", v.Type()}) + case reflect.Slice: + if v.Type() != byteSliceType { + d.saveError(&UnmarshalTypeError{"string", v.Type()}) + break + } + b := make([]byte, base64.StdEncoding.DecodedLen(len(s))) + n, err := base64.StdEncoding.Decode(b, s) + if err != nil { + d.saveError(err) + break + } + v.Set(reflect.ValueOf(b[0:n])) + case reflect.String: + v.SetString(string(s)) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(string(s))) + } else { + d.saveError(&UnmarshalTypeError{"string", v.Type()}) + } + } + + default: // number + if c != '-' && (c < '0' || c > '9') { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(errPhase) + } + } + s := string(item) + switch v.Kind() { + default: + if v.Kind() == reflect.String && v.Type() == numberType { + v.SetString(s) + break + } + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(&UnmarshalTypeError{"number", v.Type()}) + } + case reflect.Interface: + n, err := d.convertNumber(s) + if err != nil { + d.saveError(err) + break + } + if v.NumMethod() != 0 { + d.saveError(&UnmarshalTypeError{"number", v.Type()}) + break + } + v.Set(reflect.ValueOf(n)) + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n, err := strconv.ParseInt(s, 10, 64) + if err != nil || v.OverflowInt(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type()}) + break + } + v.SetInt(n) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + n, err := strconv.ParseUint(s, 10, 64) + if err != nil || v.OverflowUint(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type()}) + break + } + v.SetUint(n) + + case reflect.Float32, reflect.Float64: + n, err := strconv.ParseFloat(s, v.Type().Bits()) + if err != nil || v.OverflowFloat(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type()}) + break + } + v.SetFloat(n) + } + } +} + +// The xxxInterface routines build up a value to be stored +// in an empty interface. They are not strictly necessary, +// but they avoid the weight of reflection in this common case. + +// valueInterface is like value but returns interface{} +func (d *decodeState) valueInterface() interface{} { + switch d.scanWhile(scanSkipSpace) { + default: + d.error(errPhase) + panic("unreachable") + case scanBeginArray: + return d.arrayInterface() + case scanBeginObject: + return d.objectInterface() + case scanBeginLiteral: + return d.literalInterface() + } +} + +// arrayInterface is like array but returns []interface{}. +func (d *decodeState) arrayInterface() []interface{} { + var v = make([]interface{}, 0) + for { + // Look ahead for ] - can only happen on first iteration. + op := d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + + // Back up so d.value can have the byte we just read. + d.off-- + d.scan.undo(op) + + v = append(v, d.valueInterface()) + + // Next token must be , or ]. + op = d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + if op != scanArrayValue { + d.error(errPhase) + } + } + return v +} + +// objectInterface is like object but returns map[string]interface{}. 
+func (d *decodeState) objectInterface() map[string]interface{} { + m := make(map[string]interface{}) + for { + // Read opening " of string key or closing }. + op := d.scanWhile(scanSkipSpace) + if op == scanEndObject { + // closing } - can only happen on first iteration. + break + } + if op != scanBeginLiteral { + d.error(errPhase) + } + + // Read string key. + start := d.off - 1 + op = d.scanWhile(scanContinue) + item := d.data[start : d.off-1] + key, ok := unquote(item) + if !ok { + d.error(errPhase) + } + + // Read : before value. + if op == scanSkipSpace { + op = d.scanWhile(scanSkipSpace) + } + if op != scanObjectKey { + d.error(errPhase) + } + + // Read value. + m[key] = d.valueInterface() + + // Next token must be , or }. + op = d.scanWhile(scanSkipSpace) + if op == scanEndObject { + break + } + if op != scanObjectValue { + d.error(errPhase) + } + } + return m +} + +// literalInterface is like literal but returns an interface value. +func (d *decodeState) literalInterface() interface{} { + // All bytes inside literal return scanContinue op code. + start := d.off - 1 + op := d.scanWhile(scanContinue) + + // Scan read one byte too far; back up. + d.off-- + d.scan.undo(op) + item := d.data[start:d.off] + + switch c := item[0]; c { + case 'n': // null + return nil + + case 't', 'f': // true, false + return c == 't' + + case '"': // string + s, ok := unquote(item) + if !ok { + d.error(errPhase) + } + return s + + default: // number + if c != '-' && (c < '0' || c > '9') { + d.error(errPhase) + } + n, err := d.convertNumber(string(item)) + if err != nil { + d.saveError(err) + } + return n + } +} + +// getu4 decodes \uXXXX from the beginning of s, returning the hex value, +// or it returns -1. +func getu4(s []byte) rune { + if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { + return -1 + } + r, err := strconv.ParseUint(string(s[2:6]), 16, 64) + if err != nil { + return -1 + } + return rune(r) +} + +// unquote converts a quoted JSON string literal s into an actual string t. +// The rules are different than for Go, so cannot use strconv.Unquote. +func unquote(s []byte) (t string, ok bool) { + s, ok = unquoteBytes(s) + t = string(s) + return +} + +func unquoteBytes(s []byte) (t []byte, ok bool) { + if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { + return + } + s = s[1 : len(s)-1] + + // Check for unusual characters. If there are none, + // then no unquoting is needed, so return a slice of the + // original bytes. + r := 0 + for r < len(s) { + c := s[r] + if c == '\\' || c == '"' || c < ' ' { + break + } + if c < utf8.RuneSelf { + r++ + continue + } + rr, size := utf8.DecodeRune(s[r:]) + if rr == utf8.RuneError && size == 1 { + break + } + r += size + } + if r == len(s) { + return s, true + } + + b := make([]byte, len(s)+2*utf8.UTFMax) + w := copy(b, s[0:r]) + for r < len(s) { + // Out of room? Can only happen if s is full of + // malformed UTF-8 and we're replacing each + // byte with RuneError. 
+ if w >= len(b)-2*utf8.UTFMax { + nb := make([]byte, (len(b)+utf8.UTFMax)*2) + copy(nb, b[0:w]) + b = nb + } + switch c := s[r]; { + case c == '\\': + r++ + if r >= len(s) { + return + } + switch s[r] { + default: + return + case '"', '\\', '/', '\'': + b[w] = s[r] + r++ + w++ + case 'b': + b[w] = '\b' + r++ + w++ + case 'f': + b[w] = '\f' + r++ + w++ + case 'n': + b[w] = '\n' + r++ + w++ + case 'r': + b[w] = '\r' + r++ + w++ + case 't': + b[w] = '\t' + r++ + w++ + case 'u': + r-- + rr := getu4(s[r:]) + if rr < 0 { + return + } + r += 6 + if utf16.IsSurrogate(rr) { + rr1 := getu4(s[r:]) + if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { + // A valid pair; consume. + r += 6 + w += utf8.EncodeRune(b[w:], dec) + break + } + // Invalid surrogate; fall back to replacement rune. + rr = unicode.ReplacementChar + } + w += utf8.EncodeRune(b[w:], rr) + } + + // Quote, control characters are invalid. + case c == '"', c < ' ': + return + + // ASCII + case c < utf8.RuneSelf: + b[w] = c + r++ + w++ + + // Coerce to well-formed UTF-8. + default: + rr, size := utf8.DecodeRune(s[r:]) + r += size + w += utf8.EncodeRune(b[w:], rr) + } + } + return b[0:w], true +} diff --git a/vendor/github.com/CorgiMan/json2/encode.go b/vendor/github.com/CorgiMan/json2/encode.go new file mode 100644 index 0000000000000000000000000000000000000000..9b1d0b55bc8918159ff83e607b39402826bbeeb5 --- /dev/null +++ b/vendor/github.com/CorgiMan/json2/encode.go @@ -0,0 +1,1168 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package json implements encoding and decoding of JSON objects as defined in +// RFC 4627. The mapping between JSON objects and Go values is described +// in the documentation for the Marshal and Unmarshal functions. +// +// See "JSON and Go" for an introduction to this package: +// http://golang.org/doc/articles/json_and_go.html +package json2 + +import ( + "bytes" + "encoding" + "encoding/base64" + "math" + "reflect" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +// Marshal returns the JSON encoding of v. +// +// Marshal traverses the value v recursively. +// If an encountered value implements the Marshaler interface +// and is not a nil pointer, Marshal calls its MarshalJSON method +// to produce JSON. The nil pointer exception is not strictly necessary +// but mimics a similar, necessary exception in the behavior of +// UnmarshalJSON. +// +// Otherwise, Marshal uses the following type-dependent default encodings: +// +// Boolean values encode as JSON booleans. +// +// Floating point, integer, and Number values encode as JSON numbers. +// +// String values encode as JSON strings. InvalidUTF8Error will be returned +// if an invalid UTF-8 sequence is encountered. +// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e" +// to keep some browsers from misinterpreting JSON output as HTML. +// +// Array and slice values encode as JSON arrays, except that +// []byte encodes as a base64-encoded string, and a nil slice +// encodes as the null JSON object. +// +// Struct values encode as JSON objects. Each exported struct field +// becomes a member of the object unless +// - the field's tag is "-", or +// - the field is empty and its tag specifies the "omitempty" option. +// The empty values are false, 0, any +// nil pointer or interface value, and any array, slice, map, or string of +// length zero. 
The object's default key string is the struct field name +// but can be specified in the struct field's tag value. The "json" key in +// the struct field's tag value is the key name, followed by an optional comma +// and options. Examples: +// +// // Field is ignored by this package. +// Field int `json:"-"` +// +// // Field appears in JSON as key "myName". +// Field int `json:"myName"` +// +// // Field appears in JSON as key "myName" and +// // the field is omitted from the object if its value is empty, +// // as defined above. +// Field int `json:"myName,omitempty"` +// +// // Field appears in JSON as key "Field" (the default), but +// // the field is skipped if empty. +// // Note the leading comma. +// Field int `json:",omitempty"` +// +// The "string" option signals that a field is stored as JSON inside a +// JSON-encoded string. It applies only to fields of string, floating point, +// or integer types. This extra level of encoding is sometimes used when +// communicating with JavaScript programs: +// +// Int64String int64 `json:",string"` +// +// The key name will be used if it's a non-empty string consisting of +// only Unicode letters, digits, dollar signs, percent signs, hyphens, +// underscores and slashes. +// +// Anonymous struct fields are usually marshaled as if their inner exported fields +// were fields in the outer struct, subject to the usual Go visibility rules amended +// as described in the next paragraph. +// An anonymous struct field with a name given in its JSON tag is treated as +// having that name, rather than being anonymous. +// +// The Go visibility rules for struct fields are amended for JSON when +// deciding which field to marshal or unmarshal. If there are +// multiple fields at the same level, and that level is the least +// nested (and would therefore be the nesting level selected by the +// usual Go rules), the following extra rules apply: +// +// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered, +// even if there are multiple untagged fields that would otherwise conflict. +// 2) If there is exactly one field (tagged or not according to the first rule), that is selected. +// 3) Otherwise there are multiple fields, and all are ignored; no error occurs. +// +// Handling of anonymous struct fields is new in Go 1.1. +// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of +// an anonymous struct field in both current and earlier versions, give the field +// a JSON tag of "-". +// +// Map values encode as JSON objects. +// The map's key type must be string; the object keys are used directly +// as map keys. +// +// Pointer values encode as the value pointed to. +// A nil pointer encodes as the null JSON object. +// +// Interface values encode as the value contained in the interface. +// A nil interface value encodes as the null JSON object. +// +// Channel, complex, and function values cannot be encoded in JSON. +// Attempting to encode such a value causes Marshal to return +// an UnsupportedTypeError. +// +// JSON cannot represent cyclic data structures and Marshal does not +// handle them. Passing cyclic structures to Marshal will result in +// an infinite recursion. +// +func Marshal(v interface{}) ([]byte, error) { + e := &encodeState{} + err := e.marshal(v) + if err != nil { + return nil, err + } + return e.Bytes(), nil +} + +// MarshalIndent is like Marshal but applies Indent to format the output. 
+func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + b, err := Marshal(v) + if err != nil { + return nil, err + } + var buf bytes.Buffer + err = Indent(&buf, b, prefix, indent) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029 +// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029 +// so that the JSON will be safe to embed inside HTML
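A brief sketch of the tag handling described in the `Marshal` documentation above, again via the vendored `json2` import used in `ast/tidb.go`; the `Rule` struct here is purely illustrative:

```go
// Demonstrates field renaming, omitempty, and "-" suppression on encode.
package main

import (
	"fmt"

	json "github.com/CorgiMan/json2"
)

type Rule struct {
	Item     string `json:"item"`               // renamed key
	Severity string `json:"severity,omitempty"` // dropped when empty
	Score    int    `json:"-"`                  // always omitted
}

func main() {
	b, err := json.MarshalIndent(Rule{Item: "RES.009"}, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
	// {
	//   "item": "RES.009"
	// }
}
```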