Unverified commit 7e7eead0, authored by xiyangxixian, committed by GitHub

Merge pull request #186 from XiaoMi/dev

fix #184 and #173
...@@ -29,6 +29,3 @@ script:
 - make docker
 - make cover
 - make test-cli
after_success:
- bash <(curl -s https://codecov.io/bash)
# CHANGELOG
-## 2018-12
+## 2019-01
- DOING: english translation
- add JSONFind function, which supports JSON iteration
- add new test database `world_x`
- SplitStatement supports optimizer hint `/*+xxx */`
- include [bats](https://github.com/bats-core/bats-core) bash automated test framework
- fix explain error when the result has multiple rows
- fix #178: JSON datatype only supports utf8mb4
## 2018-12
- replace mysql database driver mymysql with go-sql-driver
- add new -report-type [ast-json, tiast-json]
- command line dsn args support '@', '/', ':' in password
......
...@@ -187,7 +187,7 @@ release: build
.PHONY: docker
docker:
-	@echo "$(CGREEN)Build mysql test enviorment ...$(CEND)"
+	@echo "$(CGREEN)Build mysql test environment ...$(CEND)"
	@docker stop soar-mysql 2>/dev/null || true
	@docker wait soar-mysql 2>/dev/null >/dev/null || true
	@echo "docker run --name soar-mysql $(MYSQL_RELEASE):$(MYSQL_VERSION)"
...@@ -204,7 +204,7 @@ docker:
	timeout=`expr $$timeout - 1`; \
	printf '.' ; sleep 1 ; \
	else \
-	echo "." ; echo "mysql test enviorment is ready!" ; break ; \
+	echo "." ; echo "mysql test environment is ready!" ; break ; \
	fi ; \
	if [ $$timeout = 0 ] ; then \
	echo "." ; echo "$(CRED)docker soar-mysql start timeout(180 s)!$(CEND)" ; exit 1 ; \
......
...@@ -31,6 +31,7 @@ import (
	"github.com/percona/go-mysql/query"
	tidb "github.com/pingcap/parser/ast"
	"github.com/pingcap/parser/mysql"
	"github.com/tidwall/gjson"
	"vitess.io/vitess/go/vt/sqlparser"
)
...@@ -1312,37 +1313,16 @@ func (q *Query4Audit) RuleLoadFile() Rule {
func (q *Query4Audit) RuleMultiCompare() Rule {
	var rule = q.RuleOK()
	if q.TiStmt != nil {
-		for _, tiStmt := range q.TiStmt {
-			switch node := tiStmt.(type) {
-			case *tidb.SelectStmt:
-				switch where := node.Where.(type) {
-				case *tidb.BinaryOperationExpr:
-					switch where.L.(type) {
-					case *tidb.BinaryOperationExpr:
-						if where.Op.String() == "eq" {
-							rule = HeuristicRules["RES.009"]
-						}
-					}
-				}
-			case *tidb.UpdateStmt:
-				switch where := node.Where.(type) {
-				case *tidb.BinaryOperationExpr:
-					switch where.L.(type) {
-					case *tidb.BinaryOperationExpr:
-						if where.Op.String() == "eq" {
-							rule = HeuristicRules["RES.009"]
-						}
-					}
-				}
-			case *tidb.DeleteStmt:
-				switch where := node.Where.(type) {
-				case *tidb.BinaryOperationExpr:
-					switch where.L.(type) {
-					case *tidb.BinaryOperationExpr:
-						if where.Op.String() == "eq" {
-							rule = HeuristicRules["RES.009"]
-						}
-					}
+		json := ast.StmtNode2JSON(q.Query, "", "")
+		whereJSON := common.JSONFind(json, "Where")
+		for _, where := range whereJSON {
+			conds := []string{where}
+			conds = append(conds, common.JSONFind(where, "L")...)
+			conds = append(conds, common.JSONFind(where, "R")...)
+			for _, cond := range conds {
+				if gjson.Get(cond, "Op").Int() == 7 && gjson.Get(cond, "L.Op").Int() == 7 {
+					rule = HeuristicRules["RES.009"]
+					return rule
+				}
			}
		}
	}
......
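The rewritten RuleMultiCompare above no longer type-switches over TiDB AST nodes; it serializes the statement with ast.StmtNode2JSON and scans the Where-clause JSON for a comparison whose left operand is itself a comparison (the test fixture further down suggests opcode 7 is the parser's EQ operator). A minimal, self-contained sketch of that check against a hand-written JSON fragment, not soar's real parser output:

```go
package main

import (
	"fmt"

	"github.com/tidwall/gjson"
)

// isMultiCompare reports whether a Where-clause fragment describes
// `x = y = z`, i.e. an EQ whose left operand is itself an EQ.
// Assumption: opcode 7 is the pingcap parser's EQ operator.
func isMultiCompare(whereJSON string) bool {
	return gjson.Get(whereJSON, "Op").Int() == 7 &&
		gjson.Get(whereJSON, "L.Op").Int() == 7
}

func main() {
	// Hand-written stand-in for the parser's Where JSON, not real output.
	where := `{"Op":7,"L":{"Op":7,"L":{"Name":"col"},"R":{"Name":"col"}},"R":{"Value":"abc"}}`
	fmt.Println(isMultiCompare(where)) // true -> RES.009 would fire
}
```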
...@@ -946,6 +946,9 @@ func TestRuleMultiCompare(t *testing.T) {
	sqls := [][]string{
		{
			"SELECT * FROM tbl WHERE col = col = 'abc'",
			"SELECT * FROM tbl WHERE col = 'def' and col = col = 'abc'",
			"SELECT * FROM tbl WHERE col = 'def' or col = col = 'abc'",
			"SELECT * FROM tbl WHERE col = col = 'abc' and col = 'def'",
			"UPDATE tbl set col = 1 WHERE col = col = 'abc'",
			"DELETE FROM tbl WHERE col = col = 'abc'",
		},
......
...@@ -17,14 +17,14 @@
package ast
import (
	"encoding/json"
	"github.com/XiaoMi/soar/common"
	"github.com/kr/pretty"
	"github.com/pingcap/parser"
	"github.com/pingcap/parser/ast"
	json "github.com/CorgiMan/json2"
	// for pingcap parser
	_ "github.com/pingcap/tidb/types/parser_driver"
)
......
...@@ -210,6 +210,7 @@ func initQuery(query string) string {
	if err != nil {
		common.Log.Critical("ioutil.ReadAll Error: %v", err)
	}
	common.Log.Debug("initQuery get query from os.Stdin")
	return string(data)
}
...@@ -219,6 +220,7 @@ func initQuery(query string) string {
	if err != nil {
		common.Log.Critical("ioutil.ReadFile Error: %v", err)
	}
	common.Log.Debug("initQuery get query from file: %s", query)
	return string(data)
}
......
...@@ -93,7 +93,7 @@ type Configuration struct {
	MaxDistinctCount int `yaml:"max-distinct-count"` // 单条 SQL 中 Distinct 的最大数量
	MaxIdxColsCount  int `yaml:"max-index-cols-count"` // 复合索引中包含列的最大数量
	MaxTextColsCount int `yaml:"max-text-cols-count"` // 表中含有的 text/blob 列的最大数量
-	MaxTotalRows     int64 `yaml:"max-total-rows"` // 计算散粒度时,当数据行数大于 MaxTotalRows 即开启数据库保护模式,散粒度返回结果可信度下降
+	MaxTotalRows     uint64 `yaml:"max-total-rows"` // 计算散粒度时,当数据行数大于 MaxTotalRows 即开启数据库保护模式,散粒度返回结果可信度下降
	MaxQueryCost     int64 `yaml:"max-query-cost"` // last_query_cost 超过该值时将给予警告
	SpaghettiQueryLength int `yaml:"spaghetti-query-length"` // SQL最大长度警告,超过该长度会给警告
	AllowDropIndex   bool `yaml:"allow-drop-index"` // 允许输出删除重复索引的建议
...@@ -426,7 +426,7 @@ func parseDSN(odbc string, d *Dsn) *Dsn {
func ParseDSN(odbc string, d *Dsn) *Dsn {
	cfg, err := mysql.ParseDSN(odbc)
	if err != nil {
-		Log.Warn("go-sql-driver/mysql.ParseDSN Error: %s, DSN: %s, try to use old version parseDSN", err.Error(), odbc)
+		Log.Debug("go-sql-driver/mysql.ParseDSN Error: %s, DSN: %s, try to use old version parseDSN", err.Error(), odbc)
		return parseDSN(odbc, d)
	}
	return newDSN(cfg)
...@@ -596,7 +596,7 @@ func readCmdFlags() error {
	maxDistinctCount := flag.Int("max-distinct-count", Config.MaxDistinctCount, "MaxDistinctCount, 单条 SQL 中 Distinct 的最大数量")
	maxIdxColsCount := flag.Int("max-index-cols-count", Config.MaxIdxColsCount, "MaxIdxColsCount, 复合索引中包含列的最大数量")
	maxTextColsCount := flag.Int("max-text-cols-count", Config.MaxTextColsCount, "MaxTextColsCount, 表中含有的 text/blob 列的最大数量")
-	maxTotalRows := flag.Int64("max-total-rows", Config.MaxTotalRows, "MaxTotalRows, 计算散粒度时,当数据行数大于MaxTotalRows即开启数据库保护模式,不计算散粒度")
+	maxTotalRows := flag.Uint64("max-total-rows", Config.MaxTotalRows, "MaxTotalRows, 计算散粒度时,当数据行数大于MaxTotalRows即开启数据库保护模式,不计算散粒度")
	maxQueryCost := flag.Int64("max-query-cost", Config.MaxQueryCost, "MaxQueryCost, last_query_cost 超过该值时将给予警告")
	spaghettiQueryLength := flag.Int("spaghetti-query-length", Config.SpaghettiQueryLength, "SpaghettiQueryLength, SQL最大长度警告,超过该长度会给警告")
	allowDropIdx := flag.Bool("allow-drop-index", Config.AllowDropIndex, "AllowDropIndex, 允许输出删除重复索引的建议")
......
[McLaughlin Hunter Harold]
[{
"title": "Sample Konfabulator Widget",
"name": "main_window",
"width": 500,
"height": 500
}]
[ binary binary utf8mb4_bin ]
...@@ -25,6 +25,8 @@ import (
	"path/filepath"
	"reflect"
	"sort"
	"github.com/tidwall/gjson"
)
// GoldenDiff 从 gofmt 学来的测试方法
...@@ -104,3 +106,35 @@ func SortedKey(m interface{}) []string {
	sort.Strings(keys)
	return keys
}
// jsonFind is the internal helper for JSONFind
func jsonFind(json string, name string, find *[]string) (next []string) {
	res := gjson.Parse(json)
	res.ForEach(func(key, value gjson.Result) bool {
		if key.String() == name {
			*find = append(*find, value.String())
		} else {
			switch value.Type {
			case gjson.Number, gjson.True, gjson.False, gjson.Null:
			default:
				next = append(next, value.String())
			}
		}
		return true // keep iterating
	})
	return next
}

// JSONFind iteratively finds all values stored under key `name` in json
func JSONFind(json string, name string) []string {
	var find []string
	next := []string{json}
	for len(next) > 0 {
		var tmpNext []string
		for _, subJSON := range next {
			tmpNext = append(tmpNext, jsonFind(subJSON, name, &find)...)
		}
		next = tmpNext
	}
	return find
}
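A small usage sketch of the new helper (assuming it lands in soar's common package, as this diff suggests): JSONFind walks nested objects and arrays breadth-first and collects every value stored under the given key.

```go
package main

import (
	"fmt"

	"github.com/XiaoMi/soar/common"
)

func main() {
	json := `{"a": {"Collate": "utf8mb4_bin"}, "b": [{"Collate": "binary"}]}`
	// Collects every value keyed "Collate", at any nesting depth.
	fmt.Println(common.JSONFind(json, "Collate"))
	// Expected (per the breadth-first traversal above): [utf8mb4_bin binary]
}
```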
...@@ -24,6 +24,7 @@ import (
)
func TestCaptureOutput(t *testing.T) {
	Log.Debug("Entering function: %s", GetFunctionName())
	c1 := make(chan string, 1)
	// test output buf large than 65535
	length := 1<<16 + 1
...@@ -48,4 +49,343 @@ func TestCaptureOutput(t *testing.T) {
	case <-time.After(1 * time.Second):
		t.Error("capture timeout, pipe read hangup")
	}
Log.Debug("Exiting function: %s", GetFunctionName())
}
func TestJSONFind(t *testing.T) {
Log.Debug("Entering function: %s", GetFunctionName())
jsons := []string{
`{
"programmers": [
{
"firstName": "Janet",
"Collate": "McLaughlin",
}, {
"firstName": "Elliotte",
"Collate": "Hunter",
}, {
"firstName": "Jason",
"Collate": "Harold",
}
]
}`,
`
{
"widget": {
"debug": "on",
"Collate": {
"title": "Sample Konfabulator Widget",
"name": "main_window",
"width": 500,
"height": 500
},
"image": {
"src": "Images/Sun.png",
"hOffset": 250,
"vOffset": 250,
"alignment": "center"
},
"text": {
"data": "Click Here",
"size": 36,
"style": "bold",
"vOffset": 100,
"alignment": "center",
"onMouseUp": "sun1.opacity = (sun1.opacity / 100) * 90;"
}
}
}
`,
`
[
{
"SQLCache": true,
"CalcFoundRows": false,
"StraightJoin": false,
"Priority": 0,
"Distinct": false,
"From": {
"TableRefs": {
"Left": {
"Source": {
"Schema": {
"O": "",
"L": ""
},
"Name": {
"O": "tb",
"L": "tb"
},
"DBInfo": null,
"TableInfo": null,
"IndexHints": null
},
"AsName": {
"O": "",
"L": ""
}
},
"Right": null,
"Tp": 0,
"On": null,
"Using": null,
"NaturalJoin": false,
"StraightJoin": false
}
},
"Where": {
"Type": {
"Tp": 0,
"Flag": 0,
"Flen": 0,
"Decimal": 0,
"Charset": "",
"Collate": "",
"Elems": null
},
"Op": 4,
"L": {
"Type": {
"Tp": 0,
"Flag": 0,
"Flen": 0,
"Decimal": 0,
"Charset": "",
"Collate": "",
"Elems": null
},
"Op": 7,
"L": {
"Type": {
"Tp": 0,
"Flag": 0,
"Flen": 0,
"Decimal": 0,
"Charset": "",
"Collate": "",
"Elems": null
},
"Name": {
"Schema": {
"O": "",
"L": ""
},
"Table": {
"O": "",
"L": ""
},
"Name": {
"O": "col3",
"L": "col3"
}
},
"Refer": null
},
"R": {
"Type": {
"Tp": 8,
"Flag": 128,
"Flen": 1,
"Decimal": 0,
"Charset": "binary",
"Collate": "binary",
"Elems": null
}
}
},
"R": {
"Type": {
"Tp": 0,
"Flag": 0,
"Flen": 0,
"Decimal": 0,
"Charset": "",
"Collate": "",
"Elems": null
},
"Op": 1,
"L": {
"Type": {
"Tp": 0,
"Flag": 0,
"Flen": 0,
"Decimal": 0,
"Charset": "",
"Collate": "",
"Elems": null
},
"Op": 7,
"L": {
"Type": {
"Tp": 0,
"Flag": 0,
"Flen": 0,
"Decimal": 0,
"Charset": "",
"Collate": "",
"Elems": null
},
"Name": {
"Schema": {
"O": "",
"L": ""
},
"Table": {
"O": "",
"L": ""
},
"Name": {
"O": "col3",
"L": "col3"
}
},
"Refer": null
},
"R": {
"Type": {
"Tp": 8,
"Flag": 128,
"Flen": 1,
"Decimal": 0,
"Charset": "binary",
"Collate": "binary",
"Elems": null
}
}
},
"R": {
"Type": {
"Tp": 0,
"Flag": 0,
"Flen": 0,
"Decimal": 0,
"Charset": "",
"Collate": "",
"Elems": null
},
"Op": 7,
"L": {
"Type": {
"Tp": 0,
"Flag": 0,
"Flen": 0,
"Decimal": 0,
"Charset": "",
"Collate": "",
"Elems": null
},
"Op": 7,
"L": {
"Type": {
"Tp": 0,
"Flag": 0,
"Flen": 0,
"Decimal": 0,
"Charset": "",
"Collate": "",
"Elems": null
},
"Name": {
"Schema": {
"O": "",
"L": ""
},
"Table": {
"O": "",
"L": ""
},
"Name": {
"O": "col1",
"L": "col1"
}
},
"Refer": null
},
"R": {
"Type": {
"Tp": 0,
"Flag": 0,
"Flen": 0,
"Decimal": 0,
"Charset": "",
"Collate": "",
"Elems": null
},
"Name": {
"Schema": {
"O": "",
"L": ""
},
"Table": {
"O": "",
"L": ""
},
"Name": {
"O": "col2",
"L": "col2"
}
},
"Refer": null
}
},
"R": {
"Type": {
"Tp": 253,
"Flag": 0,
"Flen": 3,
"Decimal": -1,
"Charset": "utf8mb4",
"Collate": "utf8mb4_bin",
"Elems": null
}
}
}
}
},
"Fields": {
"Fields": [
{
"Offset": 7,
"WildCard": {
"Table": {
"O": "",
"L": ""
},
"Schema": {
"O": "",
"L": ""
}
},
"Expr": null,
"AsName": {
"O": "",
"L": ""
},
"Auxiliary": false
}
]
},
"GroupBy": null,
"Having": null,
"WindowSpecs": null,
"OrderBy": null,
"Limit": null,
"LockTp": 0,
"TableHints": null,
"IsAfterUnionDistinct": false,
"IsInBraces": false
}
]
`,
}
err := GoldenDiff(func() {
for _, json := range jsons {
result := JSONFind(json, "Collate")
fmt.Println(result)
}
}, t.Name(), update)
if err != nil {
t.Error(err)
}
Log.Debug("Exiting function: %s", GetFunctionName())
}
...@@ -26,7 +26,7 @@ import (
	"vitess.io/vitess/go/vt/sqlparser"
)
-// Profiling show profile输出的结果
+// Profiling show profile 输出的结果
type Profiling struct {
	Rows []ProfilingRow
}
...@@ -35,7 +35,7 @@ type Profiling struct {
type ProfilingRow struct {
	Status   string
	Duration float64
-	// TODO: 支持show profile all,不过目前看all的信息过多有点眼花缭乱
+	// TODO: 支持show profile all, 不过目前看所有的信息过多有点眼花缭乱
}
// Profiling 执行SQL,并对其 Profile
...@@ -48,14 +48,14 @@ func (db *Connector) Profiling(sql string, params ...interface{}) ([]ProfilingRow, error) {
		return rows, errors.New("no need profiling")
	}
-	// 测试环境如果检查是关闭的,则SQL不会被执行
+	// 测试环境如果检查是关闭的,则 SQL 不会被执行
	if common.Config.TestDSN.Disable {
		return rows, errors.New("dsn is disable")
	}
	// 数据库安全性检查:如果 Connector 的 IP 端口与 TEST 环境不一致,则启用 SQL 白名单
	// 不在白名单中的 SQL 不允许执行
-	// 执行环境与test环境不相同
+	// 执行环境与 test 环境不相同
	if db.Addr != common.Config.TestDSN.Addr && db.dangerousQuery(sql) {
		return rows, fmt.Errorf("query execution deny: Execute SQL with DSN(%s/%s) '%s'",
			db.Addr, db.Database, fmt.Sprintf(sql, params...))
...@@ -114,7 +114,7 @@ func (db *Connector) Profiling(sql string, params ...interface{}) ([]ProfilingRow, error) {
	return rows, err
}
-// FormatProfiling 格式化输出Profiling信息
+// FormatProfiling 格式化输出 Profiling 信息
func FormatProfiling(rows []ProfilingRow) string {
	str := []string{"| Status | Duration |"}
	str = append(str, "| --- | --- |")
......
...@@ -20,6 +20,7 @@ import (
	"testing"
	"github.com/XiaoMi/soar/common"
	"github.com/kr/pretty"
)
......
...@@ -41,25 +41,25 @@ type tableStatusRow struct {
	Engine    []byte // 该表使用的存储引擎
	Version   []byte // 该表的 .frm 文件版本号
	RowFormat []byte // 该表使用的行存储格式
-	Rows         int64 // 表行数, InnoDB 引擎中为预估值,甚至可能会有40%~50%的数值偏差
-	AvgRowLength int // 平均行长度
+	Rows         uint64 // 表行数, InnoDB 引擎中为预估值,甚至可能会有40%~50%的数值偏差
+	AvgRowLength uint64 // 平均行长度
	// MyISAM: Data_length 为数据文件的大小,单位为 bytes
	// InnoDB: Data_length 为聚簇索引分配的近似内存量,单位为 bytes, 计算方式为聚簇索引数量乘以 InnoDB 页面大小
	// 其他不同的存储引擎中该值的意义可能不尽相同
-	DataLength int
+	DataLength uint64
	// MyISAM: Max_data_length 为数据文件长度的最大值。这是在给定使用的数据指针大小的情况下,可以存储在表中的数据的最大字节数
	// InnoDB: 未使用
	// 其他不同的存储引擎中该值的意义可能不尽相同
-	MaxDataLength int
+	MaxDataLength uint64
	// MyISAM: Index_length 为 index 文件的大小,单位为 bytes
	// InnoDB: Index_length 为非聚簇索引分配的近似内存量,单位为 bytes,计算方式为非聚簇索引数量乘以 InnoDB 页面大小
	// 其他不同的存储引擎中该值的意义可能不尽相同
-	IndexLength int
-	DataFree    int // 已分配但未使用的字节数
+	IndexLength uint64
+	DataFree    uint64 // 已分配但未使用的字节数
	AutoIncrement []byte // 下一个自增值
	CreateTime    []byte // 创建时间
	UpdateTime    []byte // 最近一次更新时间,该值不准确
......
...@@ -90,7 +90,7 @@ func BuildEnv() (*VirtualEnv, *database.Connector) {
		common.Config.TestDSN.Disable = true
	}
-	// 检查是否允许Online和Test一致,防止误操作
+	// 检查是否允许 Online 和 Test 一致,防止误操作
	if common.FormatDSN(common.Config.OnlineDSN) == common.FormatDSN(common.Config.TestDSN) &&
		!common.Config.AllowOnlineAsTest {
		common.Log.Warn("BuildEnv AllowOnlineAsTest: %s:********@%s/%s OnlineDSN can't config as TestDSN",
...@@ -108,7 +108,7 @@ func BuildEnv() (*VirtualEnv, *database.Connector) {
	return vEnv, connOnline
}
-// RealDB 从测试环境中获取通过hash后的DB
+// RealDB 从测试环境中获取通过 hash 后的 DB
func (vEnv *VirtualEnv) RealDB(hash string) string {
	if _, ok := vEnv.Hash2DB[hash]; ok {
		return vEnv.Hash2DB[hash]
...@@ -120,7 +120,7 @@ func (vEnv *VirtualEnv) RealDB(hash string) string {
	return hash
}
-// DBHash 从测试环境中根据DB找到对应的hash
+// DBHash 从测试环境中根据 DB 找到对应的 hash
func (vEnv *VirtualEnv) DBHash(db string) string {
	if _, ok := vEnv.DBRef[db]; ok {
		return vEnv.DBRef[db]
...@@ -194,15 +194,15 @@ func (vEnv *VirtualEnv) CleanupTestDatabase() {
	common.Log.Debug("CleanupTestDatabase done")
}
-// BuildVirtualEnv rEnv为SQL源环境,DB使用的信息从接口获取
-// 注意:如果是USE,DDL等语句,执行完第一条就会返回,后面的SQL不会执行
+// BuildVirtualEnv rEnv 为 SQL 源环境,DB 使用的信息从接口获取
+// 注意:如果是 USE, DDL 等语句,执行完第一条就会返回,后面的 SQL 不会执行
func (vEnv *VirtualEnv) BuildVirtualEnv(rEnv *database.Connector, SQLs ...string) bool {
	var stmt sqlparser.Statement
	var err error
	// 置空错误信息
	vEnv.Error = nil
-	// 检测是否已经创建初始数据库,如果未创建则创建一个名称hash过的映射数据库
+	// 检测是否已经创建初始数据库,如果未创建则创建一个名称 hash 过的映射数据库
	err = vEnv.createDatabase(rEnv)
	common.LogIfWarn(err, "")
...@@ -212,7 +212,7 @@ func (vEnv *VirtualEnv) BuildVirtualEnv(rEnv *database.Connector, SQLs ...string) bool {
		return true
	}
-	// 判断rEnv中是否指定了DB
+	// 判断 rEnv 中是否指定了 DB
	if rEnv.Database == "" {
		common.Log.Error("BuildVirtualEnv no database specified, TestDSN init failed")
		return false
...@@ -221,9 +221,7 @@ func (vEnv *VirtualEnv) BuildVirtualEnv(rEnv *database.Connector, SQLs ...string) bool {
	// 库表提取
	meta := make(map[string]*common.DB)
	for _, sql := range SQLs {
		common.Log.Debug("BuildVirtualEnv Database&TableName Mapping, SQL: %s", sql)
		stmt, err = sqlparser.Parse(sql)
		if err != nil {
			common.Log.Error("BuildVirtualEnv Error : %v", err)
...@@ -249,7 +247,7 @@ func (vEnv *VirtualEnv) BuildVirtualEnv(rEnv *database.Connector, SQLs ...string) bool {
			// 为不影响其他SQL操作,复制一个Connector对象,将数据库切换到对应的DB上直接执行
			vEnv.Database = vEnv.DBRef[rEnv.Database]
-			// 为了支持并发,需要将DB进行映射,但db.table这种形式无法保证DB的映射是正确的
+			// 为了支持并发,需要将DB进行映射,但 db.table 这种形式无法保证 DB 的映射是正确的
			// TODO:暂不支持 create db.tableName (id int) 形式的建表语句
			if stmt.Table.Qualifier.String() != "" {
				common.Log.Error("BuildVirtualEnv DDL Not support db.tb format")
...@@ -300,7 +298,7 @@ func (vEnv *VirtualEnv) BuildVirtualEnv(rEnv *database.Connector, SQLs ...string) bool {
			meta := ast.GetMeta(stmt, nil)
-			// 由于DB环境可能是变的,所以需要每一次都单独的提取库表结构,整体随着rEnv的变动而发生变化
+			// 由于 DB 环境可能是变的,所以需要每一次都单独的提取库表结构,整体随着 rEnv 的变动而发生变化
			for db, table := range meta {
				if db == "" {
					db = rEnv.Database
...@@ -361,7 +359,7 @@ func (vEnv *VirtualEnv) createDatabase(rEnv *database.Connector) error {
	// optimizer_YYMMDDHHmmss_xxxx
	dbHash := fmt.Sprintf("optimizer_%s_%s", // Total 39 bytes
-		time.Now().Format("060102150405"), // 12 Bytes 20180102030405
+		time.Now().Format("060102150405"), // 12 Bytes 180102030405
		strings.ToLower(uniuri.New())) // 16 Bytes random string
	common.Log.Debug("createDatabase, mapping `%s` :`%s`-->`%s`", rEnv.Database, rEnv.Database, dbHash)
	ddl, err := rEnv.ShowCreateDatabase(rEnv.Database)
...@@ -496,7 +494,7 @@ func (vEnv *VirtualEnv) GenTableColumns(meta common.Meta) common.TableColumns {
		}
		if len(tb.Column) == 0 {
-			// tb.column为空说明SQL里这个表是用的*来查询
+			// tb.column 为空说明 SQL 里这个表是用的*来查询
			if err != nil {
				common.Log.Error("ast.Rewrite ShowColumns, Error: %v", err)
				break
......
#!/usr/bin/env bats
load test_helper
@test "Simple Query Optimizer" {
${SOAR_BIN_ENV} -query "select * from film where length > 120" | grep -v "散粒度" > ${BATS_TMP_DIRNAME}/${BATS_TEST_NAME}.golden
run golden_diff
[ $status -eq 0 ]
}
@test "Run all test cases" {
${SOAR_BIN} -list-test-sqls | ${SOAR_BIN_ENV} | grep -v "散粒度" > ${BATS_TMP_DIRNAME}/${BATS_TEST_NAME}.golden
run golden_diff
[ $status -eq 0 ]
}
online-dsn:
user: ""
password: '********'
net: tcp
addr: 127.0.0.1:3306
schema: information_schema
charset: utf8
collation: utf8_general_ci
loc: UTC
tls: ""
server-public-key: ""
maxallowedpacket: 4194304
params:
charset: utf8
timeout: 0
read-timeout: 0
write-timeout: 0
allow-native-passwords: true
allow-old-passwords: false
disable: false
test-dsn:
user: ""
password: '********'
net: tcp
addr: 127.0.0.1:3306
schema: information_schema
charset: utf8
collation: utf8_general_ci
loc: UTC
tls: ""
server-public-key: ""
maxallowedpacket: 4194304
params:
charset: utf8
timeout: 0
read-timeout: 0
write-timeout: 0
allow-native-passwords: true
allow-old-passwords: false
disable: false
allow-online-as-test: false
drop-test-temporary: true
cleanup-test-database: false
only-syntax-check: false
sampling-statistic-target: 100
sampling: false
sampling-condition: ""
profiling: false
trace: false
explain: true
delimiter: ;
log-level: 3
log-output: /dev/null
report-type: markdown
report-css: ""
report-javascript: ""
report-title: SQL优化分析报告
markdown-extensions: 94
markdown-html-flags: 0
ignore-rules:
- COL.011
rewrite-rules:
- delimiter
- orderbynull
- groupbyconst
- dmlorderby
- having
- star2columns
- insertcolumns
- distinctstar
blacklist: ""
max-join-table-count: 5
max-group-by-cols-count: 5
max-distinct-count: 5
max-index-cols-count: 5
max-text-cols-count: 2
max-total-rows: 9999999
max-query-cost: 9999
spaghetti-query-length: 2048
allow-drop-index: false
max-in-count: 10
max-index-bytes-percolumn: 767
max-index-bytes: 3072
allow-charsets:
- utf8
- utf8mb4
allow-collates: []
allow-engines:
- innodb
max-index-count: 10
max-column-count: 40
max-value-count: 100
index-prefix: idx_
unique-key-prefix: uk_
max-subquery-depth: 5
max-varchar-length: 1024
column-not-allow-type:
- boolean
min-cardinality: 0
explain-sql-report-type: pretty
explain-type: extended
explain-format: traditional
explain-warn-select-type:
- ""
explain-warn-access-type:
- ALL
explain-max-keys: 3
explain-min-keys: 0
explain-max-rows: 10000
explain-warn-extra:
- Using temporary
- Using filesort
explain-max-filtered: 100
explain-warn-scalability:
- O(n)
show-warnings: false
show-last-query-cost: false
query: ""
list-heuristic-rules: false
list-rewrite-rules: false
list-test-sqls: false
list-report-types: false
verbose: false
dry-run: true
max-pretty-sql-length: 1024
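The `-print-config` golden file above is the YAML rendering of the common.Configuration struct shown earlier in this diff; note max-total-rows, which this commit moves from int64 to uint64. A minimal sketch of reading a few of those keys with gopkg.in/yaml.v2 (a small hand-picked subset for illustration, not soar's full struct):

```go
package main

import (
	"fmt"
	"log"

	yaml "gopkg.in/yaml.v2"
)

// A tiny subset of the configuration keys shown above.
type config struct {
	MaxTotalRows  uint64   `yaml:"max-total-rows"` // uint64 after this commit
	MaxQueryCost  int64    `yaml:"max-query-cost"`
	ReportType    string   `yaml:"report-type"`
	AllowCharsets []string `yaml:"allow-charsets"`
}

func main() {
	data := []byte(`
max-total-rows: 9999999
max-query-cost: 9999
report-type: markdown
allow-charsets:
  - utf8
  - utf8mb4
`)
	var c config
	if err := yaml.Unmarshal(data, &c); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", c) // {MaxTotalRows:9999999 MaxQueryCost:9999 ReportType:markdown AllowCharsets:[utf8 utf8mb4]}
}
```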
...@@ -2,19 +2,41 @@
load test_helper
-@test "Simple Query Optimizer" {
-  ${SOAR_BIN_ENV} -query "select * from film where length > 120" | grep -v "散粒度" > ${BATS_TMP_DIRNAME}/${BATS_TEST_NAME}.golden
-  run golden_diff ${BATS_TEST_NAME}
-  [ $status -eq 0 ]
+@test "Test soar version" {
+  run ${SOAR_BIN} -version
+  [ "$status" -eq 0 ]
+  [ "${lines[0]%% *}" == "Version:" ]
  [ "${lines[1]%% *}" == "Branch:" ]
  [ "${lines[2]%% *}" == "Compile:" ]
  [ $(expr "${lines[2]}" : "Compile: $(date +'%Y-%m-%d').*") -ne 0 ]
}
-@test "Syntax Check" {
-  run ${SOAR_BIN} -query "select * frm film" -only-syntax-check
+@test "No arguments prints message" {
+  run ${SOAR_BIN}
  [ $status -eq 1 ]
  [ "${lines[0]}" == 'Args format error, use --help see how to use it!' ]
}
@test "Run default printconfig cases" {
  ${SOAR_BIN} -print-config -log-output=/dev/null > ${BATS_TMP_DIRNAME}/${BATS_TEST_NAME}.golden
  run golden_diff
  [ $status -eq 0 ]
}
@test "Check config cases" {
  run ${SOAR_BIN_ENV} -check-config
  [ $status -eq 0 ]
  [ -z ${output} ]
}
-@test "Run all test cases" {
-  ${SOAR_BIN} -list-test-sqls | ${SOAR_BIN_ENV} | grep -v "散粒度" > ${BATS_TMP_DIRNAME}/${BATS_TEST_NAME}.golden
-  run golden_diff ${BATS_TEST_NAME}
+@test "Syntax Check OK" {
+  run ${SOAR_BIN} -query "select * from film" -only-syntax-check
  [ $status -eq 0 ]
  [ -z $ouput ]
}
@test "Syntax Check Error" {
  run ${SOAR_BIN} -query "select * frm film" -only-syntax-check
  [ $status -eq 1 ]
  [ -n $ouput ]
}
#!/usr/bin/env bats
load test_helper
#!/usr/bin/env bats
load test_helper
@test "Check Query Optimizer" {
run ${SOAR_BIN} -query "select * from film where length > 120"
[ $status -eq 0 ]
}
...@@ -7,8 +7,7 @@ setup() {
  mkdir -p "${BATS_TMP_DIRNAME}"
}
# golden_diff like gofmt golden file check method, use this function check output different with template
golden_diff() {
-  FUNC_NAME=$1
-  diff "${BATS_TMP_DIRNAME}/${FUNC_NAME}.golden" "${BATS_FIXTURE_DIRNAME}/${FUNC_NAME}.golden" >/dev/null
-  return $?
+  diff "${BATS_TMP_DIRNAME}/${BATS_TEST_NAME}.golden" "${BATS_FIXTURE_DIRNAME}/${BATS_TEST_NAME}.golden" >/dev/null
}
// I stole this from golang, only changed package name
// and commented out line 1018-1020 of encode.go
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Represents JSON data structure using native Go types: booleans, floats,
// strings, arrays, and maps.
package json2
import (
"encoding"
"encoding/base64"
"errors"
"fmt"
"reflect"
"runtime"
"strconv"
"strings"
"unicode"
"unicode/utf16"
"unicode/utf8"
)
// Unmarshal parses the JSON-encoded data and stores the result
// in the value pointed to by v.
//
// Unmarshal uses the inverse of the encodings that
// Marshal uses, allocating maps, slices, and pointers as necessary,
// with the following additional rules:
//
// To unmarshal JSON into a pointer, Unmarshal first handles the case of
// the JSON being the JSON literal null. In that case, Unmarshal sets
// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into
// the value pointed at by the pointer. If the pointer is nil, Unmarshal
// allocates a new value for it to point to.
//
// To unmarshal JSON into a struct, Unmarshal matches incoming object
// keys to the keys used by Marshal (either the struct field name or its tag),
// preferring an exact match but also accepting a case-insensitive match.
//
// To unmarshal JSON into an interface value,
// Unmarshal stores one of these in the interface value:
//
// bool, for JSON booleans
// float64, for JSON numbers
// string, for JSON strings
// []interface{}, for JSON arrays
// map[string]interface{}, for JSON objects
// nil for JSON null
//
// If a JSON value is not appropriate for a given target type,
// or if a JSON number overflows the target type, Unmarshal
// skips that field and completes the unmarshalling as best it can.
// If no more serious errors are encountered, Unmarshal returns
// an UnmarshalTypeError describing the earliest such error.
//
// When unmarshaling quoted strings, invalid UTF-8 or
// invalid UTF-16 surrogate pairs are not treated as an error.
// Instead, they are replaced by the Unicode replacement
// character U+FFFD.
//
func Unmarshal(data []byte, v interface{}) error {
// Check for well-formedness.
// Avoids filling out half a data structure
// before discovering a JSON syntax error.
var d decodeState
err := checkValid(data, &d.scan)
if err != nil {
return err
}
d.init(data)
return d.unmarshal(v)
}
// Unmarshaler is the interface implemented by objects
// that can unmarshal a JSON description of themselves.
// The input can be assumed to be a valid encoding of
// a JSON value. UnmarshalJSON must copy the JSON data
// if it wishes to retain the data after returning.
type Unmarshaler interface {
UnmarshalJSON([]byte) error
}
// An UnmarshalTypeError describes a JSON value that was
// not appropriate for a value of a specific Go type.
type UnmarshalTypeError struct {
Value string // description of JSON value - "bool", "array", "number -5"
Type reflect.Type // type of Go value it could not be assigned to
}
func (e *UnmarshalTypeError) Error() string {
return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String()
}
// An UnmarshalFieldError describes a JSON object key that
// led to an unexported (and therefore unwritable) struct field.
// (No longer used; kept for compatibility.)
type UnmarshalFieldError struct {
Key string
Type reflect.Type
Field reflect.StructField
}
func (e *UnmarshalFieldError) Error() string {
return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String()
}
// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal.
// (The argument to Unmarshal must be a non-nil pointer.)
type InvalidUnmarshalError struct {
Type reflect.Type
}
func (e *InvalidUnmarshalError) Error() string {
if e.Type == nil {
return "json: Unmarshal(nil)"
}
if e.Type.Kind() != reflect.Ptr {
return "json: Unmarshal(non-pointer " + e.Type.String() + ")"
}
return "json: Unmarshal(nil " + e.Type.String() + ")"
}
func (d *decodeState) unmarshal(v interface{}) (err error) {
defer func() {
if r := recover(); r != nil {
if _, ok := r.(runtime.Error); ok {
panic(r)
}
err = r.(error)
}
}()
rv := reflect.ValueOf(v)
if rv.Kind() != reflect.Ptr || rv.IsNil() {
return &InvalidUnmarshalError{reflect.TypeOf(v)}
}
d.scan.reset()
// We decode rv not rv.Elem because the Unmarshaler interface
// test must be applied at the top level of the value.
d.value(rv)
return d.savedError
}
// A Number represents a JSON number literal.
type Number string
// String returns the literal text of the number.
func (n Number) String() string { return string(n) }
// Float64 returns the number as a float64.
func (n Number) Float64() (float64, error) {
return strconv.ParseFloat(string(n), 64)
}
// Int64 returns the number as an int64.
func (n Number) Int64() (int64, error) {
return strconv.ParseInt(string(n), 10, 64)
}
// decodeState represents the state while decoding a JSON value.
type decodeState struct {
data []byte
off int // read offset in data
scan scanner
nextscan scanner // for calls to nextValue
savedError error
tempstr string // scratch space to avoid some allocations
useNumber bool
}
// errPhase is used for errors that should not happen unless
// there is a bug in the JSON decoder or something is editing
// the data slice while the decoder executes.
var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?")
func (d *decodeState) init(data []byte) *decodeState {
d.data = data
d.off = 0
d.savedError = nil
return d
}
// error aborts the decoding by panicking with err.
func (d *decodeState) error(err error) {
panic(err)
}
// saveError saves the first err it is called with,
// for reporting at the end of the unmarshal.
func (d *decodeState) saveError(err error) {
if d.savedError == nil {
d.savedError = err
}
}
// next cuts off and returns the next full JSON value in d.data[d.off:].
// The next value is known to be an object or array, not a literal.
func (d *decodeState) next() []byte {
c := d.data[d.off]
item, rest, err := nextValue(d.data[d.off:], &d.nextscan)
if err != nil {
d.error(err)
}
d.off = len(d.data) - len(rest)
// Our scanner has seen the opening brace/bracket
// and thinks we're still in the middle of the object.
// invent a closing brace/bracket to get it out.
if c == '{' {
d.scan.step(&d.scan, '}')
} else {
d.scan.step(&d.scan, ']')
}
return item
}
// scanWhile processes bytes in d.data[d.off:] until it
// receives a scan code not equal to op.
// It updates d.off and returns the new scan code.
func (d *decodeState) scanWhile(op int) int {
var newOp int
for {
if d.off >= len(d.data) {
newOp = d.scan.eof()
d.off = len(d.data) + 1 // mark processed EOF with len+1
} else {
c := int(d.data[d.off])
d.off++
newOp = d.scan.step(&d.scan, c)
}
if newOp != op {
break
}
}
return newOp
}
// value decodes a JSON value from d.data[d.off:] into the value.
// it updates d.off to point past the decoded value.
func (d *decodeState) value(v reflect.Value) {
if !v.IsValid() {
_, rest, err := nextValue(d.data[d.off:], &d.nextscan)
if err != nil {
d.error(err)
}
d.off = len(d.data) - len(rest)
// d.scan thinks we're still at the beginning of the item.
// Feed in an empty string - the shortest, simplest value -
// so that it knows we got to the end of the value.
if d.scan.redo {
// rewind.
d.scan.redo = false
d.scan.step = stateBeginValue
}
d.scan.step(&d.scan, '"')
d.scan.step(&d.scan, '"')
n := len(d.scan.parseState)
if n > 0 && d.scan.parseState[n-1] == parseObjectKey {
// d.scan thinks we just read an object key; finish the object
d.scan.step(&d.scan, ':')
d.scan.step(&d.scan, '"')
d.scan.step(&d.scan, '"')
d.scan.step(&d.scan, '}')
}
return
}
switch op := d.scanWhile(scanSkipSpace); op {
default:
d.error(errPhase)
case scanBeginArray:
d.array(v)
case scanBeginObject:
d.object(v)
case scanBeginLiteral:
d.literal(v)
}
}
// indirect walks down v allocating pointers as needed,
// until it gets to a non-pointer.
// if it encounters an Unmarshaler, indirect stops and returns that.
// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
// If v is a named type and is addressable,
// start with its address, so that if the type has pointer methods,
// we find them.
if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
v = v.Addr()
}
for {
// Load value from interface, but only if the result will be
// usefully addressable.
if v.Kind() == reflect.Interface && !v.IsNil() {
e := v.Elem()
if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
v = e
continue
}
}
if v.Kind() != reflect.Ptr {
break
}
if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
break
}
if v.IsNil() {
v.Set(reflect.New(v.Type().Elem()))
}
if v.Type().NumMethod() > 0 {
if u, ok := v.Interface().(Unmarshaler); ok {
return u, nil, reflect.Value{}
}
if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
return nil, u, reflect.Value{}
}
}
v = v.Elem()
}
return nil, nil, v
}
// array consumes an array from d.data[d.off-1:], decoding into the value v.
// the first byte of the array ('[') has been read already.
func (d *decodeState) array(v reflect.Value) {
// Check for unmarshaler.
u, ut, pv := d.indirect(v, false)
if u != nil {
d.off--
err := u.UnmarshalJSON(d.next())
if err != nil {
d.error(err)
}
return
}
if ut != nil {
d.saveError(&UnmarshalTypeError{"array", v.Type()})
d.off--
d.next()
return
}
v = pv
// Check type of target.
switch v.Kind() {
case reflect.Interface:
if v.NumMethod() == 0 {
// Decoding into nil interface? Switch to non-reflect code.
v.Set(reflect.ValueOf(d.arrayInterface()))
return
}
// Otherwise it's invalid.
fallthrough
default:
d.saveError(&UnmarshalTypeError{"array", v.Type()})
d.off--
d.next()
return
case reflect.Array:
case reflect.Slice:
break
}
i := 0
for {
// Look ahead for ] - can only happen on first iteration.
op := d.scanWhile(scanSkipSpace)
if op == scanEndArray {
break
}
// Back up so d.value can have the byte we just read.
d.off--
d.scan.undo(op)
// Get element of array, growing if necessary.
if v.Kind() == reflect.Slice {
// Grow slice if necessary
if i >= v.Cap() {
newcap := v.Cap() + v.Cap()/2
if newcap < 4 {
newcap = 4
}
newv := reflect.MakeSlice(v.Type(), v.Len(), newcap)
reflect.Copy(newv, v)
v.Set(newv)
}
if i >= v.Len() {
v.SetLen(i + 1)
}
}
if i < v.Len() {
// Decode into element.
d.value(v.Index(i))
} else {
// Ran out of fixed array: skip.
d.value(reflect.Value{})
}
i++
// Next token must be , or ].
op = d.scanWhile(scanSkipSpace)
if op == scanEndArray {
break
}
if op != scanArrayValue {
d.error(errPhase)
}
}
if i < v.Len() {
if v.Kind() == reflect.Array {
// Array. Zero the rest.
z := reflect.Zero(v.Type().Elem())
for ; i < v.Len(); i++ {
v.Index(i).Set(z)
}
} else {
v.SetLen(i)
}
}
if i == 0 && v.Kind() == reflect.Slice {
v.Set(reflect.MakeSlice(v.Type(), 0, 0))
}
}
// object consumes an object from d.data[d.off-1:], decoding into the value v.
// the first byte of the object ('{') has been read already.
func (d *decodeState) object(v reflect.Value) {
// Check for unmarshaler.
u, ut, pv := d.indirect(v, false)
if u != nil {
d.off--
err := u.UnmarshalJSON(d.next())
if err != nil {
d.error(err)
}
return
}
if ut != nil {
d.saveError(&UnmarshalTypeError{"object", v.Type()})
d.off--
d.next() // skip over { } in input
return
}
v = pv
// Decoding into nil interface? Switch to non-reflect code.
if v.Kind() == reflect.Interface && v.NumMethod() == 0 {
v.Set(reflect.ValueOf(d.objectInterface()))
return
}
// Check type of target: struct or map[string]T
switch v.Kind() {
case reflect.Map:
// map must have string kind
t := v.Type()
if t.Key().Kind() != reflect.String {
d.saveError(&UnmarshalTypeError{"object", v.Type()})
break
}
if v.IsNil() {
v.Set(reflect.MakeMap(t))
}
case reflect.Struct:
default:
d.saveError(&UnmarshalTypeError{"object", v.Type()})
d.off--
d.next() // skip over { } in input
return
}
var mapElem reflect.Value
for {
// Read opening " of string key or closing }.
op := d.scanWhile(scanSkipSpace)
if op == scanEndObject {
// closing } - can only happen on first iteration.
break
}
if op != scanBeginLiteral {
d.error(errPhase)
}
// Read string key.
start := d.off - 1
op = d.scanWhile(scanContinue)
item := d.data[start : d.off-1]
key, ok := unquote(item)
if !ok {
d.error(errPhase)
}
// Figure out field corresponding to key.
var subv reflect.Value
destring := false // whether the value is wrapped in a string to be decoded first
if v.Kind() == reflect.Map {
elemType := v.Type().Elem()
if !mapElem.IsValid() {
mapElem = reflect.New(elemType).Elem()
} else {
mapElem.Set(reflect.Zero(elemType))
}
subv = mapElem
} else {
var f *field
fields := cachedTypeFields(v.Type())
for i := range fields {
ff := &fields[i]
if ff.name == key {
f = ff
break
}
if f == nil && strings.EqualFold(ff.name, key) {
f = ff
}
}
if f != nil {
subv = v
destring = f.quoted
for _, i := range f.index {
if subv.Kind() == reflect.Ptr {
if subv.IsNil() {
subv.Set(reflect.New(subv.Type().Elem()))
}
subv = subv.Elem()
}
subv = subv.Field(i)
}
}
}
// Read : before value.
if op == scanSkipSpace {
op = d.scanWhile(scanSkipSpace)
}
if op != scanObjectKey {
d.error(errPhase)
}
// Read value.
if destring {
d.value(reflect.ValueOf(&d.tempstr))
d.literalStore([]byte(d.tempstr), subv, true)
} else {
d.value(subv)
}
// Write value back to map;
// if using struct, subv points into struct already.
if v.Kind() == reflect.Map {
kv := reflect.ValueOf(key).Convert(v.Type().Key())
v.SetMapIndex(kv, subv)
}
// Next token must be , or }.
op = d.scanWhile(scanSkipSpace)
if op == scanEndObject {
break
}
if op != scanObjectValue {
d.error(errPhase)
}
}
}
// literal consumes a literal from d.data[d.off-1:], decoding into the value v.
// The first byte of the literal has been read already
// (that's how the caller knows it's a literal).
func (d *decodeState) literal(v reflect.Value) {
// All bytes inside literal return scanContinue op code.
start := d.off - 1
op := d.scanWhile(scanContinue)
// Scan read one byte too far; back up.
d.off--
d.scan.undo(op)
d.literalStore(d.data[start:d.off], v, false)
}
// convertNumber converts the number literal s to a float64 or a Number
// depending on the setting of d.useNumber.
func (d *decodeState) convertNumber(s string) (interface{}, error) {
if d.useNumber {
return Number(s), nil
}
f, err := strconv.ParseFloat(s, 64)
if err != nil {
return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0)}
}
return f, nil
}
var numberType = reflect.TypeOf(Number(""))
// literalStore decodes a literal stored in item into v.
//
// fromQuoted indicates whether this literal came from unwrapping a
// string from the ",string" struct tag option. this is used only to
// produce more helpful error messages.
func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) {
// Check for unmarshaler.
if len(item) == 0 {
//Empty string given
d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
return
}
wantptr := item[0] == 'n' // null
u, ut, pv := d.indirect(v, wantptr)
if u != nil {
err := u.UnmarshalJSON(item)
if err != nil {
d.error(err)
}
return
}
if ut != nil {
if item[0] != '"' {
if fromQuoted {
d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
} else {
d.saveError(&UnmarshalTypeError{"string", v.Type()})
}
}
s, ok := unquoteBytes(item)
if !ok {
if fromQuoted {
d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
} else {
d.error(errPhase)
}
}
err := ut.UnmarshalText(s)
if err != nil {
d.error(err)
}
return
}
v = pv
switch c := item[0]; c {
case 'n': // null
switch v.Kind() {
case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
v.Set(reflect.Zero(v.Type()))
// otherwise, ignore null for primitives/string
}
case 't', 'f': // true, false
value := c == 't'
switch v.Kind() {
default:
if fromQuoted {
d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
} else {
d.saveError(&UnmarshalTypeError{"bool", v.Type()})
}
case reflect.Bool:
v.SetBool(value)
case reflect.Interface:
if v.NumMethod() == 0 {
v.Set(reflect.ValueOf(value))
} else {
d.saveError(&UnmarshalTypeError{"bool", v.Type()})
}
}
case '"': // string
s, ok := unquoteBytes(item)
if !ok {
if fromQuoted {
d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
} else {
d.error(errPhase)
}
}
switch v.Kind() {
default:
d.saveError(&UnmarshalTypeError{"string", v.Type()})
case reflect.Slice:
if v.Type() != byteSliceType {
d.saveError(&UnmarshalTypeError{"string", v.Type()})
break
}
b := make([]byte, base64.StdEncoding.DecodedLen(len(s)))
n, err := base64.StdEncoding.Decode(b, s)
if err != nil {
d.saveError(err)
break
}
v.Set(reflect.ValueOf(b[0:n]))
case reflect.String:
v.SetString(string(s))
case reflect.Interface:
if v.NumMethod() == 0 {
v.Set(reflect.ValueOf(string(s)))
} else {
d.saveError(&UnmarshalTypeError{"string", v.Type()})
}
}
default: // number
if c != '-' && (c < '0' || c > '9') {
if fromQuoted {
d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
} else {
d.error(errPhase)
}
}
s := string(item)
switch v.Kind() {
default:
if v.Kind() == reflect.String && v.Type() == numberType {
v.SetString(s)
break
}
if fromQuoted {
d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
} else {
d.error(&UnmarshalTypeError{"number", v.Type()})
}
case reflect.Interface:
n, err := d.convertNumber(s)
if err != nil {
d.saveError(err)
break
}
if v.NumMethod() != 0 {
d.saveError(&UnmarshalTypeError{"number", v.Type()})
break
}
v.Set(reflect.ValueOf(n))
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
n, err := strconv.ParseInt(s, 10, 64)
if err != nil || v.OverflowInt(n) {
d.saveError(&UnmarshalTypeError{"number " + s, v.Type()})
break
}
v.SetInt(n)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
n, err := strconv.ParseUint(s, 10, 64)
if err != nil || v.OverflowUint(n) {
d.saveError(&UnmarshalTypeError{"number " + s, v.Type()})
break
}
v.SetUint(n)
case reflect.Float32, reflect.Float64:
n, err := strconv.ParseFloat(s, v.Type().Bits())
if err != nil || v.OverflowFloat(n) {
d.saveError(&UnmarshalTypeError{"number " + s, v.Type()})
break
}
v.SetFloat(n)
}
}
}
// The xxxInterface routines build up a value to be stored
// in an empty interface. They are not strictly necessary,
// but they avoid the weight of reflection in this common case.
// valueInterface is like value but returns interface{}
func (d *decodeState) valueInterface() interface{} {
switch d.scanWhile(scanSkipSpace) {
default:
d.error(errPhase)
panic("unreachable")
case scanBeginArray:
return d.arrayInterface()
case scanBeginObject:
return d.objectInterface()
case scanBeginLiteral:
return d.literalInterface()
}
}
// arrayInterface is like array but returns []interface{}.
func (d *decodeState) arrayInterface() []interface{} {
var v = make([]interface{}, 0)
for {
// Look ahead for ] - can only happen on first iteration.
op := d.scanWhile(scanSkipSpace)
if op == scanEndArray {
break
}
// Back up so d.value can have the byte we just read.
d.off--
d.scan.undo(op)
v = append(v, d.valueInterface())
// Next token must be , or ].
op = d.scanWhile(scanSkipSpace)
if op == scanEndArray {
break
}
if op != scanArrayValue {
d.error(errPhase)
}
}
return v
}
// objectInterface is like object but returns map[string]interface{}.
func (d *decodeState) objectInterface() map[string]interface{} {
m := make(map[string]interface{})
for {
// Read opening " of string key or closing }.
op := d.scanWhile(scanSkipSpace)
if op == scanEndObject {
// closing } - can only happen on first iteration.
break
}
if op != scanBeginLiteral {
d.error(errPhase)
}
// Read string key.
start := d.off - 1
op = d.scanWhile(scanContinue)
item := d.data[start : d.off-1]
key, ok := unquote(item)
if !ok {
d.error(errPhase)
}
// Read : before value.
if op == scanSkipSpace {
op = d.scanWhile(scanSkipSpace)
}
if op != scanObjectKey {
d.error(errPhase)
}
// Read value.
m[key] = d.valueInterface()
// Next token must be , or }.
op = d.scanWhile(scanSkipSpace)
if op == scanEndObject {
break
}
if op != scanObjectValue {
d.error(errPhase)
}
}
return m
}
// literalInterface is like literal but returns an interface value.
func (d *decodeState) literalInterface() interface{} {
// All bytes inside literal return scanContinue op code.
start := d.off - 1
op := d.scanWhile(scanContinue)
// Scan read one byte too far; back up.
d.off--
d.scan.undo(op)
item := d.data[start:d.off]
switch c := item[0]; c {
case 'n': // null
return nil
case 't', 'f': // true, false
return c == 't'
case '"': // string
s, ok := unquote(item)
if !ok {
d.error(errPhase)
}
return s
default: // number
if c != '-' && (c < '0' || c > '9') {
d.error(errPhase)
}
n, err := d.convertNumber(string(item))
if err != nil {
d.saveError(err)
}
return n
}
}
// getu4 decodes \uXXXX from the beginning of s, returning the hex value,
// or it returns -1.
func getu4(s []byte) rune {
if len(s) < 6 || s[0] != '\\' || s[1] != 'u' {
return -1
}
r, err := strconv.ParseUint(string(s[2:6]), 16, 64)
if err != nil {
return -1
}
return rune(r)
}
// unquote converts a quoted JSON string literal s into an actual string t.
// The rules are different than for Go, so cannot use strconv.Unquote.
func unquote(s []byte) (t string, ok bool) {
s, ok = unquoteBytes(s)
t = string(s)
return
}
func unquoteBytes(s []byte) (t []byte, ok bool) {
if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' {
return
}
s = s[1 : len(s)-1]
// Check for unusual characters. If there are none,
// then no unquoting is needed, so return a slice of the
// original bytes.
r := 0
for r < len(s) {
c := s[r]
if c == '\\' || c == '"' || c < ' ' {
break
}
if c < utf8.RuneSelf {
r++
continue
}
rr, size := utf8.DecodeRune(s[r:])
if rr == utf8.RuneError && size == 1 {
break
}
r += size
}
if r == len(s) {
return s, true
}
b := make([]byte, len(s)+2*utf8.UTFMax)
w := copy(b, s[0:r])
for r < len(s) {
// Out of room? Can only happen if s is full of
// malformed UTF-8 and we're replacing each
// byte with RuneError.
if w >= len(b)-2*utf8.UTFMax {
nb := make([]byte, (len(b)+utf8.UTFMax)*2)
copy(nb, b[0:w])
b = nb
}
switch c := s[r]; {
case c == '\\':
r++
if r >= len(s) {
return
}
switch s[r] {
default:
return
case '"', '\\', '/', '\'':
b[w] = s[r]
r++
w++
case 'b':
b[w] = '\b'
r++
w++
case 'f':
b[w] = '\f'
r++
w++
case 'n':
b[w] = '\n'
r++
w++
case 'r':
b[w] = '\r'
r++
w++
case 't':
b[w] = '\t'
r++
w++
case 'u':
r--
rr := getu4(s[r:])
if rr < 0 {
return
}
r += 6
if utf16.IsSurrogate(rr) {
rr1 := getu4(s[r:])
if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar {
// A valid pair; consume.
r += 6
w += utf8.EncodeRune(b[w:], dec)
break
}
// Invalid surrogate; fall back to replacement rune.
rr = unicode.ReplacementChar
}
w += utf8.EncodeRune(b[w:], rr)
}
// Quote, control characters are invalid.
case c == '"', c < ' ':
return
// ASCII
case c < utf8.RuneSelf:
b[w] = c
r++
w++
// Coerce to well-formed UTF-8.
default:
rr, size := utf8.DecodeRune(s[r:])
r += size
w += utf8.EncodeRune(b[w:], rr)
}
}
return b[0:w], true
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package json implements encoding and decoding of JSON objects as defined in
// RFC 4627. The mapping between JSON objects and Go values is described
// in the documentation for the Marshal and Unmarshal functions.
//
// See "JSON and Go" for an introduction to this package:
// http://golang.org/doc/articles/json_and_go.html
package json2
import (
"bytes"
"encoding"
"encoding/base64"
"math"
"reflect"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"unicode"
"unicode/utf8"
)
// Marshal returns the JSON encoding of v.
//
// Marshal traverses the value v recursively.
// If an encountered value implements the Marshaler interface
// and is not a nil pointer, Marshal calls its MarshalJSON method
// to produce JSON. The nil pointer exception is not strictly necessary
// but mimics a similar, necessary exception in the behavior of
// UnmarshalJSON.
//
// Otherwise, Marshal uses the following type-dependent default encodings:
//
// Boolean values encode as JSON booleans.
//
// Floating point, integer, and Number values encode as JSON numbers.
//
// String values encode as JSON strings. InvalidUTF8Error will be returned
// if an invalid UTF-8 sequence is encountered.
// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e"
// to keep some browsers from misinterpreting JSON output as HTML.
//
// Array and slice values encode as JSON arrays, except that
// []byte encodes as a base64-encoded string, and a nil slice
// encodes as the null JSON object.
//
// Struct values encode as JSON objects. Each exported struct field
// becomes a member of the object unless
// - the field's tag is "-", or
// - the field is empty and its tag specifies the "omitempty" option.
// The empty values are false, 0, any
// nil pointer or interface value, and any array, slice, map, or string of
// length zero. The object's default key string is the struct field name
// but can be specified in the struct field's tag value. The "json" key in
// the struct field's tag value is the key name, followed by an optional comma
// and options. Examples:
//
// // Field is ignored by this package.
// Field int `json:"-"`
//
// // Field appears in JSON as key "myName".
// Field int `json:"myName"`
//
// // Field appears in JSON as key "myName" and
// // the field is omitted from the object if its value is empty,
// // as defined above.
// Field int `json:"myName,omitempty"`
//
// // Field appears in JSON as key "Field" (the default), but
// // the field is skipped if empty.
// // Note the leading comma.
// Field int `json:",omitempty"`
//
// The "string" option signals that a field is stored as JSON inside a
// JSON-encoded string. It applies only to fields of string, floating point,
// or integer types. This extra level of encoding is sometimes used when
// communicating with JavaScript programs:
//
// Int64String int64 `json:",string"`
//
// The key name will be used if it's a non-empty string consisting of
// only Unicode letters, digits, dollar signs, percent signs, hyphens,
// underscores and slashes.
//
// Anonymous struct fields are usually marshaled as if their inner exported fields
// were fields in the outer struct, subject to the usual Go visibility rules amended
// as described in the next paragraph.
// An anonymous struct field with a name given in its JSON tag is treated as
// having that name, rather than being anonymous.
//
// The Go visibility rules for struct fields are amended for JSON when
// deciding which field to marshal or unmarshal. If there are
// multiple fields at the same level, and that level is the least
// nested (and would therefore be the nesting level selected by the
// usual Go rules), the following extra rules apply:
//
// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered,
// even if there are multiple untagged fields that would otherwise conflict.
// 2) If there is exactly one field (tagged or not according to the first rule), that is selected.
// 3) Otherwise there are multiple fields, and all are ignored; no error occurs.
//
// Handling of anonymous struct fields is new in Go 1.1.
// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of
// an anonymous struct field in both current and earlier versions, give the field
// a JSON tag of "-".
//
// Map values encode as JSON objects.
// The map's key type must be string; the object keys are used directly
// as map keys.
//
// Pointer values encode as the value pointed to.
// A nil pointer encodes as the null JSON object.
//
// Interface values encode as the value contained in the interface.
// A nil interface value encodes as the null JSON object.
//
// Channel, complex, and function values cannot be encoded in JSON.
// Attempting to encode such a value causes Marshal to return
// an UnsupportedTypeError.
//
// JSON cannot represent cyclic data structures and Marshal does not
// handle them. Passing cyclic structures to Marshal will result in
// an infinite recursion.
//
func Marshal(v interface{}) ([]byte, error) {
e := &encodeState{}
err := e.marshal(v)
if err != nil {
return nil, err
}
return e.Bytes(), nil
}
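// A minimal usage sketch for the tag options described above. The Person type
// and its field names are hypothetical, chosen only to illustrate "omitempty",
// "-", and the ",string" option:
//
//     type Person struct {
//         Name  string `json:"name"`
//         Age   int    `json:"age,omitempty"`
//         Token string `json:"-"`
//         ID    int64  `json:",string"`
//     }
//
//     b, err := Marshal(Person{Name: "Ann", ID: 7})
//     // err == nil
//     // string(b) == `{"name":"Ann","ID":"7"}`
//     // Age is dropped (zero value with omitempty); Token is always skipped.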
// MarshalIndent is like Marshal but applies Indent to format the output.
func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
b, err := Marshal(v)
if err != nil {
return nil, err
}
var buf bytes.Buffer
err = Indent(&buf, b, prefix, indent)
if err != nil {
return nil, err
}
return buf.Bytes(), nil
}
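// Sketch of the same idea rendered with MarshalIndent, using an empty prefix
// and a two-space indent:
//
//     b, _ := MarshalIndent(map[string]int{"a": 1}, "", "  ")
//     // string(b) ==
//     // {
//     //   "a": 1
//     // }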
// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029
// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029
// so that the JSON will be safe to embed inside HTML <script> tags.
// For historical reasons, web browsers don't honor standard HTML
// escaping within <script> tags, so an alternative JSON encoding must
// be used.
func HTMLEscape(dst *bytes.Buffer, src []byte) {
// The characters can only appear in string literals,
// so just scan the string one byte at a time.
start := 0
for i, c := range src {
if c == '<' || c == '>' || c == '&' {
if start < i {
dst.Write(src[start:i])
}
dst.WriteString(`\u00`)
dst.WriteByte(hex[c>>4])
dst.WriteByte(hex[c&0xF])
start = i + 1
}
// Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
if start < i {
dst.Write(src[start:i])
}
dst.WriteString(`\u202`)
dst.WriteByte(hex[src[i+2]&0xF])
start = i + 3
}
}
if start < len(src) {
dst.Write(src[start:])
}
}
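// Sketch of HTMLEscape applied to already-encoded JSON, assuming a
// bytes.Buffer destination:
//
//     var out bytes.Buffer
//     HTMLEscape(&out, []byte(`{"msg":"<b>hi</b> & bye"}`))
//     // out.String() == `{"msg":"\u003cb\u003ehi\u003c/b\u003e \u0026 bye"}`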
// Marshaler is the interface implemented by objects that
// can marshal themselves into valid JSON.
type Marshaler interface {
MarshalJSON() ([]byte, error)
}
// An UnsupportedTypeError is returned by Marshal when attempting
// to encode an unsupported value type.
type UnsupportedTypeError struct {
Type reflect.Type
}
func (e *UnsupportedTypeError) Error() string {
return "json: unsupported type: " + e.Type.String()
}
type UnsupportedValueError struct {
Value reflect.Value
Str string
}
func (e *UnsupportedValueError) Error() string {
return "json: unsupported value: " + e.Str
}
// Before Go 1.2, an InvalidUTF8Error was returned by Marshal when
// attempting to encode a string value with invalid UTF-8 sequences.
// As of Go 1.2, Marshal instead coerces the string to valid UTF-8 by
// replacing invalid bytes with the Unicode replacement rune U+FFFD.
// This error is no longer generated but is kept for backwards compatibility
// with programs that might mention it.
type InvalidUTF8Error struct {
S string // the whole string value that caused the error
}
func (e *InvalidUTF8Error) Error() string {
return "json: invalid UTF-8 in string: " + strconv.Quote(e.S)
}
type MarshalerError struct {
Type reflect.Type
Err error
}
func (e *MarshalerError) Error() string {
return "json: error calling MarshalJSON for type " + e.Type.String() + ": " + e.Err.Error()
}
var hex = "0123456789abcdef"
// An encodeState encodes JSON into a bytes.Buffer.
type encodeState struct {
bytes.Buffer // accumulated output
scratch [64]byte
}
// TODO(bradfitz): use a sync.Cache here
var encodeStatePool = make(chan *encodeState, 8)
func newEncodeState() *encodeState {
select {
case e := <-encodeStatePool:
e.Reset()
return e
default:
return new(encodeState)
}
}
func putEncodeState(e *encodeState) {
select {
case encodeStatePool <- e:
default:
}
}
func (e *encodeState) marshal(v interface{}) (err error) {
defer func() {
if r := recover(); r != nil {
if _, ok := r.(runtime.Error); ok {
panic(r)
}
if s, ok := r.(string); ok {
panic(s)
}
err = r.(error)
}
}()
e.reflectValue(reflect.ValueOf(v))
return nil
}
func (e *encodeState) error(err error) {
panic(err)
}
var byteSliceType = reflect.TypeOf([]byte(nil))
func isEmptyValue(v reflect.Value) bool {
switch v.Kind() {
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
return v.Len() == 0
case reflect.Bool:
return !v.Bool()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Interface, reflect.Ptr:
return v.IsNil()
}
return false
}
func (e *encodeState) reflectValue(v reflect.Value) {
valueEncoder(v)(e, v, false)
}
type encoderFunc func(e *encodeState, v reflect.Value, quoted bool)
var encoderCache struct {
sync.RWMutex
m map[reflect.Type]encoderFunc
}
func valueEncoder(v reflect.Value) encoderFunc {
if !v.IsValid() {
return invalidValueEncoder
}
return typeEncoder(v.Type())
}
func typeEncoder(t reflect.Type) encoderFunc {
encoderCache.RLock()
f := encoderCache.m[t]
encoderCache.RUnlock()
if f != nil {
return f
}
// To deal with recursive types, populate the map with an
// indirect func before we build it. This type waits on the
// real func (f) to be ready and then calls it. This indirect
// func is only used for recursive types.
encoderCache.Lock()
if encoderCache.m == nil {
encoderCache.m = make(map[reflect.Type]encoderFunc)
}
var wg sync.WaitGroup
wg.Add(1)
encoderCache.m[t] = func(e *encodeState, v reflect.Value, quoted bool) {
wg.Wait()
f(e, v, quoted)
}
encoderCache.Unlock()
// Compute fields without lock.
// Might duplicate effort but won't hold other computations back.
f = newTypeEncoder(t, true)
wg.Done()
encoderCache.Lock()
encoderCache.m[t] = f
encoderCache.Unlock()
return f
}
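// The indirect func installed above matters for self-referential types. A
// hypothetical
//
//     type Node struct {
//         Value int
//         Next  *Node `json:",omitempty"`
//     }
//
// makes newTypeEncoder(Node) re-enter typeEncoder for Node (via the *Node
// field) while the real encoder is still being built; the cached placeholder
// is returned instead, and it simply waits on wg at encode time until the
// finished encoder has been installed, so the recursion terminates.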
var (
marshalerType = reflect.TypeOf(new(Marshaler)).Elem()
textMarshalerType = reflect.TypeOf(new(encoding.TextMarshaler)).Elem()
)
// newTypeEncoder constructs an encoderFunc for a type.
// The returned encoder only checks CanAddr when allowAddr is true.
func newTypeEncoder(t reflect.Type, allowAddr bool) encoderFunc {
if t.Implements(marshalerType) {
return marshalerEncoder
}
if t.Kind() != reflect.Ptr && allowAddr {
if reflect.PtrTo(t).Implements(marshalerType) {
return newCondAddrEncoder(addrMarshalerEncoder, newTypeEncoder(t, false))
}
}
if t.Implements(textMarshalerType) {
return textMarshalerEncoder
}
if t.Kind() != reflect.Ptr && allowAddr {
if reflect.PtrTo(t).Implements(textMarshalerType) {
return newCondAddrEncoder(addrTextMarshalerEncoder, newTypeEncoder(t, false))
}
}
switch t.Kind() {
case reflect.Bool:
return boolEncoder
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return intEncoder
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return uintEncoder
case reflect.Float32:
return float32Encoder
case reflect.Float64:
return float64Encoder
case reflect.String:
return stringEncoder
case reflect.Interface:
return interfaceEncoder
case reflect.Struct:
return newStructEncoder(t)
case reflect.Map:
return newMapEncoder(t)
case reflect.Slice:
return newSliceEncoder(t)
case reflect.Array:
return newArrayEncoder(t)
case reflect.Ptr:
return newPtrEncoder(t)
default:
return unsupportedTypeEncoder
}
}
func invalidValueEncoder(e *encodeState, v reflect.Value, quoted bool) {
e.WriteString("null")
}
func marshalerEncoder(e *encodeState, v reflect.Value, quoted bool) {
if v.Kind() == reflect.Ptr && v.IsNil() {
e.WriteString("null")
return
}
m := v.Interface().(Marshaler)
b, err := m.MarshalJSON()
if err == nil {
// copy JSON into buffer, checking validity.
err = compact(&e.Buffer, b, true)
}
if err != nil {
e.error(&MarshalerError{v.Type(), err})
}
}
func addrMarshalerEncoder(e *encodeState, v reflect.Value, quoted bool) {
va := v.Addr()
if va.IsNil() {
e.WriteString("null")
return
}
m := va.Interface().(Marshaler)
b, err := m.MarshalJSON()
if err == nil {
// copy JSON into buffer, checking validity.
err = compact(&e.Buffer, b, true)
}
if err != nil {
e.error(&MarshalerError{v.Type(), err})
}
}
func textMarshalerEncoder(e *encodeState, v reflect.Value, quoted bool) {
if v.Kind() == reflect.Ptr && v.IsNil() {
e.WriteString("null")
return
}
m := v.Interface().(encoding.TextMarshaler)
b, err := m.MarshalText()
if err == nil {
_, err = e.stringBytes(b)
}
if err != nil {
e.error(&MarshalerError{v.Type(), err})
}
}
func addrTextMarshalerEncoder(e *encodeState, v reflect.Value, quoted bool) {
va := v.Addr()
if va.IsNil() {
e.WriteString("null")
return
}
m := va.Interface().(encoding.TextMarshaler)
b, err := m.MarshalText()
if err == nil {
_, err = e.stringBytes(b)
}
if err != nil {
e.error(&MarshalerError{v.Type(), err})
}
}
func boolEncoder(e *encodeState, v reflect.Value, quoted bool) {
if quoted {
e.WriteByte('"')
}
if v.Bool() {
e.WriteString("true")
} else {
e.WriteString("false")
}
if quoted {
e.WriteByte('"')
}
}
func intEncoder(e *encodeState, v reflect.Value, quoted bool) {
b := strconv.AppendInt(e.scratch[:0], v.Int(), 10)
if quoted {
e.WriteByte('"')
}
e.Write(b)
if quoted {
e.WriteByte('"')
}
}
func uintEncoder(e *encodeState, v reflect.Value, quoted bool) {
b := strconv.AppendUint(e.scratch[:0], v.Uint(), 10)
if quoted {
e.WriteByte('"')
}
e.Write(b)
if quoted {
e.WriteByte('"')
}
}
type floatEncoder int // number of bits
func (bits floatEncoder) encode(e *encodeState, v reflect.Value, quoted bool) {
f := v.Float()
if math.IsInf(f, 0) || math.IsNaN(f) {
e.error(&UnsupportedValueError{v, strconv.FormatFloat(f, 'g', -1, int(bits))})
}
b := strconv.AppendFloat(e.scratch[:0], f, 'g', -1, int(bits))
if quoted {
e.WriteByte('"')
}
e.Write(b)
if quoted {
e.WriteByte('"')
}
}
var (
float32Encoder = (floatEncoder(32)).encode
float64Encoder = (floatEncoder(64)).encode
)
func stringEncoder(e *encodeState, v reflect.Value, quoted bool) {
if v.Type() == numberType {
numStr := v.String()
if numStr == "" {
numStr = "0" // Number's zero-val
}
e.WriteString(numStr)
return
}
if quoted {
sb, err := Marshal(v.String())
if err != nil {
e.error(err)
}
e.string(string(sb))
} else {
e.string(v.String())
}
}
func interfaceEncoder(e *encodeState, v reflect.Value, quoted bool) {
if v.IsNil() {
e.WriteString("null")
return
}
e.reflectValue(v.Elem())
}
func unsupportedTypeEncoder(e *encodeState, v reflect.Value, quoted bool) {
e.error(&UnsupportedTypeError{v.Type()})
}
type structEncoder struct {
fields []field
fieldEncs []encoderFunc
}
func (se *structEncoder) encode(e *encodeState, v reflect.Value, quoted bool) {
e.WriteByte('{')
first := true
for i, f := range se.fields {
fv := fieldByIndex(v, f.index)
if !fv.IsValid() || f.omitEmpty && isEmptyValue(fv) {
continue
}
if first {
first = false
} else {
e.WriteByte(',')
}
e.string(f.name)
e.WriteByte(':')
se.fieldEncs[i](e, fv, f.quoted)
}
e.WriteByte('}')
}
func newStructEncoder(t reflect.Type) encoderFunc {
fields := cachedTypeFields(t)
se := &structEncoder{
fields: fields,
fieldEncs: make([]encoderFunc, len(fields)),
}
for i, f := range fields {
se.fieldEncs[i] = typeEncoder(typeByIndex(t, f.index))
}
return se.encode
}
type mapEncoder struct {
elemEnc encoderFunc
}
func (me *mapEncoder) encode(e *encodeState, v reflect.Value, _ bool) {
if v.IsNil() {
e.WriteString("null")
return
}
e.WriteByte('{')
var sv stringValues = v.MapKeys()
sort.Sort(sv)
for i, k := range sv {
if i > 0 {
e.WriteByte(',')
}
e.string(k.String())
e.WriteByte(':')
me.elemEnc(e, v.MapIndex(k), false)
}
e.WriteByte('}')
}
func newMapEncoder(t reflect.Type) encoderFunc {
if t.Key().Kind() != reflect.String {
return unsupportedTypeEncoder
}
me := &mapEncoder{typeEncoder(t.Elem())}
return me.encode
}
func encodeByteSlice(e *encodeState, v reflect.Value, _ bool) {
if v.IsNil() {
e.WriteString("null")
return
}
s := v.Bytes()
e.WriteByte('"')
if len(s) < 1024 {
// for small buffers, using Encode directly is much faster.
dst := make([]byte, base64.StdEncoding.EncodedLen(len(s)))
base64.StdEncoding.Encode(dst, s)
e.Write(dst)
} else {
// for large buffers, avoid unnecessary extra temporary
// buffer space.
enc := base64.NewEncoder(base64.StdEncoding, e)
enc.Write(s)
enc.Close()
}
e.WriteByte('"')
}
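// Sketch of the []byte special case above: a byte slice is emitted as a
// base64 string rather than a JSON array.
//
//     b, _ := Marshal(map[string][]byte{"raw": []byte("hi")})
//     // string(b) == `{"raw":"aGk="}`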
// sliceEncoder just wraps an arrayEncoder, checking to make sure the value isn't nil.
type sliceEncoder struct {
arrayEnc encoderFunc
}
func (se *sliceEncoder) encode(e *encodeState, v reflect.Value, _ bool) {
if v.IsNil() {
e.WriteString("null")
return
}
se.arrayEnc(e, v, false)
}
func newSliceEncoder(t reflect.Type) encoderFunc {
// Byte slices get special treatment; arrays don't.
if t.Elem().Kind() == reflect.Uint8 {
return encodeByteSlice
}
enc := &sliceEncoder{newArrayEncoder(t)}
return enc.encode
}
type arrayEncoder struct {
elemEnc encoderFunc
}
func (ae *arrayEncoder) encode(e *encodeState, v reflect.Value, _ bool) {
e.WriteByte('[')
n := v.Len()
for i := 0; i < n; i++ {
if i > 0 {
e.WriteByte(',')
}
ae.elemEnc(e, v.Index(i), false)
}
e.WriteByte(']')
}
func newArrayEncoder(t reflect.Type) encoderFunc {
enc := &arrayEncoder{typeEncoder(t.Elem())}
return enc.encode
}
type ptrEncoder struct {
elemEnc encoderFunc
}
func (pe *ptrEncoder) encode(e *encodeState, v reflect.Value, _ bool) {
if v.IsNil() {
e.WriteString("null")
return
}
pe.elemEnc(e, v.Elem(), false)
}
func newPtrEncoder(t reflect.Type) encoderFunc {
enc := &ptrEncoder{typeEncoder(t.Elem())}
return enc.encode
}
type condAddrEncoder struct {
canAddrEnc, elseEnc encoderFunc
}
func (ce *condAddrEncoder) encode(e *encodeState, v reflect.Value, quoted bool) {
if v.CanAddr() {
ce.canAddrEnc(e, v, quoted)
} else {
ce.elseEnc(e, v, quoted)
}
}
// newCondAddrEncoder returns an encoder that checks whether its value
// CanAddr and delegates to canAddrEnc if so, else to elseEnc.
func newCondAddrEncoder(canAddrEnc, elseEnc encoderFunc) encoderFunc {
enc := &condAddrEncoder{canAddrEnc: canAddrEnc, elseEnc: elseEnc}
return enc.encode
}
func isValidTag(s string) bool {
if s == "" {
return false
}
for _, c := range s {
switch {
case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
// Backslash and quote chars are reserved, but
// otherwise any punctuation chars are allowed
// in a tag name.
default:
if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
return false
}
}
}
return true
}
func fieldByIndex(v reflect.Value, index []int) reflect.Value {
for _, i := range index {
if v.Kind() == reflect.Ptr {
if v.IsNil() {
return reflect.Value{}
}
v = v.Elem()
}
v = v.Field(i)
}
return v
}
func typeByIndex(t reflect.Type, index []int) reflect.Type {
for _, i := range index {
if t.Kind() == reflect.Ptr {
t = t.Elem()
}
t = t.Field(i).Type
}
return t
}
// stringValues is a slice of reflect.Value holding string values.
// It implements the methods to sort by string.
type stringValues []reflect.Value
func (sv stringValues) Len() int { return len(sv) }
func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] }
func (sv stringValues) Less(i, j int) bool { return sv.get(i) < sv.get(j) }
func (sv stringValues) get(i int) string { return sv[i].String() }
// NOTE: keep in sync with stringBytes below.
func (e *encodeState) string(s string) (int, error) {
len0 := e.Len()
e.WriteByte('"')
start := 0
for i := 0; i < len(s); {
if b := s[i]; b < utf8.RuneSelf {
if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
i++
continue
}
if start < i {
e.WriteString(s[start:i])
}
switch b {
case '\\', '"':
e.WriteByte('\\')
e.WriteByte(b)
case '\n':
e.WriteByte('\\')
e.WriteByte('n')
case '\r':
e.WriteByte('\\')
e.WriteByte('r')
default:
// This encodes bytes < 0x20 except for \n and \r,
// as well as <, > and &. The HTML characters are escaped because they
// can lead to security holes when user-controlled strings
// are rendered into JSON and served to some browsers.
e.WriteString(`\u00`)
e.WriteByte(hex[b>>4])
e.WriteByte(hex[b&0xF])
}
i++
start = i
continue
}
c, size := utf8.DecodeRuneInString(s[i:])
if c == utf8.RuneError && size == 1 {
if start < i {
e.WriteString(s[start:i])
}
e.WriteString(`\ufffd`)
i += size
start = i
continue
}
// U+2028 is LINE SEPARATOR.
// U+2029 is PARAGRAPH SEPARATOR.
// They are both technically valid characters in JSON strings,
// but don't work in JSONP, which has to be evaluated as JavaScript,
// and can lead to security holes there. It is valid JSON to
// escape them, so we do so unconditionally.
// See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
if c == '\u2028' || c == '\u2029' {
if start < i {
e.WriteString(s[start:i])
}
e.WriteString(`\u202`)
e.WriteByte(hex[c&0xF])
i += size
start = i
continue
}
i += size
}
if start < len(s) {
e.WriteString(s[start:])
}
e.WriteByte('"')
return e.Len() - len0, nil
}
// NOTE: keep in sync with string above.
func (e *encodeState) stringBytes(s []byte) (int, error) {
len0 := e.Len()
e.WriteByte('"')
start := 0
for i := 0; i < len(s); {
if b := s[i]; b < utf8.RuneSelf {
if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
i++
continue
}
if start < i {
e.Write(s[start:i])
}
switch b {
case '\\', '"':
e.WriteByte('\\')
e.WriteByte(b)
case '\n':
e.WriteByte('\\')
e.WriteByte('n')
case '\r':
e.WriteByte('\\')
e.WriteByte('r')
default:
// This encodes bytes < 0x20 except for \n and \r,
// as well as <, > and &. The HTML characters are escaped because they
// can lead to security holes when user-controlled strings
// are rendered into JSON and served to some browsers.
e.WriteString(`\u00`)
e.WriteByte(hex[b>>4])
e.WriteByte(hex[b&0xF])
}
i++
start = i
continue
}
c, size := utf8.DecodeRune(s[i:])
if c == utf8.RuneError && size == 1 {
if start < i {
e.Write(s[start:i])
}
e.WriteString(`\ufffd`)
i += size
start = i
continue
}
// U+2028 is LINE SEPARATOR.
// U+2029 is PARAGRAPH SEPARATOR.
// They are both technically valid characters in JSON strings,
// but don't work in JSONP, which has to be evaluated as JavaScript,
// and can lead to security holes there. It is valid JSON to
// escape them, so we do so unconditionally.
// See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
if c == '\u2028' || c == '\u2029' {
if start < i {
e.Write(s[start:i])
}
e.WriteString(`\u202`)
e.WriteByte(hex[c&0xF])
i += size
start = i
continue
}
i += size
}
if start < len(s) {
e.Write(s[start:])
}
e.WriteByte('"')
return e.Len() - len0, nil
}
// A field represents a single field found in a struct.
type field struct {
name string
tag bool
index []int
typ reflect.Type
omitEmpty bool
quoted bool
}
// byName sorts field by name, breaking ties with depth,
// then breaking ties with "name came from json tag", then
// breaking ties with index sequence.
type byName []field
func (x byName) Len() int { return len(x) }
func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byName) Less(i, j int) bool {
if x[i].name != x[j].name {
return x[i].name < x[j].name
}
if len(x[i].index) != len(x[j].index) {
return len(x[i].index) < len(x[j].index)
}
if x[i].tag != x[j].tag {
return x[i].tag
}
return byIndex(x).Less(i, j)
}
// byIndex sorts field by index sequence.
type byIndex []field
func (x byIndex) Len() int { return len(x) }
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byIndex) Less(i, j int) bool {
for k, xik := range x[i].index {
if k >= len(x[j].index) {
return false
}
if xik != x[j].index[k] {
return xik < x[j].index[k]
}
}
return len(x[i].index) < len(x[j].index)
}
// typeFields returns a list of fields that JSON should recognize for the given type.
// The algorithm is breadth-first search over the set of structs to include - the top struct
// and then any reachable anonymous structs.
func typeFields(t reflect.Type) []field {
// Anonymous fields to explore at the current level and the next.
current := []field{}
next := []field{{typ: t}}
// Count of queued names for current level and the next.
count := map[reflect.Type]int{}
nextCount := map[reflect.Type]int{}
// Types already visited at an earlier level.
visited := map[reflect.Type]bool{}
// Fields found.
var fields []field
for len(next) > 0 {
current, next = next, current[:0]
count, nextCount = nextCount, map[reflect.Type]int{}
for _, f := range current {
if visited[f.typ] {
continue
}
visited[f.typ] = true
// Scan f.typ for fields to include.
for i := 0; i < f.typ.NumField(); i++ {
sf := f.typ.Field(i)
// if sf.PkgPath != "" { // unexported
// continue
// }
tag := sf.Tag.Get("json")
if tag == "-" {
continue
}
name, opts := parseTag(tag)
if !isValidTag(name) {
name = ""
}
index := make([]int, len(f.index)+1)
copy(index, f.index)
index[len(f.index)] = i
ft := sf.Type
if ft.Name() == "" && ft.Kind() == reflect.Ptr {
// Follow pointer.
ft = ft.Elem()
}
// Record found field and index sequence.
if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
tagged := name != ""
if name == "" {
name = sf.Name
}
fields = append(fields, field{name, tagged, index, ft,
opts.Contains("omitempty"), opts.Contains("string")})
if count[f.typ] > 1 {
// If there were multiple instances, add a second,
// so that the annihilation code will see a duplicate.
// It only cares about the distinction between 1 or 2,
// so don't bother generating any more copies.
fields = append(fields, fields[len(fields)-1])
}
continue
}
// Record new anonymous struct to explore in next round.
nextCount[ft]++
if nextCount[ft] == 1 {
next = append(next, field{name: ft.Name(), index: index, typ: ft})
}
}
}
}
sort.Sort(byName(fields))
// Delete all fields that are hidden by the Go rules for embedded fields,
// except that fields with JSON tags are promoted.
// The fields are sorted in primary order of name, secondary order
// of field index length. Loop over names; for each name, delete
// hidden fields by choosing the one dominant field that survives.
out := fields[:0]
for advance, i := 0, 0; i < len(fields); i += advance {
// One iteration per name.
// Find the sequence of fields with the name of this first field.
fi := fields[i]
name := fi.name
for advance = 1; i+advance < len(fields); advance++ {
fj := fields[i+advance]
if fj.name != name {
break
}
}
if advance == 1 { // Only one field with this name
out = append(out, fi)
continue
}
dominant, ok := dominantField(fields[i : i+advance])
if ok {
out = append(out, dominant)
}
}
fields = out
sort.Sort(byIndex(fields))
return fields
}
// dominantField looks through the fields, all of which are known to
// have the same name, to find the single field that dominates the
// others using Go's embedding rules, modified by the presence of
// JSON tags. If there are multiple top-level fields, the boolean
// will be false: This condition is an error in Go and we skip all
// the fields.
func dominantField(fields []field) (field, bool) {
// The fields are sorted in increasing index-length order. The winner
// must therefore be one with the shortest index length. Drop all
// longer entries, which is easy: just truncate the slice.
length := len(fields[0].index)
tagged := -1 // Index of first tagged field.
for i, f := range fields {
if len(f.index) > length {
fields = fields[:i]
break
}
if f.tag {
if tagged >= 0 {
// Multiple tagged fields at the same level: conflict.
// Return no field.
return field{}, false
}
tagged = i
}
}
if tagged >= 0 {
return fields[tagged], true
}
// All remaining fields have the same length. If there's more than one,
// we have a conflict (two fields named "X" at the same level) and we
// return no field.
if len(fields) > 1 {
return field{}, false
}
return fields[0], true
}
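// An example of these dominance rules, with hypothetical embedded structs:
//
//     type A struct{ X int }
//     type B struct{ X int `json:"X"` }
//     type Outer struct {
//         A
//         B
//     }
//
// Both promoted fields are named "X" at the same depth, but only B.X carries a
// JSON tag, so B.X dominates and Marshal(Outer{}) yields {"X":0}. If neither
// were tagged, dominantField would report ok == false and "X" would be omitted
// from the output entirely.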
var fieldCache struct {
sync.RWMutex
m map[reflect.Type][]field
}
// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
func cachedTypeFields(t reflect.Type) []field {
fieldCache.RLock()
f := fieldCache.m[t]
fieldCache.RUnlock()
if f != nil {
return f
}
// Compute fields without lock.
// Might duplicate effort but won't hold other computations back.
f = typeFields(t)
if f == nil {
f = []field{}
}
fieldCache.Lock()
if fieldCache.m == nil {
fieldCache.m = map[reflect.Type][]field{}
}
fieldCache.m[t] = f
fieldCache.Unlock()
return f
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json2
import "bytes"
// Compact appends to dst the JSON-encoded src with
// insignificant space characters elided.
func Compact(dst *bytes.Buffer, src []byte) error {
return compact(dst, src, false)
}
func compact(dst *bytes.Buffer, src []byte, escape bool) error {
origLen := dst.Len()
var scan scanner
scan.reset()
start := 0
for i, c := range src {
if escape && (c == '<' || c == '>' || c == '&') {
if start < i {
dst.Write(src[start:i])
}
dst.WriteString(`\u00`)
dst.WriteByte(hex[c>>4])
dst.WriteByte(hex[c&0xF])
start = i + 1
}
// Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
if start < i {
dst.Write(src[start:i])
}
dst.WriteString(`\u202`)
dst.WriteByte(hex[src[i+2]&0xF])
start = i + 3
}
v := scan.step(&scan, int(c))
if v >= scanSkipSpace {
if v == scanError {
break
}
if start < i {
dst.Write(src[start:i])
}
start = i + 1
}
}
if scan.eof() == scanError {
dst.Truncate(origLen)
return scan.err
}
if start < len(src) {
dst.Write(src[start:])
}
return nil
}
func newline(dst *bytes.Buffer, prefix, indent string, depth int) {
dst.WriteByte('\n')
dst.WriteString(prefix)
for i := 0; i < depth; i++ {
dst.WriteString(indent)
}
}
// Indent appends to dst an indented form of the JSON-encoded src.
// Each element in a JSON object or array begins on a new,
// indented line beginning with prefix followed by one or more
// copies of indent according to the indentation nesting.
// The data appended to dst has no trailing newline, to make it easier
// to embed inside other formatted JSON data.
func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error {
origLen := dst.Len()
var scan scanner
scan.reset()
needIndent := false
depth := 0
for _, c := range src {
scan.bytes++
v := scan.step(&scan, int(c))
if v == scanSkipSpace {
continue
}
if v == scanError {
break
}
if needIndent && v != scanEndObject && v != scanEndArray {
needIndent = false
depth++
newline(dst, prefix, indent, depth)
}
// Emit semantically uninteresting bytes
// (in particular, punctuation in strings) unmodified.
if v == scanContinue {
dst.WriteByte(c)
continue
}
// Add spacing around real punctuation.
switch c {
case '{', '[':
// delay indent so that empty object and array are formatted as {} and [].
needIndent = true
dst.WriteByte(c)
case ',':
dst.WriteByte(c)
newline(dst, prefix, indent, depth)
case ':':
dst.WriteByte(c)
dst.WriteByte(' ')
case '}', ']':
if needIndent {
// suppress indent in empty object/array
needIndent = false
} else {
depth--
newline(dst, prefix, indent, depth)
}
dst.WriteByte(c)
default:
dst.WriteByte(c)
}
}
if scan.eof() == scanError {
dst.Truncate(origLen)
return scan.err
}
return nil
}
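// Sketch pairing Indent and Compact, assuming a bytes.Buffer destination:
//
//     var buf bytes.Buffer
//     _ = Indent(&buf, []byte(`{"a":[1,2]}`), "", "  ")
//     // buf.String() ==
//     // {
//     //   "a": [
//     //     1,
//     //     2
//     //   ]
//     // }
//
//     buf.Reset()
//     _ = Compact(&buf, []byte("{ \"a\": [1, 2] }"))
//     // buf.String() == `{"a":[1,2]}`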
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json2
// JSON value parser state machine.
// Just about at the limit of what is reasonable to write by hand.
// Some parts are a bit tedious, but overall it nicely factors out the
// otherwise common code from the multiple scanning functions
// in this package (Compact, Indent, checkValid, nextValue, etc).
//
// This file starts with two simple examples using the scanner
// before diving into the scanner itself.
import "strconv"
// checkValid verifies that data is valid JSON-encoded data.
// scan is passed in for use by checkValid to avoid an allocation.
func checkValid(data []byte, scan *scanner) error {
scan.reset()
for _, c := range data {
scan.bytes++
if scan.step(scan, int(c)) == scanError {
return scan.err
}
}
if scan.eof() == scanError {
return scan.err
}
return nil
}
// nextValue splits data after the next whole JSON value,
// returning that value and the bytes that follow it as separate slices.
// scan is passed in for use by nextValue to avoid an allocation.
func nextValue(data []byte, scan *scanner) (value, rest []byte, err error) {
scan.reset()
for i, c := range data {
v := scan.step(scan, int(c))
if v >= scanEnd {
switch v {
case scanError:
return nil, nil, scan.err
case scanEnd:
return data[0:i], data[i:], nil
}
}
}
if scan.eof() == scanError {
return nil, nil, scan.err
}
return data, nil, nil
}
// A SyntaxError is a description of a JSON syntax error.
type SyntaxError struct {
msg string // description of error
Offset int64 // error occurred after reading Offset bytes
}
func (e *SyntaxError) Error() string { return e.msg }
// A scanner is a JSON scanning state machine.
// Callers call scan.reset() and then pass bytes in one at a time
// by calling scan.step(&scan, c) for each byte.
// The return value, referred to as an opcode, tells the
// caller about significant parsing events like beginning
// and ending literals, objects, and arrays, so that the
// caller can follow along if it wishes.
// The return value scanEnd indicates that a single top-level
// JSON value has been completed, *before* the byte that
// just got passed in. (The indication must be delayed in order
// to recognize the end of numbers: is 123 a whole value or
// the beginning of 12345e+6?).
type scanner struct {
// The step is a func to be called to execute the next transition.
// Also tried using an integer constant and a single func
// with a switch, but using the func directly was 10% faster
// on a 64-bit Mac Mini, and it's nicer to read.
step func(*scanner, int) int
// Reached end of top-level value.
endTop bool
// Stack of what we're in the middle of - array values, object keys, object values.
parseState []int
// Error that happened, if any.
err error
// 1-byte redo (see undo method)
redo bool
redoCode int
redoState func(*scanner, int) int
// total bytes consumed, updated by decoder.Decode
bytes int64
}
// These values are returned by the state transition functions
// assigned to scanner.state and the method scanner.eof.
// They give details about the current state of the scan that
// callers might be interested to know about.
// It is okay to ignore the return value of any particular
// call to scanner.state: if one call returns scanError,
// every subsequent call will return scanError too.
const (
// Continue.
scanContinue = iota // uninteresting byte
scanBeginLiteral // end implied by next result != scanContinue
scanBeginObject // begin object
scanObjectKey // just finished object key (string)
scanObjectValue // just finished non-last object value
scanEndObject // end object (implies scanObjectValue if possible)
scanBeginArray // begin array
scanArrayValue // just finished array value
scanEndArray // end array (implies scanArrayValue if possible)
scanSkipSpace // space byte; can skip; known to be last "continue" result
// Stop.
scanEnd // top-level value ended *before* this byte; known to be first "stop" result
scanError // hit an error, scanner.err.
)
// These values are stored in the parseState stack.
// They give the current state of a composite value
// being scanned. If the parser is inside a nested value
// the parseState describes the nested state, outermost at entry 0.
const (
parseObjectKey = iota // parsing object key (before colon)
parseObjectValue // parsing object value (after colon)
parseArrayValue // parsing array value
)
// reset prepares the scanner for use.
// It must be called before calling s.step.
func (s *scanner) reset() {
s.step = stateBeginValue
s.parseState = s.parseState[0:0]
s.err = nil
s.redo = false
s.endTop = false
}
// eof tells the scanner that the end of input has been reached.
// It returns a scan status just as s.step does.
func (s *scanner) eof() int {
if s.err != nil {
return scanError
}
if s.endTop {
return scanEnd
}
s.step(s, ' ')
if s.endTop {
return scanEnd
}
if s.err == nil {
s.err = &SyntaxError{"unexpected end of JSON input", s.bytes}
}
return scanError
}
// pushParseState pushes a new parse state p onto the parse stack.
func (s *scanner) pushParseState(p int) {
s.parseState = append(s.parseState, p)
}
// popParseState pops a parse state (already obtained) off the stack
// and updates s.step accordingly.
func (s *scanner) popParseState() {
n := len(s.parseState) - 1
s.parseState = s.parseState[0:n]
s.redo = false
if n == 0 {
s.step = stateEndTop
s.endTop = true
} else {
s.step = stateEndValue
}
}
func isSpace(c rune) bool {
return c == ' ' || c == '\t' || c == '\r' || c == '\n'
}
// stateBeginValueOrEmpty is the state after reading `[`.
func stateBeginValueOrEmpty(s *scanner, c int) int {
if c <= ' ' && isSpace(rune(c)) {
return scanSkipSpace
}
if c == ']' {
return stateEndValue(s, c)
}
return stateBeginValue(s, c)
}
// stateBeginValue is the state at the beginning of the input.
func stateBeginValue(s *scanner, c int) int {
if c <= ' ' && isSpace(rune(c)) {
return scanSkipSpace
}
switch c {
case '{':
s.step = stateBeginStringOrEmpty
s.pushParseState(parseObjectKey)
return scanBeginObject
case '[':
s.step = stateBeginValueOrEmpty
s.pushParseState(parseArrayValue)
return scanBeginArray
case '"':
s.step = stateInString
return scanBeginLiteral
case '-':
s.step = stateNeg
return scanBeginLiteral
case '0': // beginning of 0.123
s.step = state0
return scanBeginLiteral
case 't': // beginning of true
s.step = stateT
return scanBeginLiteral
case 'f': // beginning of false
s.step = stateF
return scanBeginLiteral
case 'n': // beginning of null
s.step = stateN
return scanBeginLiteral
}
if '1' <= c && c <= '9' { // beginning of 1234.5
s.step = state1
return scanBeginLiteral
}
return s.error(c, "looking for beginning of value")
}
// stateBeginStringOrEmpty is the state after reading `{`.
func stateBeginStringOrEmpty(s *scanner, c int) int {
if c <= ' ' && isSpace(rune(c)) {
return scanSkipSpace
}
if c == '}' {
n := len(s.parseState)
s.parseState[n-1] = parseObjectValue
return stateEndValue(s, c)
}
return stateBeginString(s, c)
}
// stateBeginString is the state after reading `{"key": value,`.
func stateBeginString(s *scanner, c int) int {
if c <= ' ' && isSpace(rune(c)) {
return scanSkipSpace
}
if c == '"' {
s.step = stateInString
return scanBeginLiteral
}
return s.error(c, "looking for beginning of object key string")
}
// stateEndValue is the state after completing a value,
// such as after reading `{}` or `true` or `["x"`.
func stateEndValue(s *scanner, c int) int {
n := len(s.parseState)
if n == 0 {
// Completed top-level before the current byte.
s.step = stateEndTop
s.endTop = true
return stateEndTop(s, c)
}
if c <= ' ' && isSpace(rune(c)) {
s.step = stateEndValue
return scanSkipSpace
}
ps := s.parseState[n-1]
switch ps {
case parseObjectKey:
if c == ':' {
s.parseState[n-1] = parseObjectValue
s.step = stateBeginValue
return scanObjectKey
}
return s.error(c, "after object key")
case parseObjectValue:
if c == ',' {
s.parseState[n-1] = parseObjectKey
s.step = stateBeginString
return scanObjectValue
}
if c == '}' {
s.popParseState()
return scanEndObject
}
return s.error(c, "after object key:value pair")
case parseArrayValue:
if c == ',' {
s.step = stateBeginValue
return scanArrayValue
}
if c == ']' {
s.popParseState()
return scanEndArray
}
return s.error(c, "after array element")
}
return s.error(c, "")
}
// stateEndTop is the state after finishing the top-level value,
// such as after reading `{}` or `[1,2,3]`.
// Only space characters should be seen now.
func stateEndTop(s *scanner, c int) int {
if c != ' ' && c != '\t' && c != '\r' && c != '\n' {
// Complain about non-space byte on next call.
s.error(c, "after top-level value")
}
return scanEnd
}
// stateInString is the state after reading `"`.
func stateInString(s *scanner, c int) int {
if c == '"' {
s.step = stateEndValue
return scanContinue
}
if c == '\\' {
s.step = stateInStringEsc
return scanContinue
}
if c < 0x20 {
return s.error(c, "in string literal")
}
return scanContinue
}
// stateInStringEsc is the state after reading `"\` during a quoted string.
func stateInStringEsc(s *scanner, c int) int {
switch c {
case 'b', 'f', 'n', 'r', 't', '\\', '/', '"':
s.step = stateInString
return scanContinue
}
if c == 'u' {
s.step = stateInStringEscU
return scanContinue
}
return s.error(c, "in string escape code")
}
// stateInStringEscU is the state after reading `"\u` during a quoted string.
func stateInStringEscU(s *scanner, c int) int {
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
s.step = stateInStringEscU1
return scanContinue
}
// numbers
return s.error(c, "in \\u hexadecimal character escape")
}
// stateInStringEscU1 is the state after reading `"\u1` during a quoted string.
func stateInStringEscU1(s *scanner, c int) int {
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
s.step = stateInStringEscU12
return scanContinue
}
// numbers
return s.error(c, "in \\u hexadecimal character escape")
}
// stateInStringEscU12 is the state after reading `"\u12` during a quoted string.
func stateInStringEscU12(s *scanner, c int) int {
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
s.step = stateInStringEscU123
return scanContinue
}
// numbers
return s.error(c, "in \\u hexadecimal character escape")
}
// stateInStringEscU123 is the state after reading `"\u123` during a quoted string.
func stateInStringEscU123(s *scanner, c int) int {
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
s.step = stateInString
return scanContinue
}
// numbers
return s.error(c, "in \\u hexadecimal character escape")
}
// stateNeg is the state after reading `-` during a number.
func stateNeg(s *scanner, c int) int {
if c == '0' {
s.step = state0
return scanContinue
}
if '1' <= c && c <= '9' {
s.step = state1
return scanContinue
}
return s.error(c, "in numeric literal")
}
// state1 is the state after reading a non-zero integer during a number,
// such as after reading `1` or `100` but not `0`.
func state1(s *scanner, c int) int {
if '0' <= c && c <= '9' {
s.step = state1
return scanContinue
}
return state0(s, c)
}
// state0 is the state after reading `0` during a number.
func state0(s *scanner, c int) int {
if c == '.' {
s.step = stateDot
return scanContinue
}
if c == 'e' || c == 'E' {
s.step = stateE
return scanContinue
}
return stateEndValue(s, c)
}
// stateDot is the state after reading the integer and decimal point in a number,
// such as after reading `1.`.
func stateDot(s *scanner, c int) int {
if '0' <= c && c <= '9' {
s.step = stateDot0
return scanContinue
}
return s.error(c, "after decimal point in numeric literal")
}
// stateDot0 is the state after reading the integer, decimal point, and subsequent
// digits of a number, such as after reading `3.14`.
func stateDot0(s *scanner, c int) int {
if '0' <= c && c <= '9' {
s.step = stateDot0
return scanContinue
}
if c == 'e' || c == 'E' {
s.step = stateE
return scanContinue
}
return stateEndValue(s, c)
}
// stateE is the state after reading the mantissa and e in a number,
// such as after reading `314e` or `0.314e`.
func stateE(s *scanner, c int) int {
if c == '+' {
s.step = stateESign
return scanContinue
}
if c == '-' {
s.step = stateESign
return scanContinue
}
return stateESign(s, c)
}
// stateESign is the state after reading the mantissa, e, and sign in a number,
// such as after reading `314e-` or `0.314e+`.
func stateESign(s *scanner, c int) int {
if '0' <= c && c <= '9' {
s.step = stateE0
return scanContinue
}
return s.error(c, "in exponent of numeric literal")
}
// stateE0 is the state after reading the mantissa, e, optional sign,
// and at least one digit of the exponent in a number,
// such as after reading `314e-2` or `0.314e+1` or `3.14e0`.
func stateE0(s *scanner, c int) int {
if '0' <= c && c <= '9' {
s.step = stateE0
return scanContinue
}
return stateEndValue(s, c)
}
// stateT is the state after reading `t`.
func stateT(s *scanner, c int) int {
if c == 'r' {
s.step = stateTr
return scanContinue
}
return s.error(c, "in literal true (expecting 'r')")
}
// stateTr is the state after reading `tr`.
func stateTr(s *scanner, c int) int {
if c == 'u' {
s.step = stateTru
return scanContinue
}
return s.error(c, "in literal true (expecting 'u')")
}
// stateTru is the state after reading `tru`.
func stateTru(s *scanner, c int) int {
if c == 'e' {
s.step = stateEndValue
return scanContinue
}
return s.error(c, "in literal true (expecting 'e')")
}
// stateF is the state after reading `f`.
func stateF(s *scanner, c int) int {
if c == 'a' {
s.step = stateFa
return scanContinue
}
return s.error(c, "in literal false (expecting 'a')")
}
// stateFa is the state after reading `fa`.
func stateFa(s *scanner, c int) int {
if c == 'l' {
s.step = stateFal
return scanContinue
}
return s.error(c, "in literal false (expecting 'l')")
}
// stateFal is the state after reading `fal`.
func stateFal(s *scanner, c int) int {
if c == 's' {
s.step = stateFals
return scanContinue
}
return s.error(c, "in literal false (expecting 's')")
}
// stateFals is the state after reading `fals`.
func stateFals(s *scanner, c int) int {
if c == 'e' {
s.step = stateEndValue
return scanContinue
}
return s.error(c, "in literal false (expecting 'e')")
}
// stateN is the state after reading `n`.
func stateN(s *scanner, c int) int {
if c == 'u' {
s.step = stateNu
return scanContinue
}
return s.error(c, "in literal null (expecting 'u')")
}
// stateNu is the state after reading `nu`.
func stateNu(s *scanner, c int) int {
if c == 'l' {
s.step = stateNul
return scanContinue
}
return s.error(c, "in literal null (expecting 'l')")
}
// stateNul is the state after reading `nul`.
func stateNul(s *scanner, c int) int {
if c == 'l' {
s.step = stateEndValue
return scanContinue
}
return s.error(c, "in literal null (expecting 'l')")
}
// stateError is the state after reaching a syntax error,
// such as after reading `[1}` or `5.1.2`.
func stateError(s *scanner, c int) int {
return scanError
}
// error records an error and switches to the error state.
func (s *scanner) error(c int, context string) int {
s.step = stateError
s.err = &SyntaxError{"invalid character " + quoteChar(c) + " " + context, s.bytes}
return scanError
}
// quoteChar formats c as a quoted character literal
func quoteChar(c int) string {
// special cases - different from quoted strings
if c == '\'' {
return `'\''`
}
if c == '"' {
return `'"'`
}
// use quoted string with different quotation marks
s := strconv.Quote(string(c))
return "'" + s[1:len(s)-1] + "'"
}
// undo causes the scanner to return scanCode from the next state transition.
// This gives callers a simple 1-byte undo mechanism.
func (s *scanner) undo(scanCode int) {
if s.redo {
panic("json: invalid use of scanner")
}
s.redoCode = scanCode
s.redoState = s.step
s.step = stateRedo
s.redo = true
}
// stateRedo helps implement the scanner's 1-byte undo.
func stateRedo(s *scanner, c int) int {
s.redo = false
s.step = s.redoState
return s.redoCode
}
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json2
import (
"bytes"
"errors"
"io"
)
// A Decoder reads and decodes JSON objects from an input stream.
type Decoder struct {
r io.Reader
buf []byte
d decodeState
scan scanner
err error
}
// NewDecoder returns a new decoder that reads from r.
//
// The decoder introduces its own buffering and may
// read data from r beyond the JSON values requested.
func NewDecoder(r io.Reader) *Decoder {
return &Decoder{r: r}
}
// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
// Number instead of as a float64.
func (dec *Decoder) UseNumber() { dec.d.useNumber = true }
// Decode reads the next JSON-encoded value from its
// input and stores it in the value pointed to by v.
//
// See the documentation for Unmarshal for details about
// the conversion of JSON into a Go value.
func (dec *Decoder) Decode(v interface{}) error {
if dec.err != nil {
return dec.err
}
n, err := dec.readValue()
if err != nil {
return err
}
// Don't save err from unmarshal into dec.err:
// the connection is still usable since we read a complete JSON
// object from it before the error happened.
dec.d.init(dec.buf[0:n])
err = dec.d.unmarshal(v)
// Slide rest of data down.
rest := copy(dec.buf, dec.buf[n:])
dec.buf = dec.buf[0:rest]
return err
}
// Buffered returns a reader of the data remaining in the Decoder's
// buffer. The reader is valid until the next call to Decode.
func (dec *Decoder) Buffered() io.Reader {
return bytes.NewReader(dec.buf)
}
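// Streaming-decode sketch, assuming an input stream carrying several
// whitespace-separated JSON values (the struct shape is hypothetical):
//
//     dec := NewDecoder(strings.NewReader(`{"n":1} {"n":2}`))
//     var v struct{ N int }
//     for dec.Decode(&v) == nil {
//         // v.N is 1 on the first pass and 2 on the second; the third call
//         // returns io.EOF and ends the loop.
//     }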
// readValue reads a JSON value into dec.buf.
// It returns the length of the encoding.
func (dec *Decoder) readValue() (int, error) {
dec.scan.reset()
scanp := 0
var err error
Input:
for {
// Look in the buffer for a new value.
for i, c := range dec.buf[scanp:] {
dec.scan.bytes++
v := dec.scan.step(&dec.scan, int(c))
if v == scanEnd {
scanp += i
break Input
}
// scanEnd is delayed one byte.
// We might block trying to get that byte from src,
// so instead invent a space byte.
if (v == scanEndObject || v == scanEndArray) && dec.scan.step(&dec.scan, ' ') == scanEnd {
scanp += i + 1
break Input
}
if v == scanError {
dec.err = dec.scan.err
return 0, dec.scan.err
}
}
scanp = len(dec.buf)
// Did the last read have an error?
// Delayed until now to allow buffer scan.
if err != nil {
if err == io.EOF {
if dec.scan.step(&dec.scan, ' ') == scanEnd {
break Input
}
if nonSpace(dec.buf) {
err = io.ErrUnexpectedEOF
}
}
dec.err = err
return 0, err
}
// Make room to read more into the buffer.
const minRead = 512
if cap(dec.buf)-len(dec.buf) < minRead {
newBuf := make([]byte, len(dec.buf), 2*cap(dec.buf)+minRead)
copy(newBuf, dec.buf)
dec.buf = newBuf
}
// Read. Delay error for next iteration (after scan).
var n int
n, err = dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)])
dec.buf = dec.buf[0 : len(dec.buf)+n]
}
return scanp, nil
}
func nonSpace(b []byte) bool {
for _, c := range b {
if !isSpace(rune(c)) {
return true
}
}
return false
}
// An Encoder writes JSON objects to an output stream.
type Encoder struct {
w io.Writer
e encodeState
err error
}
// NewEncoder returns a new encoder that writes to w.
func NewEncoder(w io.Writer) *Encoder {
return &Encoder{w: w}
}
// Encode writes the JSON encoding of v to the stream.
//
// See the documentation for Marshal for details about the
// conversion of Go values to JSON.
func (enc *Encoder) Encode(v interface{}) error {
if enc.err != nil {
return enc.err
}
e := newEncodeState()
err := e.marshal(v)
if err != nil {
return err
}
// Terminate each value with a newline.
// This makes the output look a little nicer
// when debugging, and some kind of space
// is required if the encoded value was a number,
// so that the reader knows there aren't more
// digits coming.
e.WriteByte('\n')
if _, err = enc.w.Write(e.Bytes()); err != nil {
enc.err = err
}
putEncodeState(e)
return err
}
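// Streaming-encode sketch: each Encode call writes one JSON value followed by
// a newline to the underlying writer (os.Stdout here is just an example):
//
//     enc := NewEncoder(os.Stdout)
//     for _, n := range []int{1, 2, 3} {
//         _ = enc.Encode(map[string]int{"n": n})
//     }
//     // Output:
//     // {"n":1}
//     // {"n":2}
//     // {"n":3}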
// RawMessage is a raw encoded JSON object.
// It implements Marshaler and Unmarshaler and can
// be used to delay JSON decoding or precompute a JSON encoding.
type RawMessage []byte
// MarshalJSON returns *m as the JSON encoding of m.
func (m *RawMessage) MarshalJSON() ([]byte, error) {
return *m, nil
}
// UnmarshalJSON sets *m to a copy of data.
func (m *RawMessage) UnmarshalJSON(data []byte) error {
if m == nil {
return errors.New("json.RawMessage: UnmarshalJSON on nil pointer")
}
*m = append((*m)[0:0], data...)
return nil
}
var _ Marshaler = (*RawMessage)(nil)
var _ Unmarshaler = (*RawMessage)(nil)
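// RawMessage sketch: delay decoding of one field until its type is known. The
// Envelope layout is hypothetical, and Unmarshal is the decoding counterpart
// defined elsewhere in this package:
//
//     type Envelope struct {
//         Type string     `json:"type"`
//         Data RawMessage `json:"data"`
//     }
//
//     var env Envelope
//     _ = Unmarshal([]byte(`{"type":"point","data":{"x":1,"y":2}}`), &env)
//     // env.Data still holds the raw bytes {"x":1,"y":2}; decode them into a
//     // concrete type once env.Type has been inspected.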
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json2
import (
"strings"
)
// tagOptions is the string following a comma in a struct field's "json"
// tag, or the empty string. It does not include the leading comma.
type tagOptions string
// parseTag splits a struct field's json tag into its name and
// comma-separated options.
func parseTag(tag string) (string, tagOptions) {
if idx := strings.Index(tag, ","); idx != -1 {
return tag[:idx], tagOptions(tag[idx+1:])
}
return tag, tagOptions("")
}
// Contains reports whether a comma-separated list of options
// contains the given optionName flag. optionName must be surrounded by a
// string boundary or commas.
func (o tagOptions) Contains(optionName string) bool {
if len(o) == 0 {
return false
}
s := string(o)
for s != "" {
var next string
i := strings.Index(s, ",")
if i >= 0 {
s, next = s[:i], s[i+1:]
}
if s == optionName {
return true
}
s = next
}
return false
}
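// Tag-parsing sketch showing how the helpers above split a struct tag value:
//
//     name, opts := parseTag("myName,omitempty,string")
//     // name == "myName"
//     // opts.Contains("omitempty") == true
//     // opts.Contains("string")    == true
//     // opts.Contains("omit")      == false  (whole options only, no substrings)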
...@@ -18,6 +18,7 @@ package ast ...@@ -18,6 +18,7 @@ package ast
import ( import (
"io" "io"
. "github.com/pingcap/parser/format"
"github.com/pingcap/parser/model" "github.com/pingcap/parser/model"
"github.com/pingcap/parser/types" "github.com/pingcap/parser/types"
) )
......
...@@ -16,6 +16,7 @@ package ast ...@@ -16,6 +16,7 @@ package ast
import ( import (
"github.com/pingcap/errors" "github.com/pingcap/errors"
"github.com/pingcap/parser/auth" "github.com/pingcap/parser/auth"
. "github.com/pingcap/parser/format"
"github.com/pingcap/parser/model" "github.com/pingcap/parser/model"
"github.com/pingcap/parser/types" "github.com/pingcap/parser/types"
) )
......
...@@ -18,6 +18,7 @@ import ( ...@@ -18,6 +18,7 @@ import (
"github.com/pingcap/errors" "github.com/pingcap/errors"
"github.com/pingcap/parser/auth" "github.com/pingcap/parser/auth"
. "github.com/pingcap/parser/format"
"github.com/pingcap/parser/model" "github.com/pingcap/parser/model"
"github.com/pingcap/parser/mysql" "github.com/pingcap/parser/mysql"
) )
...@@ -1293,6 +1294,7 @@ const ( ...@@ -1293,6 +1294,7 @@ const (
ShowStatus ShowStatus
ShowCollation ShowCollation
ShowCreateTable ShowCreateTable
ShowCreateUser
ShowGrants ShowGrants
ShowTriggers ShowTriggers
ShowProcedureStatus ShowProcedureStatus
...@@ -1323,7 +1325,7 @@ type ShowStmt struct { ...@@ -1323,7 +1325,7 @@ type ShowStmt struct {
Column *ColumnName // Used for `desc table column`. Column *ColumnName // Used for `desc table column`.
Flag int // Some flag parsed from sql, such as FULL. Flag int // Some flag parsed from sql, such as FULL.
Full bool Full bool
User *auth.UserIdentity // Used for show grants. User *auth.UserIdentity // Used for show grants/create user.
IfNotExists bool // Used for `show create database if not exists` IfNotExists bool // Used for `show create database if not exists`
// GlobalScope is used by show variables // GlobalScope is used by show variables
......
...@@ -20,6 +20,7 @@ import ( ...@@ -20,6 +20,7 @@ import (
"strings" "strings"
"github.com/pingcap/errors" "github.com/pingcap/errors"
. "github.com/pingcap/parser/format"
"github.com/pingcap/parser/model" "github.com/pingcap/parser/model"
"github.com/pingcap/parser/opcode" "github.com/pingcap/parser/opcode"
) )
......
...@@ -18,6 +18,7 @@ import ( ...@@ -18,6 +18,7 @@ import (
"io" "io"
"github.com/pingcap/errors" "github.com/pingcap/errors"
. "github.com/pingcap/parser/format"
"github.com/pingcap/parser/model" "github.com/pingcap/parser/model"
"github.com/pingcap/parser/types" "github.com/pingcap/parser/types"
) )
......
...@@ -20,6 +20,7 @@ import ( ...@@ -20,6 +20,7 @@ import (
"github.com/pingcap/errors" "github.com/pingcap/errors"
"github.com/pingcap/parser/auth" "github.com/pingcap/parser/auth"
. "github.com/pingcap/parser/format"
"github.com/pingcap/parser/model" "github.com/pingcap/parser/model"
"github.com/pingcap/parser/mysql" "github.com/pingcap/parser/mysql"
) )
......
...@@ -15,6 +15,7 @@ package ast ...@@ -15,6 +15,7 @@ package ast
import ( import (
"github.com/pingcap/errors" "github.com/pingcap/errors"
. "github.com/pingcap/parser/format"
"github.com/pingcap/parser/model" "github.com/pingcap/parser/model"
) )
......
...@@ -13,12 +13,6 @@ ...@@ -13,12 +13,6 @@
package ast package ast
import (
"fmt"
"io"
"strings"
)
// IsReadOnly checks whether the input ast is readOnly. // IsReadOnly checks whether the input ast is readOnly.
func IsReadOnly(node Node) bool { func IsReadOnly(node Node) bool {
switch st := node.(type) { switch st := node.(type) {
...@@ -65,152 +59,3 @@ func (checker *readOnlyChecker) Enter(in Node) (out Node, skipChildren bool) { ...@@ -65,152 +59,3 @@ func (checker *readOnlyChecker) Enter(in Node) (out Node, skipChildren bool) {
func (checker *readOnlyChecker) Leave(in Node) (out Node, ok bool) { func (checker *readOnlyChecker) Leave(in Node) (out Node, ok bool) {
return in, checker.readOnly return in, checker.readOnly
} }
// RestoreFlags marks the Restore format.
type RestoreFlags uint64
// Mutually exclusive group of `RestoreFlags`:
// [RestoreStringSingleQuotes, RestoreStringDoubleQuotes]
// [RestoreKeyWordUppercase, RestoreKeyWordLowercase]
// [RestoreNameUppercase, RestoreNameLowercase]
// [RestoreNameDoubleQuotes, RestoreNameBackQuotes]
// The flag with the left position in each group has a higher priority.
const (
RestoreStringSingleQuotes RestoreFlags = 1 << iota
RestoreStringDoubleQuotes
RestoreStringEscapeBackslash
RestoreKeyWordUppercase
RestoreKeyWordLowercase
RestoreNameUppercase
RestoreNameLowercase
RestoreNameDoubleQuotes
RestoreNameBackQuotes
)
const (
DefaultRestoreFlags = RestoreStringSingleQuotes | RestoreKeyWordUppercase | RestoreNameBackQuotes
)
func (rf RestoreFlags) has(flag RestoreFlags) bool {
return rf&flag != 0
}
// HasStringSingleQuotesFlag returns a boolean indicating whether `rf` has `RestoreStringSingleQuotes` flag.
func (rf RestoreFlags) HasStringSingleQuotesFlag() bool {
return rf.has(RestoreStringSingleQuotes)
}
// HasStringDoubleQuotesFlag returns a boolean indicating whether `rf` has `RestoreStringDoubleQuotes` flag.
func (rf RestoreFlags) HasStringDoubleQuotesFlag() bool {
return rf.has(RestoreStringDoubleQuotes)
}
// HasStringEscapeBackslashFlag returns a boolean indicating whether `rf` has `RestoreStringEscapeBackslash` flag.
func (rf RestoreFlags) HasStringEscapeBackslashFlag() bool {
return rf.has(RestoreStringEscapeBackslash)
}
// HasKeyWordUppercaseFlag returns a boolean indicating whether `rf` has `RestoreKeyWordUppercase` flag.
func (rf RestoreFlags) HasKeyWordUppercaseFlag() bool {
return rf.has(RestoreKeyWordUppercase)
}
// HasKeyWordLowercaseFlag returns a boolean indicating whether `rf` has `RestoreKeyWordLowercase` flag.
func (rf RestoreFlags) HasKeyWordLowercaseFlag() bool {
return rf.has(RestoreKeyWordLowercase)
}
// HasNameUppercaseFlag returns a boolean indicating whether `rf` has `RestoreNameUppercase` flag.
func (rf RestoreFlags) HasNameUppercaseFlag() bool {
return rf.has(RestoreNameUppercase)
}
// HasNameLowercaseFlag returns a boolean indicating whether `rf` has `RestoreNameLowercase` flag.
func (rf RestoreFlags) HasNameLowercaseFlag() bool {
return rf.has(RestoreNameLowercase)
}
// HasNameDoubleQuotesFlag returns a boolean indicating whether `rf` has `RestoreNameDoubleQuotes` flag.
func (rf RestoreFlags) HasNameDoubleQuotesFlag() bool {
return rf.has(RestoreNameDoubleQuotes)
}
// HasNameBackQuotesFlag returns a boolean indicating whether `rf` has `RestoreNameBackQuotes` flag.
func (rf RestoreFlags) HasNameBackQuotesFlag() bool {
return rf.has(RestoreNameBackQuotes)
}
// RestoreCtx is `Restore` context to hold flags and writer.
type RestoreCtx struct {
Flags RestoreFlags
In io.Writer
JoinLevel int
}
// NewRestoreCtx returns a new `RestoreCtx`.
func NewRestoreCtx(flags RestoreFlags, in io.Writer) *RestoreCtx {
return &RestoreCtx{flags, in, 0}
}
// WriteKeyWord writes the `keyWord` into writer.
// `keyWord` will be converted format(uppercase and lowercase for now) according to `RestoreFlags`.
func (ctx *RestoreCtx) WriteKeyWord(keyWord string) {
switch {
case ctx.Flags.HasKeyWordUppercaseFlag():
keyWord = strings.ToUpper(keyWord)
case ctx.Flags.HasKeyWordLowercaseFlag():
keyWord = strings.ToLower(keyWord)
}
fmt.Fprint(ctx.In, keyWord)
}
// WriteString writes the string into writer
// `str` may be wrapped in quotes and escaped according to RestoreFlags.
func (ctx *RestoreCtx) WriteString(str string) {
if ctx.Flags.HasStringEscapeBackslashFlag() {
str = strings.Replace(str, `\`, `\\`, -1)
}
quotes := ""
switch {
case ctx.Flags.HasStringSingleQuotesFlag():
str = strings.Replace(str, `'`, `''`, -1)
quotes = `'`
case ctx.Flags.HasStringDoubleQuotesFlag():
str = strings.Replace(str, `"`, `""`, -1)
quotes = `"`
}
fmt.Fprint(ctx.In, quotes, str, quotes)
}
// WriteName writes the name into writer
// `name` may be wrapped in quotes and escaped according to RestoreFlags.
func (ctx *RestoreCtx) WriteName(name string) {
switch {
case ctx.Flags.HasNameUppercaseFlag():
name = strings.ToUpper(name)
case ctx.Flags.HasNameLowercaseFlag():
name = strings.ToLower(name)
}
quotes := ""
switch {
case ctx.Flags.HasNameDoubleQuotesFlag():
name = strings.Replace(name, `"`, `""`, -1)
quotes = `"`
case ctx.Flags.HasNameBackQuotesFlag():
name = strings.Replace(name, "`", "``", -1)
quotes = "`"
}
fmt.Fprint(ctx.In, quotes, name, quotes)
}
// WritePlain writes the plain text into writer without any handling.
func (ctx *RestoreCtx) WritePlain(plainText string) {
fmt.Fprint(ctx.In, plainText)
}
// WritePlainf writes the plain text into writer without any handling.
func (ctx *RestoreCtx) WritePlainf(format string, a ...interface{}) {
fmt.Fprintf(ctx.In, format, a...)
}
...@@ -21,6 +21,7 @@ import ( ...@@ -21,6 +21,7 @@ import (
"bytes" "bytes"
"fmt" "fmt"
"io" "io"
"strings"
) )
const ( const (
...@@ -193,3 +194,152 @@ func OutputFormat(s string) string { ...@@ -193,3 +194,152 @@ func OutputFormat(s string) string {
return buf.String() return buf.String()
} }
// RestoreFlags marks the Restore format.
type RestoreFlags uint64
// Mutually exclusive group of `RestoreFlags`:
// [RestoreStringSingleQuotes, RestoreStringDoubleQuotes]
// [RestoreKeyWordUppercase, RestoreKeyWordLowercase]
// [RestoreNameUppercase, RestoreNameLowercase]
// [RestoreNameDoubleQuotes, RestoreNameBackQuotes]
// The flag with the left position in each group has a higher priority.
const (
RestoreStringSingleQuotes RestoreFlags = 1 << iota
RestoreStringDoubleQuotes
RestoreStringEscapeBackslash
RestoreKeyWordUppercase
RestoreKeyWordLowercase
RestoreNameUppercase
RestoreNameLowercase
RestoreNameDoubleQuotes
RestoreNameBackQuotes
)
const (
DefaultRestoreFlags = RestoreStringSingleQuotes | RestoreKeyWordUppercase | RestoreNameBackQuotes
)
func (rf RestoreFlags) has(flag RestoreFlags) bool {
return rf&flag != 0
}
// HasStringSingleQuotesFlag returns a boolean indicating whether `rf` has `RestoreStringSingleQuotes` flag.
func (rf RestoreFlags) HasStringSingleQuotesFlag() bool {
return rf.has(RestoreStringSingleQuotes)
}
// HasStringDoubleQuotesFlag returns a boolean indicating whether `rf` has `RestoreStringDoubleQuotes` flag.
func (rf RestoreFlags) HasStringDoubleQuotesFlag() bool {
return rf.has(RestoreStringDoubleQuotes)
}
// HasStringEscapeBackslashFlag returns a boolean indicating whether `rf` has `RestoreStringEscapeBackslash` flag.
func (rf RestoreFlags) HasStringEscapeBackslashFlag() bool {
return rf.has(RestoreStringEscapeBackslash)
}
// HasKeyWordUppercaseFlag returns a boolean indicating whether `rf` has `RestoreKeyWordUppercase` flag.
func (rf RestoreFlags) HasKeyWordUppercaseFlag() bool {
return rf.has(RestoreKeyWordUppercase)
}
// HasKeyWordLowercaseFlag returns a boolean indicating whether `rf` has `RestoreKeyWordLowercase` flag.
func (rf RestoreFlags) HasKeyWordLowercaseFlag() bool {
return rf.has(RestoreKeyWordLowercase)
}
// HasNameUppercaseFlag returns a boolean indicating whether `rf` has `RestoreNameUppercase` flag.
func (rf RestoreFlags) HasNameUppercaseFlag() bool {
return rf.has(RestoreNameUppercase)
}
// HasNameLowercaseFlag returns a boolean indicating whether `rf` has `RestoreNameLowercase` flag.
func (rf RestoreFlags) HasNameLowercaseFlag() bool {
return rf.has(RestoreNameLowercase)
}
// HasNameDoubleQuotesFlag returns a boolean indicating whether `rf` has `RestoreNameDoubleQuotes` flag.
func (rf RestoreFlags) HasNameDoubleQuotesFlag() bool {
return rf.has(RestoreNameDoubleQuotes)
}
// HasNameBackQuotesFlag returns a boolean indicating whether `rf` has `RestoreNameBackQuotes` flag.
func (rf RestoreFlags) HasNameBackQuotesFlag() bool {
return rf.has(RestoreNameBackQuotes)
}
// RestoreCtx is `Restore` context to hold flags and writer.
type RestoreCtx struct {
Flags RestoreFlags
In io.Writer
JoinLevel int
}
// NewRestoreCtx returns a new `RestoreCtx`.
func NewRestoreCtx(flags RestoreFlags, in io.Writer) *RestoreCtx {
return &RestoreCtx{flags, in, 0}
}
// WriteKeyWord writes the `keyWord` into writer.
// `keyWord` will be case-converted (uppercase or lowercase, for now) according to `RestoreFlags`.
func (ctx *RestoreCtx) WriteKeyWord(keyWord string) {
switch {
case ctx.Flags.HasKeyWordUppercaseFlag():
keyWord = strings.ToUpper(keyWord)
case ctx.Flags.HasKeyWordLowercaseFlag():
keyWord = strings.ToLower(keyWord)
}
fmt.Fprint(ctx.In, keyWord)
}
// WriteString writes the string into writer
// `str` may be wrapped in quotes and escaped according to RestoreFlags.
func (ctx *RestoreCtx) WriteString(str string) {
if ctx.Flags.HasStringEscapeBackslashFlag() {
str = strings.Replace(str, `\`, `\\`, -1)
}
quotes := ""
switch {
case ctx.Flags.HasStringSingleQuotesFlag():
str = strings.Replace(str, `'`, `''`, -1)
quotes = `'`
case ctx.Flags.HasStringDoubleQuotesFlag():
str = strings.Replace(str, `"`, `""`, -1)
quotes = `"`
}
fmt.Fprint(ctx.In, quotes, str, quotes)
}
// WriteName writes the name into writer
// `name` may be wrapped in quotes and escaped according to RestoreFlags.
func (ctx *RestoreCtx) WriteName(name string) {
switch {
case ctx.Flags.HasNameUppercaseFlag():
name = strings.ToUpper(name)
case ctx.Flags.HasNameLowercaseFlag():
name = strings.ToLower(name)
}
quotes := ""
switch {
case ctx.Flags.HasNameDoubleQuotesFlag():
name = strings.Replace(name, `"`, `""`, -1)
quotes = `"`
case ctx.Flags.HasNameBackQuotesFlag():
name = strings.Replace(name, "`", "``", -1)
quotes = "`"
}
fmt.Fprint(ctx.In, quotes, name, quotes)
}
// WritePlain writes the plain text into writer without any handling.
func (ctx *RestoreCtx) WritePlain(plainText string) {
fmt.Fprint(ctx.In, plainText)
}
// WritePlainf writes the plain text into writer without any handling.
func (ctx *RestoreCtx) WritePlainf(format string, a ...interface{}) {
fmt.Fprintf(ctx.In, format, a...)
}
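For readers following the relocation of the restore helpers from `ast` into the new `format` package above, here is a minimal usage sketch (not part of this commit) of the `RestoreCtx` API. It assumes the `github.com/pingcap/parser/format` import path introduced by this vendor update; the expected output follows from `DefaultRestoreFlags` (uppercase keywords, back-quoted names, single-quoted strings).

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/pingcap/parser/format"
)

func main() {
	var sb bytes.Buffer
	ctx := format.NewRestoreCtx(format.DefaultRestoreFlags, &sb)

	ctx.WriteKeyWord("select") // uppercased by RestoreKeyWordUppercase
	ctx.WritePlain(" ")
	ctx.WriteName("film") // back-quoted by RestoreNameBackQuotes
	ctx.WritePlain(".")
	ctx.WriteName("title")
	ctx.WriteKeyWord(" from ")
	ctx.WriteName("film")
	ctx.WriteKeyWord(" where ")
	ctx.WriteName("title")
	ctx.WritePlain(" = ")
	ctx.WriteString("Alien") // single-quoted by RestoreStringSingleQuotes

	fmt.Println(sb.String())
	// SELECT `film`.`title` FROM `film` WHERE `title` = 'Alien'
}
```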
...@@ -7,9 +7,9 @@ require ( ...@@ -7,9 +7,9 @@ require (
github.com/cznic/sortutil v0.0.0-20150617083342-4c7342852e65 github.com/cznic/sortutil v0.0.0-20150617083342-4c7342852e65
github.com/cznic/strutil v0.0.0-20171016134553-529a34b1c186 github.com/cznic/strutil v0.0.0-20171016134553-529a34b1c186
github.com/cznic/y v0.0.0-20170802143616-045f81c6662a github.com/cznic/y v0.0.0-20170802143616-045f81c6662a
github.com/pingcap/check v0.0.0-20181213055612-5c2b07721bdb github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8
github.com/pingcap/errors v0.11.0 github.com/pingcap/errors v0.11.0
github.com/pingcap/tidb v0.0.0-20181217070741-096bb68e6bef github.com/pingcap/tidb v0.0.0-20180108134023-971629b9477a
github.com/pingcap/tipb v0.0.0-20181012112600-11e33c750323 github.com/pingcap/tipb v0.0.0-20181012112600-11e33c750323
github.com/sirupsen/logrus v1.2.0 github.com/sirupsen/logrus v1.2.0
golang.org/x/text v0.3.0 golang.org/x/text v0.3.0
......
...@@ -50,6 +50,7 @@ github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeME ...@@ -50,6 +50,7 @@ github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeME
github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E= github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E=
github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
github.com/go-sql-driver/mysql v0.0.0-20170715192408-3955978caca4/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v0.0.0-20170715192408-3955978caca4/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/gogo/protobuf v0.0.0-20180717141946-636bf0302bc9/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo= github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
...@@ -58,6 +59,7 @@ github.com/golang/groupcache v0.0.0-20181024230925-c65c006176ff h1:kOkM9whyQYodu ...@@ -58,6 +59,7 @@ github.com/golang/groupcache v0.0.0-20181024230925-c65c006176ff h1:kOkM9whyQYodu
github.com/golang/groupcache v0.0.0-20181024230925-c65c006176ff/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20181024230925-c65c006176ff/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v0.0.0-20180814211427-aa810b61a9c7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w=
...@@ -95,7 +97,6 @@ github.com/ngaut/pools v0.0.0-20180318154953-b7bc8c42aac7 h1:7KAv7KMGTTqSmYZtNdc ...@@ -95,7 +97,6 @@ github.com/ngaut/pools v0.0.0-20180318154953-b7bc8c42aac7 h1:7KAv7KMGTTqSmYZtNdc
github.com/ngaut/pools v0.0.0-20180318154953-b7bc8c42aac7/go.mod h1:iWMfgwqYW+e8n5lC/jjNEhwcjbRDpl5NT7n2h+4UNcI= github.com/ngaut/pools v0.0.0-20180318154953-b7bc8c42aac7/go.mod h1:iWMfgwqYW+e8n5lC/jjNEhwcjbRDpl5NT7n2h+4UNcI=
github.com/ngaut/sync2 v0.0.0-20141008032647-7a24ed77b2ef h1:K0Fn+DoFqNqktdZtdV3bPQ/0cuYh2H4rkg0tytX/07k= github.com/ngaut/sync2 v0.0.0-20141008032647-7a24ed77b2ef h1:K0Fn+DoFqNqktdZtdV3bPQ/0cuYh2H4rkg0tytX/07k=
github.com/ngaut/sync2 v0.0.0-20141008032647-7a24ed77b2ef/go.mod h1:7WjlapSfwQyo6LNmIvEWzsW1hbBQfpUO4JWnuQRmva8= github.com/ngaut/sync2 v0.0.0-20141008032647-7a24ed77b2ef/go.mod h1:7WjlapSfwQyo6LNmIvEWzsW1hbBQfpUO4JWnuQRmva8=
github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
...@@ -105,21 +106,24 @@ github.com/opentracing/basictracer-go v1.0.0 h1:YyUAhaEfjoWXclZVJ9sGoNct7j4TVk7l ...@@ -105,21 +106,24 @@ github.com/opentracing/basictracer-go v1.0.0 h1:YyUAhaEfjoWXclZVJ9sGoNct7j4TVk7l
github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
github.com/opentracing/opentracing-go v1.0.2 h1:3jA2P6O1F9UOrWVpwrIo17pu01KWvNWg4X946/Y5Zwg= github.com/opentracing/opentracing-go v1.0.2 h1:3jA2P6O1F9UOrWVpwrIo17pu01KWvNWg4X946/Y5Zwg=
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/pingcap/check v0.0.0-20181213055612-5c2b07721bdb h1:RGm4hzUgf7wxELKAzOBV27WFMxBD33OQkDwX6VOs/W4= github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8 h1:USx2/E1bX46VG32FIw034Au6seQ2fY9NEILmNh/UlQg=
github.com/pingcap/check v0.0.0-20181213055612-5c2b07721bdb/go.mod h1:B1+S9LNcuMyLH/4HMTViQOJevkGiik3wW2AN9zb2fNQ= github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8/go.mod h1:B1+S9LNcuMyLH/4HMTViQOJevkGiik3wW2AN9zb2fNQ=
github.com/pingcap/errors v0.11.0 h1:DCJQB8jrHbQ1VVlMFIrbj2ApScNNotVmkSNplu2yUt4= github.com/pingcap/errors v0.11.0 h1:DCJQB8jrHbQ1VVlMFIrbj2ApScNNotVmkSNplu2yUt4=
github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
github.com/pingcap/gofail v0.0.0-20181217135706-6a951c1e42c3 h1:04yuCf5NMvLU8rB2m4Qs3rynH7EYpMno3lHkewIOdMo=
github.com/pingcap/gofail v0.0.0-20181217135706-6a951c1e42c3/go.mod h1:DazNTg0PTldtpsQiT9I5tVJwV1onHMKBBgXzmJUlMns=
github.com/pingcap/goleveldb v0.0.0-20171020122428-b9ff6c35079e h1:P73/4dPCL96rGrobssy1nVy2VaVpNCuLpCbr+FEaTA8= github.com/pingcap/goleveldb v0.0.0-20171020122428-b9ff6c35079e h1:P73/4dPCL96rGrobssy1nVy2VaVpNCuLpCbr+FEaTA8=
github.com/pingcap/goleveldb v0.0.0-20171020122428-b9ff6c35079e/go.mod h1:O17XtbryoCJhkKGbT62+L2OlrniwqiGLSqrmdHCMzZw= github.com/pingcap/goleveldb v0.0.0-20171020122428-b9ff6c35079e/go.mod h1:O17XtbryoCJhkKGbT62+L2OlrniwqiGLSqrmdHCMzZw=
github.com/pingcap/kvproto v0.0.0-20181105061835-1b5d69cd1d26 h1:JK4VLNYbSn36QSbCnqALi2ySXdH0DfcMssT/zmLf4Ls= github.com/pingcap/kvproto v0.0.0-20181203065228-c14302da291c h1:Qf5St5XGwKgKQLar9lEXoeO0hJMVaFBj3JqvFguWtVg=
github.com/pingcap/kvproto v0.0.0-20181105061835-1b5d69cd1d26/go.mod h1:0gwbe1F2iBIjuQ9AH0DbQhL+Dpr5GofU8fgYyXk+ykk= github.com/pingcap/kvproto v0.0.0-20181203065228-c14302da291c/go.mod h1:Ja9XPjot9q4/3JyCZodnWDGNXt4pKemhIYCvVJM7P24=
github.com/pingcap/parser v0.0.0-20181214132045-732efe993f70/go.mod h1:1FNvfp9+J0wvc4kl8eGNh7Rqrxveg15jJoWo/a0uHwA= github.com/pingcap/parser v0.0.0-20190108044100-02812c3c22e7/go.mod h1:1FNvfp9+J0wvc4kl8eGNh7Rqrxveg15jJoWo/a0uHwA=
github.com/pingcap/pd v2.1.0-rc.4+incompatible h1:/buwGk04aHO5odk/+O8ZOXGs4qkUjYTJ2UpCJXna8NE= github.com/pingcap/pd v2.1.0-rc.4+incompatible h1:/buwGk04aHO5odk/+O8ZOXGs4qkUjYTJ2UpCJXna8NE=
github.com/pingcap/pd v2.1.0-rc.4+incompatible/go.mod h1:nD3+EoYes4+aNNODO99ES59V83MZSI+dFbhyr667a0E= github.com/pingcap/pd v2.1.0-rc.4+incompatible/go.mod h1:nD3+EoYes4+aNNODO99ES59V83MZSI+dFbhyr667a0E=
github.com/pingcap/tidb v0.0.0-20181217070741-096bb68e6bef h1:a00XEAUzCi+RlsZcAg/LJx3zTL6FY+lwPwyxz5ZlnsI= github.com/pingcap/tidb v0.0.0-20180108134023-971629b9477a h1:+UaU3bgZJtmgn1IJOWQU63z+HaVNhOfyxT8puMZ32rc=
github.com/pingcap/tidb v0.0.0-20181217070741-096bb68e6bef/go.mod h1:YrstANCcWGHO/mbgK4nofaNCj3zOpmkhmfMwlltzPSE= github.com/pingcap/tidb v0.0.0-20180108134023-971629b9477a/go.mod h1:ytMJRc0YwJxWtwJpuu60xXIXHSTGwi1jhmt3TnxifYw=
github.com/pingcap/tidb-tools v0.0.0-20181112132202-4860a0d5de03 h1:xVuo5U+l6XAWHsb+xhkZ8zz3jerIwDfCHAO6kR2Kaog= github.com/pingcap/tidb-tools v2.1.1-0.20181218072513-b2235d442b06+incompatible h1:Bsd+NHosPVowEGB3BCx+2d8wUQGDTXSSC5ljeNS6cXo=
github.com/pingcap/tidb-tools v0.0.0-20181112132202-4860a0d5de03/go.mod h1:XGdcy9+yqlDSEMTpOXnwf3hiTeqrV6MN/u1se9N8yIM= github.com/pingcap/tidb-tools v2.1.1-0.20181218072513-b2235d442b06+incompatible/go.mod h1:XGdcy9+yqlDSEMTpOXnwf3hiTeqrV6MN/u1se9N8yIM=
github.com/pingcap/tipb v0.0.0-20170310053819-1043caee48da/go.mod h1:RtkHW8WbcNxj8lsbzjaILci01CtYnYbIkQhjyZWrWVI=
github.com/pingcap/tipb v0.0.0-20181012112600-11e33c750323 h1:mRKKzRjDNaUNPnAkPAHnRqpNmwNWBX1iA+hxlmvQ93I= github.com/pingcap/tipb v0.0.0-20181012112600-11e33c750323 h1:mRKKzRjDNaUNPnAkPAHnRqpNmwNWBX1iA+hxlmvQ93I=
github.com/pingcap/tipb v0.0.0-20181012112600-11e33c750323/go.mod h1:RtkHW8WbcNxj8lsbzjaILci01CtYnYbIkQhjyZWrWVI= github.com/pingcap/tipb v0.0.0-20181012112600-11e33c750323/go.mod h1:RtkHW8WbcNxj8lsbzjaILci01CtYnYbIkQhjyZWrWVI=
github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
...@@ -178,24 +182,28 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf ...@@ -178,24 +182,28 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181029044818-c44066c5c816 h1:mVFkLpejdFLXVUv9E42f3XJVfMdqd0IVLVIVLjZWn5o= golang.org/x/net v0.0.0-20181029044818-c44066c5c816 h1:mVFkLpejdFLXVUv9E42f3XJVfMdqd0IVLVIVLjZWn5o=
golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33 h1:I6FyU15t786LL7oL/hn43zqTuEGr4PN7F4XJ1p4E3Y8=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/text v0.0.0-20171214130843-f21a4dfb5e38/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181008205924-a2b3f7f249e9/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20181004005441-af9cb2a35e7f h1:FU37niK8AQ59mHcskRyQL7H0ErSeNh650vdcj8HqdSI=
google.golang.org/genproto v0.0.0-20181004005441-af9cb2a35e7f/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/grpc v0.0.0-20180607172857-7a6a684ca69e/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.16.0 h1:dz5IJGuC2BB7qXR5AyHNwAUBhZscK2xVez7mznh72sY= google.golang.org/grpc v1.16.0 h1:dz5IJGuC2BB7qXR5AyHNwAUBhZscK2xVez7mznh72sY=
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
......
...@@ -123,11 +123,17 @@ func (s *Scanner) stmtText() string { ...@@ -123,11 +123,17 @@ func (s *Scanner) stmtText() string {
// Scanner satisfies yyLexer interface which need this function. // Scanner satisfies yyLexer interface which need this function.
func (s *Scanner) Errorf(format string, a ...interface{}) { func (s *Scanner) Errorf(format string, a ...interface{}) {
str := fmt.Sprintf(format, a...) str := fmt.Sprintf(format, a...)
val := s.r.s[s.r.pos().Offset:] col := s.r.p.Col
startPos := s.stmtStartPos
if s.r.s[startPos] == '\n' {
startPos++
col--
}
val := s.r.s[startPos:]
if len(val) > 2048 { if len(val) > 2048 {
val = val[:2048] val = val[:2048]
} }
err := fmt.Errorf("line %d column %d near \"%s\"%s (total length %d)", s.r.p.Line, s.r.p.Col, val, str, len(s.r.s)) err := fmt.Errorf("line %d column %d near \"%s\"%s (total length %d)", s.r.p.Line, col, val, str, len(s.r.s))
s.errs = append(s.errs, err) s.errs = append(s.errs, err)
} }
......
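The lexer hunk above adjusts error reporting when the scanned statement text begins with a newline: the quoted excerpt now starts after that newline and the reported column is decremented to match. The sketch below is only an illustration of how this surfaces through the public parser API; it assumes the stable `parser.New`/`ParseOneStmt` signatures and the blank `parser_driver` import vendored by this project, and the error text in the comment is approximate, not verified output.

```go
package main

import (
	"fmt"

	"github.com/pingcap/parser"
	_ "github.com/pingcap/tidb/types/parser_driver" // registers the value-expression driver
)

func main() {
	p := parser.New()
	// The statement begins with "\n"; with the fix above, the `near "..."` excerpt
	// in the error should no longer start with that leading newline.
	_, err := p.ParseOneStmt("\nSELECT FROM film", "", "")
	fmt.Println(err) // e.g. line 2 column 8 near "FROM film" ... (approximate)
}
```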
...@@ -399,7 +399,7 @@ var AllColumnPrivs = []PrivilegeType{SelectPriv, InsertPriv, UpdatePriv} ...@@ -399,7 +399,7 @@ var AllColumnPrivs = []PrivilegeType{SelectPriv, InsertPriv, UpdatePriv}
const AllPrivilegeLiteral = "ALL PRIVILEGES" const AllPrivilegeLiteral = "ALL PRIVILEGES"
// DefaultSQLMode for GLOBAL_VARIABLES // DefaultSQLMode for GLOBAL_VARIABLES
const DefaultSQLMode = "ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION" const DefaultSQLMode = "ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION"
// DefaultLengthOfMysqlTypes is the map for default physical length of MySQL data types. // DefaultLengthOfMysqlTypes is the map for default physical length of MySQL data types.
// See http://dev.mysql.com/doc/refman/5.7/en/storage-requirements.html // See http://dev.mysql.com/doc/refman/5.7/en/storage-requirements.html
......
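The change above extends the parser's `DefaultSQLMode` with MySQL 5.7's stricter date and division modes. A trivial, hedged check (relying only on the exported constant shown in the hunk) that the new modes are present:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/pingcap/parser/mysql"
)

func main() {
	for _, mode := range []string{"NO_ZERO_IN_DATE", "NO_ZERO_DATE", "ERROR_FOR_DIVISION_BY_ZERO"} {
		// Each of the newly added modes should now be part of the default.
		fmt.Println(mode, strings.Contains(mysql.DefaultSQLMode, mode))
	}
}
```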
The source diff for this file is too large to display. You can view the blob instead.
...@@ -600,7 +600,7 @@ import ( ...@@ -600,7 +600,7 @@ import (
RevokeStmt "Revoke statement" RevokeStmt "Revoke statement"
RollbackStmt "ROLLBACK statement" RollbackStmt "ROLLBACK statement"
SetStmt "Set variable statement" SetStmt "Set variable statement"
ShowStmt "Show engines/databases/tables/columns/warnings/status statement" ShowStmt "Show engines/databases/tables/user/columns/warnings/status statement"
Statement "statement" Statement "statement"
TraceStmt "TRACE statement" TraceStmt "TRACE statement"
TraceableStmt "traceable statment" TraceableStmt "traceable statment"
...@@ -5907,6 +5907,14 @@ ShowStmt: ...@@ -5907,6 +5907,14 @@ ShowStmt:
DBName: $5.(string), DBName: $5.(string),
} }
} }
| "SHOW" "CREATE" "USER" Username
{
// See https://dev.mysql.com/doc/refman/5.7/en/show-create-user.html
$$ = &ast.ShowStmt{
Tp: ast.ShowCreateUser,
User: $4.(*auth.UserIdentity),
}
}
| "SHOW" "GRANTS" | "SHOW" "GRANTS"
{ {
// See https://dev.mysql.com/doc/refman/5.7/en/show-grants.html // See https://dev.mysql.com/doc/refman/5.7/en/show-grants.html
......
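With the grammar rule added above, `SHOW CREATE USER` now parses into an `ast.ShowStmt`. A minimal sketch (not part of the commit, assuming the public `parser.New`/`ParseOneStmt` API and the `auth.UserIdentity` fields referenced by the rule):

```go
package main

import (
	"fmt"

	"github.com/pingcap/parser"
	"github.com/pingcap/parser/ast"
	_ "github.com/pingcap/tidb/types/parser_driver" // value-expression driver, as vendored here
)

func main() {
	p := parser.New()
	node, err := p.ParseOneStmt("SHOW CREATE USER 'root'@'localhost'", "", "")
	if err != nil {
		panic(err)
	}
	show := node.(*ast.ShowStmt)
	// Tp should be ast.ShowCreateUser and User should carry the parsed identity.
	fmt.Println(show.Tp == ast.ShowCreateUser, show.User.Username, show.User.Hostname)
	// Expected: true root localhost
}
```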
...@@ -20,6 +20,7 @@ import ( ...@@ -20,6 +20,7 @@ import (
"github.com/pingcap/errors" "github.com/pingcap/errors"
"github.com/pingcap/parser/ast" "github.com/pingcap/parser/ast"
"github.com/pingcap/parser/format"
"github.com/pingcap/parser/mysql" "github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/types" "github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/hack" "github.com/pingcap/tidb/util/hack"
...@@ -69,7 +70,7 @@ type ValueExpr struct { ...@@ -69,7 +70,7 @@ type ValueExpr struct {
} }
// Restore implements Node interface. // Restore implements Node interface.
func (n *ValueExpr) Restore(ctx *ast.RestoreCtx) error { func (n *ValueExpr) Restore(ctx *format.RestoreCtx) error {
switch n.Kind() { switch n.Kind() {
case types.KindNull: case types.KindNull:
ctx.WriteKeyWord("NULL") ctx.WriteKeyWord("NULL")
...@@ -195,7 +196,7 @@ type ParamMarkerExpr struct { ...@@ -195,7 +196,7 @@ type ParamMarkerExpr struct {
} }
// Restore implements Node interface. // Restore implements Node interface.
func (n *ParamMarkerExpr) Restore(ctx *ast.RestoreCtx) error { func (n *ParamMarkerExpr) Restore(ctx *format.RestoreCtx) error {
ctx.WritePlain("?") ctx.WritePlain("?")
return nil return nil
} }
......
...@@ -2165,6 +2165,8 @@ var dateFormatParserTable = map[string]dateFormatParser{ ...@@ -2165,6 +2165,8 @@ var dateFormatParserTable = map[string]dateFormatParser{
"%S": secondsNumeric, // Seconds (00..59) "%S": secondsNumeric, // Seconds (00..59)
"%T": time24Hour, // Time, 24-hour (hh:mm:ss) "%T": time24Hour, // Time, 24-hour (hh:mm:ss)
"%Y": yearNumericFourDigits, // Year, numeric, four digits "%Y": yearNumericFourDigits, // Year, numeric, four digits
// Deprecated since MySQL 5.7.5
"%y": yearNumericTwoDigits, // Year, numeric (two digits)
// TODO: Add the following... // TODO: Add the following...
// "%a": abbreviatedWeekday, // Abbreviated weekday name (Sun..Sat) // "%a": abbreviatedWeekday, // Abbreviated weekday name (Sun..Sat)
// "%D": dayOfMonthWithSuffix, // Day of the month with English suffix (0th, 1st, 2nd, 3rd) // "%D": dayOfMonthWithSuffix, // Day of the month with English suffix (0th, 1st, 2nd, 3rd)
...@@ -2176,8 +2178,6 @@ var dateFormatParserTable = map[string]dateFormatParser{ ...@@ -2176,8 +2178,6 @@ var dateFormatParserTable = map[string]dateFormatParser{
// "%w": dayOfWeek, // Day of the week (0=Sunday..6=Saturday) // "%w": dayOfWeek, // Day of the week (0=Sunday..6=Saturday)
// "%X": yearOfWeek, // Year for the week where Sunday is the first day of the week, numeric, four digits; used with %V // "%X": yearOfWeek, // Year for the week where Sunday is the first day of the week, numeric, four digits; used with %V
// "%x": yearOfWeek, // Year for the week, where Monday is the first day of the week, numeric, four digits; used with %v // "%x": yearOfWeek, // Year for the week, where Monday is the first day of the week, numeric, four digits; used with %v
// Deprecated since MySQL 5.7.5
// "%y": yearTwoDigits, // Year, numeric (two digits)
} }
// GetFormatType checks the type(Duration, Date or Datetime) of a format string. // GetFormatType checks the type(Duration, Date or Datetime) of a format string.
...@@ -2235,7 +2235,7 @@ func matchDateWithToken(t *MysqlTime, date string, token string, ctx map[string] ...@@ -2235,7 +2235,7 @@ func matchDateWithToken(t *MysqlTime, date string, token string, ctx map[string]
} }
func parseDigits(input string, count int) (int, bool) { func parseDigits(input string, count int) (int, bool) {
if len(input) < count { if count <= 0 || len(input) < count {
return 0, false return 0, false
} }
...@@ -2432,12 +2432,31 @@ func microSeconds(t *MysqlTime, input string, ctx map[string]int) (string, bool) ...@@ -2432,12 +2432,31 @@ func microSeconds(t *MysqlTime, input string, ctx map[string]int) (string, bool)
} }
func yearNumericFourDigits(t *MysqlTime, input string, ctx map[string]int) (string, bool) { func yearNumericFourDigits(t *MysqlTime, input string, ctx map[string]int) (string, bool) {
v, succ := parseDigits(input, 4) return yearNumericNDigits(t, input, ctx, 4)
if !succ { }
func yearNumericTwoDigits(t *MysqlTime, input string, ctx map[string]int) (string, bool) {
return yearNumericNDigits(t, input, ctx, 2)
}
func yearNumericNDigits(t *MysqlTime, input string, ctx map[string]int, n int) (string, bool) {
effectiveCount, effectiveValue := 0, 0
for effectiveCount+1 <= n {
value, succeed := parseDigits(input, effectiveCount+1)
if !succeed {
break
}
effectiveCount++
effectiveValue = value
}
if effectiveCount == 0 {
return input, false return input, false
} }
t.year = uint16(v) if effectiveCount <= 2 {
return input[4:], true effectiveValue = adjustYear(effectiveValue)
}
t.year = uint16(effectiveValue)
return input[effectiveCount:], true
} }
func dayOfYearThreeDigits(t *MysqlTime, input string, ctx map[string]int) (string, bool) { func dayOfYearThreeDigits(t *MysqlTime, input string, ctx map[string]int) (string, bool) {
......
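The rewrite above generalizes the four-digit year parser to N digits so that the `%y` specifier (two-digit years) can be handled, routing one- and two-digit values through `adjustYear`. The sketch below is a standalone restatement of MySQL's documented two-digit-year rule, which is assumed (not shown in this diff) to be what `adjustYear` implements: 00-69 map to 2000-2069 and 70-99 to 1970-1999.

```go
package main

import "fmt"

// adjustTwoDigitYear mirrors MySQL's two-digit year convention:
// 00-69 -> 2000-2069, 70-99 -> 1970-1999; other values pass through.
func adjustTwoDigitYear(y int) int {
	switch {
	case y >= 0 && y <= 69:
		return 2000 + y
	case y >= 70 && y <= 99:
		return 1900 + y
	default:
		return y // already a full four-digit year
	}
}

func main() {
	fmt.Println(adjustTwoDigitYear(19))   // 2019, e.g. '%y' parsing "19-01-08"
	fmt.Println(adjustTwoDigitYear(70))   // 1970
	fmt.Println(adjustTwoDigitYear(1999)) // 1999
}
```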
...@@ -111,106 +111,106 @@ ...@@ -111,106 +111,106 @@
"revisionTime": "2018-10-24T15:10:47Z" "revisionTime": "2018-10-24T15:10:47Z"
}, },
{ {
"checksumSHA1": "oPVvRBag6XbaB4dN38RkdejKr70=", "checksumSHA1": "DJypl3jfSRspsBR2fc6eyBktkq4=",
"path": "github.com/pingcap/parser", "path": "github.com/pingcap/parser",
"revision": "5f15dc90ca5964d59634063e29c22ff6c7d9e49e", "revision": "35fab0be7fca9ea7f7b5350af83a8d1e2775abe4",
"revisionTime": "2019-01-05T06:04:45Z" "revisionTime": "2019-01-08T10:41:42Z"
}, },
{ {
"checksumSHA1": "+KVexpbQ1kxBZA/iUahnFkIUGsU=", "checksumSHA1": "+IqkkjB5E83Q1mUjZ1V+75iHocA=",
"path": "github.com/pingcap/parser/ast", "path": "github.com/pingcap/parser/ast",
"revision": "5f15dc90ca5964d59634063e29c22ff6c7d9e49e", "revision": "35fab0be7fca9ea7f7b5350af83a8d1e2775abe4",
"revisionTime": "2019-01-05T06:04:45Z" "revisionTime": "2019-01-08T10:41:42Z"
}, },
{ {
"checksumSHA1": "skWGV4FNvD3vr+5olepaPPnylUw=", "checksumSHA1": "skWGV4FNvD3vr+5olepaPPnylUw=",
"path": "github.com/pingcap/parser/auth", "path": "github.com/pingcap/parser/auth",
"revision": "5f15dc90ca5964d59634063e29c22ff6c7d9e49e", "revision": "35fab0be7fca9ea7f7b5350af83a8d1e2775abe4",
"revisionTime": "2019-01-05T06:04:45Z" "revisionTime": "2019-01-08T10:41:42Z"
}, },
{ {
"checksumSHA1": "t4UHo966WzU9Z0IJkyGHRp0loOk=", "checksumSHA1": "t4UHo966WzU9Z0IJkyGHRp0loOk=",
"path": "github.com/pingcap/parser/charset", "path": "github.com/pingcap/parser/charset",
"revision": "5f15dc90ca5964d59634063e29c22ff6c7d9e49e", "revision": "35fab0be7fca9ea7f7b5350af83a8d1e2775abe4",
"revisionTime": "2019-01-05T06:04:45Z" "revisionTime": "2019-01-08T10:41:42Z"
}, },
{ {
"checksumSHA1": "SInoXbsRe0tnBwmatmtZYfSFbdk=", "checksumSHA1": "ohLJW2u9NJEzYIJL/AjOqcuKfMY=",
"path": "github.com/pingcap/parser/format", "path": "github.com/pingcap/parser/format",
"revision": "5f15dc90ca5964d59634063e29c22ff6c7d9e49e", "revision": "35fab0be7fca9ea7f7b5350af83a8d1e2775abe4",
"revisionTime": "2019-01-05T06:04:45Z" "revisionTime": "2019-01-08T10:41:42Z"
}, },
{ {
"checksumSHA1": "ZADwr2/PcEd9VI3XF9OvN4HkJ+8=", "checksumSHA1": "ZADwr2/PcEd9VI3XF9OvN4HkJ+8=",
"path": "github.com/pingcap/parser/model", "path": "github.com/pingcap/parser/model",
"revision": "5f15dc90ca5964d59634063e29c22ff6c7d9e49e", "revision": "35fab0be7fca9ea7f7b5350af83a8d1e2775abe4",
"revisionTime": "2019-01-05T06:04:45Z" "revisionTime": "2019-01-08T10:41:42Z"
}, },
{ {
"checksumSHA1": "kkqyRzO7TCqnABxjJEo+JclJZLM=", "checksumSHA1": "ge+W5BLlgqKIlvmappsPTLgVJLk=",
"path": "github.com/pingcap/parser/mysql", "path": "github.com/pingcap/parser/mysql",
"revision": "5f15dc90ca5964d59634063e29c22ff6c7d9e49e", "revision": "35fab0be7fca9ea7f7b5350af83a8d1e2775abe4",
"revisionTime": "2019-01-05T06:04:45Z" "revisionTime": "2019-01-08T10:41:42Z"
}, },
{ {
"checksumSHA1": "mxpiJJ3b08I0o0Sd2rJLYMwz7uw=", "checksumSHA1": "mxpiJJ3b08I0o0Sd2rJLYMwz7uw=",
"path": "github.com/pingcap/parser/opcode", "path": "github.com/pingcap/parser/opcode",
"revision": "5f15dc90ca5964d59634063e29c22ff6c7d9e49e", "revision": "35fab0be7fca9ea7f7b5350af83a8d1e2775abe4",
"revisionTime": "2019-01-05T06:04:45Z" "revisionTime": "2019-01-08T10:41:42Z"
}, },
{ {
"checksumSHA1": "XvnUllvwMYd6HrMvMiKnn4cGN2M=", "checksumSHA1": "XvnUllvwMYd6HrMvMiKnn4cGN2M=",
"path": "github.com/pingcap/parser/terror", "path": "github.com/pingcap/parser/terror",
"revision": "5f15dc90ca5964d59634063e29c22ff6c7d9e49e", "revision": "35fab0be7fca9ea7f7b5350af83a8d1e2775abe4",
"revisionTime": "2019-01-05T06:04:45Z" "revisionTime": "2019-01-08T10:41:42Z"
}, },
{ {
"checksumSHA1": "CpuZhpMNeho4tIFPwY2GUDvuEfQ=", "checksumSHA1": "CpuZhpMNeho4tIFPwY2GUDvuEfQ=",
"path": "github.com/pingcap/parser/types", "path": "github.com/pingcap/parser/types",
"revision": "5f15dc90ca5964d59634063e29c22ff6c7d9e49e", "revision": "35fab0be7fca9ea7f7b5350af83a8d1e2775abe4",
"revisionTime": "2019-01-05T06:04:45Z" "revisionTime": "2019-01-08T10:41:42Z"
}, },
{ {
"checksumSHA1": "MxoLdFWi8nwd0uqTJnYqw+JaDAY=", "checksumSHA1": "MxoLdFWi8nwd0uqTJnYqw+JaDAY=",
"path": "github.com/pingcap/tidb/sessionctx/stmtctx", "path": "github.com/pingcap/tidb/sessionctx/stmtctx",
"revision": "78a51a4626999279749c460f3f42a2e92897c2e3", "revision": "c68ee7318319b34fbad53d5abf18275b0273ae41",
"revisionTime": "2019-01-05T13:32:32Z" "revisionTime": "2019-01-08T12:33:36Z"
}, },
{ {
"checksumSHA1": "wlD7aGqTJ5eBQYK0ub4b2Ick1j8=", "checksumSHA1": "1PFyexjkPlACP5S2pRrT6TsjcQ0=",
"path": "github.com/pingcap/tidb/types", "path": "github.com/pingcap/tidb/types",
"revision": "78a51a4626999279749c460f3f42a2e92897c2e3", "revision": "c68ee7318319b34fbad53d5abf18275b0273ae41",
"revisionTime": "2019-01-05T13:32:32Z" "revisionTime": "2019-01-08T12:33:36Z"
}, },
{ {
"checksumSHA1": "DWVD7+ygtT66IQ+cqXmMJ5OVqUk=", "checksumSHA1": "DWVD7+ygtT66IQ+cqXmMJ5OVqUk=",
"path": "github.com/pingcap/tidb/types/json", "path": "github.com/pingcap/tidb/types/json",
"revision": "78a51a4626999279749c460f3f42a2e92897c2e3", "revision": "c68ee7318319b34fbad53d5abf18275b0273ae41",
"revisionTime": "2019-01-05T13:32:32Z" "revisionTime": "2019-01-08T12:33:36Z"
}, },
{ {
"checksumSHA1": "6vi/eCZXqNTa5eAUpxDZet4LPlY=", "checksumSHA1": "yKeU1hJFc7X3afXESYV0Wz5ZPXQ=",
"path": "github.com/pingcap/tidb/types/parser_driver", "path": "github.com/pingcap/tidb/types/parser_driver",
"revision": "78a51a4626999279749c460f3f42a2e92897c2e3", "revision": "c68ee7318319b34fbad53d5abf18275b0273ae41",
"revisionTime": "2019-01-05T13:32:32Z" "revisionTime": "2019-01-08T12:33:36Z"
}, },
{ {
"checksumSHA1": "SS7twHZofFKr8w/pwIKmkp3u5qU=", "checksumSHA1": "SS7twHZofFKr8w/pwIKmkp3u5qU=",
"path": "github.com/pingcap/tidb/util/execdetails", "path": "github.com/pingcap/tidb/util/execdetails",
"revision": "78a51a4626999279749c460f3f42a2e92897c2e3", "revision": "c68ee7318319b34fbad53d5abf18275b0273ae41",
"revisionTime": "2019-01-05T13:32:32Z" "revisionTime": "2019-01-08T12:33:36Z"
}, },
{ {
"checksumSHA1": "nUC7zVoAMNR2a+z2iGqHoN2AkFE=", "checksumSHA1": "nUC7zVoAMNR2a+z2iGqHoN2AkFE=",
"path": "github.com/pingcap/tidb/util/hack", "path": "github.com/pingcap/tidb/util/hack",
"revision": "78a51a4626999279749c460f3f42a2e92897c2e3", "revision": "c68ee7318319b34fbad53d5abf18275b0273ae41",
"revisionTime": "2019-01-05T13:32:32Z" "revisionTime": "2019-01-08T12:33:36Z"
}, },
{ {
"checksumSHA1": "xSyepiuqsoaaeDch7cXeumvVHKM=", "checksumSHA1": "xSyepiuqsoaaeDch7cXeumvVHKM=",
"path": "github.com/pingcap/tidb/util/memory", "path": "github.com/pingcap/tidb/util/memory",
"revision": "78a51a4626999279749c460f3f42a2e92897c2e3", "revision": "c68ee7318319b34fbad53d5abf18275b0273ae41",
"revisionTime": "2019-01-05T13:32:32Z" "revisionTime": "2019-01-08T12:33:36Z"
}, },
{ {
"checksumSHA1": "SmYeIK/fIYXNu8IKxD6HOVQVTuU=", "checksumSHA1": "SmYeIK/fIYXNu8IKxD6HOVQVTuU=",
...@@ -407,62 +407,62 @@ ...@@ -407,62 +407,62 @@
{ {
"checksumSHA1": "aKn1oKcY74N8TRLm3Ayt7Q4bbI4=", "checksumSHA1": "aKn1oKcY74N8TRLm3Ayt7Q4bbI4=",
"path": "vitess.io/vitess/go/bytes2", "path": "vitess.io/vitess/go/bytes2",
"revision": "ae79dd48f3157c96d083c890f670011cdebf0a2b", "revision": "42f5c760cca59b18c4ea877284f36eb0be7d6468",
"revisionTime": "2019-01-04T23:32:40Z" "revisionTime": "2019-01-06T20:12:04Z"
}, },
{ {
"checksumSHA1": "JVCEN4UGRmg3TofIBdzZMZ3G0Ww=", "checksumSHA1": "JVCEN4UGRmg3TofIBdzZMZ3G0Ww=",
"path": "vitess.io/vitess/go/hack", "path": "vitess.io/vitess/go/hack",
"revision": "ae79dd48f3157c96d083c890f670011cdebf0a2b", "revision": "42f5c760cca59b18c4ea877284f36eb0be7d6468",
"revisionTime": "2019-01-04T23:32:40Z" "revisionTime": "2019-01-06T20:12:04Z"
}, },
{ {
"checksumSHA1": "F5pcGq+2W1FHEjgktTdKOE6W8mk=", "checksumSHA1": "F5pcGq+2W1FHEjgktTdKOE6W8mk=",
"path": "vitess.io/vitess/go/sqltypes", "path": "vitess.io/vitess/go/sqltypes",
"revision": "ae79dd48f3157c96d083c890f670011cdebf0a2b", "revision": "42f5c760cca59b18c4ea877284f36eb0be7d6468",
"revisionTime": "2019-01-04T23:32:40Z" "revisionTime": "2019-01-06T20:12:04Z"
}, },
{ {
"checksumSHA1": "ntFIQYkBS51G6y+FEkjFW40+HOU=", "checksumSHA1": "ntFIQYkBS51G6y+FEkjFW40+HOU=",
"path": "vitess.io/vitess/go/vt/log", "path": "vitess.io/vitess/go/vt/log",
"revision": "ae79dd48f3157c96d083c890f670011cdebf0a2b", "revision": "42f5c760cca59b18c4ea877284f36eb0be7d6468",
"revisionTime": "2019-01-04T23:32:40Z" "revisionTime": "2019-01-06T20:12:04Z"
}, },
{ {
"checksumSHA1": "HHIcl3lpWkzLARkkNv94fVaObjo=", "checksumSHA1": "HHIcl3lpWkzLARkkNv94fVaObjo=",
"path": "vitess.io/vitess/go/vt/proto/query", "path": "vitess.io/vitess/go/vt/proto/query",
"revision": "ae79dd48f3157c96d083c890f670011cdebf0a2b", "revision": "42f5c760cca59b18c4ea877284f36eb0be7d6468",
"revisionTime": "2019-01-04T23:32:40Z" "revisionTime": "2019-01-06T20:12:04Z"
}, },
{ {
"checksumSHA1": "YLWTmL+rvz0htn0niRMrIUI6rKc=", "checksumSHA1": "YLWTmL+rvz0htn0niRMrIUI6rKc=",
"path": "vitess.io/vitess/go/vt/proto/topodata", "path": "vitess.io/vitess/go/vt/proto/topodata",
"revision": "ae79dd48f3157c96d083c890f670011cdebf0a2b", "revision": "42f5c760cca59b18c4ea877284f36eb0be7d6468",
"revisionTime": "2019-01-04T23:32:40Z" "revisionTime": "2019-01-06T20:12:04Z"
}, },
{ {
"checksumSHA1": "tNNlcSFFnlOauS2hXnrz/zA/wfk=", "checksumSHA1": "tNNlcSFFnlOauS2hXnrz/zA/wfk=",
"path": "vitess.io/vitess/go/vt/proto/vtgate", "path": "vitess.io/vitess/go/vt/proto/vtgate",
"revision": "ae79dd48f3157c96d083c890f670011cdebf0a2b", "revision": "42f5c760cca59b18c4ea877284f36eb0be7d6468",
"revisionTime": "2019-01-04T23:32:40Z" "revisionTime": "2019-01-06T20:12:04Z"
}, },
{ {
"checksumSHA1": "qz32abYdmm9NfKTc++K0l1EvXXM=", "checksumSHA1": "qz32abYdmm9NfKTc++K0l1EvXXM=",
"path": "vitess.io/vitess/go/vt/proto/vtrpc", "path": "vitess.io/vitess/go/vt/proto/vtrpc",
"revision": "ae79dd48f3157c96d083c890f670011cdebf0a2b", "revision": "42f5c760cca59b18c4ea877284f36eb0be7d6468",
"revisionTime": "2019-01-04T23:32:40Z" "revisionTime": "2019-01-06T20:12:04Z"
}, },
{ {
"checksumSHA1": "IDe+9Bn42lZVsuoYO/epdguiErk=", "checksumSHA1": "IDe+9Bn42lZVsuoYO/epdguiErk=",
"path": "vitess.io/vitess/go/vt/sqlparser", "path": "vitess.io/vitess/go/vt/sqlparser",
"revision": "ae79dd48f3157c96d083c890f670011cdebf0a2b", "revision": "42f5c760cca59b18c4ea877284f36eb0be7d6468",
"revisionTime": "2019-01-04T23:32:40Z" "revisionTime": "2019-01-06T20:12:04Z"
}, },
{ {
"checksumSHA1": "Jx+gOh/kiBDSZxEIWHyYn9brjdo=", "checksumSHA1": "Jx+gOh/kiBDSZxEIWHyYn9brjdo=",
"path": "vitess.io/vitess/go/vt/vterrors", "path": "vitess.io/vitess/go/vt/vterrors",
"revision": "ae79dd48f3157c96d083c890f670011cdebf0a2b", "revision": "42f5c760cca59b18c4ea877284f36eb0be7d6468",
"revisionTime": "2019-01-04T23:32:40Z" "revisionTime": "2019-01-06T20:12:04Z"
} }
], ],
"rootPath": "github.com/XiaoMi/soar" "rootPath": "github.com/XiaoMi/soar"
......