Commit 82efd3c2 authored by martianzhang

add vendor

Parent 7c5d8b1e
...@@ -9,6 +9,9 @@ import (
// Trace just calls AddStack.
func Trace(err error) error {
if err == nil {
return nil
}
return AddStack(err)
}
...@@ -52,6 +55,48 @@ func Annotatef(err error, format string, args ...interface{}) error {
}
}
var emptyStack stack
// NewNoStackError creates an error that carries no error stack;
// a later duplicate Trace call will not generate a stack for it either.
func NewNoStackError(msg string) error {
return &fundamental{
msg: msg,
stack: &emptyStack,
}
}
// SuspendStack suspends stack generation for the error.
func SuspendStack(err error) error {
if err == nil {
return err
}
cleared := clearStack(err)
if cleared {
return err
}
return &withStack{
err,
&emptyStack,
}
}
func clearStack(err error) (cleared bool) {
switch typedErr := err.(type) {
case *withMessage:
return clearStack(typedErr.Cause())
case *fundamental:
typedErr.stack = &emptyStack
return true
case *withStack:
typedErr.stack = &emptyStack
clearStack(typedErr.Cause())
return true
default:
return false
}
}
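For context, here is a minimal sketch of how the new no-stack helpers could be combined with the existing `Trace`; it assumes the package is imported under its upstream path `github.com/pingcap/errors`.

```go
package main

import (
	"fmt"

	"github.com/pingcap/errors"
)

func main() {
	// NewNoStackError carries an empty stack, so a later Trace/AddStack
	// call will not attach a stack trace to it either.
	base := errors.NewNoStackError("config entry not found")
	traced := errors.Trace(base)

	// SuspendStack clears the stack of an existing error (or wraps it with
	// an empty stack), so "%+v" prints no trace for it afterwards.
	quiet := errors.SuspendStack(errors.New("noisy error"))

	fmt.Printf("%+v\n%+v\n", traced, quiet)
}
```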
// ErrorStack will format a stack trace if it is available, otherwise it will be Error()
// If the error is nil, the empty string is returned
// Note that this just calls fmt.Sprintf("%+v", err)
......
# Parser
[![Go Report Card](https://goreportcard.com/badge/github.com/pingcap/parser)](https://goreportcard.com/report/github.com/pingcap/parser) [![CircleCI Status](https://circleci.com/gh/pingcap/parser.svg?style=shield)](https://circleci.com/gh/pingcap/parser) [![GoDoc](https://godoc.org/github.com/pingcap/parser?status.svg)](https://godoc.org/github.com/pingcap/parser)
[![codecov](https://codecov.io/gh/pingcap/parser/branch/master/graph/badge.svg)](https://codecov.io/gh/pingcap/parser)
TiDB SQL Parser
......
...@@ -98,11 +98,11 @@ func (n *CreateDatabaseStmt) Restore(ctx *RestoreCtx) error { ...@@ -98,11 +98,11 @@ func (n *CreateDatabaseStmt) Restore(ctx *RestoreCtx) error {
ctx.WriteKeyWord("IF NOT EXISTS ") ctx.WriteKeyWord("IF NOT EXISTS ")
} }
ctx.WriteName(n.Name) ctx.WriteName(n.Name)
for _, option := range n.Options { for i, option := range n.Options {
ctx.WritePlain(" ") ctx.WritePlain(" ")
err := option.Restore(ctx) err := option.Restore(ctx)
if err != nil { if err != nil {
return errors.Trace(err) return errors.Annotatef(err, "An error occurred while splicing CreateDatabaseStmt DatabaseOption: [%v]", i)
} }
} }
return nil return nil
...@@ -118,6 +118,43 @@ func (n *CreateDatabaseStmt) Accept(v Visitor) (Node, bool) { ...@@ -118,6 +118,43 @@ func (n *CreateDatabaseStmt) Accept(v Visitor) (Node, bool) {
return v.Leave(n) return v.Leave(n)
} }
// AlterDatabaseStmt is a statement to change the structure of a database.
// See https://dev.mysql.com/doc/refman/5.7/en/alter-database.html
type AlterDatabaseStmt struct {
ddlNode
Name string
AlterDefaultDatabase bool
Options []*DatabaseOption
}
// Restore implements Node interface.
func (n *AlterDatabaseStmt) Restore(ctx *RestoreCtx) error {
ctx.WriteKeyWord("ALTER DATABASE")
if !n.AlterDefaultDatabase {
ctx.WritePlain(" ")
ctx.WriteName(n.Name)
}
for i, option := range n.Options {
ctx.WritePlain(" ")
err := option.Restore(ctx)
if err != nil {
return errors.Annotatef(err, "An error occurred while splicing AlterDatabaseStmt DatabaseOption: [%v]", i)
}
}
return nil
}
// Accept implements Node Accept interface.
func (n *AlterDatabaseStmt) Accept(v Visitor) (Node, bool) {
newNode, skipChildren := v.Enter(n)
if skipChildren {
return v.Leave(newNode)
}
n = newNode.(*AlterDatabaseStmt)
return v.Leave(n)
}
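Purely for illustration (not part of this commit), the new node could be built by hand for `ALTER DATABASE test DEFAULT CHARACTER SET = utf8mb4` roughly as below; the `DatabaseOption` shape is an assumption based on the existing `CreateDatabaseStmt` code in this file.

```go
// Fragment; assumes this vendored ast package is imported as `ast`.
stmt := &ast.AlterDatabaseStmt{
	Name:                 "test",
	AlterDefaultDatabase: false, // true models `ALTER DATABASE ...` written without a database name
	Options: []*ast.DatabaseOption{
		{Tp: ast.DatabaseOptionCharset, Value: "utf8mb4"},
	},
}
_ = stmt // Restore splices the options after the database name, as in the code above
```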
// DropDatabaseStmt is a statement to drop a database and all tables in the database.
// See https://dev.mysql.com/doc/refman/5.7/en/drop-database.html
type DropDatabaseStmt struct {
...@@ -354,6 +391,14 @@ const (
ColumnOptionCollate
)
var (
invalidOptionForGeneratedColumn = map[ColumnOptionType]struct{}{
ColumnOptionAutoIncrement: {},
ColumnOptionOnUpdate: {},
ColumnOptionDefaultValue: {},
}
)
// ColumnOption is used for parsing column constraint info from SQL.
type ColumnOption struct {
node
...@@ -672,6 +717,23 @@ func (n *ColumnDef) Accept(v Visitor) (Node, bool) {
return v.Leave(n)
}
// Validate checks if a column definition is legal.
// For example, generated column definitions that contain such
// column options as `ON UPDATE`, `AUTO_INCREMENT`, `DEFAULT`
// are illegal.
func (n *ColumnDef) Validate() bool {
generatedCol := false
illegalOpt4gc := false
for _, opt := range n.Options {
if opt.Tp == ColumnOptionGenerated {
generatedCol = true
}
_, found := invalidOptionForGeneratedColumn[opt.Tp]
illegalOpt4gc = illegalOpt4gc || found
}
return !(generatedCol && illegalOpt4gc)
}
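A small sketch of what the new check rejects, using only fields visible in this file; the program assumes the vendored `ast` package path matches upstream.

```go
package main

import (
	"fmt"

	"github.com/pingcap/parser/ast"
)

func main() {
	col := &ast.ColumnDef{
		Options: []*ast.ColumnOption{
			{Tp: ast.ColumnOptionGenerated},     // ... AS (expr)
			{Tp: ast.ColumnOptionAutoIncrement}, // illegal together with a generated column
		},
	}
	fmt.Println(col.Validate()) // false
}
```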
// CreateTableStmt is a statement to create a table. // CreateTableStmt is a statement to create a table.
// See https://dev.mysql.com/doc/refman/5.7/en/create-table.html // See https://dev.mysql.com/doc/refman/5.7/en/create-table.html
type CreateTableStmt struct { type CreateTableStmt struct {
...@@ -684,7 +746,7 @@ type CreateTableStmt struct { ...@@ -684,7 +746,7 @@ type CreateTableStmt struct {
Constraints []*Constraint Constraints []*Constraint
Options []*TableOption Options []*TableOption
Partition *PartitionOptions Partition *PartitionOptions
OnDuplicate OnDuplicateCreateTableSelectType OnDuplicate OnDuplicateKeyHandlingType
Select ResultSetNode Select ResultSetNode
} }
...@@ -744,11 +806,11 @@ func (n *CreateTableStmt) Restore(ctx *RestoreCtx) error { ...@@ -744,11 +806,11 @@ func (n *CreateTableStmt) Restore(ctx *RestoreCtx) error {
if n.Select != nil { if n.Select != nil {
switch n.OnDuplicate { switch n.OnDuplicate {
case OnDuplicateCreateTableSelectError: case OnDuplicateKeyHandlingError:
ctx.WriteKeyWord(" AS ") ctx.WriteKeyWord(" AS ")
case OnDuplicateCreateTableSelectIgnore: case OnDuplicateKeyHandlingIgnore:
ctx.WriteKeyWord(" IGNORE AS ") ctx.WriteKeyWord(" IGNORE AS ")
case OnDuplicateCreateTableSelectReplace: case OnDuplicateKeyHandlingReplace:
ctx.WriteKeyWord(" REPLACE AS ") ctx.WriteKeyWord(" REPLACE AS ")
} }
...@@ -1180,6 +1242,7 @@ const ( ...@@ -1180,6 +1242,7 @@ const (
TableOptionRowFormat TableOptionRowFormat
TableOptionStatsPersistent TableOptionStatsPersistent
TableOptionShardRowID TableOptionShardRowID
TableOptionPreSplitRegion
TableOptionPackKeys TableOptionPackKeys
) )
...@@ -1201,15 +1264,16 @@ const ( ...@@ -1201,15 +1264,16 @@ const (
TokuDBRowFormatUncompressed TokuDBRowFormatUncompressed
) )
// OnDuplicateCreateTableSelectType is the option that handle unique key values in 'CREATE TABLE ... SELECT'. // OnDuplicateKeyHandlingType is the option that handle unique key values in 'CREATE TABLE ... SELECT' or `LOAD DATA`.
// See https://dev.mysql.com/doc/refman/5.7/en/create-table-select.html // See https://dev.mysql.com/doc/refman/5.7/en/create-table-select.html
type OnDuplicateCreateTableSelectType int // See https://dev.mysql.com/doc/refman/5.7/en/load-data.html
type OnDuplicateKeyHandlingType int
// OnDuplicateCreateTableSelect types // OnDuplicateKeyHandling types
const ( const (
OnDuplicateCreateTableSelectError OnDuplicateCreateTableSelectType = iota OnDuplicateKeyHandlingError OnDuplicateKeyHandlingType = iota
OnDuplicateCreateTableSelectIgnore OnDuplicateKeyHandlingIgnore
OnDuplicateCreateTableSelectReplace OnDuplicateKeyHandlingReplace
) )
// TableOption is used for parsing table option from SQL. // TableOption is used for parsing table option from SQL.
...@@ -1324,8 +1388,10 @@ func (n *TableOption) Restore(ctx *RestoreCtx) error { ...@@ -1324,8 +1388,10 @@ func (n *TableOption) Restore(ctx *RestoreCtx) error {
ctx.WritePlain(" /* TableOptionStatsPersistent is not supported */ ") ctx.WritePlain(" /* TableOptionStatsPersistent is not supported */ ")
case TableOptionShardRowID: case TableOptionShardRowID:
ctx.WriteKeyWord("SHARD_ROW_ID_BITS ") ctx.WriteKeyWord("SHARD_ROW_ID_BITS ")
ctx.WritePlain("= ") ctx.WritePlainf("= %d", n.UintValue)
ctx.WritePlainf("%d", n.UintValue) case TableOptionPreSplitRegion:
ctx.WriteKeyWord("PRE_SPLIT_REGIONS ")
ctx.WritePlainf("= %d", n.UintValue)
case TableOptionPackKeys: case TableOptionPackKeys:
// TODO: not support // TODO: not support
ctx.WriteKeyWord("PACK_KEYS ") ctx.WriteKeyWord("PACK_KEYS ")
......
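For reference, a hedged sketch of the table option the new branch restores; the SQL shown is an assumed example, not taken from this commit.

```go
// Fragment; assumes this vendored ast package is imported as `ast`.
// CREATE TABLE t (a BIGINT) SHARD_ROW_ID_BITS = 4 PRE_SPLIT_REGIONS = 3
opt := &ast.TableOption{Tp: ast.TableOptionPreSplitRegion, UintValue: 3}
_ = opt // Restore writes: PRE_SPLIT_REGIONS = 3
```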
...@@ -31,6 +31,7 @@ var ( ...@@ -31,6 +31,7 @@ var (
_ DMLNode = &SelectStmt{} _ DMLNode = &SelectStmt{}
_ DMLNode = &ShowStmt{} _ DMLNode = &ShowStmt{}
_ DMLNode = &LoadDataStmt{} _ DMLNode = &LoadDataStmt{}
_ DMLNode = &SplitIndexRegionStmt{}
_ Node = &Assignment{} _ Node = &Assignment{}
_ Node = &ByItem{} _ Node = &ByItem{}
...@@ -1113,18 +1114,27 @@ func (n *Assignment) Accept(v Visitor) (Node, bool) { ...@@ -1113,18 +1114,27 @@ func (n *Assignment) Accept(v Visitor) (Node, bool) {
return v.Leave(n) return v.Leave(n)
} }
type ColumnNameOrUserVar struct {
ColumnName *ColumnName
UserVar *VariableExpr
}
// LoadDataStmt is a statement to load data from a specified file, then insert this rows into an existing table. // LoadDataStmt is a statement to load data from a specified file, then insert this rows into an existing table.
// See https://dev.mysql.com/doc/refman/5.7/en/load-data.html // See https://dev.mysql.com/doc/refman/5.7/en/load-data.html
type LoadDataStmt struct { type LoadDataStmt struct {
dmlNode dmlNode
IsLocal bool IsLocal bool
Path string Path string
Table *TableName OnDuplicate OnDuplicateKeyHandlingType
Columns []*ColumnName Table *TableName
FieldsInfo *FieldsClause Columns []*ColumnName
LinesInfo *LinesClause FieldsInfo *FieldsClause
IgnoreLines uint64 LinesInfo *LinesClause
IgnoreLines uint64
ColumnAssignments []*Assignment
ColumnsAndUserVars []*ColumnNameOrUserVar
} }
// Restore implements Node interface. // Restore implements Node interface.
...@@ -1135,6 +1145,11 @@ func (n *LoadDataStmt) Restore(ctx *RestoreCtx) error { ...@@ -1135,6 +1145,11 @@ func (n *LoadDataStmt) Restore(ctx *RestoreCtx) error {
} }
ctx.WriteKeyWord("INFILE ") ctx.WriteKeyWord("INFILE ")
ctx.WriteString(n.Path) ctx.WriteString(n.Path)
if n.OnDuplicate == OnDuplicateKeyHandlingReplace {
ctx.WriteKeyWord(" REPLACE")
} else if n.OnDuplicate == OnDuplicateKeyHandlingIgnore {
ctx.WriteKeyWord(" IGNORE")
}
ctx.WriteKeyWord(" INTO TABLE ") ctx.WriteKeyWord(" INTO TABLE ")
if err := n.Table.Restore(ctx); err != nil { if err := n.Table.Restore(ctx); err != nil {
return errors.Annotate(err, "An error occurred while restore LoadDataStmt.Table") return errors.Annotate(err, "An error occurred while restore LoadDataStmt.Table")
...@@ -1146,18 +1161,39 @@ func (n *LoadDataStmt) Restore(ctx *RestoreCtx) error { ...@@ -1146,18 +1161,39 @@ func (n *LoadDataStmt) Restore(ctx *RestoreCtx) error {
ctx.WritePlainf("%d", n.IgnoreLines) ctx.WritePlainf("%d", n.IgnoreLines)
ctx.WriteKeyWord(" LINES") ctx.WriteKeyWord(" LINES")
} }
if len(n.Columns) != 0 { if len(n.ColumnsAndUserVars) != 0 {
ctx.WritePlain(" (") ctx.WritePlain(" (")
for i, column := range n.Columns { for i, c := range n.ColumnsAndUserVars {
if i != 0 { if i != 0 {
ctx.WritePlain(",") ctx.WritePlain(",")
} }
if err := column.Restore(ctx); err != nil { if c.ColumnName != nil {
return errors.Annotate(err, "An error occurred while restore LoadDataStmt.Columns") if err := c.ColumnName.Restore(ctx); err != nil {
return errors.Annotate(err, "An error occurred while restore LoadDataStmt.ColumnsAndUserVars")
}
} }
if c.UserVar != nil {
if err := c.UserVar.Restore(ctx); err != nil {
return errors.Annotate(err, "An error occurred while restore LoadDataStmt.ColumnsAndUserVars")
}
}
} }
ctx.WritePlain(")") ctx.WritePlain(")")
} }
if n.ColumnAssignments != nil {
ctx.WriteKeyWord(" SET")
for i, assign := range n.ColumnAssignments {
if i != 0 {
ctx.WritePlain(",")
}
ctx.WritePlain(" ")
if err := assign.Restore(ctx); err != nil {
return errors.Annotate(err, "An error occurred while restore LoadDataStmt.ColumnAssignments")
}
}
}
return nil
}
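An illustrative statement (assumed, not from this diff) that exercises each of the new `LoadDataStmt` fields:

```go
// Fragment: the clauses map onto the new fields as follows.
//   REPLACE / IGNORE  -> OnDuplicate (OnDuplicateKeyHandlingReplace / OnDuplicateKeyHandlingIgnore)
//   (c1, @v1)         -> ColumnsAndUserVars, mixing *ColumnName and *VariableExpr entries
//   SET c2 = @v1      -> ColumnAssignments ([]*Assignment)
sql := "LOAD DATA LOCAL INFILE '/tmp/t.csv' REPLACE INTO TABLE t (c1, @v1) SET c2 = @v1"
_ = sql
```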
...@@ -1182,9 +1218,28 @@ func (n *LoadDataStmt) Accept(v Visitor) (Node, bool) { ...@@ -1182,9 +1218,28 @@ func (n *LoadDataStmt) Accept(v Visitor) (Node, bool) {
} }
n.Columns[i] = node.(*ColumnName) n.Columns[i] = node.(*ColumnName)
} }
for i, assignment := range n.ColumnAssignments {
node, ok := assignment.Accept(v)
if !ok {
return n, false
}
n.ColumnAssignments[i] = node.(*Assignment)
}
return v.Leave(n) return v.Leave(n)
} }
const (
Terminated = iota
Enclosed
Escaped
)
type FieldItem struct {
Type int
Value string
}
// FieldsClause represents fields references clause in load data statement. // FieldsClause represents fields references clause in load data statement.
type FieldsClause struct { type FieldsClause struct {
Terminated string Terminated string
...@@ -1750,6 +1805,7 @@ const ( ...@@ -1750,6 +1805,7 @@ const (
ShowStatsBuckets ShowStatsBuckets
ShowStatsHealthy ShowStatsHealthy
ShowPlugins ShowPlugins
ShowProfile
ShowProfiles ShowProfiles
ShowMasterStatus ShowMasterStatus
ShowPrivileges ShowPrivileges
...@@ -1757,6 +1813,21 @@ const ( ...@@ -1757,6 +1813,21 @@ const (
ShowBindings ShowBindings
ShowPumpStatus ShowPumpStatus
ShowDrainerStatus ShowDrainerStatus
ShowOpenTables
ShowAnalyzeStatus
)
const (
ProfileTypeInvalid = iota
ProfileTypeCPU
ProfileTypeMemory
ProfileTypeBlockIo
ProfileTypeContextSwitch
ProfileTypePageFaults
ProfileTypeIpc
ProfileTypeSwaps
ProfileTypeSource
ProfileTypeAll
)
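As a rough sketch, the new constants and `ShowStmt` fields below model a statement such as `SHOW PROFILE CPU, BLOCK IO FOR QUERY 2`; the field values are illustrative only.

```go
// Fragment; assumes this vendored ast package is imported as `ast`.
queryID := int64(2)
show := &ast.ShowStmt{
	Tp:               ast.ShowProfile,
	ShowProfileTypes: []int{ast.ProfileTypeCPU, ast.ProfileTypeBlockIo},
	ShowProfileArgs:  &queryID,
}
_ = show
```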
// ShowStmt is a statement to provide information about databases, tables, columns and so on. // ShowStmt is a statement to provide information about databases, tables, columns and so on.
...@@ -1771,13 +1842,18 @@ type ShowStmt struct { ...@@ -1771,13 +1842,18 @@ type ShowStmt struct {
Column *ColumnName // Used for `desc table column`. Column *ColumnName // Used for `desc table column`.
Flag int // Some flag parsed from sql, such as FULL. Flag int // Some flag parsed from sql, such as FULL.
Full bool Full bool
User *auth.UserIdentity // Used for show grants/create user. User *auth.UserIdentity // Used for show grants/create user.
IfNotExists bool // Used for `show create database if not exists` Roles []*auth.RoleIdentity // Used for show grants .. using
IfNotExists bool // Used for `show create database if not exists`
// GlobalScope is used by `show variables` and `show bindings` // GlobalScope is used by `show variables` and `show bindings`
GlobalScope bool GlobalScope bool
Pattern *PatternLikeExpr Pattern *PatternLikeExpr
Where ExprNode Where ExprNode
ShowProfileTypes []int // Used for `SHOW PROFILE` syntax
ShowProfileArgs *int64 // Used for `SHOW PROFILE` syntax
ShowProfileLimit *Limit // Used for `SHOW PROFILE` syntax
} }
// Restore implements Node interface. // Restore implements Node interface.
...@@ -1847,6 +1923,17 @@ func (n *ShowStmt) Restore(ctx *RestoreCtx) error { ...@@ -1847,6 +1923,17 @@ func (n *ShowStmt) Restore(ctx *RestoreCtx) error {
return errors.Annotate(err, "An error occurred while restore ShowStmt.User") return errors.Annotate(err, "An error occurred while restore ShowStmt.User")
} }
} }
if n.Roles != nil {
ctx.WriteKeyWord(" USING ")
for i, r := range n.Roles {
if err := r.Restore(ctx); err != nil {
return errors.Annotate(err, "An error occurred while restore ShowStmt.User")
}
if i != len(n.Roles)-1 {
ctx.WritePlain(", ")
}
}
}
case ShowMasterStatus: case ShowMasterStatus:
ctx.WriteKeyWord("MASTER STATUS") ctx.WriteKeyWord("MASTER STATUS")
case ShowProcessList: case ShowProcessList:
...@@ -1874,6 +1961,47 @@ func (n *ShowStmt) Restore(ctx *RestoreCtx) error { ...@@ -1874,6 +1961,47 @@ func (n *ShowStmt) Restore(ctx *RestoreCtx) error {
} }
case ShowProfiles: case ShowProfiles:
ctx.WriteKeyWord("PROFILES") ctx.WriteKeyWord("PROFILES")
case ShowProfile:
ctx.WriteKeyWord("PROFILE")
if len(n.ShowProfileTypes) > 0 {
for i, tp := range n.ShowProfileTypes {
if i != 0 {
ctx.WritePlain(",")
}
ctx.WritePlain(" ")
switch tp {
case ProfileTypeCPU:
ctx.WriteKeyWord("CPU")
case ProfileTypeMemory:
ctx.WriteKeyWord("MEMORY")
case ProfileTypeBlockIo:
ctx.WriteKeyWord("BLOCK IO")
case ProfileTypeContextSwitch:
ctx.WriteKeyWord("CONTEXT SWITCHES")
case ProfileTypeIpc:
ctx.WriteKeyWord("IPC")
case ProfileTypePageFaults:
ctx.WriteKeyWord("PAGE FAULTS")
case ProfileTypeSource:
ctx.WriteKeyWord("SOURCE")
case ProfileTypeSwaps:
ctx.WriteKeyWord("SWAPS")
case ProfileTypeAll:
ctx.WriteKeyWord("ALL")
}
}
}
if n.ShowProfileArgs != nil {
ctx.WriteKeyWord(" FOR QUERY ")
ctx.WritePlainf("%d", *n.ShowProfileArgs)
}
if n.ShowProfileLimit != nil {
ctx.WritePlain(" ")
if err := n.ShowProfileLimit.Restore(ctx); err != nil {
return errors.Annotate(err, "An error occurred while restore ShowStmt.WritePlain")
}
}
case ShowPrivileges: case ShowPrivileges:
ctx.WriteKeyWord("PRIVILEGES") ctx.WriteKeyWord("PRIVILEGES")
// ShowTargetFilterable // ShowTargetFilterable
...@@ -1889,6 +2017,9 @@ func (n *ShowStmt) Restore(ctx *RestoreCtx) error { ...@@ -1889,6 +2017,9 @@ func (n *ShowStmt) Restore(ctx *RestoreCtx) error {
restoreOptFull() restoreOptFull()
ctx.WriteKeyWord("TABLES") ctx.WriteKeyWord("TABLES")
restoreShowDatabaseNameOpt() restoreShowDatabaseNameOpt()
case ShowOpenTables:
ctx.WriteKeyWord("OPEN TABLES")
restoreShowDatabaseNameOpt()
case ShowTableStatus: case ShowTableStatus:
ctx.WriteKeyWord("TABLE STATUS") ctx.WriteKeyWord("TABLE STATUS")
restoreShowDatabaseNameOpt() restoreShowDatabaseNameOpt()
...@@ -1943,6 +2074,8 @@ func (n *ShowStmt) Restore(ctx *RestoreCtx) error { ...@@ -1943,6 +2074,8 @@ func (n *ShowStmt) Restore(ctx *RestoreCtx) error {
ctx.WriteKeyWord("PUMP STATUS") ctx.WriteKeyWord("PUMP STATUS")
case ShowDrainerStatus: case ShowDrainerStatus:
ctx.WriteKeyWord("DRAINER STATUS") ctx.WriteKeyWord("DRAINER STATUS")
case ShowAnalyzeStatus:
ctx.WriteKeyWord("ANALYZE STATUS")
default: default:
return errors.New("Unknown ShowStmt type") return errors.New("Unknown ShowStmt type")
} }
...@@ -2011,7 +2144,7 @@ type WindowSpec struct { ...@@ -2011,7 +2144,7 @@ type WindowSpec struct {
Frame *FrameClause Frame *FrameClause
// OnlyAlias will set to true of the first following case. // OnlyAlias will set to true of the first following case.
// To make compatiable with MySQL, we need to distinguish `select func over w` from `select func over (w)`. // To make compatible with MySQL, we need to distinguish `select func over w` from `select func over (w)`.
OnlyAlias bool OnlyAlias bool
} }
...@@ -2270,3 +2403,61 @@ func (n *FrameBound) Accept(v Visitor) (Node, bool) { ...@@ -2270,3 +2403,61 @@ func (n *FrameBound) Accept(v Visitor) (Node, bool) {
} }
return v.Leave(n) return v.Leave(n)
} }
type SplitIndexRegionStmt struct {
dmlNode
Table *TableName
IndexName string
ValueLists [][]ExprNode
}
func (n *SplitIndexRegionStmt) Restore(ctx *RestoreCtx) error {
ctx.WriteKeyWord("SPLIT TABLE ")
if err := n.Table.Restore(ctx); err != nil {
return errors.Annotate(err, "An error occurred while restore SplitIndexRegionStmt.Table")
}
ctx.WriteKeyWord(" INDEX ")
ctx.WriteName(n.IndexName)
ctx.WriteKeyWord(" BY ")
for i, row := range n.ValueLists {
if i != 0 {
ctx.WritePlain(",")
}
ctx.WritePlain("(")
for j, v := range row {
if j != 0 {
ctx.WritePlain(",")
}
if err := v.Restore(ctx); err != nil {
return errors.Annotatef(err, "An error occurred while restore SplitIndexRegionStmt.ValueLists[%d][%d]", i, j)
}
}
ctx.WritePlain(")")
}
return nil
}
func (n *SplitIndexRegionStmt) Accept(v Visitor) (Node, bool) {
newNode, skipChildren := v.Enter(n)
if skipChildren {
return v.Leave(newNode)
}
n = newNode.(*SplitIndexRegionStmt)
node, ok := n.Table.Accept(v)
if !ok {
return n, false
}
n.Table = node.(*TableName)
for i, list := range n.ValueLists {
for j, val := range list {
node, ok := val.Accept(v)
if !ok {
return n, false
}
n.ValueLists[i][j] = node.(ExprNode)
}
}
return v.Leave(n)
}
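The statement this node models is `SPLIT TABLE t INDEX idx_a BY (...), (...)`. A minimal construction sketch follows; `model.NewCIStr` is assumed from the parser's model package, and the expression values are omitted because they come from the parser driver, which is outside this diff.

```go
// Fragment; assumes imports of the vendored ast and model packages.
split := &ast.SplitIndexRegionStmt{
	Table:     &ast.TableName{Name: model.NewCIStr("t")},
	IndexName: "idx_a",
	// ValueLists holds one []ast.ExprNode per split point, e.g. ("a"), ("m").
}
_ = split
```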
...@@ -158,9 +158,15 @@ func (n *BinaryOperationExpr) Restore(ctx *RestoreCtx) error { ...@@ -158,9 +158,15 @@ func (n *BinaryOperationExpr) Restore(ctx *RestoreCtx) error {
if err := n.L.Restore(ctx); err != nil { if err := n.L.Restore(ctx); err != nil {
return errors.Annotate(err, "An error occurred when restore BinaryOperationExpr.L") return errors.Annotate(err, "An error occurred when restore BinaryOperationExpr.L")
} }
if ctx.Flags.HasSpacesAroundBinaryOperationFlag() {
ctx.WritePlain(" ")
}
if err := n.Op.Restore(ctx); err != nil { if err := n.Op.Restore(ctx); err != nil {
return errors.Annotate(err, "An error occurred when restore BinaryOperationExpr.Op") return errors.Annotate(err, "An error occurred when restore BinaryOperationExpr.Op")
} }
if ctx.Flags.HasSpacesAroundBinaryOperationFlag() {
ctx.WritePlain(" ")
}
if err := n.R.Restore(ctx); err != nil { if err := n.R.Restore(ctx); err != nil {
return errors.Annotate(err, "An error occurred when restore BinaryOperationExpr.R") return errors.Annotate(err, "An error occurred when restore BinaryOperationExpr.R")
} }
...@@ -283,13 +289,14 @@ func (n *CaseExpr) Restore(ctx *RestoreCtx) error { ...@@ -283,13 +289,14 @@ func (n *CaseExpr) Restore(ctx *RestoreCtx) error {
// Format the ExprNode into a Writer. // Format the ExprNode into a Writer.
func (n *CaseExpr) Format(w io.Writer) { func (n *CaseExpr) Format(w io.Writer) {
fmt.Fprint(w, "CASE ") fmt.Fprint(w, "CASE")
// Because the presence of `case when` syntax, `Value` could be nil and we need check this. // Because the presence of `case when` syntax, `Value` could be nil and we need check this.
if n.Value != nil { if n.Value != nil {
n.Value.Format(w)
fmt.Fprint(w, " ") fmt.Fprint(w, " ")
n.Value.Format(w)
} }
for _, clause := range n.WhenClauses { for _, clause := range n.WhenClauses {
fmt.Fprint(w, " ")
fmt.Fprint(w, "WHEN ") fmt.Fprint(w, "WHEN ")
clause.Expr.Format(w) clause.Expr.Format(w)
fmt.Fprint(w, " THEN ") fmt.Fprint(w, " THEN ")
......
...@@ -512,7 +512,7 @@ func (n *FuncCastExpr) Restore(ctx *RestoreCtx) error { ...@@ -512,7 +512,7 @@ func (n *FuncCastExpr) Restore(ctx *RestoreCtx) error {
return errors.Annotatef(err, "An error occurred while restore FuncCastExpr.Expr") return errors.Annotatef(err, "An error occurred while restore FuncCastExpr.Expr")
} }
ctx.WriteKeyWord(" AS ") ctx.WriteKeyWord(" AS ")
n.Tp.FormatAsCastType(ctx.In) n.Tp.RestoreAsCastType(ctx)
ctx.WritePlain(")") ctx.WritePlain(")")
case CastConvertFunction: case CastConvertFunction:
ctx.WriteKeyWord("CONVERT") ctx.WriteKeyWord("CONVERT")
...@@ -521,7 +521,7 @@ func (n *FuncCastExpr) Restore(ctx *RestoreCtx) error { ...@@ -521,7 +521,7 @@ func (n *FuncCastExpr) Restore(ctx *RestoreCtx) error {
return errors.Annotatef(err, "An error occurred while restore FuncCastExpr.Expr") return errors.Annotatef(err, "An error occurred while restore FuncCastExpr.Expr")
} }
ctx.WritePlain(", ") ctx.WritePlain(", ")
n.Tp.FormatAsCastType(ctx.In) n.Tp.RestoreAsCastType(ctx)
ctx.WritePlain(")") ctx.WritePlain(")")
case CastBinaryOperator: case CastBinaryOperator:
ctx.WriteKeyWord("BINARY ") ctx.WriteKeyWord("BINARY ")
......
...@@ -42,6 +42,7 @@ var ( ...@@ -42,6 +42,7 @@ var (
_ StmtNode = &RollbackStmt{} _ StmtNode = &RollbackStmt{}
_ StmtNode = &SetPwdStmt{} _ StmtNode = &SetPwdStmt{}
_ StmtNode = &SetRoleStmt{} _ StmtNode = &SetRoleStmt{}
_ StmtNode = &SetDefaultRoleStmt{}
_ StmtNode = &SetStmt{} _ StmtNode = &SetStmt{}
_ StmtNode = &UseStmt{} _ StmtNode = &UseStmt{}
_ StmtNode = &FlushStmt{} _ StmtNode = &FlushStmt{}
...@@ -67,6 +68,12 @@ const ( ...@@ -67,6 +68,12 @@ const (
DrainerType = "DRAINER" DrainerType = "DRAINER"
) )
// Transaction mode constants.
const (
Optimistic = "OPTIMISTIC"
Pessimistic = "PESSIMISTIC"
)
var ( var (
// ExplainFormats stores the valid formats for explain statement, used by validator. // ExplainFormats stores the valid formats for explain statement, used by validator.
ExplainFormats = []string{ ExplainFormats = []string{
...@@ -364,11 +371,17 @@ func (n *ExecuteStmt) Accept(v Visitor) (Node, bool) {
// See https://dev.mysql.com/doc/refman/5.7/en/commit.html
type BeginStmt struct {
	stmtNode
	Mode string
}

// Restore implements Node interface.
func (n *BeginStmt) Restore(ctx *RestoreCtx) error {
	if n.Mode == "" {
		ctx.WriteKeyWord("START TRANSACTION")
	} else {
		ctx.WriteKeyWord("BEGIN ")
		ctx.WriteKeyWord(n.Mode)
	}
	return nil
}
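A small construction sketch of the new `Mode` field; `ast.Pessimistic` is the constant added above.

```go
// Fragment; assumes this vendored ast package is imported as `ast`.
begin := &ast.BeginStmt{Mode: ast.Pessimistic} // restores as: BEGIN PESSIMISTIC
plain := &ast.BeginStmt{}                      // restores as: START TRANSACTION
_, _ = begin, plain
```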
...@@ -847,6 +860,57 @@ func (n *SetRoleStmt) Accept(v Visitor) (Node, bool) { ...@@ -847,6 +860,57 @@ func (n *SetRoleStmt) Accept(v Visitor) (Node, bool) {
return v.Leave(n) return v.Leave(n)
} }
type SetDefaultRoleStmt struct {
stmtNode
SetRoleOpt SetRoleStmtType
RoleList []*auth.RoleIdentity
UserList []*auth.UserIdentity
}
func (n *SetDefaultRoleStmt) Restore(ctx *RestoreCtx) error {
ctx.WriteKeyWord("SET DEFAULT ROLE")
switch n.SetRoleOpt {
case SetRoleNone:
ctx.WriteKeyWord(" NONE")
case SetRoleAll:
ctx.WriteKeyWord(" ALL")
default:
}
for i, role := range n.RoleList {
ctx.WritePlain(" ")
err := role.Restore(ctx)
if err != nil {
return errors.Annotate(err, "An error occurred while restore SetDefaultRoleStmt.RoleList")
}
if i != len(n.RoleList)-1 {
ctx.WritePlain(",")
}
}
ctx.WritePlain(" TO")
for i, user := range n.UserList {
ctx.WritePlain(" ")
err := user.Restore(ctx)
if err != nil {
return errors.Annotate(err, "An error occurred while restore SetDefaultRoleStmt.UserList")
}
if i != len(n.UserList)-1 {
ctx.WritePlain(",")
}
}
return nil
}
// Accept implements Node Accept interface.
func (n *SetDefaultRoleStmt) Accept(v Visitor) (Node, bool) {
newNode, skipChildren := v.Enter(n)
if skipChildren {
return v.Leave(newNode)
}
n = newNode.(*SetDefaultRoleStmt)
return v.Leave(n)
}
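For illustration only (values assumed), `SET DEFAULT ROLE ALL TO 'u1'@'%'` would populate the node roughly like this; `auth.UserIdentity` comes from the parser's auth package.

```go
// Fragment; assumes imports of the vendored ast and auth packages.
stmt := &ast.SetDefaultRoleStmt{
	SetRoleOpt: ast.SetRoleAll,
	UserList:   []*auth.UserIdentity{{Username: "u1", Hostname: "%"}},
}
_ = stmt
```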
// UserSpec is used for parsing create user statement. // UserSpec is used for parsing create user statement.
type UserSpec struct { type UserSpec struct {
User *auth.UserIdentity User *auth.UserIdentity
...@@ -901,14 +965,119 @@ func (n *UserSpec) EncodedPassword() (string, bool) { ...@@ -901,14 +965,119 @@ func (n *UserSpec) EncodedPassword() (string, bool) {
return opt.HashString, true return opt.HashString, true
} }
const (
TslNone = iota
Ssl
X509
Cipher
Issuer
Subject
)
type TslOption struct {
Type int
Value string
}
func (t *TslOption) Restore(ctx *RestoreCtx) error {
switch t.Type {
case TslNone:
ctx.WriteKeyWord("NONE")
case Ssl:
ctx.WriteKeyWord("SSL")
case X509:
ctx.WriteKeyWord("X509")
case Cipher:
ctx.WriteKeyWord("CIPHER ")
ctx.WriteString(t.Value)
case Issuer:
ctx.WriteKeyWord("ISSUER ")
ctx.WriteString(t.Value)
case Subject:
ctx.WriteKeyWord("CIPHER")
ctx.WriteString(t.Value)
default:
return errors.Errorf("Unsupported TslOption.Type %d", t.Type)
}
return nil
}
const (
MaxQueriesPerHour = iota + 1
MaxUpdatesPerHour
MaxConnectionsPerHour
MaxUserConnections
)
type ResourceOption struct {
Type int
Count int64
}
func (r *ResourceOption) Restore(ctx *RestoreCtx) error {
switch r.Type {
case MaxQueriesPerHour:
ctx.WriteKeyWord("MAX_QUERIES_PER_HOUR ")
case MaxUpdatesPerHour:
ctx.WriteKeyWord("MAX_UPDATES_PER_HOUR ")
case MaxConnectionsPerHour:
ctx.WriteKeyWord("MAX_CONNECTIONS_PER_HOUR ")
case MaxUserConnections:
ctx.WriteKeyWord("MAX_USER_CONNECTIONS ")
default:
return errors.Errorf("Unsupported ResourceOption.Type %d", r.Type)
}
ctx.WritePlainf("%d", r.Count)
return nil
}
const (
PasswordExpire = iota + 1
PasswordExpireDefault
PasswordExpireNever
PasswordExpireInterval
Lock
Unlock
)
type PasswordOrLockOption struct {
Type int
Count int64
}
func (p *PasswordOrLockOption) Restore(ctx *RestoreCtx) error {
switch p.Type {
case PasswordExpire:
ctx.WriteKeyWord("PASSWORD EXPIRE")
case PasswordExpireDefault:
ctx.WriteKeyWord("PASSWORD EXPIRE DEFAULT")
case PasswordExpireNever:
ctx.WriteKeyWord("PASSWORD EXPIRE NEVER")
case PasswordExpireInterval:
ctx.WriteKeyWord("PASSWORD EXPIRE NEVER")
ctx.WritePlainf(" %d", p.Count)
ctx.WriteKeyWord(" DAY")
case Lock:
ctx.WriteKeyWord("ACCOUNT LOCK")
case Unlock:
ctx.WriteKeyWord("ACCOUNT UNLOCK")
default:
return errors.Errorf("Unsupported PasswordOrLockOption.Type %d", p.Type)
}
return nil
}
// CreateUserStmt creates user account. // CreateUserStmt creates user account.
// See https://dev.mysql.com/doc/refman/5.7/en/create-user.html // See https://dev.mysql.com/doc/refman/5.7/en/create-user.html
type CreateUserStmt struct { type CreateUserStmt struct {
stmtNode stmtNode
IsCreateRole bool IsCreateRole bool
IfNotExists bool IfNotExists bool
Specs []*UserSpec Specs []*UserSpec
TslOptions []*TslOption
ResourceOptions []*ResourceOption
PasswordOrLockOptions []*PasswordOrLockOption
} }
// Restore implements Node interface. // Restore implements Node interface.
...@@ -929,6 +1098,40 @@ func (n *CreateUserStmt) Restore(ctx *RestoreCtx) error { ...@@ -929,6 +1098,40 @@ func (n *CreateUserStmt) Restore(ctx *RestoreCtx) error {
return errors.Annotatef(err, "An error occurred while restore CreateUserStmt.Specs[%d]", i) return errors.Annotatef(err, "An error occurred while restore CreateUserStmt.Specs[%d]", i)
} }
} }
tslOptionLen := len(n.TslOptions)
if tslOptionLen != 0 {
ctx.WriteKeyWord(" REQUIRE ")
}
// Restore `tslOptions` in reverse to keep the same order as the original SQL
for i := tslOptionLen; i > 0; i-- {
if i != tslOptionLen {
ctx.WriteKeyWord(" AND ")
}
if err := n.TslOptions[i-1].Restore(ctx); err != nil {
return errors.Annotatef(err, "An error occurred while restore CreateUserStmt.TslOptions[%d]", i)
}
}
if len(n.ResourceOptions) != 0 {
ctx.WriteKeyWord(" WITH")
}
for i, v := range n.ResourceOptions {
ctx.WritePlain(" ")
if err := v.Restore(ctx); err != nil {
return errors.Annotatef(err, "An error occurred while restore CreateUserStmt.ResourceOptions[%d]", i)
}
}
for i, v := range n.PasswordOrLockOptions {
ctx.WritePlain(" ")
if err := v.Restore(ctx); err != nil {
return errors.Annotatef(err, "An error occurred while restore CreateUserStmt.PasswordOrLockOptions[%d]", i)
}
}
return nil
}
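A hedged example of the clauses the new fields restore; the SQL and values are illustrative, not taken from this commit.

```go
// Fragment; assumes this vendored ast package is imported as `ast`.
// CREATE USER 'u'@'%' REQUIRE SSL WITH MAX_QUERIES_PER_HOUR 100
//   PASSWORD EXPIRE INTERVAL 90 DAY ACCOUNT LOCK
stmt := &ast.CreateUserStmt{
	// Specs for 'u'@'%' omitted for brevity.
	TslOptions:      []*ast.TslOption{{Type: ast.Ssl}},
	ResourceOptions: []*ast.ResourceOption{{Type: ast.MaxQueriesPerHour, Count: 100}},
	PasswordOrLockOptions: []*ast.PasswordOrLockOption{
		{Type: ast.PasswordExpireInterval, Count: 90},
		{Type: ast.Lock},
	},
}
_ = stmt
```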
......
...@@ -35,12 +35,17 @@ type AnalyzeTableStmt struct {
	MaxNumBuckets uint64
	// IndexFlag is true when we only analyze indices for a table.
	IndexFlag bool
	Incremental bool
}

// Restore implements Node interface.
func (n *AnalyzeTableStmt) Restore(ctx *RestoreCtx) error {
	if n.Incremental {
		ctx.WriteKeyWord("ANALYZE INCREMENTAL TABLE ")
	} else {
		ctx.WriteKeyWord("ANALYZE TABLE ")
	}
	for i, table := range n.TableNames {
		if i != 0 {
			ctx.WritePlain(",")
......
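Construction-only sketch of the new flag:

```go
// Fragment; assumes this vendored ast package is imported as `ast`.
stmt := &ast.AnalyzeTableStmt{Incremental: true} // restores as: ANALYZE INCREMENTAL TABLE ...
_ = stmt
```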
...@@ -18,6 +18,17 @@ import ( ...@@ -18,6 +18,17 @@ import (
"github.com/pingcap/errors" "github.com/pingcap/errors"
"github.com/pingcap/parser/mysql" "github.com/pingcap/parser/mysql"
"github.com/pingcap/parser/terror"
)
const (
codeCollationCharsetMismatch = terror.ErrCode(mysql.ErrCollationCharsetMismatch)
codeUnknownCollation = terror.ErrCode(mysql.ErrUnknownCollation)
)
var (
ErrUnknownCollation = terror.ClassDDL.New(codeUnknownCollation, mysql.MySQLErrName[mysql.ErrUnknownCollation])
ErrCollationCharsetMismatch = terror.ClassDDL.New(codeCollationCharsetMismatch, mysql.MySQLErrName[mysql.ErrCollationCharsetMismatch])
) )
// Charset is a charset. // Charset is a charset.
...@@ -40,8 +51,10 @@ type Collation struct { ...@@ -40,8 +51,10 @@ type Collation struct {
} }
var charsets = make(map[string]*Charset) var charsets = make(map[string]*Charset)
var collationsMap = make(map[int]*Collation) var collationsIDMap = make(map[int]*Collation)
var collationsNameMap = make(map[string]*Collation)
var descs = make([]*Desc, 0, len(charsetInfos)) var descs = make([]*Desc, 0, len(charsetInfos))
var supportedCollations = make([]*Collation, 0, len(supportedCollationNames))
// All the supported charsets should be in the following table. // All the supported charsets should be in the following table.
var charsetInfos = []*Charset{ var charsetInfos = []*Charset{
...@@ -52,6 +65,15 @@ var charsetInfos = []*Charset{ ...@@ -52,6 +65,15 @@ var charsetInfos = []*Charset{
{CharsetBin, CollationBin, make(map[string]*Collation), "binary", 1}, {CharsetBin, CollationBin, make(map[string]*Collation), "binary", 1},
} }
// All the names of the supported collations should be in the following table.
var supportedCollationNames = map[string]struct{}{
CollationUTF8: {},
CollationUTF8MB4: {},
CollationASCII: {},
CollationLatin1: {},
CollationBin: {},
}
// Desc is a charset description. // Desc is a charset description.
type Desc struct { type Desc struct {
Name string Name string
...@@ -60,11 +82,16 @@ type Desc struct { ...@@ -60,11 +82,16 @@ type Desc struct {
Maxlen int Maxlen int
} }
// GetAllCharsets gets all charset descriptions in the local charsets. // GetSupportedCharsets gets descriptions for all charsets supported so far.
func GetAllCharsets() []*Desc { func GetSupportedCharsets() []*Desc {
return descs return descs
} }
// GetSupportedCollations gets information for all collations supported so far.
func GetSupportedCollations() []*Collation {
return supportedCollations
}
// ValidCharsetAndCollation checks the charset and the collation validity // ValidCharsetAndCollation checks the charset and the collation validity
// and returns a boolean. // and returns a boolean.
func ValidCharsetAndCollation(cs string, co string) bool { func ValidCharsetAndCollation(cs string, co string) bool {
...@@ -119,17 +146,20 @@ func GetCharsetInfo(cs string) (string, string, error) { ...@@ -119,17 +146,20 @@ func GetCharsetInfo(cs string) (string, string, error) {
// GetCharsetDesc gets charset descriptions in the local charsets. // GetCharsetDesc gets charset descriptions in the local charsets.
func GetCharsetDesc(cs string) (*Desc, error) { func GetCharsetDesc(cs string) (*Desc, error) {
c, ok := charsets[strings.ToLower(cs)] switch strings.ToLower(cs) {
if !ok { case CharsetUTF8:
return descs[0], nil
case CharsetUTF8MB4:
return descs[1], nil
case CharsetASCII:
return descs[2], nil
case CharsetLatin1:
return descs[3], nil
case CharsetBin:
return descs[4], nil
default:
return nil, errors.Errorf("Unknown charset %s", cs) return nil, errors.Errorf("Unknown charset %s", cs)
} }
desc := &Desc{
Name: c.Name,
DefaultCollation: c.DefaultCollation,
Desc: c.Desc,
Maxlen: c.Maxlen,
}
return desc, nil
} }
// GetCharsetInfoByID returns charset and collation for id as cs_number. // GetCharsetInfoByID returns charset and collation for id as cs_number.
...@@ -137,7 +167,7 @@ func GetCharsetInfoByID(coID int) (string, string, error) { ...@@ -137,7 +167,7 @@ func GetCharsetInfoByID(coID int) (string, string, error) {
if coID == mysql.DefaultCollationID { if coID == mysql.DefaultCollationID {
return mysql.DefaultCharset, mysql.DefaultCollationName, nil return mysql.DefaultCharset, mysql.DefaultCollationName, nil
} }
if collation, ok := collationsMap[coID]; ok { if collation, ok := collationsIDMap[coID]; ok {
return collation.CharsetName, collation.Name, nil return collation.CharsetName, collation.Name, nil
} }
return "", "", errors.Errorf("Unknown charset id %d", coID) return "", "", errors.Errorf("Unknown charset id %d", coID)
...@@ -148,6 +178,14 @@ func GetCollations() []*Collation { ...@@ -148,6 +178,14 @@ func GetCollations() []*Collation {
return collations return collations
} }
func GetCollationByName(name string) (*Collation, error) {
collation, ok := collationsNameMap[strings.ToLower(name)]
if !ok {
return nil, ErrUnknownCollation.GenWithStackByArgs(name)
}
return collation, nil
}
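A minimal usage sketch for the new lookup, assuming the vendored package keeps its upstream import path `github.com/pingcap/parser/charset`.

```go
package main

import (
	"fmt"
	"log"

	"github.com/pingcap/parser/charset"
)

func main() {
	c, err := charset.GetCollationByName("utf8mb4_bin")
	if err != nil {
		// Unknown names now surface ErrUnknownCollation.
		log.Fatal(err)
	}
	fmt.Println(c.ID, c.CharsetName) // 46 utf8mb4
}
```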
const ( const (
// CharsetBin is used for marking binary charset. // CharsetBin is used for marking binary charset.
CharsetBin = "binary" CharsetBin = "binary"
...@@ -179,10 +217,10 @@ var collations = []*Collation{ ...@@ -179,10 +217,10 @@ var collations = []*Collation{
{5, "latin1", "latin1_german1_ci", false}, {5, "latin1", "latin1_german1_ci", false},
{6, "hp8", "hp8_english_ci", true}, {6, "hp8", "hp8_english_ci", true},
{7, "koi8r", "koi8r_general_ci", true}, {7, "koi8r", "koi8r_general_ci", true},
{8, "latin1", "latin1_swedish_ci", true}, {8, "latin1", "latin1_swedish_ci", false},
{9, "latin2", "latin2_general_ci", true}, {9, "latin2", "latin2_general_ci", true},
{10, "swe7", "swe7_swedish_ci", true}, {10, "swe7", "swe7_swedish_ci", true},
{11, "ascii", "ascii_general_ci", true}, {11, "ascii", "ascii_general_ci", false},
{12, "ujis", "ujis_japanese_ci", true}, {12, "ujis", "ujis_japanese_ci", true},
{13, "sjis", "sjis_japanese_ci", true}, {13, "sjis", "sjis_japanese_ci", true},
{14, "cp1251", "cp1251_bulgarian_ci", false}, {14, "cp1251", "cp1251_bulgarian_ci", false},
...@@ -203,7 +241,7 @@ var collations = []*Collation{ ...@@ -203,7 +241,7 @@ var collations = []*Collation{
{30, "latin5", "latin5_turkish_ci", true}, {30, "latin5", "latin5_turkish_ci", true},
{31, "latin1", "latin1_german2_ci", false}, {31, "latin1", "latin1_german2_ci", false},
{32, "armscii8", "armscii8_general_ci", true}, {32, "armscii8", "armscii8_general_ci", true},
{33, "utf8", "utf8_general_ci", true}, {33, "utf8", "utf8_general_ci", false},
{34, "cp1250", "cp1250_czech_cs", false}, {34, "cp1250", "cp1250_czech_cs", false},
{35, "ucs2", "ucs2_general_ci", true}, {35, "ucs2", "ucs2_general_ci", true},
{36, "cp866", "cp866_general_ci", true}, {36, "cp866", "cp866_general_ci", true},
...@@ -215,9 +253,9 @@ var collations = []*Collation{ ...@@ -215,9 +253,9 @@ var collations = []*Collation{
{42, "latin7", "latin7_general_cs", false}, {42, "latin7", "latin7_general_cs", false},
{43, "macce", "macce_bin", false}, {43, "macce", "macce_bin", false},
{44, "cp1250", "cp1250_croatian_ci", false}, {44, "cp1250", "cp1250_croatian_ci", false},
{45, "utf8mb4", "utf8mb4_general_ci", true}, {45, "utf8mb4", "utf8mb4_general_ci", false},
{46, "utf8mb4", "utf8mb4_bin", false}, {46, "utf8mb4", "utf8mb4_bin", true},
{47, "latin1", "latin1_bin", false}, {47, "latin1", "latin1_bin", true},
{48, "latin1", "latin1_general_ci", false}, {48, "latin1", "latin1_general_ci", false},
{49, "latin1", "latin1_general_cs", false}, {49, "latin1", "latin1_general_cs", false},
{50, "cp1251", "cp1251_bin", false}, {50, "cp1251", "cp1251_bin", false},
...@@ -235,7 +273,7 @@ var collations = []*Collation{ ...@@ -235,7 +273,7 @@ var collations = []*Collation{
{62, "utf16le", "utf16le_bin", false}, {62, "utf16le", "utf16le_bin", false},
{63, "binary", "binary", true}, {63, "binary", "binary", true},
{64, "armscii8", "armscii8_bin", false}, {64, "armscii8", "armscii8_bin", false},
{65, "ascii", "ascii_bin", false}, {65, "ascii", "ascii_bin", true},
{66, "cp1250", "cp1250_bin", false}, {66, "cp1250", "cp1250_bin", false},
{67, "cp1256", "cp1256_bin", false}, {67, "cp1256", "cp1256_bin", false},
{68, "cp866", "cp866_bin", false}, {68, "cp866", "cp866_bin", false},
...@@ -252,7 +290,7 @@ var collations = []*Collation{ ...@@ -252,7 +290,7 @@ var collations = []*Collation{
{80, "cp850", "cp850_bin", false}, {80, "cp850", "cp850_bin", false},
{81, "cp852", "cp852_bin", false}, {81, "cp852", "cp852_bin", false},
{82, "swe7", "swe7_bin", false}, {82, "swe7", "swe7_bin", false},
{83, "utf8", "utf8_bin", false}, {83, "utf8", "utf8_bin", true},
{84, "big5", "big5_bin", false}, {84, "big5", "big5_bin", false},
{85, "euckr", "euckr_bin", false}, {85, "euckr", "euckr_bin", false},
{86, "gb2312", "gb2312_bin", false}, {86, "gb2312", "gb2312_bin", false},
...@@ -391,6 +429,7 @@ var collations = []*Collation{ ...@@ -391,6 +429,7 @@ var collations = []*Collation{
{245, "utf8mb4", "utf8mb4_croatian_ci", false}, {245, "utf8mb4", "utf8mb4_croatian_ci", false},
{246, "utf8mb4", "utf8mb4_unicode_520_ci", false}, {246, "utf8mb4", "utf8mb4_unicode_520_ci", false},
{247, "utf8mb4", "utf8mb4_vietnamese_ci", false}, {247, "utf8mb4", "utf8mb4_vietnamese_ci", false},
{255, "utf8mb4", "utf8mb4_0900_ai_ci", false},
} }
// init method always puts to the end of file. // init method always puts to the end of file.
...@@ -407,11 +446,18 @@ func init() { ...@@ -407,11 +446,18 @@ func init() {
} }
for _, c := range collations { for _, c := range collations {
collationsMap[c.ID] = c collationsIDMap[c.ID] = c
charset, ok := charsets[c.CharsetName]
if !ok { if _, ok := supportedCollationNames[c.Name]; ok {
continue supportedCollations = append(supportedCollations, c)
}
if charset, ok := charsets[c.CharsetName]; ok {
charset.Collations[c.Name] = c
} }
charset.Collations[c.Name] = c }
for id, name := range mysql.Collations {
collationsNameMap[name] = collationsIDMap[int(id)]
} }
} }
...@@ -19,6 +19,9 @@ jobs: ...@@ -19,6 +19,9 @@ jobs:
- run: - run:
name: "Build & Test" name: "Build & Test"
command: make test command: make test
- run:
name: "Upload coverage"
command: bash <(curl -s https://codecov.io/bash)
build-integration: build-integration:
docker: docker:
- image: golang:1.11 - image: golang:1.11
......
...@@ -111,6 +111,9 @@ func (d *sqlDigester) normalize(sql string) { ...@@ -111,6 +111,9 @@ func (d *sqlDigester) normalize(sql string) {
d.lexer.reset(sql) d.lexer.reset(sql)
for { for {
tok, pos, lit := d.lexer.scan() tok, pos, lit := d.lexer.scan()
if tok == invalid {
break
}
if tok == unicode.ReplacementChar && d.lexer.r.eof() { if tok == unicode.ReplacementChar && d.lexer.r.eof() {
break break
} }
......
...@@ -216,6 +216,8 @@ const ( ...@@ -216,6 +216,8 @@ const (
RestoreNameLowercase RestoreNameLowercase
RestoreNameDoubleQuotes RestoreNameDoubleQuotes
RestoreNameBackQuotes RestoreNameBackQuotes
RestoreSpacesAroundBinaryOperation
) )
const ( const (
...@@ -271,6 +273,11 @@ func (rf RestoreFlags) HasNameBackQuotesFlag() bool { ...@@ -271,6 +273,11 @@ func (rf RestoreFlags) HasNameBackQuotesFlag() bool {
return rf.has(RestoreNameBackQuotes) return rf.has(RestoreNameBackQuotes)
} }
// HasSpacesAroundBinaryOperationFlag returns a boolean indicating whether `rf` has `RestoreSpacesAroundBinaryOperation` flag.
func (rf RestoreFlags) HasSpacesAroundBinaryOperationFlag() bool {
return rf.has(RestoreSpacesAroundBinaryOperation)
}
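A tiny in-package sketch of checking the new flag (the other flag name is taken from the constant block above); how a restored statement actually looks also depends on the other flags set.

```go
// Fragment, same package as the flag definitions above.
flags := RestoreSpacesAroundBinaryOperation | RestoreNameBackQuotes
fmt.Println(flags.HasSpacesAroundBinaryOperationFlag()) // true: binary operators get surrounding spaces
```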
// RestoreCtx is `Restore` context to hold flags and writer. // RestoreCtx is `Restore` context to hold flags and writer.
type RestoreCtx struct { type RestoreCtx struct {
Flags RestoreFlags Flags RestoreFlags
......
...@@ -8,7 +8,7 @@ require ( ...@@ -8,7 +8,7 @@ require (
github.com/cznic/strutil v0.0.0-20171016134553-529a34b1c186 github.com/cznic/strutil v0.0.0-20171016134553-529a34b1c186
github.com/cznic/y v0.0.0-20170802143616-045f81c6662a github.com/cznic/y v0.0.0-20170802143616-045f81c6662a
github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8 github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8
github.com/pingcap/errors v0.11.1 github.com/pingcap/errors v0.11.4
github.com/pingcap/tidb v0.0.0-20190321025159-e8299209340c github.com/pingcap/tidb v0.0.0-20190321025159-e8299209340c
github.com/pingcap/tipb v0.0.0-20190107072121-abbec73437b7 github.com/pingcap/tipb v0.0.0-20190107072121-abbec73437b7
github.com/sirupsen/logrus v1.3.0 github.com/sirupsen/logrus v1.3.0
......
...@@ -161,6 +161,7 @@ var tokenMap = map[string]int{ ...@@ -161,6 +161,7 @@ var tokenMap = map[string]int{
"BIT_OR": bitOr, "BIT_OR": bitOr,
"BIT_XOR": bitXor, "BIT_XOR": bitXor,
"BLOB": blobType, "BLOB": blobType,
"BLOCK": block,
"BOOL": boolType, "BOOL": boolType,
"BOOLEAN": booleanType, "BOOLEAN": booleanType,
"BOTH": both, "BOTH": both,
...@@ -196,9 +197,11 @@ var tokenMap = map[string]int{ ...@@ -196,9 +197,11 @@ var tokenMap = map[string]int{
"CONNECTION": connection, "CONNECTION": connection,
"CONSISTENT": consistent, "CONSISTENT": consistent,
"CONSTRAINT": constraint, "CONSTRAINT": constraint,
"CONTEXT": context,
"CONVERT": convert, "CONVERT": convert,
"COPY": copyKwd, "COPY": copyKwd,
"COUNT": count, "COUNT": count,
"CPU": cpu,
"CREATE": create, "CREATE": create,
"CROSS": cross, "CROSS": cross,
"CURRENT": current, "CURRENT": current,
...@@ -261,6 +264,7 @@ var tokenMap = map[string]int{ ...@@ -261,6 +264,7 @@ var tokenMap = map[string]int{
"EXPLAIN": explain, "EXPLAIN": explain,
"EXTRACT": extract, "EXTRACT": extract,
"FALSE": falseKwd, "FALSE": falseKwd,
"FAULTS": faultsSym,
"FIELDS": fields, "FIELDS": fields,
"FIRST": first, "FIRST": first,
"FIXED": fixed, "FIXED": fixed,
...@@ -293,6 +297,7 @@ var tokenMap = map[string]int{ ...@@ -293,6 +297,7 @@ var tokenMap = map[string]int{
"IF": ifKwd, "IF": ifKwd,
"IGNORE": ignore, "IGNORE": ignore,
"IN": in, "IN": in,
"INCREMENTAL": incremental,
"INDEX": index, "INDEX": index,
"INDEXES": indexes, "INDEXES": indexes,
"INFILE": infile, "INFILE": infile,
...@@ -306,6 +311,8 @@ var tokenMap = map[string]int{ ...@@ -306,6 +311,8 @@ var tokenMap = map[string]int{
"INT3": int3Type, "INT3": int3Type,
"INT4": int4Type, "INT4": int4Type,
"INT8": int8Type, "INT8": int8Type,
"IO": io,
"IPC": ipc,
"INTEGER": integerType, "INTEGER": integerType,
"INTERVAL": interval, "INTERVAL": interval,
"INTERNAL": internal, "INTERNAL": internal,
...@@ -352,6 +359,7 @@ var tokenMap = map[string]int{ ...@@ -352,6 +359,7 @@ var tokenMap = map[string]int{
"MEDIUMBLOB": mediumblobType, "MEDIUMBLOB": mediumblobType,
"MEDIUMINT": mediumIntType, "MEDIUMINT": mediumIntType,
"MEDIUMTEXT": mediumtextType, "MEDIUMTEXT": mediumtextType,
"MEMORY": memory,
"MERGE": merge, "MERGE": merge,
"MICROSECOND": microsecond, "MICROSECOND": microsecond,
"MIN": min, "MIN": min,
...@@ -382,15 +390,18 @@ var tokenMap = map[string]int{ ...@@ -382,15 +390,18 @@ var tokenMap = map[string]int{
"OFFSET": offset, "OFFSET": offset,
"ON": on, "ON": on,
"ONLY": only, "ONLY": only,
"OPTIMISTIC": optimistic,
"OPTION": option, "OPTION": option,
"OPTIONALLY": optionally, "OPTIONALLY": optionally,
"OR": or, "OR": or,
"ORDER": order, "ORDER": order,
"OUTER": outer, "OUTER": outer,
"PACK_KEYS": packKeys, "PACK_KEYS": packKeys,
"PAGE": pageSym,
"PARTITION": partition, "PARTITION": partition,
"PARTITIONS": partitions, "PARTITIONS": partitions,
"PASSWORD": password, "PASSWORD": password,
"PESSIMISTIC": pessimistic,
"PLUGINS": plugins, "PLUGINS": plugins,
"POSITION": position, "POSITION": position,
"PRECEDING": preceding, "PRECEDING": preceding,
...@@ -401,6 +412,7 @@ var tokenMap = map[string]int{ ...@@ -401,6 +412,7 @@ var tokenMap = map[string]int{
"PROCEDURE": procedure, "PROCEDURE": procedure,
"PROCESS": process, "PROCESS": process,
"PROCESSLIST": processlist, "PROCESSLIST": processlist,
"PROFILE": profile,
"PROFILES": profiles, "PROFILES": profiles,
"PUMP": pump, "PUMP": pump,
"QUARTER": quarter, "QUARTER": quarter,
...@@ -408,6 +420,7 @@ var tokenMap = map[string]int{ ...@@ -408,6 +420,7 @@ var tokenMap = map[string]int{
"QUERIES": queries, "QUERIES": queries,
"QUICK": quick, "QUICK": quick,
"SHARD_ROW_ID_BITS": shardRowIDBits, "SHARD_ROW_ID_BITS": shardRowIDBits,
"PRE_SPLIT_REGIONS": preSplitRegions,
"RANGE": rangeKwd, "RANGE": rangeKwd,
"RECOVER": recover, "RECOVER": recover,
"READ": read, "READ": read,
...@@ -454,10 +467,12 @@ var tokenMap = map[string]int{ ...@@ -454,10 +467,12 @@ var tokenMap = map[string]int{
"SMALLINT": smallIntType, "SMALLINT": smallIntType,
"SNAPSHOT": snapshot, "SNAPSHOT": snapshot,
"SOME": some, "SOME": some,
"SPLIT": split,
"SQL": sql, "SQL": sql,
"SQL_CACHE": sqlCache, "SQL_CACHE": sqlCache,
"SQL_CALC_FOUND_ROWS": sqlCalcFoundRows, "SQL_CALC_FOUND_ROWS": sqlCalcFoundRows,
"SQL_NO_CACHE": sqlNoCache, "SQL_NO_CACHE": sqlNoCache,
"SOURCE": source,
"SSL": ssl, "SSL": ssl,
"START": start, "START": start,
"STARTING": starting, "STARTING": starting,
...@@ -468,6 +483,9 @@ var tokenMap = map[string]int{ ...@@ -468,6 +483,9 @@ var tokenMap = map[string]int{
"STATS_META": statsMeta, "STATS_META": statsMeta,
"STATS_PERSISTENT": statsPersistent, "STATS_PERSISTENT": statsPersistent,
"STATUS": status, "STATUS": status,
"SWAPS": swaps,
"SWITCHES": switchesSym,
"OPEN": open,
"STD": stddevPop, "STD": stddevPop,
"STDDEV": stddevPop, "STDDEV": stddevPop,
"STDDEV_POP": stddevPop, "STDDEV_POP": stddevPop,
...@@ -513,6 +531,7 @@ var tokenMap = map[string]int{ ...@@ -513,6 +531,7 @@ var tokenMap = map[string]int{
"TOKUDB_ZLIB": tokudbZlib, "TOKUDB_ZLIB": tokudbZlib,
"TOP": top, "TOP": top,
"TRACE": trace, "TRACE": trace,
"TRADITIONAL": traditional,
"TRAILING": trailing, "TRAILING": trailing,
"TRANSACTION": transaction, "TRANSACTION": transaction,
"TRIGGER": trigger, "TRIGGER": trigger,
......
...@@ -29,63 +29,65 @@ type ActionType byte ...@@ -29,63 +29,65 @@ type ActionType byte
// List DDL actions. // List DDL actions.
const ( const (
ActionNone ActionType = 0 ActionNone ActionType = 0
ActionCreateSchema ActionType = 1 ActionCreateSchema ActionType = 1
ActionDropSchema ActionType = 2 ActionDropSchema ActionType = 2
ActionCreateTable ActionType = 3 ActionCreateTable ActionType = 3
ActionDropTable ActionType = 4 ActionDropTable ActionType = 4
ActionAddColumn ActionType = 5 ActionAddColumn ActionType = 5
ActionDropColumn ActionType = 6 ActionDropColumn ActionType = 6
ActionAddIndex ActionType = 7 ActionAddIndex ActionType = 7
ActionDropIndex ActionType = 8 ActionDropIndex ActionType = 8
ActionAddForeignKey ActionType = 9 ActionAddForeignKey ActionType = 9
ActionDropForeignKey ActionType = 10 ActionDropForeignKey ActionType = 10
ActionTruncateTable ActionType = 11 ActionTruncateTable ActionType = 11
ActionModifyColumn ActionType = 12 ActionModifyColumn ActionType = 12
ActionRebaseAutoID ActionType = 13 ActionRebaseAutoID ActionType = 13
ActionRenameTable ActionType = 14 ActionRenameTable ActionType = 14
ActionSetDefaultValue ActionType = 15 ActionSetDefaultValue ActionType = 15
ActionShardRowID ActionType = 16 ActionShardRowID ActionType = 16
ActionModifyTableComment ActionType = 17 ActionModifyTableComment ActionType = 17
ActionRenameIndex ActionType = 18 ActionRenameIndex ActionType = 18
ActionAddTablePartition ActionType = 19 ActionAddTablePartition ActionType = 19
ActionDropTablePartition ActionType = 20 ActionDropTablePartition ActionType = 20
ActionCreateView ActionType = 21 ActionCreateView ActionType = 21
ActionModifyTableCharsetAndCollate ActionType = 22 ActionModifyTableCharsetAndCollate ActionType = 22
ActionTruncateTablePartition ActionType = 23 ActionTruncateTablePartition ActionType = 23
ActionDropView ActionType = 24 ActionDropView ActionType = 24
ActionRecoverTable ActionType = 25 ActionRecoverTable ActionType = 25
ActionModifySchemaCharsetAndCollate ActionType = 26
) )
// AddIndexStr is a string related to the operation of "add index". // AddIndexStr is a string related to the operation of "add index".
const AddIndexStr = "add index" const AddIndexStr = "add index"
var actionMap = map[ActionType]string{ var actionMap = map[ActionType]string{
ActionCreateSchema: "create schema", ActionCreateSchema: "create schema",
ActionDropSchema: "drop schema", ActionDropSchema: "drop schema",
ActionCreateTable: "create table", ActionCreateTable: "create table",
ActionDropTable: "drop table", ActionDropTable: "drop table",
ActionAddColumn: "add column", ActionAddColumn: "add column",
ActionDropColumn: "drop column", ActionDropColumn: "drop column",
ActionAddIndex: AddIndexStr, ActionAddIndex: AddIndexStr,
ActionDropIndex: "drop index", ActionDropIndex: "drop index",
ActionAddForeignKey: "add foreign key", ActionAddForeignKey: "add foreign key",
ActionDropForeignKey: "drop foreign key", ActionDropForeignKey: "drop foreign key",
ActionTruncateTable: "truncate table", ActionTruncateTable: "truncate table",
ActionModifyColumn: "modify column", ActionModifyColumn: "modify column",
ActionRebaseAutoID: "rebase auto_increment ID", ActionRebaseAutoID: "rebase auto_increment ID",
ActionRenameTable: "rename table", ActionRenameTable: "rename table",
ActionSetDefaultValue: "set default value", ActionSetDefaultValue: "set default value",
ActionShardRowID: "shard row ID", ActionShardRowID: "shard row ID",
ActionModifyTableComment: "modify table comment", ActionModifyTableComment: "modify table comment",
ActionRenameIndex: "rename index", ActionRenameIndex: "rename index",
ActionAddTablePartition: "add partition", ActionAddTablePartition: "add partition",
ActionDropTablePartition: "drop partition", ActionDropTablePartition: "drop partition",
ActionCreateView: "create view", ActionCreateView: "create view",
ActionModifyTableCharsetAndCollate: "modify table charset and collate", ActionModifyTableCharsetAndCollate: "modify table charset and collate",
ActionTruncateTablePartition: "truncate partition", ActionTruncateTablePartition: "truncate partition",
ActionDropView: "drop view", ActionDropView: "drop view",
ActionRecoverTable: "recover table", ActionRecoverTable: "recover table",
ActionModifySchemaCharsetAndCollate: "modify schema charset and collate",
} }
// String return current ddl action in string // String return current ddl action in string
......
...@@ -175,9 +175,15 @@ const ( ...@@ -175,9 +175,15 @@ const (
// After version V2.1.2 (PR#8738) , TiDB add UTF8 check, then the user upgrade from v2.0.8 insert some UTF8MB4 characters will got error. // After version V2.1.2 (PR#8738) , TiDB add UTF8 check, then the user upgrade from v2.0.8 insert some UTF8MB4 characters will got error.
// This is not compatibility for user. Then we try to fix this in PR #9820, and increase the version number. // This is not compatibility for user. Then we try to fix this in PR #9820, and increase the version number.
TableInfoVersion2 = uint16(2) TableInfoVersion2 = uint16(2)
// TableInfoVersion3 means the table info version is 3.
// This version aims to deal with upper-cased charset name in TableInfo stored by versions prior to TiDB v2.1.9:
// TiDB always assumes all charsets / collations are lower-cased and tries to convert them if they are not.
// However, the conversion was missed in some scenarios before v2.1.9, so for all tables created prior to TableInfoVersion3, their
// charsets / collations will be converted to lower-case while being loaded from storage.
TableInfoVersion3 = uint16(3)
// CurrLatestTableInfoVersion means the latest table info in the current TiDB. // CurrLatestTableInfoVersion means the latest table info in the current TiDB.
CurrLatestTableInfoVersion = TableInfoVersion2 CurrLatestTableInfoVersion = TableInfoVersion3
) )
// ExtraHandleName is the name of ExtraHandle Column. // ExtraHandleName is the name of ExtraHandle Column.
...@@ -214,6 +220,10 @@ type TableInfo struct { ...@@ -214,6 +220,10 @@ type TableInfo struct {
ShardRowIDBits uint64 ShardRowIDBits uint64
// MaxShardRowIDBits uses to record the max ShardRowIDBits be used so far. // MaxShardRowIDBits uses to record the max ShardRowIDBits be used so far.
MaxShardRowIDBits uint64 `json:"max_shard_row_id_bits"` MaxShardRowIDBits uint64 `json:"max_shard_row_id_bits"`
// PreSplitRegions specifies the pre-split regions when creating the table.
// The pre-split region count is 2^(PreSplitRegions-1).
// PreSplitRegions should be less than or equal to ShardRowIDBits.
PreSplitRegions uint64 `json:"pre_split_regions"`
Partition *PartitionInfo `json:"partition"` Partition *PartitionInfo `json:"partition"`
...@@ -716,8 +726,8 @@ func ColumnToProto(c *ColumnInfo) *tipb.ColumnInfo { ...@@ -716,8 +726,8 @@ func ColumnToProto(c *ColumnInfo) *tipb.ColumnInfo {
// TODO: update it when more collate is supported. // TODO: update it when more collate is supported.
func collationToProto(c string) int32 { func collationToProto(c string) int32 {
v := mysql.CollationNames[c] v := mysql.CollationNames[c]
if v == mysql.BinaryCollationID { if v == mysql.BinaryDefaultCollationID {
return int32(mysql.BinaryCollationID) return int32(mysql.BinaryDefaultCollationID)
} }
// We only support binary and utf8_bin collation. // We only support binary and utf8_bin collation.
// Setting other collations to utf8_bin for old data compatibility. // Setting other collations to utf8_bin for old data compatibility.
......
...@@ -20,15 +20,15 @@ func CharsetNameToID(charset string) uint8 { ...@@ -20,15 +20,15 @@ func CharsetNameToID(charset string) uint8 {
// Use quick path for TiDB to avoid access CharsetIDs map // Use quick path for TiDB to avoid access CharsetIDs map
// "SHOW CHARACTER SET;" to see all the supported character sets. // "SHOW CHARACTER SET;" to see all the supported character sets.
if charset == "utf8mb4" { if charset == "utf8mb4" {
return UTF8MB4CollationID return UTF8MB4DefaultCollationID
} else if charset == "binary" { } else if charset == "binary" {
return BinaryCollationID return BinaryDefaultCollationID
} else if charset == "utf8" { } else if charset == "utf8" {
return UTF8CollationID return UTF8DefaultCollationID
} else if charset == "ascii" { } else if charset == "ascii" {
return ASCIICollationID return ASCIIDefaultCollationID
} else if charset == "latin1" { } else if charset == "latin1" {
return Latin1CollationID return Latin1DefaultCollationID
} else { } else {
return CharsetIDs[charset] return CharsetIDs[charset]
} }
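A hedged usage sketch of the fast path above, assuming the vendored github.com/pingcap/parser/mysql package from this commit: the common charsets resolve to the renamed *DefaultCollationID constants, and anything else falls through to the CharsetIDs map.

package main

import (
	"fmt"

	"github.com/pingcap/parser/mysql"
)

func main() {
	fmt.Println(mysql.CharsetNameToID("utf8mb4")) // 46, UTF8MB4DefaultCollationID (utf8mb4_bin)
	fmt.Println(mysql.CharsetNameToID("binary"))  // 63, BinaryDefaultCollationID
	fmt.Println(mysql.CharsetNameToID("gbk"))     // 28, taken from the CharsetIDs map
}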
...@@ -41,10 +41,10 @@ var CharsetIDs = map[string]uint8{ ...@@ -41,10 +41,10 @@ var CharsetIDs = map[string]uint8{
"cp850": 4, "cp850": 4,
"hp8": 6, "hp8": 6,
"koi8r": 7, "koi8r": 7,
"latin1": Latin1CollationID, "latin1": Latin1DefaultCollationID,
"latin2": 9, "latin2": 9,
"swe7": 10, "swe7": 10,
"ascii": ASCIICollationID, "ascii": ASCIIDefaultCollationID,
"ujis": 12, "ujis": 12,
"sjis": 13, "sjis": 13,
"hebrew": 16, "hebrew": 16,
...@@ -57,7 +57,7 @@ var CharsetIDs = map[string]uint8{ ...@@ -57,7 +57,7 @@ var CharsetIDs = map[string]uint8{
"gbk": 28, "gbk": 28,
"latin5": 30, "latin5": 30,
"armscii8": 32, "armscii8": 32,
"utf8": UTF8CollationID, "utf8": UTF8DefaultCollationID,
"ucs2": 35, "ucs2": 35,
"cp866": 36, "cp866": 36,
"keybcs2": 37, "keybcs2": 37,
...@@ -65,14 +65,14 @@ var CharsetIDs = map[string]uint8{ ...@@ -65,14 +65,14 @@ var CharsetIDs = map[string]uint8{
"macroman": 39, "macroman": 39,
"cp852": 40, "cp852": 40,
"latin7": 41, "latin7": 41,
"utf8mb4": UTF8MB4CollationID, "utf8mb4": UTF8MB4DefaultCollationID,
"cp1251": 51, "cp1251": 51,
"utf16": 54, "utf16": 54,
"utf16le": 56, "utf16le": 56,
"cp1256": 57, "cp1256": 57,
"cp1257": 59, "cp1257": 59,
"utf32": 60, "utf32": 60,
"binary": BinaryCollationID, "binary": BinaryDefaultCollationID,
"geostd8": 92, "geostd8": 92,
"cp932": 95, "cp932": 95,
"eucjpms": 97, "eucjpms": 97,
...@@ -85,10 +85,10 @@ var Charsets = map[string]string{ ...@@ -85,10 +85,10 @@ var Charsets = map[string]string{
"cp850": "cp850_general_ci", "cp850": "cp850_general_ci",
"hp8": "hp8_english_ci", "hp8": "hp8_english_ci",
"koi8r": "koi8r_general_ci", "koi8r": "koi8r_general_ci",
"latin1": "latin1_swedish_ci", "latin1": "latin1_bin",
"latin2": "latin2_general_ci", "latin2": "latin2_general_ci",
"swe7": "swe7_swedish_ci", "swe7": "swe7_swedish_ci",
"ascii": "ascii_general_ci", "ascii": "ascii_bin",
"ujis": "ujis_japanese_ci", "ujis": "ujis_japanese_ci",
"sjis": "sjis_japanese_ci", "sjis": "sjis_japanese_ci",
"hebrew": "hebrew_general_ci", "hebrew": "hebrew_general_ci",
...@@ -101,7 +101,7 @@ var Charsets = map[string]string{ ...@@ -101,7 +101,7 @@ var Charsets = map[string]string{
"gbk": "gbk_chinese_ci", "gbk": "gbk_chinese_ci",
"latin5": "latin5_turkish_ci", "latin5": "latin5_turkish_ci",
"armscii8": "armscii8_general_ci", "armscii8": "armscii8_general_ci",
"utf8": "utf8_general_ci", "utf8": "utf8_bin",
"ucs2": "ucs2_general_ci", "ucs2": "ucs2_general_ci",
"cp866": "cp866_general_ci", "cp866": "cp866_general_ci",
"keybcs2": "keybcs2_general_ci", "keybcs2": "keybcs2_general_ci",
...@@ -109,7 +109,7 @@ var Charsets = map[string]string{ ...@@ -109,7 +109,7 @@ var Charsets = map[string]string{
"macroman": "macroman_general_ci", "macroman": "macroman_general_ci",
"cp852": "cp852_general_ci", "cp852": "cp852_general_ci",
"latin7": "latin7_general_ci", "latin7": "latin7_general_ci",
"utf8mb4": "utf8mb4_general_ci", "utf8mb4": "utf8mb4_bin",
"cp1251": "cp1251_general_ci", "cp1251": "cp1251_general_ci",
"utf16": "utf16_general_ci", "utf16": "utf16_general_ci",
"utf16le": "utf16le_general_ci", "utf16le": "utf16le_general_ci",
...@@ -122,7 +122,7 @@ var Charsets = map[string]string{ ...@@ -122,7 +122,7 @@ var Charsets = map[string]string{
"eucjpms": "eucjpms_japanese_ci", "eucjpms": "eucjpms_japanese_ci",
} }
// Collations maps MySQL default collation ID to its name. // Collations maps MySQL collation ID to its name.
var Collations = map[uint8]string{ var Collations = map[uint8]string{
1: "big5_chinese_ci", 1: "big5_chinese_ci",
2: "latin2_czech_cs", 2: "latin2_czech_cs",
...@@ -343,9 +343,10 @@ var Collations = map[uint8]string{ ...@@ -343,9 +343,10 @@ var Collations = map[uint8]string{
245: "utf8mb4_croatian_ci", 245: "utf8mb4_croatian_ci",
246: "utf8mb4_unicode_520_ci", 246: "utf8mb4_unicode_520_ci",
247: "utf8mb4_vietnamese_ci", 247: "utf8mb4_vietnamese_ci",
255: "utf8mb4_0900_ai_ci",
} }
// CollationNames maps MySQL default collation name to its ID // CollationNames maps MySQL collation name to its ID
var CollationNames = map[string]uint8{ var CollationNames = map[string]uint8{
"big5_chinese_ci": 1, "big5_chinese_ci": 1,
"latin2_czech_cs": 2, "latin2_czech_cs": 2,
...@@ -566,6 +567,7 @@ var CollationNames = map[string]uint8{ ...@@ -566,6 +567,7 @@ var CollationNames = map[string]uint8{
"utf8mb4_croatian_ci": 245, "utf8mb4_croatian_ci": 245,
"utf8mb4_unicode_520_ci": 246, "utf8mb4_unicode_520_ci": 246,
"utf8mb4_vietnamese_ci": 247, "utf8mb4_vietnamese_ci": 247,
"utf8mb4_0900_ai_ci": 255,
} }
// MySQL collation information. // MySQL collation information.
...@@ -574,15 +576,15 @@ const ( ...@@ -574,15 +576,15 @@ const (
UTF8MB4Charset = "utf8mb4" UTF8MB4Charset = "utf8mb4"
DefaultCharset = UTF8MB4Charset DefaultCharset = UTF8MB4Charset
// DefaultCollationID is utf8mb4_bin(46) // DefaultCollationID is utf8mb4_bin(46)
DefaultCollationID = 46 DefaultCollationID = 46
Latin1CollationID = 8 Latin1DefaultCollationID = 47
ASCIICollationID = 11 ASCIIDefaultCollationID = 65
UTF8CollationID = 33 UTF8DefaultCollationID = 83
UTF8MB4CollationID = 45 UTF8MB4DefaultCollationID = 46
BinaryCollationID = 63 BinaryDefaultCollationID = 63
UTF8DefaultCollation = "utf8_bin" UTF8DefaultCollation = "utf8_bin"
UTF8MB4DefaultCollation = "utf8mb4_bin" UTF8MB4DefaultCollation = "utf8mb4_bin"
DefaultCollationName = UTF8MB4DefaultCollation DefaultCollationName = UTF8MB4DefaultCollation
// MaxBytesOfCharacter, is the max bytes length of a character, // MaxBytesOfCharacter, is the max bytes length of a character,
// refer to RFC3629, in UTF-8, characters from the U+0000..U+10FFFF range // refer to RFC3629, in UTF-8, characters from the U+0000..U+10FFFF range
......
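The net effect of the renamed constants above is that every fast-path charset now defaults to a binary-style collation. A self-contained sketch of that mapping, with the IDs copied from this diff:

package main

import "fmt"

// defaultCollationID mirrors the *DefaultCollationID constants above.
var defaultCollationID = map[string]uint8{
	"latin1":  47, // latin1_bin
	"ascii":   65, // ascii_bin
	"utf8":    83, // utf8_bin
	"utf8mb4": 46, // utf8mb4_bin
	"binary":  63, // binary
}

func main() {
	for _, cs := range []string{"utf8", "utf8mb4", "binary"} {
		fmt.Printf("%s -> %d\n", cs, defaultCollationID[cs])
	}
}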
...@@ -882,6 +882,7 @@ const ( ...@@ -882,6 +882,7 @@ const (
ErrMustChangePasswordLogin = 1862 ErrMustChangePasswordLogin = 1862
ErrRowInWrongPartition = 1863 ErrRowInWrongPartition = 1863
ErrErrorLast = 1863 ErrErrorLast = 1863
ErrInvalidJSONData = 3069
ErrGeneratedColumnFunctionIsNotAllowed = 3102 ErrGeneratedColumnFunctionIsNotAllowed = 3102
ErrBadGeneratedColumn = 3105 ErrBadGeneratedColumn = 3105
ErrUnsupportedOnGeneratedColumn = 3106 ErrUnsupportedOnGeneratedColumn = 3106
...@@ -890,10 +891,11 @@ const ( ...@@ -890,10 +891,11 @@ const (
ErrGeneratedColumnRefAutoInc = 3109 ErrGeneratedColumnRefAutoInc = 3109
ErrInvalidJSONText = 3140 ErrInvalidJSONText = 3140
ErrInvalidJSONPath = 3143 ErrInvalidJSONPath = 3143
ErrInvalidJSONData = 3146 ErrInvalidTypeForJSON = 3146
ErrInvalidJSONPathWildcard = 3149 ErrInvalidJSONPathWildcard = 3149
ErrInvalidJSONContainsPathType = 3150 ErrInvalidJSONContainsPathType = 3150
ErrJSONUsedAsKey = 3152 ErrJSONUsedAsKey = 3152
ErrBadUser = 3162
ErrRoleNotGranted = 3530 ErrRoleNotGranted = 3530
ErrWindowNoSuchWindow = 3579 ErrWindowNoSuchWindow = 3579
ErrWindowCircularityInWindowGraph = 3580 ErrWindowCircularityInWindowGraph = 3580
...@@ -921,6 +923,8 @@ const ( ...@@ -921,6 +923,8 @@ const (
ErrMemExceedThreshold = 8001 ErrMemExceedThreshold = 8001
ErrForUpdateCantRetry = 8002 ErrForUpdateCantRetry = 8002
ErrAdminCheckTable = 8003 ErrAdminCheckTable = 8003
ErrTxnTooLarge = 8004
ErrWriteConflictInTiDB = 8005
ErrInvalidPluginID = 8101 ErrInvalidPluginID = 8101
ErrInvalidPluginManifest = 8102 ErrInvalidPluginManifest = 8102
ErrInvalidPluginName = 8103 ErrInvalidPluginName = 8103
...@@ -938,6 +942,5 @@ const ( ...@@ -938,6 +942,5 @@ const (
ErrResolveLockTimeout = 9004 ErrResolveLockTimeout = 9004
ErrRegionUnavailable = 9005 ErrRegionUnavailable = 9005
ErrGCTooEarly = 9006 ErrGCTooEarly = 9006
ErrWriteConflict = 9007
ErrTxnTooLarge = 9500
) )
...@@ -885,9 +885,10 @@ var MySQLErrName = map[uint16]string{ ...@@ -885,9 +885,10 @@ var MySQLErrName = map[uint16]string{
ErrDependentByGeneratedColumn: "Column '%s' has a generated column dependency.", ErrDependentByGeneratedColumn: "Column '%s' has a generated column dependency.",
ErrGeneratedColumnFunctionIsNotAllowed: "Expression of generated column '%s' contains a disallowed function.", ErrGeneratedColumnFunctionIsNotAllowed: "Expression of generated column '%s' contains a disallowed function.",
ErrGeneratedColumnRefAutoInc: "Generated column '%s' cannot refer to auto-increment column.", ErrGeneratedColumnRefAutoInc: "Generated column '%s' cannot refer to auto-increment column.",
ErrInvalidJSONData: "Invalid JSON data provided to function %s: %s",
ErrInvalidJSONText: "Invalid JSON text: %-.192s", ErrInvalidJSONText: "Invalid JSON text: %-.192s",
ErrInvalidJSONPath: "Invalid JSON path expression %s.", ErrInvalidJSONPath: "Invalid JSON path expression %s.",
ErrInvalidJSONData: "Invalid data type for JSON data", ErrInvalidTypeForJSON: "Invalid data type for JSON data in argument %d to function %s; a JSON string or JSON type is required.",
ErrInvalidJSONPathWildcard: "In this situation, path expressions may not contain the * and ** tokens.", ErrInvalidJSONPathWildcard: "In this situation, path expressions may not contain the * and ** tokens.",
ErrInvalidJSONContainsPathType: "The second argument can only be either 'one' or 'all'.", ErrInvalidJSONContainsPathType: "The second argument can only be either 'one' or 'all'.",
ErrJSONUsedAsKey: "JSON column '%-.192s' cannot be used in key specification.", ErrJSONUsedAsKey: "JSON column '%-.192s' cannot be used in key specification.",
...@@ -915,10 +916,11 @@ var MySQLErrName = map[uint16]string{ ...@@ -915,10 +916,11 @@ var MySQLErrName = map[uint16]string{
ErrRoleNotGranted: "%s is is not granted to %s", ErrRoleNotGranted: "%s is is not granted to %s",
// TiDB errors. // TiDB errors.
ErrMemExceedThreshold: "%s holds %dB memory, exceeds threshold %dB.%s", ErrMemExceedThreshold: "%s holds %dB memory, exceeds threshold %dB.%s",
ErrForUpdateCantRetry: "[%d] can not retry select for update statement", ErrForUpdateCantRetry: "[%d] can not retry select for update statement",
ErrAdminCheckTable: "TiDB admin check table failed.", ErrAdminCheckTable: "TiDB admin check table failed.",
ErrTxnTooLarge: "Transaction is too large",
ErrWriteConflictInTiDB: "Write conflict, txnStartTS %d is stale",
ErrInvalidPluginID: "Wrong plugin id: %s, valid plugin id is [name]-[version], both name and version should not contain '-'", ErrInvalidPluginID: "Wrong plugin id: %s, valid plugin id is [name]-[version], both name and version should not contain '-'",
ErrInvalidPluginManifest: "Cannot read plugin %s's manifest", ErrInvalidPluginManifest: "Cannot read plugin %s's manifest",
ErrInvalidPluginName: "Plugin load with %s but got wrong name %s", ErrInvalidPluginName: "Plugin load with %s but got wrong name %s",
...@@ -936,6 +938,5 @@ var MySQLErrName = map[uint16]string{ ...@@ -936,6 +938,5 @@ var MySQLErrName = map[uint16]string{
ErrResolveLockTimeout: "Resolve lock timeout", ErrResolveLockTimeout: "Resolve lock timeout",
ErrRegionUnavailable: "Region is unavailable", ErrRegionUnavailable: "Region is unavailable",
ErrGCTooEarly: "GC life time is shorter than transaction duration, transaction starts at %v, GC safe point is %v", ErrGCTooEarly: "GC life time is shorter than transaction duration, transaction starts at %v, GC safe point is %v",
ErrWriteConflict: "Write conflict, txnStartTS=%d, conflictStartTS=%d, conflictCommitTS=%d, key=%s",
ErrTxnTooLarge: "Transaction is too large",
} }
...@@ -239,7 +239,15 @@ func (e *Error) FastGen(format string, args ...interface{}) error { ...@@ -239,7 +239,15 @@ func (e *Error) FastGen(format string, args ...interface{}) error {
err := *e err := *e
err.message = format err.message = format
err.args = args err.args = args
return &err return errors.SuspendStack(&err)
}
// FastGenByArgs generates a new *Error with the same class and code, and new arguments.
// This will not call runtime.Caller to get file and line.
func (e *Error) FastGenByArgs(args ...interface{}) error {
err := *e
err.args = args
return errors.SuspendStack(&err)
} }
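A hedged usage sketch for the new FastGenByArgs, assuming the vendored parser/terror and parser/mysql packages: it reuses an existing *Error's class and code, swaps in new arguments, and goes through errors.SuspendStack so no stack is captured on the hot path.

package main

import (
	"fmt"

	"github.com/pingcap/parser/mysql"
	"github.com/pingcap/parser/terror"
)

// errTruncated is an illustrative *Error; real code would normally reuse one
// of the predefined errors instead of constructing its own.
var errTruncated = terror.ClassTypes.New(
	terror.ErrCode(mysql.ErrTruncatedWrongValue),
	mysql.MySQLErrName[mysql.ErrTruncatedWrongValue],
)

func main() {
	// Same class and code, new args, and no runtime.Caller / stack capture.
	err := errTruncated.FastGenByArgs("time", "'abc'")
	fmt.Println(err)
}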
// Equal checks if err is equal to e. // Equal checks if err is equal to e.
......
{ {
mv go.mod1 go.mod mv go.mod1 go.mod
mv go.sum1 go.sum mv go.sum1 go.sum
GO111MODULE=on go test ./... GO111MODULE=on go test -race -covermode=atomic -coverprofile=coverage.txt ./...
} || { } || {
mv go.mod go.mod1 mv go.mod go.mod1
mv go.sum go.sum1 mv go.sum go.sum1
......
...@@ -252,54 +252,63 @@ func (ft *FieldType) Restore(ctx *format.RestoreCtx) error { ...@@ -252,54 +252,63 @@ func (ft *FieldType) Restore(ctx *format.RestoreCtx) error {
return nil return nil
} }
// FormatAsCastType is used to write the AST back to a string. // RestoreAsCastType is used to write the AST back to a string.
func (ft *FieldType) FormatAsCastType(w io.Writer) { func (ft *FieldType) RestoreAsCastType(ctx *format.RestoreCtx) {
switch ft.Tp { switch ft.Tp {
case mysql.TypeVarString: case mysql.TypeVarString:
if ft.Charset == charset.CharsetBin && ft.Collate == charset.CollationBin { if ft.Charset == charset.CharsetBin && ft.Collate == charset.CollationBin {
fmt.Fprint(w, "BINARY") ctx.WriteKeyWord("BINARY")
} else { } else {
fmt.Fprint(w, "CHAR") ctx.WriteKeyWord("CHAR")
} }
if ft.Flen != UnspecifiedLength { if ft.Flen != UnspecifiedLength {
fmt.Fprintf(w, "(%d)", ft.Flen) ctx.WritePlainf("(%d)", ft.Flen)
} }
if ft.Flag&mysql.BinaryFlag != 0 { if ft.Flag&mysql.BinaryFlag != 0 {
fmt.Fprint(w, " BINARY") ctx.WriteKeyWord(" BINARY")
} }
if ft.Charset != charset.CharsetBin && ft.Charset != mysql.DefaultCharset { if ft.Charset != charset.CharsetBin && ft.Charset != mysql.DefaultCharset {
fmt.Fprintf(w, " CHARACTER SET %s", ft.Charset) ctx.WriteKeyWord(" CHARSET ")
ctx.WriteKeyWord(ft.Charset)
} }
case mysql.TypeDate: case mysql.TypeDate:
fmt.Fprint(w, "DATE") ctx.WriteKeyWord("DATE")
case mysql.TypeDatetime: case mysql.TypeDatetime:
fmt.Fprint(w, "DATETIME") ctx.WriteKeyWord("DATETIME")
if ft.Decimal > 0 { if ft.Decimal > 0 {
fmt.Fprintf(w, "(%d)", ft.Decimal) ctx.WritePlainf("(%d)", ft.Decimal)
} }
case mysql.TypeNewDecimal: case mysql.TypeNewDecimal:
fmt.Fprint(w, "DECIMAL") ctx.WriteKeyWord("DECIMAL")
if ft.Flen > 0 && ft.Decimal > 0 { if ft.Flen > 0 && ft.Decimal > 0 {
fmt.Fprintf(w, "(%d, %d)", ft.Flen, ft.Decimal) ctx.WritePlainf("(%d, %d)", ft.Flen, ft.Decimal)
} else if ft.Flen > 0 { } else if ft.Flen > 0 {
fmt.Fprintf(w, "(%d)", ft.Flen) ctx.WritePlainf("(%d)", ft.Flen)
} }
case mysql.TypeDuration: case mysql.TypeDuration:
fmt.Fprint(w, "TIME") ctx.WriteKeyWord("TIME")
if ft.Decimal > 0 { if ft.Decimal > 0 {
fmt.Fprintf(w, "(%d)", ft.Decimal) ctx.WritePlainf("(%d)", ft.Decimal)
} }
case mysql.TypeLonglong: case mysql.TypeLonglong:
if ft.Flag&mysql.UnsignedFlag != 0 { if ft.Flag&mysql.UnsignedFlag != 0 {
fmt.Fprint(w, "UNSIGNED") ctx.WriteKeyWord("UNSIGNED")
} else { } else {
fmt.Fprint(w, "SIGNED") ctx.WriteKeyWord("SIGNED")
} }
case mysql.TypeJSON: case mysql.TypeJSON:
fmt.Fprint(w, "JSON") ctx.WriteKeyWord("JSON")
} }
} }
// FormatAsCastType is used to write the AST back to a string.
func (ft *FieldType) FormatAsCastType(w io.Writer) {
var sb strings.Builder
restoreCtx := format.NewRestoreCtx(format.DefaultRestoreFlags, &sb)
ft.RestoreAsCastType(restoreCtx)
fmt.Fprint(w, sb.String())
}
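A short usage sketch, assuming the vendored parser packages: FormatAsCastType keeps its io.Writer signature but now routes through RestoreAsCastType and a RestoreCtx, so keyword casing follows the restore flags. The FieldType values below are illustrative.

package main

import (
	"fmt"
	"os"

	"github.com/pingcap/parser/mysql"
	"github.com/pingcap/parser/types"
)

func main() {
	ft := types.NewFieldType(mysql.TypeNewDecimal)
	ft.Flen = 10
	ft.Decimal = 2
	// Expected to print something like "DECIMAL(10, 2)" via RestoreAsCastType.
	ft.FormatAsCastType(os.Stdout)
	fmt.Println()
}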
// VarStorageLen indicates this column is a variable length column. // VarStorageLen indicates this column is a variable length column.
const VarStorageLen = -1 const VarStorageLen = -1
......
...@@ -31,6 +31,9 @@ const ( ...@@ -31,6 +31,9 @@ const (
codeErrSyntax = terror.ErrCode(mysql.ErrSyntax) codeErrSyntax = terror.ErrCode(mysql.ErrSyntax)
codeErrUnknownCharacterSet = terror.ErrCode(mysql.ErrUnknownCharacterSet) codeErrUnknownCharacterSet = terror.ErrCode(mysql.ErrUnknownCharacterSet)
codeErrInvalidYearColumnLength = terror.ErrCode(mysql.ErrInvalidYearColumnLength) codeErrInvalidYearColumnLength = terror.ErrCode(mysql.ErrInvalidYearColumnLength)
codeWrongArgument = terror.ErrCode(mysql.ErrWrongArguments)
codeWrongFieldTerminators = terror.ErrCode(mysql.ErrWrongFieldTerminators)
codeTooBigDisplayWidth = terror.ErrCode(mysql.ErrTooBigDisplaywidth)
) )
var ( var (
...@@ -42,6 +45,13 @@ var ( ...@@ -42,6 +45,13 @@ var (
ErrUnknownCharacterSet = terror.ClassParser.New(codeErrUnknownCharacterSet, mysql.MySQLErrName[mysql.ErrUnknownCharacterSet]) ErrUnknownCharacterSet = terror.ClassParser.New(codeErrUnknownCharacterSet, mysql.MySQLErrName[mysql.ErrUnknownCharacterSet])
// ErrInvalidYearColumnLength returns for illegal column length for year type. // ErrInvalidYearColumnLength returns for illegal column length for year type.
ErrInvalidYearColumnLength = terror.ClassParser.New(codeErrInvalidYearColumnLength, mysql.MySQLErrName[mysql.ErrInvalidYearColumnLength]) ErrInvalidYearColumnLength = terror.ClassParser.New(codeErrInvalidYearColumnLength, mysql.MySQLErrName[mysql.ErrInvalidYearColumnLength])
// ErrWrongArguments is returned for an illegal argument.
ErrWrongArguments = terror.ClassParser.New(codeWrongArgument, mysql.MySQLErrName[mysql.ErrWrongArguments])
// ErrWrongFieldTerminators is returned for illegal field terminators.
ErrWrongFieldTerminators = terror.ClassParser.New(codeWrongFieldTerminators, mysql.MySQLErrName[mysql.ErrWrongFieldTerminators])
// ErrTooBigDisplayWidth is returned when the data display width exceeds the limit.
ErrTooBigDisplayWidth = terror.ClassParser.New(codeTooBigDisplayWidth, mysql.MySQLErrName[mysql.ErrTooBigDisplaywidth])
// SpecFieldPattern special result field pattern // SpecFieldPattern special result field pattern
SpecFieldPattern = regexp.MustCompile(`(\/\*!(M?[0-9]{5,6})?|\*\/)`) SpecFieldPattern = regexp.MustCompile(`(\/\*!(M?[0-9]{5,6})?|\*\/)`)
specCodePattern = regexp.MustCompile(`\/\*!(M?[0-9]{5,6})?([^*]|\*+[^*/])*\*+\/`) specCodePattern = regexp.MustCompile(`\/\*!(M?[0-9]{5,6})?([^*]|\*+[^*/])*\*+\/`)
...@@ -55,6 +65,9 @@ func init() { ...@@ -55,6 +65,9 @@ func init() {
codeErrParse: mysql.ErrParse, codeErrParse: mysql.ErrParse,
codeErrUnknownCharacterSet: mysql.ErrUnknownCharacterSet, codeErrUnknownCharacterSet: mysql.ErrUnknownCharacterSet,
codeErrInvalidYearColumnLength: mysql.ErrInvalidYearColumnLength, codeErrInvalidYearColumnLength: mysql.ErrInvalidYearColumnLength,
codeWrongArgument: mysql.ErrWrongArguments,
codeWrongFieldTerminators: mysql.ErrWrongFieldTerminators,
codeTooBigDisplayWidth: mysql.ErrTooBigDisplaywidth,
} }
terror.ErrClassToMySQLCodes[terror.ClassParser] = parserMySQLErrCodes terror.ErrClassToMySQLCodes[terror.ClassParser] = parserMySQLErrCodes
} }
......
...@@ -15,6 +15,8 @@ package stmtctx ...@@ -15,6 +15,8 @@ package stmtctx
import ( import (
"math" "math"
"sort"
"strconv"
"sync" "sync"
"time" "time"
...@@ -93,8 +95,10 @@ type StatementContext struct { ...@@ -93,8 +95,10 @@ type StatementContext struct {
message string message string
warnings []SQLWarn warnings []SQLWarn
errorCount uint16
histogramsNotLoad bool histogramsNotLoad bool
execDetails execdetails.ExecDetails execDetails execdetails.ExecDetails
allExecDetails []*execdetails.ExecDetails
} }
// PrevAffectedRows is the affected-rows value(DDL is 0, DML is the number of affected rows). // PrevAffectedRows is the affected-rows value(DDL is 0, DML is the number of affected rows).
PrevAffectedRows int64 PrevAffectedRows int64
...@@ -265,31 +269,42 @@ func (sc *StatementContext) WarningCount() uint16 { ...@@ -265,31 +269,42 @@ func (sc *StatementContext) WarningCount() uint16 {
return wc return wc
} }
// NumWarnings gets warning count. It's different from `WarningCount` in that const zero = "0"
// `WarningCount` return the warning count of the last executed command, so if
// the last command is a SHOW statement, `WarningCount` return 0. On the other // NumErrorWarnings gets warning and error count.
// hand, `NumWarnings` always return number of warnings(or errors if `errOnly` func (sc *StatementContext) NumErrorWarnings() (ec, wc string) {
// is set). var (
func (sc *StatementContext) NumWarnings(errOnly bool) uint16 { ecNum uint16
var wc uint16 wcNum int
)
sc.mu.Lock() sc.mu.Lock()
defer sc.mu.Unlock() ecNum = sc.mu.errorCount
if errOnly { wcNum = len(sc.mu.warnings)
for _, warn := range sc.mu.warnings { sc.mu.Unlock()
if warn.Level == WarnLevelError {
wc++ if ecNum == 0 {
} ec = zero
}
} else { } else {
wc = uint16(len(sc.mu.warnings)) ec = strconv.Itoa(int(ecNum))
} }
return wc
if wcNum == 0 {
wc = zero
} else {
wc = strconv.Itoa(wcNum)
}
return
} }
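A hedged usage sketch of the new counters, assuming the vendored tidb/sessionctx/stmtctx package: AppendError counts as both a warning and an error, and NumErrorWarnings hands both counts back as strings (reusing "0" for the common empty case).

package main

import (
	"fmt"

	"github.com/pingcap/tidb/sessionctx/stmtctx"
)

func main() {
	sc := &stmtctx.StatementContext{}
	sc.AppendError(fmt.Errorf("boom")) // recorded as an error-level warning
	ec, wc := sc.NumErrorWarnings()
	fmt.Println(ec, wc) // "1 1"
}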
// SetWarnings sets warnings. // SetWarnings sets warnings.
func (sc *StatementContext) SetWarnings(warns []SQLWarn) { func (sc *StatementContext) SetWarnings(warns []SQLWarn) {
sc.mu.Lock() sc.mu.Lock()
sc.mu.warnings = warns sc.mu.warnings = warns
for _, w := range warns {
if w.Level == WarnLevelError {
sc.mu.errorCount++
}
}
sc.mu.Unlock() sc.mu.Unlock()
} }
...@@ -316,6 +331,7 @@ func (sc *StatementContext) AppendError(warn error) { ...@@ -316,6 +331,7 @@ func (sc *StatementContext) AppendError(warn error) {
sc.mu.Lock() sc.mu.Lock()
if len(sc.mu.warnings) < math.MaxUint16 { if len(sc.mu.warnings) < math.MaxUint16 {
sc.mu.warnings = append(sc.mu.warnings, SQLWarn{WarnLevelError, warn}) sc.mu.warnings = append(sc.mu.warnings, SQLWarn{WarnLevelError, warn})
sc.mu.errorCount++
} }
sc.mu.Unlock() sc.mu.Unlock()
} }
...@@ -375,7 +391,10 @@ func (sc *StatementContext) ResetForRetry() { ...@@ -375,7 +391,10 @@ func (sc *StatementContext) ResetForRetry() {
sc.mu.copied = 0 sc.mu.copied = 0
sc.mu.touched = 0 sc.mu.touched = 0
sc.mu.message = "" sc.mu.message = ""
sc.mu.errorCount = 0
sc.mu.warnings = nil sc.mu.warnings = nil
sc.mu.execDetails = execdetails.ExecDetails{}
sc.mu.allExecDetails = make([]*execdetails.ExecDetails, 0, 4)
sc.mu.Unlock() sc.mu.Unlock()
sc.TableIDs = sc.TableIDs[:0] sc.TableIDs = sc.TableIDs[:0]
sc.IndexIDs = sc.IndexIDs[:0] sc.IndexIDs = sc.IndexIDs[:0]
...@@ -392,6 +411,7 @@ func (sc *StatementContext) MergeExecDetails(details *execdetails.ExecDetails, c ...@@ -392,6 +411,7 @@ func (sc *StatementContext) MergeExecDetails(details *execdetails.ExecDetails, c
sc.mu.execDetails.RequestCount++ sc.mu.execDetails.RequestCount++
sc.mu.execDetails.TotalKeys += details.TotalKeys sc.mu.execDetails.TotalKeys += details.TotalKeys
sc.mu.execDetails.ProcessedKeys += details.ProcessedKeys sc.mu.execDetails.ProcessedKeys += details.ProcessedKeys
sc.mu.allExecDetails = append(sc.mu.allExecDetails, details)
} }
sc.mu.execDetails.CommitDetail = commitDetails sc.mu.execDetails.CommitDetail = commitDetails
sc.mu.Unlock() sc.mu.Unlock()
...@@ -423,3 +443,46 @@ func (sc *StatementContext) ShouldIgnoreOverflowError() bool { ...@@ -423,3 +443,46 @@ func (sc *StatementContext) ShouldIgnoreOverflowError() bool {
} }
return false return false
} }
// CopTasksDetails returns some useful information of cop-tasks during execution.
func (sc *StatementContext) CopTasksDetails() *CopTasksDetails {
sc.mu.Lock()
defer sc.mu.Unlock()
n := len(sc.mu.allExecDetails)
d := &CopTasksDetails{NumCopTasks: n}
if n == 0 {
return d
}
d.AvgProcessTime = sc.mu.execDetails.ProcessTime / time.Duration(n)
d.AvgWaitTime = sc.mu.execDetails.WaitTime / time.Duration(n)
sort.Slice(sc.mu.allExecDetails, func(i, j int) bool {
return sc.mu.allExecDetails[i].ProcessTime < sc.mu.allExecDetails[j].ProcessTime
})
d.P90ProcessTime = sc.mu.allExecDetails[n*9/10].ProcessTime
d.MaxProcessTime = sc.mu.allExecDetails[n-1].ProcessTime
d.MaxProcessAddress = sc.mu.allExecDetails[n-1].CalleeAddress
sort.Slice(sc.mu.allExecDetails, func(i, j int) bool {
return sc.mu.allExecDetails[i].WaitTime < sc.mu.allExecDetails[j].WaitTime
})
d.P90WaitTime = sc.mu.allExecDetails[n*9/10].WaitTime
d.MaxWaitTime = sc.mu.allExecDetails[n-1].WaitTime
d.MaxWaitAddress = sc.mu.allExecDetails[n-1].CalleeAddress
return d
}
// CopTasksDetails collects some useful information of cop-tasks during execution.
type CopTasksDetails struct {
NumCopTasks int
AvgProcessTime time.Duration
P90ProcessTime time.Duration
MaxProcessAddress string
MaxProcessTime time.Duration
AvgWaitTime time.Duration
P90WaitTime time.Duration
MaxWaitAddress string
MaxWaitTime time.Duration
}
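The percentile bookkeeping above is just a sort plus two index picks. A self-contained sketch of the same computation on plain durations (the real code operates on ExecDetails and also tracks the callee addresses):

package main

import (
	"fmt"
	"sort"
	"time"
)

func main() {
	procTimes := []time.Duration{
		40 * time.Millisecond, 10 * time.Millisecond,
		30 * time.Millisecond, 20 * time.Millisecond,
	}
	n := len(procTimes)

	var total time.Duration
	for _, d := range procTimes {
		total += d
	}
	avg := total / time.Duration(n)

	sort.Slice(procTimes, func(i, j int) bool { return procTimes[i] < procTimes[j] })
	p90 := procTimes[n*9/10] // same index rule as CopTasksDetails above
	max := procTimes[n-1]

	fmt.Println(avg, p90, max) // 25ms 40ms 40ms
}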
...@@ -173,6 +173,99 @@ func ConvertFloatToUint(sc *stmtctx.StatementContext, fval float64, upperBound u ...@@ -173,6 +173,99 @@ func ConvertFloatToUint(sc *stmtctx.StatementContext, fval float64, upperBound u
return uint64(val), nil return uint64(val), nil
} }
// convertScientificNotation converts a decimal string with scientific notation to a normal decimal string.
// 1E6 => 1000000, .12345E+5 => 12345
func convertScientificNotation(str string) (string, error) {
// https://golang.org/ref/spec#Floating-point_literals
eIdx := -1
point := -1
for i := 0; i < len(str); i++ {
if str[i] == '.' {
point = i
}
if str[i] == 'e' || str[i] == 'E' {
eIdx = i
if point == -1 {
point = i
}
break
}
}
if eIdx == -1 {
return str, nil
}
exp, err := strconv.ParseInt(str[eIdx+1:], 10, 64)
if err != nil {
return "", errors.WithStack(err)
}
f := str[:eIdx]
if exp == 0 {
return f, nil
} else if exp > 0 { // move point right
if point+int(exp) == len(f)-1 { // 123.456 >> 3 = 123456. = 123456
return f[:point] + f[point+1:], nil
} else if point+int(exp) < len(f)-1 { // 123.456 >> 2 = 12345.6
return f[:point] + f[point+1:point+1+int(exp)] + "." + f[point+1+int(exp):], nil
}
// 123.456 >> 5 = 12345600
return f[:point] + f[point+1:] + strings.Repeat("0", point+int(exp)-len(f)+1), nil
} else { // move point left
exp = -exp
if int(exp) < point { // 123.456 << 2 = 1.23456
return f[:point-int(exp)] + "." + f[point-int(exp):point] + f[point+1:], nil
}
// 123.456 << 5 = 0.00123456
return "0." + strings.Repeat("0", int(exp)-point) + f[:point] + f[point+1:], nil
}
}
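Illustrative cases for the point-shifting above, written as a sketch of an in-package test (the function is unexported, so this assumes it sits next to the code in the types package); the expected strings follow the branch comments in the function.

package types

import "testing"

func TestConvertScientificNotationSketch(t *testing.T) {
	cases := []struct{ in, want string }{
		{".12345E+5", "12345"},    // point lands exactly at the end
		{"123.456E2", "12345.6"},  // point moved right, still inside the digits
		{"123.456E5", "12345600"}, // point moved past the digits, zero-padded
		{"123.456E-2", "1.23456"}, // point moved left, still inside the digits
	}
	for _, c := range cases {
		got, err := convertScientificNotation(c.in)
		if err != nil || got != c.want {
			t.Fatalf("convertScientificNotation(%q) = %q, %v; want %q", c.in, got, err, c.want)
		}
	}
}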
func convertDecimalStrToUint(sc *stmtctx.StatementContext, str string, upperBound uint64, tp byte) (uint64, error) {
str, err := convertScientificNotation(str)
if err != nil {
return 0, err
}
var intStr, fracStr string
p := strings.Index(str, ".")
if p == -1 {
intStr = str
} else {
intStr = str[:p]
fracStr = str[p+1:]
}
intStr = strings.TrimLeft(intStr, "0")
if intStr == "" {
intStr = "0"
}
if sc.ShouldClipToZero() && intStr[0] == '-' {
return 0, overflow(str, tp)
}
var round uint64
if fracStr != "" && fracStr[0] >= '5' {
round++
}
upperBound -= round
upperStr := strconv.FormatUint(upperBound, 10)
if len(intStr) > len(upperStr) ||
(len(intStr) == len(upperStr) && intStr > upperStr) {
return upperBound, overflow(str, tp)
}
val, err := strconv.ParseUint(intStr, 10, 64)
if err != nil {
return val, err
}
return val + round, nil
}
// ConvertDecimalToUint converts a decimal to a uint by converting it to a string first to avoid float overflow (#10181).
func ConvertDecimalToUint(sc *stmtctx.StatementContext, d *MyDecimal, upperBound uint64, tp byte) (uint64, error) {
return convertDecimalStrToUint(sc, string(d.ToString()), upperBound, tp)
}
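A hedged usage sketch of the new decimal path, assuming the vendored tidb/types, tidb/sessionctx/stmtctx and parser/mysql packages: going through the decimal's string form keeps integer precision that a float64 round-trip would lose, and the fractional part is rounded.

package main

import (
	"fmt"
	"math"

	"github.com/pingcap/parser/mysql"
	"github.com/pingcap/tidb/sessionctx/stmtctx"
	"github.com/pingcap/tidb/types"
)

func main() {
	sc := &stmtctx.StatementContext{}
	dec := new(types.MyDecimal)
	if err := dec.FromString([]byte("9007199254740993.5")); err != nil {
		panic(err)
	}
	// 9007199254740993 is not representable in a float64; the string-based
	// conversion keeps it exact and rounds the ".5" up.
	v, err := types.ConvertDecimalToUint(sc, dec, math.MaxUint64, mysql.TypeLonglong)
	fmt.Println(v, err) // 9007199254740994 <nil>
}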
// StrToInt converts a string to an integer at the best-effort. // StrToInt converts a string to an integer at the best-effort.
func StrToInt(sc *stmtctx.StatementContext, str string) (int64, error) { func StrToInt(sc *stmtctx.StatementContext, str string) (int64, error) {
str = strings.TrimSpace(str) str = strings.TrimSpace(str)
......
...@@ -911,11 +911,7 @@ func (d *Datum) convertToUint(sc *stmtctx.StatementContext, target *FieldType) ( ...@@ -911,11 +911,7 @@ func (d *Datum) convertToUint(sc *stmtctx.StatementContext, target *FieldType) (
val, err = ConvertIntToUint(sc, ival, upperBound, tp) val, err = ConvertIntToUint(sc, ival, upperBound, tp)
} }
case KindMysqlDecimal: case KindMysqlDecimal:
fval, err1 := d.GetMysqlDecimal().ToFloat64() val, err = ConvertDecimalToUint(sc, d.GetMysqlDecimal(), upperBound, tp)
val, err = ConvertFloatToUint(sc, fval, upperBound, tp)
if err == nil {
err = err1
}
case KindMysqlEnum: case KindMysqlEnum:
val, err = ConvertFloatToUint(sc, d.GetMysqlEnum().ToNumber(), upperBound, tp) val, err = ConvertFloatToUint(sc, d.GetMysqlEnum().ToNumber(), upperBound, tp)
case KindMysqlSet: case KindMysqlSet:
...@@ -1835,14 +1831,14 @@ func DatumsToStrNoErr(datums []Datum) string { ...@@ -1835,14 +1831,14 @@ func DatumsToStrNoErr(datums []Datum) string {
return str return str
} }
// CopyDatum returns a new copy of the datum. // CloneDatum returns a new copy of the datum.
// TODO: Abandon this function. // TODO: Abandon this function.
func CopyDatum(datum Datum) Datum { func CloneDatum(datum Datum) Datum {
return *datum.Copy() return *datum.Copy()
} }
// CopyRow deep copies a Datum slice. // CloneRow deep copies a Datum slice.
func CopyRow(dr []Datum) []Datum { func CloneRow(dr []Datum) []Datum {
c := make([]Datum, len(dr)) c := make([]Datum, len(dr))
for i, d := range dr { for i, d := range dr {
c[i] = *d.Copy() c[i] = *d.Copy()
......
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
package types package types
import ( import (
"github.com/pingcap/errors"
"github.com/pingcap/parser/mysql" "github.com/pingcap/parser/mysql"
"github.com/pingcap/parser/terror" "github.com/pingcap/parser/terror"
parser_types "github.com/pingcap/parser/types" parser_types "github.com/pingcap/parser/types"
...@@ -59,28 +60,43 @@ var ( ...@@ -59,28 +60,43 @@ var (
ErrWarnDataOutOfRange = terror.ClassTypes.New(codeDataOutOfRange, mysql.MySQLErrName[mysql.ErrWarnDataOutOfRange]) ErrWarnDataOutOfRange = terror.ClassTypes.New(codeDataOutOfRange, mysql.MySQLErrName[mysql.ErrWarnDataOutOfRange])
// ErrDuplicatedValueInType is returned when enum column has duplicated value. // ErrDuplicatedValueInType is returned when enum column has duplicated value.
ErrDuplicatedValueInType = terror.ClassTypes.New(codeDuplicatedValueInType, mysql.MySQLErrName[mysql.ErrDuplicatedValueInType]) ErrDuplicatedValueInType = terror.ClassTypes.New(codeDuplicatedValueInType, mysql.MySQLErrName[mysql.ErrDuplicatedValueInType])
// ErrDatetimeFunctionOverflow is returned when the calculation in a datetime function causes an overflow.
ErrDatetimeFunctionOverflow = terror.ClassTypes.New(codeDatetimeFunctionOverflow, mysql.MySQLErrName[mysql.ErrDatetimeFunctionOverflow])
// ErrInvalidTimeFormat is returned when the time format is not correct.
ErrInvalidTimeFormat = terror.ClassTypes.New(mysql.ErrTruncatedWrongValue, "invalid time format: '%v'")
// ErrInvalidWeekModeFormat is returned when the week mode is wrong.
ErrInvalidWeekModeFormat = terror.ClassTypes.New(mysql.ErrTruncatedWrongValue, "invalid week mode format: '%v'")
// ErrInvalidYearFormat is returned when the input is not a valid year format.
ErrInvalidYearFormat = errors.New("invalid year format")
// ErrInvalidYear is returned when the input value is not a valid year.
ErrInvalidYear = errors.New("invalid year")
// ErrIncorrectDatetimeValue is returned when the input is not a valid datetime value.
ErrIncorrectDatetimeValue = terror.ClassTypes.New(mysql.ErrTruncatedWrongValue, "Incorrect datetime value: '%s'")
// ErrTruncatedWrongValue is returned when a value is truncated to an incorrect value.
ErrTruncatedWrongValue = terror.ClassTypes.New(mysql.ErrTruncatedWrongValue, mysql.MySQLErrName[mysql.ErrTruncatedWrongValue])
) )
const ( const (
codeBadNumber terror.ErrCode = 1 codeBadNumber terror.ErrCode = 1
codeDataTooLong = terror.ErrCode(mysql.ErrDataTooLong) codeDataTooLong = terror.ErrCode(mysql.ErrDataTooLong)
codeIllegalValueForType = terror.ErrCode(mysql.ErrIllegalValueForType) codeIllegalValueForType = terror.ErrCode(mysql.ErrIllegalValueForType)
codeTruncated = terror.ErrCode(mysql.WarnDataTruncated) codeTruncated = terror.ErrCode(mysql.WarnDataTruncated)
codeOverflow = terror.ErrCode(mysql.ErrDataOutOfRange) codeOverflow = terror.ErrCode(mysql.ErrDataOutOfRange)
codeDivByZero = terror.ErrCode(mysql.ErrDivisionByZero) codeDivByZero = terror.ErrCode(mysql.ErrDivisionByZero)
codeTooBigDisplayWidth = terror.ErrCode(mysql.ErrTooBigDisplaywidth) codeTooBigDisplayWidth = terror.ErrCode(mysql.ErrTooBigDisplaywidth)
codeTooBigFieldLength = terror.ErrCode(mysql.ErrTooBigFieldlength) codeTooBigFieldLength = terror.ErrCode(mysql.ErrTooBigFieldlength)
codeTooBigSet = terror.ErrCode(mysql.ErrTooBigSet) codeTooBigSet = terror.ErrCode(mysql.ErrTooBigSet)
codeTooBigScale = terror.ErrCode(mysql.ErrTooBigScale) codeTooBigScale = terror.ErrCode(mysql.ErrTooBigScale)
codeTooBigPrecision = terror.ErrCode(mysql.ErrTooBigPrecision) codeTooBigPrecision = terror.ErrCode(mysql.ErrTooBigPrecision)
codeWrongFieldSpec = terror.ErrCode(mysql.ErrWrongFieldSpec) codeWrongFieldSpec = terror.ErrCode(mysql.ErrWrongFieldSpec)
codeTruncatedWrongValue = terror.ErrCode(mysql.ErrTruncatedWrongValue) codeTruncatedWrongValue = terror.ErrCode(mysql.ErrTruncatedWrongValue)
codeUnknown = terror.ErrCode(mysql.ErrUnknown) codeUnknown = terror.ErrCode(mysql.ErrUnknown)
codeInvalidDefault = terror.ErrCode(mysql.ErrInvalidDefault) codeInvalidDefault = terror.ErrCode(mysql.ErrInvalidDefault)
codeMBiggerThanD = terror.ErrCode(mysql.ErrMBiggerThanD) codeMBiggerThanD = terror.ErrCode(mysql.ErrMBiggerThanD)
codeDataOutOfRange = terror.ErrCode(mysql.ErrWarnDataOutOfRange) codeDataOutOfRange = terror.ErrCode(mysql.ErrWarnDataOutOfRange)
codeDuplicatedValueInType = terror.ErrCode(mysql.ErrDuplicatedValueInType) codeDuplicatedValueInType = terror.ErrCode(mysql.ErrDuplicatedValueInType)
codeDatetimeFunctionOverflow = terror.ErrCode(mysql.ErrDatetimeFunctionOverflow)
) )
var ( var (
...@@ -92,23 +108,24 @@ var ( ...@@ -92,23 +108,24 @@ var (
func init() { func init() {
typesMySQLErrCodes := map[terror.ErrCode]uint16{ typesMySQLErrCodes := map[terror.ErrCode]uint16{
codeDataTooLong: mysql.ErrDataTooLong, codeDataTooLong: mysql.ErrDataTooLong,
codeIllegalValueForType: mysql.ErrIllegalValueForType, codeIllegalValueForType: mysql.ErrIllegalValueForType,
codeTruncated: mysql.WarnDataTruncated, codeTruncated: mysql.WarnDataTruncated,
codeOverflow: mysql.ErrDataOutOfRange, codeOverflow: mysql.ErrDataOutOfRange,
codeDivByZero: mysql.ErrDivisionByZero, codeDivByZero: mysql.ErrDivisionByZero,
codeTooBigDisplayWidth: mysql.ErrTooBigDisplaywidth, codeTooBigDisplayWidth: mysql.ErrTooBigDisplaywidth,
codeTooBigFieldLength: mysql.ErrTooBigFieldlength, codeTooBigFieldLength: mysql.ErrTooBigFieldlength,
codeTooBigSet: mysql.ErrTooBigSet, codeTooBigSet: mysql.ErrTooBigSet,
codeTooBigScale: mysql.ErrTooBigScale, codeTooBigScale: mysql.ErrTooBigScale,
codeTooBigPrecision: mysql.ErrTooBigPrecision, codeTooBigPrecision: mysql.ErrTooBigPrecision,
codeWrongFieldSpec: mysql.ErrWrongFieldSpec, codeWrongFieldSpec: mysql.ErrWrongFieldSpec,
codeTruncatedWrongValue: mysql.ErrTruncatedWrongValue, codeTruncatedWrongValue: mysql.ErrTruncatedWrongValue,
codeUnknown: mysql.ErrUnknown, codeUnknown: mysql.ErrUnknown,
codeInvalidDefault: mysql.ErrInvalidDefault, codeInvalidDefault: mysql.ErrInvalidDefault,
codeMBiggerThanD: mysql.ErrMBiggerThanD, codeMBiggerThanD: mysql.ErrMBiggerThanD,
codeDataOutOfRange: mysql.ErrWarnDataOutOfRange, codeDataOutOfRange: mysql.ErrWarnDataOutOfRange,
codeDuplicatedValueInType: mysql.ErrDuplicatedValueInType, codeDuplicatedValueInType: mysql.ErrDuplicatedValueInType,
codeDatetimeFunctionOverflow: mysql.ErrDatetimeFunctionOverflow,
} }
terror.ErrClassToMySQLCodes[terror.ClassTypes] = typesMySQLErrCodes terror.ErrClassToMySQLCodes[terror.ClassTypes] = typesMySQLErrCodes
} }
...@@ -149,6 +149,67 @@ func decodeEscapedUnicode(s []byte) (char [4]byte, size int, err error) { ...@@ -149,6 +149,67 @@ func decodeEscapedUnicode(s []byte) (char [4]byte, size int, err error) {
return return
} }
// quoteString escapes interior quotes and other special characters for JSON_QUOTE
// https://dev.mysql.com/doc/refman/5.7/en/json-creation-functions.html#function_json-quote
// TODO: add JSON_QUOTE builtin
func quoteString(s string) string {
var escapeByteMap = map[byte]string{
'\\': "\\\\",
'"': "\\\"",
'\b': "\\b",
'\f': "\\f",
'\n': "\\n",
'\r': "\\r",
'\t': "\\t",
}
ret := new(bytes.Buffer)
ret.WriteByte('"')
start := 0
hasEscaped := false
for i := 0; i < len(s); {
if b := s[i]; b < utf8.RuneSelf {
escaped, ok := escapeByteMap[b]
if ok {
if start < i {
ret.WriteString(s[start:i])
}
hasEscaped = true
ret.WriteString(escaped)
i++
start = i
} else {
i++
}
} else {
c, size := utf8.DecodeRune([]byte(s[i:]))
if c == utf8.RuneError && size == 1 { // refer to codes of `binary.marshalStringTo`
if start < i {
ret.WriteString(s[start:i])
}
hasEscaped = true
ret.WriteString(`\ufffd`)
i += size
start = i
continue
}
i += size
}
}
if start < len(s) {
ret.WriteString(s[start:])
}
if hasEscaped {
ret.WriteByte('"')
return ret.String()
}
return ret.String()[1:]
}
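Illustrative inputs and outputs for the escaping above, sketched as an in-package test (quoteString is unexported, so this assumes it lives in the same json package): strings that needed an escape come back wrapped in double quotes, plain strings are returned as-is.

package json

import "testing"

func TestQuoteStringSketch(t *testing.T) {
	cases := []struct{ in, want string }{
		{"key", "key"},                     // nothing to escape: no surrounding quotes
		{`a"b`, `"a\"b"`},                  // interior quote escaped and wrapped
		{"line1\nline2", `"line1\nline2"`}, // control character escaped and wrapped
	}
	for _, c := range cases {
		if got := quoteString(c.in); got != c.want {
			t.Fatalf("quoteString(%q) = %q, want %q", c.in, got, c.want)
		}
	}
}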
// Extract receives several path expressions as arguments, matches them in bj, and returns: // Extract receives several path expressions as arguments, matches them in bj, and returns:
// ret: target JSON matched any path expressions. maybe autowrapped as an array. // ret: target JSON matched any path expressions. maybe autowrapped as an array.
// found: true if any path expressions matched. // found: true if any path expressions matched.
...@@ -778,3 +839,148 @@ func (bj BinaryJSON) GetElemDepth() int { ...@@ -778,3 +839,148 @@ func (bj BinaryJSON) GetElemDepth() int {
return 1 return 1
} }
} }
// extractCallbackFn: the type of CALLBACK function for extractToCallback
type extractCallbackFn func(fullpath PathExpression, bj BinaryJSON) (stop bool, err error)
// extractToCallback: callback alternative of extractTo
// It is more efficient when walking through the whole JSON is unnecessary.
// NOTICE: the paths [0] & [*] on a JSON object (rather than an array) are INVALID, which is different from extractTo.
func (bj BinaryJSON) extractToCallback(pathExpr PathExpression, callbackFn extractCallbackFn, fullpath PathExpression) (stop bool, err error) {
if len(pathExpr.legs) == 0 {
return callbackFn(fullpath, bj)
}
currentLeg, subPathExpr := pathExpr.popOneLeg()
if currentLeg.typ == pathLegIndex && bj.TypeCode == TypeCodeArray {
elemCount := bj.GetElemCount()
if currentLeg.arrayIndex == arrayIndexAsterisk {
for i := 0; i < elemCount; i++ {
//buf = bj.arrayGetElem(i).extractTo(buf, subPathExpr)
path := fullpath.pushBackOneIndexLeg(i)
stop, err = bj.arrayGetElem(i).extractToCallback(subPathExpr, callbackFn, path)
if stop || err != nil {
return
}
}
} else if currentLeg.arrayIndex < elemCount {
//buf = bj.arrayGetElem(currentLeg.arrayIndex).extractTo(buf, subPathExpr)
path := fullpath.pushBackOneIndexLeg(currentLeg.arrayIndex)
stop, err = bj.arrayGetElem(currentLeg.arrayIndex).extractToCallback(subPathExpr, callbackFn, path)
if stop || err != nil {
return
}
}
} else if currentLeg.typ == pathLegKey && bj.TypeCode == TypeCodeObject {
elemCount := bj.GetElemCount()
if currentLeg.dotKey == "*" {
for i := 0; i < elemCount; i++ {
//buf = bj.objectGetVal(i).extractTo(buf, subPathExpr)
path := fullpath.pushBackOneKeyLeg(string(bj.objectGetKey(i)))
stop, err = bj.objectGetVal(i).extractToCallback(subPathExpr, callbackFn, path)
if stop || err != nil {
return
}
}
} else {
child, ok := bj.objectSearchKey(hack.Slice(currentLeg.dotKey))
if ok {
//buf = child.extractTo(buf, subPathExpr)
path := fullpath.pushBackOneKeyLeg(currentLeg.dotKey)
stop, err = child.extractToCallback(subPathExpr, callbackFn, path)
if stop || err != nil {
return
}
}
}
} else if currentLeg.typ == pathLegDoubleAsterisk {
//buf = bj.extractTo(buf, subPathExpr)
stop, err = bj.extractToCallback(subPathExpr, callbackFn, fullpath)
if stop || err != nil {
return
}
if bj.TypeCode == TypeCodeArray {
elemCount := bj.GetElemCount()
for i := 0; i < elemCount; i++ {
//buf = bj.arrayGetElem(i).extractTo(buf, pathExpr)
path := fullpath.pushBackOneIndexLeg(i)
stop, err = bj.arrayGetElem(i).extractToCallback(pathExpr, callbackFn, path)
if stop || err != nil {
return
}
}
} else if bj.TypeCode == TypeCodeObject {
elemCount := bj.GetElemCount()
for i := 0; i < elemCount; i++ {
//buf = bj.objectGetVal(i).extractTo(buf, pathExpr)
path := fullpath.pushBackOneKeyLeg(string(bj.objectGetKey(i)))
stop, err = bj.objectGetVal(i).extractToCallback(pathExpr, callbackFn, path)
if stop || err != nil {
return
}
}
}
}
return false, nil
}
// BinaryJSONWalkFunc is used as callback function for BinaryJSON.Walk
type BinaryJSONWalkFunc func(fullpath PathExpression, bj BinaryJSON) (stop bool, err error)
// Walk traverses BinaryJSON objects.
func (bj BinaryJSON) Walk(walkFn BinaryJSONWalkFunc, pathExprList ...PathExpression) (err error) {
pathSet := make(map[string]bool)
var doWalk extractCallbackFn
doWalk = func(fullpath PathExpression, bj BinaryJSON) (stop bool, err error) {
pathStr := fullpath.String()
if _, ok := pathSet[pathStr]; ok {
return false, nil
}
stop, err = walkFn(fullpath, bj)
pathSet[pathStr] = true
if stop || err != nil {
return
}
if bj.TypeCode == TypeCodeArray {
elemCount := bj.GetElemCount()
for i := 0; i < elemCount; i++ {
path := fullpath.pushBackOneIndexLeg(i)
stop, err = doWalk(path, bj.arrayGetElem(i))
if stop || err != nil {
return
}
}
} else if bj.TypeCode == TypeCodeObject {
elemCount := bj.GetElemCount()
for i := 0; i < elemCount; i++ {
path := fullpath.pushBackOneKeyLeg(string(bj.objectGetKey(i)))
stop, err = doWalk(path, bj.objectGetVal(i))
if stop || err != nil {
return
}
}
}
return false, nil
}
fullpath := PathExpression{legs: make([]pathLeg, 0, 32), flags: pathExpressionFlag(0)}
if len(pathExprList) > 0 {
for _, pathExpr := range pathExprList {
var stop bool
stop, err = bj.extractToCallback(pathExpr, doWalk, fullpath)
if stop || err != nil {
return err
}
}
} else {
_, err = doWalk(fullpath, bj)
if err != nil {
return
}
}
return nil
}
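A hedged usage sketch of Walk, assuming the vendored tidb/types/json package: the callback sees every element together with its full path (built via the pushBackOne*Leg helpers below) and can stop early by returning true.

package main

import (
	"fmt"

	"github.com/pingcap/tidb/types/json"
)

func main() {
	bj, err := json.ParseBinaryFromString(`{"a": [1, 2], "b": "x"}`)
	if err != nil {
		panic(err)
	}
	// Visit every element and print its path; return true to stop early.
	err = bj.Walk(func(path json.PathExpression, val json.BinaryJSON) (bool, error) {
		fmt.Println(path.String(), "=>", val.String())
		return false, nil
	})
	if err != nil {
		panic(err)
	}
}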
...@@ -117,6 +117,30 @@ func (pe PathExpression) popOneLastLeg() (PathExpression, pathLeg) { ...@@ -117,6 +117,30 @@ func (pe PathExpression) popOneLastLeg() (PathExpression, pathLeg) {
return PathExpression{legs: pe.legs[:lastLegIdx]}, lastLeg return PathExpression{legs: pe.legs[:lastLegIdx]}, lastLeg
} }
// pushBackOneIndexLeg pushes back one leg of INDEX type
func (pe PathExpression) pushBackOneIndexLeg(index int) PathExpression {
newPe := PathExpression{
legs: append(pe.legs, pathLeg{typ: pathLegIndex, arrayIndex: index}),
flags: pe.flags,
}
if index == -1 {
newPe.flags |= pathExpressionContainsAsterisk
}
return newPe
}
// pushBackOneKeyLeg pushes back one leg of KEY type
func (pe PathExpression) pushBackOneKeyLeg(key string) PathExpression {
newPe := PathExpression{
legs: append(pe.legs, pathLeg{typ: pathLegKey, dotKey: key}),
flags: pe.flags,
}
if key == "*" {
newPe.flags |= pathExpressionContainsAsterisk
}
return newPe
}
// ContainsAnyAsterisk returns true if pe contains any asterisk. // ContainsAnyAsterisk returns true if pe contains any asterisk.
func (pe PathExpression) ContainsAnyAsterisk() bool { func (pe PathExpression) ContainsAnyAsterisk() bool {
return pe.flags.containsAnyAsterisk() return pe.flags.containsAnyAsterisk()
...@@ -212,3 +236,27 @@ func isBlank(c rune) bool { ...@@ -212,3 +236,27 @@ func isBlank(c rune) bool {
} }
return false return false
} }
func (pe PathExpression) String() string {
var s strings.Builder
s.WriteString("$")
for _, leg := range pe.legs {
switch leg.typ {
case pathLegIndex:
if leg.arrayIndex == -1 {
s.WriteString("[*]")
} else {
s.WriteString("[")
s.WriteString(strconv.Itoa(leg.arrayIndex))
s.WriteString("]")
}
case pathLegKey:
s.WriteString(".")
s.WriteString(quoteString(leg.dotKey))
case pathLegDoubleAsterisk:
s.WriteString("**")
}
}
return s.String()
}
...@@ -29,18 +29,7 @@ import ( ...@@ -29,18 +29,7 @@ import (
"github.com/pingcap/parser/terror" "github.com/pingcap/parser/terror"
"github.com/pingcap/tidb/sessionctx/stmtctx" "github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/util/logutil" "github.com/pingcap/tidb/util/logutil"
) tidbMath "github.com/pingcap/tidb/util/math"
// Portable analogs of some common call errors.
var (
ErrInvalidTimeFormat = terror.ClassTypes.New(mysql.ErrTruncatedWrongValue, "invalid time format: '%v'")
ErrInvalidWeekModeFormat = terror.ClassTypes.New(mysql.ErrTruncatedWrongValue, "invalid week mode format: '%v'")
ErrInvalidYearFormat = errors.New("invalid year format")
ErrInvalidYear = errors.New("invalid year")
ErrZeroDate = errors.New("datetime zero in date")
ErrIncorrectDatetimeValue = terror.ClassTypes.New(mysql.ErrTruncatedWrongValue, "Incorrect datetime value: '%s'")
ErrDatetimeFunctionOverflow = terror.ClassTypes.New(mysql.ErrDatetimeFunctionOverflow, mysql.MySQLErrName[mysql.ErrDatetimeFunctionOverflow])
ErrTruncatedWrongValue = terror.ClassTypes.New(mysql.ErrTruncatedWrongValue, mysql.MySQLErrName[mysql.ErrTruncatedWrongValue])
) )
// Time format without fractional seconds precision. // Time format without fractional seconds precision.
...@@ -196,6 +185,13 @@ var ( ...@@ -196,6 +185,13 @@ var (
} }
) )
const (
// GoDurationDay is the gotime.Duration equal to one day.
GoDurationDay = gotime.Hour * 24
// GoDurationWeek is the gotime.Duration equal to one week.
GoDurationWeek = GoDurationDay * 7
)
// FromGoTime translates time.Time to mysql time internal representation. // FromGoTime translates time.Time to mysql time internal representation.
func FromGoTime(t gotime.Time) MysqlTime { func FromGoTime(t gotime.Time) MysqlTime {
year, month, day := t.Date() year, month, day := t.Date()
...@@ -1198,9 +1194,9 @@ func ParseDuration(sc *stmtctx.StatementContext, str string, fsp int) (Duration, ...@@ -1198,9 +1194,9 @@ func ParseDuration(sc *stmtctx.StatementContext, str string, fsp int) (Duration,
// TruncateOverflowMySQLTime truncates d when it overflows, and return ErrTruncatedWrongVal. // TruncateOverflowMySQLTime truncates d when it overflows, and return ErrTruncatedWrongVal.
func TruncateOverflowMySQLTime(d gotime.Duration) (gotime.Duration, error) { func TruncateOverflowMySQLTime(d gotime.Duration) (gotime.Duration, error) {
if d > MaxTime { if d > MaxTime {
return MaxTime, ErrTruncatedWrongVal.GenWithStackByArgs("time", d.String()) return MaxTime, ErrTruncatedWrongVal.GenWithStackByArgs("time", d)
} else if d < MinTime { } else if d < MinTime {
return MinTime, ErrTruncatedWrongVal.GenWithStackByArgs("time", d.String()) return MinTime, ErrTruncatedWrongVal.GenWithStackByArgs("time", d)
} }
return d, nil return d, nil
...@@ -1474,7 +1470,7 @@ func checkDateRange(t MysqlTime) error { ...@@ -1474,7 +1470,7 @@ func checkDateRange(t MysqlTime) error {
func checkMonthDay(year, month, day int, allowInvalidDate bool) error { func checkMonthDay(year, month, day int, allowInvalidDate bool) error {
if month < 0 || month > 12 { if month < 0 || month > 12 {
return errors.Trace(ErrInvalidTimeFormat.GenWithStackByArgs(month)) return errors.Trace(ErrIncorrectDatetimeValue.GenWithStackByArgs(month))
} }
maxDay := 31 maxDay := 31
...@@ -1482,13 +1478,13 @@ func checkMonthDay(year, month, day int, allowInvalidDate bool) error { ...@@ -1482,13 +1478,13 @@ func checkMonthDay(year, month, day int, allowInvalidDate bool) error {
if month > 0 { if month > 0 {
maxDay = maxDaysInMonth[month-1] maxDay = maxDaysInMonth[month-1]
} }
if month == 2 && year%4 != 0 { if month == 2 && !isLeapYear(uint16(year)) {
maxDay = 28 maxDay = 28
} }
} }
if day < 0 || day > maxDay { if day < 0 || day > maxDay {
return errors.Trace(ErrInvalidTimeFormat.GenWithStackByArgs(day)) return errors.Trace(ErrIncorrectDatetimeValue.GenWithStackByArgs(day))
} }
return nil return nil
} }
...@@ -1545,6 +1541,7 @@ func checkDatetimeType(t MysqlTime, allowZeroInDate, allowInvalidDate bool) erro ...@@ -1545,6 +1541,7 @@ func checkDatetimeType(t MysqlTime, allowZeroInDate, allowInvalidDate bool) erro
// ExtractDatetimeNum extracts time value number from datetime unit and format. // ExtractDatetimeNum extracts time value number from datetime unit and format.
func ExtractDatetimeNum(t *Time, unit string) (int64, error) { func ExtractDatetimeNum(t *Time, unit string) (int64, error) {
// TODO: Consider time_zone variable.
switch strings.ToUpper(unit) { switch strings.ToUpper(unit) {
case "DAY": case "DAY":
return int64(t.Time.Day()), nil return int64(t.Time.Day()), nil
...@@ -1552,12 +1549,7 @@ func ExtractDatetimeNum(t *Time, unit string) (int64, error) { ...@@ -1552,12 +1549,7 @@ func ExtractDatetimeNum(t *Time, unit string) (int64, error) {
week := t.Time.Week(0) week := t.Time.Week(0)
return int64(week), nil return int64(week), nil
case "MONTH": case "MONTH":
// TODO: Consider time_zone variable. return int64(t.Time.Month()), nil
t1, err := t.Time.GoTime(gotime.Local)
if err != nil {
return 0, errors.Trace(err)
}
return int64(t1.Month()), nil
case "QUARTER": case "QUARTER":
m := int64(t.Time.Month()) m := int64(t.Time.Month())
// 1 - 3 -> 1 // 1 - 3 -> 1
...@@ -1619,41 +1611,110 @@ func ExtractDurationNum(d *Duration, unit string) (int64, error) { ...@@ -1619,41 +1611,110 @@ func ExtractDurationNum(d *Duration, unit string) (int64, error) {
} }
} }
func extractSingleTimeValue(unit string, format string) (int64, int64, int64, float64, error) { // parseSingleTimeValue parses the format according to the given unit. If strictCheck is set to true, we check whether
fv, err := strconv.ParseFloat(format, 64) // the converted value exceeds the range of MySQL's TIME type.
// The first four returned values are year, month, day and nanosecond.
func parseSingleTimeValue(unit string, format string, strictCheck bool) (int64, int64, int64, int64, error) {
// Format is a preformatted number; its format should be A[.[B]].
decimalPointPos := strings.IndexRune(format, '.')
if decimalPointPos == -1 {
decimalPointPos = len(format)
}
sign := int64(1)
if len(format) > 0 && format[0] == '-' {
sign = int64(-1)
}
iv, err := strconv.ParseInt(format[0:decimalPointPos], 10, 64)
if err != nil { if err != nil {
return 0, 0, 0, 0, ErrIncorrectDatetimeValue.GenWithStackByArgs(format) return 0, 0, 0, 0, ErrIncorrectDatetimeValue.GenWithStackByArgs(format)
} }
iv := int64(math.Round(fv)) riv := iv // Rounded integer value
dv := int64(0)
lf := len(format) - 1
// Has fraction part
if decimalPointPos < lf {
if lf-decimalPointPos >= 6 {
// MySQL rounds down to 1e-6.
if dv, err = strconv.ParseInt(format[decimalPointPos+1:decimalPointPos+7], 10, 64); err != nil {
return 0, 0, 0, 0, ErrIncorrectDatetimeValue.GenWithStackByArgs(format)
}
} else {
if dv, err = strconv.ParseInt(format[decimalPointPos+1:]+"000000"[:6-(lf-decimalPointPos)], 10, 64); err != nil {
return 0, 0, 0, 0, ErrIncorrectDatetimeValue.GenWithStackByArgs(format)
}
}
if dv >= 500000 { // Round up, and we should keep 6 digits for microsecond, so dv should in [000000, 999999].
riv += sign
}
if unit != "SECOND" {
err = ErrTruncatedWrongValue.GenWithStackByArgs(format)
}
}
switch strings.ToUpper(unit) { switch strings.ToUpper(unit) {
case "MICROSECOND": case "MICROSECOND":
return 0, 0, 0, fv * float64(gotime.Microsecond), nil if strictCheck && tidbMath.Abs(riv) > TimeMaxValueSeconds*1000 {
return 0, 0, 0, 0, ErrDatetimeFunctionOverflow.GenWithStackByArgs("time")
}
dayCount := riv / int64(GoDurationDay/gotime.Microsecond)
riv %= int64(GoDurationDay / gotime.Microsecond)
return 0, 0, dayCount, riv * int64(gotime.Microsecond), err
case "SECOND": case "SECOND":
return 0, 0, 0, fv * float64(gotime.Second), nil if strictCheck && tidbMath.Abs(iv) > TimeMaxValueSeconds {
return 0, 0, 0, 0, ErrDatetimeFunctionOverflow.GenWithStackByArgs("time")
}
dayCount := iv / int64(GoDurationDay/gotime.Second)
iv %= int64(GoDurationDay / gotime.Second)
return 0, 0, dayCount, iv*int64(gotime.Second) + dv*int64(gotime.Microsecond), err
case "MINUTE": case "MINUTE":
return 0, 0, 0, float64(iv * int64(gotime.Minute)), nil if strictCheck && tidbMath.Abs(riv) > TimeMaxHour*60+TimeMaxMinute {
return 0, 0, 0, 0, ErrDatetimeFunctionOverflow.GenWithStackByArgs("time")
}
dayCount := riv / int64(GoDurationDay/gotime.Minute)
riv %= int64(GoDurationDay / gotime.Minute)
return 0, 0, dayCount, riv * int64(gotime.Minute), err
case "HOUR": case "HOUR":
return 0, 0, 0, float64(iv * int64(gotime.Hour)), nil if strictCheck && tidbMath.Abs(riv) > TimeMaxHour {
return 0, 0, 0, 0, ErrDatetimeFunctionOverflow.GenWithStackByArgs("time")
}
dayCount := riv / 24
riv %= 24
return 0, 0, dayCount, riv * int64(gotime.Hour), err
case "DAY": case "DAY":
return 0, 0, iv, 0, nil if strictCheck && tidbMath.Abs(riv) > TimeMaxHour/24 {
return 0, 0, 0, 0, ErrDatetimeFunctionOverflow.GenWithStackByArgs("time")
}
return 0, 0, riv, 0, err
case "WEEK": case "WEEK":
return 0, 0, 7 * iv, 0, nil if strictCheck && 7*tidbMath.Abs(riv) > TimeMaxHour/24 {
return 0, 0, 0, 0, ErrDatetimeFunctionOverflow.GenWithStackByArgs("time")
}
return 0, 0, 7 * riv, 0, err
case "MONTH": case "MONTH":
return 0, iv, 0, 0, nil if strictCheck && tidbMath.Abs(riv) > 1 {
return 0, 0, 0, 0, ErrDatetimeFunctionOverflow.GenWithStackByArgs("time")
}
return 0, riv, 0, 0, err
case "QUARTER": case "QUARTER":
return 0, 3 * iv, 0, 0, nil if strictCheck {
return 0, 0, 0, 0, ErrDatetimeFunctionOverflow.GenWithStackByArgs("time")
}
return 0, 3 * riv, 0, 0, err
case "YEAR": case "YEAR":
return iv, 0, 0, 0, nil if strictCheck {
return 0, 0, 0, 0, ErrDatetimeFunctionOverflow.GenWithStackByArgs("time")
}
return riv, 0, 0, 0, err
} }
return 0, 0, 0, 0, errors.Errorf("invalid singel timeunit - %s", unit) return 0, 0, 0, 0, errors.Errorf("invalid singel timeunit - %s", unit)
} }
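A couple of illustrative cases for the non-strict path above, sketched as an in-package test (parseSingleTimeValue is unexported, so this assumes it sits in the same types package): oversized units spill into the day count, and a fractional value on a non-SECOND unit is rounded but flagged with a truncation error.

package types

import (
	"testing"
	gotime "time"
)

func TestParseSingleTimeValueSketch(t *testing.T) {
	// 26 hours become 1 day plus 2 hours worth of nanoseconds.
	y, m, d, n, err := parseSingleTimeValue("HOUR", "26", false)
	if err != nil || y != 0 || m != 0 || d != 1 || n != int64(2*gotime.Hour) {
		t.Fatalf("HOUR 26: got (%d, %d, %d, %d, %v)", y, m, d, n, err)
	}
	// "1.5" MINUTE rounds up to 2 minutes and reports a truncation error.
	_, _, _, n, err = parseSingleTimeValue("MINUTE", "1.5", false)
	if err == nil || n != int64(2*gotime.Minute) {
		t.Fatalf("MINUTE 1.5: got n=%d, err=%v", n, err)
	}
}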
// extractTimeValue extracts years, months, days, microseconds from a string // parseTimeValue gets years, months, days, nanoseconds from a string
// The nanosecond part will not exceed the length of a single day.
// MySQL permits any punctuation delimiter in the expr format. // MySQL permits any punctuation delimiter in the expr format.
// See https://dev.mysql.com/doc/refman/8.0/en/expressions.html#temporal-intervals // See https://dev.mysql.com/doc/refman/8.0/en/expressions.html#temporal-intervals
func extractTimeValue(format string, index, cnt int) (int64, int64, int64, float64, error) { func parseTimeValue(format string, index, cnt int) (int64, int64, int64, int64, error) {
neg := false neg := false
originalFmt := format originalFmt := format
format = strings.TrimSpace(format) format = strings.TrimSpace(format)
...@@ -1691,57 +1752,160 @@ func extractTimeValue(format string, index, cnt int) (int64, int64, int64, float ...@@ -1691,57 +1752,160 @@ func extractTimeValue(format string, index, cnt int) (int64, int64, int64, float
return 0, 0, 0, 0, ErrIncorrectDatetimeValue.GenWithStackByArgs(originalFmt) return 0, 0, 0, 0, ErrIncorrectDatetimeValue.GenWithStackByArgs(originalFmt)
} }
hours, err := strconv.ParseFloat(fields[HourIndex], 64) hours, err := strconv.ParseInt(fields[HourIndex], 10, 64)
if err != nil { if err != nil {
return 0, 0, 0, 0, ErrIncorrectDatetimeValue.GenWithStackByArgs(originalFmt) return 0, 0, 0, 0, ErrIncorrectDatetimeValue.GenWithStackByArgs(originalFmt)
} }
minutes, err := strconv.ParseFloat(fields[MinuteIndex], 64) minutes, err := strconv.ParseInt(fields[MinuteIndex], 10, 64)
if err != nil { if err != nil {
return 0, 0, 0, 0, ErrIncorrectDatetimeValue.GenWithStackByArgs(originalFmt) return 0, 0, 0, 0, ErrIncorrectDatetimeValue.GenWithStackByArgs(originalFmt)
} }
seconds, err := strconv.ParseFloat(fields[SecondIndex], 64) seconds, err := strconv.ParseInt(fields[SecondIndex], 10, 64)
if err != nil { if err != nil {
return 0, 0, 0, 0, ErrIncorrectDatetimeValue.GenWithStackByArgs(originalFmt) return 0, 0, 0, 0, ErrIncorrectDatetimeValue.GenWithStackByArgs(originalFmt)
} }
microseconds, err := strconv.ParseFloat(alignFrac(fields[MicrosecondIndex], MaxFsp), 64) microseconds, err := strconv.ParseInt(alignFrac(fields[MicrosecondIndex], MaxFsp), 10, 64)
if err != nil { if err != nil {
return 0, 0, 0, 0, ErrIncorrectDatetimeValue.GenWithStackByArgs(originalFmt) return 0, 0, 0, 0, ErrIncorrectDatetimeValue.GenWithStackByArgs(originalFmt)
} }
durations := hours*float64(gotime.Hour) + minutes*float64(gotime.Minute) + seconds = hours*3600 + minutes*60 + seconds
seconds*float64(gotime.Second) + microseconds*float64(gotime.Microsecond) days += seconds / (3600 * 24)
seconds %= 3600 * 24
return years, months, days, seconds*int64(gotime.Second) + microseconds*int64(gotime.Microsecond), nil
}
return years, months, days, durations, nil func parseAndValidateDurationValue(format string, index, cnt int) (int64, error) {
year, month, day, nano, err := parseTimeValue(format, index, cnt)
if err != nil {
return 0, err
}
if year != 0 || month != 0 || tidbMath.Abs(day) > TimeMaxHour/24 {
return 0, ErrDatetimeFunctionOverflow.GenWithStackByArgs("time")
}
dur := day*int64(GoDurationDay) + nano
if tidbMath.Abs(dur) > int64(MaxTime) {
return 0, ErrDatetimeFunctionOverflow.GenWithStackByArgs("time")
}
return dur, nil
} }
// ExtractTimeValue extracts time value from time unit and format. // ParseDurationValue parses time value from time unit and format.
func ExtractTimeValue(unit string, format string) (int64, int64, int64, float64, error) { // Returns y years m months d days + n nanoseconds
// Nanoseconds will no longer than one day.
func ParseDurationValue(unit string, format string) (y int64, m int64, d int64, n int64, _ error) {
switch strings.ToUpper(unit) { switch strings.ToUpper(unit) {
case "MICROSECOND", "SECOND", "MINUTE", "HOUR", "DAY", "WEEK", "MONTH", "QUARTER", "YEAR": case "MICROSECOND", "SECOND", "MINUTE", "HOUR", "DAY", "WEEK", "MONTH", "QUARTER", "YEAR":
return extractSingleTimeValue(unit, format) return parseSingleTimeValue(unit, format, false)
case "SECOND_MICROSECOND":
return parseTimeValue(format, MicrosecondIndex, SecondMicrosecondMaxCnt)
case "MINUTE_MICROSECOND":
return parseTimeValue(format, MicrosecondIndex, MinuteMicrosecondMaxCnt)
case "MINUTE_SECOND":
return parseTimeValue(format, SecondIndex, MinuteSecondMaxCnt)
case "HOUR_MICROSECOND":
return parseTimeValue(format, MicrosecondIndex, HourMicrosecondMaxCnt)
case "HOUR_SECOND":
return parseTimeValue(format, SecondIndex, HourSecondMaxCnt)
case "HOUR_MINUTE":
return parseTimeValue(format, MinuteIndex, HourMinuteMaxCnt)
case "DAY_MICROSECOND":
return parseTimeValue(format, MicrosecondIndex, DayMicrosecondMaxCnt)
case "DAY_SECOND":
return parseTimeValue(format, SecondIndex, DaySecondMaxCnt)
case "DAY_MINUTE":
return parseTimeValue(format, MinuteIndex, DayMinuteMaxCnt)
case "DAY_HOUR":
return parseTimeValue(format, HourIndex, DayHourMaxCnt)
case "YEAR_MONTH":
return parseTimeValue(format, MonthIndex, YearMonthMaxCnt)
default:
return 0, 0, 0, 0, errors.Errorf("invalid single timeunit - %s", unit)
}
}
// ExtractDurationValue extract the value from format to Duration.
func ExtractDurationValue(unit string, format string) (Duration, error) {
unit = strings.ToUpper(unit)
switch unit {
case "MICROSECOND", "SECOND", "MINUTE", "HOUR", "DAY", "WEEK", "MONTH", "QUARTER", "YEAR":
_, month, day, nano, err := parseSingleTimeValue(unit, format, true)
if err != nil {
return ZeroDuration, err
}
dur := Duration{Duration: gotime.Duration((month*30+day)*int64(GoDurationDay) + nano)}
if unit == "MICROSECOND" {
dur.Fsp = MaxFsp
}
return dur, err
case "SECOND_MICROSECOND": case "SECOND_MICROSECOND":
return extractTimeValue(format, MicrosecondIndex, SecondMicrosecondMaxCnt) d, err := parseAndValidateDurationValue(format, MicrosecondIndex, SecondMicrosecondMaxCnt)
if err != nil {
return ZeroDuration, err
}
return Duration{Duration: gotime.Duration(d), Fsp: MaxFsp}, nil
case "MINUTE_MICROSECOND": case "MINUTE_MICROSECOND":
return extractTimeValue(format, MicrosecondIndex, MinuteMicrosecondMaxCnt) d, err := parseAndValidateDurationValue(format, MicrosecondIndex, MinuteMicrosecondMaxCnt)
if err != nil {
return ZeroDuration, err
}
return Duration{Duration: gotime.Duration(d), Fsp: MaxFsp}, nil
case "MINUTE_SECOND": case "MINUTE_SECOND":
return extractTimeValue(format, SecondIndex, MinuteSecondMaxCnt) d, err := parseAndValidateDurationValue(format, SecondIndex, MinuteSecondMaxCnt)
if err != nil {
return ZeroDuration, err
}
return Duration{Duration: gotime.Duration(d), Fsp: MaxFsp}, nil
case "HOUR_MICROSECOND": case "HOUR_MICROSECOND":
return extractTimeValue(format, MicrosecondIndex, HourMicrosecondMaxCnt) d, err := parseAndValidateDurationValue(format, MicrosecondIndex, HourMicrosecondMaxCnt)
if err != nil {
return ZeroDuration, err
}
return Duration{Duration: gotime.Duration(d), Fsp: MaxFsp}, nil
case "HOUR_SECOND": case "HOUR_SECOND":
return extractTimeValue(format, SecondIndex, HourSecondMaxCnt) d, err := parseAndValidateDurationValue(format, SecondIndex, HourSecondMaxCnt)
if err != nil {
return ZeroDuration, err
}
return Duration{Duration: gotime.Duration(d), Fsp: MaxFsp}, nil
case "HOUR_MINUTE": case "HOUR_MINUTE":
return extractTimeValue(format, MinuteIndex, HourMinuteMaxCnt) d, err := parseAndValidateDurationValue(format, MinuteIndex, HourMinuteMaxCnt)
if err != nil {
return ZeroDuration, err
}
return Duration{Duration: gotime.Duration(d), Fsp: 0}, nil
case "DAY_MICROSECOND": case "DAY_MICROSECOND":
return extractTimeValue(format, MicrosecondIndex, DayMicrosecondMaxCnt) d, err := parseAndValidateDurationValue(format, MicrosecondIndex, DayMicrosecondMaxCnt)
if err != nil {
return ZeroDuration, err
}
return Duration{Duration: gotime.Duration(d), Fsp: MaxFsp}, nil
case "DAY_SECOND": case "DAY_SECOND":
return extractTimeValue(format, SecondIndex, DaySecondMaxCnt) d, err := parseAndValidateDurationValue(format, SecondIndex, DaySecondMaxCnt)
if err != nil {
return ZeroDuration, err
}
return Duration{Duration: gotime.Duration(d), Fsp: MaxFsp}, nil
case "DAY_MINUTE": case "DAY_MINUTE":
return extractTimeValue(format, MinuteIndex, DayMinuteMaxCnt) d, err := parseAndValidateDurationValue(format, MinuteIndex, DayMinuteMaxCnt)
if err != nil {
return ZeroDuration, err
}
return Duration{Duration: gotime.Duration(d), Fsp: 0}, nil
case "DAY_HOUR": case "DAY_HOUR":
return extractTimeValue(format, HourIndex, DayHourMaxCnt) d, err := parseAndValidateDurationValue(format, HourIndex, DayHourMaxCnt)
if err != nil {
return ZeroDuration, err
}
return Duration{Duration: gotime.Duration(d), Fsp: 0}, nil
case "YEAR_MONTH": case "YEAR_MONTH":
return extractTimeValue(format, MonthIndex, YearMonthMaxCnt) _, err := parseAndValidateDurationValue(format, MonthIndex, YearMonthMaxCnt)
if err != nil {
return ZeroDuration, err
}
// MONTH must exceed the limit of mysql's duration. So just return overflow error.
return ZeroDuration, ErrDatetimeFunctionOverflow.GenWithStackByArgs("time")
default: default:
return 0, 0, 0, 0, errors.Errorf("invalid singel timeunit - %s", unit) return ZeroDuration, errors.Errorf("invalid single timeunit - %s", unit)
} }
} }
......
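The parseTimeValue / ParseDurationValue changes above split the INTERVAL string on any punctuation (per the MySQL rule cited in the comment) and carry whole days separately from the intra-day nanoseconds. Below is a minimal standalone sketch of the splitting-and-right-aligning idea; splitInterval and its overflow guard are illustrative only, not the vendored package's API.

```go
package main

import (
	"fmt"
	"strings"
	"unicode"
)

// splitInterval breaks an INTERVAL value such as "1 2:3:4" into digit runs and
// right-aligns them into cnt slots, so missing leading parts default to zero.
// Sign handling and fractional-second padding are omitted for brevity.
func splitInterval(format string, cnt int) []string {
	fields := strings.FieldsFunc(format, func(r rune) bool {
		return !unicode.IsDigit(r)
	})
	if len(fields) > cnt {
		// Extra leading fields are simply dropped here; the vendored parser
		// handles this case itself.
		fields = fields[len(fields)-cnt:]
	}
	out := make([]string, cnt)
	for i := range out {
		out[i] = "0"
	}
	copy(out[cnt-len(fields):], fields)
	return out
}

func main() {
	// DAY_SECOND implies four slots: days, hours, minutes, seconds.
	fmt.Println(splitInterval("1 2:3:4", 4)) // [1 2 3 4]
	fmt.Println(splitInterval("3:4", 4))     // [0 0 3 4]
}
```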
...@@ -22,6 +22,7 @@ import (
"runtime" "runtime"
"sort" "sort"
"strings" "strings"
"time"
"github.com/pingcap/errors" "github.com/pingcap/errors"
zaplog "github.com/pingcap/log" zaplog "github.com/pingcap/log"
...@@ -208,7 +209,9 @@ func (f *textFormatter) Format(entry *log.Entry) ([]byte, error) {
const ( const (
// SlowLogTimeFormat is the time format for slow log. // SlowLogTimeFormat is the time format for slow log.
SlowLogTimeFormat = "2006-01-02-15:04:05.999999999 -0700" SlowLogTimeFormat = time.RFC3339Nano
// OldSlowLogTimeFormat is the first version of the time format for slow log. This is used for compatibility.
OldSlowLogTimeFormat = "2006-01-02-15:04:05.999999999 -0700"
) )
type slowLogFormatter struct{} type slowLogFormatter struct{}
......
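Above, the slow-log timestamp layout switches to time.RFC3339Nano, while the previous layout is preserved as OldSlowLogTimeFormat so older logs can still be parsed. A small standalone comparison of the two layouts (illustrative only):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Date(2019, 5, 24, 6, 40, 4, 123456789, time.UTC)

	// New layout written by the slow log.
	fmt.Println(t.Format(time.RFC3339Nano)) // 2019-05-24T06:40:04.123456789Z

	// Old layout, kept around so previously written logs can still be parsed.
	const oldSlowLogTimeFormat = "2006-01-02-15:04:05.999999999 -0700"
	fmt.Println(t.Format(oldSlowLogTimeFormat)) // 2019-05-24-06:40:04.123456789 +0000
}
```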
...@@ -15,8 +15,8 @@ package math
import "math" import "math"
// http://cavaliercoder.com/blog/optimized-abs-for-int64-in-go.html // Abs implement the abs function according to http://cavaliercoder.com/blog/optimized-abs-for-int64-in-go.html
func abs(n int64) int64 { func Abs(n int64) int64 {
y := n >> 63 y := n >> 63
return (n ^ y) - y return (n ^ y) - y
} }
...@@ -46,5 +46,5 @@ func StrLenOfInt64Fast(x int64) int {
if x < 0 { if x < 0 {
size = 1 // add "-" sign on the length count size = 1 // add "-" sign on the length count
} }
return size + StrLenOfUint64Fast(uint64(abs(x))) return size + StrLenOfUint64Fast(uint64(Abs(x)))
} }
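The exported Abs keeps the branchless trick from the linked post: n >> 63 is all ones for a negative int64 and zero otherwise, so (n ^ y) - y performs two's-complement negation exactly when n is negative. A quick standalone check (with the usual caveat that math.MinInt64 has no positive counterpart and wraps):

```go
package main

import (
	"fmt"
	"math"
)

func abs(n int64) int64 {
	y := n >> 63       // -1 if n is negative, 0 otherwise (arithmetic shift)
	return (n ^ y) - y // flip bits and add one only when y == -1
}

func main() {
	fmt.Println(abs(-42), abs(42))  // 42 42
	fmt.Println(abs(math.MinInt64)) // -9223372036854775808: overflows, as with plain negation
}
```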
...@@ -43,9 +43,10 @@ type Tracker struct {
children []*Tracker // The children memory trackers children []*Tracker // The children memory trackers
} }
label string // Label of this "Tracker". label fmt.Stringer // Label of this "Tracker".
bytesConsumed int64 // Consumed bytes. bytesConsumed int64 // Consumed bytes.
bytesLimit int64 // Negative value means no limit. bytesLimit int64 // Negative value means no limit.
maxConsumed int64 // max number of bytes consumed during execution.
actionOnExceed ActionOnExceed actionOnExceed ActionOnExceed
parent *Tracker // The parent memory tracker. parent *Tracker // The parent memory tracker.
} }
...@@ -53,7 +54,7 @@ type Tracker struct {
// NewTracker creates a memory tracker. // NewTracker creates a memory tracker.
// 1. "label" is the label used in the usage string. // 1. "label" is the label used in the usage string.
// 2. "bytesLimit < 0" means no limit. // 2. "bytesLimit < 0" means no limit.
func NewTracker(label string, bytesLimit int64) *Tracker { func NewTracker(label fmt.Stringer, bytesLimit int64) *Tracker {
return &Tracker{ return &Tracker{
label: label, label: label,
bytesLimit: bytesLimit, bytesLimit: bytesLimit,
...@@ -67,7 +68,7 @@ func (t *Tracker) SetActionOnExceed(a ActionOnExceed) {
} }
// SetLabel sets the label of a Tracker. // SetLabel sets the label of a Tracker.
func (t *Tracker) SetLabel(label string) { func (t *Tracker) SetLabel(label fmt.Stringer) {
t.label = label t.label = label
} }
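Since labels are now fmt.Stringer values rather than plain strings, callers can pass typed labels and defer the formatting cost until the tracker tree is actually printed. A minimal sketch assuming the vendored util/memory package exactly as shown in this diff; execLabel is a made-up label type:

```go
package main

import (
	"fmt"

	"github.com/pingcap/tidb/util/memory"
)

// execLabel is an illustrative label type; any fmt.Stringer works now that
// NewTracker and SetLabel take fmt.Stringer instead of string.
type execLabel string

func (l execLabel) String() string { return string(l) }

func main() {
	// A negative bytesLimit means no limit, per the NewTracker comment above.
	t := memory.NewTracker(execLabel("HashJoinExec"), -1)
	t.Consume(1 << 20)
	fmt.Println(t.BytesConsumed()) // 1048576
}
```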
...@@ -142,6 +143,19 @@ func (t *Tracker) Consume(bytes int64) {
if atomic.AddInt64(&tracker.bytesConsumed, bytes) >= tracker.bytesLimit && tracker.bytesLimit > 0 { if atomic.AddInt64(&tracker.bytesConsumed, bytes) >= tracker.bytesLimit && tracker.bytesLimit > 0 {
rootExceed = tracker rootExceed = tracker
} }
if tracker.parent == nil {
// since we only need a total memory usage during execution,
// we only record max consumed bytes in root(statement-level) for performance.
for {
maxNow := atomic.LoadInt64(&tracker.maxConsumed)
consumed := atomic.LoadInt64(&tracker.bytesConsumed)
if consumed > maxNow && !atomic.CompareAndSwapInt64(&tracker.maxConsumed, maxNow, consumed) {
continue
}
break
}
}
} }
if rootExceed != nil { if rootExceed != nil {
rootExceed.actionOnExceed.Action(rootExceed) rootExceed.actionOnExceed.Action(rootExceed)
...@@ -153,6 +167,11 @@ func (t *Tracker) BytesConsumed() int64 {
return atomic.LoadInt64(&t.bytesConsumed) return atomic.LoadInt64(&t.bytesConsumed)
} }
// MaxConsumed returns max number of bytes consumed during execution.
func (t *Tracker) MaxConsumed() int64 {
return atomic.LoadInt64(&t.maxConsumed)
}
// String returns the string representation of this Tracker tree. // String returns the string representation of this Tracker tree.
func (t *Tracker) String() string { func (t *Tracker) String() string {
buffer := bytes.NewBufferString("\n") buffer := bytes.NewBufferString("\n")
......
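The Consume change above records a statement-level high-water mark in the root tracker with a compare-and-swap loop, so concurrent children can never overwrite a larger maximum with a smaller one. A standalone sketch of that lock-free max pattern:

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// storeMax raises *max to v unless another goroutine has already stored
// something larger; the loop retries only when a CAS loses a race.
func storeMax(max *int64, v int64) {
	for {
		old := atomic.LoadInt64(max)
		if v <= old || atomic.CompareAndSwapInt64(max, old, v) {
			return
		}
	}
}

func main() {
	var maxConsumed int64
	var wg sync.WaitGroup
	for i := int64(1); i <= 100; i++ {
		wg.Add(1)
		go func(v int64) {
			defer wg.Done()
			storeMax(&maxConsumed, v)
		}(i)
	}
	wg.Wait()
	fmt.Println(atomic.LoadInt64(&maxConsumed)) // 100
}
```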
...@@ -105,10 +105,10 @@
"revisionTime": "2018-09-19T09:01:24Z" "revisionTime": "2018-09-19T09:01:24Z"
}, },
{ {
"checksumSHA1": "QPt6+cvPrmKkLeujdqEKgRH84Mw=", "checksumSHA1": "wFLw18XvRl2NnXP3wReuyB4KXk4=",
"path": "github.com/pingcap/errors", "path": "github.com/pingcap/errors",
"revision": "1176802fff62540cc87d289bd40c52a2d6b2ea16", "revision": "fc6e4ce558343e6eab2450e7653502fee61d9ad6",
"revisionTime": "2018-10-24T15:10:47Z" "revisionTime": "2019-05-15T08:44:45Z"
}, },
{ {
"checksumSHA1": "eIqELR/hEESdiBCcpk5kmdu2e3U=", "checksumSHA1": "eIqELR/hEESdiBCcpk5kmdu2e3U=",
...@@ -117,118 +117,118 @@
"revisionTime": "2019-03-07T07:54:52Z" "revisionTime": "2019-03-07T07:54:52Z"
}, },
{ {
"checksumSHA1": "Spi5+PnYjhKAZ1u62Ym5OGzBkbo=", "checksumSHA1": "IB9wW2GmSlnLVDRkaUXcF3CJ15g=",
"path": "github.com/pingcap/parser", "path": "github.com/pingcap/parser",
"revision": "cdceeb2c5476084c21987c42bc9405cdcbc290ef", "revision": "89ae120307cc42a1cccc4daba2c589966db77055",
"revisionTime": "2019-04-08T06:41:40Z" "revisionTime": "2019-05-23T11:32:41Z"
}, },
{ {
"checksumSHA1": "2E4g3rvXdR/N0IA7vTngW+I2dlI=", "checksumSHA1": "qeft79GIpt7bP++Qlg1UNSdXL3E=",
"path": "github.com/pingcap/parser/ast", "path": "github.com/pingcap/parser/ast",
"revision": "cdceeb2c5476084c21987c42bc9405cdcbc290ef", "revision": "89ae120307cc42a1cccc4daba2c589966db77055",
"revisionTime": "2019-04-08T06:41:40Z" "revisionTime": "2019-05-23T11:32:41Z"
}, },
{ {
"checksumSHA1": "xiv40YqnvHcbIhaEzJqjh5K7ehM=", "checksumSHA1": "xiv40YqnvHcbIhaEzJqjh5K7ehM=",
"path": "github.com/pingcap/parser/auth", "path": "github.com/pingcap/parser/auth",
"revision": "cdceeb2c5476084c21987c42bc9405cdcbc290ef", "revision": "89ae120307cc42a1cccc4daba2c589966db77055",
"revisionTime": "2019-04-08T06:41:40Z" "revisionTime": "2019-05-23T11:32:41Z"
}, },
{ {
"checksumSHA1": "TNatzumortkzsN5ShBPORCLX0ww=", "checksumSHA1": "EvDXpplklIXmKqLclzWzaN/uHKQ=",
"path": "github.com/pingcap/parser/charset", "path": "github.com/pingcap/parser/charset",
"revision": "cdceeb2c5476084c21987c42bc9405cdcbc290ef", "revision": "89ae120307cc42a1cccc4daba2c589966db77055",
"revisionTime": "2019-04-08T06:41:40Z" "revisionTime": "2019-05-23T11:32:41Z"
}, },
{ {
"checksumSHA1": "ohLJW2u9NJEzYIJL/AjOqcuKfMY=", "checksumSHA1": "Aao6Mul/qqogOwPwM2arBKZkYZs=",
"path": "github.com/pingcap/parser/format", "path": "github.com/pingcap/parser/format",
"revision": "cdceeb2c5476084c21987c42bc9405cdcbc290ef", "revision": "89ae120307cc42a1cccc4daba2c589966db77055",
"revisionTime": "2019-04-08T06:41:40Z" "revisionTime": "2019-05-23T11:32:41Z"
}, },
{ {
"checksumSHA1": "owbn76pdlOudJtIi4vPJ+3SlKuk=", "checksumSHA1": "YN9BYMOMxEXjrUCPPYQREN90BC0=",
"path": "github.com/pingcap/parser/model", "path": "github.com/pingcap/parser/model",
"revision": "cdceeb2c5476084c21987c42bc9405cdcbc290ef", "revision": "89ae120307cc42a1cccc4daba2c589966db77055",
"revisionTime": "2019-04-08T06:41:40Z" "revisionTime": "2019-05-23T11:32:41Z"
}, },
{ {
"checksumSHA1": "10eiqFEB//6VqEAaViZGoQP2zoQ=", "checksumSHA1": "/qaOJqnSLO0dZbyQDnq75wUPiLo=",
"path": "github.com/pingcap/parser/mysql", "path": "github.com/pingcap/parser/mysql",
"revision": "cdceeb2c5476084c21987c42bc9405cdcbc290ef", "revision": "89ae120307cc42a1cccc4daba2c589966db77055",
"revisionTime": "2019-04-08T06:41:40Z" "revisionTime": "2019-05-23T11:32:41Z"
}, },
{ {
"checksumSHA1": "olapD16WCMBU9vrA5PtlERGFfXw=", "checksumSHA1": "olapD16WCMBU9vrA5PtlERGFfXw=",
"path": "github.com/pingcap/parser/opcode", "path": "github.com/pingcap/parser/opcode",
"revision": "cdceeb2c5476084c21987c42bc9405cdcbc290ef", "revision": "89ae120307cc42a1cccc4daba2c589966db77055",
"revisionTime": "2019-04-08T06:41:40Z" "revisionTime": "2019-05-23T11:32:41Z"
}, },
{ {
"checksumSHA1": "TF2rMYy9ewgZpFsJb+jaGXXqZqc=", "checksumSHA1": "kNunWp0HfikkRiZlOzfD1bvHruM=",
"path": "github.com/pingcap/parser/terror", "path": "github.com/pingcap/parser/terror",
"revision": "cdceeb2c5476084c21987c42bc9405cdcbc290ef", "revision": "89ae120307cc42a1cccc4daba2c589966db77055",
"revisionTime": "2019-04-08T06:41:40Z" "revisionTime": "2019-05-23T11:32:41Z"
}, },
{ {
"checksumSHA1": "99wS/t3dZNvqLZ+DK/V9D4or3R8=", "checksumSHA1": "abJKAbu4Cro4oJZ2IeI+n+0R87A=",
"path": "github.com/pingcap/parser/types", "path": "github.com/pingcap/parser/types",
"revision": "cdceeb2c5476084c21987c42bc9405cdcbc290ef", "revision": "89ae120307cc42a1cccc4daba2c589966db77055",
"revisionTime": "2019-04-08T06:41:40Z" "revisionTime": "2019-05-23T11:32:41Z"
}, },
{ {
"checksumSHA1": "hOYJW5hSqstxumyNG8+tItUtmhU=", "checksumSHA1": "t0O+34iPgOlRt020Cn36smUWhwQ=",
"path": "github.com/pingcap/tidb/sessionctx/stmtctx", "path": "github.com/pingcap/tidb/sessionctx/stmtctx",
"revision": "f6a36e0b3634759b0e8f8afef63c70c06707279c", "revision": "cc74145ffa9e48edcae0fb394618ada43b2776c0",
"revisionTime": "2019-04-09T02:17:41Z" "revisionTime": "2019-05-24T06:40:04Z"
}, },
{ {
"checksumSHA1": "p2EXSv26CqDA/O8oA1jkig+42tU=", "checksumSHA1": "1INT6BSMg5WA9x4ftRegJBhDJQg=",
"path": "github.com/pingcap/tidb/types", "path": "github.com/pingcap/tidb/types",
"revision": "f6a36e0b3634759b0e8f8afef63c70c06707279c", "revision": "cc74145ffa9e48edcae0fb394618ada43b2776c0",
"revisionTime": "2019-04-09T02:17:41Z" "revisionTime": "2019-05-24T06:40:04Z"
}, },
{ {
"checksumSHA1": "fPdBwAtPVKOr7YAyOMnRxyHixoM=", "checksumSHA1": "PwXMuapqcWj1+hMEcRIJhLJ3NsY=",
"path": "github.com/pingcap/tidb/types/json", "path": "github.com/pingcap/tidb/types/json",
"revision": "f6a36e0b3634759b0e8f8afef63c70c06707279c", "revision": "cc74145ffa9e48edcae0fb394618ada43b2776c0",
"revisionTime": "2019-04-09T02:17:41Z" "revisionTime": "2019-05-24T06:40:04Z"
}, },
{ {
"checksumSHA1": "45zWX5Q6D6aTEWtc4p/lbD9WD4o=", "checksumSHA1": "45zWX5Q6D6aTEWtc4p/lbD9WD4o=",
"path": "github.com/pingcap/tidb/types/parser_driver", "path": "github.com/pingcap/tidb/types/parser_driver",
"revision": "f6a36e0b3634759b0e8f8afef63c70c06707279c", "revision": "cc74145ffa9e48edcae0fb394618ada43b2776c0",
"revisionTime": "2019-04-09T02:17:41Z" "revisionTime": "2019-05-24T06:40:04Z"
}, },
{ {
"checksumSHA1": "za/7NvrgGTXpUf/A4/MCtgeNp+Y=", "checksumSHA1": "za/7NvrgGTXpUf/A4/MCtgeNp+Y=",
"path": "github.com/pingcap/tidb/util/execdetails", "path": "github.com/pingcap/tidb/util/execdetails",
"revision": "f6a36e0b3634759b0e8f8afef63c70c06707279c", "revision": "cc74145ffa9e48edcae0fb394618ada43b2776c0",
"revisionTime": "2019-04-09T02:17:41Z" "revisionTime": "2019-05-24T06:40:04Z"
}, },
{ {
"checksumSHA1": "RdbHgQWMHjRtKjqPcTX81k1V3sw=", "checksumSHA1": "RdbHgQWMHjRtKjqPcTX81k1V3sw=",
"path": "github.com/pingcap/tidb/util/hack", "path": "github.com/pingcap/tidb/util/hack",
"revision": "f6a36e0b3634759b0e8f8afef63c70c06707279c", "revision": "cc74145ffa9e48edcae0fb394618ada43b2776c0",
"revisionTime": "2019-04-09T02:17:41Z" "revisionTime": "2019-05-24T06:40:04Z"
}, },
{ {
"checksumSHA1": "wlgkvTvOR4cyv/X16Kt07HzAWeo=", "checksumSHA1": "JYbZwZe2uuqKVVV40ZU4G9zGEBE=",
"path": "github.com/pingcap/tidb/util/logutil", "path": "github.com/pingcap/tidb/util/logutil",
"revision": "f6a36e0b3634759b0e8f8afef63c70c06707279c", "revision": "cc74145ffa9e48edcae0fb394618ada43b2776c0",
"revisionTime": "2019-04-09T02:17:41Z" "revisionTime": "2019-05-24T06:40:04Z"
}, },
{ {
"checksumSHA1": "UoBGdswa5v8jGAVQxP3RRrMsq9w=", "checksumSHA1": "OveQu0ABBJmMEwmmthqSRQC2Ef0=",
"path": "github.com/pingcap/tidb/util/math", "path": "github.com/pingcap/tidb/util/math",
"revision": "f6a36e0b3634759b0e8f8afef63c70c06707279c", "revision": "cc74145ffa9e48edcae0fb394618ada43b2776c0",
"revisionTime": "2019-04-09T02:17:41Z" "revisionTime": "2019-05-24T06:40:04Z"
}, },
{ {
"checksumSHA1": "0teuFRow8w3BahNYK6IrAtgZsUs=", "checksumSHA1": "9q+/RZZoN4cq/FbCUCD0uVAyqeU=",
"path": "github.com/pingcap/tidb/util/memory", "path": "github.com/pingcap/tidb/util/memory",
"revision": "f6a36e0b3634759b0e8f8afef63c70c06707279c", "revision": "cc74145ffa9e48edcae0fb394618ada43b2776c0",
"revisionTime": "2019-04-09T02:17:41Z" "revisionTime": "2019-05-24T06:40:04Z"
}, },
{ {
"checksumSHA1": "QPIBwDNUFF5Whrnd41S3mkKa4gQ=", "checksumSHA1": "QPIBwDNUFF5Whrnd41S3mkKa4gQ=",
...@@ -485,62 +485,62 @@
{ {
"checksumSHA1": "aKn1oKcY74N8TRLm3Ayt7Q4bbI4=", "checksumSHA1": "aKn1oKcY74N8TRLm3Ayt7Q4bbI4=",
"path": "vitess.io/vitess/go/bytes2", "path": "vitess.io/vitess/go/bytes2",
"revision": "1dc74aace0998ae1424845f9a4590c60ad622e45", "revision": "eb2d057927b37c5a6f144ab5baa762881cffae8d",
"revisionTime": "2019-04-07T19:11:39Z" "revisionTime": "2019-05-23T12:28:24Z"
}, },
{ {
"checksumSHA1": "bhE6CGQgZTIgLPp9lnvlKW/47xc=", "checksumSHA1": "bhE6CGQgZTIgLPp9lnvlKW/47xc=",
"path": "vitess.io/vitess/go/hack", "path": "vitess.io/vitess/go/hack",
"revision": "1dc74aace0998ae1424845f9a4590c60ad622e45", "revision": "eb2d057927b37c5a6f144ab5baa762881cffae8d",
"revisionTime": "2019-04-07T19:11:39Z" "revisionTime": "2019-05-23T12:28:24Z"
}, },
{ {
"checksumSHA1": "w4BH8HL/CgT6aBWojJeZHOj5DZg=", "checksumSHA1": "RERqgxOX48XzRIoe5fQzvWSJV0Y=",
"path": "vitess.io/vitess/go/sqltypes", "path": "vitess.io/vitess/go/sqltypes",
"revision": "1dc74aace0998ae1424845f9a4590c60ad622e45", "revision": "eb2d057927b37c5a6f144ab5baa762881cffae8d",
"revisionTime": "2019-04-07T19:11:39Z" "revisionTime": "2019-05-23T12:28:24Z"
}, },
{ {
"checksumSHA1": "vAIRxI6MHsq3x1hLQwIyw5AvqtI=", "checksumSHA1": "vAIRxI6MHsq3x1hLQwIyw5AvqtI=",
"path": "vitess.io/vitess/go/vt/log", "path": "vitess.io/vitess/go/vt/log",
"revision": "1dc74aace0998ae1424845f9a4590c60ad622e45", "revision": "eb2d057927b37c5a6f144ab5baa762881cffae8d",
"revisionTime": "2019-04-07T19:11:39Z" "revisionTime": "2019-05-23T12:28:24Z"
}, },
{ {
"checksumSHA1": "87Zndvk3Y+M+QxMx3uFa0iSbvWY=", "checksumSHA1": "87Zndvk3Y+M+QxMx3uFa0iSbvWY=",
"path": "vitess.io/vitess/go/vt/proto/query", "path": "vitess.io/vitess/go/vt/proto/query",
"revision": "1dc74aace0998ae1424845f9a4590c60ad622e45", "revision": "eb2d057927b37c5a6f144ab5baa762881cffae8d",
"revisionTime": "2019-04-07T19:11:39Z" "revisionTime": "2019-05-23T12:28:24Z"
}, },
{ {
"checksumSHA1": "xpcb9NfXMEeHhEPStbJntIfa5GQ=", "checksumSHA1": "xpcb9NfXMEeHhEPStbJntIfa5GQ=",
"path": "vitess.io/vitess/go/vt/proto/topodata", "path": "vitess.io/vitess/go/vt/proto/topodata",
"revision": "1dc74aace0998ae1424845f9a4590c60ad622e45", "revision": "eb2d057927b37c5a6f144ab5baa762881cffae8d",
"revisionTime": "2019-04-07T19:11:39Z" "revisionTime": "2019-05-23T12:28:24Z"
}, },
{ {
"checksumSHA1": "7rQUJ9mz64dMJpFhIGNkMvG2Zqs=", "checksumSHA1": "l9fmSuOJyoq+EKM4QxfoSw8hLPY=",
"path": "vitess.io/vitess/go/vt/proto/vtgate", "path": "vitess.io/vitess/go/vt/proto/vtgate",
"revision": "1dc74aace0998ae1424845f9a4590c60ad622e45", "revision": "eb2d057927b37c5a6f144ab5baa762881cffae8d",
"revisionTime": "2019-04-07T19:11:39Z" "revisionTime": "2019-05-23T12:28:24Z"
}, },
{ {
"checksumSHA1": "qz32abYdmm9NfKTc++K0l1EvXXM=", "checksumSHA1": "qz32abYdmm9NfKTc++K0l1EvXXM=",
"path": "vitess.io/vitess/go/vt/proto/vtrpc", "path": "vitess.io/vitess/go/vt/proto/vtrpc",
"revision": "1dc74aace0998ae1424845f9a4590c60ad622e45", "revision": "eb2d057927b37c5a6f144ab5baa762881cffae8d",
"revisionTime": "2019-04-07T19:11:39Z" "revisionTime": "2019-05-23T12:28:24Z"
}, },
{ {
"checksumSHA1": "WIWzi5IyyoFxkJDG+Oj/DnwpEXg=", "checksumSHA1": "/V79kL29yMBxAofQBL/XqxJv/GE=",
"path": "vitess.io/vitess/go/vt/sqlparser", "path": "vitess.io/vitess/go/vt/sqlparser",
"revision": "1dc74aace0998ae1424845f9a4590c60ad622e45", "revision": "eb2d057927b37c5a6f144ab5baa762881cffae8d",
"revisionTime": "2019-04-07T19:11:39Z" "revisionTime": "2019-05-23T12:28:24Z"
}, },
{ {
"checksumSHA1": "U6rh56fSka/7xLGnl1OnWgBItn8=", "checksumSHA1": "qhGH2j3onpaSh+fbs1fKPoTxUcw=",
"path": "vitess.io/vitess/go/vt/vterrors", "path": "vitess.io/vitess/go/vt/vterrors",
"revision": "1dc74aace0998ae1424845f9a4590c60ad622e45", "revision": "eb2d057927b37c5a6f144ab5baa762881cffae8d",
"revisionTime": "2019-04-07T19:11:39Z" "revisionTime": "2019-05-23T12:28:24Z"
} }
], ],
"rootPath": "github.com/XiaoMi/soar" "rootPath": "github.com/XiaoMi/soar"
......
...@@ -57,6 +57,11 @@ func BuildBindVariables(in map[string]interface{}) (map[string]*querypb.BindVari
return out, nil return out, nil
} }
// Int8BindVariable converts an int8 to a bind var.
func Int8BindVariable(v int8) *querypb.BindVariable {
return ValueBindVariable(NewInt8(v))
}
// Int32BindVariable converts an int32 to a bind var. // Int32BindVariable converts an int32 to a bind var.
func Int32BindVariable(v int32) *querypb.BindVariable { func Int32BindVariable(v int32) *querypb.BindVariable {
return ValueBindVariable(NewInt32(v)) return ValueBindVariable(NewInt32(v))
...@@ -99,6 +104,11 @@ func BuildBindVariable(v interface{}) (*querypb.BindVariable, error) {
return StringBindVariable(v), nil return StringBindVariable(v), nil
case []byte: case []byte:
return BytesBindVariable(v), nil return BytesBindVariable(v), nil
case bool:
if v {
return Int8BindVariable(1), nil
}
return Int8BindVariable(0), nil
case int: case int:
return &querypb.BindVariable{ return &querypb.BindVariable{
Type: querypb.Type_INT64, Type: querypb.Type_INT64,
......
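With the new bool case, BuildBindVariable maps true/false to an INT8 (TINYINT) bind variable holding 1 or 0, which matches how MySQL represents booleans. A usage sketch, assuming the vendored vitess.io/vitess/go/sqltypes package as shown above:

```go
package main

import (
	"fmt"

	"vitess.io/vitess/go/sqltypes"
)

func main() {
	bv, err := sqltypes.BuildBindVariable(true)
	if err != nil {
		panic(err)
	}
	// A Go bool becomes an INT8 bind variable carrying "1" or "0".
	fmt.Println(bv.Type, string(bv.Value)) // INT8 1
}
```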
...@@ -100,6 +100,11 @@ func NewInt64(v int64) Value {
return MakeTrusted(Int64, strconv.AppendInt(nil, v, 10)) return MakeTrusted(Int64, strconv.AppendInt(nil, v, 10))
} }
// NewInt8 builds an Int8 Value.
func NewInt8(v int8) Value {
return MakeTrusted(Int8, strconv.AppendInt(nil, int64(v), 10))
}
// NewInt32 builds an Int64 Value. // NewInt32 builds an Int64 Value.
func NewInt32(v int32) Value { func NewInt32(v int32) Value {
return MakeTrusted(Int32, strconv.AppendInt(nil, int64(v), 10)) return MakeTrusted(Int32, strconv.AppendInt(nil, int64(v), 10))
......
...@@ -532,7 +532,7 @@ func (node *Stream) walkSubtree(visit Visit) error {
// the row and re-inserts with new values. For that reason we keep it as an Insert struct. // the row and re-inserts with new values. For that reason we keep it as an Insert struct.
// Replaces are currently disallowed in sharded schemas because // Replaces are currently disallowed in sharded schemas because
// of the implications the deletion part may have on vindexes. // of the implications the deletion part may have on vindexes.
// If you add fields here, consider adding them to calls to validateSubquerySamePlan. // If you add fields here, consider adding them to calls to validateUnshardedRoute.
type Insert struct { type Insert struct {
Action string Action string
Comments Comments Comments Comments
...@@ -584,7 +584,7 @@ func (Values) iInsertRows() {}
func (*ParenSelect) iInsertRows() {} func (*ParenSelect) iInsertRows() {}
// Update represents an UPDATE statement. // Update represents an UPDATE statement.
// If you add fields here, consider adding them to calls to validateSubquerySamePlan. // If you add fields here, consider adding them to calls to validateUnshardedRoute.
type Update struct { type Update struct {
Comments Comments Comments Comments
Ignore string Ignore string
...@@ -618,7 +618,7 @@ func (node *Update) walkSubtree(visit Visit) error {
} }
// Delete represents a DELETE statement. // Delete represents a DELETE statement.
// If you add fields here, consider adding them to calls to validateSubquerySamePlan. // If you add fields here, consider adding them to calls to validateUnshardedRoute.
type Delete struct { type Delete struct {
Comments Comments Comments Comments
Targets TableNames Targets TableNames
......
...@@ -88,6 +88,9 @@ func (nz *normalizer) WalkSelect(node SQLNode) (bool, error) {
// Common node types that never contain SQLVals or ListArgs but create a lot of object // Common node types that never contain SQLVals or ListArgs but create a lot of object
// allocations. // allocations.
return false, nil return false, nil
case OrderBy, GroupBy:
// do not make a bind var for order by column_position
return false, nil
} }
return true, nil return true, nil
} }
......
...@@ -53,23 +53,31 @@ func (pq *ParsedQuery) GenerateQuery(bindVariables map[string]*querypb.BindVaria
} }
var buf strings.Builder var buf strings.Builder
buf.Grow(len(pq.Query)) buf.Grow(len(pq.Query))
if err := pq.Append(&buf, bindVariables, extras); err != nil {
return "", err
}
return buf.String(), nil
}
// Append appends the generated query to the provided buffer.
func (pq *ParsedQuery) Append(buf *strings.Builder, bindVariables map[string]*querypb.BindVariable, extras map[string]Encodable) error {
current := 0 current := 0
for _, loc := range pq.bindLocations { for _, loc := range pq.bindLocations {
buf.WriteString(pq.Query[current:loc.offset]) buf.WriteString(pq.Query[current:loc.offset])
name := pq.Query[loc.offset : loc.offset+loc.length] name := pq.Query[loc.offset : loc.offset+loc.length]
if encodable, ok := extras[name[1:]]; ok { if encodable, ok := extras[name[1:]]; ok {
encodable.EncodeSQL(&buf) encodable.EncodeSQL(buf)
} else { } else {
supplied, _, err := FetchBindVar(name, bindVariables) supplied, _, err := FetchBindVar(name, bindVariables)
if err != nil { if err != nil {
return "", err return err
} }
EncodeValue(&buf, supplied) EncodeValue(buf, supplied)
} }
current = loc.offset + loc.length current = loc.offset + loc.length
} }
buf.WriteString(pq.Query[current:]) buf.WriteString(pq.Query[current:])
return buf.String(), nil return nil
} }
// MarshalJSON is a custom JSON marshaler for ParsedQuery. // MarshalJSON is a custom JSON marshaler for ParsedQuery.
......
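GenerateQuery above is now a thin wrapper: it grows a strings.Builder and delegates to the new Append, which lets callers stream several generated queries into one buffer. A generic sketch of that wrapper-plus-appender split using made-up names (render/renderTo), not the vendored API:

```go
package main

import (
	"fmt"
	"strings"
)

// renderTo is the buffer-appending core: it writes the substituted query
// into the caller-provided builder and only reports errors.
func renderTo(buf *strings.Builder, query string, args map[string]string) error {
	for i, part := range strings.Fields(query) {
		if v, ok := args[part]; ok {
			part = v
		}
		if i > 0 {
			buf.WriteString(" ")
		}
		buf.WriteString(part)
	}
	return nil
}

// render keeps the old string-returning signature as a thin wrapper,
// mirroring the GenerateQuery/Append split in the diff above.
func render(query string, args map[string]string) (string, error) {
	var buf strings.Builder
	buf.Grow(len(query))
	if err := renderTo(&buf, query, args); err != nil {
		return "", err
	}
	return buf.String(), nil
}

func main() {
	out, _ := render("select * from t where id = :id", map[string]string{":id": "1"})
	fmt.Println(out) // select * from t where id = 1
}
```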
...@@ -181,7 +181,7 @@ func skipToEnd(yylex interface{}) {
%token <bytes> NULLX AUTO_INCREMENT APPROXNUM SIGNED UNSIGNED ZEROFILL %token <bytes> NULLX AUTO_INCREMENT APPROXNUM SIGNED UNSIGNED ZEROFILL
// Supported SHOW tokens // Supported SHOW tokens
%token <bytes> COLLATION DATABASES TABLES VITESS_KEYSPACES VITESS_SHARDS VITESS_TABLETS VSCHEMA VSCHEMA_TABLES VITESS_TARGET FULL PROCESSLIST COLUMNS FIELDS ENGINES PLUGINS %token <bytes> COLLATION DATABASES SCHEMAS TABLES VITESS_KEYSPACES VITESS_SHARDS VITESS_TABLETS VSCHEMA VSCHEMA_TABLES VITESS_TARGET FULL PROCESSLIST COLUMNS FIELDS ENGINES PLUGINS
// SET tokens // SET tokens
%token <bytes> NAMES CHARSET GLOBAL SESSION ISOLATION LEVEL READ WRITE ONLY REPEATABLE COMMITTED UNCOMMITTED SERIALIZABLE %token <bytes> NAMES CHARSET GLOBAL SESSION ISOLATION LEVEL READ WRITE ONLY REPEATABLE COMMITTED UNCOMMITTED SERIALIZABLE
...@@ -1485,6 +1485,10 @@ show_statement:
{ {
$$ = &Show{Type: string($2)} $$ = &Show{Type: string($2)}
} }
| SHOW SCHEMAS ddl_skip_to_end
{
$$ = &Show{Type: string($2)}
}
| SHOW ENGINES | SHOW ENGINES
{ {
$$ = &Show{Type: string($2)} $$ = &Show{Type: string($2)}
...@@ -2450,50 +2454,18 @@ function_call_keyword:
{ {
$$ = &ConvertUsingExpr{Expr: $3, Type: $5} $$ = &ConvertUsingExpr{Expr: $3, Type: $5}
} }
| SUBSTR openb column_name ',' value_expression closeb
{
$$ = &SubstrExpr{Name: $3, From: $5, To: nil}
}
| SUBSTR openb column_name ',' value_expression ',' value_expression closeb
{
$$ = &SubstrExpr{Name: $3, From: $5, To: $7}
}
| SUBSTR openb column_name FROM value_expression FOR value_expression closeb | SUBSTR openb column_name FROM value_expression FOR value_expression closeb
{ {
$$ = &SubstrExpr{Name: $3, From: $5, To: $7} $$ = &SubstrExpr{Name: $3, From: $5, To: $7}
} }
| SUBSTRING openb column_name ',' value_expression closeb
{
$$ = &SubstrExpr{Name: $3, From: $5, To: nil}
}
| SUBSTRING openb column_name ',' value_expression ',' value_expression closeb
{
$$ = &SubstrExpr{Name: $3, From: $5, To: $7}
}
| SUBSTRING openb column_name FROM value_expression FOR value_expression closeb | SUBSTRING openb column_name FROM value_expression FOR value_expression closeb
{ {
$$ = &SubstrExpr{Name: $3, From: $5, To: $7} $$ = &SubstrExpr{Name: $3, From: $5, To: $7}
} }
| SUBSTR openb STRING ',' value_expression closeb
{
$$ = &SubstrExpr{StrVal: NewStrVal($3), From: $5, To: nil}
}
| SUBSTR openb STRING ',' value_expression ',' value_expression closeb
{
$$ = &SubstrExpr{StrVal: NewStrVal($3), From: $5, To: $7}
}
| SUBSTR openb STRING FROM value_expression FOR value_expression closeb | SUBSTR openb STRING FROM value_expression FOR value_expression closeb
{ {
$$ = &SubstrExpr{StrVal: NewStrVal($3), From: $5, To: $7} $$ = &SubstrExpr{StrVal: NewStrVal($3), From: $5, To: $7}
} }
| SUBSTRING openb STRING ',' value_expression closeb
{
$$ = &SubstrExpr{StrVal: NewStrVal($3), From: $5, To: nil}
}
| SUBSTRING openb STRING ',' value_expression ',' value_expression closeb
{
$$ = &SubstrExpr{StrVal: NewStrVal($3), From: $5, To: $7}
}
| SUBSTRING openb STRING FROM value_expression FOR value_expression closeb | SUBSTRING openb STRING FROM value_expression FOR value_expression closeb
{ {
$$ = &SubstrExpr{StrVal: NewStrVal($3), From: $5, To: $7} $$ = &SubstrExpr{StrVal: NewStrVal($3), From: $5, To: $7}
...@@ -2626,6 +2598,14 @@ function_call_conflict:
{ {
$$ = &FuncExpr{Name: NewColIdent("replace"), Exprs: $3} $$ = &FuncExpr{Name: NewColIdent("replace"), Exprs: $3}
} }
| SUBSTR openb select_expression_list closeb
{
$$ = &FuncExpr{Name: NewColIdent("substr"), Exprs: $3}
}
| SUBSTRING openb select_expression_list closeb
{
$$ = &FuncExpr{Name: NewColIdent("substr"), Exprs: $3}
}
match_option: match_option:
/*empty*/ /*empty*/
...@@ -3364,6 +3344,7 @@ non_reserved_keyword:
| REPEATABLE | REPEATABLE
| RESTRICT | RESTRICT
| ROLLBACK | ROLLBACK
| SCHEMAS
| SESSION | SESSION
| SERIALIZABLE | SERIALIZABLE
| SHARE | SHARE
......
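Net effect of the grammar edits above: SUBSTR/SUBSTRING with comma-separated arguments now comes back as an ordinary substr(...) FuncExpr, the FROM ... FOR forms still build the dedicated SubstrExpr, and SHOW SCHEMAS is accepted via the new SCHEMAS token. A hedged round-trip check, assuming the vendored sqlparser's Parse and String helpers:

```go
package main

import (
	"fmt"

	"vitess.io/vitess/go/vt/sqlparser"
)

func main() {
	for _, q := range []string{
		"select substr(a, 1, 2) from t",           // now a generic substr(...) function call
		"select substring(a from 1 for 2) from t", // still the dedicated SubstrExpr form
		"show schemas",                            // newly accepted thanks to the SCHEMAS token
	} {
		stmt, err := sqlparser.Parse(q)
		if err != nil {
			panic(err)
		}
		fmt.Println(sqlparser.String(stmt))
	}
}
```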
...@@ -318,7 +318,7 @@ var keywords = map[string]int{
"rlike": REGEXP, "rlike": REGEXP,
"rollback": ROLLBACK, "rollback": ROLLBACK,
"schema": SCHEMA, "schema": SCHEMA,
"schemas": UNUSED, "schemas": SCHEMAS,
"second_microsecond": UNUSED, "second_microsecond": UNUSED,
"select": SELECT, "select": SELECT,
"sensitive": UNUSED, "sensitive": UNUSED,
......
...@@ -70,6 +70,7 @@
package vterrors package vterrors
import ( import (
"flag"
"fmt" "fmt"
"io" "io"
...@@ -77,6 +78,14 @@ import (
vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
) )
// LogErrStacks controls whether or not printing errors includes the
// embedded stack trace in the output.
var LogErrStacks bool
func init() {
flag.BoolVar(&LogErrStacks, "LogErrStacks", false, "log stack traces in errors")
}
// New returns an error with the supplied message. // New returns an error with the supplied message.
// New also records the stack trace at the point it was called. // New also records the stack trace at the point it was called.
func New(code vtrpcpb.Code, message string) error { func New(code vtrpcpb.Code, message string) error {
...@@ -122,7 +131,9 @@ func (f *fundamental) Format(s fmt.State, verb rune) {
case 'v': case 'v':
panicIfError(io.WriteString(s, "Code: "+f.code.String()+"\n")) panicIfError(io.WriteString(s, "Code: "+f.code.String()+"\n"))
panicIfError(io.WriteString(s, f.msg+"\n")) panicIfError(io.WriteString(s, f.msg+"\n"))
f.stack.Format(s, verb) if LogErrStacks {
f.stack.Format(s, verb)
}
return return
case 's': case 's':
panicIfError(io.WriteString(s, f.msg)) panicIfError(io.WriteString(s, f.msg))
...@@ -198,7 +209,9 @@ func (w *wrapping) Format(s fmt.State, verb rune) {
if rune('v') == verb { if rune('v') == verb {
panicIfError(fmt.Fprintf(s, "%v\n", w.Cause())) panicIfError(fmt.Fprintf(s, "%v\n", w.Cause()))
panicIfError(io.WriteString(s, w.msg)) panicIfError(io.WriteString(s, w.msg))
w.stack.Format(s, verb) if LogErrStacks {
w.stack.Format(s, verb)
}
return return
} }
......
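The vterrors changes gate stack traces behind the new -LogErrStacks flag, so %v output stays compact unless a caller opts in. A small sketch assuming the vendored vterrors and vtrpc packages as shown above:

```go
package main

import (
	"fmt"

	vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
	"vitess.io/vitess/go/vt/vterrors"
)

func main() {
	err := vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "bad query")

	// Default: code and message only, no embedded stack trace.
	fmt.Printf("%v\n", err)

	// Opt back in to the verbose output (normally done via the
	// -LogErrStacks command-line flag registered in the package's init).
	vterrors.LogErrStacks = true
	fmt.Printf("%v\n", err)
}
```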