Commit 4e66ded1 authored by martianzhang

update vendor

Parent fe9cb687
#!/bin/bash
## Generate Repository Version
tag="$(git describe --tags --always)"
version="$(git log --date=iso --pretty=format:"%cd" -1) ${tag}"
if [ "X${version}" == "X" ]; then
version="not a git repo"
tag="not a git repo"
fi
git_dirty=$(git diff --no-ext-diff 2>/dev/null | wc -l)
compile="$(date +"%F %T %z") by $(go version)"
branch=$(git rev-parse --abbrev-ref HEAD)
dev_path=$(
cd "$(dirname "$0")" || exit
pwd
)
cat <<EOF | gofmt >common/version.go
package common
// -version output information
const (
Version = "${version}"
Compile = "${compile}"
Branch = "${branch}"
GitDirty= ${git_dirty}
DevPath = "${dev_path}"
)
EOF
XIAOMI=$(git ls-remote --get-url | grep XiaoMi)
if [ "x${XIAOMI}" != "x" ]; then
echo "${tag}" | awk -F '-' '{print $1}' > VERSION
fi
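
The script above regenerates common/version.go on every build so the binary can report its own build metadata. Below is a minimal, hypothetical sketch of how those generated constants might be surfaced through a -version flag; only the constant names and the package path come from the script, the flag wiring and output format are assumptions, not SOAR's actual main().

```go
package main

import (
	"flag"
	"fmt"

	"github.com/XiaoMi/soar/common"
)

func main() {
	// Hypothetical flag wiring; the constants are the ones generated above.
	showVersion := flag.Bool("version", false, "print build information and exit")
	flag.Parse()
	if *showVersion {
		fmt.Println("Version: ", common.Version)
		fmt.Println("Branch:  ", common.Branch)
		fmt.Println("Compile: ", common.Compile)
		fmt.Println("GitDirty:", common.GitDirty)
		fmt.Println("DevPath: ", common.DevPath)
	}
}
```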
...@@ -2000,6 +2000,7 @@ type AlterTableSpec struct { ...@@ -2000,6 +2000,7 @@ type AlterTableSpec struct {
OrderByList []*AlterOrderItem OrderByList []*AlterOrderItem
NewTable *TableName NewTable *TableName
NewColumns []*ColumnDef NewColumns []*ColumnDef
NewConstraints []*Constraint
OldColumnName *ColumnName OldColumnName *ColumnName
NewColumnName *ColumnName NewColumnName *ColumnName
Position *ColumnPosition Position *ColumnPosition
...@@ -2072,6 +2073,7 @@ func (n *AlterTableSpec) Restore(ctx *RestoreCtx) error { ...@@ -2072,6 +2073,7 @@ func (n *AlterTableSpec) Restore(ctx *RestoreCtx) error {
return errors.Annotate(err, "An error occurred while restore AlterTableSpec.Position") return errors.Annotate(err, "An error occurred while restore AlterTableSpec.Position")
} }
} else { } else {
lenCols := len(n.NewColumns)
ctx.WritePlain("(") ctx.WritePlain("(")
for i, col := range n.NewColumns { for i, col := range n.NewColumns {
if i != 0 { if i != 0 {
...@@ -2081,6 +2083,14 @@ func (n *AlterTableSpec) Restore(ctx *RestoreCtx) error { ...@@ -2081,6 +2083,14 @@ func (n *AlterTableSpec) Restore(ctx *RestoreCtx) error {
return errors.Annotatef(err, "An error occurred while restore AlterTableSpec.NewColumns[%d]", i) return errors.Annotatef(err, "An error occurred while restore AlterTableSpec.NewColumns[%d]", i)
} }
} }
for i, constraint := range n.NewConstraints {
if i != 0 || lenCols >= 1 {
ctx.WritePlain(", ")
}
if err := constraint.Restore(ctx); err != nil {
return errors.Annotatef(err, "An error occurred while restore AlterTableSpec.NewConstraints[%d]", i)
}
}
ctx.WritePlain(")") ctx.WritePlain(")")
} }
case AlterTableAddConstraint: case AlterTableAddConstraint:
...@@ -2454,6 +2464,13 @@ func (n *AlterTableSpec) Accept(v Visitor) (Node, bool) { ...@@ -2454,6 +2464,13 @@ func (n *AlterTableSpec) Accept(v Visitor) (Node, bool) {
} }
col = node.(*ColumnDef) col = node.(*ColumnDef)
} }
for _, constraint := range n.NewConstraints {
node, ok := constraint.Accept(v)
if !ok {
return n, false
}
constraint = node.(*Constraint)
}
if n.OldColumnName != nil { if n.OldColumnName != nil {
node, ok := n.OldColumnName.Accept(v) node, ok := n.OldColumnName.Accept(v)
if !ok { if !ok {
......
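
The NewConstraints field and the matching Restore/Accept changes above let an ALTER TABLE ... ADD COLUMN (...) list carry index and constraint definitions alongside column definitions. A minimal sketch of exercising this through the vendored parser; the SQL string and the expected counts are assumptions based on the TableElementList grammar change later in this commit.

```go
package main

import (
	"fmt"

	"github.com/pingcap/parser"
	"github.com/pingcap/parser/ast"
	_ "github.com/pingcap/tidb/types/parser_driver" // registers the value-expression driver
)

func main() {
	p := parser.New()
	stmts, _, err := p.Parse("ALTER TABLE t ADD COLUMN (c1 INT, INDEX idx_c1 (c1))", "", "")
	if err != nil {
		panic(err)
	}
	spec := stmts[0].(*ast.AlterTableStmt).Specs[0]
	// With the grammar change, the index definition should land in
	// NewConstraints instead of being rejected as a column definition.
	fmt.Println(len(spec.NewColumns), len(spec.NewConstraints)) // expect: 1 1
}
```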
...@@ -2100,6 +2100,9 @@ func (n *ShowStmt) Restore(ctx *RestoreCtx) error { ...@@ -2100,6 +2100,9 @@ func (n *ShowStmt) Restore(ctx *RestoreCtx) error {
ctx.WriteName(n.IndexName.String()) ctx.WriteName(n.IndexName.String())
} }
ctx.WriteKeyWord(" REGIONS") ctx.WriteKeyWord(" REGIONS")
if err := restoreShowLikeOrWhereOpt(); err != nil {
return err
}
return nil return nil
default: default:
return errors.New("Unknown ShowStmt type") return errors.New("Unknown ShowStmt type")
......
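
The added restoreShowLikeOrWhereOpt call pairs with the grammar change further down that appends WhereClauseOptional to SHOW TABLE ... REGIONS. A small sketch; the filter column name is made up, only the statement shape comes from this commit.

```go
package main

import (
	"fmt"

	"github.com/pingcap/parser"
	"github.com/pingcap/parser/ast"
	_ "github.com/pingcap/tidb/types/parser_driver"
)

func main() {
	p := parser.New()
	stmts, _, err := p.Parse("SHOW TABLE t REGIONS WHERE region_id > 0", "", "")
	if err != nil {
		panic(err)
	}
	show := stmts[0].(*ast.ShowStmt)
	// The WHERE clause is now retained on the ShowStmt node.
	fmt.Println(show.Tp == ast.ShowRegions, show.Where != nil) // expect: true true
}
```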
...@@ -321,6 +321,7 @@ type Prepared struct { ...@@ -321,6 +321,7 @@ type Prepared struct {
Params []ParamMarkerExpr Params []ParamMarkerExpr
SchemaVersion int64 SchemaVersion int64
UseCache bool UseCache bool
CachedPlan interface{}
} }
// ExecuteStmt is a statement to execute PreparedStmt. // ExecuteStmt is a statement to execute PreparedStmt.
...@@ -2083,7 +2084,7 @@ func (n *TableOptimizerHint) Restore(ctx *RestoreCtx) error { ...@@ -2083,7 +2084,7 @@ func (n *TableOptimizerHint) Restore(ctx *RestoreCtx) error {
} }
table.Restore(ctx) table.Restore(ctx)
} }
case "index", "use_index_merge": case "use_index", "ignore_index", "use_index_merge":
n.Tables[0].Restore(ctx) n.Tables[0].Restore(ctx)
ctx.WritePlain(" ") ctx.WritePlain(" ")
for i, index := range n.Indexes { for i, index := range n.Indexes {
......
...@@ -312,6 +312,7 @@ var tokenMap = map[string]int{ ...@@ -312,6 +312,7 @@ var tokenMap = map[string]int{
"IDENTIFIED": identified, "IDENTIFIED": identified,
"IF": ifKwd, "IF": ifKwd,
"IGNORE": ignore, "IGNORE": ignore,
"IGNORE_INDEX": hintIgnoreIndex,
"IMPORT": importKwd, "IMPORT": importKwd,
"IN": in, "IN": in,
"INCREMENTAL": incremental, "INCREMENTAL": incremental,
...@@ -341,7 +342,6 @@ var tokenMap = map[string]int{ ...@@ -341,7 +342,6 @@ var tokenMap = map[string]int{
"IS": is, "IS": is,
"ISSUER": issuer, "ISSUER": issuer,
"ISOLATION": isolation, "ISOLATION": isolation,
"USE_TOJA": hintUseToja,
"JOBS": jobs, "JOBS": jobs,
"JOB": job, "JOB": job,
"JOIN": join, "JOIN": join,
...@@ -613,6 +613,7 @@ var tokenMap = map[string]int{ ...@@ -613,6 +613,7 @@ var tokenMap = map[string]int{
"TYPE": tp, "TYPE": tp,
"UNBOUNDED": unbounded, "UNBOUNDED": unbounded,
"UNCOMMITTED": uncommitted, "UNCOMMITTED": uncommitted,
"UNICODE": unicodeSym,
"UNDEFINED": undefined, "UNDEFINED": undefined,
"UNION": union, "UNION": union,
"UNIQUE": unique, "UNIQUE": unique,
...@@ -622,8 +623,10 @@ var tokenMap = map[string]int{ ...@@ -622,8 +623,10 @@ var tokenMap = map[string]int{
"UPDATE": update, "UPDATE": update,
"USAGE": usage, "USAGE": usage,
"USE": use, "USE": use,
"USE_INDEX": hintUseIndex,
"USE_INDEX_MERGE": hintUseIndexMerge, "USE_INDEX_MERGE": hintUseIndexMerge,
"USE_PLAN_CACHE": hintUsePlanCache, "USE_PLAN_CACHE": hintUsePlanCache,
"USE_TOJA": hintUseToja,
"USER": user, "USER": user,
"USING": using, "USING": using,
"UTC_DATE": utcDate, "UTC_DATE": utcDate,
......
...@@ -886,6 +886,8 @@ const ( ...@@ -886,6 +886,8 @@ const (
ErrInvalidFieldSize = 3013 ErrInvalidFieldSize = 3013
ErrInvalidJSONData = 3069 ErrInvalidJSONData = 3069
ErrGeneratedColumnFunctionIsNotAllowed = 3102 ErrGeneratedColumnFunctionIsNotAllowed = 3102
ErrUnsupportedAlterInplaceOnVirtualColumn = 3103
ErrWrongFKOptionForGeneratedColumn = 3104
ErrBadGeneratedColumn = 3105 ErrBadGeneratedColumn = 3105
ErrUnsupportedOnGeneratedColumn = 3106 ErrUnsupportedOnGeneratedColumn = 3106
ErrGeneratedColumnNonPrior = 3107 ErrGeneratedColumnNonPrior = 3107
......
...@@ -879,11 +879,13 @@ var MySQLErrName = map[uint16]string{ ...@@ -879,11 +879,13 @@ var MySQLErrName = map[uint16]string{
ErrAlterOperationNotSupportedReasonNotNull: "cannot silently convert NULL values, as required in this SQLMODE", ErrAlterOperationNotSupportedReasonNotNull: "cannot silently convert NULL values, as required in this SQLMODE",
ErrMustChangePasswordLogin: "Your password has expired. To log in you must change it using a client that supports expired passwords.", ErrMustChangePasswordLogin: "Your password has expired. To log in you must change it using a client that supports expired passwords.",
ErrRowInWrongPartition: "Found a row in wrong partition %s", ErrRowInWrongPartition: "Found a row in wrong partition %s",
ErrGeneratedColumnFunctionIsNotAllowed: "Expression of generated column '%s' contains a disallowed function.",
ErrUnsupportedAlterInplaceOnVirtualColumn: "INPLACE ADD or DROP of virtual columns cannot be combined with other ALTER TABLE actions.",
ErrWrongFKOptionForGeneratedColumn: "Cannot define foreign key with %s clause on a generated column.",
ErrBadGeneratedColumn: "The value specified for generated column '%s' in table '%s' is not allowed.", ErrBadGeneratedColumn: "The value specified for generated column '%s' in table '%s' is not allowed.",
ErrUnsupportedOnGeneratedColumn: "'%s' is not supported for generated columns.", ErrUnsupportedOnGeneratedColumn: "'%s' is not supported for generated columns.",
ErrGeneratedColumnNonPrior: "Generated column can refer only to generated columns defined prior to it.", ErrGeneratedColumnNonPrior: "Generated column can refer only to generated columns defined prior to it.",
ErrDependentByGeneratedColumn: "Column '%s' has a generated column dependency.", ErrDependentByGeneratedColumn: "Column '%s' has a generated column dependency.",
ErrGeneratedColumnFunctionIsNotAllowed: "Expression of generated column '%s' contains a disallowed function.",
ErrGeneratedColumnRefAutoInc: "Generated column '%s' cannot refer to auto-increment column.", ErrGeneratedColumnRefAutoInc: "Generated column '%s' cannot refer to auto-increment column.",
ErrInvalidFieldSize: "Invalid size for column '%s'.", ErrInvalidFieldSize: "Invalid size for column '%s'.",
ErrInvalidJSONData: "Invalid JSON data provided to function %s: %s", ErrInvalidJSONData: "Invalid JSON data provided to function %s: %s",
......
...@@ -200,7 +200,7 @@ import ( ...@@ -200,7 +200,7 @@ import (
over "OVER" over "OVER"
packKeys "PACK_KEYS" packKeys "PACK_KEYS"
partition "PARTITION" partition "PARTITION"
parser "PARSER" parser "PARSER"
percentRank "PERCENT_RANK" percentRank "PERCENT_RANK"
precisionType "PRECISION" precisionType "PRECISION"
primary "PRIMARY" primary "PRIMARY"
...@@ -494,6 +494,7 @@ import ( ...@@ -494,6 +494,7 @@ import (
tp "TYPE" tp "TYPE"
unbounded "UNBOUNDED" unbounded "UNBOUNDED"
uncommitted "UNCOMMITTED" uncommitted "UNCOMMITTED"
unicodeSym "UNICODE"
unknown "UNKNOWN" unknown "UNKNOWN"
user "USER" user "USER"
undefined "UNDEFINED" undefined "UNDEFINED"
...@@ -536,7 +537,7 @@ import ( ...@@ -536,7 +537,7 @@ import (
now "NOW" now "NOW"
position "POSITION" position "POSITION"
recent "RECENT" recent "RECENT"
std "STD" std "STD"
stddev "STDDEV" stddev "STDDEV"
stddevPop "STDDEV_POP" stddevPop "STDDEV_POP"
stddevSamp "STDDEV_SAMP" stddevSamp "STDDEV_SAMP"
...@@ -589,6 +590,8 @@ import ( ...@@ -589,6 +590,8 @@ import (
hintINLJ "INL_JOIN" hintINLJ "INL_JOIN"
hintHASHAGG "HASH_AGG" hintHASHAGG "HASH_AGG"
hintSTREAMAGG "STREAM_AGG" hintSTREAMAGG "STREAM_AGG"
hintUseIndex "USE_INDEX"
hintIgnoreIndex "IGNORE_INDEX"
hintUseIndexMerge "USE_INDEX_MERGE" hintUseIndexMerge "USE_INDEX_MERGE"
hintNoIndexMerge "NO_INDEX_MERGE" hintNoIndexMerge "NO_INDEX_MERGE"
hintUseToja "USE_TOJA" hintUseToja "USE_TOJA"
...@@ -729,13 +732,13 @@ import ( ...@@ -729,13 +732,13 @@ import (
ReplaceIntoStmt "REPLACE INTO statement" ReplaceIntoStmt "REPLACE INTO statement"
RecoverTableStmt "recover table statement" RecoverTableStmt "recover table statement"
RevokeStmt "Revoke statement" RevokeStmt "Revoke statement"
RevokeRoleStmt "Revoke role statement" RevokeRoleStmt "Revoke role statement"
RollbackStmt "ROLLBACK statement" RollbackStmt "ROLLBACK statement"
SplitRegionStmt "Split index region statement" SplitRegionStmt "Split index region statement"
SetStmt "Set variable statement" SetStmt "Set variable statement"
ChangeStmt "Change statement" ChangeStmt "Change statement"
SetRoleStmt "Set active role statement" SetRoleStmt "Set active role statement"
SetDefaultRoleStmt "Set default statement for some user" SetDefaultRoleStmt "Set default statement for some user"
ShowStmt "Show engines/databases/tables/user/columns/warnings/status statement" ShowStmt "Show engines/databases/tables/user/columns/warnings/status statement"
Statement "statement" Statement "statement"
TraceStmt "TRACE statement" TraceStmt "TRACE statement"
...@@ -824,7 +827,7 @@ import ( ...@@ -824,7 +827,7 @@ import (
FieldItem "Field item for load data clause" FieldItem "Field item for load data clause"
FieldItemList "Field items for load data clause" FieldItemList "Field items for load data clause"
FuncDatetimePrec "Function datetime precision" FuncDatetimePrec "Function datetime precision"
GetFormatSelector "{DATE|DATETIME|TIME|TIMESTAMP}" GetFormatSelector "{DATE|DATETIME|TIME|TIMESTAMP}"
GlobalScope "The scope of variable" GlobalScope "The scope of variable"
GroupByClause "GROUP BY clause" GroupByClause "GROUP BY clause"
HashString "Hashed string" HashString "Hashed string"
...@@ -1066,6 +1069,7 @@ import ( ...@@ -1066,6 +1069,7 @@ import (
Precision "Floating-point precision option" Precision "Floating-point precision option"
OptBinary "Optional BINARY" OptBinary "Optional BINARY"
OptBinMod "Optional BINARY mode" OptBinMod "Optional BINARY mode"
OptCharsetWithOptBinary "Optional BINARY or ASCII or UNICODE or BYTE"
OptCharset "Optional Character setting" OptCharset "Optional Character setting"
OptCollate "Optional Collate setting" OptCollate "Optional Collate setting"
IgnoreLines "Ignore num(int) lines" IgnoreLines "Ignore num(int) lines"
...@@ -1107,6 +1111,7 @@ import ( ...@@ -1107,6 +1111,7 @@ import (
NChar "{NCHAR|NATIONAL CHARACTER|NATIONAL CHAR}" NChar "{NCHAR|NATIONAL CHARACTER|NATIONAL CHAR}"
Varchar "{VARCHAR|VARCHARACTER|CHARACTER VARYING|CHAR VARYING}" Varchar "{VARCHAR|VARCHARACTER|CHARACTER VARYING|CHAR VARYING}"
NVarchar "{NATIONAL VARCHAR|NATIONAL VARCHARACTER|NVARCHAR|NCHAR VARCHAR|NATIONAL CHARACTER VARYING|NATIONAL CHAR VARYING|NCHAR VARYING}" NVarchar "{NATIONAL VARCHAR|NATIONAL VARCHARACTER|NVARCHAR|NCHAR VARCHAR|NATIONAL CHARACTER VARYING|NATIONAL CHAR VARYING|NCHAR VARYING}"
Year "{YEAR|SQL_TSI_YEAR}"
DeallocateSym "Deallocate or drop" DeallocateSym "Deallocate or drop"
OuterOpt "optional OUTER clause" OuterOpt "optional OUTER clause"
CrossOpt "Cross join option" CrossOpt "Cross join option"
...@@ -1283,12 +1288,24 @@ AlterTableSpec: ...@@ -1283,12 +1288,24 @@ AlterTableSpec:
Position: $5.(*ast.ColumnPosition), Position: $5.(*ast.ColumnPosition),
} }
} }
| "ADD" ColumnKeywordOpt IfNotExists '(' ColumnDefList ')' | "ADD" ColumnKeywordOpt IfNotExists '(' TableElementList ')'
{ {
tes := $5.([]interface {})
var columnDefs []*ast.ColumnDef
var constraints []*ast.Constraint
for _, te := range tes {
switch te := te.(type) {
case *ast.ColumnDef:
columnDefs = append(columnDefs, te)
case *ast.Constraint:
constraints = append(constraints, te)
}
}
$$ = &ast.AlterTableSpec{ $$ = &ast.AlterTableSpec{
IfNotExists: $3.(bool), IfNotExists: $3.(bool),
Tp: ast.AlterTableAddColumns, Tp: ast.AlterTableAddColumns,
NewColumns: $5.([]*ast.ColumnDef), NewColumns: columnDefs,
NewConstraints: constraints,
} }
} }
| "ADD" Constraint | "ADD" Constraint
...@@ -2079,7 +2096,7 @@ AnalyzeOption: ...@@ -2079,7 +2096,7 @@ AnalyzeOption:
/*******************************************************************************************/ /*******************************************************************************************/
Assignment: Assignment:
ColumnName eq Expression ColumnName eq ExprOrDefault
{ {
$$ = &ast.Assignment{Column: $1.(*ast.ColumnName), Expr:$3} $$ = &ast.Assignment{Column: $1.(*ast.ColumnName), Expr:$3}
} }
...@@ -2151,7 +2168,7 @@ ColumnDef: ...@@ -2151,7 +2168,7 @@ ColumnDef:
yylex.AppendError(yylex.Errorf("Invalid column definition")) yylex.AppendError(yylex.Errorf("Invalid column definition"))
return 1 return 1
} }
$$ = colDef $$ = colDef
} }
| ColumnName "SERIAL" ColumnOptionListOpt | ColumnName "SERIAL" ColumnOptionListOpt
{ {
...@@ -2332,7 +2349,7 @@ ColumnOption: ...@@ -2332,7 +2349,7 @@ ColumnOption:
{ {
$$ = &ast.ColumnOption{Tp: ast.ColumnOptionComment, Expr: ast.NewValueExpr($2)} $$ = &ast.ColumnOption{Tp: ast.ColumnOptionComment, Expr: ast.NewValueExpr($2)}
} }
| "CHECK" '(' Expression ')' EnforcedOrNotOrNotNullOpt | ConstraintKeywordOpt "CHECK" '(' Expression ')' EnforcedOrNotOrNotNullOpt
{ {
// See https://dev.mysql.com/doc/refman/5.7/en/create-table.html // See https://dev.mysql.com/doc/refman/5.7/en/create-table.html
// The CHECK clause is parsed but ignored by all storage engines. // The CHECK clause is parsed but ignored by all storage engines.
...@@ -2340,10 +2357,10 @@ ColumnOption: ...@@ -2340,10 +2357,10 @@ ColumnOption:
optionCheck := &ast.ColumnOption{ optionCheck := &ast.ColumnOption{
Tp: ast.ColumnOptionCheck, Tp: ast.ColumnOptionCheck,
Expr: $3, Expr: $4,
Enforced: true, Enforced: true,
} }
switch $5.(int) { switch $6.(int) {
case 0: case 0:
$$ = []*ast.ColumnOption{optionCheck, {Tp: ast.ColumnOptionNotNull}} $$ = []*ast.ColumnOption{optionCheck, {Tp: ast.ColumnOptionNotNull}}
case 1: case 1:
...@@ -3186,6 +3203,10 @@ PartDefOption: ...@@ -3186,6 +3203,10 @@ PartDefOption:
{ {
$$ = &ast.TableOption{Tp: ast.TableOptionEngine, StrValue: $3.(string)} $$ = &ast.TableOption{Tp: ast.TableOptionEngine, StrValue: $3.(string)}
} }
| "STORAGE" "ENGINE" EqOpt StringName
{
$$ = &ast.TableOption{Tp: ast.TableOptionEngine, StrValue: $4.(string)}
}
| "INSERT_METHOD" EqOpt StringName | "INSERT_METHOD" EqOpt StringName
{ {
$$ = &ast.TableOption{Tp: ast.TableOptionInsertMethod, StrValue: $3.(string)} $$ = &ast.TableOption{Tp: ast.TableOptionInsertMethod, StrValue: $3.(string)}
...@@ -3235,7 +3256,7 @@ PartDefValuesOpt: ...@@ -3235,7 +3256,7 @@ PartDefValuesOpt:
{ {
$$ = &ast.PartitionDefinitionClauseIn{} $$ = &ast.PartitionDefinitionClauseIn{}
} }
| "VALUES" "IN" '(' ExpressionList ')' | "VALUES" "IN" '(' MaxValueOrExpressionList ')'
{ {
exprs := $4.([]ast.ExprNode) exprs := $4.([]ast.ExprNode)
values := make([][]ast.ExprNode, 0, len(exprs)) values := make([][]ast.ExprNode, 0, len(exprs))
...@@ -4330,12 +4351,12 @@ UnReservedKeyword: ...@@ -4330,12 +4351,12 @@ UnReservedKeyword:
| "MAX_USER_CONNECTIONS" | "REPLICATION" | "CLIENT" | "SLAVE" | "RELOAD" | "TEMPORARY" | "ROUTINE" | "EVENT" | "ALGORITHM" | "DEFINER" | "INVOKER" | "MERGE" | "TEMPTABLE" | "UNDEFINED" | "SECURITY" | "CASCADED" | "MAX_USER_CONNECTIONS" | "REPLICATION" | "CLIENT" | "SLAVE" | "RELOAD" | "TEMPORARY" | "ROUTINE" | "EVENT" | "ALGORITHM" | "DEFINER" | "INVOKER" | "MERGE" | "TEMPTABLE" | "UNDEFINED" | "SECURITY" | "CASCADED"
| "RECOVER" | "CIPHER" | "SUBJECT" | "ISSUER" | "X509" | "NEVER" | "EXPIRE" | "ACCOUNT" | "INCREMENTAL" | "CPU" | "MEMORY" | "BLOCK" | "IO" | "CONTEXT" | "SWITCHES" | "PAGE" | "FAULTS" | "IPC" | "SWAPS" | "SOURCE" | "RECOVER" | "CIPHER" | "SUBJECT" | "ISSUER" | "X509" | "NEVER" | "EXPIRE" | "ACCOUNT" | "INCREMENTAL" | "CPU" | "MEMORY" | "BLOCK" | "IO" | "CONTEXT" | "SWITCHES" | "PAGE" | "FAULTS" | "IPC" | "SWAPS" | "SOURCE"
| "TRADITIONAL" | "SQL_BUFFER_RESULT" | "DIRECTORY" | "HISTORY" | "LIST" | "NODEGROUP" | "SYSTEM_TIME" | "PARTIAL" | "SIMPLE" | "REMOVE" | "PARTITIONING" | "STORAGE" | "DISK" | "STATS_SAMPLE_PAGES" | "SECONDARY_ENGINE" | "SECONDARY_LOAD" | "SECONDARY_UNLOAD" | "VALIDATION" | "TRADITIONAL" | "SQL_BUFFER_RESULT" | "DIRECTORY" | "HISTORY" | "LIST" | "NODEGROUP" | "SYSTEM_TIME" | "PARTIAL" | "SIMPLE" | "REMOVE" | "PARTITIONING" | "STORAGE" | "DISK" | "STATS_SAMPLE_PAGES" | "SECONDARY_ENGINE" | "SECONDARY_LOAD" | "SECONDARY_UNLOAD" | "VALIDATION"
| "WITHOUT" | "RTREE" | "EXCHANGE" | "COLUMN_FORMAT" | "REPAIR" | "IMPORT" | "DISCARD" | "TABLE_CHECKSUM" | "WITHOUT" | "RTREE" | "EXCHANGE" | "COLUMN_FORMAT" | "REPAIR" | "IMPORT" | "DISCARD" | "TABLE_CHECKSUM" | "UNICODE"
| "SQL_TSI_DAY" | "SQL_TSI_HOUR" | "SQL_TSI_MINUTE" | "SQL_TSI_MONTH" | "SQL_TSI_QUARTER" | "SQL_TSI_SECOND" | "SQL_TSI_WEEK" | "SQL_TSI_YEAR" | "INVISIBLE" | "VISIBLE" | "TYPE" | "SQL_TSI_DAY" | "SQL_TSI_HOUR" | "SQL_TSI_MINUTE" | "SQL_TSI_MONTH" | "SQL_TSI_QUARTER" | "SQL_TSI_SECOND" | "SQL_TSI_WEEK" | "SQL_TSI_YEAR" | "INVISIBLE" | "VISIBLE" | "TYPE"
TiDBKeyword: TiDBKeyword:
"ADMIN" | "AGG_TO_COP" |"BUCKETS" | "CANCEL" | "CMSKETCH" | "DDL" | "DEPTH" | "DRAINER" | "JOBS" | "JOB" | "NODE_ID" | "NODE_STATE" | "PUMP" | "SAMPLES" | "STATS" | "STATS_META" | "STATS_HISTOGRAMS" | "STATS_BUCKETS" | "STATS_HEALTHY" | "TIDB" "ADMIN" | "AGG_TO_COP" |"BUCKETS" | "CANCEL" | "CMSKETCH" | "DDL" | "DEPTH" | "DRAINER" | "JOBS" | "JOB" | "NODE_ID" | "NODE_STATE" | "PUMP" | "SAMPLES" | "STATS" | "STATS_META" | "STATS_HISTOGRAMS" | "STATS_BUCKETS" | "STATS_HEALTHY" | "TIDB"
| "HASH_JOIN" | "SM_JOIN" | "INL_JOIN" | "HASH_AGG" | "STREAM_AGG" | "USE_INDEX_MERGE" | "NO_INDEX_MERGE" | "USE_TOJA" | "ENABLE_PLAN_CACHE" | "USE_PLAN_CACHE" | "HASH_JOIN" | "SM_JOIN" | "INL_JOIN" | "HASH_AGG" | "STREAM_AGG" | "USE_INDEX" | "IGNORE_INDEX" | "USE_INDEX_MERGE" | "NO_INDEX_MERGE" | "USE_TOJA" | "ENABLE_PLAN_CACHE" | "USE_PLAN_CACHE"
| "READ_CONSISTENT_REPLICA" | "READ_FROM_STORAGE" | "QB_NAME" | "QUERY_TYPE" | "MEMORY_QUOTA" | "OLAP" | "OLTP" | "TOPN" | "TIKV" | "TIFLASH" | "SPLIT" | "OPTIMISTIC" | "PESSIMISTIC" | "WIDTH" | "REGIONS" | "READ_CONSISTENT_REPLICA" | "READ_FROM_STORAGE" | "QB_NAME" | "QUERY_TYPE" | "MEMORY_QUOTA" | "OLAP" | "OLTP" | "TOPN" | "TIKV" | "TIFLASH" | "SPLIT" | "OPTIMISTIC" | "PESSIMISTIC" | "WIDTH" | "REGIONS"
NotKeywordToken: NotKeywordToken:
...@@ -4454,7 +4475,7 @@ ExprOrDefault: ...@@ -4454,7 +4475,7 @@ ExprOrDefault:
} }
ColumnSetValue: ColumnSetValue:
ColumnName eq Expression ColumnName eq ExprOrDefault
{ {
$$ = &ast.Assignment{ $$ = &ast.Assignment{
Column: $1.(*ast.ColumnName), Column: $1.(*ast.ColumnName),
...@@ -6648,7 +6669,16 @@ OptimizerHintList: ...@@ -6648,7 +6669,16 @@ OptimizerHintList:
} }
TableOptimizerHintOpt: TableOptimizerHintOpt:
index '(' QueryBlockOpt HintTable IndexNameList ')' hintUseIndex '(' QueryBlockOpt HintTable IndexNameList ')'
{
$$ = &ast.TableOptimizerHint{
HintName: model.NewCIStr($1),
QBName: $3.(model.CIStr),
Tables: []ast.HintTable{$4.(ast.HintTable)},
Indexes: $5.([]model.CIStr),
}
}
| hintIgnoreIndex '(' QueryBlockOpt HintTable IndexNameList ')'
{ {
$$ = &ast.TableOptimizerHint{ $$ = &ast.TableOptimizerHint{
HintName: model.NewCIStr($1), HintName: model.NewCIStr($1),
...@@ -7685,20 +7715,29 @@ ShowStmt: ...@@ -7685,20 +7715,29 @@ ShowStmt:
User: $4.(*auth.UserIdentity), User: $4.(*auth.UserIdentity),
} }
} }
| "SHOW" "TABLE" TableName "REGIONS" | "SHOW" "TABLE" TableName "REGIONS" WhereClauseOptional
{ {
$$ = &ast.ShowStmt{
stmt := &ast.ShowStmt{
Tp: ast.ShowRegions, Tp: ast.ShowRegions,
Table: $3.(*ast.TableName), Table: $3.(*ast.TableName),
} }
if $5 != nil {
stmt.Where = $5.(ast.ExprNode)
}
$$ = stmt
} }
| "SHOW" "TABLE" TableName "INDEX" Identifier "REGIONS" | "SHOW" "TABLE" TableName "INDEX" Identifier "REGIONS" WhereClauseOptional
{ {
$$ = &ast.ShowStmt{ stmt := &ast.ShowStmt{
Tp: ast.ShowRegions, Tp: ast.ShowRegions,
Table: $3.(*ast.TableName), Table: $3.(*ast.TableName),
IndexName: model.NewCIStr($5), IndexName: model.NewCIStr($5),
} }
if $7 != nil {
stmt.Where = $7.(ast.ExprNode)
}
$$ = stmt
} }
| "SHOW" "GRANTS" | "SHOW" "GRANTS"
{ {
...@@ -8716,10 +8755,8 @@ NumericType: ...@@ -8716,10 +8755,8 @@ NumericType:
{ {
x := types.NewFieldType($1.(byte)) x := types.NewFieldType($1.(byte))
x.Flen = $2.(int) x.Flen = $2.(int)
if x.Flen == types.UnspecifiedLength || x.Flen == 0 { if x.Flen == types.UnspecifiedLength {
x.Flen = 1 x.Flen = 1
} else if x.Flen > mysql.MaxBitDisplayWidth {
yylex.AppendError(ErrTooBigDisplayWidth.GenWithStackByArgs(x.Flen))
} }
$$ = x $$ = x
} }
...@@ -8909,7 +8946,7 @@ StringType: ...@@ -8909,7 +8946,7 @@ StringType:
x.Flag |= mysql.BinaryFlag x.Flag |= mysql.BinaryFlag
$$ = $1.(*types.FieldType) $$ = $1.(*types.FieldType)
} }
| TextType OptBinary | TextType OptCharsetWithOptBinary
{ {
x := $1.(*types.FieldType) x := $1.(*types.FieldType)
x.Charset = $2.(*ast.OptBinary).Charset x.Charset = $2.(*ast.OptBinary).Charset
...@@ -8940,7 +8977,7 @@ StringType: ...@@ -8940,7 +8977,7 @@ StringType:
x.Collate = charset.CollationBin x.Collate = charset.CollationBin
$$ = x $$ = x
} }
| "LONG" Varchar OptBinary | "LONG" Varchar OptCharsetWithOptBinary
{ {
x := types.NewFieldType(mysql.TypeMediumBlob) x := types.NewFieldType(mysql.TypeMediumBlob)
x.Charset = $3.(*ast.OptBinary).Charset x.Charset = $3.(*ast.OptBinary).Charset
...@@ -8949,7 +8986,7 @@ StringType: ...@@ -8949,7 +8986,7 @@ StringType:
} }
$$ = x $$ = x
} }
| "LONG" OptBinary | "LONG" OptCharsetWithOptBinary
{ {
x := types.NewFieldType(mysql.TypeMediumBlob) x := types.NewFieldType(mysql.TypeMediumBlob)
x.Charset = $2.(*ast.OptBinary).Charset x.Charset = $2.(*ast.OptBinary).Charset
...@@ -8984,6 +9021,10 @@ NVarchar: ...@@ -8984,6 +9021,10 @@ NVarchar:
| "NATIONAL" "CHAR" "VARYING" | "NATIONAL" "CHAR" "VARYING"
| "NCHAR" "VARYING" | "NCHAR" "VARYING"
Year:
"YEAR"
| "SQL_TSI_YEAR"
BlobType: BlobType:
"TINYBLOB" "TINYBLOB"
...@@ -9037,6 +9078,37 @@ TextType: ...@@ -9037,6 +9078,37 @@ TextType:
$$ = x $$ = x
} }
OptCharsetWithOptBinary:
OptBinary
{
$$ = $1
}
| "ASCII"
{
$$ = &ast.OptBinary{
IsBinary: false,
Charset: charset.CharsetLatin1,
}
}
| "UNICODE"
{
name, _, err := charset.GetCharsetInfo("ucs2")
if err != nil {
yylex.AppendError(ErrUnknownCharacterSet.GenWithStackByArgs("ucs2"))
return 1
}
$$ = &ast.OptBinary{
IsBinary: false,
Charset: name,
}
}
| "BYTE"
{
$$ = &ast.OptBinary{
IsBinary: false,
Charset: "",
}
}
DateAndTimeType: DateAndTimeType:
"DATE" "DATE"
...@@ -9074,7 +9146,7 @@ DateAndTimeType: ...@@ -9074,7 +9146,7 @@ DateAndTimeType:
} }
$$ = x $$ = x
} }
| "YEAR" OptFieldLen FieldOpts | Year OptFieldLen FieldOpts
{ {
x := types.NewFieldType(mysql.TypeYear) x := types.NewFieldType(mysql.TypeYear)
x.Flen = $2.(int) x.Flen = $2.(int)
......
...@@ -125,6 +125,7 @@ func init() { ...@@ -125,6 +125,7 @@ func init() {
codeTruncatedWrongValue: mysql.ErrTruncatedWrongValue, codeTruncatedWrongValue: mysql.ErrTruncatedWrongValue,
codeUnknown: mysql.ErrUnknown, codeUnknown: mysql.ErrUnknown,
codeInvalidDefault: mysql.ErrInvalidDefault, codeInvalidDefault: mysql.ErrInvalidDefault,
codeInvalidFieldSize: mysql.ErrInvalidFieldSize,
codeMBiggerThanD: mysql.ErrMBiggerThanD, codeMBiggerThanD: mysql.ErrMBiggerThanD,
codeDataOutOfRange: mysql.ErrWarnDataOutOfRange, codeDataOutOfRange: mysql.ErrWarnDataOutOfRange,
codeDuplicatedValueInType: mysql.ErrDuplicatedValueInType, codeDuplicatedValueInType: mysql.ErrDuplicatedValueInType,
......
...@@ -253,11 +253,47 @@ func (crs *CopRuntimeStats) String() string { ...@@ -253,11 +253,47 @@ func (crs *CopRuntimeStats) String() string {
procTimes[n-1], procTimes[0], procTimes[n*4/5], procTimes[n*19/20], totalRows, totalIters, totalTasks) procTimes[n-1], procTimes[0], procTimes[n*4/5], procTimes[n*19/20], totalRows, totalIters, totalTasks)
} }
// ReaderRuntimeStats collects stats for TableReader, IndexReader and IndexLookupReader
type ReaderRuntimeStats struct {
sync.Mutex
copRespTime []time.Duration
}
// recordOneCopTask record once cop response time to update maxcopRespTime
func (rrs *ReaderRuntimeStats) recordOneCopTask(t time.Duration) {
rrs.Lock()
defer rrs.Unlock()
rrs.copRespTime = append(rrs.copRespTime, t)
}
func (rrs *ReaderRuntimeStats) String() string {
size := len(rrs.copRespTime)
if size == 0 {
return ""
}
if size == 1 {
return fmt.Sprintf("rpc time:%v", rrs.copRespTime[0])
}
sort.Slice(rrs.copRespTime, func(i, j int) bool {
return rrs.copRespTime[i] < rrs.copRespTime[j]
})
vMax, vMin := rrs.copRespTime[size-1], rrs.copRespTime[0]
vP80, vP95 := rrs.copRespTime[size*4/5], rrs.copRespTime[size*19/20]
sum := 0.0
for _, t := range rrs.copRespTime {
sum += float64(t)
}
vAvg := time.Duration(sum / float64(size))
return fmt.Sprintf("rpc max:%v, min:%v, avg:%v, p80:%v, p95:%v", vMax, vMin, vAvg, vP80, vP95)
}
// RuntimeStatsColl collects executors's execution info. // RuntimeStatsColl collects executors's execution info.
type RuntimeStatsColl struct { type RuntimeStatsColl struct {
mu sync.Mutex mu sync.Mutex
rootStats map[string]*RuntimeStats rootStats map[string]*RuntimeStats
copStats map[string]*CopRuntimeStats copStats map[string]*CopRuntimeStats
readerStats map[string]*ReaderRuntimeStats
} }
// RuntimeStats collects one executor's execution info. // RuntimeStats collects one executor's execution info.
...@@ -273,7 +309,7 @@ type RuntimeStats struct { ...@@ -273,7 +309,7 @@ type RuntimeStats struct {
// NewRuntimeStatsColl creates new executor collector. // NewRuntimeStatsColl creates new executor collector.
func NewRuntimeStatsColl() *RuntimeStatsColl { func NewRuntimeStatsColl() *RuntimeStatsColl {
return &RuntimeStatsColl{rootStats: make(map[string]*RuntimeStats), return &RuntimeStatsColl{rootStats: make(map[string]*RuntimeStats),
copStats: make(map[string]*CopRuntimeStats)} copStats: make(map[string]*CopRuntimeStats), readerStats: make(map[string]*ReaderRuntimeStats)}
} }
// GetRootStats gets execStat for a executor. // GetRootStats gets execStat for a executor.
...@@ -306,6 +342,12 @@ func (e *RuntimeStatsColl) RecordOneCopTask(planID, address string, summary *tip ...@@ -306,6 +342,12 @@ func (e *RuntimeStatsColl) RecordOneCopTask(planID, address string, summary *tip
copStats.RecordOneCopTask(address, summary) copStats.RecordOneCopTask(address, summary)
} }
// RecordOneReaderStats records a specific stats for TableReader, IndexReader and IndexLookupReader.
func (e *RuntimeStatsColl) RecordOneReaderStats(planID string, copRespTime time.Duration) {
readerStats := e.GetReaderStats(planID)
readerStats.recordOneCopTask(copRespTime)
}
// ExistsRootStats checks if the planID exists in the rootStats collection. // ExistsRootStats checks if the planID exists in the rootStats collection.
func (e *RuntimeStatsColl) ExistsRootStats(planID string) bool { func (e *RuntimeStatsColl) ExistsRootStats(planID string) bool {
e.mu.Lock() e.mu.Lock()
...@@ -322,6 +364,18 @@ func (e *RuntimeStatsColl) ExistsCopStats(planID string) bool { ...@@ -322,6 +364,18 @@ func (e *RuntimeStatsColl) ExistsCopStats(planID string) bool {
return exists return exists
} }
// GetReaderStats gets the ReaderRuntimeStats specified by planID.
func (e *RuntimeStatsColl) GetReaderStats(planID string) *ReaderRuntimeStats {
e.mu.Lock()
defer e.mu.Unlock()
stats, exists := e.readerStats[planID]
if !exists {
stats = &ReaderRuntimeStats{copRespTime: make([]time.Duration, 0, 20)}
e.readerStats[planID] = stats
}
return stats
}
// Record records executor's execution. // Record records executor's execution.
func (e *RuntimeStats) Record(d time.Duration, rowNum int) { func (e *RuntimeStats) Record(d time.Duration, rowNum int) {
atomic.AddInt32(&e.loop, 1) atomic.AddInt32(&e.loop, 1)
......
...@@ -30,6 +30,7 @@ import ( ...@@ -30,6 +30,7 @@ import (
zaplog "github.com/pingcap/log" zaplog "github.com/pingcap/log"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
"go.uber.org/zap" "go.uber.org/zap"
"go.uber.org/zap/zapcore"
"gopkg.in/natefinch/lumberjack.v2" "gopkg.in/natefinch/lumberjack.v2"
) )
...@@ -72,8 +73,8 @@ type LogConfig struct { ...@@ -72,8 +73,8 @@ type LogConfig struct {
} }
// NewLogConfig creates a LogConfig. // NewLogConfig creates a LogConfig.
func NewLogConfig(level, format, slowQueryFile string, fileCfg FileLogConfig, disableTimestamp bool) *LogConfig { func NewLogConfig(level, format, slowQueryFile string, fileCfg FileLogConfig, disableTimestamp bool, opts ...func(*zaplog.Config)) *LogConfig {
return &LogConfig{ c := &LogConfig{
Config: zaplog.Config{ Config: zaplog.Config{
Level: level, Level: level,
Format: format, Format: format,
...@@ -82,6 +83,10 @@ func NewLogConfig(level, format, slowQueryFile string, fileCfg FileLogConfig, di ...@@ -82,6 +83,10 @@ func NewLogConfig(level, format, slowQueryFile string, fileCfg FileLogConfig, di
}, },
SlowQueryFile: slowQueryFile, SlowQueryFile: slowQueryFile,
} }
for _, opt := range opts {
opt(&c.Config)
}
return c
} }
// isSKippedPackageName tests wether path name is on log library calling stack. // isSKippedPackageName tests wether path name is on log library calling stack.
...@@ -279,7 +284,7 @@ func InitLogger(cfg *LogConfig) error { ...@@ -279,7 +284,7 @@ func InitLogger(cfg *LogConfig) error {
// InitZapLogger initializes a zap logger with cfg. // InitZapLogger initializes a zap logger with cfg.
func InitZapLogger(cfg *LogConfig) error { func InitZapLogger(cfg *LogConfig) error {
gl, props, err := zaplog.InitLogger(&cfg.Config) gl, props, err := zaplog.InitLogger(&cfg.Config, zap.AddStacktrace(zapcore.FatalLevel))
if err != nil { if err != nil {
return errors.Trace(err) return errors.Trace(err)
} }
......
...@@ -35,7 +35,7 @@ import ( ...@@ -35,7 +35,7 @@ import (
// //
// NOTE: We only protect concurrent access to "bytesConsumed" and "children", // NOTE: We only protect concurrent access to "bytesConsumed" and "children",
// that is to say: // that is to say:
// 1. Only "BytesConsumed()", "Consume()", "AttachTo()" and "Detach" are thread-safe. // 1. Only "BytesConsumed()", "Consume()" and "AttachTo()" are thread-safe.
// 2. Other operations of a Tracker tree is not thread-safe. // 2. Other operations of a Tracker tree is not thread-safe.
type Tracker struct { type Tracker struct {
mu struct { mu struct {
...@@ -53,7 +53,7 @@ type Tracker struct { ...@@ -53,7 +53,7 @@ type Tracker struct {
// NewTracker creates a memory tracker. // NewTracker creates a memory tracker.
// 1. "label" is the label used in the usage string. // 1. "label" is the label used in the usage string.
// 2. "bytesLimit < 0" means no limit. // 2. "bytesLimit <= 0" means no limit.
func NewTracker(label fmt.Stringer, bytesLimit int64) *Tracker { func NewTracker(label fmt.Stringer, bytesLimit int64) *Tracker {
return &Tracker{ return &Tracker{
label: label, label: label,
...@@ -62,7 +62,13 @@ func NewTracker(label fmt.Stringer, bytesLimit int64) *Tracker { ...@@ -62,7 +62,13 @@ func NewTracker(label fmt.Stringer, bytesLimit int64) *Tracker {
} }
} }
// SetActionOnExceed sets the action when memory usage is out of memory quota. // SetBytesLimit sets the bytes limit for this tracker.
// "bytesLimit <= 0" means no limit.
func (t *Tracker) SetBytesLimit(bytesLimit int64) {
t.bytesLimit = bytesLimit
}
// SetActionOnExceed sets the action when memory usage exceeds bytesLimit.
func (t *Tracker) SetActionOnExceed(a ActionOnExceed) { func (t *Tracker) SetActionOnExceed(a ActionOnExceed) {
t.actionOnExceed = a t.actionOnExceed = a
} }
...@@ -72,6 +78,11 @@ func (t *Tracker) SetLabel(label fmt.Stringer) { ...@@ -72,6 +78,11 @@ func (t *Tracker) SetLabel(label fmt.Stringer) {
t.label = label t.label = label
} }
// Label gets the label of a Tracker.
func (t *Tracker) Label() fmt.Stringer {
return t.label
}
// AttachTo attaches this memory tracker as a child to another Tracker. If it // AttachTo attaches this memory tracker as a child to another Tracker. If it
// already has a parent, this function will remove it from the old parent. // already has a parent, this function will remove it from the old parent.
// Its consumed memory usage is used to update all its ancestors. // Its consumed memory usage is used to update all its ancestors.
...@@ -131,12 +142,15 @@ func (t *Tracker) ReplaceChild(oldChild, newChild *Tracker) { ...@@ -131,12 +142,15 @@ func (t *Tracker) ReplaceChild(oldChild, newChild *Tracker) {
} }
// Consume is used to consume a memory usage. "bytes" can be a negative value, // Consume is used to consume a memory usage. "bytes" can be a negative value,
// which means this is a memory release operation. // which means this is a memory release operation. When memory usage of a tracker
// exceeds its bytesLimit, the tracker calls its action, so does each of its ancestors.
func (t *Tracker) Consume(bytes int64) { func (t *Tracker) Consume(bytes int64) {
var rootExceed *Tracker
for tracker := t; tracker != nil; tracker = tracker.parent { for tracker := t; tracker != nil; tracker = tracker.parent {
if atomic.AddInt64(&tracker.bytesConsumed, bytes) >= tracker.bytesLimit && tracker.bytesLimit > 0 { if atomic.AddInt64(&tracker.bytesConsumed, bytes) >= tracker.bytesLimit && tracker.bytesLimit > 0 {
rootExceed = tracker // TODO(fengliyuan): try to find a way to avoid logging at each tracker in chain.
if tracker.actionOnExceed != nil {
tracker.actionOnExceed.Action(tracker)
}
} }
for { for {
...@@ -148,9 +162,6 @@ func (t *Tracker) Consume(bytes int64) { ...@@ -148,9 +162,6 @@ func (t *Tracker) Consume(bytes int64) {
break break
} }
} }
if rootExceed != nil {
rootExceed.actionOnExceed.Action(rootExceed)
}
} }
// BytesConsumed returns the consumed memory usage value in bytes. // BytesConsumed returns the consumed memory usage value in bytes.
......
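
The Consume change above now fires actionOnExceed on every tracker in the ancestor chain whose own limit is exceeded (with a TODO about repeated logging), instead of remembering only the outermost offender and acting once after the loop; SetBytesLimit and Label are new small accessors. A minimal sketch of the tracker API as it stands at this revision; the label type and the byte counts are illustrative only.

```go
package main

import (
	"fmt"

	"github.com/pingcap/tidb/util/memory"
)

// label is a tiny fmt.Stringer so trackers can be built without other helpers.
type label string

func (l label) String() string { return string(l) }

func main() {
	parent := memory.NewTracker(label("parent"), 8) // bytesLimit <= 0 would mean no limit
	child := memory.NewTracker(label("child"), 4)
	child.AttachTo(parent)

	// Consumption propagates up the chain; after this change each tracker that
	// exceeds its own limit invokes the action registered via SetActionOnExceed.
	child.Consume(10)
	fmt.Println(child.Label(), child.BytesConsumed())   // child 10
	fmt.Println(parent.Label(), parent.BytesConsumed()) // parent 10

	// SetBytesLimit is new in this revision.
	parent.SetBytesLimit(0)
}
```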
...@@ -129,118 +129,118 @@ ...@@ -129,118 +129,118 @@
"revisionTime": "2019-03-07T07:54:52Z" "revisionTime": "2019-03-07T07:54:52Z"
}, },
{ {
"checksumSHA1": "JPMpA0GItqyzBSfsCuIQd1TkK2o=", "checksumSHA1": "6Q3DjwHqeCoRIxBT+Jy5ctnK6hw=",
"path": "github.com/pingcap/parser", "path": "github.com/pingcap/parser",
"revision": "275a827cf4e3798ca50ae8b5224895f0919e70ed", "revision": "978b8272c04e599620715e8493ca26775e8c0e5f",
"revisionTime": "2019-09-02T03:07:20Z" "revisionTime": "2019-09-12T03:26:24Z"
}, },
{ {
"checksumSHA1": "rNycpMFm1SPArRiqljKlCdtbKlE=", "checksumSHA1": "QDkkRc/x4HH3FdHZ9pLPgeAw9+A=",
"path": "github.com/pingcap/parser/ast", "path": "github.com/pingcap/parser/ast",
"revision": "275a827cf4e3798ca50ae8b5224895f0919e70ed", "revision": "978b8272c04e599620715e8493ca26775e8c0e5f",
"revisionTime": "2019-09-02T03:07:20Z" "revisionTime": "2019-09-12T03:26:24Z"
}, },
{ {
"checksumSHA1": "xiv40YqnvHcbIhaEzJqjh5K7ehM=", "checksumSHA1": "xiv40YqnvHcbIhaEzJqjh5K7ehM=",
"path": "github.com/pingcap/parser/auth", "path": "github.com/pingcap/parser/auth",
"revision": "275a827cf4e3798ca50ae8b5224895f0919e70ed", "revision": "978b8272c04e599620715e8493ca26775e8c0e5f",
"revisionTime": "2019-09-02T03:07:20Z" "revisionTime": "2019-09-12T03:26:24Z"
}, },
{ {
"checksumSHA1": "EvDXpplklIXmKqLclzWzaN/uHKQ=", "checksumSHA1": "EvDXpplklIXmKqLclzWzaN/uHKQ=",
"path": "github.com/pingcap/parser/charset", "path": "github.com/pingcap/parser/charset",
"revision": "275a827cf4e3798ca50ae8b5224895f0919e70ed", "revision": "978b8272c04e599620715e8493ca26775e8c0e5f",
"revisionTime": "2019-09-02T03:07:20Z" "revisionTime": "2019-09-12T03:26:24Z"
}, },
{ {
"checksumSHA1": "Aao6Mul/qqogOwPwM2arBKZkYZs=", "checksumSHA1": "Aao6Mul/qqogOwPwM2arBKZkYZs=",
"path": "github.com/pingcap/parser/format", "path": "github.com/pingcap/parser/format",
"revision": "275a827cf4e3798ca50ae8b5224895f0919e70ed", "revision": "978b8272c04e599620715e8493ca26775e8c0e5f",
"revisionTime": "2019-09-02T03:07:20Z" "revisionTime": "2019-09-12T03:26:24Z"
}, },
{ {
"checksumSHA1": "GAJ7IUg0t8DCKJbJQxJLkklEj2E=", "checksumSHA1": "GAJ7IUg0t8DCKJbJQxJLkklEj2E=",
"path": "github.com/pingcap/parser/model", "path": "github.com/pingcap/parser/model",
"revision": "275a827cf4e3798ca50ae8b5224895f0919e70ed", "revision": "978b8272c04e599620715e8493ca26775e8c0e5f",
"revisionTime": "2019-09-02T03:07:20Z" "revisionTime": "2019-09-12T03:26:24Z"
}, },
{ {
"checksumSHA1": "vPXc18Mug99WhSvLXC/JtwOdEmA=", "checksumSHA1": "pN8v8r1syhLlLXw9TOq6bFgJfnY=",
"path": "github.com/pingcap/parser/mysql", "path": "github.com/pingcap/parser/mysql",
"revision": "275a827cf4e3798ca50ae8b5224895f0919e70ed", "revision": "978b8272c04e599620715e8493ca26775e8c0e5f",
"revisionTime": "2019-09-02T03:07:20Z" "revisionTime": "2019-09-12T03:26:24Z"
}, },
{ {
"checksumSHA1": "olapD16WCMBU9vrA5PtlERGFfXw=", "checksumSHA1": "olapD16WCMBU9vrA5PtlERGFfXw=",
"path": "github.com/pingcap/parser/opcode", "path": "github.com/pingcap/parser/opcode",
"revision": "275a827cf4e3798ca50ae8b5224895f0919e70ed", "revision": "978b8272c04e599620715e8493ca26775e8c0e5f",
"revisionTime": "2019-09-02T03:07:20Z" "revisionTime": "2019-09-12T03:26:24Z"
}, },
{ {
"checksumSHA1": "L6rzy3sJU1RPf7AkJN+0zcwW/YY=", "checksumSHA1": "L6rzy3sJU1RPf7AkJN+0zcwW/YY=",
"path": "github.com/pingcap/parser/terror", "path": "github.com/pingcap/parser/terror",
"revision": "275a827cf4e3798ca50ae8b5224895f0919e70ed", "revision": "978b8272c04e599620715e8493ca26775e8c0e5f",
"revisionTime": "2019-09-02T03:07:20Z" "revisionTime": "2019-09-12T03:26:24Z"
}, },
{ {
"checksumSHA1": "u1Lmm4Fa3su4ElZMN4w0hPzFZl4=", "checksumSHA1": "u1Lmm4Fa3su4ElZMN4w0hPzFZl4=",
"path": "github.com/pingcap/parser/types", "path": "github.com/pingcap/parser/types",
"revision": "275a827cf4e3798ca50ae8b5224895f0919e70ed", "revision": "978b8272c04e599620715e8493ca26775e8c0e5f",
"revisionTime": "2019-09-02T03:07:20Z" "revisionTime": "2019-09-12T03:26:24Z"
}, },
{ {
"checksumSHA1": "ryt2yutvbgdMuS5uvtaiJqqvZXQ=", "checksumSHA1": "ryt2yutvbgdMuS5uvtaiJqqvZXQ=",
"path": "github.com/pingcap/tidb/sessionctx/stmtctx", "path": "github.com/pingcap/tidb/sessionctx/stmtctx",
"revision": "67d4f913dd3f305b39c4d6ba53a6ff82fc040c39", "revision": "4e545cfa580a38dfd31946f2ecad3f459f9ceaee",
"revisionTime": "2019-09-02T13:30:13Z" "revisionTime": "2019-09-16T09:47:52Z"
}, },
{ {
"checksumSHA1": "sNH6BHp65A9tc3DBD15aOI6GDko=", "checksumSHA1": "xqDC/XGpST9lvxuOJt4+LkXOvnw=",
"path": "github.com/pingcap/tidb/types", "path": "github.com/pingcap/tidb/types",
"revision": "67d4f913dd3f305b39c4d6ba53a6ff82fc040c39", "revision": "4e545cfa580a38dfd31946f2ecad3f459f9ceaee",
"revisionTime": "2019-09-02T13:30:13Z" "revisionTime": "2019-09-16T09:47:52Z"
}, },
{ {
"checksumSHA1": "OSOQVeP518zWu3RoYSDWoh7DIjg=", "checksumSHA1": "OSOQVeP518zWu3RoYSDWoh7DIjg=",
"path": "github.com/pingcap/tidb/types/json", "path": "github.com/pingcap/tidb/types/json",
"revision": "67d4f913dd3f305b39c4d6ba53a6ff82fc040c39", "revision": "4e545cfa580a38dfd31946f2ecad3f459f9ceaee",
"revisionTime": "2019-09-02T13:30:13Z" "revisionTime": "2019-09-16T09:47:52Z"
}, },
{ {
"checksumSHA1": "45zWX5Q6D6aTEWtc4p/lbD9WD4o=", "checksumSHA1": "45zWX5Q6D6aTEWtc4p/lbD9WD4o=",
"path": "github.com/pingcap/tidb/types/parser_driver", "path": "github.com/pingcap/tidb/types/parser_driver",
"revision": "67d4f913dd3f305b39c4d6ba53a6ff82fc040c39", "revision": "4e545cfa580a38dfd31946f2ecad3f459f9ceaee",
"revisionTime": "2019-09-02T13:30:13Z" "revisionTime": "2019-09-16T09:47:52Z"
}, },
{ {
"checksumSHA1": "uH6u5fhPvRiiOUCG8bJfphXm4jo=", "checksumSHA1": "oCrNchmOGNQTnrkjk5CxFZpu2rE=",
"path": "github.com/pingcap/tidb/util/execdetails", "path": "github.com/pingcap/tidb/util/execdetails",
"revision": "67d4f913dd3f305b39c4d6ba53a6ff82fc040c39", "revision": "4e545cfa580a38dfd31946f2ecad3f459f9ceaee",
"revisionTime": "2019-09-02T13:30:13Z" "revisionTime": "2019-09-16T09:47:52Z"
}, },
{ {
"checksumSHA1": "zw1limoYLowZjRm8wgicyjC72+U=", "checksumSHA1": "zw1limoYLowZjRm8wgicyjC72+U=",
"path": "github.com/pingcap/tidb/util/hack", "path": "github.com/pingcap/tidb/util/hack",
"revision": "67d4f913dd3f305b39c4d6ba53a6ff82fc040c39", "revision": "4e545cfa580a38dfd31946f2ecad3f459f9ceaee",
"revisionTime": "2019-09-02T13:30:13Z" "revisionTime": "2019-09-16T09:47:52Z"
}, },
{ {
"checksumSHA1": "fDbwnQlRCKnr5y6MY799BEd4WlQ=", "checksumSHA1": "SZhLPQR66Rd4kWkva6W3sJmSNLY=",
"path": "github.com/pingcap/tidb/util/logutil", "path": "github.com/pingcap/tidb/util/logutil",
"revision": "67d4f913dd3f305b39c4d6ba53a6ff82fc040c39", "revision": "4e545cfa580a38dfd31946f2ecad3f459f9ceaee",
"revisionTime": "2019-09-02T13:30:13Z" "revisionTime": "2019-09-16T09:47:52Z"
}, },
{ {
"checksumSHA1": "OveQu0ABBJmMEwmmthqSRQC2Ef0=", "checksumSHA1": "OveQu0ABBJmMEwmmthqSRQC2Ef0=",
"path": "github.com/pingcap/tidb/util/math", "path": "github.com/pingcap/tidb/util/math",
"revision": "67d4f913dd3f305b39c4d6ba53a6ff82fc040c39", "revision": "4e545cfa580a38dfd31946f2ecad3f459f9ceaee",
"revisionTime": "2019-09-02T13:30:13Z" "revisionTime": "2019-09-16T09:47:52Z"
}, },
{ {
"checksumSHA1": "loL2JgZDLapEOgfM/XUJI5f0HVs=", "checksumSHA1": "YCueNgfYAVAWPgnLF2tBu7ikN1g=",
"path": "github.com/pingcap/tidb/util/memory", "path": "github.com/pingcap/tidb/util/memory",
"revision": "67d4f913dd3f305b39c4d6ba53a6ff82fc040c39", "revision": "4e545cfa580a38dfd31946f2ecad3f459f9ceaee",
"revisionTime": "2019-09-02T13:30:13Z" "revisionTime": "2019-09-16T09:47:52Z"
}, },
{ {
"checksumSHA1": "QPIBwDNUFF5Whrnd41S3mkKa4gQ=", "checksumSHA1": "QPIBwDNUFF5Whrnd41S3mkKa4gQ=",
...@@ -497,68 +497,68 @@ ...@@ -497,68 +497,68 @@
{ {
"checksumSHA1": "aKn1oKcY74N8TRLm3Ayt7Q4bbI4=", "checksumSHA1": "aKn1oKcY74N8TRLm3Ayt7Q4bbI4=",
"path": "vitess.io/vitess/go/bytes2", "path": "vitess.io/vitess/go/bytes2",
"revision": "74ae4e28066640597b0a6b1163fb99cd0b53b3ff", "revision": "521e55710e066343d8234470e88601ddf2b169d4",
"revisionTime": "2019-08-30T03:35:45Z" "revisionTime": "2019-09-16T06:01:21Z"
}, },
{ {
"checksumSHA1": "bhE6CGQgZTIgLPp9lnvlKW/47xc=", "checksumSHA1": "bhE6CGQgZTIgLPp9lnvlKW/47xc=",
"path": "vitess.io/vitess/go/hack", "path": "vitess.io/vitess/go/hack",
"revision": "74ae4e28066640597b0a6b1163fb99cd0b53b3ff", "revision": "521e55710e066343d8234470e88601ddf2b169d4",
"revisionTime": "2019-08-30T03:35:45Z" "revisionTime": "2019-09-16T06:01:21Z"
}, },
{ {
"checksumSHA1": "y2C3mKrC39Tffb/614ZYa/qEVGU=", "checksumSHA1": "IpNRu9mF+hsO3XRHzhW2Tz4G81o=",
"path": "vitess.io/vitess/go/sqltypes", "path": "vitess.io/vitess/go/sqltypes",
"revision": "74ae4e28066640597b0a6b1163fb99cd0b53b3ff", "revision": "521e55710e066343d8234470e88601ddf2b169d4",
"revisionTime": "2019-08-30T03:35:45Z" "revisionTime": "2019-09-16T06:01:21Z"
}, },
{ {
"checksumSHA1": "vAIRxI6MHsq3x1hLQwIyw5AvqtI=", "checksumSHA1": "vAIRxI6MHsq3x1hLQwIyw5AvqtI=",
"path": "vitess.io/vitess/go/vt/log", "path": "vitess.io/vitess/go/vt/log",
"revision": "74ae4e28066640597b0a6b1163fb99cd0b53b3ff", "revision": "521e55710e066343d8234470e88601ddf2b169d4",
"revisionTime": "2019-08-30T03:35:45Z" "revisionTime": "2019-09-16T06:01:21Z"
}, },
{ {
"checksumSHA1": "//MHnGEq9xApvIMdwQaRrQf5ZWo=", "checksumSHA1": "//MHnGEq9xApvIMdwQaRrQf5ZWo=",
"path": "vitess.io/vitess/go/vt/proto/binlogdata", "path": "vitess.io/vitess/go/vt/proto/binlogdata",
"revision": "74ae4e28066640597b0a6b1163fb99cd0b53b3ff", "revision": "521e55710e066343d8234470e88601ddf2b169d4",
"revisionTime": "2019-08-30T03:35:45Z" "revisionTime": "2019-09-16T06:01:21Z"
}, },
{ {
"checksumSHA1": "u8uuZWMqaXgQ1MduggrgIHU50FI=", "checksumSHA1": "u8uuZWMqaXgQ1MduggrgIHU50FI=",
"path": "vitess.io/vitess/go/vt/proto/query", "path": "vitess.io/vitess/go/vt/proto/query",
"revision": "74ae4e28066640597b0a6b1163fb99cd0b53b3ff", "revision": "521e55710e066343d8234470e88601ddf2b169d4",
"revisionTime": "2019-08-30T03:35:45Z" "revisionTime": "2019-09-16T06:01:21Z"
}, },
{ {
"checksumSHA1": "rJ1Iqz/lvaKikIUx4oEFfYJtoBQ=", "checksumSHA1": "rJ1Iqz/lvaKikIUx4oEFfYJtoBQ=",
"path": "vitess.io/vitess/go/vt/proto/topodata", "path": "vitess.io/vitess/go/vt/proto/topodata",
"revision": "74ae4e28066640597b0a6b1163fb99cd0b53b3ff", "revision": "521e55710e066343d8234470e88601ddf2b169d4",
"revisionTime": "2019-08-30T03:35:45Z" "revisionTime": "2019-09-16T06:01:21Z"
}, },
{ {
"checksumSHA1": "Bv8lucvoH9AnJSYiWX8MIrJl4zY=", "checksumSHA1": "Bv8lucvoH9AnJSYiWX8MIrJl4zY=",
"path": "vitess.io/vitess/go/vt/proto/vtgate", "path": "vitess.io/vitess/go/vt/proto/vtgate",
"revision": "74ae4e28066640597b0a6b1163fb99cd0b53b3ff", "revision": "521e55710e066343d8234470e88601ddf2b169d4",
"revisionTime": "2019-08-30T03:35:45Z" "revisionTime": "2019-09-16T06:01:21Z"
}, },
{ {
"checksumSHA1": "HeUJu5njPq9iznpAOcrLpLD7f9w=", "checksumSHA1": "HeUJu5njPq9iznpAOcrLpLD7f9w=",
"path": "vitess.io/vitess/go/vt/proto/vtrpc", "path": "vitess.io/vitess/go/vt/proto/vtrpc",
"revision": "74ae4e28066640597b0a6b1163fb99cd0b53b3ff", "revision": "521e55710e066343d8234470e88601ddf2b169d4",
"revisionTime": "2019-08-30T03:35:45Z" "revisionTime": "2019-09-16T06:01:21Z"
}, },
{ {
"checksumSHA1": "6g61BeExwL7AYkuujmuo+SUUvaM=", "checksumSHA1": "QP+KJhlTW4o4uRt2uFFiI9tjriI=",
"path": "vitess.io/vitess/go/vt/sqlparser", "path": "vitess.io/vitess/go/vt/sqlparser",
"revision": "74ae4e28066640597b0a6b1163fb99cd0b53b3ff", "revision": "521e55710e066343d8234470e88601ddf2b169d4",
"revisionTime": "2019-08-30T03:35:45Z" "revisionTime": "2019-09-16T06:01:21Z"
}, },
{ {
"checksumSHA1": "z9+F/lA1Xrl5S16LKssUH8VL6hs=", "checksumSHA1": "z9+F/lA1Xrl5S16LKssUH8VL6hs=",
"path": "vitess.io/vitess/go/vt/vterrors", "path": "vitess.io/vitess/go/vt/vterrors",
"revision": "74ae4e28066640597b0a6b1163fb99cd0b53b3ff", "revision": "521e55710e066343d8234470e88601ddf2b169d4",
"revisionTime": "2019-08-30T03:35:45Z" "revisionTime": "2019-09-16T06:01:21Z"
} }
], ],
"rootPath": "github.com/XiaoMi/soar" "rootPath": "github.com/XiaoMi/soar"
......
...@@ -19,7 +19,6 @@ package sqltypes ...@@ -19,7 +19,6 @@ package sqltypes
import ( import (
"bytes" "bytes"
"fmt" "fmt"
"math"
"strconv" "strconv"
...@@ -28,9 +27,6 @@ import ( ...@@ -28,9 +27,6 @@ import (
"vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vterrors"
) )
// TODO(sougou): change these functions to be more permissive.
// Most string to number conversions should quietly convert to 0.
// numeric represents a numeric value extracted from // numeric represents a numeric value extracted from
// a Value, used for arithmetic operations. // a Value, used for arithmetic operations.
type numeric struct { type numeric struct {
...@@ -50,8 +46,14 @@ func Add(v1, v2 Value) (Value, error) { ...@@ -50,8 +46,14 @@ func Add(v1, v2 Value) (Value, error) {
} }
lv1, err := newNumeric(v1) lv1, err := newNumeric(v1)
if err != nil {
return NULL, err
}
lv2, err := newNumeric(v2) lv2, err := newNumeric(v2)
if err != nil {
return NULL, err
}
lresult, err := addNumericWithError(lv1, lv2) lresult, err := addNumericWithError(lv1, lv2)
if err != nil { if err != nil {
...@@ -61,6 +63,30 @@ func Add(v1, v2 Value) (Value, error) { ...@@ -61,6 +63,30 @@ func Add(v1, v2 Value) (Value, error) {
return castFromNumeric(lresult, lresult.typ), nil return castFromNumeric(lresult, lresult.typ), nil
} }
// Subtract takes two values and subtracts them
func Subtract(v1, v2 Value) (Value, error) {
if v1.IsNull() || v2.IsNull() {
return NULL, nil
}
lv1, err := newNumeric(v1)
if err != nil {
return NULL, err
}
lv2, err := newNumeric(v2)
if err != nil {
return NULL, err
}
lresult, err := subtractNumericWithError(lv1, lv2)
if err != nil {
return NULL, err
}
return castFromNumeric(lresult, lresult.typ), nil
}
// NullsafeAdd adds two Values in a null-safe manner. A null value // NullsafeAdd adds two Values in a null-safe manner. A null value
// is treated as 0. If both values are null, then a null is returned. // is treated as 0. If both values are null, then a null is returned.
// If both values are not null, a numeric value is built // If both values are not null, a numeric value is built
...@@ -243,7 +269,10 @@ func ToInt64(v Value) (int64, error) { ...@@ -243,7 +269,10 @@ func ToInt64(v Value) (int64, error) {
// ToFloat64 converts Value to float64. // ToFloat64 converts Value to float64.
func ToFloat64(v Value) (float64, error) { func ToFloat64(v Value) (float64, error) {
num, _ := newNumeric(v) num, err := newNumeric(v)
if err != nil {
return 0, err
}
switch num.typ { switch num.typ {
case Int64: case Int64:
return float64(num.ival), nil return float64(num.ival), nil
...@@ -373,7 +402,32 @@ func addNumericWithError(v1, v2 numeric) (numeric, error) { ...@@ -373,7 +402,32 @@ func addNumericWithError(v1, v2 numeric) (numeric, error) {
return floatPlusAny(v1.fval, v2), nil return floatPlusAny(v1.fval, v2), nil
} }
panic("unreachable") panic("unreachable")
}
func subtractNumericWithError(v1, v2 numeric) (numeric, error) {
switch v1.typ {
case Int64:
switch v2.typ {
case Int64:
return intMinusIntWithError(v1.ival, v2.ival)
case Uint64:
return intMinusUintWithError(v1.ival, v2.uval)
case Float64:
return anyMinusFloat(v1, v2.fval), nil
}
case Uint64:
switch v2.typ {
case Int64:
return uintMinusIntWithError(v1.uval, v2.ival)
case Uint64:
return uintMinusUintWithError(v1.uval, v2.uval)
case Float64:
return anyMinusFloat(v1, v2.fval), nil
}
case Float64:
return floatMinusAny(v1.fval, v2), nil
}
panic("unreachable")
} }
// prioritize reorders the input parameters // prioritize reorders the input parameters
...@@ -388,7 +442,6 @@ func prioritize(v1, v2 numeric) (altv1, altv2 numeric) { ...@@ -388,7 +442,6 @@ func prioritize(v1, v2 numeric) (altv1, altv2 numeric) {
if v2.typ == Float64 { if v2.typ == Float64 {
return v2, v1 return v2, v1
} }
} }
return v1, v2 return v1, v2
} }
...@@ -415,36 +468,67 @@ func intPlusIntWithError(v1, v2 int64) (numeric, error) { ...@@ -415,36 +468,67 @@ func intPlusIntWithError(v1, v2 int64) (numeric, error) {
return numeric{typ: Int64, ival: result}, nil return numeric{typ: Int64, ival: result}, nil
} }
func intMinusIntWithError(v1, v2 int64) (numeric, error) {
result := v1 - v2
if (result < v1) != (v2 > 0) {
return numeric{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "BIGINT value is out of range in %v - %v", v1, v2)
}
return numeric{typ: Int64, ival: result}, nil
}
func intMinusUintWithError(v1 int64, v2 uint64) (numeric, error) {
if v1 < 0 || v1 < int64(v2) {
return numeric{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "BIGINT UNSIGNED value is out of range in %v - %v", v1, v2)
}
return uintMinusUintWithError(uint64(v1), v2)
}
func uintPlusInt(v1 uint64, v2 int64) numeric { func uintPlusInt(v1 uint64, v2 int64) numeric {
return uintPlusUint(v1, uint64(v2)) return uintPlusUint(v1, uint64(v2))
} }
func uintPlusIntWithError(v1 uint64, v2 int64) (numeric, error) { func uintPlusIntWithError(v1 uint64, v2 int64) (numeric, error) {
if v2 >= math.MaxInt64 && v1 > 0 { if v2 < 0 && v1 < uint64(v2) {
return numeric{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "BIGINT value is out of range in %v + %v", v1, v2) return numeric{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "BIGINT UNSIGNED value is out of range in %v + %v", v1, v2)
} }
// convert to int -> uint is because for numeric operators (such as + or -)
//convert to int -> uint is because for numeric operators (such as + or -) // where one of the operands is an unsigned integer, the result is unsigned by default.
//where one of the operands is an unsigned integer, the result is unsigned by default.
return uintPlusUintWithError(v1, uint64(v2)) return uintPlusUintWithError(v1, uint64(v2))
} }
func uintMinusIntWithError(v1 uint64, v2 int64) (numeric, error) {
if int64(v1) < v2 && v2 > 0 {
return numeric{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "BIGINT UNSIGNED value is out of range in %v - %v", v1, v2)
}
// uint - (- int) = uint + int
if v2 < 0 {
return uintPlusIntWithError(v1, -v2)
}
return uintMinusUintWithError(v1, uint64(v2))
}
func uintPlusUint(v1, v2 uint64) numeric { func uintPlusUint(v1, v2 uint64) numeric {
result := v1 + v2 result := v1 + v2
if result < v2 { if result < v2 {
return numeric{typ: Float64, fval: float64(v1) + float64(v2)} return numeric{typ: Float64, fval: float64(v1) + float64(v2)}
} }
return numeric{typ: Uint64, uval: result} return numeric{typ: Uint64, uval: result}
} }
func uintPlusUintWithError(v1, v2 uint64) (numeric, error) { func uintPlusUintWithError(v1, v2 uint64) (numeric, error) {
result := v1 + v2 result := v1 + v2
if result < v2 { if result < v2 {
return numeric{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "BIGINT UNSIGNED value is out of range in %v + %v", v1, v2) return numeric{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "BIGINT UNSIGNED value is out of range in %v + %v", v1, v2)
} }
return numeric{typ: Uint64, uval: result}, nil
}
func uintMinusUintWithError(v1, v2 uint64) (numeric, error) {
result := v1 - v2
if v2 > v1 {
return numeric{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "BIGINT UNSIGNED value is out of range in %v - %v", v1, v2)
}
return numeric{typ: Uint64, uval: result}, nil return numeric{typ: Uint64, uval: result}, nil
} }
...@@ -458,6 +542,26 @@ func floatPlusAny(v1 float64, v2 numeric) numeric { ...@@ -458,6 +542,26 @@ func floatPlusAny(v1 float64, v2 numeric) numeric {
return numeric{typ: Float64, fval: v1 + v2.fval} return numeric{typ: Float64, fval: v1 + v2.fval}
} }
func floatMinusAny(v1 float64, v2 numeric) numeric {
switch v2.typ {
case Int64:
v2.fval = float64(v2.ival)
case Uint64:
v2.fval = float64(v2.uval)
}
return numeric{typ: Float64, fval: v1 - v2.fval}
}
func anyMinusFloat(v1 numeric, v2 float64) numeric {
switch v1.typ {
case Int64:
v1.fval = float64(v1.ival)
case Uint64:
v1.fval = float64(v1.uval)
}
return numeric{typ: Float64, fval: v1.fval - v2}
}
func castFromNumeric(v numeric, resultType querypb.Type) Value { func castFromNumeric(v numeric, resultType querypb.Type) Value {
switch { switch {
case IsSigned(resultType): case IsSigned(resultType):
......
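
Subtract and the *WithError helpers above mirror the existing addition path but report signed and unsigned overflow as errors instead of wrapping silently. A short usage sketch against the exported sqltypes API; the concrete values are illustrative.

```go
package main

import (
	"fmt"

	"vitess.io/vitess/go/sqltypes"
)

func main() {
	// 3 - 5 stays in the signed domain.
	diff, err := sqltypes.Subtract(sqltypes.NewInt64(3), sqltypes.NewInt64(5))
	if err != nil {
		panic(err)
	}
	n, _ := sqltypes.ToInt64(diff)
	fmt.Println(n) // -2

	// Unsigned underflow is rejected rather than wrapping around.
	_, err = sqltypes.Subtract(sqltypes.NewUint64(1), sqltypes.NewUint64(2))
	fmt.Println(err) // expect a "BIGINT UNSIGNED value is out of range" error
}
```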
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
MAKEFLAGS = -s MAKEFLAGS = -s
sql.go: sql.y sql.go: sql.y
goyacc -o sql.go sql.y go run golang.org/x/tools/cmd/goyacc -o sql.go sql.y
gofmt -w sql.go gofmt -w sql.go
clean: clean:
......