提交 b2a3445f 编写于 作者: martianzhang's avatar martianzhang

update vendor

上级 dc1bb566
......@@ -2792,7 +2792,7 @@ func (q *Query4Audit) RuleUniqueKeyDup() Rule {
switch node := tiStmt.(type) {
case *tidb.CreateIndexStmt:
// create index
if node.Unique {
if node.KeyType == tidb.IndexKeyTypeUnique {
re := regexp.MustCompile(`(?i)(create\s+(unique)\s)`)
rule = HeuristicRules["KEY.009"]
if position := re.FindIndex([]byte(q.Query)); len(position) > 0 {
......
......@@ -342,10 +342,6 @@ var TokenString = map[int]string{
sqlparser.ZEROFILL: "zerofill",
sqlparser.DATABASES: "databases",
sqlparser.TABLES: "tables",
sqlparser.VITESS_KEYSPACES: "vitess_keyspaces",
sqlparser.VITESS_SHARDS: "vitess_shards",
sqlparser.VITESS_TABLETS: "vitess_tablets",
sqlparser.VSCHEMA_TABLES: "vschema_tables",
sqlparser.NAMES: "names",
sqlparser.CHARSET: "charset",
sqlparser.GLOBAL: "global",
......@@ -366,6 +362,8 @@ var TokenString = map[int]string{
sqlparser.SUBSTRING: "substring",
sqlparser.GROUP_CONCAT: "group_concat",
sqlparser.SEPARATOR: "separator",
sqlparser.VSCHEMA: "vschema",
sqlparser.SEQUENCE: "sequence",
sqlparser.MATCH: "match",
sqlparser.AGAINST: "against",
sqlparser.BOOLEAN: "boolean",
......
Changes by Version
==================
1.1.0 (2019-03-23)
-------------------
Notable changes:
- The library is now released under Apache 2.0 license
- Using Set() instead of Add() in HTTPHeadersCarrier is functionally a breaking change (fixes issue [#159](https://github.com/opentracing/opentracing-go/issues/159))
- 'golang.org/x/net/context' is replaced with 'context' from the standard library
List of all changes:
- Export StartSpanFromContextWithTracer (#214) <Aaron Delaney>
- Add IsGlobalTracerRegistered() to indicate if a tracer has been registered (#201) <Mike Goldsmith>
- Use Set() instead of Add() in HTTPHeadersCarrier (#191) <jeremyxu2010>
- Update license to Apache 2.0 (#181) <Andrea Kao>
- Replace 'golang.org/x/net/context' with 'context' (#176) <Tony Ghita>
- Port of Python opentracing/harness/api_check.py to Go (#146) <chris erway>
- Fix race condition in MockSpan.Context() (#170) <Brad>
- Add PeerHostIPv4.SetString() (#155) <NeoCN>
- Add a Noop log field type to log to allow for optional fields (#150) <Matt Ho>
1.0.2 (2017-04-26)
-------------------
- Add more semantic tags (#139) <Rustam Zagirov>
1.0.1 (2017-02-06)
-------------------
- Correct spelling in comments <Ben Sigelman>
- Address race in nextMockID() (#123) <bill fumerola>
- log: avoid panic marshaling nil error (#131) <Anthony Voutas>
- Deprecate InitGlobalTracer in favor of SetGlobalTracer (#128) <Yuri Shkuro>
- Drop Go 1.5 that fails in Travis (#129) <Yuri Shkuro>
- Add convenience methods Key() and Value() to log.Field <Ben Sigelman>
- Add convenience methods to log.Field (2 years, 6 months ago) <Radu Berinde>
1.0.0 (2016-09-26)
-------------------
- This release implements OpenTracing Specification 1.0 (https://opentracing.io/spec)
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2016 The OpenTracing Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
.DEFAULT_GOAL := test-and-lint
.PHONY: test-and-lint
test-and-lint: test lint
.PHONY: test
test:
go test -v -cover -race ./...
.PHONY: cover
cover:
go test -v -coverprofile=coverage.txt -covermode=atomic -race ./...
.PHONY: lint
lint:
go fmt ./...
golint ./...
@# Run again with magic to exit non-zero if golint outputs anything.
@! (golint ./... | read dummy)
go vet ./...
[![Gitter chat](http://img.shields.io/badge/gitter-join%20chat%20%E2%86%92-brightgreen.svg)](https://gitter.im/opentracing/public) [![Build Status](https://travis-ci.org/opentracing/opentracing-go.svg?branch=master)](https://travis-ci.org/opentracing/opentracing-go) [![GoDoc](https://godoc.org/github.com/opentracing/opentracing-go?status.svg)](http://godoc.org/github.com/opentracing/opentracing-go)
[![Sourcegraph Badge](https://sourcegraph.com/github.com/opentracing/opentracing-go/-/badge.svg)](https://sourcegraph.com/github.com/opentracing/opentracing-go?badge)
# OpenTracing API for Go
This package is a Go platform API for OpenTracing.
## Required Reading
In order to understand the Go platform API, one must first be familiar with the
[OpenTracing project](https://opentracing.io) and
[terminology](https://opentracing.io/specification/) more specifically.
## API overview for those adding instrumentation
Everyday consumers of this `opentracing` package really only need to worry
about a couple of key abstractions: the `StartSpan` function, the `Span`
interface, and binding a `Tracer` at `main()`-time. Here are code snippets
demonstrating some important use cases.
#### Singleton initialization
The simplest starting point is `./default_tracer.go`. As early as possible, call
```go
import "github.com/opentracing/opentracing-go"
import ".../some_tracing_impl"
func main() {
opentracing.SetGlobalTracer(
// tracing impl specific:
some_tracing_impl.New(...),
)
...
}
```
#### Non-Singleton initialization
If you prefer direct control to singletons, manage ownership of the
`opentracing.Tracer` implementation explicitly.
#### Creating a Span given an existing Go `context.Context`
If you use `context.Context` in your application, OpenTracing's Go library will
happily rely on it for `Span` propagation. To start a new (blocking child)
`Span`, you can use `StartSpanFromContext`.
```go
func xyz(ctx context.Context, ...) {
...
span, ctx := opentracing.StartSpanFromContext(ctx, "operation_name")
defer span.Finish()
span.LogFields(
log.String("event", "soft error"),
log.String("type", "cache timeout"),
log.Int("waited.millis", 1500))
...
}
```
#### Starting an empty trace by creating a "root span"
It's always possible to create a "root" `Span` with no parent or other causal
reference.
```go
func xyz() {
...
sp := opentracing.StartSpan("operation_name")
defer sp.Finish()
...
}
```
#### Creating a (child) Span given an existing (parent) Span
```go
func xyz(parentSpan opentracing.Span, ...) {
...
sp := opentracing.StartSpan(
"operation_name",
opentracing.ChildOf(parentSpan.Context()))
defer sp.Finish()
...
}
```
#### Serializing to the wire
```go
func makeSomeRequest(ctx context.Context) ... {
if span := opentracing.SpanFromContext(ctx); span != nil {
httpClient := &http.Client{}
httpReq, _ := http.NewRequest("GET", "http://myservice/", nil)
// Transmit the span's TraceContext as HTTP headers on our
// outbound request.
opentracing.GlobalTracer().Inject(
span.Context(),
opentracing.HTTPHeaders,
opentracing.HTTPHeadersCarrier(httpReq.Header))
resp, err := httpClient.Do(httpReq)
...
}
...
}
```
#### Deserializing from the wire
```go
http.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
var serverSpan opentracing.Span
appSpecificOperationName := ...
wireContext, err := opentracing.GlobalTracer().Extract(
opentracing.HTTPHeaders,
opentracing.HTTPHeadersCarrier(req.Header))
if err != nil {
// Optionally record something about err here
}
// Create the span referring to the RPC client if available.
// If wireContext == nil, a root span will be created.
serverSpan = opentracing.StartSpan(
appSpecificOperationName,
ext.RPCServerOption(wireContext))
defer serverSpan.Finish()
ctx := opentracing.ContextWithSpan(context.Background(), serverSpan)
...
}
```
#### Conditionally capture a field using `log.Noop`
In some situations, you may want to dynamically decide whether or not
to log a field. For example, you may want to capture additional data,
such as a customer ID, in non-production environments:
```go
func Customer(order *Order) log.Field {
if os.Getenv("ENVIRONMENT") == "dev" {
return log.String("customer", order.Customer.ID)
}
return log.Noop()
}
```
#### Goroutine-safety
The entire public API is goroutine-safe and does not require external
synchronization.
## API pointers for those implementing a tracing system
Tracing system implementors may be able to reuse or copy-paste-modify the `basictracer` package, found [here](https://github.com/opentracing/basictracer-go). In particular, see `basictracer.New(...)`.
## API compatibility
For the time being, "mild" backwards-incompatible changes may be made without changing the major version number. As OpenTracing and `opentracing-go` mature, backwards compatibility will become more of a priority.
## Tracer test suite
A test suite is available in the [harness](https://godoc.org/github.com/opentracing/opentracing-go/harness) package that can assist Tracer implementors to assert that their Tracer is working correctly.
## Licensing
[Apache 2.0 License](./LICENSE).
package opentracing
// registeredTracer pairs the active Tracer with a flag recording whether
// SetGlobalTracer has ever been called (see IsGlobalTracerRegistered).
type registeredTracer struct {
	tracer       Tracer
	isRegistered bool
}

var (
	// globalTracer defaults to a NoopTracer; isRegistered stays false
	// until SetGlobalTracer is invoked.
	globalTracer = registeredTracer{NoopTracer{}, false}
)
// SetGlobalTracer sets the [singleton] opentracing.Tracer returned by
// GlobalTracer(). Those who use GlobalTracer (rather than directly manage an
// opentracing.Tracer instance) should call SetGlobalTracer as early as
// possible in main(), prior to calling the `StartSpan` global func below.
// Prior to calling `SetGlobalTracer`, any Spans started via the `StartSpan`
// (etc) globals are noops.
func SetGlobalTracer(tracer Tracer) {
	globalTracer = registeredTracer{tracer: tracer, isRegistered: true}
}
// GlobalTracer returns the global singleton `Tracer` implementation.
// Before `SetGlobalTracer()` is called, the `GlobalTracer()` is a noop
// implementation that drops all data handed to it.
func GlobalTracer() Tracer {
	return globalTracer.tracer
}

// StartSpan defers to `Tracer.StartSpan`. See `GlobalTracer()`.
// It starts the Span on whichever Tracer was last passed to
// SetGlobalTracer (or the default NoopTracer).
func StartSpan(operationName string, opts ...StartSpanOption) Span {
	return globalTracer.tracer.StartSpan(operationName, opts...)
}
// InitGlobalTracer is deprecated.
//
// Deprecated: use SetGlobalTracer instead.
func InitGlobalTracer(tracer Tracer) {
	SetGlobalTracer(tracer)
}

// IsGlobalTracerRegistered returns a `bool` to indicate if a tracer has been globally registered
// via SetGlobalTracer (the default NoopTracer does not count as registered).
func IsGlobalTracerRegistered() bool {
	return globalTracer.isRegistered
}
package opentracing
import "context"
// contextKey is an unexported type used as the context value key so that no
// other package can collide with the Span stored by this one.
type contextKey struct{}

var activeSpanKey = contextKey{}

// ContextWithSpan returns a new `context.Context` that holds a reference to
// `span` (retrievable later via SpanFromContext).
func ContextWithSpan(ctx context.Context, span Span) context.Context {
	return context.WithValue(ctx, activeSpanKey, span)
}
// SpanFromContext returns the `Span` previously associated with `ctx`, or
// `nil` if no such `Span` could be found.
//
// NOTE: context.Context != SpanContext: the former is Go's intra-process
// context propagation mechanism, and the latter houses OpenTracing's per-Span
// identity and baggage information.
func SpanFromContext(ctx context.Context) Span {
	if sp, ok := ctx.Value(activeSpanKey).(Span); ok {
		return sp
	}
	return nil
}
// StartSpanFromContext starts and returns a Span with `operationName`, using
// any Span found within `ctx` as a ChildOfRef. If no such parent could be
// found, StartSpanFromContext creates a root (parentless) Span.
//
// The second return value is a context.Context object built around the
// returned Span. Any `opts` are forwarded unchanged to the underlying
// Tracer.StartSpan call.
//
// Example usage:
//
//	SomeFunction(ctx context.Context, ...) {
//	    sp, ctx := opentracing.StartSpanFromContext(ctx, "SomeFunction")
//	    defer sp.Finish()
//	    ...
//	}
func StartSpanFromContext(ctx context.Context, operationName string, opts ...StartSpanOption) (Span, context.Context) {
	return StartSpanFromContextWithTracer(ctx, GlobalTracer(), operationName, opts...)
}
// StartSpanFromContextWithTracer starts and returns a span with `operationName`
// using a span found within the context as a ChildOfRef. If that doesn't exist
// it creates a root span. It also returns a context.Context object built
// around the returned span.
//
// Its behavior is identical to StartSpanFromContext except that it takes an explicit
// tracer as opposed to using the global tracer.
func StartSpanFromContextWithTracer(ctx context.Context, tracer Tracer, operationName string, opts ...StartSpanOption) (Span, context.Context) {
	// Appending ChildOf last means it is added after any caller-supplied
	// options; the parent reference still applies.
	if parentSpan := SpanFromContext(ctx); parentSpan != nil {
		opts = append(opts, ChildOf(parentSpan.Context()))
	}
	span := tracer.StartSpan(operationName, opts...)
	return span, ContextWithSpan(ctx, span)
}
package log
import (
"fmt"
"math"
)
// fieldType discriminates which member of Field carries the logged value.
type fieldType int

const (
	stringType fieldType = iota
	boolType
	intType
	int32Type
	uint32Type
	int64Type
	uint64Type
	float32Type
	float64Type
	errorType
	objectType
	lazyLoggerType
	noopType
)

// Field instances are constructed via LogBool, LogString, and so on.
// Tracing implementations may then handle them via the Field.Marshal
// method.
//
// "heavily influenced by" (i.e., partially stolen from)
// https://github.com/uber-go/zap
type Field struct {
	key       string
	fieldType fieldType
	// numericVal holds signed/unsigned integers, bools as 0/1, and
	// floats as their IEEE-754 bit patterns (see Bool, Uint64, Float32).
	numericVal   int64
	stringVal    string
	interfaceVal interface{}
}
// String adds a string-valued key:value pair to a Span.LogFields() record
func String(key, val string) Field {
	return Field{
		key:       key,
		fieldType: stringType,
		stringVal: val,
	}
}

// Bool adds a bool-valued key:value pair to a Span.LogFields() record
func Bool(key string, val bool) Field {
	// Bools are stored in numericVal as 0 or 1.
	var numericVal int64
	if val {
		numericVal = 1
	}
	return Field{
		key:        key,
		fieldType:  boolType,
		numericVal: numericVal,
	}
}

// Int adds an int-valued key:value pair to a Span.LogFields() record
func Int(key string, val int) Field {
	return Field{
		key:        key,
		fieldType:  intType,
		numericVal: int64(val),
	}
}

// Int32 adds an int32-valued key:value pair to a Span.LogFields() record
func Int32(key string, val int32) Field {
	return Field{
		key:        key,
		fieldType:  int32Type,
		numericVal: int64(val),
	}
}

// Int64 adds an int64-valued key:value pair to a Span.LogFields() record
func Int64(key string, val int64) Field {
	return Field{
		key:        key,
		fieldType:  int64Type,
		numericVal: val,
	}
}

// Uint32 adds a uint32-valued key:value pair to a Span.LogFields() record
func Uint32(key string, val uint32) Field {
	return Field{
		key:        key,
		fieldType:  uint32Type,
		numericVal: int64(val),
	}
}

// Uint64 adds a uint64-valued key:value pair to a Span.LogFields() record
func Uint64(key string, val uint64) Field {
	return Field{
		key:       key,
		fieldType: uint64Type,
		// Deliberate wrap-around conversion; Marshal/Value convert the
		// bits back to uint64.
		numericVal: int64(val),
	}
}

// Float32 adds a float32-valued key:value pair to a Span.LogFields() record
func Float32(key string, val float32) Field {
	return Field{
		key:       key,
		fieldType: float32Type,
		// Stored as the IEEE-754 bit pattern; decoded with
		// math.Float32frombits in Marshal/Value.
		numericVal: int64(math.Float32bits(val)),
	}
}

// Float64 adds a float64-valued key:value pair to a Span.LogFields() record
func Float64(key string, val float64) Field {
	return Field{
		key:       key,
		fieldType: float64Type,
		// Stored as the IEEE-754 bit pattern; decoded with
		// math.Float64frombits in Marshal/Value.
		numericVal: int64(math.Float64bits(val)),
	}
}

// Error adds an error with the key "error" to a Span.LogFields() record
func Error(err error) Field {
	return Field{
		key:          "error",
		fieldType:    errorType,
		interfaceVal: err,
	}
}

// Object adds an object-valued key:value pair to a Span.LogFields() record
func Object(key string, obj interface{}) Field {
	return Field{
		key:          key,
		fieldType:    objectType,
		interfaceVal: obj,
	}
}
// LazyLogger allows for user-defined, late-bound logging of arbitrary data
type LazyLogger func(fv Encoder)

// Lazy adds a LazyLogger to a Span.LogFields() record; the tracing
// implementation will call the LazyLogger function at an indefinite time in
// the future (after Lazy() returns).
//
// NOTE: a Lazy field has no key of its own; the LazyLogger decides what to
// emit when invoked with an Encoder.
func Lazy(ll LazyLogger) Field {
	return Field{
		fieldType:    lazyLoggerType,
		interfaceVal: ll,
	}
}
// Noop creates a no-op log field that should be ignored by the tracer.
// It can be used to capture optional fields, for example those that should
// only be logged in non-production environment:
//
//	func customerField(order *Order) log.Field {
//	    if os.Getenv("ENVIRONMENT") == "dev" {
//	        return log.String("customer", order.Customer.ID)
//	    }
//	    return log.Noop()
//	}
//
//	span.LogFields(log.String("event", "purchase"), customerField(order))
func Noop() Field {
	return Field{
		fieldType: noopType,
	}
}
// Encoder allows access to the contents of a Field (via a call to
// Field.Marshal).
//
// Tracer implementations typically provide an implementation of Encoder;
// OpenTracing callers typically do not need to concern themselves with it.
type Encoder interface {
	EmitString(key, value string)
	EmitBool(key string, value bool)
	EmitInt(key string, value int)
	EmitInt32(key string, value int32)
	EmitInt64(key string, value int64)
	EmitUint32(key string, value uint32)
	EmitUint64(key string, value uint64)
	EmitFloat32(key string, value float32)
	EmitFloat64(key string, value float64)
	EmitObject(key string, value interface{})
	EmitLazyLogger(value LazyLogger)
}
// Marshal passes a Field instance through to the appropriate
// field-type-specific method of an Encoder.
func (lf Field) Marshal(visitor Encoder) {
	switch lf.fieldType {
	case stringType:
		visitor.EmitString(lf.key, lf.stringVal)
	case boolType:
		visitor.EmitBool(lf.key, lf.numericVal != 0)
	case intType:
		visitor.EmitInt(lf.key, int(lf.numericVal))
	case int32Type:
		visitor.EmitInt32(lf.key, int32(lf.numericVal))
	case int64Type:
		// numericVal is already int64; no conversion needed.
		visitor.EmitInt64(lf.key, lf.numericVal)
	case uint32Type:
		visitor.EmitUint32(lf.key, uint32(lf.numericVal))
	case uint64Type:
		visitor.EmitUint64(lf.key, uint64(lf.numericVal))
	case float32Type:
		// Floats are stored as raw IEEE-754 bits (see Float32/Float64).
		visitor.EmitFloat32(lf.key, math.Float32frombits(uint32(lf.numericVal)))
	case float64Type:
		visitor.EmitFloat64(lf.key, math.Float64frombits(uint64(lf.numericVal)))
	case errorType:
		// interfaceVal may hold a nil error; guard before calling Error().
		if err, ok := lf.interfaceVal.(error); ok {
			visitor.EmitString(lf.key, err.Error())
		} else {
			visitor.EmitString(lf.key, "<nil>")
		}
	case objectType:
		visitor.EmitObject(lf.key, lf.interfaceVal)
	case lazyLoggerType:
		visitor.EmitLazyLogger(lf.interfaceVal.(LazyLogger))
	case noopType:
		// intentionally left blank
	}
}
// Key returns the field's key.
func (lf Field) Key() string {
	return lf.key
}

// Value returns the field's value as interface{}.
func (lf Field) Value() interface{} {
	switch lf.fieldType {
	case stringType:
		return lf.stringVal
	case boolType:
		return lf.numericVal != 0
	case intType:
		return int(lf.numericVal)
	case int32Type:
		return int32(lf.numericVal)
	case int64Type:
		// numericVal is already int64; no conversion needed.
		return lf.numericVal
	case uint32Type:
		return uint32(lf.numericVal)
	case uint64Type:
		return uint64(lf.numericVal)
	case float32Type:
		// Floats are stored as raw IEEE-754 bits (see Float32/Float64).
		return math.Float32frombits(uint32(lf.numericVal))
	case float64Type:
		return math.Float64frombits(uint64(lf.numericVal))
	case errorType, objectType, lazyLoggerType:
		return lf.interfaceVal
	default:
		// noopType and any unrecognized field type carry no value.
		return nil
	}
}

// String returns a string representation of the key and value.
func (lf Field) String() string {
	return fmt.Sprint(lf.key, ":", lf.Value())
}
package log
import "fmt"
// InterleavedKVToFields converts keyValues a la Span.LogKV() to a Field slice
// a la Span.LogFields(). Keys must be strings; values of unsupported types
// are coerced to strings via fmt.Sprint.
func InterleavedKVToFields(keyValues ...interface{}) ([]Field, error) {
	if len(keyValues)%2 != 0 {
		return nil, fmt.Errorf("non-even keyValues len: %d", len(keyValues))
	}
	fields := make([]Field, len(keyValues)/2)
	for i := range fields {
		key, ok := keyValues[2*i].(string)
		if !ok {
			return nil, fmt.Errorf(
				"non-string key (pair #%d): %T",
				i, keyValues[2*i])
		}
		switch v := keyValues[2*i+1].(type) {
		case bool:
			fields[i] = Bool(key, v)
		case string:
			fields[i] = String(key, v)
		case int:
			fields[i] = Int(key, v)
		case int8:
			fields[i] = Int32(key, int32(v))
		case int16:
			fields[i] = Int32(key, int32(v))
		case int32:
			fields[i] = Int32(key, v)
		case int64:
			fields[i] = Int64(key, v)
		case uint:
			fields[i] = Uint64(key, uint64(v))
		case uint64:
			fields[i] = Uint64(key, v)
		case uint8:
			fields[i] = Uint32(key, uint32(v))
		case uint16:
			fields[i] = Uint32(key, uint32(v))
		case uint32:
			fields[i] = Uint32(key, v)
		case float32:
			fields[i] = Float32(key, v)
		case float64:
			fields[i] = Float64(key, v)
		default:
			// When in doubt, coerce to a string
			fields[i] = String(key, fmt.Sprint(v))
		}
	}
	return fields, nil
}
package opentracing
import "github.com/opentracing/opentracing-go/log"
// A NoopTracer is a trivial, minimum overhead implementation of Tracer
// for which all operations are no-ops.
//
// The primary use of this implementation is in libraries, such as RPC
// frameworks, that make tracing an optional feature controlled by the
// end user. A no-op implementation allows said libraries to use it
// as the default Tracer and to write instrumentation that does
// not need to keep checking if the tracer instance is nil.
//
// For the same reason, the NoopTracer is the default "global" tracer
// (see GlobalTracer and SetGlobalTracer functions).
//
// WARNING: NoopTracer does not support baggage propagation.
type NoopTracer struct{}

// noopSpan and noopSpanContext are the stateless Span/SpanContext values
// handed out by NoopTracer.
type noopSpan struct{}
type noopSpanContext struct{}

var (
	defaultNoopSpanContext = noopSpanContext{}
	defaultNoopSpan        = noopSpan{}
	defaultNoopTracer      = NoopTracer{}
)

const (
	emptyString = ""
)

// noopSpanContext:
func (n noopSpanContext) ForeachBaggageItem(handler func(k, v string) bool) {}

// noopSpan:
func (n noopSpan) Context() SpanContext { return defaultNoopSpanContext }

// SetBaggageItem returns the receiver for call chaining, consistent with
// SetTag and SetOperationName (previously it returned the behaviorally
// identical defaultNoopSpan value).
func (n noopSpan) SetBaggageItem(key, val string) Span                   { return n }
func (n noopSpan) BaggageItem(key string) string                         { return emptyString }
func (n noopSpan) SetTag(key string, value interface{}) Span             { return n }
func (n noopSpan) LogFields(fields ...log.Field)                         {}
func (n noopSpan) LogKV(keyVals ...interface{})                          {}
func (n noopSpan) Finish()                                               {}
func (n noopSpan) FinishWithOptions(opts FinishOptions)                  {}
func (n noopSpan) SetOperationName(operationName string) Span            { return n }
func (n noopSpan) Tracer() Tracer                                        { return defaultNoopTracer }
func (n noopSpan) LogEvent(event string)                                 {}
func (n noopSpan) LogEventWithPayload(event string, payload interface{}) {}
func (n noopSpan) Log(data LogData)                                      {}

// StartSpan belongs to the Tracer interface.
func (n NoopTracer) StartSpan(operationName string, opts ...StartSpanOption) Span {
	return defaultNoopSpan
}

// Inject belongs to the Tracer interface.
func (n NoopTracer) Inject(sp SpanContext, format interface{}, carrier interface{}) error {
	return nil
}

// Extract belongs to the Tracer interface.
func (n NoopTracer) Extract(format interface{}, carrier interface{}) (SpanContext, error) {
	return nil, ErrSpanContextNotFound
}
package opentracing
import (
"errors"
"net/http"
)
///////////////////////////////////////////////////////////////////////////////
// CORE PROPAGATION INTERFACES:
///////////////////////////////////////////////////////////////////////////////
// Sentinel errors for Tracer.Inject() / Tracer.Extract(); implementations
// return these values so callers can distinguish the failure modes listed
// in the Tracer.Extract() documentation.
var (
	// ErrUnsupportedFormat occurs when the `format` passed to Tracer.Inject() or
	// Tracer.Extract() is not recognized by the Tracer implementation.
	ErrUnsupportedFormat = errors.New("opentracing: Unknown or unsupported Inject/Extract format")

	// ErrSpanContextNotFound occurs when the `carrier` passed to
	// Tracer.Extract() is valid and uncorrupted but has insufficient
	// information to extract a SpanContext.
	ErrSpanContextNotFound = errors.New("opentracing: SpanContext not found in Extract carrier")

	// ErrInvalidSpanContext errors occur when Tracer.Inject() is asked to
	// operate on a SpanContext which it is not prepared to handle (for
	// example, since it was created by a different tracer implementation).
	ErrInvalidSpanContext = errors.New("opentracing: SpanContext type incompatible with tracer")

	// ErrInvalidCarrier errors occur when Tracer.Inject() or Tracer.Extract()
	// implementations expect a different type of `carrier` than they are
	// given.
	ErrInvalidCarrier = errors.New("opentracing: Invalid Inject/Extract carrier")

	// ErrSpanContextCorrupted occurs when the `carrier` passed to
	// Tracer.Extract() is of the expected type but is corrupted.
	ErrSpanContextCorrupted = errors.New("opentracing: SpanContext data corrupted in Extract carrier")
)
///////////////////////////////////////////////////////////////////////////////
// BUILTIN PROPAGATION FORMATS:
///////////////////////////////////////////////////////////////////////////////
// BuiltinFormat is used to demarcate the values within package `opentracing`
// that are intended for use with the Tracer.Inject() and Tracer.Extract()
// methods.
//
// All opentracing.Tracer implementations MUST support all BuiltinFormats
// (see the Tracer.Inject / Tracer.Extract documentation).
type BuiltinFormat byte

const (
	// Binary represents SpanContexts as opaque binary data.
	//
	// For Tracer.Inject(): the carrier must be an `io.Writer`.
	//
	// For Tracer.Extract(): the carrier must be an `io.Reader`.
	Binary BuiltinFormat = iota

	// TextMap represents SpanContexts as key:value string pairs.
	//
	// Unlike HTTPHeaders, the TextMap format does not restrict the key or
	// value character sets in any way.
	//
	// For Tracer.Inject(): the carrier must be a `TextMapWriter`.
	//
	// For Tracer.Extract(): the carrier must be a `TextMapReader`.
	TextMap

	// HTTPHeaders represents SpanContexts as HTTP header string pairs.
	//
	// Unlike TextMap, the HTTPHeaders format requires that the keys and values
	// be valid as HTTP headers as-is (i.e., character casing may be unstable
	// and special characters are disallowed in keys, values should be
	// URL-escaped, etc).
	//
	// For Tracer.Inject(): the carrier must be a `TextMapWriter`.
	//
	// For Tracer.Extract(): the carrier must be a `TextMapReader`.
	//
	// See HTTPHeadersCarrier for an implementation of both TextMapWriter
	// and TextMapReader that defers to an http.Header instance for storage.
	// For example, Inject():
	//
	//    carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
	//    err := span.Tracer().Inject(
	//        span.Context(), opentracing.HTTPHeaders, carrier)
	//
	// Or Extract():
	//
	//    carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
	//    clientContext, err := tracer.Extract(
	//        opentracing.HTTPHeaders, carrier)
	//
	HTTPHeaders
)
// TextMapWriter is the Inject() carrier for the TextMap builtin format. With
// it, the caller can encode a SpanContext for propagation as entries in a map
// of unicode strings.
//
// See TextMapCarrier and HTTPHeadersCarrier for map- and http.Header-backed
// implementations provided by this package.
type TextMapWriter interface {
	// Set a key:value pair to the carrier. Multiple calls to Set() for the
	// same key leads to undefined behavior.
	//
	// NOTE: The backing store for the TextMapWriter may contain data unrelated
	// to SpanContext. As such, Inject() and Extract() implementations that
	// call the TextMapWriter and TextMapReader interfaces must agree on a
	// prefix or other convention to distinguish their own key:value pairs.
	Set(key, val string)
}

// TextMapReader is the Extract() carrier for the TextMap builtin format. With it,
// the caller can decode a propagated SpanContext as entries in a map of
// unicode strings.
//
// See TextMapCarrier and HTTPHeadersCarrier for map- and http.Header-backed
// implementations provided by this package.
type TextMapReader interface {
	// ForeachKey returns TextMap contents via repeated calls to the `handler`
	// function. If any call to `handler` returns a non-nil error, ForeachKey
	// terminates and returns that error.
	//
	// NOTE: The backing store for the TextMapReader may contain data unrelated
	// to SpanContext. As such, Inject() and Extract() implementations that
	// call the TextMapWriter and TextMapReader interfaces must agree on a
	// prefix or other convention to distinguish their own key:value pairs.
	//
	// The "foreach" callback pattern reduces unnecessary copying in some cases
	// and also allows implementations to hold locks while the map is read.
	ForeachKey(handler func(key, val string) error) error
}
// TextMapCarrier allows the use of a regular map[string]string as both a
// TextMapWriter and a TextMapReader.
type TextMapCarrier map[string]string

// ForeachKey conforms to the TextMapReader interface: it invokes handler
// once per stored key:value entry and stops at the first non-nil error,
// which it returns to the caller.
func (c TextMapCarrier) ForeachKey(handler func(key, val string) error) error {
	for key, value := range c {
		err := handler(key, value)
		if err != nil {
			return err
		}
	}
	return nil
}

// Set implements Set() of opentracing.TextMapWriter, storing val under key
// (overwriting any previous value for that key).
func (c TextMapCarrier) Set(key, val string) {
	c[key] = val
}
// HTTPHeadersCarrier satisfies both TextMapWriter and TextMapReader,
// deferring to an underlying http.Header for storage.
//
// Example usage for server side:
//
//     carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
//     clientContext, err := tracer.Extract(opentracing.HTTPHeaders, carrier)
//
// Example usage for client side:
//
//     carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
//     err := tracer.Inject(
//         span.Context(),
//         opentracing.HTTPHeaders,
//         carrier)
//
type HTTPHeadersCarrier http.Header

// Set conforms to the TextMapWriter interface. It delegates to
// http.Header.Set, which canonicalizes the key and replaces any values
// previously associated with it.
func (c HTTPHeadersCarrier) Set(key, val string) {
	http.Header(c).Set(key, val)
}

// ForeachKey conforms to the TextMapReader interface. Because a header key
// may map to several values, handler is invoked once per (key, value) pair;
// the first non-nil handler error aborts the iteration and is returned.
func (c HTTPHeadersCarrier) ForeachKey(handler func(key, val string) error) error {
	for key, values := range c {
		for _, value := range values {
			if err := handler(key, value); err != nil {
				return err
			}
		}
	}
	return nil
}
package opentracing
import (
"time"
"github.com/opentracing/opentracing-go/log"
)
// SpanContext represents Span state that must propagate to descendant Spans and across process
// boundaries (e.g., a <trace_id, span_id, sampled> tuple).
//
// SpanContext instances cross process boundaries via Tracer.Inject() and
// Tracer.Extract().
type SpanContext interface {
	// ForeachBaggageItem grants access to all baggage items stored in the
	// SpanContext.
	// The handler function will be called for each baggage key/value pair.
	// The ordering of items is not guaranteed.
	//
	// The bool return value indicates if the handler wants to continue iterating
	// through the rest of the baggage items; for example if the handler is trying to
	// find some baggage item by pattern matching the name, it can return false
	// as soon as the item is found to stop further iterations.
	ForeachBaggageItem(handler func(k, v string) bool)
}
// Span represents an active, un-finished span in the OpenTracing system.
//
// Spans are created by the Tracer interface.
//
// The mutating methods (SetOperationName, SetTag, SetBaggageItem) return the
// Span itself to allow call chaining.
type Span interface {
	// Sets the end timestamp and finalizes Span state.
	//
	// With the exception of calls to Context() (which are always allowed),
	// Finish() must be the last call made to any span instance, and to do
	// otherwise leads to undefined behavior.
	Finish()
	// FinishWithOptions is like Finish() but with explicit control over
	// timestamps and log data.
	FinishWithOptions(opts FinishOptions)

	// Context() yields the SpanContext for this Span. Note that the return
	// value of Context() is still valid after a call to Span.Finish(), as is
	// a call to Span.Context() after a call to Span.Finish().
	Context() SpanContext

	// Sets or changes the operation name.
	//
	// Returns a reference to this Span for chaining.
	SetOperationName(operationName string) Span

	// Adds a tag to the span.
	//
	// If there is a pre-existing tag set for `key`, it is overwritten.
	//
	// Tag values can be numeric types, strings, or bools. The behavior of
	// other tag value types is undefined at the OpenTracing level. If a
	// tracing system does not know how to handle a particular value type, it
	// may ignore the tag, but shall not panic.
	//
	// Returns a reference to this Span for chaining.
	SetTag(key string, value interface{}) Span

	// LogFields is an efficient and type-checked way to record key:value
	// logging data about a Span, though the programming interface is a little
	// more verbose than LogKV(). Here's an example:
	//
	//    span.LogFields(
	//        log.String("event", "soft error"),
	//        log.String("type", "cache timeout"),
	//        log.Int("waited.millis", 1500))
	//
	// Also see Span.FinishWithOptions() and FinishOptions.BulkLogData.
	LogFields(fields ...log.Field)

	// LogKV is a concise, readable way to record key:value logging data about
	// a Span, though unfortunately this also makes it less efficient and less
	// type-safe than LogFields(). Here's an example:
	//
	//    span.LogKV(
	//        "event", "soft error",
	//        "type", "cache timeout",
	//        "waited.millis", 1500)
	//
	// For LogKV (as opposed to LogFields()), the parameters must appear as
	// key-value pairs, like
	//
	//    span.LogKV(key1, val1, key2, val2, key3, val3, ...)
	//
	// The keys must all be strings. The values may be strings, numeric types,
	// bools, Go error instances, or arbitrary structs.
	//
	// (Note to implementors: consider the log.InterleavedKVToFields() helper)
	LogKV(alternatingKeyValues ...interface{})

	// SetBaggageItem sets a key:value pair on this Span and its SpanContext
	// that also propagates to descendants of this Span.
	//
	// SetBaggageItem() enables powerful functionality given a full-stack
	// opentracing integration (e.g., arbitrary application data from a mobile
	// app can make it, transparently, all the way into the depths of a storage
	// system), and with it some powerful costs: use this feature with care.
	//
	// IMPORTANT NOTE #1: SetBaggageItem() will only propagate baggage items to
	// *future* causal descendants of the associated Span.
	//
	// IMPORTANT NOTE #2: Use this thoughtfully and with care. Every key and
	// value is copied into every local *and remote* child of the associated
	// Span, and that can add up to a lot of network and cpu overhead.
	//
	// Returns a reference to this Span for chaining.
	SetBaggageItem(restrictedKey, value string) Span

	// Gets the value for a baggage item given its key. Returns the empty string
	// if the value isn't found in this Span.
	BaggageItem(restrictedKey string) string

	// Provides access to the Tracer that created this Span.
	Tracer() Tracer

	// Deprecated: use LogFields or LogKV
	LogEvent(event string)
	// Deprecated: use LogFields or LogKV
	LogEventWithPayload(event string, payload interface{})
	// Deprecated: use LogFields or LogKV
	Log(data LogData)
}
// LogRecord is data associated with a single Span log. Every LogRecord
// instance must specify at least one Field.
type LogRecord struct {
	Timestamp time.Time   // when the logged event occurred
	Fields    []log.Field // structured data for the event; must be non-empty
}
// FinishOptions allows Span.FinishWithOptions callers to override the finish
// timestamp and provide log data via a bulk interface.
type FinishOptions struct {
	// FinishTime overrides the Span's finish time, or implicitly becomes
	// time.Now() if FinishTime.IsZero().
	//
	// FinishTime must resolve to a timestamp that's >= the Span's StartTime
	// (per StartSpanOptions).
	FinishTime time.Time

	// LogRecords allows the caller to specify the contents of many LogFields()
	// calls with a single slice. May be nil.
	//
	// None of the LogRecord.Timestamp values may be .IsZero() (i.e., they must
	// be set explicitly). Also, they must be >= the Span's start timestamp and
	// <= the FinishTime (or time.Now() if FinishTime.IsZero()). Otherwise the
	// behavior of FinishWithOptions() is undefined.
	//
	// If specified, the caller hands off ownership of LogRecords at
	// FinishWithOptions() invocation time.
	//
	// If specified, the (deprecated) BulkLogData must be nil or empty.
	LogRecords []LogRecord

	// BulkLogData is DEPRECATED; use LogRecords instead (see LogData.ToLogRecord
	// for the conversion).
	BulkLogData []LogData
}
// LogData is DEPRECATED; use LogRecord instead. ToLogRecord converts the
// legacy representation.
type LogData struct {
	Timestamp time.Time   // when the event occurred; zero means "now" on conversion
	Event     string      // free-form event name
	Payload   interface{} // optional arbitrary payload attached to the event
}
// ToLogRecord converts a deprecated LogData to a non-deprecated LogRecord.
// A zero Timestamp is replaced with time.Now(); Event is re-expressed as a
// "event" string field, and a non-nil Payload becomes an additional
// "payload" object field.
func (ld *LogData) ToLogRecord() LogRecord {
	ts := ld.Timestamp
	if ts.IsZero() {
		ts = time.Now()
	}
	fields := []log.Field{log.String("event", ld.Event)}
	if ld.Payload != nil {
		fields = append(fields, log.Object("payload", ld.Payload))
	}
	return LogRecord{
		Timestamp: ts,
		Fields:    fields,
	}
}
package opentracing
import "time"
// Tracer is a simple, thin interface for Span creation and SpanContext
// propagation.
//
// See NoopTracer for a do-nothing implementation suitable as a default.
type Tracer interface {

	// Create, start, and return a new Span with the given `operationName` and
	// incorporate the given StartSpanOption `opts`. (Note that `opts` borrows
	// from the "functional options" pattern, per
	// http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis)
	//
	// A Span with no SpanReference options (e.g., opentracing.ChildOf() or
	// opentracing.FollowsFrom()) becomes the root of its own trace.
	//
	// Examples:
	//
	//     var tracer opentracing.Tracer = ...
	//
	//     // The root-span case:
	//     sp := tracer.StartSpan("GetFeed")
	//
	//     // The vanilla child span case:
	//     sp := tracer.StartSpan(
	//         "GetFeed",
	//         opentracing.ChildOf(parentSpan.Context()))
	//
	//     // All the bells and whistles:
	//     sp := tracer.StartSpan(
	//         "GetFeed",
	//         opentracing.ChildOf(parentSpan.Context()),
	//         opentracing.Tag{"user_agent", loggedReq.UserAgent},
	//         opentracing.StartTime(loggedReq.Timestamp),
	//     )
	//
	StartSpan(operationName string, opts ...StartSpanOption) Span

	// Inject() takes the `sm` SpanContext instance and injects it for
	// propagation within `carrier`. The actual type of `carrier` depends on
	// the value of `format`.
	//
	// OpenTracing defines a common set of `format` values (see BuiltinFormat),
	// and each has an expected carrier type.
	//
	// Other packages may declare their own `format` values, much like the keys
	// used by `context.Context` (see https://godoc.org/context#WithValue).
	//
	// Example usage (sans error handling):
	//
	//     carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
	//     err := tracer.Inject(
	//         span.Context(),
	//         opentracing.HTTPHeaders,
	//         carrier)
	//
	// NOTE: All opentracing.Tracer implementations MUST support all
	// BuiltinFormats.
	//
	// Implementations may return opentracing.ErrUnsupportedFormat if `format`
	// is not supported by (or not known by) the implementation.
	//
	// Implementations may return opentracing.ErrInvalidCarrier or any other
	// implementation-specific error if the format is supported but injection
	// fails anyway.
	//
	// See Tracer.Extract().
	Inject(sm SpanContext, format interface{}, carrier interface{}) error

	// Extract() returns a SpanContext instance given `format` and `carrier`.
	//
	// OpenTracing defines a common set of `format` values (see BuiltinFormat),
	// and each has an expected carrier type.
	//
	// Other packages may declare their own `format` values, much like the keys
	// used by `context.Context` (see
	// https://godoc.org/golang.org/x/net/context#WithValue).
	//
	// Example usage (with StartSpan):
	//
	//
	//     carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
	//     clientContext, err := tracer.Extract(opentracing.HTTPHeaders, carrier)
	//
	//     // ... assuming the ultimate goal here is to resume the trace with a
	//     // server-side Span:
	//     var serverSpan opentracing.Span
	//     if err == nil {
	//         span = tracer.StartSpan(
	//             rpcMethodName, ext.RPCServerOption(clientContext))
	//     } else {
	//         span = tracer.StartSpan(rpcMethodName)
	//     }
	//
	//
	// NOTE: All opentracing.Tracer implementations MUST support all
	// BuiltinFormats.
	//
	// Return values:
	//  - A successful Extract returns a SpanContext instance and a nil error
	//  - If there was simply no SpanContext to extract in `carrier`, Extract()
	//    returns (nil, opentracing.ErrSpanContextNotFound)
	//  - If `format` is unsupported or unrecognized, Extract() returns (nil,
	//    opentracing.ErrUnsupportedFormat)
	//  - If there are more fundamental problems with the `carrier` object,
	//    Extract() may return opentracing.ErrInvalidCarrier,
	//    opentracing.ErrSpanContextCorrupted, or implementation-specific
	//    errors.
	//
	// See Tracer.Inject().
	Extract(format interface{}, carrier interface{}) (SpanContext, error)
}
// StartSpanOptions allows Tracer.StartSpan() callers and implementors a
// mechanism to override the start timestamp, specify Span References, and make
// a single Tag or multiple Tags available at Span start time.
//
// StartSpan() callers should look at the StartSpanOption interface and
// implementations available in this package.
//
// Tracer implementations can convert a slice of `StartSpanOption` instances
// into a `StartSpanOptions` struct like so:
//
//     func StartSpan(opName string, opts ...opentracing.StartSpanOption) {
//         sso := opentracing.StartSpanOptions{}
//         for _, o := range opts {
//             o.Apply(&sso)
//         }
//         ...
//     }
//
type StartSpanOptions struct {
	// Zero or more causal references to other Spans (via their SpanContext).
	// If empty, start a "root" Span (i.e., start a new trace).
	//
	// References are typically populated via the ChildOf() and FollowsFrom()
	// helpers (see SpanReference.Apply).
	References []SpanReference

	// StartTime overrides the Span's start time, or implicitly becomes
	// time.Now() if StartTime.IsZero().
	StartTime time.Time

	// Tags may have zero or more entries; the restrictions on map values are
	// identical to those for Span.SetTag(). May be nil.
	//
	// If specified, the caller hands off ownership of Tags at
	// StartSpan() invocation time.
	Tags map[string]interface{}
}
// StartSpanOption instances (zero or more) may be passed to Tracer.StartSpan.
//
// StartSpanOption borrows from the "functional options" pattern, per
// http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis
//
// Implementations provided by this package: SpanReference (via ChildOf and
// FollowsFrom), StartTime, Tags, and Tag.
type StartSpanOption interface {
	Apply(*StartSpanOptions)
}
// SpanReferenceType is an enum type describing different categories of
// relationships between two Spans. If Span-2 refers to Span-1, the
// SpanReferenceType describes Span-1 from Span-2's perspective. For example,
// ChildOfRef means that Span-1 created Span-2.
//
// NOTE: Span-1 and Span-2 do *not* necessarily depend on each other for
// completion; e.g., Span-2 may be part of a background job enqueued by Span-1,
// or Span-2 may be sitting in a distributed queue behind Span-1.
type SpanReferenceType int

const (
	// ChildOfRef refers to a parent Span that caused *and* somehow depends
	// upon the new child Span. Often (but not always), the parent Span cannot
	// finish until the child Span does.
	//
	// A timing diagram for a ChildOfRef that's blocked on the new Span:
	//
	//     [-Parent Span---------]
	//          [-Child Span----]
	//
	// See http://opentracing.io/spec/
	//
	// See opentracing.ChildOf()
	ChildOfRef SpanReferenceType = iota

	// FollowsFromRef refers to a parent Span that does not depend in any way
	// on the result of the new child Span. For instance, one might use
	// FollowsFromRefs to describe pipeline stages separated by queues,
	// or a fire-and-forget cache insert at the tail end of a web request.
	//
	// A FollowsFromRef Span is part of the same logical trace as the new Span:
	// i.e., the new Span is somehow caused by the work of its FollowsFromRef.
	//
	// All of the following could be valid timing diagrams for children that
	// "FollowFrom" a parent.
	//
	//     [-Parent Span-]  [-Child Span-]
	//
	//
	//     [-Parent Span--]
	//      [-Child Span-]
	//
	//
	//     [-Parent Span-]
	//                 [-Child Span-]
	//
	// See http://opentracing.io/spec/
	//
	// See opentracing.FollowsFrom()
	FollowsFromRef
)
// SpanReference is a StartSpanOption that pairs a SpanReferenceType and a
// referenced SpanContext. See the SpanReferenceType documentation for
// supported relationships. If SpanReference is created with
// ReferencedContext==nil, it has no effect. Thus it allows for a more concise
// syntax for starting spans:
//
//     sc, _ := tracer.Extract(someFormat, someCarrier)
//     span := tracer.StartSpan("operation", opentracing.ChildOf(sc))
//
// The `ChildOf(sc)` option above will not panic if sc == nil, it will just
// not add the parent span reference to the options.
type SpanReference struct {
	Type              SpanReferenceType // the relationship category (ChildOfRef or FollowsFromRef)
	ReferencedContext SpanContext       // the referenced span's context; nil disables the option
}
// Apply satisfies the StartSpanOption interface. A reference whose
// ReferencedContext is nil is silently skipped, which is what makes
// ChildOf(nil)/FollowsFrom(nil) safe to pass to StartSpan.
func (r SpanReference) Apply(o *StartSpanOptions) {
	if r.ReferencedContext == nil {
		return
	}
	o.References = append(o.References, r)
}
// ChildOf returns a StartSpanOption pointing to a dependent parent span.
// If sc == nil, the option has no effect (see SpanReference.Apply).
//
// See ChildOfRef, SpanReference
func ChildOf(sc SpanContext) SpanReference {
	ref := SpanReference{Type: ChildOfRef}
	ref.ReferencedContext = sc
	return ref
}
// FollowsFrom returns a StartSpanOption pointing to a parent Span that caused
// the child Span but does not directly depend on its result in any way.
// If sc == nil, the option has no effect (see SpanReference.Apply).
//
// See FollowsFromRef, SpanReference
func FollowsFrom(sc SpanContext) SpanReference {
	ref := SpanReference{Type: FollowsFromRef}
	ref.ReferencedContext = sc
	return ref
}
// StartTime is a StartSpanOption that sets an explicit start timestamp for the
// new Span.
type StartTime time.Time

// Apply satisfies the StartSpanOption interface by overriding
// StartSpanOptions.StartTime with the wrapped time.Time value.
func (t StartTime) Apply(o *StartSpanOptions) {
	o.StartTime = time.Time(t)
}
// Tags are a generic map from an arbitrary string key to an opaque value type.
// The underlying tracing system is responsible for interpreting and
// serializing the values.
type Tags map[string]interface{}

// Apply satisfies the StartSpanOption interface: the options' Tags map is
// lazily allocated, then every entry of t is copied into it (overwriting any
// existing entries with the same keys).
func (t Tags) Apply(o *StartSpanOptions) {
	if o.Tags == nil {
		o.Tags = map[string]interface{}{}
	}
	for key, value := range t {
		o.Tags[key] = value
	}
}
// Tag may be passed as a StartSpanOption to attach a single key/value tag to
// a new span, or its Set method may be used to apply the tag to an existing
// Span. For example:
//
//     tracer.StartSpan("opName", Tag{"Key", value})
//
// or
//
//     Tag{"key", value}.Set(span)
type Tag struct {
	Key   string
	Value interface{}
}

// Apply satisfies the StartSpanOption interface: the options' Tags map is
// lazily allocated before the key/value pair is recorded in it.
func (t Tag) Apply(o *StartSpanOptions) {
	if o.Tags == nil {
		o.Tags = map[string]interface{}{}
	}
	o.Tags[t.Key] = t.Value
}

// Set applies the tag to an existing Span via Span.SetTag.
func (t Tag) Set(s Span) {
	s.SetTag(t.Key, t.Value)
}
......@@ -14,8 +14,6 @@
package ast
import (
"strings"
"github.com/pingcap/errors"
"github.com/pingcap/parser/auth"
. "github.com/pingcap/parser/format"
......@@ -60,6 +58,7 @@ const (
DatabaseOptionNone DatabaseOptionType = iota
DatabaseOptionCharset
DatabaseOptionCollate
DatabaseOptionEncryption
)
// DatabaseOption represents database option.
......@@ -79,6 +78,10 @@ func (n *DatabaseOption) Restore(ctx *RestoreCtx) error {
ctx.WriteKeyWord("COLLATE")
ctx.WritePlain(" = ")
ctx.WritePlain(n.Value)
case DatabaseOptionEncryption:
ctx.WriteKeyWord("ENCRYPTION")
ctx.WritePlain(" = ")
ctx.WriteString(n.Value)
default:
return errors.Errorf("invalid DatabaseOptionType: %d", n.Tp)
}
......@@ -253,6 +256,8 @@ func (n *ReferenceDef) Restore(ctx *RestoreCtx) error {
return errors.Annotate(err, "An error occurred while splicing ReferenceDef")
}
}
if n.IndexColNames != nil {
ctx.WritePlain("(")
for i, indexColNames := range n.IndexColNames {
if i > 0 {
......@@ -263,6 +268,8 @@ func (n *ReferenceDef) Restore(ctx *RestoreCtx) error {
}
}
ctx.WritePlain(")")
}
if n.Match != MatchNone {
ctx.WriteKeyWord(" MATCH ")
switch n.Match {
......@@ -420,6 +427,7 @@ const (
ColumnOptionReference
ColumnOptionCollate
ColumnOptionCheck
ColumnOptionColumnFormat
)
var (
......@@ -514,6 +522,9 @@ func (n *ColumnOption) Restore(ctx *RestoreCtx) error {
} else {
ctx.WriteKeyWord(" NOT ENFORCED")
}
case ColumnOptionColumnFormat:
ctx.WriteKeyWord("COLUMN_FORMAT ")
ctx.WriteKeyWord(n.StrValue)
default:
return errors.New("An error occurred while splicing ColumnOption")
}
......@@ -821,6 +832,7 @@ type CreateTableStmt struct {
ddlNode
IfNotExists bool
IsTemporary bool
Table *TableName
ReferTable *TableName
Cols []*ColumnDef
......@@ -833,7 +845,11 @@ type CreateTableStmt struct {
// Restore implements Node interface.
func (n *CreateTableStmt) Restore(ctx *RestoreCtx) error {
if n.IsTemporary {
ctx.WriteKeyWord("CREATE TEMPORARY TABLE ")
} else {
ctx.WriteKeyWord("CREATE TABLE ")
}
if n.IfNotExists {
ctx.WriteKeyWord("IF NOT EXISTS ")
}
......@@ -1197,6 +1213,56 @@ func (n *CreateViewStmt) Accept(v Visitor) (Node, bool) {
return v.Leave(n)
}
// IndexLockAndAlgorithm stores the algorithm option and the lock option
// of a CREATE INDEX / DROP INDEX statement.
type IndexLockAndAlgorithm struct {
	node

	LockTp      LockType      // LOCK clause; LockTypeDefault means unspecified
	AlgorithmTp AlgorithmType // ALGORITHM clause; AlgorithmTypeDefault means unspecified
}

// Restore implements Node interface. It writes "ALGORITHM = <x>" and/or
// "LOCK = <y>", separated by a single space; a clause left at its Default
// value is omitted entirely.
func (n *IndexLockAndAlgorithm) Restore(ctx *RestoreCtx) error {
	hasPrevOption := false
	if n.AlgorithmTp != AlgorithmTypeDefault {
		ctx.WriteKeyWord("ALGORITHM")
		ctx.WritePlain(" = ")
		ctx.WriteKeyWord(n.AlgorithmTp.String())
		// remember we emitted something so the LOCK clause gets a separator
		hasPrevOption = true
	}

	if n.LockTp != LockTypeDefault {
		if hasPrevOption {
			ctx.WritePlain(" ")
		}
		ctx.WriteKeyWord("LOCK")
		ctx.WritePlain(" = ")
		ctx.WriteKeyWord(n.LockTp.String())
	}
	return nil
}

// Accept implements Node Accept interface. The node has no child nodes to
// visit, so only Enter/Leave are invoked.
func (n *IndexLockAndAlgorithm) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*IndexLockAndAlgorithm)
	return v.Leave(n)
}
// IndexKeyType is the type for index key. It selects the modifier keyword
// emitted between CREATE and INDEX (see CreateIndexStmt.Restore).
type IndexKeyType int

// Index key types.
const (
	IndexKeyTypeNone IndexKeyType = iota // plain INDEX, no modifier keyword
	IndexKeyTypeUnique                   // UNIQUE INDEX
	IndexKeyTypeSpatial                  // SPATIAL INDEX
	IndexKeyTypeFullText                 // FULLTEXT INDEX
)
// CreateIndexStmt is a statement to create an index.
// See https://dev.mysql.com/doc/refman/5.7/en/create-index.html
type CreateIndexStmt struct {
......@@ -1208,16 +1274,22 @@ type CreateIndexStmt struct {
IndexName string
Table *TableName
Unique bool
IndexColNames []*IndexColName
IndexOption *IndexOption
KeyType IndexKeyType
LockAlg *IndexLockAndAlgorithm
}
// Restore implements Node interface.
func (n *CreateIndexStmt) Restore(ctx *RestoreCtx) error {
ctx.WriteKeyWord("CREATE ")
if n.Unique {
switch n.KeyType {
case IndexKeyTypeUnique:
ctx.WriteKeyWord("UNIQUE ")
case IndexKeyTypeSpatial:
ctx.WriteKeyWord("SPATIAL ")
case IndexKeyTypeFullText:
ctx.WriteKeyWord("FULLTEXT ")
}
ctx.WriteKeyWord("INDEX ")
if n.IfNotExists {
......@@ -1247,6 +1319,13 @@ func (n *CreateIndexStmt) Restore(ctx *RestoreCtx) error {
}
}
if n.LockAlg != nil {
ctx.WritePlain(" ")
if err := n.LockAlg.Restore(ctx); err != nil {
return errors.Annotate(err, "An error occurred while restore CreateIndexStmt.LockAlg")
}
}
return nil
}
......@@ -1276,6 +1355,13 @@ func (n *CreateIndexStmt) Accept(v Visitor) (Node, bool) {
}
n.IndexOption = node.(*IndexOption)
}
if n.LockAlg != nil {
node, ok := n.LockAlg.Accept(v)
if !ok {
return n, false
}
n.LockAlg = node.(*IndexLockAndAlgorithm)
}
return v.Leave(n)
}
......@@ -1287,6 +1373,7 @@ type DropIndexStmt struct {
IfExists bool
IndexName string
Table *TableName
LockAlg *IndexLockAndAlgorithm
}
// Restore implements Node interface.
......@@ -1302,6 +1389,13 @@ func (n *DropIndexStmt) Restore(ctx *RestoreCtx) error {
return errors.Annotate(err, "An error occurred while add index")
}
if n.LockAlg != nil {
ctx.WritePlain(" ")
if err := n.LockAlg.Restore(ctx); err != nil {
return errors.Annotate(err, "An error occurred while restore CreateIndexStmt.LockAlg")
}
}
return nil
}
......@@ -1317,6 +1411,13 @@ func (n *DropIndexStmt) Accept(v Visitor) (Node, bool) {
return n, false
}
n.Table = node.(*TableName)
if n.LockAlg != nil {
node, ok := n.LockAlg.Accept(v)
if !ok {
return n, false
}
n.LockAlg = node.(*IndexLockAndAlgorithm)
}
return v.Leave(n)
}
......@@ -1442,6 +1543,7 @@ const (
TableOptionDelayKeyWrite
TableOptionRowFormat
TableOptionStatsPersistent
TableOptionStatsAutoRecalc
TableOptionShardRowID
TableOptionPreSplitRegion
TableOptionPackKeys
......@@ -1451,6 +1553,10 @@ const (
TableOptionIndexDirectory
TableOptionStorageMedia
TableOptionStatsSamplePages
TableOptionSecondaryEngine
TableOptionSecondaryEngineNull
TableOptionInsertMethod
TableOptionTableCheckSum
)
// RowFormat types
......@@ -1486,6 +1592,7 @@ const (
// TableOption is used for parsing table option from SQL.
type TableOption struct {
Tp TableOptionType
Default bool
StrValue string
UintValue uint64
}
......@@ -1593,6 +1700,14 @@ func (n *TableOption) Restore(ctx *RestoreCtx) error {
ctx.WritePlain("= ")
ctx.WriteKeyWord("DEFAULT")
ctx.WritePlain(" /* TableOptionStatsPersistent is not supported */ ")
case TableOptionStatsAutoRecalc:
ctx.WriteKeyWord("STATS_AUTO_RECALC ")
ctx.WritePlain("= ")
if n.Default {
ctx.WriteKeyWord("DEFAULT")
} else {
ctx.WritePlainf("%d", n.UintValue)
}
case TableOptionShardRowID:
ctx.WriteKeyWord("SHARD_ROW_ID_BITS ")
ctx.WritePlainf("= %d", n.UintValue)
......@@ -1626,11 +1741,27 @@ func (n *TableOption) Restore(ctx *RestoreCtx) error {
case TableOptionStatsSamplePages:
ctx.WriteKeyWord("STATS_SAMPLE_PAGES ")
ctx.WritePlain("= ")
if n.UintValue == 0 {
if n.Default {
ctx.WriteKeyWord("DEFAULT")
} else {
ctx.WritePlainf("%d", n.UintValue)
}
case TableOptionSecondaryEngine:
ctx.WriteKeyWord("SECONDARY_ENGINE ")
ctx.WritePlain("= ")
ctx.WriteString(n.StrValue)
case TableOptionSecondaryEngineNull:
ctx.WriteKeyWord("SECONDARY_ENGINE ")
ctx.WritePlain("= ")
ctx.WriteKeyWord("NULL")
case TableOptionInsertMethod:
ctx.WriteKeyWord("INSERT_METHOD ")
ctx.WritePlain("= ")
ctx.WriteString(n.StrValue)
case TableOptionTableCheckSum:
ctx.WriteKeyWord("TABLE_CHECKSUM ")
ctx.WritePlain("= ")
ctx.WritePlainf("%d", n.UintValue)
default:
return errors.Errorf("invalid TableOption: %d", n.Tp)
}
......@@ -1705,6 +1836,7 @@ const (
AlterTableDropForeignKey
AlterTableModifyColumn
AlterTableChangeColumn
AlterTableRenameColumn
AlterTableRenameTable
AlterTableAlterColumn
AlterTableLock
......@@ -1719,6 +1851,20 @@ const (
AlterTableEnableKeys
AlterTableDisableKeys
AlterTableRemovePartitioning
AlterTableWithValidation
AlterTableWithoutValidation
AlterTableSecondaryLoad
AlterTableSecondaryUnload
AlterTableRebuildPartition
AlterTableReorganizePartition
AlterTableCheckPartitions
AlterTableExchangePartition
AlterTableOptimizePartition
AlterTableRepairPartition
AlterTableImportPartitionTablespace
AlterTableDiscardPartitionTablespace
AlterTableAlterCheck
AlterTableDropCheck
// TODO: Add more actions
)
......@@ -1749,29 +1895,29 @@ const (
LockTypeExclusive
)
// AlterAlgorithm is the algorithm of the DDL operations.
// AlgorithmType is the algorithm of the DDL operations.
// See https://dev.mysql.com/doc/refman/8.0/en/alter-table.html#alter-table-performance.
type AlterAlgorithm byte
type AlgorithmType byte
// DDL alter algorithms.
// DDL algorithms.
// For now, TiDB only supported inplace and instance algorithms. If the user specify `copy`,
// will get an error.
const (
AlterAlgorithmDefault AlterAlgorithm = iota
AlterAlgorithmCopy
AlterAlgorithmInplace
AlterAlgorithmInstant
AlgorithmTypeDefault AlgorithmType = iota
AlgorithmTypeCopy
AlgorithmTypeInplace
AlgorithmTypeInstant
)
func (a AlterAlgorithm) String() string {
func (a AlgorithmType) String() string {
switch a {
case AlterAlgorithmDefault:
case AlgorithmTypeDefault:
return "DEFAULT"
case AlterAlgorithmCopy:
case AlgorithmTypeCopy:
return "COPY"
case AlterAlgorithmInplace:
case AlgorithmTypeInplace:
return "INPLACE"
case AlterAlgorithmInstant:
case AlgorithmTypeInstant:
return "INSTANT"
default:
return "DEFAULT"
......@@ -1790,6 +1936,9 @@ type AlterTableSpec struct {
// see https://mariadb.com/kb/en/library/alter-table/
IfNotExists bool
NoWriteToBinlog bool
OnAllPartitions bool
Tp AlterTableType
Name string
Constraint *Constraint
......@@ -1797,15 +1946,17 @@ type AlterTableSpec struct {
NewTable *TableName
NewColumns []*ColumnDef
OldColumnName *ColumnName
NewColumnName *ColumnName
Position *ColumnPosition
LockType LockType
Algorithm AlterAlgorithm
Algorithm AlgorithmType
Comment string
FromKey model.CIStr
ToKey model.CIStr
Partition *PartitionOptions
PartitionNames []model.CIStr
PartDefinitions []*PartitionDefinition
WithValidation bool
Num uint64
}
......@@ -1918,6 +2069,15 @@ func (n *AlterTableSpec) Restore(ctx *RestoreCtx) error {
if err := n.Position.Restore(ctx); err != nil {
return errors.Annotate(err, "An error occurred while restore AlterTableSpec.Position")
}
case AlterTableRenameColumn:
ctx.WriteKeyWord("RENAME COLUMN ")
if err := n.OldColumnName.Restore(ctx); err != nil {
return errors.Annotate(err, "An error occurred while restore AlterTableSpec.OldColumnName")
}
ctx.WriteKeyWord(" TO ")
if err := n.NewColumnName.Restore(ctx); err != nil {
return errors.Annotate(err, "An error occurred while restore AlterTableSpec.NewColumnName")
}
case AlterTableRenameTable:
ctx.WriteKeyWord("RENAME AS ")
if err := n.NewTable.Restore(ctx); err != nil {
......@@ -1967,6 +2127,9 @@ func (n *AlterTableSpec) Restore(ctx *RestoreCtx) error {
if n.IfNotExists {
ctx.WriteKeyWord(" IF NOT EXISTS")
}
if n.NoWriteToBinlog {
ctx.WriteKeyWord(" NO_WRITE_TO_BINLOG")
}
if n.PartDefinitions != nil {
ctx.WritePlain(" (")
for i, def := range n.PartDefinitions {
......@@ -1984,6 +2147,9 @@ func (n *AlterTableSpec) Restore(ctx *RestoreCtx) error {
}
case AlterTableCoalescePartitions:
ctx.WriteKeyWord("COALESCE PARTITION ")
if n.NoWriteToBinlog {
ctx.WriteKeyWord("NO_WRITE_TO_BINLOG ")
}
ctx.WritePlainf("%d", n.Num)
case AlterTableDropPartition:
ctx.WriteKeyWord("DROP PARTITION ")
......@@ -1998,12 +2164,84 @@ func (n *AlterTableSpec) Restore(ctx *RestoreCtx) error {
}
case AlterTableTruncatePartition:
ctx.WriteKeyWord("TRUNCATE PARTITION ")
if n.OnAllPartitions {
ctx.WriteKeyWord("ALL")
return nil
}
for i, name := range n.PartitionNames {
if i != 0 {
ctx.WritePlain(",")
}
ctx.WriteName(name.O)
}
case AlterTableCheckPartitions:
ctx.WriteKeyWord("CHECK PARTITION ")
if n.OnAllPartitions {
ctx.WriteKeyWord("ALL")
return nil
}
for i, name := range n.PartitionNames {
if i != 0 {
ctx.WritePlain(",")
}
ctx.WriteName(name.O)
}
case AlterTableOptimizePartition:
ctx.WriteKeyWord("OPTIMIZE PARTITION ")
if n.NoWriteToBinlog {
ctx.WriteKeyWord("NO_WRITE_TO_BINLOG ")
}
if n.OnAllPartitions {
ctx.WriteKeyWord("ALL")
return nil
}
for i, name := range n.PartitionNames {
if i != 0 {
ctx.WritePlain(",")
}
ctx.WriteName(name.O)
}
case AlterTableRepairPartition:
ctx.WriteKeyWord("REPAIR PARTITION ")
if n.NoWriteToBinlog {
ctx.WriteKeyWord("NO_WRITE_TO_BINLOG ")
}
if n.OnAllPartitions {
ctx.WriteKeyWord("ALL")
return nil
}
for i, name := range n.PartitionNames {
if i != 0 {
ctx.WritePlain(",")
}
ctx.WriteName(name.O)
}
case AlterTableImportPartitionTablespace:
ctx.WriteKeyWord("IMPORT PARTITION ")
if n.OnAllPartitions {
ctx.WriteKeyWord("ALL")
} else {
for i, name := range n.PartitionNames {
if i != 0 {
ctx.WritePlain(",")
}
ctx.WriteName(name.O)
}
}
ctx.WriteKeyWord(" TABLESPACE")
case AlterTableDiscardPartitionTablespace:
ctx.WriteKeyWord("DISCARD PARTITION ")
if n.OnAllPartitions {
ctx.WriteKeyWord("ALL")
} else {
for i, name := range n.PartitionNames {
if i != 0 {
ctx.WritePlain(",")
}
ctx.WriteName(name.O)
}
}
ctx.WriteKeyWord(" TABLESPACE")
case AlterTablePartition:
if err := n.Partition.Restore(ctx); err != nil {
return errors.Annotate(err, "An error occurred while restore AlterTableSpec.Partition")
......@@ -2014,6 +2252,76 @@ func (n *AlterTableSpec) Restore(ctx *RestoreCtx) error {
ctx.WriteKeyWord("DISABLE KEYS")
case AlterTableRemovePartitioning:
ctx.WriteKeyWord("REMOVE PARTITIONING")
case AlterTableWithValidation:
ctx.WriteKeyWord("WITH VALIDATION")
case AlterTableWithoutValidation:
ctx.WriteKeyWord("WITHOUT VALIDATION")
case AlterTableRebuildPartition:
ctx.WriteKeyWord("REBUILD PARTITION ")
if n.NoWriteToBinlog {
ctx.WriteKeyWord("NO_WRITE_TO_BINLOG ")
}
if n.OnAllPartitions {
ctx.WriteKeyWord("ALL")
return nil
}
for i, name := range n.PartitionNames {
if i != 0 {
ctx.WritePlain(",")
}
ctx.WriteName(name.O)
}
case AlterTableReorganizePartition:
ctx.WriteKeyWord("REORGANIZE PARTITION")
if n.NoWriteToBinlog {
ctx.WriteKeyWord(" NO_WRITE_TO_BINLOG")
}
if n.OnAllPartitions {
return nil
}
for i, name := range n.PartitionNames {
if i != 0 {
ctx.WritePlain(",")
} else {
ctx.WritePlain(" ")
}
ctx.WriteName(name.O)
}
ctx.WriteKeyWord(" INTO ")
if n.PartDefinitions != nil {
ctx.WritePlain("(")
for i, def := range n.PartDefinitions {
if i != 0 {
ctx.WritePlain(", ")
}
if err := def.Restore(ctx); err != nil {
return errors.Annotatef(err, "An error occurred while restore AlterTableSpec.PartDefinitions[%d]", i)
}
}
ctx.WritePlain(")")
}
case AlterTableExchangePartition:
ctx.WriteKeyWord("EXCHANGE PARTITION ")
ctx.WriteName(n.PartitionNames[0].O)
ctx.WriteKeyWord(" WITH TABLE ")
n.NewTable.Restore(ctx)
if !n.WithValidation {
ctx.WriteKeyWord(" WITHOUT VALIDATION")
}
case AlterTableSecondaryLoad:
ctx.WriteKeyWord("SECONDARY_LOAD")
case AlterTableSecondaryUnload:
ctx.WriteKeyWord("SECONDARY_UNLOAD")
case AlterTableAlterCheck:
ctx.WriteKeyWord("ALTER CHECK ")
ctx.WriteName(n.Constraint.Name)
if n.Constraint.Enforced == false {
ctx.WriteKeyWord(" NOT")
}
ctx.WriteKeyWord(" ENFORCED")
case AlterTableDropCheck:
ctx.WriteKeyWord("DROP CHECK ")
ctx.WriteName(n.Constraint.Name)
default:
// TODO: not support
ctx.WritePlainf(" /* AlterTableType(%d) is not supported */ ", n.Tp)
......@@ -2432,7 +2740,7 @@ type PartitionMethod struct {
// RANGE COLUMNS and LIST COLUMNS types
ColumnNames []*ColumnName
// Unit is a time unit used as argument of SYSTEM_TIME type
Unit ValueExpr
Unit TimeUnitType
// Limit is a row count used as argument of the SYSTEM_TIME type
Limit uint64
......@@ -2449,20 +2757,13 @@ func (n *PartitionMethod) Restore(ctx *RestoreCtx) error {
switch {
case n.Tp == model.PartitionTypeSystemTime:
if n.Expr != nil && n.Unit != nil {
if n.Expr != nil && n.Unit != TimeUnitInvalid {
ctx.WriteKeyWord(" INTERVAL ")
if err := n.Expr.Restore(ctx); err != nil {
return errors.Annotate(err, "An error occurred while restore PartitionMethod.Expr")
}
// Here the Unit string should not be quoted.
// TODO: This is a temporary workaround that should be changed once something like "Keyword Expression" is implemented.
var sb strings.Builder
if err := n.Unit.Restore(NewRestoreCtx(0, &sb)); err != nil {
return errors.Annotate(err, "An error occurred while restore PartitionMethod.Unit")
}
ctx.WritePlain(" ")
ctx.WriteKeyWord(sb.String())
ctx.WriteKeyWord(n.Unit.String())
}
case n.Expr != nil:
......@@ -2512,13 +2813,6 @@ func (n *PartitionMethod) acceptInPlace(v Visitor) bool {
}
n.ColumnNames[i] = newColName.(*ColumnName)
}
if n.Unit != nil {
unit, ok := n.Unit.Accept(v)
if !ok {
return false
}
n.Unit = unit.(ValueExpr)
}
return true
}
......
......@@ -14,8 +14,6 @@
package ast
import (
"strings"
"github.com/pingcap/errors"
"github.com/pingcap/parser/auth"
. "github.com/pingcap/parser/format"
......@@ -2368,7 +2366,7 @@ type FrameBound struct {
Expr ExprNode
// `Unit` is used to indicate the units in which the `Expr` should be interpreted.
// For example: '2:30' MINUTE_SECOND.
Unit ExprNode
Unit TimeUnitType
}
// Restore implements Node interface.
......@@ -2380,7 +2378,7 @@ func (n *FrameBound) Restore(ctx *RestoreCtx) error {
case CurrentRow:
ctx.WriteKeyWord("CURRENT ROW")
case Preceding, Following:
if n.Unit != nil {
if n.Unit != TimeUnitInvalid {
ctx.WriteKeyWord("INTERVAL ")
}
if n.Expr != nil {
......@@ -2388,13 +2386,9 @@ func (n *FrameBound) Restore(ctx *RestoreCtx) error {
return errors.Annotate(err, "An error occurred while restore FrameBound.Expr")
}
}
if n.Unit != nil {
// Here the Unit string should not be quoted.
// TODO: This is a temporary workaround that should be changed once something like "Keyword Expression" is implemented.
var sb strings.Builder
n.Unit.Restore(NewRestoreCtx(0, &sb))
if n.Unit != TimeUnitInvalid {
ctx.WritePlain(" ")
ctx.WriteKeyWord(sb.String())
ctx.WriteKeyWord(n.Unit.String())
}
if n.Type == Preceding {
ctx.WriteKeyWord(" PRECEDING")
......@@ -2419,13 +2413,6 @@ func (n *FrameBound) Accept(v Visitor) (Node, bool) {
}
n.Expr = node.(ExprNode)
}
if n.Unit != nil {
node, ok := n.Unit.Accept(v)
if !ok {
return n, false
}
n.Unit = node.(ExprNode)
}
return v.Leave(n)
}
......
......@@ -345,26 +345,24 @@ func (n *FuncCallExpr) Restore(ctx *RestoreCtx) error {
ctx.WriteKeyWord(n.Args[1].GetType().Charset)
case "adddate", "subdate", "date_add", "date_sub":
if err := n.Args[0].Restore(ctx); err != nil {
return errors.Annotatef(err, "An error occurred while restore FuncCallExpr")
return errors.Annotatef(err, "An error occurred while restore FuncCallExpr.Args[0]")
}
ctx.WritePlain(", ")
ctx.WriteKeyWord("INTERVAL ")
if err := n.Args[1].Restore(ctx); err != nil {
return errors.Annotatef(err, "An error occurred while restore FuncCallExpr")
return errors.Annotatef(err, "An error occurred while restore FuncCallExpr.Args[1]")
}
ctx.WritePlain(" ")
ctx.WriteKeyWord(n.Args[2].(ValueExpr).GetString())
if err := n.Args[2].Restore(ctx); err != nil {
return errors.Annotatef(err, "An error occurred while restore FuncCallExpr.Args[2]")
}
case "extract":
ctx.WriteKeyWord(n.Args[0].(ValueExpr).GetString())
ctx.WriteKeyWord(" FROM ")
if err := n.Args[1].Restore(ctx); err != nil {
return errors.Annotatef(err, "An error occurred while restore FuncCallExpr")
if err := n.Args[0].Restore(ctx); err != nil {
return errors.Annotatef(err, "An error occurred while restore FuncCallExpr.Args[0]")
}
case "get_format":
ctx.WriteKeyWord(n.Args[0].(ValueExpr).GetString())
ctx.WritePlain(", ")
ctx.WriteKeyWord(" FROM ")
if err := n.Args[1].Restore(ctx); err != nil {
return errors.Annotatef(err, "An error occurred while restore FuncCallExpr")
return errors.Annotatef(err, "An error occurred while restore FuncCallExpr.Args[1]")
}
case "position":
if err := n.Args[0].Restore(ctx); err != nil {
......@@ -376,47 +374,26 @@ func (n *FuncCallExpr) Restore(ctx *RestoreCtx) error {
}
case "trim":
switch len(n.Args) {
case 1:
if err := n.Args[0].Restore(ctx); err != nil {
return errors.Annotatef(err, "An error occurred while restore FuncCallExpr")
}
case 2:
if err := n.Args[1].Restore(ctx); err != nil {
return errors.Annotatef(err, "An error occurred while restore FuncCallExpr")
}
ctx.WriteKeyWord(" FROM ")
if err := n.Args[0].Restore(ctx); err != nil {
return errors.Annotatef(err, "An error occurred while restore FuncCallExpr")
}
case 3:
switch fmt.Sprint(n.Args[2].(ValueExpr).GetValue()) {
case "3":
ctx.WriteKeyWord("TRAILING ")
case "2":
ctx.WriteKeyWord("LEADING ")
case "0", "1":
ctx.WriteKeyWord("BOTH ")
if err := n.Args[2].Restore(ctx); err != nil {
return errors.Annotatef(err, "An error occurred while restore FuncCallExpr.Args[2]")
}
ctx.WritePlain(" ")
fallthrough
case 2:
if n.Args[1].(ValueExpr).GetValue() != nil {
if err := n.Args[1].Restore(ctx); err != nil {
return errors.Annotatef(err, "An error occurred while restore FuncCallExpr")
return errors.Annotatef(err, "An error occurred while restore FuncCallExpr.Args[1]")
}
ctx.WritePlain(" ")
}
ctx.WriteKeyWord("FROM ")
fallthrough
case 1:
if err := n.Args[0].Restore(ctx); err != nil {
return errors.Annotatef(err, "An error occurred while restore FuncCallExpr")
return errors.Annotatef(err, "An error occurred while restore FuncCallExpr.Args[0]")
}
}
case "timestampdiff", "timestampadd":
ctx.WriteKeyWord(n.Args[0].(ValueExpr).GetString())
for i := 1; i < len(n.Args); {
ctx.WritePlain(", ")
if err := n.Args[i].Restore(ctx); err != nil {
return errors.Annotatef(err, "An error occurred while restore FuncCallExpr")
}
i++
}
default:
for i, argv := range n.Args {
if i != 0 {
......@@ -452,12 +429,7 @@ func (n *FuncCallExpr) specialFormatArgs(w io.Writer) bool {
n.Args[0].Format(w)
fmt.Fprint(w, ", INTERVAL ")
n.Args[1].Format(w)
fmt.Fprintf(w, " %s", n.Args[2].(ValueExpr).GetDatumString())
return true
case TimestampAdd, TimestampDiff:
fmt.Fprintf(w, "%s, ", n.Args[0].(ValueExpr).GetDatumString())
n.Args[1].Format(w)
fmt.Fprint(w, ", ")
fmt.Fprint(w, " ")
n.Args[2].Format(w)
return true
}
......@@ -583,6 +555,47 @@ const (
TrimTrailing
)
// String implements fmt.Stringer interface.
// It returns the SQL keyword for the direction, or "" for an
// unrecognized value.
func (d TrimDirectionType) String() string {
	switch d {
	case TrimLeading:
		return "LEADING"
	case TrimTrailing:
		return "TRAILING"
	case TrimBoth, TrimBothDefault:
		return "BOTH"
	}
	return ""
}
// TrimDirectionExpr is an expression representing the trim direction used in the TRIM() function.
type TrimDirectionExpr struct {
	exprNode
	// Direction is the trim direction (BOTH, LEADING, or TRAILING; see
	// TrimDirectionType.String for the rendered keywords).
	Direction TrimDirectionType
}
// Restore implements Node interface.
// The direction is written as a bare, unquoted keyword.
func (n *TrimDirectionExpr) Restore(ctx *RestoreCtx) error {
	word := n.Direction.String()
	ctx.WriteKeyWord(word)
	return nil
}
// Format the ExprNode into a Writer.
func (n *TrimDirectionExpr) Format(w io.Writer) {
	// Write the direction keyword verbatim; write errors are ignored,
	// matching the other Format implementations in this file.
	io.WriteString(w, n.Direction.String())
}
// Accept implements Node Accept interface.
// TrimDirectionExpr has no child nodes, so only Enter/Leave are invoked.
func (n *TrimDirectionExpr) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if !skipChildren {
		// Nothing to descend into; pass the original node to Leave.
		newNode = n
	}
	return v.Leave(newNode)
}
// DateArithType is type for DateArith type.
type DateArithType byte
......@@ -799,3 +812,179 @@ func (n *WindowFuncExpr) Accept(v Visitor) (Node, bool) {
n.Spec = *node.(*WindowSpec)
return v.Leave(n)
}
// TimeUnitType is the type for time and timestamp units.
// The zero value is TimeUnitInvalid, i.e. "no unit specified".
type TimeUnitType int

const (
	// TimeUnitInvalid is a placeholder for an invalid time or timestamp unit
	TimeUnitInvalid TimeUnitType = iota
	// TimeUnitMicrosecond is the time or timestamp unit MICROSECOND.
	TimeUnitMicrosecond
	// TimeUnitSecond is the time or timestamp unit SECOND.
	TimeUnitSecond
	// TimeUnitMinute is the time or timestamp unit MINUTE.
	TimeUnitMinute
	// TimeUnitHour is the time or timestamp unit HOUR.
	TimeUnitHour
	// TimeUnitDay is the time or timestamp unit DAY.
	TimeUnitDay
	// TimeUnitWeek is the time or timestamp unit WEEK.
	TimeUnitWeek
	// TimeUnitMonth is the time or timestamp unit MONTH.
	TimeUnitMonth
	// TimeUnitQuarter is the time or timestamp unit QUARTER.
	TimeUnitQuarter
	// TimeUnitYear is the time or timestamp unit YEAR.
	TimeUnitYear
	// TimeUnitSecondMicrosecond is the time unit SECOND_MICROSECOND.
	TimeUnitSecondMicrosecond
	// TimeUnitMinuteMicrosecond is the time unit MINUTE_MICROSECOND.
	TimeUnitMinuteMicrosecond
	// TimeUnitMinuteSecond is the time unit MINUTE_SECOND.
	TimeUnitMinuteSecond
	// TimeUnitHourMicrosecond is the time unit HOUR_MICROSECOND.
	TimeUnitHourMicrosecond
	// TimeUnitHourSecond is the time unit HOUR_SECOND.
	TimeUnitHourSecond
	// TimeUnitHourMinute is the time unit HOUR_MINUTE.
	TimeUnitHourMinute
	// TimeUnitDayMicrosecond is the time unit DAY_MICROSECOND.
	TimeUnitDayMicrosecond
	// TimeUnitDaySecond is the time unit DAY_SECOND.
	TimeUnitDaySecond
	// TimeUnitDayMinute is the time unit DAY_MINUTE.
	TimeUnitDayMinute
	// TimeUnitDayHour is the time unit DAY_HOUR.
	TimeUnitDayHour
	// TimeUnitYearMonth is the time unit YEAR_MONTH.
	TimeUnitYearMonth
)
// String implements fmt.Stringer interface.
// It returns the SQL keyword of the unit, or "" for TimeUnitInvalid and
// any out-of-range value.
func (unit TimeUnitType) String() string {
	// Keyed array: positions follow the iota order of the TimeUnitType
	// constants, so TimeUnitInvalid (index 0) maps to the empty string.
	names := [...]string{
		TimeUnitInvalid:           "",
		TimeUnitMicrosecond:       "MICROSECOND",
		TimeUnitSecond:            "SECOND",
		TimeUnitMinute:            "MINUTE",
		TimeUnitHour:              "HOUR",
		TimeUnitDay:               "DAY",
		TimeUnitWeek:              "WEEK",
		TimeUnitMonth:             "MONTH",
		TimeUnitQuarter:           "QUARTER",
		TimeUnitYear:              "YEAR",
		TimeUnitSecondMicrosecond: "SECOND_MICROSECOND",
		TimeUnitMinuteMicrosecond: "MINUTE_MICROSECOND",
		TimeUnitMinuteSecond:      "MINUTE_SECOND",
		TimeUnitHourMicrosecond:   "HOUR_MICROSECOND",
		TimeUnitHourSecond:        "HOUR_SECOND",
		TimeUnitHourMinute:        "HOUR_MINUTE",
		TimeUnitDayMicrosecond:    "DAY_MICROSECOND",
		TimeUnitDaySecond:         "DAY_SECOND",
		TimeUnitDayMinute:         "DAY_MINUTE",
		TimeUnitDayHour:           "DAY_HOUR",
		TimeUnitYearMonth:         "YEAR_MONTH",
	}
	if unit < TimeUnitInvalid || int(unit) >= len(names) {
		return ""
	}
	return names[unit]
}
// TimeUnitExpr is an expression representing a time or timestamp unit.
// It restores as the bare, unquoted unit keyword (see Restore).
type TimeUnitExpr struct {
	exprNode
	// Unit is the time or timestamp unit.
	Unit TimeUnitType
}
// Restore implements Node interface.
// The unit is written as a bare keyword, e.g. DAY or HOUR_MINUTE.
func (n *TimeUnitExpr) Restore(ctx *RestoreCtx) error {
	keyword := n.Unit.String()
	ctx.WriteKeyWord(keyword)
	return nil
}
// Format the ExprNode into a Writer.
func (n *TimeUnitExpr) Format(w io.Writer) {
	// Emit the unit keyword verbatim; write errors are ignored to match
	// sibling Format implementations.
	io.WriteString(w, n.Unit.String())
}
// Accept implements Node Accept interface.
// TimeUnitExpr is a leaf node, so the visitor only enters and leaves it.
func (n *TimeUnitExpr) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if !skipChildren {
		// No children; hand the original node to Leave.
		newNode = n
	}
	return v.Leave(newNode)
}
// GetFormatSelectorType is the type for the first argument of GET_FORMAT() function.
// Values start at 1 (iota + 1), so the zero value denotes "no selector".
type GetFormatSelectorType int

const (
	// GetFormatSelectorDate is the GET_FORMAT selector DATE.
	GetFormatSelectorDate GetFormatSelectorType = iota + 1
	// GetFormatSelectorTime is the GET_FORMAT selector TIME.
	GetFormatSelectorTime
	// GetFormatSelectorDatetime is the GET_FORMAT selector DATETIME and TIMESTAMP.
	GetFormatSelectorDatetime
)
// GetFormatSelectorExpr is an expression used as the first argument of GET_FORMAT() function.
// It restores as the bare selector keyword (DATE, TIME, or DATETIME).
type GetFormatSelectorExpr struct {
	exprNode
	// Selector is the GET_FORMAT() selector.
	Selector GetFormatSelectorType
}
// String implements fmt.Stringer interface.
// Unknown selectors (including the zero value) stringify as "".
func (selector GetFormatSelectorType) String() string {
	switch selector {
	case GetFormatSelectorDatetime:
		return "DATETIME"
	case GetFormatSelectorTime:
		return "TIME"
	case GetFormatSelectorDate:
		return "DATE"
	}
	return ""
}
// Restore implements Node interface.
// The selector is written as a bare keyword (DATE, TIME, or DATETIME).
func (n *GetFormatSelectorExpr) Restore(ctx *RestoreCtx) error {
	keyword := n.Selector.String()
	ctx.WriteKeyWord(keyword)
	return nil
}
// Format the ExprNode into a Writer.
func (n *GetFormatSelectorExpr) Format(w io.Writer) {
	// Emit the selector keyword verbatim; write errors are ignored to
	// match sibling Format implementations.
	io.WriteString(w, n.Selector.String())
}
// Accept implements Node Accept interface.
// GetFormatSelectorExpr is a leaf node; only Enter/Leave are invoked.
func (n *GetFormatSelectorExpr) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if !skipChildren {
		// No children to traverse; pass the original node to Leave.
		newNode = n
	}
	return v.Leave(newNode)
}
......@@ -317,6 +317,7 @@ func (n *DeallocateStmt) Accept(v Visitor) (Node, bool) {
// Prepared represents a prepared statement.
type Prepared struct {
Stmt StmtNode
StmtType string
Params []ParamMarkerExpr
SchemaVersion int64
UseCache bool
......@@ -1055,7 +1056,7 @@ func (p *PasswordOrLockOption) Restore(ctx *RestoreCtx) error {
case PasswordExpireNever:
ctx.WriteKeyWord("PASSWORD EXPIRE NEVER")
case PasswordExpireInterval:
ctx.WriteKeyWord("PASSWORD EXPIRE NEVER")
ctx.WriteKeyWord("PASSWORD EXPIRE INTERVAL")
ctx.WritePlainf(" %d", p.Count)
ctx.WriteKeyWord(" DAY")
case Lock:
......@@ -1165,6 +1166,9 @@ type AlterUserStmt struct {
IfExists bool
CurrentAuth *AuthOption
Specs []*UserSpec
TslOptions []*TslOption
ResourceOptions []*ResourceOption
PasswordOrLockOptions []*PasswordOrLockOption
}
// Restore implements Node interface.
......@@ -1188,6 +1192,40 @@ func (n *AlterUserStmt) Restore(ctx *RestoreCtx) error {
return errors.Annotatef(err, "An error occurred while restore AlterUserStmt.Specs[%d]", i)
}
}
tslOptionLen := len(n.TslOptions)
if tslOptionLen != 0 {
ctx.WriteKeyWord(" REQUIRE ")
}
// Restore `tslOptions` reversely to keep order the same with original sql
for i := tslOptionLen; i > 0; i-- {
if i != tslOptionLen {
ctx.WriteKeyWord(" AND ")
}
if err := n.TslOptions[i-1].Restore(ctx); err != nil {
return errors.Annotatef(err, "An error occurred while restore AlterUserStmt.TslOptions[%d]", i)
}
}
if len(n.ResourceOptions) != 0 {
ctx.WriteKeyWord(" WITH")
}
for i, v := range n.ResourceOptions {
ctx.WritePlain(" ")
if err := v.Restore(ctx); err != nil {
return errors.Annotatef(err, "An error occurred while restore AlterUserStmt.ResourceOptions[%d]", i)
}
}
for i, v := range n.PasswordOrLockOptions {
ctx.WritePlain(" ")
if err := v.Restore(ctx); err != nil {
return errors.Annotatef(err, "An error occurred while restore AlterUserStmt.PasswordOrLockOptions[%d]", i)
}
}
return nil
}
......@@ -1988,26 +2026,81 @@ type TableOptimizerHint struct {
// Table hints has no schema info
// It allows only table name or alias (if table has an alias)
HintName model.CIStr
Tables []model.CIStr
// QBName is the default effective query block of this hint.
QBName model.CIStr
Tables []HintTable
Indexes []model.CIStr
// Statement Execution Time Optimizer Hints
// See https://dev.mysql.com/doc/refman/5.7/en/optimizer-hints.html#optimizer-hints-execution-time
MaxExecutionTime uint64
MemoryQuota uint64
QueryType model.CIStr
HintFlag bool
}
// HintTable is table in the hint. It may have query block info.
type HintTable struct {
	// TableName is the hinted table's name.
	TableName model.CIStr
	// QBName, when non-empty, is the query block the table belongs to;
	// it restores as "@<name>" after the table name (see Restore).
	QBName model.CIStr
}
// Restore writes the hint table as `name` or `name`@`qb_name` when a
// query block name is present. Unlike other Restore methods it cannot
// fail, so it returns nothing.
func (ht *HintTable) Restore(ctx *RestoreCtx) {
	ctx.WriteName(ht.TableName.String())
	if len(ht.QBName.L) == 0 {
		return
	}
	ctx.WriteKeyWord("@")
	ctx.WriteName(ht.QBName.String())
}
// Restore implements Node interface.
func (n *TableOptimizerHint) Restore(ctx *RestoreCtx) error {
ctx.WriteKeyWord(n.HintName.String())
ctx.WritePlain("(")
if n.QBName.L != "" {
if n.HintName.L != "qb_name" {
ctx.WriteKeyWord("@")
}
ctx.WriteName(n.QBName.String())
}
// Hints without args except query block.
switch n.HintName.L {
case "hash_agg", "stream_agg", "read_consistent_replica", "no_index_merge", "qb_name":
ctx.WritePlain(")")
return nil
}
if n.QBName.L != "" {
ctx.WritePlain(" ")
}
// Hints with args except query block.
switch n.HintName.L {
case "max_execution_time":
ctx.WritePlainf("%d", n.MaxExecutionTime)
case "tidb_hj", "tidb_smj", "tidb_inlj":
case "tidb_hj", "tidb_smj", "tidb_inlj", "hash_join", "sm_join", "inl_join":
for i, table := range n.Tables {
if i != 0 {
ctx.WritePlain(", ")
}
ctx.WriteName(table.String())
table.Restore(ctx)
}
case "index", "use_index_merge":
n.Tables[0].Restore(ctx)
ctx.WritePlain(" ")
for i, index := range n.Indexes {
if i != 0 {
ctx.WritePlain(", ")
}
ctx.WriteName(index.String())
}
case "use_toja", "enable_plan_cache":
if n.HintFlag {
ctx.WritePlain("TRUE")
} else {
ctx.WritePlain("FALSE")
}
case "query_type":
ctx.WriteKeyWord(n.QueryType.String())
case "memory_quota":
ctx.WritePlainf("%d M", n.MemoryQuota)
}
ctx.WritePlain(")")
return nil
......
......@@ -48,6 +48,7 @@ const (
AnalyzeOptNumTopN
AnalyzeOptCMSketchDepth
AnalyzeOptCMSketchWidth
AnalyzeOptNumSamples
)
// AnalyzeOptionString stores the string form of analyze options.
......@@ -56,6 +57,7 @@ var AnalyzeOptionString = map[AnalyzeOptionType]string{
AnalyzeOptNumTopN: "TOPN",
AnalyzeOptCMSketchWidth: "CMSKETCH WIDTH",
AnalyzeOptCMSketchDepth: "CMSKETCH DEPTH",
AnalyzeOptNumSamples: "SAMPLES",
}
// AnalyzeOpt stores the analyze option type and value.
......
......@@ -188,6 +188,7 @@ var tokenMap = map[string]int{
"COLLATE": collate,
"COLLATION": collation,
"COLUMN": column,
"COLUMN_FORMAT": columnFormat,
"COLUMNS": columns,
"COMMENT": comment,
"COMMIT": commit,
......@@ -238,6 +239,7 @@ var tokenMap = map[string]int{
"DESCRIBE": describe,
"DIRECTORY": directory,
"DISABLE": disable,
"DISCARD": discard,
"DISK": disk,
"DISTINCT": distinct,
"DISTINCTROW": distinct,
......@@ -251,7 +253,9 @@ var tokenMap = map[string]int{
"DYNAMIC": dynamic,
"ELSE": elseKwd,
"ENABLE": enable,
"ENABLE_PLAN_CACHE": hintEnablePlanCache,
"ENCLOSED": enclosed,
"ENCRYPTION": encryption,
"END": end,
"ENFORCED": enforced,
"ENGINE": engine,
......@@ -263,6 +267,7 @@ var tokenMap = map[string]int{
"EVENTS": events,
"EXCLUSIVE": exclusive,
"EXCEPT": except,
"EXCHANGE": exchange,
"EXECUTE": execute,
"EXISTS": exists,
"EXPIRE": expire,
......@@ -292,6 +297,8 @@ var tokenMap = map[string]int{
"GROUP": group,
"GROUP_CONCAT": groupConcat,
"HASH": hash,
"HASH_AGG": hintHASHAGG,
"HASH_JOIN": hintHJ,
"HAVING": having,
"HIGH_PRIORITY": highPriority,
"HISTORY": history,
......@@ -302,15 +309,18 @@ var tokenMap = map[string]int{
"IDENTIFIED": identified,
"IF": ifKwd,
"IGNORE": ignore,
"IMPORT": importKwd,
"IN": in,
"INCREMENTAL": incremental,
"INDEX": index,
"INDEXES": indexes,
"INFILE": infile,
"INL_JOIN": hintINLJ,
"INNER": inner,
"INPLACE": inplace,
"INSTANT": instant,
"INSERT": insert,
"INSERT_METHOD": insertMethod,
"INT": intType,
"INT1": int1Type,
"INT2": int2Type,
......@@ -327,6 +337,7 @@ var tokenMap = map[string]int{
"IS": is,
"ISSUER": issuer,
"ISOLATION": isolation,
"USE_TOJA": hintUseToja,
"JOBS": jobs,
"JOB": job,
"JOIN": join,
......@@ -368,6 +379,7 @@ var tokenMap = map[string]int{
"MEDIUMINT": mediumIntType,
"MEDIUMTEXT": mediumtextType,
"MEMORY": memory,
"MEMORY_QUOTA": hintMemoryQuota,
"MERGE": merge,
"MICROSECOND": microsecond,
"MIN": min,
......@@ -385,6 +397,7 @@ var tokenMap = map[string]int{
"NEVER": never,
"NEXT_ROW_ID": next_row_id,
"NO": no,
"NO_INDEX_MERGE": hintNoIndexMerge,
"NO_WRITE_TO_BINLOG": noWriteToBinLog,
"NODE_ID": nodeID,
"NODE_STATE": nodeState,
......@@ -395,11 +408,15 @@ var tokenMap = map[string]int{
"NULL": null,
"NULLS": nulls,
"NUMERIC": numericType,
"NCHAR": ncharType,
"NVARCHAR": nvarcharType,
"OFFSET": offset,
"OLAP": hintOLAP,
"OLTP": hintOLTP,
"ON": on,
"ONLY": only,
"OPTIMISTIC": optimistic,
"OPTIMIZE": optimize,
"OPTION": option,
"OPTIONALLY": optionally,
"OR": or,
......@@ -426,15 +443,19 @@ var tokenMap = map[string]int{
"PROFILE": profile,
"PROFILES": profiles,
"PUMP": pump,
"QB_NAME": hintQBName,
"QUARTER": quarter,
"QUERY": query,
"QUERY_TYPE": hintQueryType,
"QUERIES": queries,
"QUICK": quick,
"SHARD_ROW_ID_BITS": shardRowIDBits,
"PRE_SPLIT_REGIONS": preSplitRegions,
"RANGE": rangeKwd,
"RECOVER": recover,
"REBUILD": rebuild,
"READ": read,
"READ_CONSISTENT_REPLICA": hintReadConsistentReplica,
"REAL": realType,
"RECENT": recent,
"REDUNDANT": redundant,
......@@ -444,6 +465,8 @@ var tokenMap = map[string]int{
"RELOAD": reload,
"REMOVE": remove,
"RENAME": rename,
"REORGANIZE": reorganize,
"REPAIR": repair,
"REPEAT": repeat,
"REPEATABLE": repeatable,
"REPLACE": replace,
......@@ -461,12 +484,18 @@ var tokenMap = map[string]int{
"ROW": row,
"ROW_COUNT": rowCount,
"ROW_FORMAT": rowFormat,
"RTREE": rtree,
"SAMPLES": samples,
"SCHEMA": database,
"SCHEMAS": databases,
"SECOND": second,
"SECONDARY_ENGINE": secondaryEngine,
"SECONDARY_LOAD": secondaryLoad,
"SECONDARY_UNLOAD": secondaryUnload,
"SECOND_MICROSECOND": secondMicrosecond,
"SECURITY": security,
"SELECT": selectKwd,
"SERIAL": serial,
"SERIALIZABLE": serializable,
"SESSION": session,
"SET": set,
......@@ -478,9 +507,11 @@ var tokenMap = map[string]int{
"SIMPLE": simple,
"SLAVE": slave,
"SLOW": slow,
"SM_JOIN": hintSMJ,
"SMALLINT": smallIntType,
"SNAPSHOT": snapshot,
"SOME": some,
"SPATIAL": spatial,
"SPLIT": split,
"SQL": sql,
"SQL_BIG_RESULT": sqlBigResult,
......@@ -489,6 +520,14 @@ var tokenMap = map[string]int{
"SQL_CALC_FOUND_ROWS": sqlCalcFoundRows,
"SQL_NO_CACHE": sqlNoCache,
"SQL_SMALL_RESULT": sqlSmallResult,
"SQL_TSI_DAY": sqlTsiDay,
"SQL_TSI_HOUR": sqlTsiHour,
"SQL_TSI_MINUTE": sqlTsiMinute,
"SQL_TSI_MONTH": sqlTsiMonth,
"SQL_TSI_QUARTER": sqlTsiQuarter,
"SQL_TSI_SECOND": sqlTsiSecond,
"SQL_TSI_WEEK": sqlTsiWeek,
"SQL_TSI_YEAR": sqlTsiYear,
"SOURCE": source,
"SSL": ssl,
"START": start,
......@@ -498,6 +537,7 @@ var tokenMap = map[string]int{
"STATS_HISTOGRAMS": statsHistograms,
"STATS_HEALTHY": statsHealthy,
"STATS_META": statsMeta,
"STATS_AUTO_RECALC": statsAutoRecalc,
"STATS_PERSISTENT": statsPersistent,
"STATS_SAMPLE_PAGES": statsSamplePages,
"STATUS": status,
......@@ -512,6 +552,7 @@ var tokenMap = map[string]int{
"STDDEV_SAMP": stddevSamp,
"STORED": stored,
"STRAIGHT_JOIN": straightJoin,
"STREAM_AGG": hintSTREAMAGG,
"SUBDATE": subDate,
"SUBJECT": subject,
"SUBPARTITION": subpartition,
......@@ -521,6 +562,7 @@ var tokenMap = map[string]int{
"SUM": sum,
"SUPER": super,
"TABLE": tableKwd,
"TABLE_CHECKSUM": tableChecksum,
"TABLES": tables,
"TABLESPACE": tablespace,
"TEMPORARY": temporary,
......@@ -530,11 +572,9 @@ var tokenMap = map[string]int{
"THAN": than,
"THEN": then,
"TIDB": tidb,
"TIDB_HJ": tidbHJ,
"TIDB_INLJ": tidbINLJ,
"TIDB_SMJ": tidbSMJ,
"TIDB_HASHAGG": tidbHASHAGG,
"TIDB_STREAMAGG": tidbSTREAMAGG,
"TIDB_HJ": hintHJ,
"TIDB_INLJ": hintINLJ,
"TIDB_SMJ": hintSMJ,
"TIME": timeType,
"TIMESTAMP": timestampType,
"TIMESTAMPADD": timestampAdd,
......@@ -573,17 +613,21 @@ var tokenMap = map[string]int{
"UPDATE": update,
"USAGE": usage,
"USE": use,
"USE_INDEX_MERGE": hintUseIndexMerge,
"USE_PLAN_CACHE": hintUsePlanCache,
"USER": user,
"USING": using,
"UTC_DATE": utcDate,
"UTC_TIME": utcTime,
"UTC_TIMESTAMP": utcTimestamp,
"VALIDATION": validation,
"VALUE": value,
"VALUES": values,
"VARBINARY": varbinaryType,
"VARCHAR": varcharType,
"VARIABLES": variables,
"VARIANCE": varPop,
"VARYING": varying,
"VAR_POP": varPop,
"VAR_SAMP": varSamp,
"VIEW": view,
......@@ -595,6 +639,7 @@ var tokenMap = map[string]int{
"WHERE": where,
"WIDTH": width,
"WITH": with,
"WITHOUT": without,
"WRITE": write,
"XOR": xor,
"X509": x509,
......@@ -667,6 +712,9 @@ var aliases = map[string]string{
"SCHEMAS": "DATABASES",
"DEC": "DECIMAL",
"SUBSTR": "SUBSTRING",
"TIDB_HJ": "HASH_JOIN",
"TIDB_INLJ": "INL_JOIN",
"TIDB_SMJ": "SM_JOIN",
}
func (s *Scanner) isTokenIdentifier(lit string, offset int) int {
......
......@@ -151,6 +151,7 @@ type Job struct {
Type ActionType `json:"type"`
SchemaID int64 `json:"schema_id"`
TableID int64 `json:"table_id"`
SchemaName string `json:"schema_name"`
State JobState `json:"state"`
Error *terror.Error `json:"err"`
// ErrorCount will be increased, every time we meet an error when running job.
......
......@@ -629,6 +629,8 @@ func (t IndexType) String() string {
return "BTREE"
case IndexTypeHash:
return "HASH"
case IndexTypeRtree:
return "RTREE"
default:
return ""
}
......@@ -639,6 +641,7 @@ const (
IndexTypeInvalid IndexType = iota
IndexTypeBtree
IndexTypeHash
IndexTypeRtree
)
// IndexInfo provides meta data describing a DB index.
......@@ -653,7 +656,7 @@ type IndexInfo struct {
Primary bool `json:"is_primary"` // Whether the index is primary key.
State SchemaState `json:"state"`
Comment string `json:"comment"` // Comment
Tp IndexType `json:"index_type"` // Index type: Btree or Hash
Tp IndexType `json:"index_type"` // Index type: Btree, Hash or Rtree
}
// Clone clones IndexInfo.
......
......@@ -80,6 +80,7 @@ var defaultLengthAndDecimalForCast = map[byte]lengthAndDecimal{
TypeDuration: {10, 0},
TypeLonglong: {22, 0},
TypeDouble: {22, -1},
TypeFloat: {12, -1},
TypeJSON: {4194304, 0}, // Flen differs.
}
......
因为 它太大了无法显示 source diff 。你可以改为 查看blob
......@@ -191,6 +191,7 @@ import (
numericType "NUMERIC"
nvarcharType "NVARCHAR"
on "ON"
optimize "OPTIMIZE"
option "OPTION"
optionally "OPTIONALLY"
or "OR"
......@@ -227,6 +228,7 @@ import (
set "SET"
show "SHOW"
smallIntType "SMALLINT"
spatial "SPATIAL"
sql "SQL"
sqlBigResult "SQL_BIG_RESULT"
sqlCalcFoundRows "SQL_CALC_FOUND_ROWS"
......@@ -260,6 +262,7 @@ import (
long "LONG"
varcharType "VARCHAR"
varbinaryType "VARBINARY"
varying "VARYING"
virtual "VIRTUAL"
when "WHEN"
where "WHERE"
......@@ -298,6 +301,7 @@ import (
client "CLIENT"
coalesce "COALESCE"
collation "COLLATION"
columnFormat "COLUMN_FORMAT"
columns "COLUMNS"
comment "COMMENT"
commit "COMMIT"
......@@ -319,11 +323,13 @@ import (
delayKeyWrite "DELAY_KEY_WRITE"
directory "DIRECTORY"
disable "DISABLE"
discard "DISCARD"
disk "DISK"
do "DO"
duplicate "DUPLICATE"
dynamic "DYNAMIC"
enable "ENABLE"
encryption "ENCRYPTION"
end "END"
engine "ENGINE"
engines "ENGINES"
......@@ -331,6 +337,7 @@ import (
event "EVENT"
events "EVENTS"
escape "ESCAPE"
exchange "EXCHANGE"
exclusive "EXCLUSIVE"
execute "EXECUTE"
expire "EXPIRE"
......@@ -348,6 +355,8 @@ import (
history "HISTORY"
hour "HOUR"
identified "IDENTIFIED"
importKwd "IMPORT"
insertMethod "INSERT_METHOD"
isolation "ISOLATION"
issuer "ISSUER"
incremental "INCREMENTAL"
......@@ -357,11 +366,11 @@ import (
ipc "IPC"
jsonType "JSON"
keyBlockSize "KEY_BLOCK_SIZE"
local "LOCAL"
last "LAST"
less "LESS"
level "LEVEL"
list "LIST"
local "LOCAL"
master "MASTER"
microsecond "MICROSECOND"
minute "MINUTE"
......@@ -378,6 +387,7 @@ import (
minRows "MIN_ROWS"
names "NAMES"
national "NATIONAL"
ncharType "NCHAR"
never "NEVER"
no "NO"
nodegroup "NODEGROUP"
......@@ -403,10 +413,13 @@ import (
query "QUERY"
queries "QUERIES"
quick "QUICK"
rebuild "REBUILD"
recover "RECOVER"
redundant "REDUNDANT"
reload "RELOAD"
remove "REMOVE"
reorganize "REORGANIZE"
repair "REPAIR"
repeatable "REPEATABLE"
respect "RESPECT"
replication "REPLICATION"
......@@ -416,9 +429,14 @@ import (
routine "ROUTINE"
rowCount "ROW_COUNT"
rowFormat "ROW_FORMAT"
rtree "RTREE"
second "SECOND"
secondaryEngine "SECONDARY_ENGINE"
secondaryLoad "SECONDARY_LOAD"
secondaryUnload "SECONDARY_UNLOAD"
security "SECURITY"
separator "SEPARATOR"
serial "SERIAL"
serializable "SERIALIZABLE"
session "SESSION"
share "SHARE"
......@@ -431,7 +449,16 @@ import (
sqlBufferResult "SQL_BUFFER_RESULT"
sqlCache "SQL_CACHE"
sqlNoCache "SQL_NO_CACHE"
sqlTsiDay "SQL_TSI_DAY"
sqlTsiHour "SQL_TSI_HOUR"
sqlTsiMinute "SQL_TSI_MINUTE"
sqlTsiMonth "SQL_TSI_MONTH"
sqlTsiQuarter "SQL_TSI_QUARTER"
sqlTsiSecond "SQL_TSI_SECOND"
sqlTsiWeek "SQL_TSI_WEEK"
sqlTsiYear "SQL_TSI_YEAR"
start "START"
statsAutoRecalc "STATS_AUTO_RECALC"
statsPersistent "STATS_PERSISTENT"
statsSamplePages "STATS_SAMPLE_PAGES"
status "STATUS"
......@@ -447,6 +474,7 @@ import (
super "SUPER"
some "SOME"
global "GLOBAL"
tableChecksum "TABLE_CHECKSUM"
tables "TABLES"
tablespace "TABLESPACE"
temporary "TEMPORARY"
......@@ -465,12 +493,14 @@ import (
unknown "UNKNOWN"
user "USER"
undefined "UNDEFINED"
validation "VALIDATION"
value "VALUE"
variables "VARIABLES"
view "VIEW"
binding "BINDING"
bindings "BINDINGS"
warnings "WARNINGS"
without "WITHOUT"
identSQLErrors "ERRORS"
week "WEEK"
yearType "YEAR"
......@@ -541,17 +571,29 @@ import (
optimistic "OPTIMISTIC"
pessimistic "PESSIMISTIC"
pump "PUMP"
samples "SAMPLES"
stats "STATS"
statsMeta "STATS_META"
statsHistograms "STATS_HISTOGRAMS"
statsBuckets "STATS_BUCKETS"
statsHealthy "STATS_HEALTHY"
tidb "TIDB"
tidbHJ "TIDB_HJ"
tidbSMJ "TIDB_SMJ"
tidbINLJ "TIDB_INLJ"
tidbHASHAGG "TIDB_HASHAGG"
tidbSTREAMAGG "TIDB_STREAMAGG"
hintHJ "HASH_JOIN"
hintSMJ "SM_JOIN"
hintINLJ "INL_JOIN"
hintHASHAGG "HASH_AGG"
hintSTREAMAGG "STREAM_AGG"
hintUseIndexMerge "USE_INDEX_MERGE"
hintNoIndexMerge "NO_INDEX_MERGE"
hintUseToja "USE_TOJA"
hintEnablePlanCache "ENABLE_PLAN_CACHE"
hintUsePlanCache "USE_PLAN_CACHE"
hintReadConsistentReplica "READ_CONSISTENT_REPLICA"
hintQBName "QB_NAME"
hintQueryType "QUERY_TYPE"
hintMemoryQuota "MEMORY_QUOTA"
hintOLAP "OLAP"
hintOLTP "OLTP"
topn "TOPN"
split "SPLIT"
width "WIDTH"
......@@ -697,7 +739,8 @@ import (
%type <item>
AdminShowSlow "Admin Show Slow statement"
AlterAlgorithm "Alter table algorithm"
AllOrPartitionNameList "All or partition name list"
AlgorithmClause "Alter table algorithm"
AlterTablePartitionOpt "Alter table partition option"
AlterTableSpec "Alter table specification"
AlterTableSpecList "Alter table specification list"
......@@ -718,6 +761,7 @@ import (
CollationName "Collation name"
ColumnDef "table column definition"
ColumnDefList "table column definition list"
ColumnFormat "Column format"
ColumnName "column name"
ColumnNameOrUserVariable "column name or user variable"
ColumnNameList "column name list"
......@@ -739,7 +783,6 @@ import (
Constraint "table constraint"
ConstraintElem "table constraint element"
ConstraintKeywordOpt "Constraint Keyword or empty"
CreateIndexStmtUnique "CREATE INDEX optional UNIQUE clause"
CreateTableOptionListOpt "create table option list opt"
CreateTableSelectOpt "Select/Union statement in CREATE TABLE ... SELECT"
DatabaseOption "CREATE Database specification"
......@@ -751,6 +794,7 @@ import (
DefaultTrueDistinctOpt "Distinct option which defaults to true"
BuggyDefaultFalseDistinctOpt "Distinct option which accepts DISTINCT ALL and defaults to false"
RequireClause "Encrypted connections options"
RequireClauseOpt "optional Encrypted connections options"
EqOpt "= or empty"
EscapedTableRef "escaped table reference"
ExplainFormatType "explain format type"
......@@ -771,6 +815,7 @@ import (
FieldItem "Field item for load data clause"
FieldItemList "Field items for load data clause"
FuncDatetimePrec "Function datetime precision"
GetFormatSelector "{DATE|DATETIME|TIME|TIMESTAMP}"
GlobalScope "The scope of variable"
GroupByClause "GROUP BY clause"
HashString "Hashed string"
......@@ -782,11 +827,14 @@ import (
IgnoreOptional "IGNORE or empty"
IndexColName "Index column name"
IndexColNameList "List of index column name"
IndexColNameListOpt "List of index column name opt"
IndexHint "index hint"
IndexHintList "index hint list"
IndexHintListOpt "index hint list opt"
IndexHintScope "index hint scope"
IndexHintType "index hint type"
IndexKeyTypeOpt "index key type"
IndexLockAndAlgorithmOpt "index lock and algorithm"
IndexName "index name"
IndexNameList "index name list"
IndexOption "Index Option"
......@@ -822,6 +870,7 @@ import (
OrderByOptional "Optional ORDER BY clause optional"
ByList "BY list"
QuickOptional "QUICK or empty"
QueryBlockOpt "Query block identifier optional"
PartitionDefinition "Partition definition"
PartitionDefinitionList "Partition definition list"
PartitionDefinitionListOpt "Partition definition list option"
......@@ -852,6 +901,7 @@ import (
OnDeleteUpdateOpt "optional ON DELETE and UPDATE clause"
OptGConcatSeparator "optional GROUP_CONCAT SEPARATOR"
ReferOpt "reference option"
ReorganizePartitionRuleOpt "optional reorganize partition partition list and definitions"
RequireList "require list"
RequireListElement "require list element"
Rolename "Rolename"
......@@ -915,6 +965,8 @@ import (
TableRefs "table references"
TableToTable "rename table to table"
TableToTableList "rename table to table by list"
TimeUnit "Time unit for 'DATE_ADD', 'DATE_SUB', 'ADDDATE', 'SUBDATE', 'EXTRACT'"
TimestampUnit "Time unit for 'TIMESTAMPADD' and 'TIMESTAMPDIFF'"
LockType "Table locks type"
TransactionChar "Transaction characteristic"
......@@ -946,6 +998,8 @@ import (
WhenClauseList "When clause list"
WithReadLockOpt "With Read Lock opt"
WithGrantOptionOpt "With Grant Option opt"
WithValidation "with validation"
WithValidationOpt "optional with validation"
ElseOpt "Optional else clause"
Type "Types"
......@@ -1004,10 +1058,14 @@ import (
NUM "A number"
NumList "Some numbers"
LengthNum "Field length num(uint64)"
HintTableList "Table list in optimizer hint"
TableOptimizerHintOpt "Table level optimizer hint"
TableOptimizerHints "Table level optimizer hints"
TableOptimizerHintList "Table level optimizer hint list"
HintTable "Table in optimizer hint"
HintTableList "Table list in optimizer hint"
HintTrueOrFalse "True or false in optimizer hint"
HintQueryType "Query type in optimizer hint"
HintMemoryQuota "Memory quota in optimizer hint"
EnforcedOrNot "{ENFORCED|NOT ENFORCED}"
EnforcedOrNotOpt "Optional {ENFORCED|NOT ENFORCED}"
EnforcedOrNotOrNotNullOpt "{[ENFORCED|NOT ENFORCED|NOT NULL]}"
......@@ -1027,9 +1085,10 @@ import (
RegexpSym "REGEXP or RLIKE"
IntoOpt "INTO or EmptyString"
ValueSym "Value or Values"
Varchar "{NATIONAL VARCHAR|VARCHAR|NVARCHAR}"
TimeUnit "Time unit for 'DATE_ADD', 'DATE_SUB', 'ADDDATE', 'SUBDATE', 'EXTRACT'"
TimestampUnit "Time unit for 'TIMESTAMPADD' and 'TIMESTAMPDIFF'"
Char "{CHAR|CHARACTER}"
NChar "{NCHAR|NATIONAL CHARACTER|NATIONAL CHAR}"
Varchar "{VARCHAR|CHARACTER VARYING|CHAR VARYING}"
NVarchar "{NATIONAL VARCHAR|NVARCHAR|NCHAR VARCHAR|NATIONAL CHARACTER VARYING|NATIONAL CHAR VARYING|NCHAR VARYING}"
DeallocateSym "Deallocate or drop"
OuterOpt "optional OUTER clause"
CrossOpt "Cross join option"
......@@ -1040,14 +1099,12 @@ import (
FromOrIn "From or In"
OptTable "Optional table keyword"
OptInteger "Optional Integer keyword"
NationalOpt "National option"
CharsetKw "charset or charater set"
CommaOpt "optional comma"
logAnd "logical and operator"
logOr "logical or operator"
LinearOpt "linear or empty"
FieldsOrColumns "Fields or columns"
GetFormatSelector "{DATE|DATETIME|TIME|TIMESTAMP}"
%type <ident>
ODBCDateTimeType "ODBC type keywords for date and time literals"
......@@ -1060,6 +1117,7 @@ import (
FunctionNameDatetimePrecision "Function with optional datetime precision, all of them are reserved keywords."
FunctionNameDateArith "Date arith function call names (date_add or date_sub)"
FunctionNameDateArithMultiForms "Date arith function call names (adddate or subdate)"
VariableName "A simple Identifier like xx or the xx.xx form"
%precedence empty
......@@ -1081,6 +1139,10 @@ import (
%precedence charsetKwd
%precedence lowerThanKey
%precedence key
%precedence lowerThanLocal
%precedence local
%precedence lowerThanRemove
%precedence remove
%left join straightJoin inner cross left right full natural
/* A dummy token to force the priority of TableRef production in a join. */
......@@ -1104,6 +1166,7 @@ import (
%precedence lowerThanNot
%right not not2
%right collate
%right encryption
%left splitOptionPriv
%precedence '('
......@@ -1216,30 +1279,63 @@ AlterTableSpec:
Constraint: constraint,
}
}
| "ADD" "PARTITION" IfNotExists PartitionDefinitionListOpt
| "ADD" "PARTITION" IfNotExists NoWriteToBinLogAliasOpt PartitionDefinitionListOpt
{
var defs []*ast.PartitionDefinition
if $4 != nil {
defs = $4.([]*ast.PartitionDefinition)
if $5 != nil {
defs = $5.([]*ast.PartitionDefinition)
}
noWriteToBinlog := $4.(bool)
if noWriteToBinlog {
yylex.AppendError(yylex.Errorf("The NO_WRITE_TO_BINLOG option is parsed but ignored for now."))
parser.lastErrorAsWarn()
}
$$ = &ast.AlterTableSpec{
IfNotExists: $3.(bool),
NoWriteToBinlog: noWriteToBinlog,
Tp: ast.AlterTableAddPartitions,
PartDefinitions: defs,
}
}
| "ADD" "PARTITION" "PARTITIONS" NUM
| "ADD" "PARTITION" IfNotExists NoWriteToBinLogAliasOpt "PARTITIONS" NUM
{
noWriteToBinlog := $4.(bool)
if noWriteToBinlog {
yylex.AppendError(yylex.Errorf("The NO_WRITE_TO_BINLOG option is parsed but ignored for now."))
parser.lastErrorAsWarn()
}
$$ = &ast.AlterTableSpec{
IfNotExists: $3.(bool),
NoWriteToBinlog: noWriteToBinlog,
Tp: ast.AlterTableAddPartitions,
Num: getUint64FromNUM($4),
Num: getUint64FromNUM($6),
}
}
| "CHECK" "PARTITION" AllOrPartitionNameList
{
yylex.AppendError(yylex.Errorf("The CHECK PARTITIONING clause is parsed but not implement yet."))
parser.lastErrorAsWarn()
ret := &ast.AlterTableSpec{
Tp: ast.AlterTableCheckPartitions,
}
if $3 == nil {
ret.OnAllPartitions = true
} else {
ret.PartitionNames = $3.([]model.CIStr)
}
$$ = ret
}
| "COALESCE" "PARTITION" NUM
| "COALESCE" "PARTITION" NoWriteToBinLogAliasOpt NUM
{
noWriteToBinlog := $3.(bool)
if noWriteToBinlog {
yylex.AppendError(yylex.Errorf("The NO_WRITE_TO_BINLOG option is parsed but ignored for now."))
parser.lastErrorAsWarn()
}
$$ = &ast.AlterTableSpec{
Tp: ast.AlterTableCoalescePartitions,
Num: getUint64FromNUM($3),
NoWriteToBinlog: noWriteToBinlog,
Num: getUint64FromNUM($4),
}
}
| "DROP" ColumnKeywordOpt IfExists ColumnName RestrictOrCascadeOpt
......@@ -1262,12 +1358,110 @@ AlterTableSpec:
PartitionNames: $4.([]model.CIStr),
}
}
| "TRUNCATE" "PARTITION" PartitionNameList %prec lowerThanComma
| "EXCHANGE" "PARTITION" Identifier "WITH" "TABLE" TableName WithValidationOpt
{
$$ = &ast.AlterTableSpec{
Tp: ast.AlterTableExchangePartition,
PartitionNames: []model.CIStr{model.NewCIStr($3)},
NewTable: $6.(*ast.TableName),
WithValidation: $7.(bool),
}
yylex.AppendError(yylex.Errorf("TiDB does not support EXCHANGE PARTITION now, it would be parsed but ignored."))
parser.lastErrorAsWarn()
}
| "TRUNCATE" "PARTITION" AllOrPartitionNameList
{
ret := &ast.AlterTableSpec{
Tp: ast.AlterTableTruncatePartition,
PartitionNames: $3.([]model.CIStr),
}
if $3 == nil {
ret.OnAllPartitions = true
yylex.AppendError(yylex.Errorf("The TRUNCATE PARTITION ALL clause is parsed but ignored by all storage engines."))
parser.lastErrorAsWarn()
} else {
ret.PartitionNames = $3.([]model.CIStr)
}
$$ = ret
}
| "OPTIMIZE" "PARTITION" NoWriteToBinLogAliasOpt AllOrPartitionNameList
{
ret := &ast.AlterTableSpec{
NoWriteToBinlog: $3.(bool),
Tp: ast.AlterTableOptimizePartition,
}
if $4 == nil {
ret.OnAllPartitions = true
} else {
ret.PartitionNames = $4.([]model.CIStr)
}
$$ = ret
yylex.AppendError(yylex.Errorf("The OPTIMIZE PARTITION clause is parsed but ignored by all storage engines."))
parser.lastErrorAsWarn()
}
| "REPAIR" "PARTITION" NoWriteToBinLogAliasOpt AllOrPartitionNameList
{
ret := &ast.AlterTableSpec{
NoWriteToBinlog: $3.(bool),
Tp: ast.AlterTableRepairPartition,
}
if $4 == nil {
ret.OnAllPartitions = true
} else {
ret.PartitionNames = $4.([]model.CIStr)
}
$$ = ret
yylex.AppendError(yylex.Errorf("The REPAIR PARTITION clause is parsed but ignored by all storage engines."))
parser.lastErrorAsWarn()
}
| "IMPORT" "PARTITION" AllOrPartitionNameList "TABLESPACE"
{
ret := &ast.AlterTableSpec{
Tp: ast.AlterTableImportPartitionTablespace,
}
if $3 == nil {
ret.OnAllPartitions = true
} else {
ret.PartitionNames = $3.([]model.CIStr)
}
$$ = ret
yylex.AppendError(yylex.Errorf("The IMPORT PARTITION TABLESPACE clause is parsed but ignored by all storage engines."))
parser.lastErrorAsWarn()
}
| "DISCARD" "PARTITION" AllOrPartitionNameList "TABLESPACE"
{
ret := &ast.AlterTableSpec{
Tp: ast.AlterTableDiscardPartitionTablespace,
}
if $3 == nil {
ret.OnAllPartitions = true
} else {
ret.PartitionNames = $3.([]model.CIStr)
}
$$ = ret
yylex.AppendError(yylex.Errorf("The DISCARD PARTITION TABLESPACE clause is parsed but ignored by all storage engines."))
parser.lastErrorAsWarn()
}
| "REBUILD" "PARTITION" NoWriteToBinLogAliasOpt AllOrPartitionNameList
{
ret := &ast.AlterTableSpec{
Tp: ast.AlterTableRebuildPartition,
NoWriteToBinlog: $3.(bool),
}
if $4 == nil {
ret.OnAllPartitions = true
} else {
ret.PartitionNames = $4.([]model.CIStr)
}
$$ = ret
yylex.AppendError(yylex.Errorf("REBUILD PARTITION syntax is parsed but not implement for now."))
parser.lastErrorAsWarn()
}
| "REORGANIZE" "PARTITION" NoWriteToBinLogAliasOpt ReorganizePartitionRuleOpt {
ret := $4.(*ast.AlterTableSpec)
ret.NoWriteToBinlog = $3.(bool)
$$ = ret
yylex.AppendError(yylex.Errorf("REORGANIZE PARTITION syntax is parsed but not implement for now."))
parser.lastErrorAsWarn()
}
| "DROP" KeyOrIndex IfExists Identifier
{
......@@ -1350,6 +1544,14 @@ AlterTableSpec:
NewColumns: []*ast.ColumnDef{colDef},
}
}
| "RENAME" "COLUMN" ColumnName "TO" ColumnName
{
$$ = &ast.AlterTableSpec{
Tp: ast.AlterTableRenameColumn,
OldColumnName: $3.(*ast.ColumnName),
NewColumnName: $5.(*ast.ColumnName),
}
}
| "RENAME" "TO" TableName
{
$$ = &ast.AlterTableSpec{
......@@ -1357,11 +1559,11 @@ AlterTableSpec:
NewTable: $3.(*ast.TableName),
}
}
| "RENAME" TableName
| "RENAME" EqOpt TableName
{
$$ = &ast.AlterTableSpec{
Tp: ast.AlterTableRenameTable,
NewTable: $2.(*ast.TableName),
NewTable: $3.(*ast.TableName),
}
}
| "RENAME" "AS" TableName
......@@ -1386,12 +1588,12 @@ AlterTableSpec:
LockType: $1.(ast.LockType),
}
}
| "ALGORITHM" EqOpt AlterAlgorithm
| AlgorithmClause
{
// Parse it and ignore it. Just for compatibility.
$$ = &ast.AlterTableSpec{
Tp: ast.AlterTableAlgorithm,
Algorithm: $3.(ast.AlterAlgorithm),
Algorithm: $1.(ast.AlgorithmType),
}
}
| "FORCE"
......@@ -1401,35 +1603,143 @@ AlterTableSpec:
Tp: ast.AlterTableForce,
}
}
| "WITH" "VALIDATION"
{
// Parse it and ignore it. Just for compatibility.
$$ = &ast.AlterTableSpec{
Tp: ast.AlterTableWithValidation,
}
yylex.AppendError(yylex.Errorf("The WITH/WITHOUT VALIDATION clause is parsed but ignored by all storage engines."))
parser.lastErrorAsWarn()
}
| "WITHOUT" "VALIDATION"
{
// Parse it and ignore it. Just for compatibility.
$$ = &ast.AlterTableSpec{
Tp: ast.AlterTableWithoutValidation,
}
yylex.AppendError(yylex.Errorf("The WITH/WITHOUT VALIDATION clause is parsed but ignored by all storage engines."))
parser.lastErrorAsWarn()
}
// Added in MySQL 8.0.13, see: https://dev.mysql.com/doc/refman/8.0/en/keywords.html for details
| "SECONDARY_LOAD"
{
// Parse it and ignore it. Just for compatibility.
$$ = &ast.AlterTableSpec{
Tp: ast.AlterTableSecondaryLoad,
}
yylex.AppendError(yylex.Errorf("The SECONDARY_LOAD clause is parsed but not implement yet."))
parser.lastErrorAsWarn()
}
// Added in MySQL 8.0.13, see: https://dev.mysql.com/doc/refman/8.0/en/keywords.html for details
| "SECONDARY_UNLOAD"
{
// Parse it and ignore it. Just for compatibility.
$$ = &ast.AlterTableSpec{
Tp: ast.AlterTableSecondaryUnload,
}
yylex.AppendError(yylex.Errorf("The SECONDARY_UNLOAD VALIDATION clause is parsed but not implement yet."))
parser.lastErrorAsWarn()
}
| "ALTER" "CHECK" Identifier EnforcedOrNot
{
// Parse it and ignore it. Just for compatibility.
c := &ast.Constraint{
Name: $3,
Enforced: $4.(bool),
}
$$ = &ast.AlterTableSpec{
Tp: ast.AlterTableAlterCheck,
Constraint: c,
}
yylex.AppendError(yylex.Errorf("The ALTER CHECK clause is parsed but not implemented yet."))
parser.lastErrorAsWarn()
}
| "DROP" "CHECK" Identifier
{
// Parse it and ignore it. Just for compatibility.
c := &ast.Constraint{
Name: $3,
}
$$ = &ast.AlterTableSpec{
Tp: ast.AlterTableDropCheck,
Constraint: c,
}
yylex.AppendError(yylex.Errorf("The DROP CHECK clause is parsed but not implemented yet."))
parser.lastErrorAsWarn()
}
ReorganizePartitionRuleOpt:
/* empty */ %prec lowerThanRemove
{
ret := &ast.AlterTableSpec{
Tp: ast.AlterTableReorganizePartition,
OnAllPartitions: true,
}
$$ = ret
}
| PartitionNameList "INTO" '(' PartitionDefinitionList ')'
{
ret := &ast.AlterTableSpec{
Tp: ast.AlterTableReorganizePartition,
PartitionNames: $1.([]model.CIStr),
PartDefinitions: $4.([]*ast.PartitionDefinition),
}
$$ = ret
}
AlterAlgorithm:
"DEFAULT"
AllOrPartitionNameList:
"ALL"
{
$$ = nil
}
| PartitionNameList %prec lowerThanComma
{
$$ = $1
}
WithValidationOpt:
{
$$ = true
}
| WithValidation
{
$$ = $1
}
WithValidation:
"WITH" "VALIDATION"
{
$$ = ast.AlterAlgorithmDefault
$$ = true
}
| "COPY"
| "WITHOUT" "VALIDATION"
{
$$ = ast.AlterAlgorithmCopy
$$ = false
}
| "INPLACE"
AlgorithmClause:
"ALGORITHM" EqOpt "DEFAULT"
{
$$ = ast.AlterAlgorithmInplace
$$ = ast.AlgorithmTypeDefault
}
| "INSTANT"
| "ALGORITHM" EqOpt "COPY"
{
$$ = ast.AlterAlgorithmInstant
$$ = ast.AlgorithmTypeCopy
}
| identifier
| "ALGORITHM" EqOpt "INPLACE"
{
$$ = ast.AlgorithmTypeInplace
}
| "ALGORITHM" EqOpt "INSTANT"
{
$$ = ast.AlgorithmTypeInstant
}
| "ALGORITHM" EqOpt identifier
{
yylex.AppendError(ErrUnknownAlterAlgorithm.GenWithStackByArgs($1))
return 1
}
LockClauseOpt:
{}
| LockClause {}
LockClause:
"LOCK" EqOpt "NONE"
{
......@@ -1711,6 +2021,10 @@ AnalyzeOption:
{
$$ = ast.AnalyzeOpt{Type: ast.AnalyzeOptCMSketchWidth, Value: getUint64FromNUM($1)}
}
| NUM "SAMPLES"
{
$$ = ast.AnalyzeOpt{Type: ast.AnalyzeOptNumSamples, Value: getUint64FromNUM($1)}
}
/*******************************************************************************************/
Assignment:
......@@ -1788,6 +2102,20 @@ ColumnDef:
}
$$ = colDef
}
| ColumnName "SERIAL" ColumnOptionListOpt
{
// TODO: check flen 0
tp := types.NewFieldType(mysql.TypeLonglong)
options := []*ast.ColumnOption{{Tp: ast.ColumnOptionNotNull}, {Tp: ast.ColumnOptionAutoIncrement}, {Tp: ast.ColumnOptionUniqKey}}
options = append(options, $3.([]*ast.ColumnOption)...)
tp.Flag |= mysql.UnsignedFlag
colDef := &ast.ColumnDef{Name: $1.(*ast.ColumnName), Tp: tp, Options: options}
if !colDef.Validate() {
yylex.AppendError(yylex.Errorf("Invalid column definition"))
return 1
}
$$ = colDef
}
ColumnName:
Identifier
......@@ -1941,6 +2269,10 @@ ColumnOption:
{
$$ = &ast.ColumnOption{Tp: ast.ColumnOptionDefaultValue, Expr: $2}
}
| "SERIAL" "DEFAULT" "VALUE"
{
$$ = []*ast.ColumnOption{{Tp: ast.ColumnOptionNotNull}, {Tp: ast.ColumnOptionAutoIncrement}, {Tp: ast.ColumnOptionUniqKey}}
}
| "ON" "UPDATE" NowSymOptionFraction
{
$$ = &ast.ColumnOption{Tp: ast.ColumnOptionOnUpdate, Expr: $3}
......@@ -1998,6 +2330,24 @@ ColumnOption:
{
$$ = &ast.ColumnOption{Tp: ast.ColumnOptionCollate, StrValue: $2.(string)}
}
| "COLUMN_FORMAT" ColumnFormat
{
$$ = &ast.ColumnOption{Tp: ast.ColumnOptionColumnFormat, StrValue: $2.(string)}
}
ColumnFormat:
"DEFAULT"
{
$$ = "DEFAULT"
}
| "FIXED"
{
$$ = "FIXED"
}
| "DYNAMIC"
{
$$ = "DYNAMIC"
}
GeneratedAlways: | "GENERATED" "ALWAYS"
......@@ -2155,18 +2505,28 @@ MatchOpt:
}
ReferDef:
"REFERENCES" TableName '(' IndexColNameList ')' MatchOpt OnDeleteUpdateOpt
"REFERENCES" TableName IndexColNameListOpt MatchOpt OnDeleteUpdateOpt
{
onDeleteUpdate := $7.([2]interface{})
onDeleteUpdate := $5.([2]interface{})
$$ = &ast.ReferenceDef{
Table: $2.(*ast.TableName),
IndexColNames: $4.([]*ast.IndexColName),
IndexColNames: $3.([]*ast.IndexColName),
OnDelete: onDeleteUpdate[0].(*ast.OnDeleteOpt),
OnUpdate: onDeleteUpdate[1].(*ast.OnUpdateOpt),
Match: $6.(ast.MatchType),
Match: $4.(ast.MatchType),
}
}
IndexColNameListOpt:
{
$$ = ([]*ast.IndexColName)(nil)
}
|
'(' IndexColNameList ')'
{
$$ = $2
}
OnDelete:
"ON" "DELETE" ReferOpt
{
......@@ -2281,9 +2641,33 @@ NumLiteral:
| floatLit
| decLit
/**************************************CreateIndexStmt***************************************
* See https://dev.mysql.com/doc/refman/8.0/en/create-index.html
*
* CREATE [UNIQUE | FULLTEXT | SPATIAL] INDEX index_name
* [index_type]
* ON tbl_name (key_part,...)
* [index_option]
* [algorithm_option | lock_option] ...
*
* key_part: {col_name [(length)] | (expr)} [ASC | DESC]
*
* index_option:
* KEY_BLOCK_SIZE [=] value
* | index_type
* | COMMENT 'string'
*
* index_type:
* USING {BTREE | HASH}
*
* algorithm_option:
* ALGORITHM [=] {DEFAULT | INPLACE | COPY}
*
* lock_option:
* LOCK [=] {DEFAULT | NONE | SHARED | EXCLUSIVE}
*******************************************************************************************/
CreateIndexStmt:
"CREATE" CreateIndexStmtUnique "INDEX" IfNotExists Identifier IndexTypeOpt "ON" TableName '(' IndexColNameList ')' IndexOptionList LockClauseOpt
"CREATE" IndexKeyTypeOpt "INDEX" IfNotExists Identifier IndexTypeOpt "ON" TableName '(' IndexColNameList ')' IndexOptionList IndexLockAndAlgorithmOpt
{
var indexOption *ast.IndexOption
if $12 != nil {
......@@ -2299,25 +2683,24 @@ CreateIndexStmt:
indexOption.Tp = $6.(model.IndexType)
}
}
var indexLockAndAlgorithm *ast.IndexLockAndAlgorithm
if $13 != nil {
indexLockAndAlgorithm = $13.(*ast.IndexLockAndAlgorithm)
if indexLockAndAlgorithm.LockTp == ast.LockTypeDefault && indexLockAndAlgorithm.AlgorithmTp == ast.AlgorithmTypeDefault {
indexLockAndAlgorithm = nil
}
}
$$ = &ast.CreateIndexStmt{
Unique: $2.(bool),
IfNotExists: $4.(bool),
IndexName: $5,
Table: $8.(*ast.TableName),
IndexColNames: $10.([]*ast.IndexColName),
IndexOption: indexOption,
KeyType: $2.(ast.IndexKeyType),
LockAlg: indexLockAndAlgorithm,
}
}
CreateIndexStmtUnique:
{
$$ = false
}
| "UNIQUE"
{
$$ = true
}
IndexColName:
ColumnName OptFieldLen Order
{
......@@ -2335,7 +2718,55 @@ IndexColNameList:
$$ = append($1.([]*ast.IndexColName), $3.(*ast.IndexColName))
}
IndexLockAndAlgorithmOpt:
{
$$ = nil
}
| LockClause
{
$$ = &ast.IndexLockAndAlgorithm{
LockTp: $1.(ast.LockType),
AlgorithmTp: ast.AlgorithmTypeDefault,
}
}
| AlgorithmClause
{
$$ = &ast.IndexLockAndAlgorithm{
LockTp: ast.LockTypeDefault,
AlgorithmTp: $1.(ast.AlgorithmType),
}
}
| LockClause AlgorithmClause
{
$$ = &ast.IndexLockAndAlgorithm{
LockTp: $1.(ast.LockType),
AlgorithmTp: $2.(ast.AlgorithmType),
}
}
| AlgorithmClause LockClause
{
$$ = &ast.IndexLockAndAlgorithm{
LockTp: $2.(ast.LockType),
AlgorithmTp: $1.(ast.AlgorithmType),
}
}
IndexKeyTypeOpt:
{
$$ = ast.IndexKeyTypeNone
}
| "UNIQUE"
{
$$ = ast.IndexKeyTypeUnique
}
| "SPATIAL"
{
$$ = ast.IndexKeyTypeSpatial
}
| "FULLTEXT"
{
$$ = ast.IndexKeyTypeFullText
}
/**************************************AlterDatabaseStmt***************************************
* See https://dev.mysql.com/doc/refman/5.7/en/alter-database.html
......@@ -2347,6 +2778,7 @@ IndexColNameList:
* alter_specification:
* [DEFAULT] CHARACTER SET [=] charset_name
* | [DEFAULT] COLLATE [=] collation_name
* | [DEFAULT] ENCRYPTION [=] {'Y' | 'N'}
*******************************************************************************************/
AlterDatabaseStmt:
"ALTER" DatabaseSym DBName DatabaseOptionList
......@@ -2375,6 +2807,7 @@ IndexColNameList:
* create_specification:
* [DEFAULT] CHARACTER SET [=] charset_name
* | [DEFAULT] COLLATE [=] collation_name
* | [DEFAULT] ENCRYPTION [=] {'Y' | 'N'}
*******************************************************************/
CreateDatabaseStmt:
"CREATE" DatabaseSym IfNotExists DBName DatabaseOptionListOpt
......@@ -2401,6 +2834,10 @@ DatabaseOption:
{
$$ = &ast.DatabaseOption{Tp: ast.DatabaseOptionCollate, Value: $4.(string)}
}
| DefaultKwdOpt "ENCRYPTION" EqOpt stringLit
{
$$ = &ast.DatabaseOption{Tp: ast.DatabaseOptionEncryption, Value: $4}
}
DatabaseOptionListOpt:
{
......@@ -2435,25 +2872,27 @@ DatabaseOptionList:
*******************************************************************/
CreateTableStmt:
"CREATE" "TABLE" IfNotExists TableName TableElementListOpt CreateTableOptionListOpt PartitionOpt DuplicateOpt AsOpt CreateTableSelectOpt
"CREATE" OptTemporary "TABLE" IfNotExists TableName TableElementListOpt CreateTableOptionListOpt PartitionOpt DuplicateOpt AsOpt CreateTableSelectOpt
{
stmt := $5.(*ast.CreateTableStmt)
stmt.Table = $4.(*ast.TableName)
stmt.IfNotExists = $3.(bool)
stmt.Options = $6.([]*ast.TableOption)
if $7 != nil {
stmt.Partition = $7.(*ast.PartitionOptions)
stmt := $6.(*ast.CreateTableStmt)
stmt.Table = $5.(*ast.TableName)
stmt.IfNotExists = $4.(bool)
stmt.IsTemporary = $2.(bool)
stmt.Options = $7.([]*ast.TableOption)
if $8 != nil {
stmt.Partition = $8.(*ast.PartitionOptions)
}
stmt.OnDuplicate = $8.(ast.OnDuplicateKeyHandlingType)
stmt.Select = $10.(*ast.CreateTableStmt).Select
stmt.OnDuplicate = $9.(ast.OnDuplicateKeyHandlingType)
stmt.Select = $11.(*ast.CreateTableStmt).Select
$$ = stmt
}
| "CREATE" "TABLE" IfNotExists TableName LikeTableWithOrWithoutParen
| "CREATE" OptTemporary "TABLE" IfNotExists TableName LikeTableWithOrWithoutParen
{
$$ = &ast.CreateTableStmt{
Table: $4.(*ast.TableName),
ReferTable: $5.(*ast.TableName),
IfNotExists: $3.(bool),
Table: $5.(*ast.TableName),
ReferTable: $6.(*ast.TableName),
IfNotExists: $4.(bool),
IsTemporary: $2.(bool),
}
}
......@@ -2520,7 +2959,7 @@ PartitionMethod:
Expr: $3.(ast.ExprNode),
}
}
| "RANGE" "COLUMNS" '(' ColumnNameList ')'
| "RANGE" FieldsOrColumns '(' ColumnNameList ')'
{
$$ = &ast.PartitionMethod{
Tp: model.PartitionTypeRange,
......@@ -2534,7 +2973,7 @@ PartitionMethod:
Expr: $3.(ast.ExprNode),
}
}
| "LIST" "COLUMNS" '(' ColumnNameList ')'
| "LIST" FieldsOrColumns '(' ColumnNameList ')'
{
$$ = &ast.PartitionMethod{
Tp: model.PartitionTypeList,
......@@ -2546,7 +2985,7 @@ PartitionMethod:
$$ = &ast.PartitionMethod{
Tp: model.PartitionTypeSystemTime,
Expr: $3.(ast.ExprNode),
Unit: ast.NewValueExpr($4),
Unit: $4.(ast.TimeUnitType),
}
}
| "SYSTEM_TIME" "LIMIT" LengthNum
......@@ -2692,6 +3131,10 @@ PartDefOption:
{
$$ = &ast.TableOption{Tp: ast.TableOptionEngine, StrValue: $3.(string)}
}
| "INSERT_METHOD" EqOpt StringName
{
$$ = &ast.TableOption{Tp: ast.TableOptionInsertMethod, StrValue: $3.(string)}
}
| "DATA" "DIRECTORY" EqOpt stringLit
{
$$ = &ast.TableOption{Tp: ast.TableOptionDataDirectory, StrValue: $4}
......@@ -3028,10 +3471,30 @@ DropDatabaseStmt:
$$ = &ast.DropDatabaseStmt{IfExists: $3.(bool), Name: $4.(string)}
}
/******************************************************************
* Drop Index Statement
* See https://dev.mysql.com/doc/refman/8.0/en/drop-index.html
*
* DROP INDEX index_name ON tbl_name
* [algorithm_option | lock_option] ...
*
* algorithm_option:
* ALGORITHM [=] {DEFAULT|INPLACE|COPY}
*
* lock_option:
* LOCK [=] {DEFAULT|NONE|SHARED|EXCLUSIVE}
******************************************************************/
DropIndexStmt:
"DROP" "INDEX" IfExists Identifier "ON" TableName
"DROP" "INDEX" IfExists Identifier "ON" TableName IndexLockAndAlgorithmOpt
{
$$ = &ast.DropIndexStmt{IfExists: $3.(bool), IndexName: $4, Table: $6.(*ast.TableName)}
var indexLockAndAlgorithm *ast.IndexLockAndAlgorithm
if $7 != nil {
indexLockAndAlgorithm = $7.(*ast.IndexLockAndAlgorithm)
if indexLockAndAlgorithm.LockTp == ast.LockTypeDefault && indexLockAndAlgorithm.AlgorithmTp == ast.AlgorithmTypeDefault {
indexLockAndAlgorithm = nil
}
}
$$ = &ast.DropIndexStmt{IfExists: $3.(bool), IndexName: $4, Table: $6.(*ast.TableName), LockAlg: indexLockAndAlgorithm}
}
DropTableStmt:
......@@ -3041,8 +3504,13 @@ DropTableStmt:
}
OptTemporary:
/* empty */ { $$= false; }
| "TEMPORARY" { $$= true; }
/* empty */ { $$ = false; }
| "TEMPORARY"
{
$$ = true
yylex.AppendError(yylex.Errorf("TiDB doesn't support TEMPORARY TABLE, TEMPORARY will be parsed but ignored."))
parser.lastErrorAsWarn()
}
;
DropViewStmt:
......@@ -3703,6 +4171,10 @@ IndexType:
{
$$ = model.IndexTypeHash
}
| "USING" "RTREE"
{
$$ = model.IndexTypeRtree
}
IndexTypeOpt:
{
......@@ -3718,26 +4190,28 @@ Identifier:
identifier | UnReservedKeyword | NotKeywordToken | TiDBKeyword
UnReservedKeyword:
"ACTION" | "ASCII" | "AUTO_INCREMENT" | "AFTER" | "ALWAYS" | "AVG" | "BEGIN" | "BIT" | "BOOL" | "BOOLEAN" | "BTREE" | "BYTE" | "CLEANUP" | "CHARSET" %prec charsetKwd
"ACTION" | "ASCII" | "AUTO_INCREMENT" | "AFTER" | "ALWAYS" | "AVG" | "BEGIN" | "BIT" | "BOOL" | "BOOLEAN" | "BTREE" | "BYTE" | "CLEANUP" | "CHARSET"
| "COLUMNS" | "COMMIT" | "COMPACT" | "COMPRESSED" | "CONSISTENT" | "CURRENT" | "DATA" | "DATE" %prec lowerThanStringLitToken| "DATETIME" | "DAY" | "DEALLOCATE" | "DO" | "DUPLICATE"
| "DYNAMIC"| "END" | "ENFORCED" | "ENGINE" | "ENGINES" | "ENUM" | "ERRORS" | "ESCAPE" | "EXECUTE" | "FIELDS" | "FIRST" | "FIXED" | "FLUSH" | "FOLLOWING" | "FORMAT" | "FULL" |"GLOBAL"
| "HASH" | "HOUR" | "LESS" | "LOCAL" | "LAST" | "NAMES" | "OFFSET" | "PASSWORD" %prec lowerThanEq | "PREPARE" | "QUICK" | "REDUNDANT"
| "DYNAMIC" | "ENCRYPTION" | "END" | "ENFORCED" | "ENGINE" | "ENGINES" | "ENUM" | "ERRORS" | "ESCAPE" | "EXECUTE" | "FIELDS" | "FIRST" | "FIXED" | "FLUSH" | "FOLLOWING" | "FORMAT" | "FULL" |"GLOBAL"
| "HASH" | "HOUR" | "INSERT_METHOD" | "LESS" | "LOCAL" | "LAST" | "NAMES" | "OFFSET" | "PASSWORD" %prec lowerThanEq | "PREPARE" | "QUICK" | "REBUILD" | "REDUNDANT" | "REORGANIZE"
| "ROLE" |"ROLLBACK" | "SESSION" | "SIGNED" | "SNAPSHOT" | "START" | "STATUS" | "OPEN"| "SUBPARTITIONS" | "SUBPARTITION" | "TABLES" | "TABLESPACE" | "TEXT" | "THAN" | "TIME" %prec lowerThanStringLitToken
| "TIMESTAMP" %prec lowerThanStringLitToken | "TRACE" | "TRANSACTION" | "TRUNCATE" | "UNBOUNDED" | "UNKNOWN" | "VALUE" | "WARNINGS" | "YEAR" | "MODE" | "WEEK" | "ANY" | "SOME" | "USER" | "IDENTIFIED"
| "COLLATION" | "COMMENT" | "AVG_ROW_LENGTH" | "CONNECTION" | "CHECKSUM" | "COMPRESSION" | "KEY_BLOCK_SIZE" | "MASTER" | "MAX_ROWS"
| "MIN_ROWS" | "NATIONAL" | "ROW_FORMAT" | "QUARTER" | "GRANTS" | "TRIGGERS" | "DELAY_KEY_WRITE" | "ISOLATION" | "JSON"
| "REPEATABLE" | "RESPECT" | "COMMITTED" | "UNCOMMITTED" | "ONLY" | "SERIALIZABLE" | "LEVEL" | "VARIABLES" | "SQL_CACHE" | "INDEXES" | "PROCESSLIST"
| "MIN_ROWS" | "NATIONAL" | "NCHAR" | "ROW_FORMAT" | "QUARTER" | "GRANTS" | "TRIGGERS" | "DELAY_KEY_WRITE" | "ISOLATION" | "JSON"
| "REPEATABLE" | "RESPECT" | "COMMITTED" | "UNCOMMITTED" | "ONLY" | "SERIAL" | "SERIALIZABLE" | "LEVEL" | "VARIABLES" | "SQL_CACHE" | "INDEXES" | "PROCESSLIST"
| "SQL_NO_CACHE" | "DISABLE" | "ENABLE" | "REVERSE" | "PRIVILEGES" | "NO" | "BINLOG" | "FUNCTION" | "VIEW" | "BINDING" | "BINDINGS" | "MODIFY" | "EVENTS" | "PARTITIONS"
| "NONE" | "NULLS" | "SUPER" | "EXCLUSIVE" | "STATS_PERSISTENT" | "ROW_COUNT" | "COALESCE" | "MONTH" | "PROCESS" | "PROFILE" | "PROFILES"
| "NONE" | "NULLS" | "SUPER" | "EXCLUSIVE" | "STATS_PERSISTENT" | "STATS_AUTO_RECALC" | "ROW_COUNT" | "COALESCE" | "MONTH" | "PROCESS" | "PROFILE" | "PROFILES"
| "MICROSECOND" | "MINUTE" | "PLUGINS" | "PRECEDING" | "QUERY" | "QUERIES" | "SECOND" | "SEPARATOR" | "SHARE" | "SHARED" | "SLOW" | "MAX_CONNECTIONS_PER_HOUR" | "MAX_QUERIES_PER_HOUR" | "MAX_UPDATES_PER_HOUR"
| "MAX_USER_CONNECTIONS" | "REPLICATION" | "CLIENT" | "SLAVE" | "RELOAD" | "TEMPORARY" | "ROUTINE" | "EVENT" | "ALGORITHM" | "DEFINER" | "INVOKER" | "MERGE" | "TEMPTABLE" | "UNDEFINED" | "SECURITY" | "CASCADED"
| "RECOVER" | "CIPHER" | "SUBJECT" | "ISSUER" | "X509" | "NEVER" | "EXPIRE" | "ACCOUNT" | "INCREMENTAL" | "CPU" | "MEMORY" | "BLOCK" | "IO" | "CONTEXT" | "SWITCHES" | "PAGE" | "FAULTS" | "IPC" | "SWAPS" | "SOURCE"
| "TRADITIONAL" | "SQL_BUFFER_RESULT" | "DIRECTORY" | "HISTORY" | "LIST" | "NODEGROUP" | "SYSTEM_TIME" | "PARTIAL" | "SIMPLE" | "REMOVE" | "PARTITIONING" | "STORAGE" | "DISK" | "STATS_SAMPLE_PAGES"
| "TRADITIONAL" | "SQL_BUFFER_RESULT" | "DIRECTORY" | "HISTORY" | "LIST" | "NODEGROUP" | "SYSTEM_TIME" | "PARTIAL" | "SIMPLE" | "REMOVE" | "PARTITIONING" | "STORAGE" | "DISK" | "STATS_SAMPLE_PAGES" | "SECONDARY_ENGINE" | "SECONDARY_LOAD" | "SECONDARY_UNLOAD" | "VALIDATION"
| "WITHOUT" | "RTREE" | "EXCHANGE" | "COLUMN_FORMAT" | "REPAIR" | "IMPORT" | "DISCARD" | "TABLE_CHECKSUM"
| "SQL_TSI_DAY" | "SQL_TSI_HOUR" | "SQL_TSI_MINUTE" | "SQL_TSI_MONTH" | "SQL_TSI_QUARTER" | "SQL_TSI_SECOND" | "SQL_TSI_WEEK" | "SQL_TSI_YEAR"
TiDBKeyword:
"ADMIN" | "BUCKETS" | "CANCEL" | "CMSKETCH" | "DDL" | "DEPTH" | "DRAINER" | "JOBS" | "JOB" | "NODE_ID" | "NODE_STATE" | "PUMP" | "STATS" | "STATS_META" | "STATS_HISTOGRAMS" | "STATS_BUCKETS" | "STATS_HEALTHY" | "TIDB" | "TIDB_HJ"
| "TIDB_SMJ" | "TIDB_INLJ" | "TIDB_HASHAGG" | "TIDB_STREAMAGG" | "TOPN" | "SPLIT" | "OPTIMISTIC" | "PESSIMISTIC" | "WIDTH" | "REGIONS"
"ADMIN" | "BUCKETS" | "CANCEL" | "CMSKETCH" | "DDL" | "DEPTH" | "DRAINER" | "JOBS" | "JOB" | "NODE_ID" | "NODE_STATE" | "PUMP" | "SAMPLES" | "STATS" | "STATS_META" | "STATS_HISTOGRAMS" | "STATS_BUCKETS" | "STATS_HEALTHY" | "TIDB"
| "HASH_JOIN" | "SM_JOIN" | "INL_JOIN" | "HASH_AGG" | "STREAM_AGG" | "USE_INDEX_MERGE" | "NO_INDEX_MERGE" | "USE_TOJA" | "ENABLE_PLAN_CACHE" | "USE_PLAN_CACHE"
| "READ_CONSISTENT_REPLICA" | "QB_NAME" | "QUERY_TYPE" | "MEMORY_QUOTA" | "OLAP" | "OLTP" |"TOPN" | "SPLIT" | "OPTIMISTIC" | "PESSIMISTIC" | "WIDTH" | "REGIONS"
NotKeywordToken:
"ADDDATE" | "BIT_AND" | "BIT_OR" | "BIT_XOR" | "CAST" | "COPY" | "COUNT" | "CURTIME" | "DATE_ADD" | "DATE_SUB" | "EXTRACT" | "GET_FORMAT" | "GROUP_CONCAT"
......@@ -4085,7 +4559,7 @@ BitExpr:
Args: []ast.ExprNode{
$1,
$4,
ast.NewValueExpr($5),
&ast.TimeUnitExpr{Unit: $5.(ast.TimeUnitType)},
},
}
}
......@@ -4096,7 +4570,7 @@ BitExpr:
Args: []ast.ExprNode{
$1,
$4,
ast.NewValueExpr($5),
&ast.TimeUnitExpr{Unit: $5.(ast.TimeUnitType)},
},
}
}
......@@ -4487,7 +4961,7 @@ FunctionCallNonKeyword:
Args: []ast.ExprNode{
$3,
$5,
ast.NewValueExpr("DAY"),
&ast.TimeUnitExpr{Unit: ast.TimeUnitDay},
},
}
}
......@@ -4498,7 +4972,7 @@ FunctionCallNonKeyword:
Args: []ast.ExprNode{
$3,
$6,
ast.NewValueExpr($7),
&ast.TimeUnitExpr{Unit: $7.(ast.TimeUnitType)},
},
}
}
......@@ -4509,13 +4983,13 @@ FunctionCallNonKeyword:
Args: []ast.ExprNode{
$3,
$6,
ast.NewValueExpr($7),
&ast.TimeUnitExpr{Unit: $7.(ast.TimeUnitType)},
},
}
}
| builtinExtract '(' TimeUnit "FROM" Expression ')'
{
timeUnit := ast.NewValueExpr($3)
timeUnit := &ast.TimeUnitExpr{Unit: $3.(ast.TimeUnitType)}
$$ = &ast.FuncCallExpr{
FnName: model.NewCIStr($1),
Args: []ast.ExprNode{timeUnit, $5},
......@@ -4525,7 +4999,10 @@ FunctionCallNonKeyword:
{
$$ = &ast.FuncCallExpr{
FnName: model.NewCIStr($1),
Args: []ast.ExprNode{ast.NewValueExpr($3), $5},
Args: []ast.ExprNode{
&ast.GetFormatSelectorExpr{Selector: $3.(ast.GetFormatSelectorType)},
$5,
},
}
}
| builtinPosition '(' BitExpr "IN" Expression ')'
......@@ -4564,14 +5041,14 @@ FunctionCallNonKeyword:
{
$$ = &ast.FuncCallExpr{
FnName: model.NewCIStr($1),
Args: []ast.ExprNode{ast.NewValueExpr($3), $5, $7},
Args: []ast.ExprNode{&ast.TimeUnitExpr{Unit: $3.(ast.TimeUnitType)}, $5, $7},
}
}
| "TIMESTAMPDIFF" '(' TimestampUnit ',' Expression ',' Expression ')'
{
$$ = &ast.FuncCallExpr{
FnName: model.NewCIStr($1),
Args: []ast.ExprNode{ast.NewValueExpr($3), $5, $7},
Args: []ast.ExprNode{&ast.TimeUnitExpr{Unit: $3.(ast.TimeUnitType)}, $5, $7},
}
}
| builtinTrim '(' Expression ')'
......@@ -4591,7 +5068,7 @@ FunctionCallNonKeyword:
| builtinTrim '(' TrimDirection "FROM" Expression ')'
{
nilVal := ast.NewValueExpr(nil)
direction := ast.NewValueExpr(int($3.(ast.TrimDirectionType)))
direction := &ast.TrimDirectionExpr{Direction: $3.(ast.TrimDirectionType)}
$$ = &ast.FuncCallExpr{
FnName: model.NewCIStr($1),
Args: []ast.ExprNode{$5, nilVal, direction},
......@@ -4599,7 +5076,7 @@ FunctionCallNonKeyword:
}
| builtinTrim '(' TrimDirection Expression "FROM" Expression ')'
{
direction := ast.NewValueExpr(int($3.(ast.TrimDirectionType)))
direction := &ast.TrimDirectionExpr{Direction: $3.(ast.TrimDirectionType)}
$$ = &ast.FuncCallExpr{
FnName: model.NewCIStr($1),
Args: []ast.ExprNode{$6, $4, direction},
......@@ -4609,19 +5086,19 @@ FunctionCallNonKeyword:
GetFormatSelector:
"DATE"
{
$$ = strings.ToUpper($1)
$$ = ast.GetFormatSelectorDate
}
| "DATETIME"
{
$$ = strings.ToUpper($1)
$$ = ast.GetFormatSelectorDatetime
}
| "TIME"
{
$$ = strings.ToUpper($1)
$$ = ast.GetFormatSelectorTime
}
| "TIMESTAMP"
{
$$ = strings.ToUpper($1)
$$ = ast.GetFormatSelectorDatetime
}
......@@ -4821,123 +5298,123 @@ FuncDatetimePrec:
}
TimeUnit:
"MICROSECOND"
TimestampUnit
{
$$ = strings.ToUpper($1)
}
| "SECOND"
{
$$ = strings.ToUpper($1)
}
| "MINUTE"
{
$$ = strings.ToUpper($1)
}
| "HOUR"
{
$$ = strings.ToUpper($1)
}
| "DAY"
{
$$ = strings.ToUpper($1)
}
| "WEEK"
{
$$ = strings.ToUpper($1)
}
| "MONTH"
{
$$ = strings.ToUpper($1)
}
| "QUARTER"
{
$$ = strings.ToUpper($1)
}
| "YEAR"
{
$$ = strings.ToUpper($1)
$$ = $1
}
| "SECOND_MICROSECOND"
{
$$ = strings.ToUpper($1)
$$ = ast.TimeUnitSecondMicrosecond
}
| "MINUTE_MICROSECOND"
{
$$ = strings.ToUpper($1)
$$ = ast.TimeUnitMinuteMicrosecond
}
| "MINUTE_SECOND"
{
$$ = strings.ToUpper($1)
$$ = ast.TimeUnitMinuteSecond
}
| "HOUR_MICROSECOND"
{
$$ = strings.ToUpper($1)
$$ = ast.TimeUnitHourMicrosecond
}
| "HOUR_SECOND"
{
$$ = strings.ToUpper($1)
$$ = ast.TimeUnitHourSecond
}
| "HOUR_MINUTE"
{
$$ = strings.ToUpper($1)
$$ = ast.TimeUnitHourMinute
}
| "DAY_MICROSECOND"
{
$$ = strings.ToUpper($1)
$$ = ast.TimeUnitDayMicrosecond
}
| "DAY_SECOND"
{
$$ = strings.ToUpper($1)
$$ = ast.TimeUnitDaySecond
}
| "DAY_MINUTE"
{
$$ = strings.ToUpper($1)
$$ = ast.TimeUnitDayMinute
}
| "DAY_HOUR"
{
$$ = strings.ToUpper($1)
$$ = ast.TimeUnitDayHour
}
| "YEAR_MONTH"
{
$$ = strings.ToUpper($1)
$$ = ast.TimeUnitYearMonth
}
TimestampUnit:
"MICROSECOND"
{
$$ = strings.ToUpper($1)
$$ = ast.TimeUnitMicrosecond
}
| "SECOND"
{
$$ = strings.ToUpper($1)
$$ = ast.TimeUnitSecond
}
| "MINUTE"
{
$$ = strings.ToUpper($1)
$$ = ast.TimeUnitMinute
}
| "HOUR"
{
$$ = strings.ToUpper($1)
$$ = ast.TimeUnitHour
}
| "DAY"
{
$$ = strings.ToUpper($1)
$$ = ast.TimeUnitDay
}
| "WEEK"
{
$$ = strings.ToUpper($1)
$$ = ast.TimeUnitWeek
}
| "MONTH"
{
$$ = strings.ToUpper($1)
$$ = ast.TimeUnitMonth
}
| "QUARTER"
{
$$ = strings.ToUpper($1)
$$ = ast.TimeUnitQuarter
}
| "YEAR"
{
$$ = strings.ToUpper($1)
$$ = ast.TimeUnitYear
}
| "SQL_TSI_SECOND"
{
$$ = ast.TimeUnitSecond
}
| "SQL_TSI_MINUTE"
{
$$ = ast.TimeUnitMinute
}
| "SQL_TSI_HOUR"
{
$$ = ast.TimeUnitHour
}
| "SQL_TSI_DAY"
{
$$ = ast.TimeUnitDay
}
| "SQL_TSI_WEEK"
{
$$ = ast.TimeUnitWeek
}
| "SQL_TSI_MONTH"
{
$$ = ast.TimeUnitMonth
}
| "SQL_TSI_QUARTER"
{
$$ = ast.TimeUnitQuarter
}
| "SQL_TSI_YEAR"
{
$$ = ast.TimeUnitYear
}
ExpressionOpt:
......@@ -5083,6 +5560,35 @@ CastType:
x.Collate = charset.CollationBin
$$ = x
}
| "FLOAT" FloatOpt
{
x := types.NewFieldType(mysql.TypeFloat)
fopt := $2.(*ast.FloatOpt)
if fopt.Flen >= 54 {
yylex.AppendError(ErrTooBigPrecision.GenWithStackByArgs(fopt.Flen,"CAST",53))
} else if fopt.Flen >= 25 {
x = types.NewFieldType(mysql.TypeDouble)
}
x.Flen, x.Decimal = mysql.GetDefaultFieldLengthAndDecimalForCast(x.Tp)
x.Flag |= mysql.BinaryFlag
x.Charset = charset.CharsetBin
x.Collate = charset.CollationBin
$$ = x
}
| "REAL"
{
var x *types.FieldType
if parser.lexer.GetSQLMode().HasRealAsFloatMode() {
x = types.NewFieldType(mysql.TypeFloat)
} else {
x = types.NewFieldType(mysql.TypeDouble)
}
x.Flen, x.Decimal = mysql.GetDefaultFieldLengthAndDecimalForCast(x.Tp)
x.Flag |= mysql.BinaryFlag
x.Charset = charset.CharsetBin
x.Collate = charset.CollationBin
$$ = x
}
PriorityOpt:
{
......@@ -5475,7 +5981,7 @@ WindowFrameStart:
}
| "INTERVAL" Expression TimeUnit "PRECEDING"
{
$$ = ast.FrameBound{Type: ast.Preceding, Expr: $2, Unit: ast.NewValueExpr($3),}
$$ = ast.FrameBound{Type: ast.Preceding, Expr: $2, Unit: $3.(ast.TimeUnitType),}
}
| "CURRENT" "ROW"
{
......@@ -5507,7 +6013,7 @@ WindowFrameBound:
}
| "INTERVAL" Expression TimeUnit "FOLLOWING"
{
$$ = ast.FrameBound{Type: ast.Following, Expr: $2, Unit: ast.NewValueExpr($3),}
$$ = ast.FrameBound{Type: ast.Following, Expr: $2, Unit: $3.(ast.TimeUnitType),}
}
OptWindowingClause:
......@@ -5975,16 +6481,6 @@ TableOptimizerHints:
$$ = nil
}
HintTableList:
Identifier
{
$$ = []model.CIStr{model.NewCIStr($1)}
}
| HintTableList ',' Identifier
{
$$ = append($1.([]model.CIStr), model.NewCIStr($3))
}
TableOptimizerHintList:
TableOptimizerHintOpt
{
......@@ -5994,31 +6490,146 @@ TableOptimizerHintList:
{
$$ = append($1.([]*ast.TableOptimizerHint), $2.(*ast.TableOptimizerHint))
}
| TableOptimizerHintList ',' TableOptimizerHintOpt
{
$$ = append($1.([]*ast.TableOptimizerHint), $3.(*ast.TableOptimizerHint))
}
TableOptimizerHintOpt:
tidbSMJ '(' HintTableList ')'
index '(' QueryBlockOpt HintTable IndexNameList ')'
{
$$ = &ast.TableOptimizerHint{
HintName: model.NewCIStr($1),
QBName: $3.(model.CIStr),
Tables: []ast.HintTable{$4.(ast.HintTable)},
Indexes: $5.([]model.CIStr),
}
}
| hintSMJ '(' QueryBlockOpt HintTableList ')'
{
$$ = &ast.TableOptimizerHint{HintName: model.NewCIStr($1), QBName: $3.(model.CIStr), Tables: $4.([]ast.HintTable)}
}
| hintINLJ '(' QueryBlockOpt HintTableList ')'
{
$$ = &ast.TableOptimizerHint{HintName: model.NewCIStr($1), QBName: $3.(model.CIStr), Tables: $4.([]ast.HintTable)}
}
| hintHJ '(' QueryBlockOpt HintTableList ')'
{
$$ = &ast.TableOptimizerHint{HintName: model.NewCIStr($1), QBName: $3.(model.CIStr), Tables: $4.([]ast.HintTable)}
}
| hintUseIndexMerge '(' QueryBlockOpt HintTable IndexNameList ')'
{
$$ = &ast.TableOptimizerHint{
HintName: model.NewCIStr($1),
QBName: $3.(model.CIStr),
Tables: []ast.HintTable{$4.(ast.HintTable)},
Indexes: $5.([]model.CIStr),
}
}
| hintUseToja '(' QueryBlockOpt HintTrueOrFalse ')'
{
$$ = &ast.TableOptimizerHint{HintName: model.NewCIStr($1), QBName: $3.(model.CIStr), HintFlag: $4.(bool)}
}
| hintEnablePlanCache '(' QueryBlockOpt HintTrueOrFalse ')'
{
$$ = &ast.TableOptimizerHint{HintName: model.NewCIStr($1), QBName: $3.(model.CIStr), HintFlag: $4.(bool)}
}
| maxExecutionTime '(' QueryBlockOpt NUM ')'
{
$$ = &ast.TableOptimizerHint{HintName: model.NewCIStr($1), QBName: $3.(model.CIStr), MaxExecutionTime: getUint64FromNUM($4)}
}
| hintUsePlanCache '(' QueryBlockOpt ')'
{
// arguments not decided yet.
$$ = &ast.TableOptimizerHint{HintName: model.NewCIStr($1), QBName: $3.(model.CIStr)}
}
| hintQueryType '(' QueryBlockOpt HintQueryType ')'
{
$$ = &ast.TableOptimizerHint{HintName: model.NewCIStr($1), QBName: $3.(model.CIStr), QueryType: model.NewCIStr($4.(string))}
}
| hintMemoryQuota '(' QueryBlockOpt HintMemoryQuota ')'
{
$$ = &ast.TableOptimizerHint{HintName: model.NewCIStr($1), QBName: $3.(model.CIStr), MemoryQuota: $4.(uint64)}
}
| hintHASHAGG '(' QueryBlockOpt ')'
{
$$ = &ast.TableOptimizerHint{HintName: model.NewCIStr($1), QBName: $3.(model.CIStr)}
}
| hintSTREAMAGG '(' QueryBlockOpt ')'
{
$$ = &ast.TableOptimizerHint{HintName: model.NewCIStr($1), QBName: $3.(model.CIStr)}
}
| hintNoIndexMerge '(' QueryBlockOpt ')'
{
$$ = &ast.TableOptimizerHint{HintName: model.NewCIStr($1), QBName: $3.(model.CIStr)}
}
| hintReadConsistentReplica '(' QueryBlockOpt ')'
{
$$ = &ast.TableOptimizerHint{HintName: model.NewCIStr($1), Tables: $3.([]model.CIStr)}
$$ = &ast.TableOptimizerHint{HintName: model.NewCIStr($1), QBName: $3.(model.CIStr)}
}
| tidbINLJ '(' HintTableList ')'
| hintQBName '(' Identifier ')'
{
$$ = &ast.TableOptimizerHint{HintName: model.NewCIStr($1), Tables: $3.([]model.CIStr)}
$$ = &ast.TableOptimizerHint{HintName: model.NewCIStr($1), QBName: model.NewCIStr($3)}
}
| tidbHJ '(' HintTableList ')'
QueryBlockOpt:
{
$$ = &ast.TableOptimizerHint{HintName: model.NewCIStr($1), Tables: $3.([]model.CIStr)}
$$ = model.NewCIStr("")
}
| tidbHASHAGG '(' ')'
| singleAtIdentifier
{
$$ = &ast.TableOptimizerHint{HintName: model.NewCIStr($1)}
$$ = model.NewCIStr($1)
}
| tidbSTREAMAGG '(' ')'
HintTable:
Identifier QueryBlockOpt
{
$$ = ast.HintTable{TableName: model.NewCIStr($1), QBName: $2.(model.CIStr)}
}
HintTableList:
HintTable
{
$$ = &ast.TableOptimizerHint{HintName: model.NewCIStr($1)}
$$ = []ast.HintTable{$1.(ast.HintTable)}
}
| maxExecutionTime '(' NUM ')'
| HintTableList ',' HintTable
{
$$ = &ast.TableOptimizerHint{HintName: model.NewCIStr($1), MaxExecutionTime: getUint64FromNUM($3)}
$$ = append($1.([]ast.HintTable), $3.(ast.HintTable))
}
HintTrueOrFalse:
"TRUE"
{
$$ = true
}
| "FALSE"
{
$$ = false
}
HintQueryType:
hintOLAP
{
$$ = $1
}
| hintOLTP
{
$$ = $1
}
HintMemoryQuota:
NUM Identifier
{
// May change into MB/MiB or GB/GiB
switch model.NewCIStr($2).L {
case "m":
$$ = getUint64FromNUM($1)
case "g":
$$ = getUint64FromNUM($1) * 1024
default:
// Trigger warning in TiDB Planner
$$ = uint64(0)
}
}
SelectStmtCalcFoundRows:
......@@ -6423,20 +7034,27 @@ SetExpr:
EqOrAssignmentEq:
eq | assignmentEq
VariableName:
Identifier
| Identifier '.' Identifier
{
$$ = $1 + "." + $3
}
VariableAssignment:
Identifier EqOrAssignmentEq SetExpr
VariableName EqOrAssignmentEq SetExpr
{
$$ = &ast.VariableAssignment{Name: $1, Value: $3, IsSystem: true}
}
| "GLOBAL" Identifier EqOrAssignmentEq SetExpr
| "GLOBAL" VariableName EqOrAssignmentEq SetExpr
{
$$ = &ast.VariableAssignment{Name: $2, Value: $4, IsGlobal: true, IsSystem: true}
}
| "SESSION" Identifier EqOrAssignmentEq SetExpr
| "SESSION" VariableName EqOrAssignmentEq SetExpr
{
$$ = &ast.VariableAssignment{Name: $2, Value: $4, IsSystem: true}
}
| "LOCAL" Identifier EqOrAssignmentEq Expression
| "LOCAL" VariableName EqOrAssignmentEq Expression
{
$$ = &ast.VariableAssignment{Name: $2, Value: $4, IsSystem: true}
}
......@@ -7362,6 +7980,7 @@ FlushOption:
}
NoWriteToBinLogAliasOpt:
%prec lowerThanLocal
{
$$ = false
}
......@@ -7465,6 +8084,7 @@ TraceableStmt:
| InsertIntoStmt
| ReplaceIntoStmt
| UnionStmt
| LoadDataStmt
ExplainableStmt:
SelectStmt
......@@ -7544,8 +8164,7 @@ TableElementListOpt:
Constraints: constraints,
}
}
|
'(' TableElementList ')'
| '(' TableElementList ')'
{
tes := $2.([]interface {})
var columnDefs []*ast.ColumnDef
......@@ -7593,6 +8212,10 @@ TableOption:
{
$$ = &ast.TableOption{Tp: ast.TableOptionCheckSum, UintValue: $3.(uint64)}
}
| "TABLE_CHECKSUM" EqOpt LengthNum
{
$$ = &ast.TableOption{Tp: ast.TableOptionTableCheckSum, UintValue: $3.(uint64)}
}
| "PASSWORD" EqOpt stringLit
{
$$ = &ast.TableOption{Tp: ast.TableOptionPassword, StrValue: $3}
......@@ -7617,6 +8240,23 @@ TableOption:
{
$$ = &ast.TableOption{Tp: ast.TableOptionStatsPersistent}
}
| "STATS_AUTO_RECALC" EqOpt LengthNum
{
n := $3.(uint64)
if n != 0 && n != 1 {
yylex.AppendError(yylex.Errorf("The value of STATS_AUTO_RECALC must be one of [0|1|DEFAULT]."))
return 1
}
$$ = &ast.TableOption{Tp: ast.TableOptionStatsAutoRecalc, UintValue: n}
yylex.AppendError(yylex.Errorf("The STATS_AUTO_RECALC is parsed but ignored by all storage engines."))
parser.lastErrorAsWarn()
}
| "STATS_AUTO_RECALC" EqOpt "DEFAULT"
{
$$ = &ast.TableOption{Tp: ast.TableOptionStatsAutoRecalc, Default: true}
yylex.AppendError(yylex.Errorf("The STATS_AUTO_RECALC is parsed but ignored by all storage engines."))
parser.lastErrorAsWarn()
}
| "STATS_SAMPLE_PAGES" EqOpt LengthNum
{
// Parse it but will ignore it.
......@@ -7630,7 +8270,7 @@ TableOption:
{
// Parse it but will ignore it.
// In MySQL, default value of STATS_SAMPLE_PAGES is 0.
$$ = &ast.TableOption{Tp: ast.TableOptionStatsSamplePages, UintValue: 0}
$$ = &ast.TableOption{Tp: ast.TableOptionStatsSamplePages, Default: true}
yylex.AppendError(yylex.Errorf("The STATS_SAMPLE_PAGES is parsed but ignored by all storage engines."))
parser.lastErrorAsWarn()
}
......@@ -7661,6 +8301,22 @@ TableOption:
yylex.AppendError(yylex.Errorf("The STORAGE clause is parsed but ignored by all storage engines."))
parser.lastErrorAsWarn()
}
| "SECONDARY_ENGINE" EqOpt "NULL"
{
// Parse it but will ignore it
// See https://github.com/mysql/mysql-server/blob/8.0/sql/sql_yacc.yy#L5977-L5984
$$ = &ast.TableOption{Tp: ast.TableOptionSecondaryEngineNull}
yylex.AppendError(yylex.Errorf("The SECONDARY_ENGINE clause is parsed but ignored by all storage engines."))
parser.lastErrorAsWarn()
}
| "SECONDARY_ENGINE" EqOpt StringName
{
// Parse it but will ignore it
// See https://github.com/mysql/mysql-server/blob/8.0/sql/sql_yacc.yy#L5977-L5984
$$ = &ast.TableOption{Tp: ast.TableOptionSecondaryEngine, StrValue: $3.(string)}
yylex.AppendError(yylex.Errorf("The SECONDARY_ENGINE clause is parsed but ignored by all storage engines."))
parser.lastErrorAsWarn()
}
StatsPersistentVal:
"DEFAULT"
......@@ -7954,31 +8610,40 @@ BitValueType:
}
StringType:
NationalOpt "CHAR" FieldLen OptBinary
Char FieldLen OptBinary
{
x := types.NewFieldType(mysql.TypeString)
x.Flen = $3.(int)
x.Charset = $4.(*ast.OptBinary).Charset
if $4.(*ast.OptBinary).IsBinary {
x.Flen = $2.(int)
x.Charset = $3.(*ast.OptBinary).Charset
if $3.(*ast.OptBinary).IsBinary {
x.Flag |= mysql.BinaryFlag
}
$$ = x
}
| NationalOpt "CHAR" OptBinary
| Char OptBinary
{
x := types.NewFieldType(mysql.TypeString)
x.Charset = $2.(*ast.OptBinary).Charset
if $2.(*ast.OptBinary).IsBinary {
x.Flag |= mysql.BinaryFlag
}
$$ = x
}
| NChar FieldLen OptBinary
{
x := types.NewFieldType(mysql.TypeString)
x.Flen = $2.(int)
x.Charset = $3.(*ast.OptBinary).Charset
if $3.(*ast.OptBinary).IsBinary {
x.Flag |= mysql.BinaryFlag
}
$$ = x
}
| "NATIONAL" "CHARACTER" FieldLen OptBinary
| NChar OptBinary
{
x := types.NewFieldType(mysql.TypeString)
x.Flen = $3.(int)
x.Charset = $4.(*ast.OptBinary).Charset
if $4.(*ast.OptBinary).IsBinary {
x.Charset = $2.(*ast.OptBinary).Charset
if $2.(*ast.OptBinary).IsBinary {
x.Flag |= mysql.BinaryFlag
}
$$ = x
......@@ -7993,6 +8658,16 @@ StringType:
}
$$ = x
}
| NVarchar FieldLen OptBinary
{
x := types.NewFieldType(mysql.TypeVarchar)
x.Flen = $2.(int)
x.Charset = $3.(*ast.OptBinary).Charset
if $3.(*ast.OptBinary).IsBinary {
x.Flag |= mysql.BinaryFlag
}
$$ = x
}
| "BINARY" OptFieldLen
{
x := types.NewFieldType(mysql.TypeString)
......@@ -8051,14 +8726,27 @@ StringType:
$$ = x
}
NationalOpt:
{}
| "NATIONAL"
Char:
"CHARACTER"
| "CHAR"
NChar:
"NCHAR"
| "NATIONAL" "CHARACTER"
| "NATIONAL" "CHAR"
Varchar:
"NATIONAL" "VARCHAR"
"CHARACTER" "VARYING"
| "CHAR" "VARYING"
| "VARCHAR"
NVarchar:
"NATIONAL" "VARCHAR"
| "NVARCHAR"
| "NCHAR" "VARCHAR"
| "NATIONAL" "CHARACTER" "VARYING"
| "NATIONAL" "CHAR" "VARYING"
| "NCHAR" "VARYING"
BlobType:
......@@ -8107,6 +8795,11 @@ TextType:
x := types.NewFieldType(mysql.TypeLongBlob)
$$ = x
}
| "LONG"
{
x := types.NewFieldType(mysql.TypeMediumBlob)
$$ = x
}
| "LONG" "VARCHAR"
{
x := types.NewFieldType(mysql.TypeMediumBlob)
......@@ -8373,7 +9066,7 @@ CommaOpt:
* https://dev.mysql.com/doc/refman/5.7/en/account-management-sql.html
************************************************************************************/
CreateUserStmt:
"CREATE" "USER" IfNotExists UserSpecList RequireClause ConnectionOptions PasswordOrLockOptions
"CREATE" "USER" IfNotExists UserSpecList RequireClauseOpt ConnectionOptions PasswordOrLockOptions
{
// See https://dev.mysql.com/doc/refman/5.7/en/create-user.html
$$ = &ast.CreateUserStmt{
......@@ -8399,11 +9092,14 @@ CreateRoleStmt:
/* See http://dev.mysql.com/doc/refman/5.7/en/alter-user.html */
AlterUserStmt:
"ALTER" "USER" IfExists UserSpecList
"ALTER" "USER" IfExists UserSpecList RequireClauseOpt ConnectionOptions PasswordOrLockOptions
{
$$ = &ast.AlterUserStmt{
IfExists: $3.(bool),
Specs: $4.([]*ast.UserSpec),
TslOptions: $5.([]*ast.TslOption),
ResourceOptions: $6.([]*ast.ResourceOption),
PasswordOrLockOptions: $7.([]*ast.PasswordOrLockOption),
}
}
| "ALTER" "USER" IfExists "USER" '(' ')' "IDENTIFIED" "BY" AuthString
......@@ -8448,6 +9144,8 @@ ConnectionOptions:
| "WITH" ConnectionOptionList
{
$$ = $2
yylex.AppendError(yylex.Errorf("TiDB does not support WITH ConnectionOptions now, they would be parsed but ignored."))
parser.lastErrorAsWarn()
}
ConnectionOptionList:
......@@ -8492,12 +9190,19 @@ ConnectionOption:
}
}
RequireClause:
RequireClauseOpt:
{
l := []*ast.TslOption{}
$$ = l
$$ = []*ast.TslOption{}
}
| "REQUIRE" "NONE"
| RequireClause
{
$$ = $1
yylex.AppendError(yylex.Errorf("TiDB does not support REQUIRE now, they would be parsed but ignored."))
parser.lastErrorAsWarn()
}
RequireClause:
"REQUIRE" "NONE"
{
t := &ast.TslOption {
Type: ast.TslNone,
......@@ -8566,6 +9271,8 @@ PasswordOrLockOptions:
| PasswordOrLockOptionList
{
$$ = $1
yylex.AppendError(yylex.Errorf("TiDB does not support PASSWORD EXPIRE and ACCOUNT LOCK now, they would be parsed but ignored."))
parser.lastErrorAsWarn()
}
PasswordOrLockOptionList:
......
......@@ -201,13 +201,11 @@ func (ft *FieldType) String() string {
func (ft *FieldType) Restore(ctx *format.RestoreCtx) error {
ctx.WriteKeyWord(TypeToStr(ft.Tp, ft.Charset))
precision := ft.Flen
scale := ft.Decimal
precision := UnspecifiedLength
scale := UnspecifiedLength
switch ft.Tp {
case mysql.TypeEnum, mysql.TypeSet:
precision = UnspecifiedLength
scale = UnspecifiedLength
ctx.WritePlain("(")
for i, e := range ft.Elems {
if i != 0 {
......@@ -218,7 +216,11 @@ func (ft *FieldType) Restore(ctx *format.RestoreCtx) error {
ctx.WritePlain(")")
case mysql.TypeTimestamp, mysql.TypeDatetime, mysql.TypeDuration:
precision = ft.Decimal
scale = UnspecifiedLength
case mysql.TypeDecimal, mysql.TypeFloat, mysql.TypeDouble, mysql.TypeNewDecimal:
precision = ft.Flen
scale = ft.Decimal
default:
precision = ft.Flen
}
if precision != UnspecifiedLength {
......@@ -227,7 +229,6 @@ func (ft *FieldType) Restore(ctx *format.RestoreCtx) error {
ctx.WritePlainf(",%d", scale)
}
ctx.WritePlain(")")
}
if mysql.HasUnsignedFlag(ft.Flag) {
......@@ -301,6 +302,8 @@ func (ft *FieldType) RestoreAsCastType(ctx *format.RestoreCtx) {
ctx.WriteKeyWord("JSON")
case mysql.TypeDouble:
ctx.WriteKeyWord("DOUBLE")
case mysql.TypeFloat:
ctx.WriteKeyWord("FLOAT")
}
}
......
......@@ -36,6 +36,7 @@ const (
codeTooBigDisplayWidth = terror.ErrCode(mysql.ErrTooBigDisplaywidth)
codeErrUnknownAlterLock = terror.ErrCode(mysql.ErrUnknownAlterLock)
codeErrUnknownAlterAlgorithm = terror.ErrCode(mysql.ErrUnknownAlterAlgorithm)
codeErrTooBigPrecision = terror.ErrCode(mysql.ErrTooBigPrecision)
)
var (
......@@ -53,6 +54,8 @@ var (
ErrWrongFieldTerminators = terror.ClassParser.New(codeWrongFieldTerminators, mysql.MySQLErrName[mysql.ErrWrongFieldTerminators])
// ErrTooBigDisplayWidth returns for data display width exceed limit .
ErrTooBigDisplayWidth = terror.ClassParser.New(codeTooBigDisplayWidth, mysql.MySQLErrName[mysql.ErrTooBigDisplaywidth])
// ErrTooBigPrecision returns for data precision exceed limit.
ErrTooBigPrecision = terror.ClassParser.New(codeErrTooBigPrecision, mysql.MySQLErrName[mysql.ErrTooBigPrecision])
// ErrUnknownAlterLock returns for no alter lock type found error.
ErrUnknownAlterLock = terror.ClassParser.New(codeErrUnknownAlterLock, mysql.MySQLErrName[mysql.ErrUnknownAlterLock])
// ErrUnknownAlterAlgorithm returns for no alter algorithm found error.
......@@ -75,6 +78,7 @@ func init() {
codeTooBigDisplayWidth: mysql.ErrTooBigDisplaywidth,
codeErrUnknownAlterLock: mysql.ErrUnknownAlterLock,
codeErrUnknownAlterAlgorithm: mysql.ErrUnknownAlterAlgorithm,
codeErrTooBigPrecision: mysql.ErrTooBigPrecision,
}
terror.ErrClassToMySQLCodes[terror.ClassParser] = parserMySQLErrCodes
}
......
......@@ -76,6 +76,13 @@ type StatementContext struct {
// prefix in a strict way, only extract 0-9 and (+ or - in first bit).
CastStrToIntStrict bool
// StartTime is the query start time.
StartTime time.Time
// DurationParse is the duration of pasing SQL string to AST.
DurationParse time.Duration
// DurationCompile is the duration of compiling AST to execution plan.
DurationCompile time.Duration
// mu struct holds variables that change during execution.
mu struct {
sync.Mutex
......@@ -413,6 +420,9 @@ func (sc *StatementContext) ResetForRetry() {
sc.mu.Unlock()
sc.TableIDs = sc.TableIDs[:0]
sc.IndexIDs = sc.IndexIDs[:0]
sc.StartTime = time.Now()
sc.DurationCompile = time.Duration(0)
sc.DurationParse = time.Duration(0)
}
// MergeExecDetails merges a single region execution details into self, used to print
......
......@@ -98,6 +98,7 @@ func IntergerSignedLowerBound(intType byte) int64 {
}
// ConvertFloatToInt converts a float64 value to a int value.
// `tp` is used in err msg, if there is overflow, this func will report err according to `tp`
func ConvertFloatToInt(fval float64, lowerBound, upperBound int64, tp byte) (int64, error) {
val := RoundFloat(fval)
if val < float64(lowerBound) {
......@@ -292,7 +293,7 @@ func StrToUint(sc *stmtctx.StatementContext, str string) (uint64, error) {
}
// StrToDateTime converts str to MySQL DateTime.
func StrToDateTime(sc *stmtctx.StatementContext, str string, fsp int) (Time, error) {
func StrToDateTime(sc *stmtctx.StatementContext, str string, fsp int8) (Time, error) {
return ParseTime(sc, str, mysql.TypeDatetime, fsp)
}
......@@ -300,7 +301,7 @@ func StrToDateTime(sc *stmtctx.StatementContext, str string, fsp int) (Time, err
// and returns Time when str is in datetime format.
// when isDuration is true, the d is returned, when it is false, the t is returned.
// See https://dev.mysql.com/doc/refman/5.5/en/date-and-time-literals.html.
func StrToDuration(sc *stmtctx.StatementContext, str string, fsp int) (d Duration, t Time, isDuration bool, err error) {
func StrToDuration(sc *stmtctx.StatementContext, str string, fsp int8) (d Duration, t Time, isDuration bool, err error) {
str = strings.TrimSpace(str)
length := len(str)
if length > 0 && str[0] == '-' {
......@@ -323,7 +324,7 @@ func StrToDuration(sc *stmtctx.StatementContext, str string, fsp int) (d Duratio
}
// NumberToDuration converts number to Duration.
func NumberToDuration(number int64, fsp int) (Duration, error) {
func NumberToDuration(number int64, fsp int8) (Duration, error) {
if number > TimeMaxValue {
// Try to parse DATETIME.
if number >= 10000000000 { // '2001-00-00 00-00-00'
......@@ -567,8 +568,12 @@ func ConvertJSONToInt(sc *stmtctx.StatementContext, j json.BinaryJSON, unsigned
return int64(u), errors.Trace(err)
case json.TypeCodeString:
str := string(hack.String(j.GetString()))
if !unsigned {
return StrToInt(sc, str)
}
u, err := StrToUint(sc, str)
return int64(u), errors.Trace(err)
}
return 0, errors.New("Unknown type code in JSON")
}
......
......@@ -260,7 +260,7 @@ func (d *Datum) SetMysqlDecimal(b *MyDecimal) {
// GetMysqlDuration gets Duration value
func (d *Datum) GetMysqlDuration() Duration {
return Duration{Duration: time.Duration(d.i), Fsp: int(d.decimal)}
return Duration{Duration: time.Duration(d.i), Fsp: int8(d.decimal)}
}
// SetMysqlDuration sets Duration value
......@@ -939,7 +939,7 @@ func (d *Datum) convertToMysqlTimestamp(sc *stmtctx.StatementContext, target *Fi
)
fsp := DefaultFsp
if target.Decimal != UnspecifiedLength {
fsp = target.Decimal
fsp = int8(target.Decimal)
}
switch d.k {
case KindMysqlTime:
......@@ -973,7 +973,7 @@ func (d *Datum) convertToMysqlTime(sc *stmtctx.StatementContext, target *FieldTy
tp := target.Tp
fsp := DefaultFsp
if target.Decimal != UnspecifiedLength {
fsp = target.Decimal
fsp = int8(target.Decimal)
}
var (
ret Datum
......@@ -1019,7 +1019,7 @@ func (d *Datum) convertToMysqlDuration(sc *stmtctx.StatementContext, target *Fie
tp := target.Tp
fsp := DefaultFsp
if target.Decimal != UnspecifiedLength {
fsp = target.Decimal
fsp = int8(target.Decimal)
}
var ret Datum
switch d.k {
......
......@@ -83,10 +83,7 @@ func IsTemporalWithDate(tp byte) bool {
// IsBinaryStr returns a boolean indicating
// whether the field type is a binary string type.
func IsBinaryStr(ft *FieldType) bool {
if ft.Collate == charset.CollationBin && IsString(ft.Tp) {
return true
}
return false
return ft.Collate == charset.CollationBin && IsString(ft.Tp)
}
// IsNonBinaryStr returns a boolean indicating
......
......@@ -221,18 +221,18 @@ func DefaultTypeForValue(value interface{}, tp *FieldType) {
case mysql.TypeDatetime, mysql.TypeTimestamp:
tp.Flen = mysql.MaxDatetimeWidthNoFsp
if x.Fsp > DefaultFsp { // consider point('.') and the fractional part.
tp.Flen += x.Fsp + 1
tp.Flen += int(x.Fsp) + 1
}
tp.Decimal = x.Fsp
tp.Decimal = int(x.Fsp)
}
SetBinChsClnFlag(tp)
case Duration:
tp.Tp = mysql.TypeDuration
tp.Flen = len(x.String())
if x.Fsp > DefaultFsp { // consider point('.') and the fractional part.
tp.Flen = x.Fsp + 1
tp.Flen = int(x.Fsp) + 1
}
tp.Decimal = x.Fsp
tp.Decimal = int(x.Fsp)
SetBinChsClnFlag(tp)
case *MyDecimal:
tp.Tp = mysql.TypeNewDecimal
......
......@@ -23,46 +23,46 @@ import (
const (
// UnspecifiedFsp is the unspecified fractional seconds part.
UnspecifiedFsp = -1
UnspecifiedFsp = int8(-1)
// MaxFsp is the maximum digit of fractional seconds part.
MaxFsp = 6
MaxFsp = int8(6)
// MinFsp is the minimum digit of fractional seconds part.
MinFsp = 0
MinFsp = int8(0)
// DefaultFsp is the default digit of fractional seconds part.
// MySQL use 0 as the default Fsp.
DefaultFsp = 0
DefaultFsp = int8(0)
)
// CheckFsp checks whether fsp is in valid range.
func CheckFsp(fsp int) (int, error) {
if fsp == UnspecifiedFsp {
func CheckFsp(fsp int) (int8, error) {
if fsp == int(UnspecifiedFsp) {
return DefaultFsp, nil
}
if fsp < MinFsp || fsp > MaxFsp {
if fsp < int(MinFsp) || fsp > int(MaxFsp) {
return DefaultFsp, errors.Errorf("Invalid fsp %d", fsp)
}
return fsp, nil
return int8(fsp), nil
}
// ParseFrac parses the input string according to fsp, returns the microsecond,
// and also a bool value to indicate overflow. e.g.:
// "999" fsp=2 will overflow.
func ParseFrac(s string, fsp int) (v int, overflow bool, err error) {
func ParseFrac(s string, fsp int8) (v int, overflow bool, err error) {
if len(s) == 0 {
return 0, false, nil
}
fsp, err = CheckFsp(fsp)
fsp, err = CheckFsp(int(fsp))
if err != nil {
return 0, false, errors.Trace(err)
}
if fsp >= len(s) {
if int(fsp) >= len(s) {
tmp, e := strconv.ParseInt(s, 10, 64)
if e != nil {
return 0, false, errors.Trace(e)
}
v = int(float64(tmp) * math.Pow10(MaxFsp-len(s)))
v = int(float64(tmp) * math.Pow10(int(MaxFsp)-len(s)))
return
}
......@@ -73,7 +73,7 @@ func ParseFrac(s string, fsp int) (v int, overflow bool, err error) {
}
tmp = (tmp + 5) / 10
if float64(tmp) >= math.Pow10(fsp) {
if float64(tmp) >= math.Pow10(int(fsp)) {
// overflow
return 0, true, nil
}
......@@ -82,7 +82,7 @@ func ParseFrac(s string, fsp int) (v int, overflow bool, err error) {
// 1236 round 3 -> 124 -> 124000
// 0312 round 2 -> 3 -> 30000
// 999 round 2 -> 100 -> overflow
v = int(float64(tmp) * math.Pow10(MaxFsp-fsp))
v = int(float64(tmp) * math.Pow10(int(MaxFsp-fsp)))
return
}
......
......@@ -1088,7 +1088,7 @@ with the correct -1/0/+1 result
then the encoded value is not memory comparable.
NOTE
the buffer is assumed to be of the size decimalBinSize(precision, frac)
the buffer is assumed to be of the size DecimalBinSize(precision, frac)
RETURN VALUE
bin - binary value
......@@ -1334,7 +1334,7 @@ func (d *MyDecimal) FromBin(bin []byte, precision, frac int) (binSize int, err e
if bin[binIdx]&0x80 > 0 {
mask = 0
}
binSize = decimalBinSize(precision, frac)
binSize = DecimalBinSize(precision, frac)
dCopy := make([]byte, 40)
dCopy = dCopy[:binSize]
copy(dCopy, bin)
......@@ -1409,8 +1409,8 @@ func (d *MyDecimal) FromBin(bin []byte, precision, frac int) (binSize int, err e
return binSize, err
}
// decimalBinSize returns the size of array to hold a binary representation of a decimal.
func decimalBinSize(precision, frac int) int {
// DecimalBinSize returns the size of array to hold a binary representation of a decimal.
func DecimalBinSize(precision, frac int) int {
digitsInt := precision - frac
wordsInt := digitsInt / digitsPerWord
wordsFrac := frac / digitsPerWord
......@@ -2242,7 +2242,7 @@ func DecimalPeak(b []byte) (int, error) {
}
precision := int(b[0])
frac := int(b[1])
return decimalBinSize(precision, frac) + 2, nil
return DecimalBinSize(precision, frac) + 2, nil
}
// NewDecFromInt creates a MyDecimal from int.
......
......@@ -16,19 +16,27 @@ package types
import (
gotime "time"
"fmt"
"github.com/pingcap/errors"
)
// MysqlTime is the internal struct type for Time.
// The order of the attributes is refined to reduce the memory overhead
// considering memory alignment.
type MysqlTime struct {
// When it's type is Time, HH:MM:SS may be 839:59:59, so use uint32 to avoid overflow.
hour uint32 // hour <= 23
microsecond uint32
year uint16 // year <= 9999
month uint8 // month <= 12
day uint8 // day <= 31
// When it's type is Time, HH:MM:SS may be 839:59:59, so use int to avoid overflow.
hour int // hour <= 23
minute uint8 // minute <= 59
second uint8 // second <= 59
microsecond uint32
}
// String implements fmt.Stringer.
func (t MysqlTime) String() string {
return fmt.Sprintf("{%d %d %d %d %d %d %d}", t.year, t.month, t.day, t.hour, t.minute, t.second, t.microsecond)
}
// Year returns the year value.
......@@ -173,7 +181,7 @@ func AddDate(year, month, day int64, ot gotime.Time) (nt gotime.Time) {
}
func calcTimeFromSec(to *MysqlTime, seconds, microseconds int) {
to.hour = seconds / 3600
to.hour = uint32(seconds / 3600)
seconds = seconds % 3600
to.minute = uint8(seconds / 60)
to.second = uint8(seconds % 60)
......
......@@ -203,13 +203,13 @@ func FromGoTime(t gotime.Time) MysqlTime {
// FromDate makes a internal time representation from the given date.
func FromDate(year int, month int, day int, hour int, minute int, second int, microsecond int) MysqlTime {
return MysqlTime{
uint16(year),
uint8(month),
uint8(day),
hour,
uint8(minute),
uint8(second),
uint32(microsecond),
year: uint16(year),
month: uint8(month),
day: uint8(day),
hour: uint32(hour),
minute: uint8(minute),
second: uint8(second),
microsecond: uint32(microsecond),
}
}
......@@ -225,11 +225,11 @@ type Time struct {
Type uint8
// Fsp is short for Fractional Seconds Precision.
// See http://dev.mysql.com/doc/refman/5.7/en/fractional-seconds.html
Fsp int
Fsp int8
}
// MaxMySQLTime returns Time with maximum mysql time type.
func MaxMySQLTime(fsp int) Time {
func MaxMySQLTime(fsp int8) Time {
return Time{Time: FromDate(0, 0, 0, TimeMaxHour, TimeMaxMinute, TimeMaxSecond, 0), Type: mysql.TypeDuration, Fsp: fsp}
}
......@@ -309,7 +309,7 @@ func (t Time) ToNumber() *MyDecimal {
if t.Fsp > 0 {
s1 := fmt.Sprintf("%s.%06d", s, t.Time.Microsecond())
s = s1[:len(s)+t.Fsp+1]
s = s1[:len(s)+int(t.Fsp)+1]
}
// We skip checking error here because time formatted string can be parsed certainly.
......@@ -392,19 +392,19 @@ func (t Time) CompareString(sc *stmtctx.StatementContext, str string) (int, erro
}
// roundTime rounds the time value according to digits count specified by fsp.
func roundTime(t gotime.Time, fsp int) gotime.Time {
d := gotime.Duration(math.Pow10(9 - fsp))
func roundTime(t gotime.Time, fsp int8) gotime.Time {
d := gotime.Duration(math.Pow10(9 - int(fsp)))
return t.Round(d)
}
// RoundFrac rounds the fraction part of a time-type value according to `fsp`.
func (t Time) RoundFrac(sc *stmtctx.StatementContext, fsp int) (Time, error) {
func (t Time) RoundFrac(sc *stmtctx.StatementContext, fsp int8) (Time, error) {
if t.Type == mysql.TypeDate || t.IsZero() {
// date type has no fsp
return t, nil
}
fsp, err := CheckFsp(fsp)
fsp, err := CheckFsp(int(fsp))
if err != nil {
return t, errors.Trace(err)
}
......@@ -438,8 +438,9 @@ func (t Time) RoundFrac(sc *stmtctx.StatementContext, fsp int) (Time, error) {
}
// GetFsp gets the fsp of a string.
func GetFsp(s string) (fsp int) {
func GetFsp(s string) int8 {
index := GetFracIndex(s)
var fsp int
if index < 0 {
fsp = 0
} else {
......@@ -451,7 +452,7 @@ func GetFsp(s string) (fsp int) {
} else if fsp > 6 {
fsp = 6
}
return
return int8(fsp)
}
// GetFracIndex finds the last '.' for get fracStr, index = -1 means fracStr not found.
......@@ -474,22 +475,22 @@ func GetFracIndex(s string) (index int) {
// We will use the “round half up” rule, e.g, >= 0.5 -> 1, < 0.5 -> 0,
// so 2011:11:11 10:10:10.888888 round 0 -> 2011:11:11 10:10:11
// and 2011:11:11 10:10:10.111111 round 0 -> 2011:11:11 10:10:10
func RoundFrac(t gotime.Time, fsp int) (gotime.Time, error) {
_, err := CheckFsp(fsp)
func RoundFrac(t gotime.Time, fsp int8) (gotime.Time, error) {
_, err := CheckFsp(int(fsp))
if err != nil {
return t, errors.Trace(err)
}
return t.Round(gotime.Duration(math.Pow10(9-fsp)) * gotime.Nanosecond), nil
return t.Round(gotime.Duration(math.Pow10(9-int(fsp))) * gotime.Nanosecond), nil
}
// TruncateFrac truncates fractional seconds precision with new fsp and returns a new one.
// 2011:11:11 10:10:10.888888 round 0 -> 2011:11:11 10:10:10
// 2011:11:11 10:10:10.111111 round 0 -> 2011:11:11 10:10:10
func TruncateFrac(t gotime.Time, fsp int) (gotime.Time, error) {
if _, err := CheckFsp(fsp); err != nil {
func TruncateFrac(t gotime.Time, fsp int8) (gotime.Time, error) {
if _, err := CheckFsp(int(fsp)); err != nil {
return t, err
}
return t.Truncate(gotime.Duration(math.Pow10(9-fsp)) * gotime.Nanosecond), nil
return t.Truncate(gotime.Duration(math.Pow10(9-int(fsp))) * gotime.Nanosecond), nil
}
// ToPackedUint encodes Time to a packed uint64 value.
......@@ -683,7 +684,7 @@ func splitDateTime(format string) (seps []string, fracStr string) {
}
// See https://dev.mysql.com/doc/refman/5.7/en/date-and-time-literals.html.
func parseDatetime(sc *stmtctx.StatementContext, str string, fsp int, isFloat bool) (Time, error) {
func parseDatetime(sc *stmtctx.StatementContext, str string, fsp int8, isFloat bool) (Time, error) {
// Try to split str with delimiter.
// TODO: only punctuation can be the delimiter for date parts or time parts.
// But only space and T can be the delimiter between the date and time part.
......@@ -896,7 +897,7 @@ type Duration struct {
gotime.Duration
// Fsp is short for Fractional Seconds Precision.
// See http://dev.mysql.com/doc/refman/5.7/en/fractional-seconds.html
Fsp int
Fsp int8
}
//Add adds d to d, returns a duration value.
......@@ -1002,8 +1003,8 @@ func (d Duration) ConvertToTime(sc *stmtctx.StatementContext, tp uint8) (Time, e
// We will use the “round half up” rule, e.g, >= 0.5 -> 1, < 0.5 -> 0,
// so 10:10:10.999999 round 0 -> 10:10:11
// and 10:10:10.000000 round 0 -> 10:10:10
func (d Duration) RoundFrac(fsp int) (Duration, error) {
fsp, err := CheckFsp(fsp)
func (d Duration) RoundFrac(fsp int8) (Duration, error) {
fsp, err := CheckFsp(int(fsp))
if err != nil {
return d, errors.Trace(err)
}
......@@ -1013,7 +1014,7 @@ func (d Duration) RoundFrac(fsp int) (Duration, error) {
}
n := gotime.Date(0, 0, 0, 0, 0, 0, 0, gotime.Local)
nd := n.Add(d.Duration).Round(gotime.Duration(math.Pow10(9-fsp)) * gotime.Nanosecond).Sub(n)
nd := n.Add(d.Duration).Round(gotime.Duration(math.Pow10(9-int(fsp))) * gotime.Nanosecond).Sub(n)
return Duration{Duration: nd, Fsp: fsp}, nil
}
......@@ -1072,7 +1073,7 @@ func (d Duration) MicroSecond() int {
// ParseDuration parses the time form a formatted string with a fractional seconds part,
// returns the duration type Time value.
// See http://dev.mysql.com/doc/refman/5.7/en/fractional-seconds.html
func ParseDuration(sc *stmtctx.StatementContext, str string, fsp int) (Duration, error) {
func ParseDuration(sc *stmtctx.StatementContext, str string, fsp int8) (Duration, error) {
var (
day, hour, minute, second int
err error
......@@ -1081,7 +1082,7 @@ func ParseDuration(sc *stmtctx.StatementContext, str string, fsp int) (Duration,
origStr = str
)
fsp, err = CheckFsp(fsp)
fsp, err = CheckFsp(int(fsp))
if err != nil {
return ZeroDuration, errors.Trace(err)
}
......@@ -1336,17 +1337,17 @@ func parseDateTimeFromNum(sc *stmtctx.StatementContext, num int64) (Time, error)
// The valid datetime range is from '1000-01-01 00:00:00.000000' to '9999-12-31 23:59:59.999999'.
// The valid timestamp range is from '1970-01-01 00:00:01.000000' to '2038-01-19 03:14:07.999999'.
// The valid date range is from '1000-01-01' to '9999-12-31'
func ParseTime(sc *stmtctx.StatementContext, str string, tp byte, fsp int) (Time, error) {
func ParseTime(sc *stmtctx.StatementContext, str string, tp byte, fsp int8) (Time, error) {
return parseTime(sc, str, tp, fsp, false)
}
// ParseTimeFromFloatString is similar to ParseTime, except that it's used to parse a float converted string.
func ParseTimeFromFloatString(sc *stmtctx.StatementContext, str string, tp byte, fsp int) (Time, error) {
func ParseTimeFromFloatString(sc *stmtctx.StatementContext, str string, tp byte, fsp int8) (Time, error) {
return parseTime(sc, str, tp, fsp, true)
}
func parseTime(sc *stmtctx.StatementContext, str string, tp byte, fsp int, isFloat bool) (Time, error) {
fsp, err := CheckFsp(fsp)
func parseTime(sc *stmtctx.StatementContext, str string, tp byte, fsp int8, isFloat bool) (Time, error) {
fsp, err := CheckFsp(int(fsp))
if err != nil {
return Time{Time: ZeroTime, Type: tp}, errors.Trace(err)
}
......@@ -1381,8 +1382,8 @@ func ParseDate(sc *stmtctx.StatementContext, str string) (Time, error) {
// ParseTimeFromNum parses a formatted int64,
// returns the value which type is tp.
func ParseTimeFromNum(sc *stmtctx.StatementContext, num int64, tp byte, fsp int) (Time, error) {
fsp, err := CheckFsp(fsp)
func ParseTimeFromNum(sc *stmtctx.StatementContext, num int64, tp byte, fsp int8) (Time, error) {
fsp, err := CheckFsp(int(fsp))
if err != nil {
return Time{Time: ZeroTime, Type: tp}, errors.Trace(err)
}
......@@ -1649,6 +1650,7 @@ func parseSingleTimeValue(unit string, format string, strictCheck bool) (int64,
if unit != "SECOND" {
err = ErrTruncatedWrongValue.GenWithStackByArgs(format)
}
dv *= sign
}
switch strings.ToUpper(unit) {
case "MICROSECOND":
......@@ -1763,7 +1765,7 @@ func parseTimeValue(format string, index, cnt int) (int64, int64, int64, int64,
if err != nil {
return 0, 0, 0, 0, ErrIncorrectDatetimeValue.GenWithStackByArgs(originalFmt)
}
microseconds, err := strconv.ParseInt(alignFrac(fields[MicrosecondIndex], MaxFsp), 10, 64)
microseconds, err := strconv.ParseInt(alignFrac(fields[MicrosecondIndex], int(MaxFsp)), 10, 64)
if err != nil {
return 0, 0, 0, 0, ErrIncorrectDatetimeValue.GenWithStackByArgs(originalFmt)
}
......@@ -1987,37 +1989,38 @@ func (t Time) convertDateFormat(b rune, buf *bytes.Buffer) error {
}
buf.WriteString(MonthNames[m-1])
case 'm':
fmt.Fprintf(buf, "%02d", t.Time.Month())
buf.WriteString(FormatIntWidthN(t.Time.Month(), 2))
case 'c':
fmt.Fprintf(buf, "%d", t.Time.Month())
buf.WriteString(strconv.FormatInt(int64(t.Time.Month()), 10))
case 'D':
fmt.Fprintf(buf, "%d%s", t.Time.Day(), abbrDayOfMonth(t.Time.Day()))
buf.WriteString(strconv.FormatInt(int64(t.Time.Day()), 10))
buf.WriteString(abbrDayOfMonth(t.Time.Day()))
case 'd':
fmt.Fprintf(buf, "%02d", t.Time.Day())
buf.WriteString(FormatIntWidthN(t.Time.Day(), 2))
case 'e':
fmt.Fprintf(buf, "%d", t.Time.Day())
buf.WriteString(strconv.FormatInt(int64(t.Time.Day()), 10))
case 'j':
fmt.Fprintf(buf, "%03d", t.Time.YearDay())
case 'H':
fmt.Fprintf(buf, "%02d", t.Time.Hour())
buf.WriteString(FormatIntWidthN(t.Time.Hour(), 2))
case 'k':
fmt.Fprintf(buf, "%d", t.Time.Hour())
buf.WriteString(strconv.FormatInt(int64(t.Time.Hour()), 10))
case 'h', 'I':
t := t.Time.Hour()
if t%12 == 0 {
fmt.Fprintf(buf, "%02d", 12)
buf.WriteString("12")
} else {
fmt.Fprintf(buf, "%02d", t%12)
buf.WriteString(FormatIntWidthN(t%12, 2))
}
case 'l':
t := t.Time.Hour()
if t%12 == 0 {
fmt.Fprintf(buf, "%d", 12)
buf.WriteString("12")
} else {
fmt.Fprintf(buf, "%d", t%12)
buf.WriteString(strconv.FormatInt(int64(t%12), 10))
}
case 'i':
fmt.Fprintf(buf, "%02d", t.Time.Minute())
buf.WriteString(FormatIntWidthN(t.Time.Minute(), 2))
case 'p':
hour := t.Time.Hour()
if hour/12%2 == 0 {
......@@ -2041,46 +2044,46 @@ func (t Time) convertDateFormat(b rune, buf *bytes.Buffer) error {
case 'T':
fmt.Fprintf(buf, "%02d:%02d:%02d", t.Time.Hour(), t.Time.Minute(), t.Time.Second())
case 'S', 's':
fmt.Fprintf(buf, "%02d", t.Time.Second())
buf.WriteString(FormatIntWidthN(t.Time.Second(), 2))
case 'f':
fmt.Fprintf(buf, "%06d", t.Time.Microsecond())
case 'U':
w := t.Time.Week(0)
fmt.Fprintf(buf, "%02d", w)
buf.WriteString(FormatIntWidthN(w, 2))
case 'u':
w := t.Time.Week(1)
fmt.Fprintf(buf, "%02d", w)
buf.WriteString(FormatIntWidthN(w, 2))
case 'V':
w := t.Time.Week(2)
fmt.Fprintf(buf, "%02d", w)
buf.WriteString(FormatIntWidthN(w, 2))
case 'v':
_, w := t.Time.YearWeek(3)
fmt.Fprintf(buf, "%02d", w)
buf.WriteString(FormatIntWidthN(w, 2))
case 'a':
weekday := t.Time.Weekday()
buf.WriteString(abbrevWeekdayName[weekday])
case 'W':
buf.WriteString(t.Time.Weekday().String())
case 'w':
fmt.Fprintf(buf, "%d", t.Time.Weekday())
buf.WriteString(strconv.FormatInt(int64(t.Time.Weekday()), 10))
case 'X':
year, _ := t.Time.YearWeek(2)
if year < 0 {
fmt.Fprintf(buf, "%v", uint64(math.MaxUint32))
buf.WriteString(strconv.FormatUint(uint64(math.MaxUint32), 10))
} else {
fmt.Fprintf(buf, "%04d", year)
buf.WriteString(FormatIntWidthN(year, 4))
}
case 'x':
year, _ := t.Time.YearWeek(3)
if year < 0 {
fmt.Fprintf(buf, "%v", uint64(math.MaxUint32))
buf.WriteString(strconv.FormatUint(uint64(math.MaxUint32), 10))
} else {
fmt.Fprintf(buf, "%04d", year)
buf.WriteString(FormatIntWidthN(year, 4))
}
case 'Y':
fmt.Fprintf(buf, "%04d", t.Time.Year())
buf.WriteString(FormatIntWidthN(t.Time.Year(), 4))
case 'y':
str := fmt.Sprintf("%04d", t.Time.Year())
str := FormatIntWidthN(t.Time.Year(), 4)
buf.WriteString(str[2:])
default:
buf.WriteRune(b)
......@@ -2089,6 +2092,19 @@ func (t Time) convertDateFormat(b rune, buf *bytes.Buffer) error {
return nil
}
// FormatIntWidthN formats num in decimal, left-padding the result with '0'
// until it is at least n characters wide. A number that already has n or
// more digits is returned unpadded.
func FormatIntWidthN(num, n int) string {
	numStr := strconv.FormatInt(int64(num), 10)
	pad := n - len(numStr)
	if pad <= 0 {
		return numStr
	}
	buf := make([]byte, 0, n)
	for i := 0; i < pad; i++ {
		buf = append(buf, '0')
	}
	return string(append(buf, numStr...))
}
func abbrDayOfMonth(day int) string {
var str string
switch day {
......@@ -2338,7 +2354,7 @@ func hour24TwoDigits(t *MysqlTime, input string, ctx map[string]int) (string, bo
if !succ || v >= 24 {
return input, false
}
t.hour = v
t.hour = uint32(v)
return input[2:], true
}
......@@ -2391,9 +2407,9 @@ func time12Hour(t *MysqlTime, input string, ctx map[string]int) (string, bool) {
remain := skipWhiteSpace(input[8:])
switch {
case strings.HasPrefix(remain, "AM"):
t.hour = hour
t.hour = uint32(hour)
case strings.HasPrefix(remain, "PM"):
t.hour = hour + 12
t.hour = uint32(hour + 12)
default:
return input, false
}
......@@ -2426,7 +2442,7 @@ func time24Hour(t *MysqlTime, input string, ctx map[string]int) (string, bool) {
return input, false
}
t.hour = hour
t.hour = uint32(hour)
t.minute = uint8(minute)
t.second = uint8(second)
return input[8:], true
......@@ -2507,7 +2523,7 @@ func hour24Numeric(t *MysqlTime, input string, ctx map[string]int) (string, bool
if !ok || v > 23 {
return input, false
}
t.hour = v
t.hour = uint32(v)
ctx["%H"] = v
return input[length:], true
}
......@@ -2521,7 +2537,7 @@ func hour12Numeric(t *MysqlTime, input string, ctx map[string]int) (string, bool
if !ok || v > 12 || v == 0 {
return input, false
}
t.hour = v
t.hour = uint32(v)
return input[length:], true
}
......
......@@ -26,8 +26,10 @@ import (
"go.uber.org/zap"
)
type commitDetailCtxKeyType struct{}
// CommitDetailCtxKey presents CommitDetail info key in context.
const CommitDetailCtxKey = "commitDetail"
var CommitDetailCtxKey = commitDetailCtxKeyType{}
// ExecDetails contains execution detail information.
type ExecDetails struct {
......
......@@ -24,15 +24,8 @@ type MutableString string
// String converts slice to MutableString without copy.
// The MutableString can be converts to string without copy.
// Use it at your own risk.
func String(b []byte) (s MutableString) {
if len(b) == 0 {
return ""
}
pbytes := (*reflect.SliceHeader)(unsafe.Pointer(&b))
pstring := (*reflect.StringHeader)(unsafe.Pointer(&s))
pstring.Data = pbytes.Data
pstring.Len = pbytes.Len
return
func String(b []byte) MutableString {
return *(*MutableString)(unsafe.Pointer(&b))
}
// Slice converts string to slice without copy.
......
// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package logutil
import (
"bytes"
"encoding/hex"
"fmt"
"io"
"reflect"
"strings"
"github.com/golang/protobuf/proto"
)
// Hex defines a fmt.Stringer for proto.Message.
// We can't define the String() method on proto.Message, but we can wrap it.
// Rendering is deferred until String() is called, and []byte fields are
// hex-encoded by the underlying prettyPrint walk.
func Hex(msg proto.Message) fmt.Stringer {
	return hexStringer{msg}
}
// hexStringer embeds a proto.Message solely so that it can carry a
// custom String() implementation; see Hex.
type hexStringer struct {
	proto.Message
}
// String renders the wrapped message via prettyPrint, which hex-encodes
// any []byte fields and skips proto-internal XXX_* fields.
func (h hexStringer) String() string {
	var buf bytes.Buffer
	prettyPrint(&buf, reflect.ValueOf(h.Message))
	return buf.String()
}
func prettyPrint(w io.Writer, val reflect.Value) {
tp := val.Type()
switch val.Kind() {
case reflect.Slice:
elemType := tp.Elem()
if elemType.Kind() == reflect.Uint8 {
fmt.Fprintf(w, "%s", hex.EncodeToString(val.Bytes()))
} else {
fmt.Fprintf(w, "%s", val.Interface())
}
case reflect.Struct:
fmt.Fprintf(w, "{")
for i := 0; i < val.NumField(); i++ {
fv := val.Field(i)
ft := tp.Field(i)
if strings.HasPrefix(ft.Name, "XXX_") {
continue
}
if i != 0 {
fmt.Fprintf(w, " ")
}
fmt.Fprintf(w, "%s:", ft.Name)
prettyPrint(w, fv)
}
fmt.Fprintf(w, "}")
case reflect.Ptr:
if val.IsNil() {
fmt.Fprintf(w, "%v", val.Interface())
} else {
prettyPrint(w, reflect.Indirect(val))
}
default:
fmt.Fprintf(w, "%v", val.Interface())
}
}
......@@ -24,6 +24,8 @@ import (
"strings"
"time"
"github.com/opentracing/opentracing-go"
tlog "github.com/opentracing/opentracing-go/log"
"github.com/pingcap/errors"
zaplog "github.com/pingcap/log"
log "github.com/sirupsen/logrus"
......@@ -317,9 +319,9 @@ func SetLevel(level string) error {
return nil
}
type ctxKeyType int
type ctxLogKeyType struct{}
const ctxLogKey ctxKeyType = iota
var ctxLogKey = ctxLogKeyType{}
// Logger gets a contextual logger from current context.
// contextual logger will output common fields from context.
......@@ -356,3 +358,27 @@ func WithKeyValue(ctx context.Context, key, value string) context.Context {
}
return context.WithValue(ctx, ctxLogKey, logger.With(zap.String(key, value)))
}
// TraceEventKey presents the TraceEventKey in span log.
const TraceEventKey = "event"
// Event records event in current tracing span.
func Event(ctx context.Context, event string) {
if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil {
span.LogFields(tlog.String(TraceEventKey, event))
}
}
// Eventf records event in current tracing span with format support.
func Eventf(ctx context.Context, format string, args ...interface{}) {
if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil {
span.LogFields(tlog.String(TraceEventKey, fmt.Sprintf(format, args...)))
}
}
// SetTag sets tag kv-pair in current tracing span
func SetTag(ctx context.Context, key string, value interface{}) {
if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil {
span.SetTag(key, value)
}
}
......@@ -98,6 +98,18 @@
"revision": "e2ffdb16a802fe2bb95e2e35ff34f0e53aeef34f",
"revisionTime": "2018-05-06T08:24:08Z"
},
{
"checksumSHA1": "6EIQaeaWECn3zlechdGkqmIKld4=",
"path": "github.com/opentracing/opentracing-go",
"revision": "135aa78c6f95b4a199daf2f0470d231136cbbd0c",
"revisionTime": "2019-07-04T17:58:13Z"
},
{
"checksumSHA1": "tnkdNJbJxNKuPZMWapP1xhKIIGw=",
"path": "github.com/opentracing/opentracing-go/log",
"revision": "135aa78c6f95b4a199daf2f0470d231136cbbd0c",
"revisionTime": "2019-07-04T17:58:13Z"
},
{
"checksumSHA1": "M0UdRpCVjXiuie7PfJQPZ/V1pVI=",
"path": "github.com/percona/go-mysql/query",
......@@ -117,118 +129,118 @@
"revisionTime": "2019-03-07T07:54:52Z"
},
{
"checksumSHA1": "8XbJFHOYoZvqf3Fq+J4l90DiGlM=",
"checksumSHA1": "RK5vW/hPsPk0JDi1atCWaUR8iFo=",
"path": "github.com/pingcap/parser",
"revision": "5238015a66f827e0d0d01e9a1dc19e4e3338c5bf",
"revisionTime": "2019-07-30T09:13:57Z"
"revision": "41d48df058643bdb2e24c64b1685c4e9ff6608f8",
"revisionTime": "2019-08-22T02:41:27Z"
},
{
"checksumSHA1": "a+3CnBMiJTPiAKhRzxW5ybMR6IY=",
"checksumSHA1": "WYPpAYqE/lpu4PBR9TCn6UigcTg=",
"path": "github.com/pingcap/parser/ast",
"revision": "5238015a66f827e0d0d01e9a1dc19e4e3338c5bf",
"revisionTime": "2019-07-30T09:13:57Z"
"revision": "41d48df058643bdb2e24c64b1685c4e9ff6608f8",
"revisionTime": "2019-08-22T02:41:27Z"
},
{
"checksumSHA1": "xiv40YqnvHcbIhaEzJqjh5K7ehM=",
"path": "github.com/pingcap/parser/auth",
"revision": "5238015a66f827e0d0d01e9a1dc19e4e3338c5bf",
"revisionTime": "2019-07-30T09:13:57Z"
"revision": "41d48df058643bdb2e24c64b1685c4e9ff6608f8",
"revisionTime": "2019-08-22T02:41:27Z"
},
{
"checksumSHA1": "EvDXpplklIXmKqLclzWzaN/uHKQ=",
"path": "github.com/pingcap/parser/charset",
"revision": "5238015a66f827e0d0d01e9a1dc19e4e3338c5bf",
"revisionTime": "2019-07-30T09:13:57Z"
"revision": "41d48df058643bdb2e24c64b1685c4e9ff6608f8",
"revisionTime": "2019-08-22T02:41:27Z"
},
{
"checksumSHA1": "Aao6Mul/qqogOwPwM2arBKZkYZs=",
"path": "github.com/pingcap/parser/format",
"revision": "5238015a66f827e0d0d01e9a1dc19e4e3338c5bf",
"revisionTime": "2019-07-30T09:13:57Z"
"revision": "41d48df058643bdb2e24c64b1685c4e9ff6608f8",
"revisionTime": "2019-08-22T02:41:27Z"
},
{
"checksumSHA1": "f14oFKfX0pSkUM9w9m94eZG5vEw=",
"checksumSHA1": "GAJ7IUg0t8DCKJbJQxJLkklEj2E=",
"path": "github.com/pingcap/parser/model",
"revision": "5238015a66f827e0d0d01e9a1dc19e4e3338c5bf",
"revisionTime": "2019-07-30T09:13:57Z"
"revision": "41d48df058643bdb2e24c64b1685c4e9ff6608f8",
"revisionTime": "2019-08-22T02:41:27Z"
},
{
"checksumSHA1": "JcR/7pmocSZK4K6tDK2zO54DJWg=",
"checksumSHA1": "WMkc5bRIYYfQdu9lBlVGyKTGIyg=",
"path": "github.com/pingcap/parser/mysql",
"revision": "5238015a66f827e0d0d01e9a1dc19e4e3338c5bf",
"revisionTime": "2019-07-30T09:13:57Z"
"revision": "41d48df058643bdb2e24c64b1685c4e9ff6608f8",
"revisionTime": "2019-08-22T02:41:27Z"
},
{
"checksumSHA1": "olapD16WCMBU9vrA5PtlERGFfXw=",
"path": "github.com/pingcap/parser/opcode",
"revision": "5238015a66f827e0d0d01e9a1dc19e4e3338c5bf",
"revisionTime": "2019-07-30T09:13:57Z"
"revision": "41d48df058643bdb2e24c64b1685c4e9ff6608f8",
"revisionTime": "2019-08-22T02:41:27Z"
},
{
"checksumSHA1": "L6rzy3sJU1RPf7AkJN+0zcwW/YY=",
"path": "github.com/pingcap/parser/terror",
"revision": "5238015a66f827e0d0d01e9a1dc19e4e3338c5bf",
"revisionTime": "2019-07-30T09:13:57Z"
"revision": "41d48df058643bdb2e24c64b1685c4e9ff6608f8",
"revisionTime": "2019-08-22T02:41:27Z"
},
{
"checksumSHA1": "nWkzWKjheFi0/Ov/0rhc4CUMZLo=",
"checksumSHA1": "u1Lmm4Fa3su4ElZMN4w0hPzFZl4=",
"path": "github.com/pingcap/parser/types",
"revision": "5238015a66f827e0d0d01e9a1dc19e4e3338c5bf",
"revisionTime": "2019-07-30T09:13:57Z"
"revision": "41d48df058643bdb2e24c64b1685c4e9ff6608f8",
"revisionTime": "2019-08-22T02:41:27Z"
},
{
"checksumSHA1": "KHvXxhiZAHkE8APuMlaAXDOX6eU=",
"checksumSHA1": "cbEwgTkDlGpIIIqmNAuWrxsUwKw=",
"path": "github.com/pingcap/tidb/sessionctx/stmtctx",
"revision": "13778fe51b713f005e1de848e7994f0a8031678f",
"revisionTime": "2019-07-31T03:50:10Z"
"revision": "6f76bbe1f75e01dc40f2d0478d484aa9df2b284c",
"revisionTime": "2019-08-22T02:51:25Z"
},
{
"checksumSHA1": "6pIJUxO/VoKsIdWibgApSW91MRg=",
"checksumSHA1": "erB64jt/DCEoRs+KrywwHGJG2/k=",
"path": "github.com/pingcap/tidb/types",
"revision": "13778fe51b713f005e1de848e7994f0a8031678f",
"revisionTime": "2019-07-31T03:50:10Z"
"revision": "6f76bbe1f75e01dc40f2d0478d484aa9df2b284c",
"revisionTime": "2019-08-22T02:51:25Z"
},
{
"checksumSHA1": "gKBD02jzm/d7gn2kX7pXLi+M2ZY=",
"path": "github.com/pingcap/tidb/types/json",
"revision": "13778fe51b713f005e1de848e7994f0a8031678f",
"revisionTime": "2019-07-31T03:50:10Z"
"revision": "6f76bbe1f75e01dc40f2d0478d484aa9df2b284c",
"revisionTime": "2019-08-22T02:51:25Z"
},
{
"checksumSHA1": "45zWX5Q6D6aTEWtc4p/lbD9WD4o=",
"path": "github.com/pingcap/tidb/types/parser_driver",
"revision": "13778fe51b713f005e1de848e7994f0a8031678f",
"revisionTime": "2019-07-31T03:50:10Z"
"revision": "6f76bbe1f75e01dc40f2d0478d484aa9df2b284c",
"revisionTime": "2019-08-22T02:51:25Z"
},
{
"checksumSHA1": "dI3bZpUsujM1shEDvORNQj5FCN0=",
"checksumSHA1": "q5aOzPGCVZNkrru6v6+uImWm1eA=",
"path": "github.com/pingcap/tidb/util/execdetails",
"revision": "13778fe51b713f005e1de848e7994f0a8031678f",
"revisionTime": "2019-07-31T03:50:10Z"
"revision": "6f76bbe1f75e01dc40f2d0478d484aa9df2b284c",
"revisionTime": "2019-08-22T02:51:25Z"
},
{
"checksumSHA1": "RdbHgQWMHjRtKjqPcTX81k1V3sw=",
"checksumSHA1": "EFDXphVEI9ohnPky64fc+0lkRkw=",
"path": "github.com/pingcap/tidb/util/hack",
"revision": "13778fe51b713f005e1de848e7994f0a8031678f",
"revisionTime": "2019-07-31T03:50:10Z"
"revision": "6f76bbe1f75e01dc40f2d0478d484aa9df2b284c",
"revisionTime": "2019-08-22T02:51:25Z"
},
{
"checksumSHA1": "5DVxTRYAXrCkrtmTqi/fZfY/Zfk=",
"checksumSHA1": "fDbwnQlRCKnr5y6MY799BEd4WlQ=",
"path": "github.com/pingcap/tidb/util/logutil",
"revision": "13778fe51b713f005e1de848e7994f0a8031678f",
"revisionTime": "2019-07-31T03:50:10Z"
"revision": "6f76bbe1f75e01dc40f2d0478d484aa9df2b284c",
"revisionTime": "2019-08-22T02:51:25Z"
},
{
"checksumSHA1": "OveQu0ABBJmMEwmmthqSRQC2Ef0=",
"path": "github.com/pingcap/tidb/util/math",
"revision": "13778fe51b713f005e1de848e7994f0a8031678f",
"revisionTime": "2019-07-31T03:50:10Z"
"revision": "6f76bbe1f75e01dc40f2d0478d484aa9df2b284c",
"revisionTime": "2019-08-22T02:51:25Z"
},
{
"checksumSHA1": "loL2JgZDLapEOgfM/XUJI5f0HVs=",
"path": "github.com/pingcap/tidb/util/memory",
"revision": "13778fe51b713f005e1de848e7994f0a8031678f",
"revisionTime": "2019-07-31T03:50:10Z"
"revision": "6f76bbe1f75e01dc40f2d0478d484aa9df2b284c",
"revisionTime": "2019-08-22T02:51:25Z"
},
{
"checksumSHA1": "QPIBwDNUFF5Whrnd41S3mkKa4gQ=",
......@@ -485,68 +497,68 @@
{
"checksumSHA1": "aKn1oKcY74N8TRLm3Ayt7Q4bbI4=",
"path": "vitess.io/vitess/go/bytes2",
"revision": "f93c96c738d7d5bbbcdc03c828f8bf0a5ba16250",
"revisionTime": "2019-07-30T06:18:30Z"
"revision": "b5207f0d590a8b596a9b465ee378be182459300f",
"revisionTime": "2019-08-21T22:46:46Z"
},
{
"checksumSHA1": "bhE6CGQgZTIgLPp9lnvlKW/47xc=",
"path": "vitess.io/vitess/go/hack",
"revision": "f93c96c738d7d5bbbcdc03c828f8bf0a5ba16250",
"revisionTime": "2019-07-30T06:18:30Z"
"revision": "b5207f0d590a8b596a9b465ee378be182459300f",
"revisionTime": "2019-08-21T22:46:46Z"
},
{
"checksumSHA1": "2m7CYdLr+epKNLqWaGHkinr3k7w=",
"checksumSHA1": "8zh04M7R0JjzpE+w6/gxHdgZrJg=",
"path": "vitess.io/vitess/go/sqltypes",
"revision": "f93c96c738d7d5bbbcdc03c828f8bf0a5ba16250",
"revisionTime": "2019-07-30T06:18:30Z"
"revision": "b5207f0d590a8b596a9b465ee378be182459300f",
"revisionTime": "2019-08-21T22:46:46Z"
},
{
"checksumSHA1": "vAIRxI6MHsq3x1hLQwIyw5AvqtI=",
"path": "vitess.io/vitess/go/vt/log",
"revision": "f93c96c738d7d5bbbcdc03c828f8bf0a5ba16250",
"revisionTime": "2019-07-30T06:18:30Z"
"revision": "b5207f0d590a8b596a9b465ee378be182459300f",
"revisionTime": "2019-08-21T22:46:46Z"
},
{
"checksumSHA1": "/0K9CBbInkAhioqKX9ocBrJ6AKE=",
"checksumSHA1": "//MHnGEq9xApvIMdwQaRrQf5ZWo=",
"path": "vitess.io/vitess/go/vt/proto/binlogdata",
"revision": "f93c96c738d7d5bbbcdc03c828f8bf0a5ba16250",
"revisionTime": "2019-07-30T06:18:30Z"
"revision": "b5207f0d590a8b596a9b465ee378be182459300f",
"revisionTime": "2019-08-21T22:46:46Z"
},
{
"checksumSHA1": "87Zndvk3Y+M+QxMx3uFa0iSbvWY=",
"path": "vitess.io/vitess/go/vt/proto/query",
"revision": "f93c96c738d7d5bbbcdc03c828f8bf0a5ba16250",
"revisionTime": "2019-07-30T06:18:30Z"
"revision": "b5207f0d590a8b596a9b465ee378be182459300f",
"revisionTime": "2019-08-21T22:46:46Z"
},
{
"checksumSHA1": "xpcb9NfXMEeHhEPStbJntIfa5GQ=",
"path": "vitess.io/vitess/go/vt/proto/topodata",
"revision": "f93c96c738d7d5bbbcdc03c828f8bf0a5ba16250",
"revisionTime": "2019-07-30T06:18:30Z"
"revision": "b5207f0d590a8b596a9b465ee378be182459300f",
"revisionTime": "2019-08-21T22:46:46Z"
},
{
"checksumSHA1": "Bv8lucvoH9AnJSYiWX8MIrJl4zY=",
"path": "vitess.io/vitess/go/vt/proto/vtgate",
"revision": "f93c96c738d7d5bbbcdc03c828f8bf0a5ba16250",
"revisionTime": "2019-07-30T06:18:30Z"
"revision": "b5207f0d590a8b596a9b465ee378be182459300f",
"revisionTime": "2019-08-21T22:46:46Z"
},
{
"checksumSHA1": "qz32abYdmm9NfKTc++K0l1EvXXM=",
"path": "vitess.io/vitess/go/vt/proto/vtrpc",
"revision": "f93c96c738d7d5bbbcdc03c828f8bf0a5ba16250",
"revisionTime": "2019-07-30T06:18:30Z"
"revision": "b5207f0d590a8b596a9b465ee378be182459300f",
"revisionTime": "2019-08-21T22:46:46Z"
},
{
"checksumSHA1": "9Fy+Gm//g50wu30nICOF7HMq4po=",
"checksumSHA1": "0SPe/oMz50OW+yC+DGV4UJpjZ3Y=",
"path": "vitess.io/vitess/go/vt/sqlparser",
"revision": "f93c96c738d7d5bbbcdc03c828f8bf0a5ba16250",
"revisionTime": "2019-07-30T06:18:30Z"
"revision": "b5207f0d590a8b596a9b465ee378be182459300f",
"revisionTime": "2019-08-21T22:46:46Z"
},
{
"checksumSHA1": "z9+F/lA1Xrl5S16LKssUH8VL6hs=",
"path": "vitess.io/vitess/go/vt/vterrors",
"revision": "f93c96c738d7d5bbbcdc03c828f8bf0a5ba16250",
"revisionTime": "2019-07-30T06:18:30Z"
"revision": "b5207f0d590a8b596a9b465ee378be182459300f",
"revisionTime": "2019-08-21T22:46:46Z"
}
],
"rootPath": "github.com/XiaoMi/soar"
......
......@@ -167,6 +167,7 @@ var mysqlToType = map[int64]querypb.Type{
11: Time,
12: Datetime,
13: Year,
15: VarChar,
16: Bit,
245: TypeJSON,
246: Decimal,
......
......@@ -48,7 +48,7 @@ func (x OnDDLAction) String() string {
return proto.EnumName(OnDDLAction_name, int32(x))
}
func (OnDDLAction) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{0}
return fileDescriptor_binlogdata_db2d20dd0016de21, []int{0}
}
// VEventType enumerates the event types.
......@@ -73,6 +73,7 @@ const (
VEventType_FIELD VEventType = 13
VEventType_HEARTBEAT VEventType = 14
VEventType_VGTID VEventType = 15
VEventType_JOURNAL VEventType = 16
)
var VEventType_name = map[int32]string{
......@@ -92,6 +93,7 @@ var VEventType_name = map[int32]string{
13: "FIELD",
14: "HEARTBEAT",
15: "VGTID",
16: "JOURNAL",
}
var VEventType_value = map[string]int32{
"UNKNOWN": 0,
......@@ -110,13 +112,38 @@ var VEventType_value = map[string]int32{
"FIELD": 13,
"HEARTBEAT": 14,
"VGTID": 15,
"JOURNAL": 16,
}
func (x VEventType) String() string {
return proto.EnumName(VEventType_name, int32(x))
}
func (VEventType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{1}
return fileDescriptor_binlogdata_db2d20dd0016de21, []int{1}
}
// MigrationType specifies the type of migration for the Journal.
type MigrationType int32
const (
MigrationType_TABLES MigrationType = 0
MigrationType_SHARDS MigrationType = 1
)
var MigrationType_name = map[int32]string{
0: "TABLES",
1: "SHARDS",
}
var MigrationType_value = map[string]int32{
"TABLES": 0,
"SHARDS": 1,
}
func (x MigrationType) String() string {
return proto.EnumName(MigrationType_name, int32(x))
}
func (MigrationType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_db2d20dd0016de21, []int{2}
}
type BinlogTransaction_Statement_Category int32
......@@ -164,7 +191,7 @@ func (x BinlogTransaction_Statement_Category) String() string {
return proto.EnumName(BinlogTransaction_Statement_Category_name, int32(x))
}
func (BinlogTransaction_Statement_Category) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{1, 0, 0}
return fileDescriptor_binlogdata_db2d20dd0016de21, []int{1, 0, 0}
}
// Charset is the per-statement charset info from a QUERY_EVENT binlog entry.
......@@ -184,7 +211,7 @@ func (m *Charset) Reset() { *m = Charset{} }
func (m *Charset) String() string { return proto.CompactTextString(m) }
func (*Charset) ProtoMessage() {}
func (*Charset) Descriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{0}
return fileDescriptor_binlogdata_db2d20dd0016de21, []int{0}
}
func (m *Charset) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Charset.Unmarshal(m, b)
......@@ -241,7 +268,7 @@ func (m *BinlogTransaction) Reset() { *m = BinlogTransaction{} }
func (m *BinlogTransaction) String() string { return proto.CompactTextString(m) }
func (*BinlogTransaction) ProtoMessage() {}
func (*BinlogTransaction) Descriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{1}
return fileDescriptor_binlogdata_db2d20dd0016de21, []int{1}
}
func (m *BinlogTransaction) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_BinlogTransaction.Unmarshal(m, b)
......@@ -291,7 +318,7 @@ func (m *BinlogTransaction_Statement) Reset() { *m = BinlogTransaction_S
func (m *BinlogTransaction_Statement) String() string { return proto.CompactTextString(m) }
func (*BinlogTransaction_Statement) ProtoMessage() {}
func (*BinlogTransaction_Statement) Descriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{1, 0}
return fileDescriptor_binlogdata_db2d20dd0016de21, []int{1, 0}
}
func (m *BinlogTransaction_Statement) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_BinlogTransaction_Statement.Unmarshal(m, b)
......@@ -349,7 +376,7 @@ func (m *StreamKeyRangeRequest) Reset() { *m = StreamKeyRangeRequest{} }
func (m *StreamKeyRangeRequest) String() string { return proto.CompactTextString(m) }
func (*StreamKeyRangeRequest) ProtoMessage() {}
func (*StreamKeyRangeRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{2}
return fileDescriptor_binlogdata_db2d20dd0016de21, []int{2}
}
func (m *StreamKeyRangeRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_StreamKeyRangeRequest.Unmarshal(m, b)
......@@ -402,7 +429,7 @@ func (m *StreamKeyRangeResponse) Reset() { *m = StreamKeyRangeResponse{}
func (m *StreamKeyRangeResponse) String() string { return proto.CompactTextString(m) }
func (*StreamKeyRangeResponse) ProtoMessage() {}
func (*StreamKeyRangeResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{3}
return fileDescriptor_binlogdata_db2d20dd0016de21, []int{3}
}
func (m *StreamKeyRangeResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_StreamKeyRangeResponse.Unmarshal(m, b)
......@@ -446,7 +473,7 @@ func (m *StreamTablesRequest) Reset() { *m = StreamTablesRequest{} }
func (m *StreamTablesRequest) String() string { return proto.CompactTextString(m) }
func (*StreamTablesRequest) ProtoMessage() {}
func (*StreamTablesRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{4}
return fileDescriptor_binlogdata_db2d20dd0016de21, []int{4}
}
func (m *StreamTablesRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_StreamTablesRequest.Unmarshal(m, b)
......@@ -499,7 +526,7 @@ func (m *StreamTablesResponse) Reset() { *m = StreamTablesResponse{} }
func (m *StreamTablesResponse) String() string { return proto.CompactTextString(m) }
func (*StreamTablesResponse) ProtoMessage() {}
func (*StreamTablesResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{5}
return fileDescriptor_binlogdata_db2d20dd0016de21, []int{5}
}
func (m *StreamTablesResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_StreamTablesResponse.Unmarshal(m, b)
......@@ -544,7 +571,7 @@ func (m *Rule) Reset() { *m = Rule{} }
func (m *Rule) String() string { return proto.CompactTextString(m) }
func (*Rule) ProtoMessage() {}
func (*Rule) Descriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{6}
return fileDescriptor_binlogdata_db2d20dd0016de21, []int{6}
}
func (m *Rule) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Rule.Unmarshal(m, b)
......@@ -591,7 +618,7 @@ func (m *Filter) Reset() { *m = Filter{} }
func (m *Filter) String() string { return proto.CompactTextString(m) }
func (*Filter) ProtoMessage() {}
func (*Filter) Descriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{7}
return fileDescriptor_binlogdata_db2d20dd0016de21, []int{7}
}
func (m *Filter) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Filter.Unmarshal(m, b)
......@@ -646,7 +673,7 @@ func (m *BinlogSource) Reset() { *m = BinlogSource{} }
func (m *BinlogSource) String() string { return proto.CompactTextString(m) }
func (*BinlogSource) ProtoMessage() {}
func (*BinlogSource) Descriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{8}
return fileDescriptor_binlogdata_db2d20dd0016de21, []int{8}
}
func (m *BinlogSource) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_BinlogSource.Unmarshal(m, b)
......@@ -728,7 +755,7 @@ func (m *RowChange) Reset() { *m = RowChange{} }
func (m *RowChange) String() string { return proto.CompactTextString(m) }
func (*RowChange) ProtoMessage() {}
func (*RowChange) Descriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{9}
return fileDescriptor_binlogdata_db2d20dd0016de21, []int{9}
}
func (m *RowChange) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_RowChange.Unmarshal(m, b)
......@@ -775,7 +802,7 @@ func (m *RowEvent) Reset() { *m = RowEvent{} }
func (m *RowEvent) String() string { return proto.CompactTextString(m) }
func (*RowEvent) ProtoMessage() {}
func (*RowEvent) Descriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{10}
return fileDescriptor_binlogdata_db2d20dd0016de21, []int{10}
}
func (m *RowEvent) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_RowEvent.Unmarshal(m, b)
......@@ -821,7 +848,7 @@ func (m *FieldEvent) Reset() { *m = FieldEvent{} }
func (m *FieldEvent) String() string { return proto.CompactTextString(m) }
func (*FieldEvent) ProtoMessage() {}
func (*FieldEvent) Descriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{11}
return fileDescriptor_binlogdata_db2d20dd0016de21, []int{11}
}
func (m *FieldEvent) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_FieldEvent.Unmarshal(m, b)
......@@ -868,7 +895,7 @@ func (m *ShardGtid) Reset() { *m = ShardGtid{} }
func (m *ShardGtid) String() string { return proto.CompactTextString(m) }
func (*ShardGtid) ProtoMessage() {}
func (*ShardGtid) Descriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{12}
return fileDescriptor_binlogdata_db2d20dd0016de21, []int{12}
}
func (m *ShardGtid) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ShardGtid.Unmarshal(m, b)
......@@ -920,7 +947,7 @@ func (m *VGtid) Reset() { *m = VGtid{} }
func (m *VGtid) String() string { return proto.CompactTextString(m) }
func (*VGtid) ProtoMessage() {}
func (*VGtid) Descriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{13}
return fileDescriptor_binlogdata_db2d20dd0016de21, []int{13}
}
func (m *VGtid) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_VGtid.Unmarshal(m, b)
......@@ -947,6 +974,138 @@ func (m *VGtid) GetShardGtids() []*ShardGtid {
return nil
}
type KeyspaceShard struct {
Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"`
Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *KeyspaceShard) Reset() { *m = KeyspaceShard{} }
func (m *KeyspaceShard) String() string { return proto.CompactTextString(m) }
func (*KeyspaceShard) ProtoMessage() {}
func (*KeyspaceShard) Descriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_db2d20dd0016de21, []int{14}
}
func (m *KeyspaceShard) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_KeyspaceShard.Unmarshal(m, b)
}
func (m *KeyspaceShard) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_KeyspaceShard.Marshal(b, m, deterministic)
}
func (dst *KeyspaceShard) XXX_Merge(src proto.Message) {
xxx_messageInfo_KeyspaceShard.Merge(dst, src)
}
func (m *KeyspaceShard) XXX_Size() int {
return xxx_messageInfo_KeyspaceShard.Size(m)
}
func (m *KeyspaceShard) XXX_DiscardUnknown() {
xxx_messageInfo_KeyspaceShard.DiscardUnknown(m)
}
var xxx_messageInfo_KeyspaceShard proto.InternalMessageInfo
func (m *KeyspaceShard) GetKeyspace() string {
if m != nil {
return m.Keyspace
}
return ""
}
func (m *KeyspaceShard) GetShard() string {
if m != nil {
return m.Shard
}
return ""
}
type Journal struct {
Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
MigrationType MigrationType `protobuf:"varint,2,opt,name=migration_type,json=migrationType,proto3,enum=binlogdata.MigrationType" json:"migration_type,omitempty"`
Tables []string `protobuf:"bytes,3,rep,name=tables,proto3" json:"tables,omitempty"`
LocalPosition string `protobuf:"bytes,4,opt,name=local_position,json=localPosition,proto3" json:"local_position,omitempty"`
ShardGtids []*ShardGtid `protobuf:"bytes,5,rep,name=shard_gtids,json=shardGtids,proto3" json:"shard_gtids,omitempty"`
Participants []*KeyspaceShard `protobuf:"bytes,6,rep,name=participants,proto3" json:"participants,omitempty"`
ReversedIds []int64 `protobuf:"varint,7,rep,packed,name=reversed_ids,json=reversedIds,proto3" json:"reversed_ids,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Journal) Reset() { *m = Journal{} }
func (m *Journal) String() string { return proto.CompactTextString(m) }
func (*Journal) ProtoMessage() {}
func (*Journal) Descriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_db2d20dd0016de21, []int{15}
}
func (m *Journal) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Journal.Unmarshal(m, b)
}
func (m *Journal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Journal.Marshal(b, m, deterministic)
}
func (dst *Journal) XXX_Merge(src proto.Message) {
xxx_messageInfo_Journal.Merge(dst, src)
}
func (m *Journal) XXX_Size() int {
return xxx_messageInfo_Journal.Size(m)
}
func (m *Journal) XXX_DiscardUnknown() {
xxx_messageInfo_Journal.DiscardUnknown(m)
}
var xxx_messageInfo_Journal proto.InternalMessageInfo
func (m *Journal) GetId() int64 {
if m != nil {
return m.Id
}
return 0
}
func (m *Journal) GetMigrationType() MigrationType {
if m != nil {
return m.MigrationType
}
return MigrationType_TABLES
}
func (m *Journal) GetTables() []string {
if m != nil {
return m.Tables
}
return nil
}
func (m *Journal) GetLocalPosition() string {
if m != nil {
return m.LocalPosition
}
return ""
}
func (m *Journal) GetShardGtids() []*ShardGtid {
if m != nil {
return m.ShardGtids
}
return nil
}
func (m *Journal) GetParticipants() []*KeyspaceShard {
if m != nil {
return m.Participants
}
return nil
}
func (m *Journal) GetReversedIds() []int64 {
if m != nil {
return m.ReversedIds
}
return nil
}
// VEvent represents a vstream event
type VEvent struct {
Type VEventType `protobuf:"varint,1,opt,name=type,proto3,enum=binlogdata.VEventType" json:"type,omitempty"`
......@@ -956,6 +1115,7 @@ type VEvent struct {
RowEvent *RowEvent `protobuf:"bytes,5,opt,name=row_event,json=rowEvent,proto3" json:"row_event,omitempty"`
FieldEvent *FieldEvent `protobuf:"bytes,6,opt,name=field_event,json=fieldEvent,proto3" json:"field_event,omitempty"`
Vgtid *VGtid `protobuf:"bytes,7,opt,name=vgtid,proto3" json:"vgtid,omitempty"`
Journal *Journal `protobuf:"bytes,8,opt,name=journal,proto3" json:"journal,omitempty"`
// current_time specifies the current time to handle clock skew.
CurrentTime int64 `protobuf:"varint,20,opt,name=current_time,json=currentTime,proto3" json:"current_time,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
......@@ -967,7 +1127,7 @@ func (m *VEvent) Reset() { *m = VEvent{} }
func (m *VEvent) String() string { return proto.CompactTextString(m) }
func (*VEvent) ProtoMessage() {}
func (*VEvent) Descriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{14}
return fileDescriptor_binlogdata_db2d20dd0016de21, []int{16}
}
func (m *VEvent) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_VEvent.Unmarshal(m, b)
......@@ -1036,6 +1196,13 @@ func (m *VEvent) GetVgtid() *VGtid {
return nil
}
func (m *VEvent) GetJournal() *Journal {
if m != nil {
return m.Journal
}
return nil
}
func (m *VEvent) GetCurrentTime() int64 {
if m != nil {
return m.CurrentTime
......@@ -1059,7 +1226,7 @@ func (m *VStreamRequest) Reset() { *m = VStreamRequest{} }
func (m *VStreamRequest) String() string { return proto.CompactTextString(m) }
func (*VStreamRequest) ProtoMessage() {}
func (*VStreamRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{15}
return fileDescriptor_binlogdata_db2d20dd0016de21, []int{17}
}
func (m *VStreamRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_VStreamRequest.Unmarshal(m, b)
......@@ -1126,7 +1293,7 @@ func (m *VStreamResponse) Reset() { *m = VStreamResponse{} }
func (m *VStreamResponse) String() string { return proto.CompactTextString(m) }
func (*VStreamResponse) ProtoMessage() {}
func (*VStreamResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{16}
return fileDescriptor_binlogdata_db2d20dd0016de21, []int{18}
}
func (m *VStreamResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_VStreamResponse.Unmarshal(m, b)
......@@ -1169,7 +1336,7 @@ func (m *VStreamRowsRequest) Reset() { *m = VStreamRowsRequest{} }
func (m *VStreamRowsRequest) String() string { return proto.CompactTextString(m) }
func (*VStreamRowsRequest) ProtoMessage() {}
func (*VStreamRowsRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{17}
return fileDescriptor_binlogdata_db2d20dd0016de21, []int{19}
}
func (m *VStreamRowsRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_VStreamRowsRequest.Unmarshal(m, b)
......@@ -1240,7 +1407,7 @@ func (m *VStreamRowsResponse) Reset() { *m = VStreamRowsResponse{} }
func (m *VStreamRowsResponse) String() string { return proto.CompactTextString(m) }
func (*VStreamRowsResponse) ProtoMessage() {}
func (*VStreamRowsResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_binlogdata_e3df2e837eaa5305, []int{18}
return fileDescriptor_binlogdata_db2d20dd0016de21, []int{20}
}
func (m *VStreamRowsResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_VStreamRowsResponse.Unmarshal(m, b)
......@@ -1311,6 +1478,8 @@ func init() {
proto.RegisterType((*FieldEvent)(nil), "binlogdata.FieldEvent")
proto.RegisterType((*ShardGtid)(nil), "binlogdata.ShardGtid")
proto.RegisterType((*VGtid)(nil), "binlogdata.VGtid")
proto.RegisterType((*KeyspaceShard)(nil), "binlogdata.KeyspaceShard")
proto.RegisterType((*Journal)(nil), "binlogdata.Journal")
proto.RegisterType((*VEvent)(nil), "binlogdata.VEvent")
proto.RegisterType((*VStreamRequest)(nil), "binlogdata.VStreamRequest")
proto.RegisterType((*VStreamResponse)(nil), "binlogdata.VStreamResponse")
......@@ -1318,97 +1487,110 @@ func init() {
proto.RegisterType((*VStreamRowsResponse)(nil), "binlogdata.VStreamRowsResponse")
proto.RegisterEnum("binlogdata.OnDDLAction", OnDDLAction_name, OnDDLAction_value)
proto.RegisterEnum("binlogdata.VEventType", VEventType_name, VEventType_value)
proto.RegisterEnum("binlogdata.MigrationType", MigrationType_name, MigrationType_value)
proto.RegisterEnum("binlogdata.BinlogTransaction_Statement_Category", BinlogTransaction_Statement_Category_name, BinlogTransaction_Statement_Category_value)
}
func init() { proto.RegisterFile("binlogdata.proto", fileDescriptor_binlogdata_e3df2e837eaa5305) }
var fileDescriptor_binlogdata_e3df2e837eaa5305 = []byte{
// 1372 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x57, 0xdd, 0x72, 0xdb, 0x54,
0x10, 0xae, 0x6d, 0xf9, 0x6f, 0x95, 0x26, 0xca, 0xc9, 0x0f, 0x9e, 0x0c, 0x65, 0x82, 0x06, 0x68,
0xc8, 0x0c, 0x4e, 0x31, 0x50, 0xae, 0xa0, 0xe3, 0x1f, 0xd5, 0x75, 0xab, 0xd8, 0xe9, 0xb1, 0x9a,
0x32, 0xbd, 0xd1, 0x28, 0xd2, 0x71, 0xa2, 0x89, 0x2c, 0x39, 0xd2, 0xb1, 0x83, 0x1f, 0x80, 0xe1,
0x01, 0xb8, 0xe5, 0x05, 0xb8, 0xe3, 0x05, 0xb8, 0x63, 0x78, 0x13, 0xde, 0x83, 0x39, 0x3f, 0x92,
0xed, 0xb4, 0xb4, 0x81, 0x19, 0x2e, 0xb8, 0xc9, 0xec, 0xff, 0xd9, 0xfd, 0x76, 0xbd, 0xda, 0x80,
0x76, 0xe6, 0x87, 0x41, 0x74, 0xee, 0x39, 0xd4, 0xa9, 0x4f, 0xe2, 0x88, 0x46, 0x08, 0x16, 0x92,
0x3d, 0x75, 0x46, 0xe3, 0x89, 0x2b, 0x14, 0x7b, 0xea, 0xd5, 0x94, 0xc4, 0x73, 0xc9, 0xac, 0xd3,
0x68, 0x12, 0x2d, 0xbc, 0xf4, 0x63, 0x28, 0xb7, 0x2f, 0x9c, 0x38, 0x21, 0x14, 0xed, 0x42, 0xc9,
0x0d, 0x7c, 0x12, 0xd2, 0x5a, 0x6e, 0x3f, 0x77, 0x50, 0xc4, 0x92, 0x43, 0x08, 0x14, 0x37, 0x0a,
0xc3, 0x5a, 0x9e, 0x4b, 0x39, 0xcd, 0x6c, 0x13, 0x12, 0xcf, 0x48, 0x5c, 0x2b, 0x08, 0x5b, 0xc1,
0xe9, 0x7f, 0x16, 0x60, 0xb3, 0xc5, 0xf3, 0xb0, 0x62, 0x27, 0x4c, 0x1c, 0x97, 0xfa, 0x51, 0x88,
0xba, 0x00, 0x09, 0x75, 0x28, 0x19, 0x93, 0x90, 0x26, 0xb5, 0xdc, 0x7e, 0xe1, 0x40, 0x6d, 0xdc,
0xaf, 0x2f, 0x55, 0xf0, 0x9a, 0x4b, 0x7d, 0x98, 0xda, 0xe3, 0x25, 0x57, 0xd4, 0x00, 0x95, 0xcc,
0x48, 0x48, 0x6d, 0x1a, 0x5d, 0x92, 0xb0, 0xa6, 0xec, 0xe7, 0x0e, 0xd4, 0xc6, 0x66, 0x5d, 0x14,
0x68, 0x30, 0x8d, 0xc5, 0x14, 0x18, 0x48, 0x46, 0xef, 0xfd, 0x91, 0x87, 0x6a, 0x16, 0x0d, 0x99,
0x50, 0x71, 0x1d, 0x4a, 0xce, 0xa3, 0x78, 0xce, 0xcb, 0x5c, 0x6f, 0x3c, 0xb8, 0x65, 0x22, 0xf5,
0xb6, 0xf4, 0xc3, 0x59, 0x04, 0xf4, 0x19, 0x94, 0x5d, 0x81, 0x1e, 0x47, 0x47, 0x6d, 0x6c, 0x2d,
0x07, 0x93, 0xc0, 0xe2, 0xd4, 0x06, 0x69, 0x50, 0x48, 0xae, 0x02, 0x0e, 0xd9, 0x1a, 0x66, 0xa4,
0xfe, 0x4b, 0x0e, 0x2a, 0x69, 0x5c, 0xb4, 0x05, 0x1b, 0x2d, 0xd3, 0x7e, 0xd1, 0xc7, 0x46, 0x7b,
0xd0, 0xed, 0xf7, 0x5e, 0x19, 0x1d, 0xed, 0x0e, 0x5a, 0x83, 0x4a, 0xcb, 0xb4, 0x5b, 0x46, 0xb7,
0xd7, 0xd7, 0x72, 0xe8, 0x2e, 0x54, 0x5b, 0xa6, 0xdd, 0x1e, 0x1c, 0x1f, 0xf7, 0x2c, 0x2d, 0x8f,
0x36, 0x40, 0x6d, 0x99, 0x36, 0x1e, 0x98, 0x66, 0xab, 0xd9, 0x7e, 0xa6, 0x15, 0xd0, 0x0e, 0x6c,
0xb6, 0x4c, 0xbb, 0x73, 0x6c, 0xda, 0x1d, 0xe3, 0x04, 0x1b, 0xed, 0xa6, 0x65, 0x74, 0x34, 0x05,
0x01, 0x94, 0x98, 0xb8, 0x63, 0x6a, 0x45, 0x49, 0x0f, 0x0d, 0x4b, 0x2b, 0xc9, 0x70, 0xbd, 0xfe,
0xd0, 0xc0, 0x96, 0x56, 0x96, 0xec, 0x8b, 0x93, 0x4e, 0xd3, 0x32, 0xb4, 0x8a, 0x64, 0x3b, 0x86,
0x69, 0x58, 0x86, 0x56, 0x7d, 0xaa, 0x54, 0xf2, 0x5a, 0xe1, 0xa9, 0x52, 0x29, 0x68, 0x8a, 0xfe,
0x53, 0x0e, 0x76, 0x86, 0x34, 0x26, 0xce, 0xf8, 0x19, 0x99, 0x63, 0x27, 0x3c, 0x27, 0x98, 0x5c,
0x4d, 0x49, 0x42, 0xd1, 0x1e, 0x54, 0x26, 0x51, 0xe2, 0x33, 0xec, 0x38, 0xc0, 0x55, 0x9c, 0xf1,
0xe8, 0x08, 0xaa, 0x97, 0x64, 0x6e, 0xc7, 0xcc, 0x5e, 0x02, 0x86, 0xea, 0xd9, 0x40, 0x66, 0x91,
0x2a, 0x97, 0x92, 0x5a, 0xc6, 0xb7, 0xf0, 0x6e, 0x7c, 0xf5, 0x11, 0xec, 0xde, 0x4c, 0x2a, 0x99,
0x44, 0x61, 0x42, 0x90, 0x09, 0x48, 0x38, 0xda, 0x74, 0xd1, 0x5b, 0x9e, 0x9f, 0xda, 0xb8, 0xf7,
0xd6, 0x01, 0xc0, 0x9b, 0x67, 0x37, 0x45, 0xfa, 0xf7, 0xb0, 0x25, 0xde, 0xb1, 0x9c, 0xb3, 0x80,
0x24, 0xb7, 0x29, 0x7d, 0x17, 0x4a, 0x94, 0x1b, 0xd7, 0xf2, 0xfb, 0x85, 0x83, 0x2a, 0x96, 0xdc,
0x3f, 0xad, 0xd0, 0x83, 0xed, 0xd5, 0x97, 0xff, 0x93, 0xfa, 0xbe, 0x04, 0x05, 0x4f, 0x03, 0x82,
0xb6, 0xa1, 0x38, 0x76, 0xa8, 0x7b, 0x21, 0xab, 0x11, 0x0c, 0x2b, 0x65, 0xe4, 0x07, 0x94, 0xc4,
0xbc, 0x85, 0x55, 0x2c, 0x39, 0xfd, 0x01, 0x94, 0x1e, 0x73, 0x0a, 0x7d, 0x02, 0xc5, 0x78, 0xca,
0x6a, 0x15, 0x3f, 0x75, 0x6d, 0x39, 0x01, 0x16, 0x18, 0x0b, 0xb5, 0xfe, 0x73, 0x1e, 0xd6, 0x44,
0x42, 0xc3, 0x68, 0x1a, 0xbb, 0x84, 0x21, 0x78, 0x49, 0xe6, 0xc9, 0xc4, 0x71, 0x49, 0x8a, 0x60,
0xca, 0xb3, 0x64, 0x92, 0x0b, 0x27, 0xf6, 0xe4, 0xab, 0x82, 0x41, 0x5f, 0x81, 0xca, 0x91, 0xa4,
0x36, 0x9d, 0x4f, 0x08, 0xc7, 0x70, 0xbd, 0xb1, 0xbd, 0x18, 0x2a, 0x8e, 0x13, 0xb5, 0xe6, 0x13,
0x82, 0x81, 0x66, 0xf4, 0xea, 0x24, 0x2a, 0xb7, 0x98, 0xc4, 0x45, 0xff, 0x8a, 0x2b, 0xfd, 0x3b,
0xcc, 0xc0, 0x28, 0xc9, 0x28, 0x4b, 0xb5, 0x0a, 0x38, 0x52, 0x80, 0x50, 0x1d, 0x4a, 0x51, 0x68,
0x7b, 0x5e, 0x50, 0x2b, 0xf3, 0x34, 0xdf, 0x5b, 0xb6, 0x1d, 0x84, 0x9d, 0x8e, 0xd9, 0x14, 0x2d,
0x29, 0x46, 0x61, 0xc7, 0x0b, 0xf4, 0xe7, 0x50, 0xc5, 0xd1, 0x75, 0xfb, 0x82, 0x27, 0xa0, 0x43,
0xe9, 0x8c, 0x8c, 0xa2, 0x98, 0xc8, 0xae, 0x82, 0xdc, 0x7a, 0x38, 0xba, 0xc6, 0x52, 0x83, 0xf6,
0xa1, 0xe8, 0x8c, 0xd2, 0xc6, 0xac, 0x9a, 0x08, 0x85, 0xee, 0x40, 0x05, 0x47, 0xd7, 0x7c, 0x53,
0xa2, 0x7b, 0x20, 0x10, 0xb1, 0x43, 0x67, 0x9c, 0xc2, 0x5d, 0xe5, 0x92, 0xbe, 0x33, 0x26, 0xe8,
0x21, 0xa8, 0x71, 0x74, 0x6d, 0xbb, 0xfc, 0x79, 0x31, 0xb6, 0x6a, 0x63, 0x67, 0xa5, 0x95, 0x69,
0x72, 0x18, 0xe2, 0x94, 0x4c, 0xf4, 0xe7, 0x00, 0x8f, 0x7d, 0x12, 0x78, 0xb7, 0x7a, 0xe4, 0x23,
0x06, 0x1f, 0x09, 0xbc, 0x34, 0xfe, 0x9a, 0x4c, 0x99, 0x47, 0xc0, 0x52, 0xc7, 0x80, 0x18, 0xb2,
0x6e, 0x77, 0xa9, 0xef, 0xfd, 0x8b, 0x19, 0x41, 0xa0, 0x9c, 0x53, 0xdf, 0xe3, 0xc3, 0x51, 0xc5,
0x9c, 0xd6, 0x1f, 0x41, 0xf1, 0x94, 0x87, 0x7b, 0x08, 0x2a, 0xb7, 0xb2, 0x99, 0x38, 0x9d, 0xd8,
0x95, 0x32, 0xb3, 0xa7, 0x31, 0x24, 0x29, 0x99, 0xe8, 0xbf, 0xe6, 0xa1, 0x74, 0x2a, 0x6a, 0x3c,
0x04, 0x85, 0x0f, 0x9f, 0xf8, 0x9e, 0xec, 0x2e, 0xfb, 0x0a, 0x0b, 0x3e, 0x7e, 0xdc, 0x06, 0xbd,
0x0f, 0x55, 0xea, 0x8f, 0x49, 0x42, 0x9d, 0xf1, 0x84, 0x67, 0x59, 0xc0, 0x0b, 0xc1, 0x9b, 0x32,
0x65, 0x1f, 0x0d, 0x36, 0x32, 0x0a, 0x17, 0x31, 0x12, 0x7d, 0x0e, 0x55, 0xd6, 0x19, 0xfe, 0x8d,
0xab, 0x15, 0x79, 0xab, 0xb7, 0x6f, 0xf4, 0x85, 0x3f, 0x8b, 0x2b, 0x71, 0xda, 0xeb, 0xaf, 0x41,
0xe5, 0x58, 0x4a, 0x27, 0x31, 0xab, 0xbb, 0xab, 0xb3, 0x9a, 0xf6, 0x0c, 0xc3, 0x68, 0xd1, 0xbf,
0xfb, 0x50, 0x9c, 0xf1, 0x94, 0xca, 0xf2, 0x5b, 0xbb, 0x5c, 0x1c, 0x07, 0x45, 0xe8, 0xd1, 0x87,
0xb0, 0xe6, 0x4e, 0xe3, 0x98, 0x7f, 0x9c, 0xfd, 0x31, 0xa9, 0x6d, 0xf3, 0xda, 0x54, 0x29, 0xb3,
0xfc, 0x31, 0xd1, 0x7f, 0xcc, 0xc3, 0xfa, 0xa9, 0x58, 0x5f, 0xe9, 0xca, 0x7c, 0x04, 0x5b, 0x64,
0x34, 0x22, 0x2e, 0xf5, 0x67, 0xc4, 0x76, 0x9d, 0x20, 0x20, 0xb1, 0xed, 0x7b, 0x72, 0xc4, 0x37,
0xea, 0xe2, 0x8c, 0x69, 0x73, 0x79, 0xaf, 0x83, 0x37, 0x33, 0x5b, 0x29, 0xf2, 0x90, 0x01, 0x5b,
0xfe, 0x78, 0x4c, 0x3c, 0xdf, 0xa1, 0xcb, 0x01, 0xc4, 0x0f, 0x60, 0x47, 0x4e, 0xd3, 0xa9, 0xd5,
0x75, 0x28, 0x59, 0x84, 0xc9, 0x3c, 0xb2, 0x30, 0x1f, 0xb3, 0x9f, 0x77, 0x7c, 0x9e, 0x6d, 0xe1,
0xbb, 0xd2, 0xd3, 0xe2, 0x42, 0x2c, 0x95, 0x2b, 0x1b, 0x5e, 0xb9, 0xb1, 0xe1, 0x17, 0x9b, 0xa0,
0xf8, 0xae, 0x4d, 0xa0, 0x7f, 0x03, 0x1b, 0x19, 0x10, 0x72, 0x83, 0x1f, 0x42, 0x89, 0xf7, 0x26,
0x1d, 0x41, 0xf4, 0xfa, 0x18, 0x61, 0x69, 0xa1, 0xff, 0x90, 0x07, 0x94, 0xfa, 0x47, 0xd7, 0xc9,
0xff, 0x14, 0xcc, 0x6d, 0x28, 0x72, 0xb9, 0x44, 0x52, 0x30, 0x0c, 0x87, 0xc0, 0x49, 0xe8, 0xe4,
0x32, 0x83, 0x51, 0x38, 0x3f, 0x67, 0x7f, 0x31, 0x49, 0xa6, 0x01, 0xc5, 0xd2, 0x42, 0xff, 0x2d,
0x07, 0x5b, 0x2b, 0x38, 0x48, 0x2c, 0x17, 0x5b, 0x25, 0xf7, 0xf7, 0x5b, 0x05, 0x1d, 0x40, 0x65,
0x72, 0xf9, 0x96, 0xed, 0x93, 0x69, 0xdf, 0xf8, 0xb3, 0xfc, 0x00, 0x94, 0x38, 0xba, 0x4e, 0x6a,
0x0a, 0xf7, 0x5c, 0x5e, 0xb5, 0x5c, 0xce, 0xf6, 0xf5, 0x4a, 0x1d, 0x2b, 0xfb, 0x5a, 0x68, 0x0e,
0xbf, 0x05, 0x75, 0x69, 0xed, 0xb3, 0xcb, 0xac, 0xd7, 0xed, 0x0f, 0xb0, 0xa1, 0xdd, 0x41, 0x15,
0x50, 0x86, 0xd6, 0xe0, 0x44, 0xcb, 0x31, 0xca, 0xf8, 0xce, 0x68, 0x8b, 0x6b, 0x8f, 0x51, 0xb6,
0x34, 0x2a, 0x1c, 0xfe, 0x9e, 0x03, 0x58, 0x6c, 0x18, 0xa4, 0x42, 0xf9, 0x45, 0xff, 0x59, 0x7f,
0xf0, 0xb2, 0x2f, 0x02, 0x74, 0xad, 0x5e, 0x47, 0xcb, 0xa1, 0x2a, 0x14, 0xc5, 0xf9, 0x98, 0x67,
0x2f, 0xc8, 0xdb, 0xb1, 0xc0, 0x0e, 0xcb, 0xec, 0x70, 0x54, 0x50, 0x19, 0x0a, 0xd9, 0x79, 0x28,
0xef, 0xc1, 0x12, 0x0b, 0x88, 0x8d, 0x13, 0xb3, 0xd9, 0x36, 0xb4, 0x32, 0x53, 0x64, 0x97, 0x21,
0x40, 0x29, 0x3d, 0x0b, 0x99, 0x27, 0x3b, 0x26, 0x81, 0xbd, 0x33, 0xb0, 0x9e, 0x18, 0x58, 0x53,
0x99, 0x0c, 0x0f, 0x5e, 0x6a, 0x6b, 0x4c, 0xf6, 0xb8, 0x67, 0x98, 0x1d, 0xed, 0x2e, 0xbb, 0x26,
0x9f, 0x18, 0x4d, 0x6c, 0xb5, 0x8c, 0xa6, 0xa5, 0xad, 0x33, 0xcd, 0x29, 0x4f, 0x70, 0xa3, 0xf5,
0xe9, 0xab, 0xfb, 0x33, 0x9f, 0x92, 0x24, 0xa9, 0xfb, 0xd1, 0x91, 0xa0, 0x8e, 0xce, 0xa3, 0xa3,
0x19, 0x3d, 0xe2, 0xff, 0xa3, 0x1c, 0x2d, 0x7e, 0x08, 0x67, 0x25, 0x2e, 0xf9, 0xe2, 0xaf, 0x00,
0x00, 0x00, 0xff, 0xff, 0x59, 0xa0, 0xff, 0x30, 0xff, 0x0c, 0x00, 0x00,
func init() { proto.RegisterFile("binlogdata.proto", fileDescriptor_binlogdata_db2d20dd0016de21) }
var fileDescriptor_binlogdata_db2d20dd0016de21 = []byte{
// 1558 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x57, 0xcb, 0x72, 0xdb, 0xca,
0x11, 0x35, 0x09, 0xf0, 0xd5, 0x90, 0x28, 0x68, 0xf4, 0x08, 0xa3, 0x8a, 0x53, 0x0a, 0x2a, 0x8e,
0x14, 0x55, 0x85, 0x72, 0x98, 0xc4, 0x59, 0x39, 0x0e, 0x1f, 0xb0, 0x4c, 0x09, 0x22, 0xe5, 0x21,
0x24, 0xa7, 0xbc, 0x41, 0x41, 0xc4, 0x48, 0x42, 0x04, 0x02, 0x34, 0x30, 0xa4, 0xa2, 0x0f, 0x48,
0xe5, 0x03, 0xb2, 0xcd, 0x0f, 0x64, 0x9f, 0x6d, 0xb6, 0xd9, 0xe7, 0x0b, 0xb2, 0xca, 0x7f, 0xdc,
0x9a, 0x07, 0x40, 0x42, 0xf6, 0xb5, 0xe5, 0x5b, 0x75, 0x17, 0x77, 0xc3, 0xea, 0xe9, 0xe9, 0xe7,
0x41, 0x4f, 0x77, 0x13, 0xf4, 0x4b, 0x3f, 0x0c, 0xa2, 0x6b, 0xcf, 0xa5, 0x6e, 0x73, 0x1a, 0x47,
0x34, 0x42, 0xb0, 0xe0, 0xec, 0x68, 0x73, 0x1a, 0x4f, 0xc7, 0xe2, 0x62, 0x47, 0xfb, 0x30, 0x23,
0xf1, 0xbd, 0x3c, 0xd4, 0x69, 0x34, 0x8d, 0x16, 0x5a, 0xc6, 0x29, 0x54, 0xba, 0x37, 0x6e, 0x9c,
0x10, 0x8a, 0xb6, 0xa1, 0x3c, 0x0e, 0x7c, 0x12, 0xd2, 0x46, 0x61, 0xb7, 0xb0, 0x5f, 0xc2, 0xf2,
0x84, 0x10, 0xa8, 0xe3, 0x28, 0x0c, 0x1b, 0x45, 0xce, 0xe5, 0x34, 0x93, 0x4d, 0x48, 0x3c, 0x27,
0x71, 0x43, 0x11, 0xb2, 0xe2, 0x64, 0xfc, 0x5f, 0x81, 0xf5, 0x0e, 0x8f, 0xc3, 0x8e, 0xdd, 0x30,
0x71, 0xc7, 0xd4, 0x8f, 0x42, 0x74, 0x04, 0x90, 0x50, 0x97, 0x92, 0x09, 0x09, 0x69, 0xd2, 0x28,
0xec, 0x2a, 0xfb, 0x5a, 0x6b, 0xaf, 0xb9, 0x94, 0xc1, 0x47, 0x2a, 0xcd, 0x51, 0x2a, 0x8f, 0x97,
0x54, 0x51, 0x0b, 0x34, 0x32, 0x27, 0x21, 0x75, 0x68, 0x74, 0x4b, 0xc2, 0x86, 0xba, 0x5b, 0xd8,
0xd7, 0x5a, 0xeb, 0x4d, 0x91, 0xa0, 0xc9, 0x6e, 0x6c, 0x76, 0x81, 0x81, 0x64, 0xf4, 0xce, 0x7f,
0x8a, 0x50, 0xcb, 0xac, 0x21, 0x0b, 0xaa, 0x63, 0x97, 0x92, 0xeb, 0x28, 0xbe, 0xe7, 0x69, 0xd6,
0x5b, 0xcf, 0x1f, 0x19, 0x48, 0xb3, 0x2b, 0xf5, 0x70, 0x66, 0x01, 0xfd, 0x0a, 0x2a, 0x63, 0x81,
0x1e, 0x47, 0x47, 0x6b, 0x6d, 0x2c, 0x1b, 0x93, 0xc0, 0xe2, 0x54, 0x06, 0xe9, 0xa0, 0x24, 0x1f,
0x02, 0x0e, 0xd9, 0x0a, 0x66, 0xa4, 0xf1, 0xcf, 0x02, 0x54, 0x53, 0xbb, 0x68, 0x03, 0xd6, 0x3a,
0x96, 0x73, 0x3e, 0xc0, 0x66, 0x77, 0x78, 0x34, 0xe8, 0xbf, 0x37, 0x7b, 0xfa, 0x13, 0xb4, 0x02,
0xd5, 0x8e, 0xe5, 0x74, 0xcc, 0xa3, 0xfe, 0x40, 0x2f, 0xa0, 0x55, 0xa8, 0x75, 0x2c, 0xa7, 0x3b,
0x3c, 0x3d, 0xed, 0xdb, 0x7a, 0x11, 0xad, 0x81, 0xd6, 0xb1, 0x1c, 0x3c, 0xb4, 0xac, 0x4e, 0xbb,
0x7b, 0xa2, 0x2b, 0x68, 0x0b, 0xd6, 0x3b, 0x96, 0xd3, 0x3b, 0xb5, 0x9c, 0x9e, 0x79, 0x86, 0xcd,
0x6e, 0xdb, 0x36, 0x7b, 0xba, 0x8a, 0x00, 0xca, 0x8c, 0xdd, 0xb3, 0xf4, 0x92, 0xa4, 0x47, 0xa6,
0xad, 0x97, 0xa5, 0xb9, 0xfe, 0x60, 0x64, 0x62, 0x5b, 0xaf, 0xc8, 0xe3, 0xf9, 0x59, 0xaf, 0x6d,
0x9b, 0x7a, 0x55, 0x1e, 0x7b, 0xa6, 0x65, 0xda, 0xa6, 0x5e, 0x3b, 0x56, 0xab, 0x45, 0x5d, 0x39,
0x56, 0xab, 0x8a, 0xae, 0x1a, 0x7f, 0x2f, 0xc0, 0xd6, 0x88, 0xc6, 0xc4, 0x9d, 0x9c, 0x90, 0x7b,
0xec, 0x86, 0xd7, 0x04, 0x93, 0x0f, 0x33, 0x92, 0x50, 0xb4, 0x03, 0xd5, 0x69, 0x94, 0xf8, 0x0c,
0x3b, 0x0e, 0x70, 0x0d, 0x67, 0x67, 0x74, 0x08, 0xb5, 0x5b, 0x72, 0xef, 0xc4, 0x4c, 0x5e, 0x02,
0x86, 0x9a, 0x59, 0x41, 0x66, 0x96, 0xaa, 0xb7, 0x92, 0x5a, 0xc6, 0x57, 0xf9, 0x32, 0xbe, 0xc6,
0x15, 0x6c, 0x3f, 0x0c, 0x2a, 0x99, 0x46, 0x61, 0x42, 0x90, 0x05, 0x48, 0x28, 0x3a, 0x74, 0xf1,
0x6d, 0x79, 0x7c, 0x5a, 0xeb, 0xe9, 0x67, 0x0b, 0x00, 0xaf, 0x5f, 0x3e, 0x64, 0x19, 0x7f, 0x81,
0x0d, 0xe1, 0xc7, 0x76, 0x2f, 0x03, 0x92, 0x3c, 0x26, 0xf5, 0x6d, 0x28, 0x53, 0x2e, 0xdc, 0x28,
0xee, 0x2a, 0xfb, 0x35, 0x2c, 0x4f, 0x5f, 0x9b, 0xa1, 0x07, 0x9b, 0x79, 0xcf, 0xdf, 0x4b, 0x7e,
0xbf, 0x05, 0x15, 0xcf, 0x02, 0x82, 0x36, 0xa1, 0x34, 0x71, 0xe9, 0xf8, 0x46, 0x66, 0x23, 0x0e,
0x2c, 0x95, 0x2b, 0x3f, 0xa0, 0x24, 0xe6, 0x9f, 0xb0, 0x86, 0xe5, 0xc9, 0x78, 0x0e, 0xe5, 0xd7,
0x9c, 0x42, 0xbf, 0x80, 0x52, 0x3c, 0x63, 0xb9, 0x8a, 0xa7, 0xae, 0x2f, 0x07, 0xc0, 0x0c, 0x63,
0x71, 0x6d, 0xfc, 0xa3, 0x08, 0x2b, 0x22, 0xa0, 0x51, 0x34, 0x8b, 0xc7, 0x84, 0x21, 0x78, 0x4b,
0xee, 0x93, 0xa9, 0x3b, 0x26, 0x29, 0x82, 0xe9, 0x99, 0x05, 0x93, 0xdc, 0xb8, 0xb1, 0x27, 0xbd,
0x8a, 0x03, 0xfa, 0x1d, 0x68, 0x1c, 0x49, 0xea, 0xd0, 0xfb, 0x29, 0xe1, 0x18, 0xd6, 0x5b, 0x9b,
0x8b, 0xa2, 0xe2, 0x38, 0x51, 0xfb, 0x7e, 0x4a, 0x30, 0xd0, 0x8c, 0xce, 0x57, 0xa2, 0xfa, 0x88,
0x4a, 0x5c, 0x7c, 0xbf, 0x52, 0xee, 0xfb, 0x1d, 0x64, 0x60, 0x94, 0xa5, 0x95, 0xa5, 0x5c, 0x05,
0x1c, 0x29, 0x40, 0xa8, 0x09, 0xe5, 0x28, 0x74, 0x3c, 0x2f, 0x68, 0x54, 0x78, 0x98, 0x3f, 0x5a,
0x96, 0x1d, 0x86, 0xbd, 0x9e, 0xd5, 0x16, 0x9f, 0xa4, 0x14, 0x85, 0x3d, 0x2f, 0x30, 0xde, 0x42,
0x0d, 0x47, 0x77, 0xdd, 0x1b, 0x1e, 0x80, 0x01, 0xe5, 0x4b, 0x72, 0x15, 0xc5, 0x44, 0x7e, 0x55,
0x90, 0x5d, 0x0f, 0x47, 0x77, 0x58, 0xde, 0xa0, 0x5d, 0x28, 0xb9, 0x57, 0xe9, 0x87, 0xc9, 0x8b,
0x88, 0x0b, 0xc3, 0x85, 0x2a, 0x8e, 0xee, 0x78, 0xa7, 0x44, 0x4f, 0x41, 0x20, 0xe2, 0x84, 0xee,
0x24, 0x85, 0xbb, 0xc6, 0x39, 0x03, 0x77, 0x42, 0xd0, 0x0b, 0xd0, 0xe2, 0xe8, 0xce, 0x19, 0x73,
0xf7, 0xa2, 0x6c, 0xb5, 0xd6, 0x56, 0xee, 0x53, 0xa6, 0xc1, 0x61, 0x88, 0x53, 0x32, 0x31, 0xde,
0x02, 0xbc, 0xf6, 0x49, 0xe0, 0x3d, 0xca, 0xc9, 0xcf, 0x19, 0x7c, 0x24, 0xf0, 0x52, 0xfb, 0x2b,
0x32, 0x64, 0x6e, 0x01, 0xcb, 0x3b, 0x06, 0xc4, 0x88, 0x7d, 0xed, 0x23, 0xea, 0x7b, 0xdf, 0xa1,
0x46, 0x10, 0xa8, 0xd7, 0xd4, 0xf7, 0x78, 0x71, 0xd4, 0x30, 0xa7, 0x8d, 0x57, 0x50, 0xba, 0xe0,
0xe6, 0x5e, 0x80, 0xc6, 0xa5, 0x1c, 0xc6, 0x4e, 0x2b, 0x36, 0x97, 0x66, 0xe6, 0x1a, 0x43, 0x92,
0x92, 0x89, 0xd1, 0x86, 0xd5, 0x13, 0xe9, 0x96, 0x0b, 0x7c, 0x7d, 0x5c, 0xc6, 0xbf, 0x8a, 0x50,
0x39, 0x8e, 0x66, 0x71, 0xe8, 0x06, 0xa8, 0x0e, 0x45, 0xdf, 0xe3, 0x7a, 0x0a, 0x2e, 0xfa, 0x1e,
0xfa, 0x23, 0xd4, 0x27, 0xfe, 0x75, 0xec, 0xb2, 0x7a, 0x10, 0xa5, 0x5d, 0xe4, 0x35, 0xf3, 0xe3,
0xe5, 0xc8, 0x4e, 0x53, 0x09, 0x5e, 0xdf, 0xab, 0x93, 0xe5, 0xe3, 0x52, 0xc5, 0x2a, 0xb9, 0x8a,
0x7d, 0x06, 0xf5, 0x20, 0x1a, 0xbb, 0x81, 0x93, 0xf5, 0x2a, 0x95, 0x07, 0xb5, 0xca, 0xb9, 0x67,
0x69, 0xc3, 0x7a, 0x80, 0x4b, 0xe9, 0x91, 0xb8, 0xa0, 0x97, 0xb0, 0x32, 0x75, 0x63, 0xea, 0x8f,
0xfd, 0xa9, 0xcb, 0xa6, 0x7d, 0x99, 0x2b, 0xe6, 0xc2, 0xce, 0xe1, 0x86, 0x73, 0xe2, 0xe8, 0x67,
0xb0, 0x12, 0x93, 0x39, 0x89, 0x13, 0xe2, 0x39, 0xcc, 0x6f, 0x65, 0x57, 0xd9, 0x57, 0xb0, 0x96,
0xf2, 0xfa, 0x5e, 0x62, 0xfc, 0xaf, 0x08, 0xe5, 0x0b, 0x51, 0x5d, 0x07, 0xa0, 0x72, 0x6c, 0xc4,
0x24, 0xdf, 0x5e, 0x76, 0x22, 0x24, 0x38, 0x30, 0x5c, 0x06, 0xfd, 0x04, 0x6a, 0xd4, 0x9f, 0x90,
0x84, 0xba, 0x93, 0x29, 0x07, 0x53, 0xc1, 0x0b, 0xc6, 0xa7, 0x6a, 0x84, 0x8d, 0x6b, 0xf6, 0x58,
0x05, 0x3c, 0x8c, 0x44, 0xbf, 0x86, 0x1a, 0x7b, 0x13, 0x7c, 0xbb, 0x68, 0x94, 0xf8, 0x23, 0xdb,
0x7c, 0xf0, 0x22, 0xb8, 0x5b, 0x5c, 0x8d, 0xd3, 0x57, 0xf6, 0x7b, 0xd0, 0x78, 0x15, 0x4b, 0x25,
0xd1, 0x25, 0xb6, 0xf3, 0x5d, 0x22, 0x7d, 0x2d, 0x18, 0xae, 0x16, 0x2f, 0x67, 0x0f, 0x4a, 0x73,
0x1e, 0x52, 0x45, 0x6e, 0x39, 0xcb, 0xc9, 0x71, 0xd8, 0xc5, 0x3d, 0x1b, 0x21, 0x7f, 0x16, 0x55,
0xd4, 0xa8, 0x7e, 0x3c, 0x42, 0x64, 0x81, 0xe1, 0x54, 0x86, 0x21, 0x3c, 0x9e, 0xc5, 0x31, 0xdf,
0xa2, 0xfc, 0x09, 0x69, 0x6c, 0x72, 0x28, 0x34, 0xc9, 0xb3, 0xfd, 0x09, 0x31, 0xfe, 0x56, 0x84,
0xfa, 0x85, 0x98, 0x33, 0xe9, 0x6c, 0x7b, 0x05, 0x1b, 0xe4, 0xea, 0x8a, 0x8c, 0xa9, 0x3f, 0x27,
0xce, 0xd8, 0x0d, 0x02, 0x12, 0x3b, 0xb2, 0x60, 0xb5, 0xd6, 0x5a, 0x53, 0xec, 0x9b, 0x5d, 0xce,
0xef, 0xf7, 0xf0, 0x7a, 0x26, 0x2b, 0x59, 0x1e, 0x32, 0x61, 0xc3, 0x9f, 0x4c, 0x88, 0xe7, 0xbb,
0x74, 0xd9, 0x80, 0xe8, 0x54, 0x5b, 0xf2, 0xd9, 0x5f, 0xd8, 0x47, 0x2e, 0x25, 0x0b, 0x33, 0x99,
0x46, 0x66, 0xe6, 0x19, 0xab, 0xea, 0xf8, 0x3a, 0x1b, 0x97, 0xab, 0x52, 0xd3, 0xe6, 0x4c, 0x2c,
0x2f, 0x73, 0xa3, 0x58, 0x7d, 0x30, 0x8a, 0x17, 0x2d, 0xbb, 0xf4, 0xa5, 0x96, 0x6d, 0xbc, 0x84,
0xb5, 0x0c, 0x08, 0x39, 0x6a, 0x0f, 0xa0, 0xcc, 0x3f, 0x65, 0xda, 0x2b, 0xd0, 0xc7, 0x55, 0x87,
0xa5, 0x84, 0xf1, 0xd7, 0x22, 0xa0, 0x54, 0x3f, 0xba, 0x4b, 0x7e, 0xa0, 0x60, 0x6e, 0x42, 0x89,
0xf3, 0x25, 0x92, 0xe2, 0xc0, 0x70, 0x08, 0xdc, 0x84, 0x4e, 0x6f, 0x33, 0x18, 0x85, 0xf2, 0x5b,
0xf6, 0x8b, 0x49, 0x32, 0x0b, 0x28, 0x96, 0x12, 0xc6, 0xbf, 0x0b, 0xb0, 0x91, 0xc3, 0x41, 0x62,
0xb9, 0x68, 0xff, 0x85, 0x6f, 0x6f, 0xff, 0x68, 0x1f, 0xaa, 0xd3, 0xdb, 0xcf, 0x8c, 0x89, 0xec,
0xf6, 0x93, 0xaf, 0xf8, 0xa7, 0xa0, 0xc6, 0xd1, 0x5d, 0xd2, 0x50, 0xb9, 0xe6, 0xf2, 0x4c, 0xe4,
0x7c, 0x36, 0x58, 0x73, 0x79, 0xe4, 0x06, 0xab, 0xb8, 0x39, 0xf8, 0x03, 0x68, 0x4b, 0xf3, 0x99,
0xad, 0xd0, 0xfd, 0xa3, 0xc1, 0x10, 0x9b, 0xfa, 0x13, 0x54, 0x05, 0x75, 0x64, 0x0f, 0xcf, 0xf4,
0x02, 0xa3, 0xcc, 0x3f, 0x99, 0x5d, 0xb1, 0x96, 0x33, 0xca, 0x91, 0x42, 0xca, 0xc1, 0x7f, 0x0b,
0x00, 0x8b, 0x86, 0x84, 0x34, 0xa8, 0x9c, 0x0f, 0x4e, 0x06, 0xc3, 0x77, 0x03, 0x61, 0xe0, 0xc8,
0xee, 0xf7, 0xf4, 0x02, 0xaa, 0x41, 0x49, 0xec, 0xf9, 0x45, 0xe6, 0x41, 0x2e, 0xf9, 0x0a, 0xfb,
0x07, 0x90, 0x6d, 0xf8, 0x2a, 0xaa, 0x80, 0x92, 0xed, 0xf1, 0x72, 0x71, 0x2f, 0x33, 0x83, 0xd8,
0x3c, 0xb3, 0xda, 0x5d, 0x53, 0xaf, 0xb0, 0x8b, 0x6c, 0x85, 0x07, 0x28, 0xa7, 0xfb, 0x3b, 0xd3,
0x64, 0x5b, 0x3f, 0x30, 0x3f, 0x43, 0xfb, 0x8d, 0x89, 0x75, 0x8d, 0xf1, 0xf0, 0xf0, 0x9d, 0xbe,
0xc2, 0x78, 0xaf, 0xfb, 0xa6, 0xd5, 0xd3, 0x57, 0xd9, 0xda, 0xff, 0xc6, 0x6c, 0x63, 0xbb, 0x63,
0xb6, 0x6d, 0xbd, 0xce, 0x6e, 0x2e, 0x78, 0x80, 0x6b, 0xcc, 0xcd, 0xf1, 0xf0, 0x1c, 0x0f, 0xda,
0x96, 0xae, 0x1f, 0xec, 0xc1, 0x6a, 0x6e, 0xfe, 0x30, 0x5f, 0x76, 0xbb, 0x63, 0x99, 0x23, 0xfd,
0x09, 0xa3, 0x47, 0x6f, 0xda, 0xb8, 0x37, 0xd2, 0x0b, 0x9d, 0x5f, 0xbe, 0xdf, 0x9b, 0xfb, 0x94,
0x24, 0x49, 0xd3, 0x8f, 0x0e, 0x05, 0x75, 0x78, 0x1d, 0x1d, 0xce, 0xe9, 0x21, 0xff, 0x0b, 0x7a,
0xb8, 0x78, 0x3e, 0x97, 0x65, 0xce, 0xf9, 0xcd, 0x37, 0x01, 0x00, 0x00, 0xff, 0xff, 0x2e, 0xb4,
0x72, 0xde, 0xde, 0x0e, 0x00, 0x00,
}
......@@ -739,6 +739,9 @@ type DDL struct {
// VindexCols is set for AddColVindexStr.
VindexCols []ColIdent
// AutoIncSpec is set for AddAutoIncStr.
AutoIncSpec *AutoIncSpec
}
// DDL strings.
......@@ -755,6 +758,8 @@ const (
DropVschemaTableStr = "drop vschema table"
AddColVindexStr = "on table add vindex"
DropColVindexStr = "on table drop vindex"
AddSequenceStr = "add sequence"
AddAutoIncStr = "add auto_increment"
// Vindex DDL param to specify the owner of a vindex
VindexOwnerStr = "owner"
......@@ -813,6 +818,10 @@ func (node *DDL) Format(buf *TrackedBuffer) {
}
case DropColVindexStr:
buf.Myprintf("alter vschema on %v drop vindex %v", node.Table, node.VindexSpec.Name)
case AddSequenceStr:
buf.Myprintf("alter vschema add sequence %v", node.Table)
case AddAutoIncStr:
buf.Myprintf("alter vschema on %v add auto_increment %v", node.Table, node.AutoIncSpec)
default:
buf.Myprintf("%s table %v", node.Action, node.Table)
}
......@@ -1352,6 +1361,23 @@ type VindexSpec struct {
Params []VindexParam
}
// AutoIncSpec defines an autoincrement value for an ADD AUTO_INCREMENT
// statement (ALTER VSCHEMA ON <table> ADD AUTO_INCREMENT <col> USING <sequence>).
type AutoIncSpec struct {
	// Column is the column receiving auto-incremented values.
	Column ColIdent
	// Sequence is the sequence table named in the USING clause.
	Sequence TableName
}
// Format formats the node.
func (node *AutoIncSpec) Format(buf *TrackedBuffer) {
	// Emit "<column> using <sequence>" in a single formatted write.
	buf.Myprintf("%v using %v", node.Column, node.Sequence)
}
// walkSubtree walks the visitor over the spec's children
// (the sequence table, then the column), returning any walk error.
func (node *AutoIncSpec) walkSubtree(visit Visit) error {
	// Return Walk's result directly; the former intermediate err variable
	// served no purpose.
	return Walk(visit, node.Sequence, node.Column)
}
// ParseParams parses the vindex parameter list, pulling out the special-case
// "owner" parameter
func (node *VindexSpec) ParseParams() (string, map[string]string) {
......
因为 它太大了无法显示 source diff 。你可以改为 查看blob
......@@ -164,6 +164,7 @@ func skipToEnd(yylex interface{}) {
%token <bytes> MAXVALUE PARTITION REORGANIZE LESS THAN PROCEDURE TRIGGER
%token <bytes> VINDEX VINDEXES
%token <bytes> STATUS VARIABLES WARNINGS
%token <bytes> SEQUENCE
// Transaction Tokens
%token <bytes> BEGIN START TRANSACTION COMMIT ROLLBACK
......@@ -181,7 +182,7 @@ func skipToEnd(yylex interface{}) {
%token <bytes> NULLX AUTO_INCREMENT APPROXNUM SIGNED UNSIGNED ZEROFILL
// Supported SHOW tokens
%token <bytes> COLLATION DATABASES SCHEMAS TABLES VITESS_KEYSPACES VITESS_SHARDS VITESS_TABLETS VSCHEMA VSCHEMA_TABLES VITESS_TARGET FULL PROCESSLIST COLUMNS FIELDS ENGINES PLUGINS
%token <bytes> COLLATION DATABASES TABLES VSCHEMA FULL PROCESSLIST COLUMNS FIELDS ENGINES PLUGINS
// SET tokens
%token <bytes> NAMES CHARSET GLOBAL SESSION ISOLATION LEVEL READ WRITE ONLY REPEATABLE COMMITTED UNCOMMITTED SERIALIZABLE
......@@ -1352,6 +1353,21 @@ alter_statement:
},
}
}
| ALTER VSCHEMA ADD SEQUENCE table_name
{
$$ = &DDL{Action: AddSequenceStr, Table: $5}
}
| ALTER VSCHEMA ON table_name ADD AUTO_INCREMENT sql_id USING table_name
{
$$ = &DDL{
Action: AddAutoIncStr,
Table: $4,
AutoIncSpec: &AutoIncSpec{
Column: $7,
Sequence: $9,
},
}
}
alter_object_type:
COLUMN
......@@ -1499,10 +1515,6 @@ show_statement:
{
$$ = &Show{Type: string($2)}
}
| SHOW SCHEMAS ddl_skip_to_end
{
$$ = &Show{Type: string($2)}
}
| SHOW ENGINES
{
$$ = &Show{Type: string($2)}
......@@ -1560,22 +1572,6 @@ show_statement:
showCollationFilterOpt := $4
$$ = &Show{Type: string($2), ShowCollationFilterOpt: &showCollationFilterOpt}
}
| SHOW VITESS_KEYSPACES
{
$$ = &Show{Type: string($2)}
}
| SHOW VITESS_SHARDS
{
$$ = &Show{Type: string($2)}
}
| SHOW VITESS_TABLETS
{
$$ = &Show{Type: string($2)}
}
| SHOW VITESS_TARGET
{
$$ = &Show{Type: string($2)}
}
| SHOW VSCHEMA TABLES
{
$$ = &Show{Type: string($2) + " " + string($3)}
......@@ -1597,6 +1593,10 @@ show_statement:
*
* SHOW BINARY LOGS
* SHOW INVALID
* SHOW VITESS_KEYSPACES
* SHOW VITESS_TABLETS
* SHOW VITESS_SHARDS
* SHOW VITESS_TARGET
*/
| SHOW ID ddl_skip_to_end
{
......@@ -3364,7 +3364,7 @@ non_reserved_keyword:
| REPEATABLE
| RESTRICT
| ROLLBACK
| SCHEMAS
| SEQUENCE
| SESSION
| SERIALIZABLE
| SHARE
......@@ -3393,12 +3393,7 @@ non_reserved_keyword:
| VIEW
| VINDEX
| VINDEXES
| VITESS_KEYSPACES
| VITESS_SHARDS
| VITESS_TABLETS
| VSCHEMA
| VSCHEMA_TABLES
| VITESS_TARGET
| WARNINGS
| WITH
| WRITE
......
......@@ -318,11 +318,11 @@ var keywords = map[string]int{
"rlike": REGEXP,
"rollback": ROLLBACK,
"schema": SCHEMA,
"schemas": SCHEMAS,
"second_microsecond": UNUSED,
"select": SELECT,
"sensitive": UNUSED,
"separator": SEPARATOR,
"sequence": SEQUENCE,
"serializable": SERIALIZABLE,
"session": SESSION,
"set": SET,
......@@ -391,12 +391,7 @@ var keywords = map[string]int{
"vindex": VINDEX,
"vindexes": VINDEXES,
"view": VIEW,
"vitess_keyspaces": VITESS_KEYSPACES,
"vitess_shards": VITESS_SHARDS,
"vitess_tablets": VITESS_TABLETS,
"vitess_target": VITESS_TARGET,
"vschema": VSCHEMA,
"vschema_tables": VSCHEMA_TABLES,
"warnings": WARNINGS,
"when": WHEN,
"where": WHERE,
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册