提交 b2a3445f 编写于 作者: martianzhang's avatar martianzhang

update vendor

上级 dc1bb566
...@@ -2792,7 +2792,7 @@ func (q *Query4Audit) RuleUniqueKeyDup() Rule { ...@@ -2792,7 +2792,7 @@ func (q *Query4Audit) RuleUniqueKeyDup() Rule {
switch node := tiStmt.(type) { switch node := tiStmt.(type) {
case *tidb.CreateIndexStmt: case *tidb.CreateIndexStmt:
// create index // create index
if node.Unique { if node.KeyType == tidb.IndexKeyTypeUnique {
re := regexp.MustCompile(`(?i)(create\s+(unique)\s)`) re := regexp.MustCompile(`(?i)(create\s+(unique)\s)`)
rule = HeuristicRules["KEY.009"] rule = HeuristicRules["KEY.009"]
if position := re.FindIndex([]byte(q.Query)); len(position) > 0 { if position := re.FindIndex([]byte(q.Query)); len(position) > 0 {
......
...@@ -342,10 +342,6 @@ var TokenString = map[int]string{ ...@@ -342,10 +342,6 @@ var TokenString = map[int]string{
sqlparser.ZEROFILL: "zerofill", sqlparser.ZEROFILL: "zerofill",
sqlparser.DATABASES: "databases", sqlparser.DATABASES: "databases",
sqlparser.TABLES: "tables", sqlparser.TABLES: "tables",
sqlparser.VITESS_KEYSPACES: "vitess_keyspaces",
sqlparser.VITESS_SHARDS: "vitess_shards",
sqlparser.VITESS_TABLETS: "vitess_tablets",
sqlparser.VSCHEMA_TABLES: "vschema_tables",
sqlparser.NAMES: "names", sqlparser.NAMES: "names",
sqlparser.CHARSET: "charset", sqlparser.CHARSET: "charset",
sqlparser.GLOBAL: "global", sqlparser.GLOBAL: "global",
...@@ -366,6 +362,8 @@ var TokenString = map[int]string{ ...@@ -366,6 +362,8 @@ var TokenString = map[int]string{
sqlparser.SUBSTRING: "substring", sqlparser.SUBSTRING: "substring",
sqlparser.GROUP_CONCAT: "group_concat", sqlparser.GROUP_CONCAT: "group_concat",
sqlparser.SEPARATOR: "separator", sqlparser.SEPARATOR: "separator",
sqlparser.VSCHEMA: "vschema",
sqlparser.SEQUENCE: "sequence",
sqlparser.MATCH: "match", sqlparser.MATCH: "match",
sqlparser.AGAINST: "against", sqlparser.AGAINST: "against",
sqlparser.BOOLEAN: "boolean", sqlparser.BOOLEAN: "boolean",
......
Changes by Version
==================
1.1.0 (2019-03-23)
-------------------
Notable changes:
- The library is now released under Apache 2.0 license
- Using Set() instead of Add() in HTTPHeadersCarrier is functionally a breaking change (fixes issue [#159](https://github.com/opentracing/opentracing-go/issues/159))
- 'golang.org/x/net/context' is replaced with 'context' from the standard library
List of all changes:
- Export StartSpanFromContextWithTracer (#214) <Aaron Delaney>
- Add IsGlobalTracerRegistered() to indicate if a tracer has been registered (#201) <Mike Goldsmith>
- Use Set() instead of Add() in HTTPHeadersCarrier (#191) <jeremyxu2010>
- Update license to Apache 2.0 (#181) <Andrea Kao>
- Replace 'golang.org/x/net/context' with 'context' (#176) <Tony Ghita>
- Port of Python opentracing/harness/api_check.py to Go (#146) <chris erway>
- Fix race condition in MockSpan.Context() (#170) <Brad>
- Add PeerHostIPv4.SetString() (#155) <NeoCN>
- Add a Noop log field type to log to allow for optional fields (#150) <Matt Ho>
1.0.2 (2017-04-26)
-------------------
- Add more semantic tags (#139) <Rustam Zagirov>
1.0.1 (2017-02-06)
-------------------
- Correct spelling in comments <Ben Sigelman>
- Address race in nextMockID() (#123) <bill fumerola>
- log: avoid panic marshaling nil error (#131) <Anthony Voutas>
- Deprecate InitGlobalTracer in favor of SetGlobalTracer (#128) <Yuri Shkuro>
- Drop Go 1.5 that fails in Travis (#129) <Yuri Shkuro>
- Add convenience methods Key() and Value() to log.Field <Ben Sigelman>
- Add convenience methods to log.Field (2 years, 6 months ago) <Radu Berinde>
1.0.0 (2016-09-26)
-------------------
- This release implements OpenTracing Specification 1.0 (https://opentracing.io/spec)
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2016 The OpenTracing Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
# Default target: run the unit tests, then the linters.
.DEFAULT_GOAL := test-and-lint
.PHONY: test-and-lint
test-and-lint: test lint
.PHONY: test
# Unit tests with coverage and the race detector enabled.
test:
	go test -v -cover -race ./...
.PHONY: cover
# Atomic-mode coverage profile (coverage.txt), e.g. for CI upload.
cover:
	go test -v -coverprofile=coverage.txt -covermode=atomic -race ./...
.PHONY: lint
# Format, lint, and vet; the target fails if golint prints anything.
lint:
	go fmt ./...
	golint ./...
	@# Run again with magic to exit non-zero if golint outputs anything.
	@! (golint ./... | read dummy)
	go vet ./...
[![Gitter chat](http://img.shields.io/badge/gitter-join%20chat%20%E2%86%92-brightgreen.svg)](https://gitter.im/opentracing/public) [![Build Status](https://travis-ci.org/opentracing/opentracing-go.svg?branch=master)](https://travis-ci.org/opentracing/opentracing-go) [![GoDoc](https://godoc.org/github.com/opentracing/opentracing-go?status.svg)](http://godoc.org/github.com/opentracing/opentracing-go)
[![Sourcegraph Badge](https://sourcegraph.com/github.com/opentracing/opentracing-go/-/badge.svg)](https://sourcegraph.com/github.com/opentracing/opentracing-go?badge)
# OpenTracing API for Go
This package is a Go platform API for OpenTracing.
## Required Reading
In order to understand the Go platform API, one must first be familiar with the
[OpenTracing project](https://opentracing.io) and
[terminology](https://opentracing.io/specification/) more specifically.
## API overview for those adding instrumentation
Everyday consumers of this `opentracing` package really only need to worry
about a couple of key abstractions: the `StartSpan` function, the `Span`
interface, and binding a `Tracer` at `main()`-time. Here are code snippets
demonstrating some important use cases.
#### Singleton initialization
The simplest starting point is `./default_tracer.go`. As early as possible, call
```go
import "github.com/opentracing/opentracing-go"
import ".../some_tracing_impl"
func main() {
opentracing.SetGlobalTracer(
// tracing impl specific:
some_tracing_impl.New(...),
)
...
}
```
#### Non-Singleton initialization
If you prefer direct control to singletons, manage ownership of the
`opentracing.Tracer` implementation explicitly.
#### Creating a Span given an existing Go `context.Context`
If you use `context.Context` in your application, OpenTracing's Go library will
happily rely on it for `Span` propagation. To start a new (blocking child)
`Span`, you can use `StartSpanFromContext`.
```go
func xyz(ctx context.Context, ...) {
...
span, ctx := opentracing.StartSpanFromContext(ctx, "operation_name")
defer span.Finish()
span.LogFields(
log.String("event", "soft error"),
log.String("type", "cache timeout"),
log.Int("waited.millis", 1500))
...
}
```
#### Starting an empty trace by creating a "root span"
It's always possible to create a "root" `Span` with no parent or other causal
reference.
```go
func xyz() {
...
sp := opentracing.StartSpan("operation_name")
defer sp.Finish()
...
}
```
#### Creating a (child) Span given an existing (parent) Span
```go
func xyz(parentSpan opentracing.Span, ...) {
...
sp := opentracing.StartSpan(
"operation_name",
opentracing.ChildOf(parentSpan.Context()))
defer sp.Finish()
...
}
```
#### Serializing to the wire
```go
func makeSomeRequest(ctx context.Context) ... {
if span := opentracing.SpanFromContext(ctx); span != nil {
httpClient := &http.Client{}
httpReq, _ := http.NewRequest("GET", "http://myservice/", nil)
// Transmit the span's TraceContext as HTTP headers on our
// outbound request.
opentracing.GlobalTracer().Inject(
span.Context(),
opentracing.HTTPHeaders,
opentracing.HTTPHeadersCarrier(httpReq.Header))
resp, err := httpClient.Do(httpReq)
...
}
...
}
```
#### Deserializing from the wire
```go
http.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
var serverSpan opentracing.Span
appSpecificOperationName := ...
wireContext, err := opentracing.GlobalTracer().Extract(
opentracing.HTTPHeaders,
opentracing.HTTPHeadersCarrier(req.Header))
if err != nil {
// Optionally record something about err here
}
// Create the span referring to the RPC client if available.
// If wireContext == nil, a root span will be created.
serverSpan = opentracing.StartSpan(
appSpecificOperationName,
ext.RPCServerOption(wireContext))
defer serverSpan.Finish()
ctx := opentracing.ContextWithSpan(context.Background(), serverSpan)
...
}
```
#### Conditionally capture a field using `log.Noop`
In some situations, you may want to dynamically decide whether or not
to log a field. For example, you may want to capture additional data,
such as a customer ID, in non-production environments:
```go
func Customer(order *Order) log.Field {
if os.Getenv("ENVIRONMENT") == "dev" {
return log.String("customer", order.Customer.ID)
}
return log.Noop()
}
```
#### Goroutine-safety
The entire public API is goroutine-safe and does not require external
synchronization.
## API pointers for those implementing a tracing system
Tracing system implementors may be able to reuse or copy-paste-modify the `basictracer` package, found [here](https://github.com/opentracing/basictracer-go). In particular, see `basictracer.New(...)`.
## API compatibility
For the time being, "mild" backwards-incompatible changes may be made without changing the major version number. As OpenTracing and `opentracing-go` mature, backwards compatibility will become more of a priority.
## Tracer test suite
A test suite is available in the [harness](https://godoc.org/github.com/opentracing/opentracing-go/harness) package that can assist Tracer implementors to assert that their Tracer is working correctly.
## Licensing
[Apache 2.0 License](./LICENSE).
package opentracing
// registeredTracer pairs the currently installed Tracer with a flag
// recording whether SetGlobalTracer has ever been called (as opposed to the
// default NoopTracer still being in place — see the globalTracer var).
type registeredTracer struct {
	tracer       Tracer // the active tracer implementation
	isRegistered bool   // true once SetGlobalTracer has run
}
var (
	// globalTracer is the process-wide singleton; it starts as a NoopTracer
	// with isRegistered=false until SetGlobalTracer installs a real tracer.
	globalTracer = registeredTracer{NoopTracer{}, false}
)
// SetGlobalTracer installs `tracer` as the [singleton] opentracing.Tracer
// returned by GlobalTracer(). Users of GlobalTracer (rather than an
// explicitly managed Tracer instance) should call this as early as possible
// in main(), before invoking the package-level `StartSpan` helpers; until
// then those helpers delegate to a no-op tracer.
func SetGlobalTracer(tracer Tracer) {
	globalTracer = registeredTracer{
		tracer:       tracer,
		isRegistered: true,
	}
}
// GlobalTracer returns the process-wide singleton `Tracer` implementation.
// Until `SetGlobalTracer()` has been called, this is a no-op tracer that
// silently discards all data handed to it.
func GlobalTracer() Tracer {
	t := globalTracer.tracer
	return t
}
// StartSpan starts a span via the globally registered tracer; it is a
// convenience wrapper over `GlobalTracer().StartSpan(...)`.
func StartSpan(operationName string, opts ...StartSpanOption) Span {
	return GlobalTracer().StartSpan(operationName, opts...)
}
// InitGlobalTracer registers `tracer` as the global tracer.
//
// Deprecated: use SetGlobalTracer instead.
func InitGlobalTracer(tracer Tracer) {
	SetGlobalTracer(tracer)
}
// IsGlobalTracerRegistered returns a `bool` to indicate if a tracer has been
// globally registered via SetGlobalTracer (the default NoopTracer does not
// count as registered).
func IsGlobalTracerRegistered() bool {
	return globalTracer.isRegistered
}
package opentracing
import "context"
// contextKey is an unexported type used as the context.WithValue key, which
// guarantees no collision with keys declared by other packages.
type contextKey struct{}

// activeSpanKey is the single key under which a Span is stored in a Context.
var activeSpanKey = contextKey{}
// ContextWithSpan returns a child of `ctx` carrying a reference to `span`,
// so it can later be recovered with SpanFromContext.
func ContextWithSpan(ctx context.Context, span Span) context.Context {
	wrapped := context.WithValue(ctx, activeSpanKey, span)
	return wrapped
}
// SpanFromContext returns the `Span` previously stored in `ctx` via
// ContextWithSpan, or `nil` if none is present.
//
// NOTE: context.Context != SpanContext: the former is Go's intra-process
// context propagation mechanism, and the latter houses OpenTracing's
// per-Span identity and baggage information.
func SpanFromContext(ctx context.Context) Span {
	// A failed type assertion leaves sp as a nil Span, matching the
	// documented "no span found" return.
	sp, _ := ctx.Value(activeSpanKey).(Span)
	return sp
}
// StartSpanFromContext starts and returns a Span with `operationName`, using
// any Span found within `ctx` as a ChildOfRef. If no such parent could be
// found, StartSpanFromContext creates a root (parentless) Span.
//
// The second return value is a context.Context object built around the
// returned Span.
//
// Example usage:
//
//    SomeFunction(ctx context.Context, ...) {
//        sp, ctx := opentracing.StartSpanFromContext(ctx, "SomeFunction")
//        defer sp.Finish()
//        ...
//    }
func StartSpanFromContext(ctx context.Context, operationName string, opts ...StartSpanOption) (Span, context.Context) {
	tracer := GlobalTracer()
	return StartSpanFromContextWithTracer(ctx, tracer, operationName, opts...)
}
// StartSpanFromContextWithTracer starts and returns a span with
// `operationName`, using any span found within `ctx` as a ChildOfRef; if no
// parent exists it creates a root span. It also returns a context.Context
// built around the new span.
//
// Its behavior is identical to StartSpanFromContext except that it takes an
// explicit tracer rather than using the global one.
func StartSpanFromContextWithTracer(ctx context.Context, tracer Tracer, operationName string, opts ...StartSpanOption) (Span, context.Context) {
	parent := SpanFromContext(ctx)
	if parent != nil {
		opts = append(opts, ChildOf(parent.Context()))
	}
	span := tracer.StartSpan(operationName, opts...)
	return span, ContextWithSpan(ctx, span)
}
package log
import (
"fmt"
"math"
)
// fieldType discriminates which member of Field carries the value
// (stringVal, numericVal, or interfaceVal).
type fieldType int

const (
	stringType fieldType = iota
	boolType
	intType
	int32Type
	uint32Type
	int64Type
	uint64Type
	float32Type
	float64Type
	errorType
	objectType
	lazyLoggerType
	noopType // field is ignored entirely by tracers (see Noop)
)
// Field instances are constructed via LogBool, LogString, and so on.
// Tracing implementations may then handle them via the Field.Marshal
// method.
//
// "heavily influenced by" (i.e., partially stolen from)
// https://github.com/uber-go/zap
type Field struct {
	key          string
	fieldType    fieldType   // selects which of the value members below is live
	numericVal   int64       // holds ints, uints, bools (0/1), and float bit patterns
	stringVal    string      // holds string values only
	interfaceVal interface{} // holds errors, objects, and LazyLoggers
}
// String adds a string-valued key:value pair to a Span.LogFields() record.
func String(key, val string) Field {
	return Field{fieldType: stringType, key: key, stringVal: val}
}
// Bool adds a bool-valued key:value pair to a Span.LogFields() record.
// The value is stored in numericVal as 0 or 1.
func Bool(key string, val bool) Field {
	f := Field{key: key, fieldType: boolType}
	if val {
		f.numericVal = 1
	}
	return f
}
// Int adds an int-valued key:value pair to a Span.LogFields() record.
func Int(key string, val int) Field {
	return Field{fieldType: intType, key: key, numericVal: int64(val)}
}
// Int32 adds an int32-valued key:value pair to a Span.LogFields() record.
func Int32(key string, val int32) Field {
	return Field{fieldType: int32Type, key: key, numericVal: int64(val)}
}
// Int64 adds an int64-valued key:value pair to a Span.LogFields() record.
func Int64(key string, val int64) Field {
	return Field{fieldType: int64Type, key: key, numericVal: val}
}
// Uint32 adds a uint32-valued key:value pair to a Span.LogFields() record.
func Uint32(key string, val uint32) Field {
	return Field{fieldType: uint32Type, key: key, numericVal: int64(val)}
}
// Uint64 adds a uint64-valued key:value pair to a Span.LogFields() record.
// The value is reinterpreted as int64 for storage; Marshal/Value convert
// it back to uint64.
func Uint64(key string, val uint64) Field {
	return Field{fieldType: uint64Type, key: key, numericVal: int64(val)}
}
// Float32 adds a float32-valued key:value pair to a Span.LogFields() record.
// The value is stored as its IEEE-754 bit pattern in numericVal.
func Float32(key string, val float32) Field {
	bits := math.Float32bits(val)
	return Field{fieldType: float32Type, key: key, numericVal: int64(bits)}
}
// Float64 adds a float64-valued key:value pair to a Span.LogFields() record.
// The value is stored as its IEEE-754 bit pattern in numericVal.
func Float64(key string, val float64) Field {
	bits := math.Float64bits(val)
	return Field{fieldType: float64Type, key: key, numericVal: int64(bits)}
}
// Error adds an error under the fixed key "error" to a Span.LogFields()
// record. A nil err is tolerated (Marshal emits "<nil>" for it).
func Error(err error) Field {
	return Field{fieldType: errorType, key: "error", interfaceVal: err}
}
// Object adds an object-valued key:value pair to a Span.LogFields() record.
func Object(key string, obj interface{}) Field {
	return Field{fieldType: objectType, key: key, interfaceVal: obj}
}
// LazyLogger allows for user-defined, late-bound logging of arbitrary data.
// The tracing implementation invokes the function with an Encoder at some
// indefinite time after Lazy() returns (see Lazy).
type LazyLogger func(fv Encoder)
// Lazy adds a LazyLogger to a Span.LogFields() record; the tracing
// implementation will call the LazyLogger function at an indefinite time in
// the future (after Lazy() returns). Note that the resulting Field has no
// key of its own.
func Lazy(ll LazyLogger) Field {
	return Field{fieldType: lazyLoggerType, interfaceVal: ll}
}
// Noop creates a no-op log field that tracers must ignore.
//
// It is useful for conditionally captured fields, for example ones that
// should only be logged outside production:
//
//	func customerField(order *Order) log.Field {
//		if os.Getenv("ENVIRONMENT") == "dev" {
//			return log.String("customer", order.Customer.ID)
//		}
//		return log.Noop()
//	}
//
//	span.LogFields(log.String("event", "purchase"), customerField(order))
//
func Noop() Field {
	return Field{fieldType: noopType}
}
// Encoder allows access to the contents of a Field (via a call to
// Field.Marshal, which dispatches to exactly one of the methods below per
// Field).
//
// Tracer implementations typically provide an implementation of Encoder;
// OpenTracing callers typically do not need to concern themselves with it.
type Encoder interface {
	EmitString(key, value string)
	EmitBool(key string, value bool)
	EmitInt(key string, value int)
	EmitInt32(key string, value int32)
	EmitInt64(key string, value int64)
	EmitUint32(key string, value uint32)
	EmitUint64(key string, value uint64)
	EmitFloat32(key string, value float32)
	EmitFloat64(key string, value float64)
	EmitObject(key string, value interface{})
	// EmitLazyLogger receives no key; Lazy fields do not carry one.
	EmitLazyLogger(value LazyLogger)
}
// Marshal passes a Field instance through to the appropriate
// field-type-specific method of an Encoder.
//
// All numeric values live in numericVal regardless of declared width, so
// each case narrows back to the advertised type before emitting; floats
// round-trip through their IEEE-754 bit patterns (see Float32/Float64).
func (lf Field) Marshal(visitor Encoder) {
	switch lf.fieldType {
	case stringType:
		visitor.EmitString(lf.key, lf.stringVal)
	case boolType:
		// Bools are stored as 0/1 (see Bool).
		visitor.EmitBool(lf.key, lf.numericVal != 0)
	case intType:
		visitor.EmitInt(lf.key, int(lf.numericVal))
	case int32Type:
		visitor.EmitInt32(lf.key, int32(lf.numericVal))
	case int64Type:
		visitor.EmitInt64(lf.key, int64(lf.numericVal))
	case uint32Type:
		visitor.EmitUint32(lf.key, uint32(lf.numericVal))
	case uint64Type:
		visitor.EmitUint64(lf.key, uint64(lf.numericVal))
	case float32Type:
		visitor.EmitFloat32(lf.key, math.Float32frombits(uint32(lf.numericVal)))
	case float64Type:
		visitor.EmitFloat64(lf.key, math.Float64frombits(uint64(lf.numericVal)))
	case errorType:
		// interfaceVal may hold a nil error; emit "<nil>" instead of
		// dereferencing it.
		if err, ok := lf.interfaceVal.(error); ok {
			visitor.EmitString(lf.key, err.Error())
		} else {
			visitor.EmitString(lf.key, "<nil>")
		}
	case objectType:
		visitor.EmitObject(lf.key, lf.interfaceVal)
	case lazyLoggerType:
		visitor.EmitLazyLogger(lf.interfaceVal.(LazyLogger))
	case noopType:
		// intentionally left blank
	}
}
// Key returns the field's key as passed to its constructor
// (or the fixed "error" key for fields built by Error).
func (lf Field) Key() string {
	return lf.key
}
// Value returns the field's value as interface{}, converting the stored
// representation back to the type advertised by the constructor (numeric
// widths are restored and floats are rebuilt from their bit patterns).
// Noop fields yield nil.
func (lf Field) Value() interface{} {
	switch lf.fieldType {
	case stringType:
		return lf.stringVal
	case boolType:
		return lf.numericVal != 0
	case intType:
		return int(lf.numericVal)
	case int32Type:
		return int32(lf.numericVal)
	case int64Type:
		return int64(lf.numericVal)
	case uint32Type:
		return uint32(lf.numericVal)
	case uint64Type:
		return uint64(lf.numericVal)
	case float32Type:
		return math.Float32frombits(uint32(lf.numericVal))
	case float64Type:
		return math.Float64frombits(uint64(lf.numericVal))
	case errorType, objectType, lazyLoggerType:
		// These kinds store the value directly in interfaceVal.
		return lf.interfaceVal
	case noopType:
		return nil
	default:
		return nil
	}
}
// String renders the field as "key:value" for human consumption.
func (lf Field) String() string {
	return fmt.Sprintf("%s:%v", lf.key, lf.Value())
}
package log
import "fmt"
// InterleavedKVToFields converts a flat key/value list (a la Span.LogKV())
// into a Field slice (a la Span.LogFields()). keyValues must contain an
// even number of elements alternating string keys and arbitrary values;
// otherwise an error is returned. Values of unrecognized types are coerced
// to strings via fmt.Sprint.
func InterleavedKVToFields(keyValues ...interface{}) ([]Field, error) {
	if len(keyValues)%2 != 0 {
		return nil, fmt.Errorf("non-even keyValues len: %d", len(keyValues))
	}
	fields := make([]Field, len(keyValues)/2)
	for i := range fields {
		keyRaw, valRaw := keyValues[2*i], keyValues[2*i+1]
		key, ok := keyRaw.(string)
		if !ok {
			return nil, fmt.Errorf(
				"non-string key (pair #%d): %T",
				i, keyRaw)
		}
		switch v := valRaw.(type) {
		case bool:
			fields[i] = Bool(key, v)
		case string:
			fields[i] = String(key, v)
		case int:
			fields[i] = Int(key, v)
		case int8:
			fields[i] = Int32(key, int32(v))
		case int16:
			fields[i] = Int32(key, int32(v))
		case int32:
			fields[i] = Int32(key, v)
		case int64:
			fields[i] = Int64(key, v)
		case uint8:
			fields[i] = Uint32(key, uint32(v))
		case uint16:
			fields[i] = Uint32(key, uint32(v))
		case uint32:
			fields[i] = Uint32(key, v)
		case uint:
			fields[i] = Uint64(key, uint64(v))
		case uint64:
			fields[i] = Uint64(key, v)
		case float32:
			fields[i] = Float32(key, v)
		case float64:
			fields[i] = Float64(key, v)
		default:
			// When in doubt, coerce to a string
			fields[i] = String(key, fmt.Sprint(v))
		}
	}
	return fields, nil
}
package opentracing
import "github.com/opentracing/opentracing-go/log"
// A NoopTracer is a trivial, minimum overhead implementation of Tracer
// for which all operations are no-ops.
//
// The primary use of this implementation is in libraries, such as RPC
// frameworks, that make tracing an optional feature controlled by the
// end user. A no-op implementation allows said libraries to use it
// as the default Tracer and to write instrumentation that does
// not need to keep checking if the tracer instance is nil.
//
// For the same reason, the NoopTracer is the default "global" tracer
// (see GlobalTracer and SetGlobalTracer functions).
//
// WARNING: NoopTracer does not support baggage propagation.
type NoopTracer struct{}

// noopSpan is the do-nothing Span handed out by NoopTracer.
type noopSpan struct{}

// noopSpanContext is the do-nothing SpanContext for noopSpan.
type noopSpanContext struct{}
var (
	// Shared singletons returned by the no-op implementations below; as
	// zero-size structs they cost nothing to share.
	defaultNoopSpanContext = noopSpanContext{}
	defaultNoopSpan        = noopSpan{}
	defaultNoopTracer      = NoopTracer{}
)

const (
	// emptyString is what noopSpan.BaggageItem returns for every key.
	emptyString = ""
)
// noopSpanContext: baggage iteration visits nothing (baggage unsupported).
func (n noopSpanContext) ForeachBaggageItem(handler func(k, v string) bool) {}

// noopSpan: every method discards its input and returns a shared default,
// so instrumentation can call them unconditionally at effectively no cost.
func (n noopSpan) Context() SpanContext { return defaultNoopSpanContext }
func (n noopSpan) SetBaggageItem(key, val string) Span { return defaultNoopSpan }
func (n noopSpan) BaggageItem(key string) string { return emptyString }
func (n noopSpan) SetTag(key string, value interface{}) Span { return n }
func (n noopSpan) LogFields(fields ...log.Field) {}
func (n noopSpan) LogKV(keyVals ...interface{}) {}
func (n noopSpan) Finish() {}
func (n noopSpan) FinishWithOptions(opts FinishOptions) {}
func (n noopSpan) SetOperationName(operationName string) Span { return n }
func (n noopSpan) Tracer() Tracer { return defaultNoopTracer }
// Deprecated event-style logging entry points; also no-ops here.
func (n noopSpan) LogEvent(event string) {}
func (n noopSpan) LogEventWithPayload(event string, payload interface{}) {}
func (n noopSpan) Log(data LogData) {}
// StartSpan belongs to the Tracer interface.
func (n NoopTracer) StartSpan(operationName string, opts ...StartSpanOption) Span {
return defaultNoopSpan
}
// Inject belongs to the Tracer interface.
func (n NoopTracer) Inject(sp SpanContext, format interface{}, carrier interface{}) error {
return nil
}
// Extract belongs to the Tracer interface.
func (n NoopTracer) Extract(format interface{}, carrier interface{}) (SpanContext, error) {
return nil, ErrSpanContextNotFound
}
package opentracing

import (
	"errors"
	"net/http"
)

///////////////////////////////////////////////////////////////////////////////
// CORE PROPAGATION INTERFACES:
///////////////////////////////////////////////////////////////////////////////

// Sentinel errors for Tracer.Inject()/Tracer.Extract() implementations;
// callers compare against these values to classify propagation failures.
var (
	// ErrUnsupportedFormat occurs when the `format` passed to Tracer.Inject() or
	// Tracer.Extract() is not recognized by the Tracer implementation.
	ErrUnsupportedFormat = errors.New("opentracing: Unknown or unsupported Inject/Extract format")

	// ErrSpanContextNotFound occurs when the `carrier` passed to
	// Tracer.Extract() is valid and uncorrupted but has insufficient
	// information to extract a SpanContext.
	ErrSpanContextNotFound = errors.New("opentracing: SpanContext not found in Extract carrier")

	// ErrInvalidSpanContext errors occur when Tracer.Inject() is asked to
	// operate on a SpanContext which it is not prepared to handle (for
	// example, since it was created by a different tracer implementation).
	ErrInvalidSpanContext = errors.New("opentracing: SpanContext type incompatible with tracer")

	// ErrInvalidCarrier errors occur when Tracer.Inject() or Tracer.Extract()
	// implementations expect a different type of `carrier` than they are
	// given.
	ErrInvalidCarrier = errors.New("opentracing: Invalid Inject/Extract carrier")

	// ErrSpanContextCorrupted occurs when the `carrier` passed to
	// Tracer.Extract() is of the expected type but is corrupted.
	ErrSpanContextCorrupted = errors.New("opentracing: SpanContext data corrupted in Extract carrier")
)

///////////////////////////////////////////////////////////////////////////////
// BUILTIN PROPAGATION FORMATS:
///////////////////////////////////////////////////////////////////////////////

// BuiltinFormat is used to demarcate the values within package `opentracing`
// that are intended for use with the Tracer.Inject() and Tracer.Extract()
// methods.
type BuiltinFormat byte

const (
	// Binary represents SpanContexts as opaque binary data.
	//
	// For Tracer.Inject(): the carrier must be an `io.Writer`.
	//
	// For Tracer.Extract(): the carrier must be an `io.Reader`.
	Binary BuiltinFormat = iota

	// TextMap represents SpanContexts as key:value string pairs.
	//
	// Unlike HTTPHeaders, the TextMap format does not restrict the key or
	// value character sets in any way.
	//
	// For Tracer.Inject(): the carrier must be a `TextMapWriter`.
	//
	// For Tracer.Extract(): the carrier must be a `TextMapReader`.
	TextMap

	// HTTPHeaders represents SpanContexts as HTTP header string pairs.
	//
	// Unlike TextMap, the HTTPHeaders format requires that the keys and values
	// be valid as HTTP headers as-is (i.e., character casing may be unstable
	// and special characters are disallowed in keys, values should be
	// URL-escaped, etc).
	//
	// For Tracer.Inject(): the carrier must be a `TextMapWriter`.
	//
	// For Tracer.Extract(): the carrier must be a `TextMapReader`.
	//
	// See HTTPHeadersCarrier for an implementation of both TextMapWriter
	// and TextMapReader that defers to an http.Header instance for storage.
	// For example, Inject():
	//
	//    carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
	//    err := span.Tracer().Inject(
	//        span.Context(), opentracing.HTTPHeaders, carrier)
	//
	// Or Extract():
	//
	//    carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
	//    clientContext, err := tracer.Extract(
	//        opentracing.HTTPHeaders, carrier)
	//
	HTTPHeaders
)

// TextMapWriter is the Inject() carrier for the TextMap builtin format. With
// it, the caller can encode a SpanContext for propagation as entries in a map
// of unicode strings.
type TextMapWriter interface {
	// Set a key:value pair to the carrier. Multiple calls to Set() for the
	// same key leads to undefined behavior.
	//
	// NOTE: The backing store for the TextMapWriter may contain data unrelated
	// to SpanContext. As such, Inject() and Extract() implementations that
	// call the TextMapWriter and TextMapReader interfaces must agree on a
	// prefix or other convention to distinguish their own key:value pairs.
	Set(key, val string)
}

// TextMapReader is the Extract() carrier for the TextMap builtin format. With it,
// the caller can decode a propagated SpanContext as entries in a map of
// unicode strings.
type TextMapReader interface {
	// ForeachKey returns TextMap contents via repeated calls to the `handler`
	// function. If any call to `handler` returns a non-nil error, ForeachKey
	// terminates and returns that error.
	//
	// NOTE: The backing store for the TextMapReader may contain data unrelated
	// to SpanContext. As such, Inject() and Extract() implementations that
	// call the TextMapWriter and TextMapReader interfaces must agree on a
	// prefix or other convention to distinguish their own key:value pairs.
	//
	// The "foreach" callback pattern reduces unnecessary copying in some cases
	// and also allows implementations to hold locks while the map is read.
	ForeachKey(handler func(key, val string) error) error
}
// TextMapCarrier lets a regular map[string]string serve as both a
// TextMapWriter and a TextMapReader carrier.
type TextMapCarrier map[string]string

// Set implements Set() of opentracing.TextMapWriter by recording the
// key:value pair in the underlying map.
func (c TextMapCarrier) Set(key, val string) {
	c[key] = val
}

// ForeachKey conforms to the TextMapReader interface: it invokes handler
// once per stored entry and stops at the first non-nil error, which is
// propagated back to the caller.
func (c TextMapCarrier) ForeachKey(handler func(key, val string) error) error {
	for key, val := range c {
		if err := handler(key, val); err != nil {
			return err
		}
	}
	return nil
}
// HTTPHeadersCarrier satisfies both TextMapWriter and TextMapReader by
// deferring to an http.Header for storage.
//
// Server-side example:
//
//    carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
//    clientContext, err := tracer.Extract(opentracing.HTTPHeaders, carrier)
//
// Client-side example:
//
//    carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
//    err := tracer.Inject(
//        span.Context(),
//        opentracing.HTTPHeaders,
//        carrier)
//
type HTTPHeadersCarrier http.Header

// Set conforms to the TextMapWriter interface. The key is canonicalized
// per http.Header.Set conventions, replacing any existing values.
func (c HTTPHeadersCarrier) Set(key, val string) {
	http.Header(c).Set(key, val)
}

// ForeachKey conforms to the TextMapReader interface. A header key may map
// to several values; handler is invoked once per (key, value) pair, and
// the first non-nil error aborts the iteration and is returned.
func (c HTTPHeadersCarrier) ForeachKey(handler func(key, val string) error) error {
	for key, values := range c {
		for _, value := range values {
			if err := handler(key, value); err != nil {
				return err
			}
		}
	}
	return nil
}
package opentracing

import (
	"time"

	"github.com/opentracing/opentracing-go/log"
)

// SpanContext represents Span state that must propagate to descendant Spans and across process
// boundaries (e.g., a <trace_id, span_id, sampled> tuple).
type SpanContext interface {
	// ForeachBaggageItem grants access to all baggage items stored in the
	// SpanContext.
	// The handler function will be called for each baggage key/value pair.
	// The ordering of items is not guaranteed.
	//
	// The bool return value indicates if the handler wants to continue iterating
	// through the rest of the baggage items; for example if the handler is trying to
	// find some baggage item by pattern matching the name, it can return false
	// as soon as the item is found to stop further iterations.
	ForeachBaggageItem(handler func(k, v string) bool)
}

// Span represents an active, un-finished span in the OpenTracing system.
//
// Spans are created by the Tracer interface.
type Span interface {
	// Sets the end timestamp and finalizes Span state.
	//
	// With the exception of calls to Context() (which are always allowed),
	// Finish() must be the last call made to any span instance, and to do
	// otherwise leads to undefined behavior.
	Finish()
	// FinishWithOptions is like Finish() but with explicit control over
	// timestamps and log data.
	FinishWithOptions(opts FinishOptions)

	// Context() yields the SpanContext for this Span. Note that the return
	// value of Context() is still valid after a call to Span.Finish(), as is
	// a call to Span.Context() after a call to Span.Finish().
	Context() SpanContext

	// Sets or changes the operation name.
	//
	// Returns a reference to this Span for chaining.
	SetOperationName(operationName string) Span

	// Adds a tag to the span.
	//
	// If there is a pre-existing tag set for `key`, it is overwritten.
	//
	// Tag values can be numeric types, strings, or bools. The behavior of
	// other tag value types is undefined at the OpenTracing level. If a
	// tracing system does not know how to handle a particular value type, it
	// may ignore the tag, but shall not panic.
	//
	// Returns a reference to this Span for chaining.
	SetTag(key string, value interface{}) Span

	// LogFields is an efficient and type-checked way to record key:value
	// logging data about a Span, though the programming interface is a little
	// more verbose than LogKV(). Here's an example:
	//
	//    span.LogFields(
	//        log.String("event", "soft error"),
	//        log.String("type", "cache timeout"),
	//        log.Int("waited.millis", 1500))
	//
	// Also see Span.FinishWithOptions() and FinishOptions.BulkLogData.
	LogFields(fields ...log.Field)

	// LogKV is a concise, readable way to record key:value logging data about
	// a Span, though unfortunately this also makes it less efficient and less
	// type-safe than LogFields(). Here's an example:
	//
	//    span.LogKV(
	//        "event", "soft error",
	//        "type", "cache timeout",
	//        "waited.millis", 1500)
	//
	// For LogKV (as opposed to LogFields()), the parameters must appear as
	// key-value pairs, like
	//
	//    span.LogKV(key1, val1, key2, val2, key3, val3, ...)
	//
	// The keys must all be strings. The values may be strings, numeric types,
	// bools, Go error instances, or arbitrary structs.
	//
	// (Note to implementors: consider the log.InterleavedKVToFields() helper)
	LogKV(alternatingKeyValues ...interface{})

	// SetBaggageItem sets a key:value pair on this Span and its SpanContext
	// that also propagates to descendants of this Span.
	//
	// SetBaggageItem() enables powerful functionality given a full-stack
	// opentracing integration (e.g., arbitrary application data from a mobile
	// app can make it, transparently, all the way into the depths of a storage
	// system), and with it some powerful costs: use this feature with care.
	//
	// IMPORTANT NOTE #1: SetBaggageItem() will only propagate baggage items to
	// *future* causal descendants of the associated Span.
	//
	// IMPORTANT NOTE #2: Use this thoughtfully and with care. Every key and
	// value is copied into every local *and remote* child of the associated
	// Span, and that can add up to a lot of network and cpu overhead.
	//
	// Returns a reference to this Span for chaining.
	SetBaggageItem(restrictedKey, value string) Span

	// Gets the value for a baggage item given its key. Returns the empty string
	// if the value isn't found in this Span.
	BaggageItem(restrictedKey string) string

	// Provides access to the Tracer that created this Span.
	Tracer() Tracer

	// Deprecated: use LogFields or LogKV
	LogEvent(event string)
	// Deprecated: use LogFields or LogKV
	LogEventWithPayload(event string, payload interface{})
	// Deprecated: use LogFields or LogKV
	Log(data LogData)
}

// LogRecord is data associated with a single Span log. Every LogRecord
// instance must specify at least one Field.
type LogRecord struct {
	// Timestamp is the instant the log data was recorded.
	Timestamp time.Time
	// Fields holds the structured key:value payload of this log record.
	Fields []log.Field
}

// FinishOptions allows Span.FinishWithOptions callers to override the finish
// timestamp and provide log data via a bulk interface.
type FinishOptions struct {
	// FinishTime overrides the Span's finish time, or implicitly becomes
	// time.Now() if FinishTime.IsZero().
	//
	// FinishTime must resolve to a timestamp that's >= the Span's StartTime
	// (per StartSpanOptions).
	FinishTime time.Time

	// LogRecords allows the caller to specify the contents of many LogFields()
	// calls with a single slice. May be nil.
	//
	// None of the LogRecord.Timestamp values may be .IsZero() (i.e., they must
	// be set explicitly). Also, they must be >= the Span's start timestamp and
	// <= the FinishTime (or time.Now() if FinishTime.IsZero()). Otherwise the
	// behavior of FinishWithOptions() is undefined.
	//
	// If specified, the caller hands off ownership of LogRecords at
	// FinishWithOptions() invocation time.
	//
	// If specified, the (deprecated) BulkLogData must be nil or empty.
	LogRecords []LogRecord

	// BulkLogData is DEPRECATED.
	BulkLogData []LogData
}

// LogData is DEPRECATED. Use LogRecord instead; see ToLogRecord for the
// conversion.
type LogData struct {
	Timestamp time.Time
	Event     string
	Payload   interface{}
}
// ToLogRecord converts a deprecated LogData to a non-deprecated LogRecord.
// A zero Timestamp is replaced by time.Now(); Event always becomes an
// "event" log field, and a non-nil Payload additionally becomes a
// "payload" field.
func (ld *LogData) ToLogRecord() LogRecord {
	ts := ld.Timestamp
	if ts.IsZero() {
		ts = time.Now()
	}
	fields := []log.Field{log.String("event", ld.Event)}
	if ld.Payload != nil {
		fields = append(fields, log.Object("payload", ld.Payload))
	}
	return LogRecord{
		Timestamp: ts,
		Fields:    fields,
	}
}
package opentracing

import "time"

// Tracer is a simple, thin interface for Span creation and SpanContext
// propagation.
type Tracer interface {

	// Create, start, and return a new Span with the given `operationName` and
	// incorporate the given StartSpanOption `opts`. (Note that `opts` borrows
	// from the "functional options" pattern, per
	// http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis)
	//
	// A Span with no SpanReference options (e.g., opentracing.ChildOf() or
	// opentracing.FollowsFrom()) becomes the root of its own trace.
	//
	// Examples:
	//
	//     var tracer opentracing.Tracer = ...
	//
	//     // The root-span case:
	//     sp := tracer.StartSpan("GetFeed")
	//
	//     // The vanilla child span case:
	//     sp := tracer.StartSpan(
	//         "GetFeed",
	//         opentracing.ChildOf(parentSpan.Context()))
	//
	//     // All the bells and whistles:
	//     sp := tracer.StartSpan(
	//         "GetFeed",
	//         opentracing.ChildOf(parentSpan.Context()),
	//         opentracing.Tag{"user_agent", loggedReq.UserAgent},
	//         opentracing.StartTime(loggedReq.Timestamp),
	//     )
	//
	StartSpan(operationName string, opts ...StartSpanOption) Span

	// Inject() takes the `sm` SpanContext instance and injects it for
	// propagation within `carrier`. The actual type of `carrier` depends on
	// the value of `format`.
	//
	// OpenTracing defines a common set of `format` values (see BuiltinFormat),
	// and each has an expected carrier type.
	//
	// Other packages may declare their own `format` values, much like the keys
	// used by `context.Context` (see https://godoc.org/context#WithValue).
	//
	// Example usage (sans error handling):
	//
	//     carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
	//     err := tracer.Inject(
	//         span.Context(),
	//         opentracing.HTTPHeaders,
	//         carrier)
	//
	// NOTE: All opentracing.Tracer implementations MUST support all
	// BuiltinFormats.
	//
	// Implementations may return opentracing.ErrUnsupportedFormat if `format`
	// is not supported by (or not known by) the implementation.
	//
	// Implementations may return opentracing.ErrInvalidCarrier or any other
	// implementation-specific error if the format is supported but injection
	// fails anyway.
	//
	// See Tracer.Extract().
	Inject(sm SpanContext, format interface{}, carrier interface{}) error

	// Extract() returns a SpanContext instance given `format` and `carrier`.
	//
	// OpenTracing defines a common set of `format` values (see BuiltinFormat),
	// and each has an expected carrier type.
	//
	// Other packages may declare their own `format` values, much like the keys
	// used by `context.Context` (see
	// https://godoc.org/golang.org/x/net/context#WithValue).
	//
	// Example usage (with StartSpan):
	//
	//
	//     carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
	//     clientContext, err := tracer.Extract(opentracing.HTTPHeaders, carrier)
	//
	//     // ... assuming the ultimate goal here is to resume the trace with a
	//     // server-side Span:
	//     var serverSpan opentracing.Span
	//     if err == nil {
	//         span = tracer.StartSpan(
	//             rpcMethodName, ext.RPCServerOption(clientContext))
	//     } else {
	//         span = tracer.StartSpan(rpcMethodName)
	//     }
	//
	//
	// NOTE: All opentracing.Tracer implementations MUST support all
	// BuiltinFormats.
	//
	// Return values:
	//  - A successful Extract returns a SpanContext instance and a nil error
	//  - If there was simply no SpanContext to extract in `carrier`, Extract()
	//    returns (nil, opentracing.ErrSpanContextNotFound)
	//  - If `format` is unsupported or unrecognized, Extract() returns (nil,
	//    opentracing.ErrUnsupportedFormat)
	//  - If there are more fundamental problems with the `carrier` object,
	//    Extract() may return opentracing.ErrInvalidCarrier,
	//    opentracing.ErrSpanContextCorrupted, or implementation-specific
	//    errors.
	//
	// See Tracer.Inject().
	Extract(format interface{}, carrier interface{}) (SpanContext, error)
}

// StartSpanOptions allows Tracer.StartSpan() callers and implementors a
// mechanism to override the start timestamp, specify Span References, and make
// a single Tag or multiple Tags available at Span start time.
//
// StartSpan() callers should look at the StartSpanOption interface and
// implementations available in this package.
//
// Tracer implementations can convert a slice of `StartSpanOption` instances
// into a `StartSpanOptions` struct like so:
//
//     func StartSpan(opName string, opts ...opentracing.StartSpanOption) {
//         sso := opentracing.StartSpanOptions{}
//         for _, o := range opts {
//             o.Apply(&sso)
//         }
//         ...
//     }
//
type StartSpanOptions struct {
	// Zero or more causal references to other Spans (via their SpanContext).
	// If empty, start a "root" Span (i.e., start a new trace).
	References []SpanReference

	// StartTime overrides the Span's start time, or implicitly becomes
	// time.Now() if StartTime.IsZero().
	StartTime time.Time

	// Tags may have zero or more entries; the restrictions on map values are
	// identical to those for Span.SetTag(). May be nil.
	//
	// If specified, the caller hands off ownership of Tags at
	// StartSpan() invocation time.
	Tags map[string]interface{}
}

// StartSpanOption instances (zero or more) may be passed to Tracer.StartSpan.
//
// StartSpanOption borrows from the "functional options" pattern, per
// http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis
type StartSpanOption interface {
	Apply(*StartSpanOptions)
}

// SpanReferenceType is an enum type describing different categories of
// relationships between two Spans. If Span-2 refers to Span-1, the
// SpanReferenceType describes Span-1 from Span-2's perspective. For example,
// ChildOfRef means that Span-1 created Span-2.
//
// NOTE: Span-1 and Span-2 do *not* necessarily depend on each other for
// completion; e.g., Span-2 may be part of a background job enqueued by Span-1,
// or Span-2 may be sitting in a distributed queue behind Span-1.
type SpanReferenceType int

const (
	// ChildOfRef refers to a parent Span that caused *and* somehow depends
	// upon the new child Span. Often (but not always), the parent Span cannot
	// finish until the child Span does.
	//
	// A timing diagram for a ChildOfRef that's blocked on the new Span:
	//
	//     [-Parent Span---------]
	//          [-Child Span----]
	//
	// See http://opentracing.io/spec/
	//
	// See opentracing.ChildOf()
	ChildOfRef SpanReferenceType = iota

	// FollowsFromRef refers to a parent Span that does not depend in any way
	// on the result of the new child Span. For instance, one might use
	// FollowsFromRefs to describe pipeline stages separated by queues,
	// or a fire-and-forget cache insert at the tail end of a web request.
	//
	// A FollowsFromRef Span is part of the same logical trace as the new Span:
	// i.e., the new Span is somehow caused by the work of its FollowsFromRef.
	//
	// All of the following could be valid timing diagrams for children that
	// "FollowFrom" a parent.
	//
	//     [-Parent Span-]  [-Child Span-]
	//
	//
	//     [-Parent Span--]
	//      [-Child Span-]
	//
	//
	//     [-Parent Span-]
	//                 [-Child Span-]
	//
	// See http://opentracing.io/spec/
	//
	// See opentracing.FollowsFrom()
	FollowsFromRef
)
// SpanReference is a StartSpanOption that pairs a SpanReferenceType and a
// referenced SpanContext. See the SpanReferenceType documentation for
// supported relationships. If SpanReference is created with
// ReferencedContext==nil, it has no effect. Thus it allows for a more concise
// syntax for starting spans:
//
//     sc, _ := tracer.Extract(someFormat, someCarrier)
//     span := tracer.StartSpan("operation", opentracing.ChildOf(sc))
//
// The `ChildOf(sc)` option above will not panic if sc == nil, it will just
// not add the parent span reference to the options.
type SpanReference struct {
	Type              SpanReferenceType
	ReferencedContext SpanContext
}

// Apply satisfies the StartSpanOption interface. A reference whose
// ReferencedContext is nil is dropped rather than appended, which is what
// makes ChildOf(nil)/FollowsFrom(nil) safe no-ops.
func (r SpanReference) Apply(o *StartSpanOptions) {
	if r.ReferencedContext == nil {
		return
	}
	o.References = append(o.References, r)
}
// ChildOf returns a StartSpanOption pointing to a dependent parent span.
// If sc == nil, the option has no effect.
//
// See ChildOfRef, SpanReference
func ChildOf(sc SpanContext) SpanReference {
	return SpanReference{Type: ChildOfRef, ReferencedContext: sc}
}

// FollowsFrom returns a StartSpanOption pointing to a parent Span that caused
// the child Span but does not directly depend on its result in any way.
// If sc == nil, the option has no effect.
//
// See FollowsFromRef, SpanReference
func FollowsFrom(sc SpanContext) SpanReference {
	return SpanReference{Type: FollowsFromRef, ReferencedContext: sc}
}
// StartTime is a StartSpanOption that sets an explicit start timestamp for the
// new Span. It is a defined type over time.Time so that it can carry an
// Apply method.
type StartTime time.Time

// Apply satisfies the StartSpanOption interface by copying the timestamp
// (converted back to time.Time) into the options struct.
func (t StartTime) Apply(o *StartSpanOptions) {
	o.StartTime = time.Time(t)
}
// Tags are a generic map from an arbitrary string key to an opaque value type.
// The underlying tracing system is responsible for interpreting and
// serializing the values.
type Tags map[string]interface{}

// Apply satisfies the StartSpanOption interface by merging every entry of
// t into o.Tags, lazily allocating the destination map on first use.
// Later options overwrite earlier ones on key collision.
func (t Tags) Apply(o *StartSpanOptions) {
	if o.Tags == nil {
		o.Tags = make(map[string]interface{})
	}
	for key := range t {
		o.Tags[key] = t[key]
	}
}
// Tag may be passed as a StartSpanOption to add a tag to new spans,
// or its Set method may be used to apply the tag to an existing Span,
// for example:
//
// tracer.StartSpan("opName", Tag{"Key", value})
//
// or
//
// Tag{"key", value}.Set(span)
type Tag struct {
	Key   string
	Value interface{}
}

// Apply satisfies the StartSpanOption interface: it stores the single
// key/value pair in o.Tags, allocating the map if this is the first tag.
func (t Tag) Apply(o *StartSpanOptions) {
	if o.Tags == nil {
		o.Tags = make(map[string]interface{})
	}
	o.Tags[t.Key] = t.Value
}

// Set applies the tag to an existing Span via its SetTag method.
func (t Tag) Set(s Span) {
	s.SetTag(t.Key, t.Value)
}
...@@ -14,8 +14,6 @@ ...@@ -14,8 +14,6 @@
package ast package ast
import ( import (
"strings"
"github.com/pingcap/errors" "github.com/pingcap/errors"
"github.com/pingcap/parser/auth" "github.com/pingcap/parser/auth"
. "github.com/pingcap/parser/format" . "github.com/pingcap/parser/format"
...@@ -60,6 +58,7 @@ const ( ...@@ -60,6 +58,7 @@ const (
DatabaseOptionNone DatabaseOptionType = iota DatabaseOptionNone DatabaseOptionType = iota
DatabaseOptionCharset DatabaseOptionCharset
DatabaseOptionCollate DatabaseOptionCollate
DatabaseOptionEncryption
) )
// DatabaseOption represents database option. // DatabaseOption represents database option.
...@@ -79,6 +78,10 @@ func (n *DatabaseOption) Restore(ctx *RestoreCtx) error { ...@@ -79,6 +78,10 @@ func (n *DatabaseOption) Restore(ctx *RestoreCtx) error {
ctx.WriteKeyWord("COLLATE") ctx.WriteKeyWord("COLLATE")
ctx.WritePlain(" = ") ctx.WritePlain(" = ")
ctx.WritePlain(n.Value) ctx.WritePlain(n.Value)
case DatabaseOptionEncryption:
ctx.WriteKeyWord("ENCRYPTION")
ctx.WritePlain(" = ")
ctx.WriteString(n.Value)
default: default:
return errors.Errorf("invalid DatabaseOptionType: %d", n.Tp) return errors.Errorf("invalid DatabaseOptionType: %d", n.Tp)
} }
...@@ -253,16 +256,20 @@ func (n *ReferenceDef) Restore(ctx *RestoreCtx) error { ...@@ -253,16 +256,20 @@ func (n *ReferenceDef) Restore(ctx *RestoreCtx) error {
return errors.Annotate(err, "An error occurred while splicing ReferenceDef") return errors.Annotate(err, "An error occurred while splicing ReferenceDef")
} }
} }
ctx.WritePlain("(")
for i, indexColNames := range n.IndexColNames { if n.IndexColNames != nil {
if i > 0 { ctx.WritePlain("(")
ctx.WritePlain(", ") for i, indexColNames := range n.IndexColNames {
} if i > 0 {
if err := indexColNames.Restore(ctx); err != nil { ctx.WritePlain(", ")
return errors.Annotatef(err, "An error occurred while splicing IndexColNames: [%v]", i) }
if err := indexColNames.Restore(ctx); err != nil {
return errors.Annotatef(err, "An error occurred while splicing IndexColNames: [%v]", i)
}
} }
ctx.WritePlain(")")
} }
ctx.WritePlain(")")
if n.Match != MatchNone { if n.Match != MatchNone {
ctx.WriteKeyWord(" MATCH ") ctx.WriteKeyWord(" MATCH ")
switch n.Match { switch n.Match {
...@@ -420,6 +427,7 @@ const ( ...@@ -420,6 +427,7 @@ const (
ColumnOptionReference ColumnOptionReference
ColumnOptionCollate ColumnOptionCollate
ColumnOptionCheck ColumnOptionCheck
ColumnOptionColumnFormat
) )
var ( var (
...@@ -514,6 +522,9 @@ func (n *ColumnOption) Restore(ctx *RestoreCtx) error { ...@@ -514,6 +522,9 @@ func (n *ColumnOption) Restore(ctx *RestoreCtx) error {
} else { } else {
ctx.WriteKeyWord(" NOT ENFORCED") ctx.WriteKeyWord(" NOT ENFORCED")
} }
case ColumnOptionColumnFormat:
ctx.WriteKeyWord("COLUMN_FORMAT ")
ctx.WriteKeyWord(n.StrValue)
default: default:
return errors.New("An error occurred while splicing ColumnOption") return errors.New("An error occurred while splicing ColumnOption")
} }
...@@ -821,6 +832,7 @@ type CreateTableStmt struct { ...@@ -821,6 +832,7 @@ type CreateTableStmt struct {
ddlNode ddlNode
IfNotExists bool IfNotExists bool
IsTemporary bool
Table *TableName Table *TableName
ReferTable *TableName ReferTable *TableName
Cols []*ColumnDef Cols []*ColumnDef
...@@ -833,7 +845,11 @@ type CreateTableStmt struct { ...@@ -833,7 +845,11 @@ type CreateTableStmt struct {
// Restore implements Node interface. // Restore implements Node interface.
func (n *CreateTableStmt) Restore(ctx *RestoreCtx) error { func (n *CreateTableStmt) Restore(ctx *RestoreCtx) error {
ctx.WriteKeyWord("CREATE TABLE ") if n.IsTemporary {
ctx.WriteKeyWord("CREATE TEMPORARY TABLE ")
} else {
ctx.WriteKeyWord("CREATE TABLE ")
}
if n.IfNotExists { if n.IfNotExists {
ctx.WriteKeyWord("IF NOT EXISTS ") ctx.WriteKeyWord("IF NOT EXISTS ")
} }
...@@ -1197,6 +1213,56 @@ func (n *CreateViewStmt) Accept(v Visitor) (Node, bool) { ...@@ -1197,6 +1213,56 @@ func (n *CreateViewStmt) Accept(v Visitor) (Node, bool) {
return v.Leave(n) return v.Leave(n)
} }
// IndexLockAndAlgorithm stores the algorithm option and the lock option.
type IndexLockAndAlgorithm struct {
node
LockTp LockType
AlgorithmTp AlgorithmType
}
// Restore implements Node interface.
func (n *IndexLockAndAlgorithm) Restore(ctx *RestoreCtx) error {
hasPrevOption := false
if n.AlgorithmTp != AlgorithmTypeDefault {
ctx.WriteKeyWord("ALGORITHM")
ctx.WritePlain(" = ")
ctx.WriteKeyWord(n.AlgorithmTp.String())
hasPrevOption = true
}
if n.LockTp != LockTypeDefault {
if hasPrevOption {
ctx.WritePlain(" ")
}
ctx.WriteKeyWord("LOCK")
ctx.WritePlain(" = ")
ctx.WriteKeyWord(n.LockTp.String())
}
return nil
}
// Accept implements Node Accept interface.
func (n *IndexLockAndAlgorithm) Accept(v Visitor) (Node, bool) {
newNode, skipChildren := v.Enter(n)
if skipChildren {
return v.Leave(newNode)
}
n = newNode.(*IndexLockAndAlgorithm)
return v.Leave(n)
}
// IndexKeyType is the type for index key.
type IndexKeyType int
// Index key types.
const (
IndexKeyTypeNone IndexKeyType = iota
IndexKeyTypeUnique
IndexKeyTypeSpatial
IndexKeyTypeFullText
)
// CreateIndexStmt is a statement to create an index. // CreateIndexStmt is a statement to create an index.
// See https://dev.mysql.com/doc/refman/5.7/en/create-index.html // See https://dev.mysql.com/doc/refman/5.7/en/create-index.html
type CreateIndexStmt struct { type CreateIndexStmt struct {
...@@ -1208,16 +1274,22 @@ type CreateIndexStmt struct { ...@@ -1208,16 +1274,22 @@ type CreateIndexStmt struct {
IndexName string IndexName string
Table *TableName Table *TableName
Unique bool
IndexColNames []*IndexColName IndexColNames []*IndexColName
IndexOption *IndexOption IndexOption *IndexOption
KeyType IndexKeyType
LockAlg *IndexLockAndAlgorithm
} }
// Restore implements Node interface. // Restore implements Node interface.
func (n *CreateIndexStmt) Restore(ctx *RestoreCtx) error { func (n *CreateIndexStmt) Restore(ctx *RestoreCtx) error {
ctx.WriteKeyWord("CREATE ") ctx.WriteKeyWord("CREATE ")
if n.Unique { switch n.KeyType {
case IndexKeyTypeUnique:
ctx.WriteKeyWord("UNIQUE ") ctx.WriteKeyWord("UNIQUE ")
case IndexKeyTypeSpatial:
ctx.WriteKeyWord("SPATIAL ")
case IndexKeyTypeFullText:
ctx.WriteKeyWord("FULLTEXT ")
} }
ctx.WriteKeyWord("INDEX ") ctx.WriteKeyWord("INDEX ")
if n.IfNotExists { if n.IfNotExists {
...@@ -1247,6 +1319,13 @@ func (n *CreateIndexStmt) Restore(ctx *RestoreCtx) error { ...@@ -1247,6 +1319,13 @@ func (n *CreateIndexStmt) Restore(ctx *RestoreCtx) error {
} }
} }
if n.LockAlg != nil {
ctx.WritePlain(" ")
if err := n.LockAlg.Restore(ctx); err != nil {
return errors.Annotate(err, "An error occurred while restore CreateIndexStmt.LockAlg")
}
}
return nil return nil
} }
...@@ -1276,6 +1355,13 @@ func (n *CreateIndexStmt) Accept(v Visitor) (Node, bool) { ...@@ -1276,6 +1355,13 @@ func (n *CreateIndexStmt) Accept(v Visitor) (Node, bool) {
} }
n.IndexOption = node.(*IndexOption) n.IndexOption = node.(*IndexOption)
} }
if n.LockAlg != nil {
node, ok := n.LockAlg.Accept(v)
if !ok {
return n, false
}
n.LockAlg = node.(*IndexLockAndAlgorithm)
}
return v.Leave(n) return v.Leave(n)
} }
...@@ -1287,6 +1373,7 @@ type DropIndexStmt struct { ...@@ -1287,6 +1373,7 @@ type DropIndexStmt struct {
IfExists bool IfExists bool
IndexName string IndexName string
Table *TableName Table *TableName
LockAlg *IndexLockAndAlgorithm
} }
// Restore implements Node interface. // Restore implements Node interface.
...@@ -1302,6 +1389,13 @@ func (n *DropIndexStmt) Restore(ctx *RestoreCtx) error { ...@@ -1302,6 +1389,13 @@ func (n *DropIndexStmt) Restore(ctx *RestoreCtx) error {
return errors.Annotate(err, "An error occurred while add index") return errors.Annotate(err, "An error occurred while add index")
} }
if n.LockAlg != nil {
ctx.WritePlain(" ")
if err := n.LockAlg.Restore(ctx); err != nil {
return errors.Annotate(err, "An error occurred while restore CreateIndexStmt.LockAlg")
}
}
return nil return nil
} }
...@@ -1317,6 +1411,13 @@ func (n *DropIndexStmt) Accept(v Visitor) (Node, bool) { ...@@ -1317,6 +1411,13 @@ func (n *DropIndexStmt) Accept(v Visitor) (Node, bool) {
return n, false return n, false
} }
n.Table = node.(*TableName) n.Table = node.(*TableName)
if n.LockAlg != nil {
node, ok := n.LockAlg.Accept(v)
if !ok {
return n, false
}
n.LockAlg = node.(*IndexLockAndAlgorithm)
}
return v.Leave(n) return v.Leave(n)
} }
...@@ -1442,6 +1543,7 @@ const ( ...@@ -1442,6 +1543,7 @@ const (
TableOptionDelayKeyWrite TableOptionDelayKeyWrite
TableOptionRowFormat TableOptionRowFormat
TableOptionStatsPersistent TableOptionStatsPersistent
TableOptionStatsAutoRecalc
TableOptionShardRowID TableOptionShardRowID
TableOptionPreSplitRegion TableOptionPreSplitRegion
TableOptionPackKeys TableOptionPackKeys
...@@ -1451,6 +1553,10 @@ const ( ...@@ -1451,6 +1553,10 @@ const (
TableOptionIndexDirectory TableOptionIndexDirectory
TableOptionStorageMedia TableOptionStorageMedia
TableOptionStatsSamplePages TableOptionStatsSamplePages
TableOptionSecondaryEngine
TableOptionSecondaryEngineNull
TableOptionInsertMethod
TableOptionTableCheckSum
) )
// RowFormat types // RowFormat types
...@@ -1486,6 +1592,7 @@ const ( ...@@ -1486,6 +1592,7 @@ const (
// TableOption is used for parsing table option from SQL. // TableOption is used for parsing table option from SQL.
type TableOption struct { type TableOption struct {
Tp TableOptionType Tp TableOptionType
Default bool
StrValue string StrValue string
UintValue uint64 UintValue uint64
} }
...@@ -1593,6 +1700,14 @@ func (n *TableOption) Restore(ctx *RestoreCtx) error { ...@@ -1593,6 +1700,14 @@ func (n *TableOption) Restore(ctx *RestoreCtx) error {
ctx.WritePlain("= ") ctx.WritePlain("= ")
ctx.WriteKeyWord("DEFAULT") ctx.WriteKeyWord("DEFAULT")
ctx.WritePlain(" /* TableOptionStatsPersistent is not supported */ ") ctx.WritePlain(" /* TableOptionStatsPersistent is not supported */ ")
case TableOptionStatsAutoRecalc:
ctx.WriteKeyWord("STATS_AUTO_RECALC ")
ctx.WritePlain("= ")
if n.Default {
ctx.WriteKeyWord("DEFAULT")
} else {
ctx.WritePlainf("%d", n.UintValue)
}
case TableOptionShardRowID: case TableOptionShardRowID:
ctx.WriteKeyWord("SHARD_ROW_ID_BITS ") ctx.WriteKeyWord("SHARD_ROW_ID_BITS ")
ctx.WritePlainf("= %d", n.UintValue) ctx.WritePlainf("= %d", n.UintValue)
...@@ -1626,11 +1741,27 @@ func (n *TableOption) Restore(ctx *RestoreCtx) error { ...@@ -1626,11 +1741,27 @@ func (n *TableOption) Restore(ctx *RestoreCtx) error {
case TableOptionStatsSamplePages: case TableOptionStatsSamplePages:
ctx.WriteKeyWord("STATS_SAMPLE_PAGES ") ctx.WriteKeyWord("STATS_SAMPLE_PAGES ")
ctx.WritePlain("= ") ctx.WritePlain("= ")
if n.UintValue == 0 { if n.Default {
ctx.WriteKeyWord("DEFAULT") ctx.WriteKeyWord("DEFAULT")
} else { } else {
ctx.WritePlainf("%d", n.UintValue) ctx.WritePlainf("%d", n.UintValue)
} }
case TableOptionSecondaryEngine:
ctx.WriteKeyWord("SECONDARY_ENGINE ")
ctx.WritePlain("= ")
ctx.WriteString(n.StrValue)
case TableOptionSecondaryEngineNull:
ctx.WriteKeyWord("SECONDARY_ENGINE ")
ctx.WritePlain("= ")
ctx.WriteKeyWord("NULL")
case TableOptionInsertMethod:
ctx.WriteKeyWord("INSERT_METHOD ")
ctx.WritePlain("= ")
ctx.WriteString(n.StrValue)
case TableOptionTableCheckSum:
ctx.WriteKeyWord("TABLE_CHECKSUM ")
ctx.WritePlain("= ")
ctx.WritePlainf("%d", n.UintValue)
default: default:
return errors.Errorf("invalid TableOption: %d", n.Tp) return errors.Errorf("invalid TableOption: %d", n.Tp)
} }
...@@ -1705,6 +1836,7 @@ const ( ...@@ -1705,6 +1836,7 @@ const (
AlterTableDropForeignKey AlterTableDropForeignKey
AlterTableModifyColumn AlterTableModifyColumn
AlterTableChangeColumn AlterTableChangeColumn
AlterTableRenameColumn
AlterTableRenameTable AlterTableRenameTable
AlterTableAlterColumn AlterTableAlterColumn
AlterTableLock AlterTableLock
...@@ -1719,6 +1851,20 @@ const ( ...@@ -1719,6 +1851,20 @@ const (
AlterTableEnableKeys AlterTableEnableKeys
AlterTableDisableKeys AlterTableDisableKeys
AlterTableRemovePartitioning AlterTableRemovePartitioning
AlterTableWithValidation
AlterTableWithoutValidation
AlterTableSecondaryLoad
AlterTableSecondaryUnload
AlterTableRebuildPartition
AlterTableReorganizePartition
AlterTableCheckPartitions
AlterTableExchangePartition
AlterTableOptimizePartition
AlterTableRepairPartition
AlterTableImportPartitionTablespace
AlterTableDiscardPartitionTablespace
AlterTableAlterCheck
AlterTableDropCheck
// TODO: Add more actions // TODO: Add more actions
) )
...@@ -1749,29 +1895,29 @@ const ( ...@@ -1749,29 +1895,29 @@ const (
LockTypeExclusive LockTypeExclusive
) )
// AlterAlgorithm is the algorithm of the DDL operations. // AlgorithmType is the algorithm of the DDL operations.
// See https://dev.mysql.com/doc/refman/8.0/en/alter-table.html#alter-table-performance. // See https://dev.mysql.com/doc/refman/8.0/en/alter-table.html#alter-table-performance.
type AlterAlgorithm byte type AlgorithmType byte
// DDL alter algorithms. // DDL algorithms.
// For now, TiDB only supported inplace and instance algorithms. If the user specify `copy`, // For now, TiDB only supported inplace and instance algorithms. If the user specify `copy`,
// will get an error. // will get an error.
const ( const (
AlterAlgorithmDefault AlterAlgorithm = iota AlgorithmTypeDefault AlgorithmType = iota
AlterAlgorithmCopy AlgorithmTypeCopy
AlterAlgorithmInplace AlgorithmTypeInplace
AlterAlgorithmInstant AlgorithmTypeInstant
) )
func (a AlterAlgorithm) String() string { func (a AlgorithmType) String() string {
switch a { switch a {
case AlterAlgorithmDefault: case AlgorithmTypeDefault:
return "DEFAULT" return "DEFAULT"
case AlterAlgorithmCopy: case AlgorithmTypeCopy:
return "COPY" return "COPY"
case AlterAlgorithmInplace: case AlgorithmTypeInplace:
return "INPLACE" return "INPLACE"
case AlterAlgorithmInstant: case AlgorithmTypeInstant:
return "INSTANT" return "INSTANT"
default: default:
return "DEFAULT" return "DEFAULT"
...@@ -1790,6 +1936,9 @@ type AlterTableSpec struct { ...@@ -1790,6 +1936,9 @@ type AlterTableSpec struct {
// see https://mariadb.com/kb/en/library/alter-table/ // see https://mariadb.com/kb/en/library/alter-table/
IfNotExists bool IfNotExists bool
NoWriteToBinlog bool
OnAllPartitions bool
Tp AlterTableType Tp AlterTableType
Name string Name string
Constraint *Constraint Constraint *Constraint
...@@ -1797,15 +1946,17 @@ type AlterTableSpec struct { ...@@ -1797,15 +1946,17 @@ type AlterTableSpec struct {
NewTable *TableName NewTable *TableName
NewColumns []*ColumnDef NewColumns []*ColumnDef
OldColumnName *ColumnName OldColumnName *ColumnName
NewColumnName *ColumnName
Position *ColumnPosition Position *ColumnPosition
LockType LockType LockType LockType
Algorithm AlterAlgorithm Algorithm AlgorithmType
Comment string Comment string
FromKey model.CIStr FromKey model.CIStr
ToKey model.CIStr ToKey model.CIStr
Partition *PartitionOptions Partition *PartitionOptions
PartitionNames []model.CIStr PartitionNames []model.CIStr
PartDefinitions []*PartitionDefinition PartDefinitions []*PartitionDefinition
WithValidation bool
Num uint64 Num uint64
} }
...@@ -1918,6 +2069,15 @@ func (n *AlterTableSpec) Restore(ctx *RestoreCtx) error { ...@@ -1918,6 +2069,15 @@ func (n *AlterTableSpec) Restore(ctx *RestoreCtx) error {
if err := n.Position.Restore(ctx); err != nil { if err := n.Position.Restore(ctx); err != nil {
return errors.Annotate(err, "An error occurred while restore AlterTableSpec.Position") return errors.Annotate(err, "An error occurred while restore AlterTableSpec.Position")
} }
case AlterTableRenameColumn:
ctx.WriteKeyWord("RENAME COLUMN ")
if err := n.OldColumnName.Restore(ctx); err != nil {
return errors.Annotate(err, "An error occurred while restore AlterTableSpec.OldColumnName")
}
ctx.WriteKeyWord(" TO ")
if err := n.NewColumnName.Restore(ctx); err != nil {
return errors.Annotate(err, "An error occurred while restore AlterTableSpec.NewColumnName")
}
case AlterTableRenameTable: case AlterTableRenameTable:
ctx.WriteKeyWord("RENAME AS ") ctx.WriteKeyWord("RENAME AS ")
if err := n.NewTable.Restore(ctx); err != nil { if err := n.NewTable.Restore(ctx); err != nil {
...@@ -1967,6 +2127,9 @@ func (n *AlterTableSpec) Restore(ctx *RestoreCtx) error { ...@@ -1967,6 +2127,9 @@ func (n *AlterTableSpec) Restore(ctx *RestoreCtx) error {
if n.IfNotExists { if n.IfNotExists {
ctx.WriteKeyWord(" IF NOT EXISTS") ctx.WriteKeyWord(" IF NOT EXISTS")
} }
if n.NoWriteToBinlog {
ctx.WriteKeyWord(" NO_WRITE_TO_BINLOG")
}
if n.PartDefinitions != nil { if n.PartDefinitions != nil {
ctx.WritePlain(" (") ctx.WritePlain(" (")
for i, def := range n.PartDefinitions { for i, def := range n.PartDefinitions {
...@@ -1984,6 +2147,9 @@ func (n *AlterTableSpec) Restore(ctx *RestoreCtx) error { ...@@ -1984,6 +2147,9 @@ func (n *AlterTableSpec) Restore(ctx *RestoreCtx) error {
} }
case AlterTableCoalescePartitions: case AlterTableCoalescePartitions:
ctx.WriteKeyWord("COALESCE PARTITION ") ctx.WriteKeyWord("COALESCE PARTITION ")
if n.NoWriteToBinlog {
ctx.WriteKeyWord("NO_WRITE_TO_BINLOG ")
}
ctx.WritePlainf("%d", n.Num) ctx.WritePlainf("%d", n.Num)
case AlterTableDropPartition: case AlterTableDropPartition:
ctx.WriteKeyWord("DROP PARTITION ") ctx.WriteKeyWord("DROP PARTITION ")
...@@ -1998,12 +2164,84 @@ func (n *AlterTableSpec) Restore(ctx *RestoreCtx) error { ...@@ -1998,12 +2164,84 @@ func (n *AlterTableSpec) Restore(ctx *RestoreCtx) error {
} }
case AlterTableTruncatePartition: case AlterTableTruncatePartition:
ctx.WriteKeyWord("TRUNCATE PARTITION ") ctx.WriteKeyWord("TRUNCATE PARTITION ")
if n.OnAllPartitions {
ctx.WriteKeyWord("ALL")
return nil
}
for i, name := range n.PartitionNames {
if i != 0 {
ctx.WritePlain(",")
}
ctx.WriteName(name.O)
}
case AlterTableCheckPartitions:
ctx.WriteKeyWord("CHECK PARTITION ")
if n.OnAllPartitions {
ctx.WriteKeyWord("ALL")
return nil
}
for i, name := range n.PartitionNames {
if i != 0 {
ctx.WritePlain(",")
}
ctx.WriteName(name.O)
}
case AlterTableOptimizePartition:
ctx.WriteKeyWord("OPTIMIZE PARTITION ")
if n.NoWriteToBinlog {
ctx.WriteKeyWord("NO_WRITE_TO_BINLOG ")
}
if n.OnAllPartitions {
ctx.WriteKeyWord("ALL")
return nil
}
for i, name := range n.PartitionNames { for i, name := range n.PartitionNames {
if i != 0 { if i != 0 {
ctx.WritePlain(",") ctx.WritePlain(",")
} }
ctx.WriteName(name.O) ctx.WriteName(name.O)
} }
case AlterTableRepairPartition:
ctx.WriteKeyWord("REPAIR PARTITION ")
if n.NoWriteToBinlog {
ctx.WriteKeyWord("NO_WRITE_TO_BINLOG ")
}
if n.OnAllPartitions {
ctx.WriteKeyWord("ALL")
return nil
}
for i, name := range n.PartitionNames {
if i != 0 {
ctx.WritePlain(",")
}
ctx.WriteName(name.O)
}
case AlterTableImportPartitionTablespace:
ctx.WriteKeyWord("IMPORT PARTITION ")
if n.OnAllPartitions {
ctx.WriteKeyWord("ALL")
} else {
for i, name := range n.PartitionNames {
if i != 0 {
ctx.WritePlain(",")
}
ctx.WriteName(name.O)
}
}
ctx.WriteKeyWord(" TABLESPACE")
case AlterTableDiscardPartitionTablespace:
ctx.WriteKeyWord("DISCARD PARTITION ")
if n.OnAllPartitions {
ctx.WriteKeyWord("ALL")
} else {
for i, name := range n.PartitionNames {
if i != 0 {
ctx.WritePlain(",")
}
ctx.WriteName(name.O)
}
}
ctx.WriteKeyWord(" TABLESPACE")
case AlterTablePartition: case AlterTablePartition:
if err := n.Partition.Restore(ctx); err != nil { if err := n.Partition.Restore(ctx); err != nil {
return errors.Annotate(err, "An error occurred while restore AlterTableSpec.Partition") return errors.Annotate(err, "An error occurred while restore AlterTableSpec.Partition")
...@@ -2014,6 +2252,76 @@ func (n *AlterTableSpec) Restore(ctx *RestoreCtx) error { ...@@ -2014,6 +2252,76 @@ func (n *AlterTableSpec) Restore(ctx *RestoreCtx) error {
ctx.WriteKeyWord("DISABLE KEYS") ctx.WriteKeyWord("DISABLE KEYS")
case AlterTableRemovePartitioning: case AlterTableRemovePartitioning:
ctx.WriteKeyWord("REMOVE PARTITIONING") ctx.WriteKeyWord("REMOVE PARTITIONING")
case AlterTableWithValidation:
ctx.WriteKeyWord("WITH VALIDATION")
case AlterTableWithoutValidation:
ctx.WriteKeyWord("WITHOUT VALIDATION")
case AlterTableRebuildPartition:
ctx.WriteKeyWord("REBUILD PARTITION ")
if n.NoWriteToBinlog {
ctx.WriteKeyWord("NO_WRITE_TO_BINLOG ")
}
if n.OnAllPartitions {
ctx.WriteKeyWord("ALL")
return nil
}
for i, name := range n.PartitionNames {
if i != 0 {
ctx.WritePlain(",")
}
ctx.WriteName(name.O)
}
case AlterTableReorganizePartition:
ctx.WriteKeyWord("REORGANIZE PARTITION")
if n.NoWriteToBinlog {
ctx.WriteKeyWord(" NO_WRITE_TO_BINLOG")
}
if n.OnAllPartitions {
return nil
}
for i, name := range n.PartitionNames {
if i != 0 {
ctx.WritePlain(",")
} else {
ctx.WritePlain(" ")
}
ctx.WriteName(name.O)
}
ctx.WriteKeyWord(" INTO ")
if n.PartDefinitions != nil {
ctx.WritePlain("(")
for i, def := range n.PartDefinitions {
if i != 0 {
ctx.WritePlain(", ")
}
if err := def.Restore(ctx); err != nil {
return errors.Annotatef(err, "An error occurred while restore AlterTableSpec.PartDefinitions[%d]", i)
}
}
ctx.WritePlain(")")
}
case AlterTableExchangePartition:
ctx.WriteKeyWord("EXCHANGE PARTITION ")
ctx.WriteName(n.PartitionNames[0].O)
ctx.WriteKeyWord(" WITH TABLE ")
n.NewTable.Restore(ctx)
if !n.WithValidation {
ctx.WriteKeyWord(" WITHOUT VALIDATION")
}
case AlterTableSecondaryLoad:
ctx.WriteKeyWord("SECONDARY_LOAD")
case AlterTableSecondaryUnload:
ctx.WriteKeyWord("SECONDARY_UNLOAD")
case AlterTableAlterCheck:
ctx.WriteKeyWord("ALTER CHECK ")
ctx.WriteName(n.Constraint.Name)
if n.Constraint.Enforced == false {
ctx.WriteKeyWord(" NOT")
}
ctx.WriteKeyWord(" ENFORCED")
case AlterTableDropCheck:
ctx.WriteKeyWord("DROP CHECK ")
ctx.WriteName(n.Constraint.Name)
default: default:
// TODO: not support // TODO: not support
ctx.WritePlainf(" /* AlterTableType(%d) is not supported */ ", n.Tp) ctx.WritePlainf(" /* AlterTableType(%d) is not supported */ ", n.Tp)
...@@ -2432,7 +2740,7 @@ type PartitionMethod struct { ...@@ -2432,7 +2740,7 @@ type PartitionMethod struct {
// RANGE COLUMNS and LIST COLUMNS types // RANGE COLUMNS and LIST COLUMNS types
ColumnNames []*ColumnName ColumnNames []*ColumnName
// Unit is a time unit used as argument of SYSTEM_TIME type // Unit is a time unit used as argument of SYSTEM_TIME type
Unit ValueExpr Unit TimeUnitType
// Limit is a row count used as argument of the SYSTEM_TIME type // Limit is a row count used as argument of the SYSTEM_TIME type
Limit uint64 Limit uint64
...@@ -2449,20 +2757,13 @@ func (n *PartitionMethod) Restore(ctx *RestoreCtx) error { ...@@ -2449,20 +2757,13 @@ func (n *PartitionMethod) Restore(ctx *RestoreCtx) error {
switch { switch {
case n.Tp == model.PartitionTypeSystemTime: case n.Tp == model.PartitionTypeSystemTime:
if n.Expr != nil && n.Unit != nil { if n.Expr != nil && n.Unit != TimeUnitInvalid {
ctx.WriteKeyWord(" INTERVAL ") ctx.WriteKeyWord(" INTERVAL ")
if err := n.Expr.Restore(ctx); err != nil { if err := n.Expr.Restore(ctx); err != nil {
return errors.Annotate(err, "An error occurred while restore PartitionMethod.Expr") return errors.Annotate(err, "An error occurred while restore PartitionMethod.Expr")
} }
// Here the Unit string should not be quoted.
// TODO: This is a temporary workaround that should be changed once something like "Keyword Expression" is implemented.
var sb strings.Builder
if err := n.Unit.Restore(NewRestoreCtx(0, &sb)); err != nil {
return errors.Annotate(err, "An error occurred while restore PartitionMethod.Unit")
}
ctx.WritePlain(" ") ctx.WritePlain(" ")
ctx.WriteKeyWord(sb.String()) ctx.WriteKeyWord(n.Unit.String())
} }
case n.Expr != nil: case n.Expr != nil:
...@@ -2512,13 +2813,6 @@ func (n *PartitionMethod) acceptInPlace(v Visitor) bool { ...@@ -2512,13 +2813,6 @@ func (n *PartitionMethod) acceptInPlace(v Visitor) bool {
} }
n.ColumnNames[i] = newColName.(*ColumnName) n.ColumnNames[i] = newColName.(*ColumnName)
} }
if n.Unit != nil {
unit, ok := n.Unit.Accept(v)
if !ok {
return false
}
n.Unit = unit.(ValueExpr)
}
return true return true
} }
......
...@@ -14,8 +14,6 @@ ...@@ -14,8 +14,6 @@
package ast package ast
import ( import (
"strings"
"github.com/pingcap/errors" "github.com/pingcap/errors"
"github.com/pingcap/parser/auth" "github.com/pingcap/parser/auth"
. "github.com/pingcap/parser/format" . "github.com/pingcap/parser/format"
...@@ -2368,7 +2366,7 @@ type FrameBound struct { ...@@ -2368,7 +2366,7 @@ type FrameBound struct {
Expr ExprNode Expr ExprNode
// `Unit` is used to indicate the units in which the `Expr` should be interpreted. // `Unit` is used to indicate the units in which the `Expr` should be interpreted.
// For example: '2:30' MINUTE_SECOND. // For example: '2:30' MINUTE_SECOND.
Unit ExprNode Unit TimeUnitType
} }
// Restore implements Node interface. // Restore implements Node interface.
...@@ -2380,7 +2378,7 @@ func (n *FrameBound) Restore(ctx *RestoreCtx) error { ...@@ -2380,7 +2378,7 @@ func (n *FrameBound) Restore(ctx *RestoreCtx) error {
case CurrentRow: case CurrentRow:
ctx.WriteKeyWord("CURRENT ROW") ctx.WriteKeyWord("CURRENT ROW")
case Preceding, Following: case Preceding, Following:
if n.Unit != nil { if n.Unit != TimeUnitInvalid {
ctx.WriteKeyWord("INTERVAL ") ctx.WriteKeyWord("INTERVAL ")
} }
if n.Expr != nil { if n.Expr != nil {
...@@ -2388,13 +2386,9 @@ func (n *FrameBound) Restore(ctx *RestoreCtx) error { ...@@ -2388,13 +2386,9 @@ func (n *FrameBound) Restore(ctx *RestoreCtx) error {
return errors.Annotate(err, "An error occurred while restore FrameBound.Expr") return errors.Annotate(err, "An error occurred while restore FrameBound.Expr")
} }
} }
if n.Unit != nil { if n.Unit != TimeUnitInvalid {
// Here the Unit string should not be quoted.
// TODO: This is a temporary workaround that should be changed once something like "Keyword Expression" is implemented.
var sb strings.Builder
n.Unit.Restore(NewRestoreCtx(0, &sb))
ctx.WritePlain(" ") ctx.WritePlain(" ")
ctx.WriteKeyWord(sb.String()) ctx.WriteKeyWord(n.Unit.String())
} }
if n.Type == Preceding { if n.Type == Preceding {
ctx.WriteKeyWord(" PRECEDING") ctx.WriteKeyWord(" PRECEDING")
...@@ -2419,13 +2413,6 @@ func (n *FrameBound) Accept(v Visitor) (Node, bool) { ...@@ -2419,13 +2413,6 @@ func (n *FrameBound) Accept(v Visitor) (Node, bool) {
} }
n.Expr = node.(ExprNode) n.Expr = node.(ExprNode)
} }
if n.Unit != nil {
node, ok := n.Unit.Accept(v)
if !ok {
return n, false
}
n.Unit = node.(ExprNode)
}
return v.Leave(n) return v.Leave(n)
} }
......
...@@ -345,26 +345,24 @@ func (n *FuncCallExpr) Restore(ctx *RestoreCtx) error { ...@@ -345,26 +345,24 @@ func (n *FuncCallExpr) Restore(ctx *RestoreCtx) error {
ctx.WriteKeyWord(n.Args[1].GetType().Charset) ctx.WriteKeyWord(n.Args[1].GetType().Charset)
case "adddate", "subdate", "date_add", "date_sub": case "adddate", "subdate", "date_add", "date_sub":
if err := n.Args[0].Restore(ctx); err != nil { if err := n.Args[0].Restore(ctx); err != nil {
return errors.Annotatef(err, "An error occurred while restore FuncCallExpr") return errors.Annotatef(err, "An error occurred while restore FuncCallExpr.Args[0]")
} }
ctx.WritePlain(", ") ctx.WritePlain(", ")
ctx.WriteKeyWord("INTERVAL ") ctx.WriteKeyWord("INTERVAL ")
if err := n.Args[1].Restore(ctx); err != nil { if err := n.Args[1].Restore(ctx); err != nil {
return errors.Annotatef(err, "An error occurred while restore FuncCallExpr") return errors.Annotatef(err, "An error occurred while restore FuncCallExpr.Args[1]")
} }
ctx.WritePlain(" ") ctx.WritePlain(" ")
ctx.WriteKeyWord(n.Args[2].(ValueExpr).GetString()) if err := n.Args[2].Restore(ctx); err != nil {
return errors.Annotatef(err, "An error occurred while restore FuncCallExpr.Args[2]")
}
case "extract": case "extract":
ctx.WriteKeyWord(n.Args[0].(ValueExpr).GetString()) if err := n.Args[0].Restore(ctx); err != nil {
ctx.WriteKeyWord(" FROM ") return errors.Annotatef(err, "An error occurred while restore FuncCallExpr.Args[0]")
if err := n.Args[1].Restore(ctx); err != nil {
return errors.Annotatef(err, "An error occurred while restore FuncCallExpr")
} }
case "get_format": ctx.WriteKeyWord(" FROM ")
ctx.WriteKeyWord(n.Args[0].(ValueExpr).GetString())
ctx.WritePlain(", ")
if err := n.Args[1].Restore(ctx); err != nil { if err := n.Args[1].Restore(ctx); err != nil {
return errors.Annotatef(err, "An error occurred while restore FuncCallExpr") return errors.Annotatef(err, "An error occurred while restore FuncCallExpr.Args[1]")
} }
case "position": case "position":
if err := n.Args[0].Restore(ctx); err != nil { if err := n.Args[0].Restore(ctx); err != nil {
...@@ -376,47 +374,26 @@ func (n *FuncCallExpr) Restore(ctx *RestoreCtx) error { ...@@ -376,47 +374,26 @@ func (n *FuncCallExpr) Restore(ctx *RestoreCtx) error {
} }
case "trim": case "trim":
switch len(n.Args) { switch len(n.Args) {
case 1:
if err := n.Args[0].Restore(ctx); err != nil {
return errors.Annotatef(err, "An error occurred while restore FuncCallExpr")
}
case 2:
if err := n.Args[1].Restore(ctx); err != nil {
return errors.Annotatef(err, "An error occurred while restore FuncCallExpr")
}
ctx.WriteKeyWord(" FROM ")
if err := n.Args[0].Restore(ctx); err != nil {
return errors.Annotatef(err, "An error occurred while restore FuncCallExpr")
}
case 3: case 3:
switch fmt.Sprint(n.Args[2].(ValueExpr).GetValue()) { if err := n.Args[2].Restore(ctx); err != nil {
case "3": return errors.Annotatef(err, "An error occurred while restore FuncCallExpr.Args[2]")
ctx.WriteKeyWord("TRAILING ")
case "2":
ctx.WriteKeyWord("LEADING ")
case "0", "1":
ctx.WriteKeyWord("BOTH ")
} }
ctx.WritePlain(" ")
fallthrough
case 2:
if n.Args[1].(ValueExpr).GetValue() != nil { if n.Args[1].(ValueExpr).GetValue() != nil {
if err := n.Args[1].Restore(ctx); err != nil { if err := n.Args[1].Restore(ctx); err != nil {
return errors.Annotatef(err, "An error occurred while restore FuncCallExpr") return errors.Annotatef(err, "An error occurred while restore FuncCallExpr.Args[1]")
} }
ctx.WritePlain(" ") ctx.WritePlain(" ")
} }
ctx.WriteKeyWord("FROM ") ctx.WriteKeyWord("FROM ")
fallthrough
case 1:
if err := n.Args[0].Restore(ctx); err != nil { if err := n.Args[0].Restore(ctx); err != nil {
return errors.Annotatef(err, "An error occurred while restore FuncCallExpr") return errors.Annotatef(err, "An error occurred while restore FuncCallExpr.Args[0]")
} }
} }
case "timestampdiff", "timestampadd":
ctx.WriteKeyWord(n.Args[0].(ValueExpr).GetString())
for i := 1; i < len(n.Args); {
ctx.WritePlain(", ")
if err := n.Args[i].Restore(ctx); err != nil {
return errors.Annotatef(err, "An error occurred while restore FuncCallExpr")
}
i++
}
default: default:
for i, argv := range n.Args { for i, argv := range n.Args {
if i != 0 { if i != 0 {
...@@ -452,12 +429,7 @@ func (n *FuncCallExpr) specialFormatArgs(w io.Writer) bool { ...@@ -452,12 +429,7 @@ func (n *FuncCallExpr) specialFormatArgs(w io.Writer) bool {
n.Args[0].Format(w) n.Args[0].Format(w)
fmt.Fprint(w, ", INTERVAL ") fmt.Fprint(w, ", INTERVAL ")
n.Args[1].Format(w) n.Args[1].Format(w)
fmt.Fprintf(w, " %s", n.Args[2].(ValueExpr).GetDatumString()) fmt.Fprint(w, " ")
return true
case TimestampAdd, TimestampDiff:
fmt.Fprintf(w, "%s, ", n.Args[0].(ValueExpr).GetDatumString())
n.Args[1].Format(w)
fmt.Fprint(w, ", ")
n.Args[2].Format(w) n.Args[2].Format(w)
return true return true
} }
...@@ -583,6 +555,47 @@ const ( ...@@ -583,6 +555,47 @@ const (
TrimTrailing TrimTrailing
) )
// String implements the fmt.Stringer interface, mapping the trim
// direction to its SQL keyword. Values outside the known directions
// yield the empty string.
func (d TrimDirectionType) String() string {
	switch d {
	case TrimLeading:
		return "LEADING"
	case TrimTrailing:
		return "TRAILING"
	case TrimBothDefault, TrimBoth:
		return "BOTH"
	}
	return ""
}
// TrimDirectionExpr is an expression representing the trim direction
// (BOTH/LEADING/TRAILING) used in the TRIM() function.
type TrimDirectionExpr struct {
	exprNode
	// Direction is the trim direction; see TrimDirectionType for the
	// possible values.
	Direction TrimDirectionType
}
// Restore implements the Node interface; it emits the direction's SQL
// keyword into the restore context. It never fails.
func (n *TrimDirectionExpr) Restore(ctx *RestoreCtx) error {
	keyword := n.Direction.String()
	ctx.WriteKeyWord(keyword)
	return nil
}
// Format writes the direction's SQL keyword to w.
// Write errors are ignored, matching the Format convention in this file.
func (n *TrimDirectionExpr) Format(w io.Writer) {
	io.WriteString(w, n.Direction.String())
}
// Accept implements the Node Accept interface. TrimDirectionExpr is a
// leaf node, so there are no children to visit.
func (n *TrimDirectionExpr) Accept(v Visitor) (Node, bool) {
	node, skip := v.Enter(n)
	if !skip {
		return v.Leave(n)
	}
	return v.Leave(node)
}
// DateArithType is type for DateArith type. // DateArithType is type for DateArith type.
type DateArithType byte type DateArithType byte
...@@ -799,3 +812,179 @@ func (n *WindowFuncExpr) Accept(v Visitor) (Node, bool) { ...@@ -799,3 +812,179 @@ func (n *WindowFuncExpr) Accept(v Visitor) (Node, bool) {
n.Spec = *node.(*WindowSpec) n.Spec = *node.(*WindowSpec)
return v.Leave(n) return v.Leave(n)
} }
// TimeUnitType is the type for time and timestamp units.
type TimeUnitType int

const (
	// TimeUnitInvalid is a placeholder for an invalid time or timestamp unit
	TimeUnitInvalid TimeUnitType = iota
	// TimeUnitMicrosecond is the time or timestamp unit MICROSECOND.
	TimeUnitMicrosecond
	// TimeUnitSecond is the time or timestamp unit SECOND.
	TimeUnitSecond
	// TimeUnitMinute is the time or timestamp unit MINUTE.
	TimeUnitMinute
	// TimeUnitHour is the time or timestamp unit HOUR.
	TimeUnitHour
	// TimeUnitDay is the time or timestamp unit DAY.
	TimeUnitDay
	// TimeUnitWeek is the time or timestamp unit WEEK.
	TimeUnitWeek
	// TimeUnitMonth is the time or timestamp unit MONTH.
	TimeUnitMonth
	// TimeUnitQuarter is the time or timestamp unit QUARTER.
	TimeUnitQuarter
	// TimeUnitYear is the time or timestamp unit YEAR.
	TimeUnitYear
	// TimeUnitSecondMicrosecond is the time unit SECOND_MICROSECOND.
	TimeUnitSecondMicrosecond
	// TimeUnitMinuteMicrosecond is the time unit MINUTE_MICROSECOND.
	TimeUnitMinuteMicrosecond
	// TimeUnitMinuteSecond is the time unit MINUTE_SECOND.
	TimeUnitMinuteSecond
	// TimeUnitHourMicrosecond is the time unit HOUR_MICROSECOND.
	TimeUnitHourMicrosecond
	// TimeUnitHourSecond is the time unit HOUR_SECOND.
	TimeUnitHourSecond
	// TimeUnitHourMinute is the time unit HOUR_MINUTE.
	TimeUnitHourMinute
	// TimeUnitDayMicrosecond is the time unit DAY_MICROSECOND.
	TimeUnitDayMicrosecond
	// TimeUnitDaySecond is the time unit DAY_SECOND.
	TimeUnitDaySecond
	// TimeUnitDayMinute is the time unit DAY_MINUTE.
	TimeUnitDayMinute
	// TimeUnitDayHour is the time unit DAY_HOUR.
	TimeUnitDayHour
	// TimeUnitYearMonth is the time unit YEAR_MONTH.
	TimeUnitYearMonth
)

// timeUnitNames maps each TimeUnitType to its SQL keyword. The index is
// the unit's ordinal value, so this table must stay in sync with the
// const block above; TimeUnitInvalid deliberately maps to "".
var timeUnitNames = [...]string{
	TimeUnitInvalid:           "",
	TimeUnitMicrosecond:       "MICROSECOND",
	TimeUnitSecond:            "SECOND",
	TimeUnitMinute:            "MINUTE",
	TimeUnitHour:              "HOUR",
	TimeUnitDay:               "DAY",
	TimeUnitWeek:              "WEEK",
	TimeUnitMonth:             "MONTH",
	TimeUnitQuarter:           "QUARTER",
	TimeUnitYear:              "YEAR",
	TimeUnitSecondMicrosecond: "SECOND_MICROSECOND",
	TimeUnitMinuteMicrosecond: "MINUTE_MICROSECOND",
	TimeUnitMinuteSecond:      "MINUTE_SECOND",
	TimeUnitHourMicrosecond:   "HOUR_MICROSECOND",
	TimeUnitHourSecond:        "HOUR_SECOND",
	TimeUnitHourMinute:        "HOUR_MINUTE",
	TimeUnitDayMicrosecond:    "DAY_MICROSECOND",
	TimeUnitDaySecond:         "DAY_SECOND",
	TimeUnitDayMinute:         "DAY_MINUTE",
	TimeUnitDayHour:           "DAY_HOUR",
	TimeUnitYearMonth:         "YEAR_MONTH",
}

// String implements fmt.Stringer interface. It returns the unit's SQL
// keyword, or "" for TimeUnitInvalid and any out-of-range value.
func (unit TimeUnitType) String() string {
	if unit < 0 || int(unit) >= len(timeUnitNames) {
		return ""
	}
	return timeUnitNames[unit]
}
// TimeUnitExpr is an expression node wrapping a time or timestamp unit
// keyword (e.g. DAY, HOUR_MINUTE).
type TimeUnitExpr struct {
	exprNode
	// Unit is the time or timestamp unit; the zero value is TimeUnitInvalid.
	Unit TimeUnitType
}
// Restore implements the Node interface; it emits the unit's SQL keyword
// into the restore context. It never fails.
func (n *TimeUnitExpr) Restore(ctx *RestoreCtx) error {
	keyword := n.Unit.String()
	ctx.WriteKeyWord(keyword)
	return nil
}
// Format writes the unit's SQL keyword to w.
// Write errors are ignored, matching the Format convention in this file.
func (n *TimeUnitExpr) Format(w io.Writer) {
	io.WriteString(w, n.Unit.String())
}
// Accept implements the Node Accept interface. TimeUnitExpr is a leaf
// node, so there are no children to visit.
func (n *TimeUnitExpr) Accept(v Visitor) (Node, bool) {
	node, skip := v.Enter(n)
	if !skip {
		return v.Leave(n)
	}
	return v.Leave(node)
}
// GetFormatSelectorType is the type for the first argument of the
// GET_FORMAT() function. Values start at 1, so the zero value names no
// selector.
type GetFormatSelectorType int

const (
	// GetFormatSelectorDate is the GET_FORMAT selector DATE.
	GetFormatSelectorDate GetFormatSelectorType = iota + 1
	// GetFormatSelectorTime is the GET_FORMAT selector TIME.
	GetFormatSelectorTime
	// GetFormatSelectorDatetime is the GET_FORMAT selector for both DATETIME and TIMESTAMP.
	GetFormatSelectorDatetime
)
// GetFormatSelectorExpr is an expression used as the first argument of
// the GET_FORMAT() function.
type GetFormatSelectorExpr struct {
	exprNode
	// Selector identifies the GET_FORMAT() variant (DATE/TIME/DATETIME).
	Selector GetFormatSelectorType
}
// String implements the fmt.Stringer interface, mapping the selector to
// its SQL keyword. Unknown values yield the empty string.
func (sel GetFormatSelectorType) String() string {
	switch sel {
	case GetFormatSelectorDatetime:
		return "DATETIME"
	case GetFormatSelectorTime:
		return "TIME"
	case GetFormatSelectorDate:
		return "DATE"
	}
	return ""
}
// Restore implements the Node interface; it emits the selector's SQL
// keyword into the restore context. It never fails.
func (n *GetFormatSelectorExpr) Restore(ctx *RestoreCtx) error {
	keyword := n.Selector.String()
	ctx.WriteKeyWord(keyword)
	return nil
}
// Format writes the selector's SQL keyword to w.
// Write errors are ignored, matching the Format convention in this file.
func (n *GetFormatSelectorExpr) Format(w io.Writer) {
	io.WriteString(w, n.Selector.String())
}
// Accept implements the Node Accept interface. GetFormatSelectorExpr is
// a leaf node, so there are no children to visit.
func (n *GetFormatSelectorExpr) Accept(v Visitor) (Node, bool) {
	node, skip := v.Enter(n)
	if !skip {
		return v.Leave(n)
	}
	return v.Leave(node)
}
...@@ -317,6 +317,7 @@ func (n *DeallocateStmt) Accept(v Visitor) (Node, bool) { ...@@ -317,6 +317,7 @@ func (n *DeallocateStmt) Accept(v Visitor) (Node, bool) {
// Prepared represents a prepared statement. // Prepared represents a prepared statement.
type Prepared struct { type Prepared struct {
Stmt StmtNode Stmt StmtNode
StmtType string
Params []ParamMarkerExpr Params []ParamMarkerExpr
SchemaVersion int64 SchemaVersion int64
UseCache bool UseCache bool
...@@ -1055,7 +1056,7 @@ func (p *PasswordOrLockOption) Restore(ctx *RestoreCtx) error { ...@@ -1055,7 +1056,7 @@ func (p *PasswordOrLockOption) Restore(ctx *RestoreCtx) error {
case PasswordExpireNever: case PasswordExpireNever:
ctx.WriteKeyWord("PASSWORD EXPIRE NEVER") ctx.WriteKeyWord("PASSWORD EXPIRE NEVER")
case PasswordExpireInterval: case PasswordExpireInterval:
ctx.WriteKeyWord("PASSWORD EXPIRE NEVER") ctx.WriteKeyWord("PASSWORD EXPIRE INTERVAL")
ctx.WritePlainf(" %d", p.Count) ctx.WritePlainf(" %d", p.Count)
ctx.WriteKeyWord(" DAY") ctx.WriteKeyWord(" DAY")
case Lock: case Lock:
...@@ -1162,9 +1163,12 @@ func (n *CreateUserStmt) SecureText() string { ...@@ -1162,9 +1163,12 @@ func (n *CreateUserStmt) SecureText() string {
type AlterUserStmt struct { type AlterUserStmt struct {
stmtNode stmtNode
IfExists bool IfExists bool
CurrentAuth *AuthOption CurrentAuth *AuthOption
Specs []*UserSpec Specs []*UserSpec
TslOptions []*TslOption
ResourceOptions []*ResourceOption
PasswordOrLockOptions []*PasswordOrLockOption
} }
// Restore implements Node interface. // Restore implements Node interface.
...@@ -1188,6 +1192,40 @@ func (n *AlterUserStmt) Restore(ctx *RestoreCtx) error { ...@@ -1188,6 +1192,40 @@ func (n *AlterUserStmt) Restore(ctx *RestoreCtx) error {
return errors.Annotatef(err, "An error occurred while restore AlterUserStmt.Specs[%d]", i) return errors.Annotatef(err, "An error occurred while restore AlterUserStmt.Specs[%d]", i)
} }
} }
tslOptionLen := len(n.TslOptions)
if tslOptionLen != 0 {
ctx.WriteKeyWord(" REQUIRE ")
}
// Restore `tslOptions` reversely to keep order the same with original sql
for i := tslOptionLen; i > 0; i-- {
if i != tslOptionLen {
ctx.WriteKeyWord(" AND ")
}
if err := n.TslOptions[i-1].Restore(ctx); err != nil {
return errors.Annotatef(err, "An error occurred while restore AlterUserStmt.TslOptions[%d]", i)
}
}
if len(n.ResourceOptions) != 0 {
ctx.WriteKeyWord(" WITH")
}
for i, v := range n.ResourceOptions {
ctx.WritePlain(" ")
if err := v.Restore(ctx); err != nil {
return errors.Annotatef(err, "An error occurred while restore AlterUserStmt.ResourceOptions[%d]", i)
}
}
for i, v := range n.PasswordOrLockOptions {
ctx.WritePlain(" ")
if err := v.Restore(ctx); err != nil {
return errors.Annotatef(err, "An error occurred while restore AlterUserStmt.PasswordOrLockOptions[%d]", i)
}
}
return nil return nil
} }
...@@ -1988,26 +2026,81 @@ type TableOptimizerHint struct { ...@@ -1988,26 +2026,81 @@ type TableOptimizerHint struct {
// Table hints has no schema info // Table hints has no schema info
// It allows only table name or alias (if table has an alias) // It allows only table name or alias (if table has an alias)
HintName model.CIStr HintName model.CIStr
Tables []model.CIStr // QBName is the default effective query block of this hint.
QBName model.CIStr
Tables []HintTable
Indexes []model.CIStr
// Statement Execution Time Optimizer Hints // Statement Execution Time Optimizer Hints
// See https://dev.mysql.com/doc/refman/5.7/en/optimizer-hints.html#optimizer-hints-execution-time // See https://dev.mysql.com/doc/refman/5.7/en/optimizer-hints.html#optimizer-hints-execution-time
MaxExecutionTime uint64 MaxExecutionTime uint64
MemoryQuota uint64
QueryType model.CIStr
HintFlag bool
}
// HintTable is a table reference inside an optimizer hint. Besides the
// table name it may carry the query block the reference belongs to.
type HintTable struct {
	// TableName is the referenced table's name (per the surrounding hint
	// docs, this may be the table's alias rather than its real name).
	TableName model.CIStr
	// QBName is the query block name; left empty when none was given.
	QBName model.CIStr
}

// Restore writes the table reference in restored-SQL form: the table
// name, followed by "@<query block name>" when a query block is set.
func (ht *HintTable) Restore(ctx *RestoreCtx) {
	ctx.WriteName(ht.TableName.String())
	if ht.QBName.L != "" {
		ctx.WriteKeyWord("@")
		ctx.WriteName(ht.QBName.String())
	}
}
// Restore implements Node interface. // Restore implements Node interface.
func (n *TableOptimizerHint) Restore(ctx *RestoreCtx) error { func (n *TableOptimizerHint) Restore(ctx *RestoreCtx) error {
ctx.WriteKeyWord(n.HintName.String()) ctx.WriteKeyWord(n.HintName.String())
ctx.WritePlain("(") ctx.WritePlain("(")
if n.QBName.L != "" {
if n.HintName.L != "qb_name" {
ctx.WriteKeyWord("@")
}
ctx.WriteName(n.QBName.String())
}
// Hints without args except query block.
switch n.HintName.L {
case "hash_agg", "stream_agg", "read_consistent_replica", "no_index_merge", "qb_name":
ctx.WritePlain(")")
return nil
}
if n.QBName.L != "" {
ctx.WritePlain(" ")
}
// Hints with args except query block.
switch n.HintName.L { switch n.HintName.L {
case "max_execution_time": case "max_execution_time":
ctx.WritePlainf("%d", n.MaxExecutionTime) ctx.WritePlainf("%d", n.MaxExecutionTime)
case "tidb_hj", "tidb_smj", "tidb_inlj": case "tidb_hj", "tidb_smj", "tidb_inlj", "hash_join", "sm_join", "inl_join":
for i, table := range n.Tables { for i, table := range n.Tables {
if i != 0 { if i != 0 {
ctx.WritePlain(", ") ctx.WritePlain(", ")
} }
ctx.WriteName(table.String()) table.Restore(ctx)
}
case "index", "use_index_merge":
n.Tables[0].Restore(ctx)
ctx.WritePlain(" ")
for i, index := range n.Indexes {
if i != 0 {
ctx.WritePlain(", ")
}
ctx.WriteName(index.String())
}
case "use_toja", "enable_plan_cache":
if n.HintFlag {
ctx.WritePlain("TRUE")
} else {
ctx.WritePlain("FALSE")
} }
case "query_type":
ctx.WriteKeyWord(n.QueryType.String())
case "memory_quota":
ctx.WritePlainf("%d M", n.MemoryQuota)
} }
ctx.WritePlain(")") ctx.WritePlain(")")
return nil return nil
......
...@@ -48,6 +48,7 @@ const ( ...@@ -48,6 +48,7 @@ const (
AnalyzeOptNumTopN AnalyzeOptNumTopN
AnalyzeOptCMSketchDepth AnalyzeOptCMSketchDepth
AnalyzeOptCMSketchWidth AnalyzeOptCMSketchWidth
AnalyzeOptNumSamples
) )
// AnalyzeOptionString stores the string form of analyze options. // AnalyzeOptionString stores the string form of analyze options.
...@@ -56,6 +57,7 @@ var AnalyzeOptionString = map[AnalyzeOptionType]string{ ...@@ -56,6 +57,7 @@ var AnalyzeOptionString = map[AnalyzeOptionType]string{
AnalyzeOptNumTopN: "TOPN", AnalyzeOptNumTopN: "TOPN",
AnalyzeOptCMSketchWidth: "CMSKETCH WIDTH", AnalyzeOptCMSketchWidth: "CMSKETCH WIDTH",
AnalyzeOptCMSketchDepth: "CMSKETCH DEPTH", AnalyzeOptCMSketchDepth: "CMSKETCH DEPTH",
AnalyzeOptNumSamples: "SAMPLES",
} }
// AnalyzeOpt stores the analyze option type and value. // AnalyzeOpt stores the analyze option type and value.
......
...@@ -188,6 +188,7 @@ var tokenMap = map[string]int{ ...@@ -188,6 +188,7 @@ var tokenMap = map[string]int{
"COLLATE": collate, "COLLATE": collate,
"COLLATION": collation, "COLLATION": collation,
"COLUMN": column, "COLUMN": column,
"COLUMN_FORMAT": columnFormat,
"COLUMNS": columns, "COLUMNS": columns,
"COMMENT": comment, "COMMENT": comment,
"COMMIT": commit, "COMMIT": commit,
...@@ -238,6 +239,7 @@ var tokenMap = map[string]int{ ...@@ -238,6 +239,7 @@ var tokenMap = map[string]int{
"DESCRIBE": describe, "DESCRIBE": describe,
"DIRECTORY": directory, "DIRECTORY": directory,
"DISABLE": disable, "DISABLE": disable,
"DISCARD": discard,
"DISK": disk, "DISK": disk,
"DISTINCT": distinct, "DISTINCT": distinct,
"DISTINCTROW": distinct, "DISTINCTROW": distinct,
...@@ -251,7 +253,9 @@ var tokenMap = map[string]int{ ...@@ -251,7 +253,9 @@ var tokenMap = map[string]int{
"DYNAMIC": dynamic, "DYNAMIC": dynamic,
"ELSE": elseKwd, "ELSE": elseKwd,
"ENABLE": enable, "ENABLE": enable,
"ENABLE_PLAN_CACHE": hintEnablePlanCache,
"ENCLOSED": enclosed, "ENCLOSED": enclosed,
"ENCRYPTION": encryption,
"END": end, "END": end,
"ENFORCED": enforced, "ENFORCED": enforced,
"ENGINE": engine, "ENGINE": engine,
...@@ -263,6 +267,7 @@ var tokenMap = map[string]int{ ...@@ -263,6 +267,7 @@ var tokenMap = map[string]int{
"EVENTS": events, "EVENTS": events,
"EXCLUSIVE": exclusive, "EXCLUSIVE": exclusive,
"EXCEPT": except, "EXCEPT": except,
"EXCHANGE": exchange,
"EXECUTE": execute, "EXECUTE": execute,
"EXISTS": exists, "EXISTS": exists,
"EXPIRE": expire, "EXPIRE": expire,
...@@ -292,6 +297,8 @@ var tokenMap = map[string]int{ ...@@ -292,6 +297,8 @@ var tokenMap = map[string]int{
"GROUP": group, "GROUP": group,
"GROUP_CONCAT": groupConcat, "GROUP_CONCAT": groupConcat,
"HASH": hash, "HASH": hash,
"HASH_AGG": hintHASHAGG,
"HASH_JOIN": hintHJ,
"HAVING": having, "HAVING": having,
"HIGH_PRIORITY": highPriority, "HIGH_PRIORITY": highPriority,
"HISTORY": history, "HISTORY": history,
...@@ -302,15 +309,18 @@ var tokenMap = map[string]int{ ...@@ -302,15 +309,18 @@ var tokenMap = map[string]int{
"IDENTIFIED": identified, "IDENTIFIED": identified,
"IF": ifKwd, "IF": ifKwd,
"IGNORE": ignore, "IGNORE": ignore,
"IMPORT": importKwd,
"IN": in, "IN": in,
"INCREMENTAL": incremental, "INCREMENTAL": incremental,
"INDEX": index, "INDEX": index,
"INDEXES": indexes, "INDEXES": indexes,
"INFILE": infile, "INFILE": infile,
"INL_JOIN": hintINLJ,
"INNER": inner, "INNER": inner,
"INPLACE": inplace, "INPLACE": inplace,
"INSTANT": instant, "INSTANT": instant,
"INSERT": insert, "INSERT": insert,
"INSERT_METHOD": insertMethod,
"INT": intType, "INT": intType,
"INT1": int1Type, "INT1": int1Type,
"INT2": int2Type, "INT2": int2Type,
...@@ -327,6 +337,7 @@ var tokenMap = map[string]int{ ...@@ -327,6 +337,7 @@ var tokenMap = map[string]int{
"IS": is, "IS": is,
"ISSUER": issuer, "ISSUER": issuer,
"ISOLATION": isolation, "ISOLATION": isolation,
"USE_TOJA": hintUseToja,
"JOBS": jobs, "JOBS": jobs,
"JOB": job, "JOB": job,
"JOIN": join, "JOIN": join,
...@@ -368,6 +379,7 @@ var tokenMap = map[string]int{ ...@@ -368,6 +379,7 @@ var tokenMap = map[string]int{
"MEDIUMINT": mediumIntType, "MEDIUMINT": mediumIntType,
"MEDIUMTEXT": mediumtextType, "MEDIUMTEXT": mediumtextType,
"MEMORY": memory, "MEMORY": memory,
"MEMORY_QUOTA": hintMemoryQuota,
"MERGE": merge, "MERGE": merge,
"MICROSECOND": microsecond, "MICROSECOND": microsecond,
"MIN": min, "MIN": min,
...@@ -385,6 +397,7 @@ var tokenMap = map[string]int{ ...@@ -385,6 +397,7 @@ var tokenMap = map[string]int{
"NEVER": never, "NEVER": never,
"NEXT_ROW_ID": next_row_id, "NEXT_ROW_ID": next_row_id,
"NO": no, "NO": no,
"NO_INDEX_MERGE": hintNoIndexMerge,
"NO_WRITE_TO_BINLOG": noWriteToBinLog, "NO_WRITE_TO_BINLOG": noWriteToBinLog,
"NODE_ID": nodeID, "NODE_ID": nodeID,
"NODE_STATE": nodeState, "NODE_STATE": nodeState,
...@@ -395,11 +408,15 @@ var tokenMap = map[string]int{ ...@@ -395,11 +408,15 @@ var tokenMap = map[string]int{
"NULL": null, "NULL": null,
"NULLS": nulls, "NULLS": nulls,
"NUMERIC": numericType, "NUMERIC": numericType,
"NCHAR": ncharType,
"NVARCHAR": nvarcharType, "NVARCHAR": nvarcharType,
"OFFSET": offset, "OFFSET": offset,
"OLAP": hintOLAP,
"OLTP": hintOLTP,
"ON": on, "ON": on,
"ONLY": only, "ONLY": only,
"OPTIMISTIC": optimistic, "OPTIMISTIC": optimistic,
"OPTIMIZE": optimize,
"OPTION": option, "OPTION": option,
"OPTIONALLY": optionally, "OPTIONALLY": optionally,
"OR": or, "OR": or,
...@@ -426,15 +443,19 @@ var tokenMap = map[string]int{ ...@@ -426,15 +443,19 @@ var tokenMap = map[string]int{
"PROFILE": profile, "PROFILE": profile,
"PROFILES": profiles, "PROFILES": profiles,
"PUMP": pump, "PUMP": pump,
"QB_NAME": hintQBName,
"QUARTER": quarter, "QUARTER": quarter,
"QUERY": query, "QUERY": query,
"QUERY_TYPE": hintQueryType,
"QUERIES": queries, "QUERIES": queries,
"QUICK": quick, "QUICK": quick,
"SHARD_ROW_ID_BITS": shardRowIDBits, "SHARD_ROW_ID_BITS": shardRowIDBits,
"PRE_SPLIT_REGIONS": preSplitRegions, "PRE_SPLIT_REGIONS": preSplitRegions,
"RANGE": rangeKwd, "RANGE": rangeKwd,
"RECOVER": recover, "RECOVER": recover,
"REBUILD": rebuild,
"READ": read, "READ": read,
"READ_CONSISTENT_REPLICA": hintReadConsistentReplica,
"REAL": realType, "REAL": realType,
"RECENT": recent, "RECENT": recent,
"REDUNDANT": redundant, "REDUNDANT": redundant,
...@@ -444,6 +465,8 @@ var tokenMap = map[string]int{ ...@@ -444,6 +465,8 @@ var tokenMap = map[string]int{
"RELOAD": reload, "RELOAD": reload,
"REMOVE": remove, "REMOVE": remove,
"RENAME": rename, "RENAME": rename,
"REORGANIZE": reorganize,
"REPAIR": repair,
"REPEAT": repeat, "REPEAT": repeat,
"REPEATABLE": repeatable, "REPEATABLE": repeatable,
"REPLACE": replace, "REPLACE": replace,
...@@ -461,12 +484,18 @@ var tokenMap = map[string]int{ ...@@ -461,12 +484,18 @@ var tokenMap = map[string]int{
"ROW": row, "ROW": row,
"ROW_COUNT": rowCount, "ROW_COUNT": rowCount,
"ROW_FORMAT": rowFormat, "ROW_FORMAT": rowFormat,
"RTREE": rtree,
"SAMPLES": samples,
"SCHEMA": database, "SCHEMA": database,
"SCHEMAS": databases, "SCHEMAS": databases,
"SECOND": second, "SECOND": second,
"SECONDARY_ENGINE": secondaryEngine,
"SECONDARY_LOAD": secondaryLoad,
"SECONDARY_UNLOAD": secondaryUnload,
"SECOND_MICROSECOND": secondMicrosecond, "SECOND_MICROSECOND": secondMicrosecond,
"SECURITY": security, "SECURITY": security,
"SELECT": selectKwd, "SELECT": selectKwd,
"SERIAL": serial,
"SERIALIZABLE": serializable, "SERIALIZABLE": serializable,
"SESSION": session, "SESSION": session,
"SET": set, "SET": set,
...@@ -478,9 +507,11 @@ var tokenMap = map[string]int{ ...@@ -478,9 +507,11 @@ var tokenMap = map[string]int{
"SIMPLE": simple, "SIMPLE": simple,
"SLAVE": slave, "SLAVE": slave,
"SLOW": slow, "SLOW": slow,
"SM_JOIN": hintSMJ,
"SMALLINT": smallIntType, "SMALLINT": smallIntType,
"SNAPSHOT": snapshot, "SNAPSHOT": snapshot,
"SOME": some, "SOME": some,
"SPATIAL": spatial,
"SPLIT": split, "SPLIT": split,
"SQL": sql, "SQL": sql,
"SQL_BIG_RESULT": sqlBigResult, "SQL_BIG_RESULT": sqlBigResult,
...@@ -489,6 +520,14 @@ var tokenMap = map[string]int{ ...@@ -489,6 +520,14 @@ var tokenMap = map[string]int{
"SQL_CALC_FOUND_ROWS": sqlCalcFoundRows, "SQL_CALC_FOUND_ROWS": sqlCalcFoundRows,
"SQL_NO_CACHE": sqlNoCache, "SQL_NO_CACHE": sqlNoCache,
"SQL_SMALL_RESULT": sqlSmallResult, "SQL_SMALL_RESULT": sqlSmallResult,
"SQL_TSI_DAY": sqlTsiDay,
"SQL_TSI_HOUR": sqlTsiHour,
"SQL_TSI_MINUTE": sqlTsiMinute,
"SQL_TSI_MONTH": sqlTsiMonth,
"SQL_TSI_QUARTER": sqlTsiQuarter,
"SQL_TSI_SECOND": sqlTsiSecond,
"SQL_TSI_WEEK": sqlTsiWeek,
"SQL_TSI_YEAR": sqlTsiYear,
"SOURCE": source, "SOURCE": source,
"SSL": ssl, "SSL": ssl,
"START": start, "START": start,
...@@ -498,6 +537,7 @@ var tokenMap = map[string]int{ ...@@ -498,6 +537,7 @@ var tokenMap = map[string]int{
"STATS_HISTOGRAMS": statsHistograms, "STATS_HISTOGRAMS": statsHistograms,
"STATS_HEALTHY": statsHealthy, "STATS_HEALTHY": statsHealthy,
"STATS_META": statsMeta, "STATS_META": statsMeta,
"STATS_AUTO_RECALC": statsAutoRecalc,
"STATS_PERSISTENT": statsPersistent, "STATS_PERSISTENT": statsPersistent,
"STATS_SAMPLE_PAGES": statsSamplePages, "STATS_SAMPLE_PAGES": statsSamplePages,
"STATUS": status, "STATUS": status,
...@@ -512,6 +552,7 @@ var tokenMap = map[string]int{ ...@@ -512,6 +552,7 @@ var tokenMap = map[string]int{
"STDDEV_SAMP": stddevSamp, "STDDEV_SAMP": stddevSamp,
"STORED": stored, "STORED": stored,
"STRAIGHT_JOIN": straightJoin, "STRAIGHT_JOIN": straightJoin,
"STREAM_AGG": hintSTREAMAGG,
"SUBDATE": subDate, "SUBDATE": subDate,
"SUBJECT": subject, "SUBJECT": subject,
"SUBPARTITION": subpartition, "SUBPARTITION": subpartition,
...@@ -521,6 +562,7 @@ var tokenMap = map[string]int{ ...@@ -521,6 +562,7 @@ var tokenMap = map[string]int{
"SUM": sum, "SUM": sum,
"SUPER": super, "SUPER": super,
"TABLE": tableKwd, "TABLE": tableKwd,
"TABLE_CHECKSUM": tableChecksum,
"TABLES": tables, "TABLES": tables,
"TABLESPACE": tablespace, "TABLESPACE": tablespace,
"TEMPORARY": temporary, "TEMPORARY": temporary,
...@@ -530,11 +572,9 @@ var tokenMap = map[string]int{ ...@@ -530,11 +572,9 @@ var tokenMap = map[string]int{
"THAN": than, "THAN": than,
"THEN": then, "THEN": then,
"TIDB": tidb, "TIDB": tidb,
"TIDB_HJ": tidbHJ, "TIDB_HJ": hintHJ,
"TIDB_INLJ": tidbINLJ, "TIDB_INLJ": hintINLJ,
"TIDB_SMJ": tidbSMJ, "TIDB_SMJ": hintSMJ,
"TIDB_HASHAGG": tidbHASHAGG,
"TIDB_STREAMAGG": tidbSTREAMAGG,
"TIME": timeType, "TIME": timeType,
"TIMESTAMP": timestampType, "TIMESTAMP": timestampType,
"TIMESTAMPADD": timestampAdd, "TIMESTAMPADD": timestampAdd,
...@@ -573,17 +613,21 @@ var tokenMap = map[string]int{ ...@@ -573,17 +613,21 @@ var tokenMap = map[string]int{
"UPDATE": update, "UPDATE": update,
"USAGE": usage, "USAGE": usage,
"USE": use, "USE": use,
"USE_INDEX_MERGE": hintUseIndexMerge,
"USE_PLAN_CACHE": hintUsePlanCache,
"USER": user, "USER": user,
"USING": using, "USING": using,
"UTC_DATE": utcDate, "UTC_DATE": utcDate,
"UTC_TIME": utcTime, "UTC_TIME": utcTime,
"UTC_TIMESTAMP": utcTimestamp, "UTC_TIMESTAMP": utcTimestamp,
"VALIDATION": validation,
"VALUE": value, "VALUE": value,
"VALUES": values, "VALUES": values,
"VARBINARY": varbinaryType, "VARBINARY": varbinaryType,
"VARCHAR": varcharType, "VARCHAR": varcharType,
"VARIABLES": variables, "VARIABLES": variables,
"VARIANCE": varPop, "VARIANCE": varPop,
"VARYING": varying,
"VAR_POP": varPop, "VAR_POP": varPop,
"VAR_SAMP": varSamp, "VAR_SAMP": varSamp,
"VIEW": view, "VIEW": view,
...@@ -595,6 +639,7 @@ var tokenMap = map[string]int{ ...@@ -595,6 +639,7 @@ var tokenMap = map[string]int{
"WHERE": where, "WHERE": where,
"WIDTH": width, "WIDTH": width,
"WITH": with, "WITH": with,
"WITHOUT": without,
"WRITE": write, "WRITE": write,
"XOR": xor, "XOR": xor,
"X509": x509, "X509": x509,
...@@ -663,10 +708,13 @@ var windowFuncTokenMap = map[string]int{ ...@@ -663,10 +708,13 @@ var windowFuncTokenMap = map[string]int{
// aliases are strings directly map to another string and use the same token. // aliases are strings directly map to another string and use the same token.
var aliases = map[string]string{ var aliases = map[string]string{
"SCHEMA": "DATABASE", "SCHEMA": "DATABASE",
"SCHEMAS": "DATABASES", "SCHEMAS": "DATABASES",
"DEC": "DECIMAL", "DEC": "DECIMAL",
"SUBSTR": "SUBSTRING", "SUBSTR": "SUBSTRING",
"TIDB_HJ": "HASH_JOIN",
"TIDB_INLJ": "INL_JOIN",
"TIDB_SMJ": "SM_JOIN",
} }
func (s *Scanner) isTokenIdentifier(lit string, offset int) int { func (s *Scanner) isTokenIdentifier(lit string, offset int) int {
......
...@@ -147,12 +147,13 @@ func NewDDLReorgMeta() *DDLReorgMeta { ...@@ -147,12 +147,13 @@ func NewDDLReorgMeta() *DDLReorgMeta {
// Job is for a DDL operation. // Job is for a DDL operation.
type Job struct { type Job struct {
ID int64 `json:"id"` ID int64 `json:"id"`
Type ActionType `json:"type"` Type ActionType `json:"type"`
SchemaID int64 `json:"schema_id"` SchemaID int64 `json:"schema_id"`
TableID int64 `json:"table_id"` TableID int64 `json:"table_id"`
State JobState `json:"state"` SchemaName string `json:"schema_name"`
Error *terror.Error `json:"err"` State JobState `json:"state"`
Error *terror.Error `json:"err"`
// ErrorCount will be increased, every time we meet an error when running job. // ErrorCount will be increased, every time we meet an error when running job.
ErrorCount int64 `json:"err_count"` ErrorCount int64 `json:"err_count"`
// RowCount means the number of rows that are processed. // RowCount means the number of rows that are processed.
......
...@@ -629,6 +629,8 @@ func (t IndexType) String() string { ...@@ -629,6 +629,8 @@ func (t IndexType) String() string {
return "BTREE" return "BTREE"
case IndexTypeHash: case IndexTypeHash:
return "HASH" return "HASH"
case IndexTypeRtree:
return "RTREE"
default: default:
return "" return ""
} }
...@@ -639,6 +641,7 @@ const ( ...@@ -639,6 +641,7 @@ const (
IndexTypeInvalid IndexType = iota IndexTypeInvalid IndexType = iota
IndexTypeBtree IndexTypeBtree
IndexTypeHash IndexTypeHash
IndexTypeRtree
) )
// IndexInfo provides meta data describing a DB index. // IndexInfo provides meta data describing a DB index.
...@@ -653,7 +656,7 @@ type IndexInfo struct { ...@@ -653,7 +656,7 @@ type IndexInfo struct {
Primary bool `json:"is_primary"` // Whether the index is primary key. Primary bool `json:"is_primary"` // Whether the index is primary key.
State SchemaState `json:"state"` State SchemaState `json:"state"`
Comment string `json:"comment"` // Comment Comment string `json:"comment"` // Comment
Tp IndexType `json:"index_type"` // Index type: Btree or Hash Tp IndexType `json:"index_type"` // Index type: Btree, Hash or Rtree
} }
// Clone clones IndexInfo. // Clone clones IndexInfo.
......
...@@ -80,6 +80,7 @@ var defaultLengthAndDecimalForCast = map[byte]lengthAndDecimal{ ...@@ -80,6 +80,7 @@ var defaultLengthAndDecimalForCast = map[byte]lengthAndDecimal{
TypeDuration: {10, 0}, TypeDuration: {10, 0},
TypeLonglong: {22, 0}, TypeLonglong: {22, 0},
TypeDouble: {22, -1}, TypeDouble: {22, -1},
TypeFloat: {12, -1},
TypeJSON: {4194304, 0}, // Flen differs. TypeJSON: {4194304, 0}, // Flen differs.
} }
......
...@@ -201,13 +201,11 @@ func (ft *FieldType) String() string { ...@@ -201,13 +201,11 @@ func (ft *FieldType) String() string {
func (ft *FieldType) Restore(ctx *format.RestoreCtx) error { func (ft *FieldType) Restore(ctx *format.RestoreCtx) error {
ctx.WriteKeyWord(TypeToStr(ft.Tp, ft.Charset)) ctx.WriteKeyWord(TypeToStr(ft.Tp, ft.Charset))
precision := ft.Flen precision := UnspecifiedLength
scale := ft.Decimal scale := UnspecifiedLength
switch ft.Tp { switch ft.Tp {
case mysql.TypeEnum, mysql.TypeSet: case mysql.TypeEnum, mysql.TypeSet:
precision = UnspecifiedLength
scale = UnspecifiedLength
ctx.WritePlain("(") ctx.WritePlain("(")
for i, e := range ft.Elems { for i, e := range ft.Elems {
if i != 0 { if i != 0 {
...@@ -218,7 +216,11 @@ func (ft *FieldType) Restore(ctx *format.RestoreCtx) error { ...@@ -218,7 +216,11 @@ func (ft *FieldType) Restore(ctx *format.RestoreCtx) error {
ctx.WritePlain(")") ctx.WritePlain(")")
case mysql.TypeTimestamp, mysql.TypeDatetime, mysql.TypeDuration: case mysql.TypeTimestamp, mysql.TypeDatetime, mysql.TypeDuration:
precision = ft.Decimal precision = ft.Decimal
scale = UnspecifiedLength case mysql.TypeDecimal, mysql.TypeFloat, mysql.TypeDouble, mysql.TypeNewDecimal:
precision = ft.Flen
scale = ft.Decimal
default:
precision = ft.Flen
} }
if precision != UnspecifiedLength { if precision != UnspecifiedLength {
...@@ -227,7 +229,6 @@ func (ft *FieldType) Restore(ctx *format.RestoreCtx) error { ...@@ -227,7 +229,6 @@ func (ft *FieldType) Restore(ctx *format.RestoreCtx) error {
ctx.WritePlainf(",%d", scale) ctx.WritePlainf(",%d", scale)
} }
ctx.WritePlain(")") ctx.WritePlain(")")
} }
if mysql.HasUnsignedFlag(ft.Flag) { if mysql.HasUnsignedFlag(ft.Flag) {
...@@ -301,6 +302,8 @@ func (ft *FieldType) RestoreAsCastType(ctx *format.RestoreCtx) { ...@@ -301,6 +302,8 @@ func (ft *FieldType) RestoreAsCastType(ctx *format.RestoreCtx) {
ctx.WriteKeyWord("JSON") ctx.WriteKeyWord("JSON")
case mysql.TypeDouble: case mysql.TypeDouble:
ctx.WriteKeyWord("DOUBLE") ctx.WriteKeyWord("DOUBLE")
case mysql.TypeFloat:
ctx.WriteKeyWord("FLOAT")
} }
} }
......
...@@ -36,6 +36,7 @@ const ( ...@@ -36,6 +36,7 @@ const (
codeTooBigDisplayWidth = terror.ErrCode(mysql.ErrTooBigDisplaywidth) codeTooBigDisplayWidth = terror.ErrCode(mysql.ErrTooBigDisplaywidth)
codeErrUnknownAlterLock = terror.ErrCode(mysql.ErrUnknownAlterLock) codeErrUnknownAlterLock = terror.ErrCode(mysql.ErrUnknownAlterLock)
codeErrUnknownAlterAlgorithm = terror.ErrCode(mysql.ErrUnknownAlterAlgorithm) codeErrUnknownAlterAlgorithm = terror.ErrCode(mysql.ErrUnknownAlterAlgorithm)
codeErrTooBigPrecision = terror.ErrCode(mysql.ErrTooBigPrecision)
) )
var ( var (
...@@ -53,6 +54,8 @@ var ( ...@@ -53,6 +54,8 @@ var (
ErrWrongFieldTerminators = terror.ClassParser.New(codeWrongFieldTerminators, mysql.MySQLErrName[mysql.ErrWrongFieldTerminators]) ErrWrongFieldTerminators = terror.ClassParser.New(codeWrongFieldTerminators, mysql.MySQLErrName[mysql.ErrWrongFieldTerminators])
// ErrTooBigDisplayWidth returns for data display width exceed limit . // ErrTooBigDisplayWidth returns for data display width exceed limit .
ErrTooBigDisplayWidth = terror.ClassParser.New(codeTooBigDisplayWidth, mysql.MySQLErrName[mysql.ErrTooBigDisplaywidth]) ErrTooBigDisplayWidth = terror.ClassParser.New(codeTooBigDisplayWidth, mysql.MySQLErrName[mysql.ErrTooBigDisplaywidth])
// ErrTooBigPrecision returns for data precision exceed limit.
ErrTooBigPrecision = terror.ClassParser.New(codeErrTooBigPrecision, mysql.MySQLErrName[mysql.ErrTooBigPrecision])
// ErrUnknownAlterLock returns for no alter lock type found error. // ErrUnknownAlterLock returns for no alter lock type found error.
ErrUnknownAlterLock = terror.ClassParser.New(codeErrUnknownAlterLock, mysql.MySQLErrName[mysql.ErrUnknownAlterLock]) ErrUnknownAlterLock = terror.ClassParser.New(codeErrUnknownAlterLock, mysql.MySQLErrName[mysql.ErrUnknownAlterLock])
// ErrUnknownAlterAlgorithm returns for no alter algorithm found error. // ErrUnknownAlterAlgorithm returns for no alter algorithm found error.
...@@ -75,6 +78,7 @@ func init() { ...@@ -75,6 +78,7 @@ func init() {
codeTooBigDisplayWidth: mysql.ErrTooBigDisplaywidth, codeTooBigDisplayWidth: mysql.ErrTooBigDisplaywidth,
codeErrUnknownAlterLock: mysql.ErrUnknownAlterLock, codeErrUnknownAlterLock: mysql.ErrUnknownAlterLock,
codeErrUnknownAlterAlgorithm: mysql.ErrUnknownAlterAlgorithm, codeErrUnknownAlterAlgorithm: mysql.ErrUnknownAlterAlgorithm,
codeErrTooBigPrecision: mysql.ErrTooBigPrecision,
} }
terror.ErrClassToMySQLCodes[terror.ClassParser] = parserMySQLErrCodes terror.ErrClassToMySQLCodes[terror.ClassParser] = parserMySQLErrCodes
} }
......
...@@ -76,6 +76,13 @@ type StatementContext struct { ...@@ -76,6 +76,13 @@ type StatementContext struct {
// prefix in a strict way, only extract 0-9 and (+ or - in first bit). // prefix in a strict way, only extract 0-9 and (+ or - in first bit).
CastStrToIntStrict bool CastStrToIntStrict bool
// StartTime is the query start time.
StartTime time.Time
// DurationParse is the duration of pasing SQL string to AST.
DurationParse time.Duration
// DurationCompile is the duration of compiling AST to execution plan.
DurationCompile time.Duration
// mu struct holds variables that change during execution. // mu struct holds variables that change during execution.
mu struct { mu struct {
sync.Mutex sync.Mutex
...@@ -413,6 +420,9 @@ func (sc *StatementContext) ResetForRetry() { ...@@ -413,6 +420,9 @@ func (sc *StatementContext) ResetForRetry() {
sc.mu.Unlock() sc.mu.Unlock()
sc.TableIDs = sc.TableIDs[:0] sc.TableIDs = sc.TableIDs[:0]
sc.IndexIDs = sc.IndexIDs[:0] sc.IndexIDs = sc.IndexIDs[:0]
sc.StartTime = time.Now()
sc.DurationCompile = time.Duration(0)
sc.DurationParse = time.Duration(0)
} }
// MergeExecDetails merges a single region execution details into self, used to print // MergeExecDetails merges a single region execution details into self, used to print
......
...@@ -98,6 +98,7 @@ func IntergerSignedLowerBound(intType byte) int64 { ...@@ -98,6 +98,7 @@ func IntergerSignedLowerBound(intType byte) int64 {
} }
// ConvertFloatToInt converts a float64 value to a int value. // ConvertFloatToInt converts a float64 value to a int value.
// `tp` is used in the error message: if overflow occurs, this function reports the error according to `tp`
func ConvertFloatToInt(fval float64, lowerBound, upperBound int64, tp byte) (int64, error) { func ConvertFloatToInt(fval float64, lowerBound, upperBound int64, tp byte) (int64, error) {
val := RoundFloat(fval) val := RoundFloat(fval)
if val < float64(lowerBound) { if val < float64(lowerBound) {
...@@ -292,7 +293,7 @@ func StrToUint(sc *stmtctx.StatementContext, str string) (uint64, error) { ...@@ -292,7 +293,7 @@ func StrToUint(sc *stmtctx.StatementContext, str string) (uint64, error) {
} }
// StrToDateTime converts str to MySQL DateTime. // StrToDateTime converts str to MySQL DateTime.
func StrToDateTime(sc *stmtctx.StatementContext, str string, fsp int) (Time, error) { func StrToDateTime(sc *stmtctx.StatementContext, str string, fsp int8) (Time, error) {
return ParseTime(sc, str, mysql.TypeDatetime, fsp) return ParseTime(sc, str, mysql.TypeDatetime, fsp)
} }
...@@ -300,7 +301,7 @@ func StrToDateTime(sc *stmtctx.StatementContext, str string, fsp int) (Time, err ...@@ -300,7 +301,7 @@ func StrToDateTime(sc *stmtctx.StatementContext, str string, fsp int) (Time, err
// and returns Time when str is in datetime format. // and returns Time when str is in datetime format.
// when isDuration is true, the d is returned, when it is false, the t is returned. // when isDuration is true, the d is returned, when it is false, the t is returned.
// See https://dev.mysql.com/doc/refman/5.5/en/date-and-time-literals.html. // See https://dev.mysql.com/doc/refman/5.5/en/date-and-time-literals.html.
func StrToDuration(sc *stmtctx.StatementContext, str string, fsp int) (d Duration, t Time, isDuration bool, err error) { func StrToDuration(sc *stmtctx.StatementContext, str string, fsp int8) (d Duration, t Time, isDuration bool, err error) {
str = strings.TrimSpace(str) str = strings.TrimSpace(str)
length := len(str) length := len(str)
if length > 0 && str[0] == '-' { if length > 0 && str[0] == '-' {
...@@ -323,7 +324,7 @@ func StrToDuration(sc *stmtctx.StatementContext, str string, fsp int) (d Duratio ...@@ -323,7 +324,7 @@ func StrToDuration(sc *stmtctx.StatementContext, str string, fsp int) (d Duratio
} }
// NumberToDuration converts number to Duration. // NumberToDuration converts number to Duration.
func NumberToDuration(number int64, fsp int) (Duration, error) { func NumberToDuration(number int64, fsp int8) (Duration, error) {
if number > TimeMaxValue { if number > TimeMaxValue {
// Try to parse DATETIME. // Try to parse DATETIME.
if number >= 10000000000 { // '2001-00-00 00-00-00' if number >= 10000000000 { // '2001-00-00 00-00-00'
...@@ -567,7 +568,11 @@ func ConvertJSONToInt(sc *stmtctx.StatementContext, j json.BinaryJSON, unsigned ...@@ -567,7 +568,11 @@ func ConvertJSONToInt(sc *stmtctx.StatementContext, j json.BinaryJSON, unsigned
return int64(u), errors.Trace(err) return int64(u), errors.Trace(err)
case json.TypeCodeString: case json.TypeCodeString:
str := string(hack.String(j.GetString())) str := string(hack.String(j.GetString()))
return StrToInt(sc, str) if !unsigned {
return StrToInt(sc, str)
}
u, err := StrToUint(sc, str)
return int64(u), errors.Trace(err)
} }
return 0, errors.New("Unknown type code in JSON") return 0, errors.New("Unknown type code in JSON")
} }
......
...@@ -260,7 +260,7 @@ func (d *Datum) SetMysqlDecimal(b *MyDecimal) { ...@@ -260,7 +260,7 @@ func (d *Datum) SetMysqlDecimal(b *MyDecimal) {
// GetMysqlDuration gets Duration value // GetMysqlDuration gets Duration value
func (d *Datum) GetMysqlDuration() Duration { func (d *Datum) GetMysqlDuration() Duration {
return Duration{Duration: time.Duration(d.i), Fsp: int(d.decimal)} return Duration{Duration: time.Duration(d.i), Fsp: int8(d.decimal)}
} }
// SetMysqlDuration sets Duration value // SetMysqlDuration sets Duration value
...@@ -939,7 +939,7 @@ func (d *Datum) convertToMysqlTimestamp(sc *stmtctx.StatementContext, target *Fi ...@@ -939,7 +939,7 @@ func (d *Datum) convertToMysqlTimestamp(sc *stmtctx.StatementContext, target *Fi
) )
fsp := DefaultFsp fsp := DefaultFsp
if target.Decimal != UnspecifiedLength { if target.Decimal != UnspecifiedLength {
fsp = target.Decimal fsp = int8(target.Decimal)
} }
switch d.k { switch d.k {
case KindMysqlTime: case KindMysqlTime:
...@@ -973,7 +973,7 @@ func (d *Datum) convertToMysqlTime(sc *stmtctx.StatementContext, target *FieldTy ...@@ -973,7 +973,7 @@ func (d *Datum) convertToMysqlTime(sc *stmtctx.StatementContext, target *FieldTy
tp := target.Tp tp := target.Tp
fsp := DefaultFsp fsp := DefaultFsp
if target.Decimal != UnspecifiedLength { if target.Decimal != UnspecifiedLength {
fsp = target.Decimal fsp = int8(target.Decimal)
} }
var ( var (
ret Datum ret Datum
...@@ -1019,7 +1019,7 @@ func (d *Datum) convertToMysqlDuration(sc *stmtctx.StatementContext, target *Fie ...@@ -1019,7 +1019,7 @@ func (d *Datum) convertToMysqlDuration(sc *stmtctx.StatementContext, target *Fie
tp := target.Tp tp := target.Tp
fsp := DefaultFsp fsp := DefaultFsp
if target.Decimal != UnspecifiedLength { if target.Decimal != UnspecifiedLength {
fsp = target.Decimal fsp = int8(target.Decimal)
} }
var ret Datum var ret Datum
switch d.k { switch d.k {
......
...@@ -83,10 +83,7 @@ func IsTemporalWithDate(tp byte) bool { ...@@ -83,10 +83,7 @@ func IsTemporalWithDate(tp byte) bool {
// IsBinaryStr returns a boolean indicating // IsBinaryStr returns a boolean indicating
// whether the field type is a binary string type. // whether the field type is a binary string type.
func IsBinaryStr(ft *FieldType) bool { func IsBinaryStr(ft *FieldType) bool {
if ft.Collate == charset.CollationBin && IsString(ft.Tp) { return ft.Collate == charset.CollationBin && IsString(ft.Tp)
return true
}
return false
} }
// IsNonBinaryStr returns a boolean indicating // IsNonBinaryStr returns a boolean indicating
......
...@@ -221,18 +221,18 @@ func DefaultTypeForValue(value interface{}, tp *FieldType) { ...@@ -221,18 +221,18 @@ func DefaultTypeForValue(value interface{}, tp *FieldType) {
case mysql.TypeDatetime, mysql.TypeTimestamp: case mysql.TypeDatetime, mysql.TypeTimestamp:
tp.Flen = mysql.MaxDatetimeWidthNoFsp tp.Flen = mysql.MaxDatetimeWidthNoFsp
if x.Fsp > DefaultFsp { // consider point('.') and the fractional part. if x.Fsp > DefaultFsp { // consider point('.') and the fractional part.
tp.Flen += x.Fsp + 1 tp.Flen += int(x.Fsp) + 1
} }
tp.Decimal = x.Fsp tp.Decimal = int(x.Fsp)
} }
SetBinChsClnFlag(tp) SetBinChsClnFlag(tp)
case Duration: case Duration:
tp.Tp = mysql.TypeDuration tp.Tp = mysql.TypeDuration
tp.Flen = len(x.String()) tp.Flen = len(x.String())
if x.Fsp > DefaultFsp { // consider point('.') and the fractional part. if x.Fsp > DefaultFsp { // consider point('.') and the fractional part.
tp.Flen = x.Fsp + 1 tp.Flen = int(x.Fsp) + 1
} }
tp.Decimal = x.Fsp tp.Decimal = int(x.Fsp)
SetBinChsClnFlag(tp) SetBinChsClnFlag(tp)
case *MyDecimal: case *MyDecimal:
tp.Tp = mysql.TypeNewDecimal tp.Tp = mysql.TypeNewDecimal
......
...@@ -23,46 +23,46 @@ import ( ...@@ -23,46 +23,46 @@ import (
const ( const (
// UnspecifiedFsp is the unspecified fractional seconds part. // UnspecifiedFsp is the unspecified fractional seconds part.
UnspecifiedFsp = -1 UnspecifiedFsp = int8(-1)
// MaxFsp is the maximum digit of fractional seconds part. // MaxFsp is the maximum digit of fractional seconds part.
MaxFsp = 6 MaxFsp = int8(6)
// MinFsp is the minimum digit of fractional seconds part. // MinFsp is the minimum digit of fractional seconds part.
MinFsp = 0 MinFsp = int8(0)
// DefaultFsp is the default digit of fractional seconds part. // DefaultFsp is the default digit of fractional seconds part.
// MySQL use 0 as the default Fsp. // MySQL use 0 as the default Fsp.
DefaultFsp = 0 DefaultFsp = int8(0)
) )
// CheckFsp checks whether fsp is in valid range. // CheckFsp checks whether fsp is in valid range.
func CheckFsp(fsp int) (int, error) { func CheckFsp(fsp int) (int8, error) {
if fsp == UnspecifiedFsp { if fsp == int(UnspecifiedFsp) {
return DefaultFsp, nil return DefaultFsp, nil
} }
if fsp < MinFsp || fsp > MaxFsp { if fsp < int(MinFsp) || fsp > int(MaxFsp) {
return DefaultFsp, errors.Errorf("Invalid fsp %d", fsp) return DefaultFsp, errors.Errorf("Invalid fsp %d", fsp)
} }
return fsp, nil return int8(fsp), nil
} }
// ParseFrac parses the input string according to fsp, returns the microsecond, // ParseFrac parses the input string according to fsp, returns the microsecond,
// and also a bool value to indicate overflow. eg: // and also a bool value to indicate overflow. eg:
// "999" fsp=2 will overflow. // "999" fsp=2 will overflow.
func ParseFrac(s string, fsp int) (v int, overflow bool, err error) { func ParseFrac(s string, fsp int8) (v int, overflow bool, err error) {
if len(s) == 0 { if len(s) == 0 {
return 0, false, nil return 0, false, nil
} }
fsp, err = CheckFsp(fsp) fsp, err = CheckFsp(int(fsp))
if err != nil { if err != nil {
return 0, false, errors.Trace(err) return 0, false, errors.Trace(err)
} }
if fsp >= len(s) { if int(fsp) >= len(s) {
tmp, e := strconv.ParseInt(s, 10, 64) tmp, e := strconv.ParseInt(s, 10, 64)
if e != nil { if e != nil {
return 0, false, errors.Trace(e) return 0, false, errors.Trace(e)
} }
v = int(float64(tmp) * math.Pow10(MaxFsp-len(s))) v = int(float64(tmp) * math.Pow10(int(MaxFsp)-len(s)))
return return
} }
...@@ -73,7 +73,7 @@ func ParseFrac(s string, fsp int) (v int, overflow bool, err error) { ...@@ -73,7 +73,7 @@ func ParseFrac(s string, fsp int) (v int, overflow bool, err error) {
} }
tmp = (tmp + 5) / 10 tmp = (tmp + 5) / 10
if float64(tmp) >= math.Pow10(fsp) { if float64(tmp) >= math.Pow10(int(fsp)) {
// overflow // overflow
return 0, true, nil return 0, true, nil
} }
...@@ -82,7 +82,7 @@ func ParseFrac(s string, fsp int) (v int, overflow bool, err error) { ...@@ -82,7 +82,7 @@ func ParseFrac(s string, fsp int) (v int, overflow bool, err error) {
// 1236 round 3 -> 124 -> 124000 // 1236 round 3 -> 124 -> 124000
// 0312 round 2 -> 3 -> 30000 // 0312 round 2 -> 3 -> 30000
// 999 round 2 -> 100 -> overflow // 999 round 2 -> 100 -> overflow
v = int(float64(tmp) * math.Pow10(MaxFsp-fsp)) v = int(float64(tmp) * math.Pow10(int(MaxFsp-fsp)))
return return
} }
......
...@@ -1088,7 +1088,7 @@ with the correct -1/0/+1 result ...@@ -1088,7 +1088,7 @@ with the correct -1/0/+1 result
then the encoded value is not memory comparable. then the encoded value is not memory comparable.
NOTE NOTE
the buffer is assumed to be of the size decimalBinSize(precision, frac) the buffer is assumed to be of the size DecimalBinSize(precision, frac)
RETURN VALUE RETURN VALUE
bin - binary value bin - binary value
...@@ -1334,7 +1334,7 @@ func (d *MyDecimal) FromBin(bin []byte, precision, frac int) (binSize int, err e ...@@ -1334,7 +1334,7 @@ func (d *MyDecimal) FromBin(bin []byte, precision, frac int) (binSize int, err e
if bin[binIdx]&0x80 > 0 { if bin[binIdx]&0x80 > 0 {
mask = 0 mask = 0
} }
binSize = decimalBinSize(precision, frac) binSize = DecimalBinSize(precision, frac)
dCopy := make([]byte, 40) dCopy := make([]byte, 40)
dCopy = dCopy[:binSize] dCopy = dCopy[:binSize]
copy(dCopy, bin) copy(dCopy, bin)
...@@ -1409,8 +1409,8 @@ func (d *MyDecimal) FromBin(bin []byte, precision, frac int) (binSize int, err e ...@@ -1409,8 +1409,8 @@ func (d *MyDecimal) FromBin(bin []byte, precision, frac int) (binSize int, err e
return binSize, err return binSize, err
} }
// decimalBinSize returns the size of array to hold a binary representation of a decimal. // DecimalBinSize returns the size of array to hold a binary representation of a decimal.
func decimalBinSize(precision, frac int) int { func DecimalBinSize(precision, frac int) int {
digitsInt := precision - frac digitsInt := precision - frac
wordsInt := digitsInt / digitsPerWord wordsInt := digitsInt / digitsPerWord
wordsFrac := frac / digitsPerWord wordsFrac := frac / digitsPerWord
...@@ -2242,7 +2242,7 @@ func DecimalPeak(b []byte) (int, error) { ...@@ -2242,7 +2242,7 @@ func DecimalPeak(b []byte) (int, error) {
} }
precision := int(b[0]) precision := int(b[0])
frac := int(b[1]) frac := int(b[1])
return decimalBinSize(precision, frac) + 2, nil return DecimalBinSize(precision, frac) + 2, nil
} }
// NewDecFromInt creates a MyDecimal from int. // NewDecFromInt creates a MyDecimal from int.
......
...@@ -16,19 +16,27 @@ package types ...@@ -16,19 +16,27 @@ package types
import ( import (
gotime "time" gotime "time"
"fmt"
"github.com/pingcap/errors" "github.com/pingcap/errors"
) )
// MysqlTime is the internal struct type for Time. // MysqlTime is the internal struct type for Time.
// The order of the attributes is refined to reduce the memory overhead
// considering memory alignment.
type MysqlTime struct { type MysqlTime struct {
year uint16 // year <= 9999 // When it's type is Time, HH:MM:SS may be 839:59:59, so use uint32 to avoid overflow.
month uint8 // month <= 12 hour uint32 // hour <= 23
day uint8 // day <= 31
// When it's type is Time, HH:MM:SS may be 839:59:59, so use int to avoid overflow.
hour int // hour <= 23
minute uint8 // minute <= 59
second uint8 // second <= 59
microsecond uint32 microsecond uint32
year uint16 // year <= 9999
month uint8 // month <= 12
day uint8 // day <= 31
minute uint8 // minute <= 59
second uint8 // second <= 59
}
// String implements fmt.Stringer.
func (t MysqlTime) String() string {
return fmt.Sprintf("{%d %d %d %d %d %d %d}", t.year, t.month, t.day, t.hour, t.minute, t.second, t.microsecond)
} }
// Year returns the year value. // Year returns the year value.
...@@ -173,7 +181,7 @@ func AddDate(year, month, day int64, ot gotime.Time) (nt gotime.Time) { ...@@ -173,7 +181,7 @@ func AddDate(year, month, day int64, ot gotime.Time) (nt gotime.Time) {
} }
func calcTimeFromSec(to *MysqlTime, seconds, microseconds int) { func calcTimeFromSec(to *MysqlTime, seconds, microseconds int) {
to.hour = seconds / 3600 to.hour = uint32(seconds / 3600)
seconds = seconds % 3600 seconds = seconds % 3600
to.minute = uint8(seconds / 60) to.minute = uint8(seconds / 60)
to.second = uint8(seconds % 60) to.second = uint8(seconds % 60)
......
...@@ -203,13 +203,13 @@ func FromGoTime(t gotime.Time) MysqlTime { ...@@ -203,13 +203,13 @@ func FromGoTime(t gotime.Time) MysqlTime {
// FromDate makes a internal time representation from the given date. // FromDate makes a internal time representation from the given date.
func FromDate(year int, month int, day int, hour int, minute int, second int, microsecond int) MysqlTime { func FromDate(year int, month int, day int, hour int, minute int, second int, microsecond int) MysqlTime {
return MysqlTime{ return MysqlTime{
uint16(year), year: uint16(year),
uint8(month), month: uint8(month),
uint8(day), day: uint8(day),
hour, hour: uint32(hour),
uint8(minute), minute: uint8(minute),
uint8(second), second: uint8(second),
uint32(microsecond), microsecond: uint32(microsecond),
} }
} }
...@@ -225,11 +225,11 @@ type Time struct { ...@@ -225,11 +225,11 @@ type Time struct {
Type uint8 Type uint8
// Fsp is short for Fractional Seconds Precision. // Fsp is short for Fractional Seconds Precision.
// See http://dev.mysql.com/doc/refman/5.7/en/fractional-seconds.html // See http://dev.mysql.com/doc/refman/5.7/en/fractional-seconds.html
Fsp int Fsp int8
} }
// MaxMySQLTime returns Time with maximum mysql time type. // MaxMySQLTime returns Time with maximum mysql time type.
func MaxMySQLTime(fsp int) Time { func MaxMySQLTime(fsp int8) Time {
return Time{Time: FromDate(0, 0, 0, TimeMaxHour, TimeMaxMinute, TimeMaxSecond, 0), Type: mysql.TypeDuration, Fsp: fsp} return Time{Time: FromDate(0, 0, 0, TimeMaxHour, TimeMaxMinute, TimeMaxSecond, 0), Type: mysql.TypeDuration, Fsp: fsp}
} }
...@@ -309,7 +309,7 @@ func (t Time) ToNumber() *MyDecimal { ...@@ -309,7 +309,7 @@ func (t Time) ToNumber() *MyDecimal {
if t.Fsp > 0 { if t.Fsp > 0 {
s1 := fmt.Sprintf("%s.%06d", s, t.Time.Microsecond()) s1 := fmt.Sprintf("%s.%06d", s, t.Time.Microsecond())
s = s1[:len(s)+t.Fsp+1] s = s1[:len(s)+int(t.Fsp)+1]
} }
// We skip checking error here because time formatted string can be parsed certainly. // We skip checking error here because time formatted string can be parsed certainly.
...@@ -392,19 +392,19 @@ func (t Time) CompareString(sc *stmtctx.StatementContext, str string) (int, erro ...@@ -392,19 +392,19 @@ func (t Time) CompareString(sc *stmtctx.StatementContext, str string) (int, erro
} }
// roundTime rounds the time value according to digits count specified by fsp. // roundTime rounds the time value according to digits count specified by fsp.
func roundTime(t gotime.Time, fsp int) gotime.Time { func roundTime(t gotime.Time, fsp int8) gotime.Time {
d := gotime.Duration(math.Pow10(9 - fsp)) d := gotime.Duration(math.Pow10(9 - int(fsp)))
return t.Round(d) return t.Round(d)
} }
// RoundFrac rounds the fraction part of a time-type value according to `fsp`. // RoundFrac rounds the fraction part of a time-type value according to `fsp`.
func (t Time) RoundFrac(sc *stmtctx.StatementContext, fsp int) (Time, error) { func (t Time) RoundFrac(sc *stmtctx.StatementContext, fsp int8) (Time, error) {
if t.Type == mysql.TypeDate || t.IsZero() { if t.Type == mysql.TypeDate || t.IsZero() {
// date type has no fsp // date type has no fsp
return t, nil return t, nil
} }
fsp, err := CheckFsp(fsp) fsp, err := CheckFsp(int(fsp))
if err != nil { if err != nil {
return t, errors.Trace(err) return t, errors.Trace(err)
} }
...@@ -438,8 +438,9 @@ func (t Time) RoundFrac(sc *stmtctx.StatementContext, fsp int) (Time, error) { ...@@ -438,8 +438,9 @@ func (t Time) RoundFrac(sc *stmtctx.StatementContext, fsp int) (Time, error) {
} }
// GetFsp gets the fsp of a string. // GetFsp gets the fsp of a string.
func GetFsp(s string) (fsp int) { func GetFsp(s string) int8 {
index := GetFracIndex(s) index := GetFracIndex(s)
var fsp int
if index < 0 { if index < 0 {
fsp = 0 fsp = 0
} else { } else {
...@@ -451,7 +452,7 @@ func GetFsp(s string) (fsp int) { ...@@ -451,7 +452,7 @@ func GetFsp(s string) (fsp int) {
} else if fsp > 6 { } else if fsp > 6 {
fsp = 6 fsp = 6
} }
return return int8(fsp)
} }
// GetFracIndex finds the last '.' for get fracStr, index = -1 means fracStr not found. // GetFracIndex finds the last '.' for get fracStr, index = -1 means fracStr not found.
...@@ -474,22 +475,22 @@ func GetFracIndex(s string) (index int) { ...@@ -474,22 +475,22 @@ func GetFracIndex(s string) (index int) {
// We will use the “round half up” rule, e.g, >= 0.5 -> 1, < 0.5 -> 0, // We will use the “round half up” rule, e.g, >= 0.5 -> 1, < 0.5 -> 0,
// so 2011:11:11 10:10:10.888888 round 0 -> 2011:11:11 10:10:11 // so 2011:11:11 10:10:10.888888 round 0 -> 2011:11:11 10:10:11
// and 2011:11:11 10:10:10.111111 round 0 -> 2011:11:11 10:10:10 // and 2011:11:11 10:10:10.111111 round 0 -> 2011:11:11 10:10:10
func RoundFrac(t gotime.Time, fsp int) (gotime.Time, error) { func RoundFrac(t gotime.Time, fsp int8) (gotime.Time, error) {
_, err := CheckFsp(fsp) _, err := CheckFsp(int(fsp))
if err != nil { if err != nil {
return t, errors.Trace(err) return t, errors.Trace(err)
} }
return t.Round(gotime.Duration(math.Pow10(9-fsp)) * gotime.Nanosecond), nil return t.Round(gotime.Duration(math.Pow10(9-int(fsp))) * gotime.Nanosecond), nil
} }
// TruncateFrac truncates fractional seconds precision with new fsp and returns a new one. // TruncateFrac truncates fractional seconds precision with new fsp and returns a new one.
// 2011:11:11 10:10:10.888888 round 0 -> 2011:11:11 10:10:10 // 2011:11:11 10:10:10.888888 round 0 -> 2011:11:11 10:10:10
// 2011:11:11 10:10:10.111111 round 0 -> 2011:11:11 10:10:10 // 2011:11:11 10:10:10.111111 round 0 -> 2011:11:11 10:10:10
func TruncateFrac(t gotime.Time, fsp int) (gotime.Time, error) { func TruncateFrac(t gotime.Time, fsp int8) (gotime.Time, error) {
if _, err := CheckFsp(fsp); err != nil { if _, err := CheckFsp(int(fsp)); err != nil {
return t, err return t, err
} }
return t.Truncate(gotime.Duration(math.Pow10(9-fsp)) * gotime.Nanosecond), nil return t.Truncate(gotime.Duration(math.Pow10(9-int(fsp))) * gotime.Nanosecond), nil
} }
// ToPackedUint encodes Time to a packed uint64 value. // ToPackedUint encodes Time to a packed uint64 value.
...@@ -683,7 +684,7 @@ func splitDateTime(format string) (seps []string, fracStr string) { ...@@ -683,7 +684,7 @@ func splitDateTime(format string) (seps []string, fracStr string) {
} }
// See https://dev.mysql.com/doc/refman/5.7/en/date-and-time-literals.html. // See https://dev.mysql.com/doc/refman/5.7/en/date-and-time-literals.html.
func parseDatetime(sc *stmtctx.StatementContext, str string, fsp int, isFloat bool) (Time, error) { func parseDatetime(sc *stmtctx.StatementContext, str string, fsp int8, isFloat bool) (Time, error) {
// Try to split str with delimiter. // Try to split str with delimiter.
// TODO: only punctuation can be the delimiter for date parts or time parts. // TODO: only punctuation can be the delimiter for date parts or time parts.
// But only space and T can be the delimiter between the date and time part. // But only space and T can be the delimiter between the date and time part.
...@@ -896,7 +897,7 @@ type Duration struct { ...@@ -896,7 +897,7 @@ type Duration struct {
gotime.Duration gotime.Duration
// Fsp is short for Fractional Seconds Precision. // Fsp is short for Fractional Seconds Precision.
// See http://dev.mysql.com/doc/refman/5.7/en/fractional-seconds.html // See http://dev.mysql.com/doc/refman/5.7/en/fractional-seconds.html
Fsp int Fsp int8
} }
//Add adds d to d, returns a duration value. //Add adds d to d, returns a duration value.
...@@ -1002,8 +1003,8 @@ func (d Duration) ConvertToTime(sc *stmtctx.StatementContext, tp uint8) (Time, e ...@@ -1002,8 +1003,8 @@ func (d Duration) ConvertToTime(sc *stmtctx.StatementContext, tp uint8) (Time, e
// We will use the “round half up” rule, e.g, >= 0.5 -> 1, < 0.5 -> 0, // We will use the “round half up” rule, e.g, >= 0.5 -> 1, < 0.5 -> 0,
// so 10:10:10.999999 round 0 -> 10:10:11 // so 10:10:10.999999 round 0 -> 10:10:11
// and 10:10:10.000000 round 0 -> 10:10:10 // and 10:10:10.000000 round 0 -> 10:10:10
func (d Duration) RoundFrac(fsp int) (Duration, error) { func (d Duration) RoundFrac(fsp int8) (Duration, error) {
fsp, err := CheckFsp(fsp) fsp, err := CheckFsp(int(fsp))
if err != nil { if err != nil {
return d, errors.Trace(err) return d, errors.Trace(err)
} }
...@@ -1013,7 +1014,7 @@ func (d Duration) RoundFrac(fsp int) (Duration, error) { ...@@ -1013,7 +1014,7 @@ func (d Duration) RoundFrac(fsp int) (Duration, error) {
} }
n := gotime.Date(0, 0, 0, 0, 0, 0, 0, gotime.Local) n := gotime.Date(0, 0, 0, 0, 0, 0, 0, gotime.Local)
nd := n.Add(d.Duration).Round(gotime.Duration(math.Pow10(9-fsp)) * gotime.Nanosecond).Sub(n) nd := n.Add(d.Duration).Round(gotime.Duration(math.Pow10(9-int(fsp))) * gotime.Nanosecond).Sub(n)
return Duration{Duration: nd, Fsp: fsp}, nil return Duration{Duration: nd, Fsp: fsp}, nil
} }
...@@ -1072,7 +1073,7 @@ func (d Duration) MicroSecond() int { ...@@ -1072,7 +1073,7 @@ func (d Duration) MicroSecond() int {
// ParseDuration parses the time form a formatted string with a fractional seconds part, // ParseDuration parses the time form a formatted string with a fractional seconds part,
// returns the duration type Time value. // returns the duration type Time value.
// See http://dev.mysql.com/doc/refman/5.7/en/fractional-seconds.html // See http://dev.mysql.com/doc/refman/5.7/en/fractional-seconds.html
func ParseDuration(sc *stmtctx.StatementContext, str string, fsp int) (Duration, error) { func ParseDuration(sc *stmtctx.StatementContext, str string, fsp int8) (Duration, error) {
var ( var (
day, hour, minute, second int day, hour, minute, second int
err error err error
...@@ -1081,7 +1082,7 @@ func ParseDuration(sc *stmtctx.StatementContext, str string, fsp int) (Duration, ...@@ -1081,7 +1082,7 @@ func ParseDuration(sc *stmtctx.StatementContext, str string, fsp int) (Duration,
origStr = str origStr = str
) )
fsp, err = CheckFsp(fsp) fsp, err = CheckFsp(int(fsp))
if err != nil { if err != nil {
return ZeroDuration, errors.Trace(err) return ZeroDuration, errors.Trace(err)
} }
...@@ -1336,17 +1337,17 @@ func parseDateTimeFromNum(sc *stmtctx.StatementContext, num int64) (Time, error) ...@@ -1336,17 +1337,17 @@ func parseDateTimeFromNum(sc *stmtctx.StatementContext, num int64) (Time, error)
// The valid datetime range is from '1000-01-01 00:00:00.000000' to '9999-12-31 23:59:59.999999'. // The valid datetime range is from '1000-01-01 00:00:00.000000' to '9999-12-31 23:59:59.999999'.
// The valid timestamp range is from '1970-01-01 00:00:01.000000' to '2038-01-19 03:14:07.999999'. // The valid timestamp range is from '1970-01-01 00:00:01.000000' to '2038-01-19 03:14:07.999999'.
// The valid date range is from '1000-01-01' to '9999-12-31' // The valid date range is from '1000-01-01' to '9999-12-31'
func ParseTime(sc *stmtctx.StatementContext, str string, tp byte, fsp int) (Time, error) { func ParseTime(sc *stmtctx.StatementContext, str string, tp byte, fsp int8) (Time, error) {
return parseTime(sc, str, tp, fsp, false) return parseTime(sc, str, tp, fsp, false)
} }
// ParseTimeFromFloatString is similar to ParseTime, except that it's used to parse a float converted string. // ParseTimeFromFloatString is similar to ParseTime, except that it's used to parse a float converted string.
func ParseTimeFromFloatString(sc *stmtctx.StatementContext, str string, tp byte, fsp int) (Time, error) { func ParseTimeFromFloatString(sc *stmtctx.StatementContext, str string, tp byte, fsp int8) (Time, error) {
return parseTime(sc, str, tp, fsp, true) return parseTime(sc, str, tp, fsp, true)
} }
func parseTime(sc *stmtctx.StatementContext, str string, tp byte, fsp int, isFloat bool) (Time, error) { func parseTime(sc *stmtctx.StatementContext, str string, tp byte, fsp int8, isFloat bool) (Time, error) {
fsp, err := CheckFsp(fsp) fsp, err := CheckFsp(int(fsp))
if err != nil { if err != nil {
return Time{Time: ZeroTime, Type: tp}, errors.Trace(err) return Time{Time: ZeroTime, Type: tp}, errors.Trace(err)
} }
...@@ -1381,8 +1382,8 @@ func ParseDate(sc *stmtctx.StatementContext, str string) (Time, error) { ...@@ -1381,8 +1382,8 @@ func ParseDate(sc *stmtctx.StatementContext, str string) (Time, error) {
// ParseTimeFromNum parses a formatted int64, // ParseTimeFromNum parses a formatted int64,
// returns the value which type is tp. // returns the value which type is tp.
func ParseTimeFromNum(sc *stmtctx.StatementContext, num int64, tp byte, fsp int) (Time, error) { func ParseTimeFromNum(sc *stmtctx.StatementContext, num int64, tp byte, fsp int8) (Time, error) {
fsp, err := CheckFsp(fsp) fsp, err := CheckFsp(int(fsp))
if err != nil { if err != nil {
return Time{Time: ZeroTime, Type: tp}, errors.Trace(err) return Time{Time: ZeroTime, Type: tp}, errors.Trace(err)
} }
...@@ -1649,6 +1650,7 @@ func parseSingleTimeValue(unit string, format string, strictCheck bool) (int64, ...@@ -1649,6 +1650,7 @@ func parseSingleTimeValue(unit string, format string, strictCheck bool) (int64,
if unit != "SECOND" { if unit != "SECOND" {
err = ErrTruncatedWrongValue.GenWithStackByArgs(format) err = ErrTruncatedWrongValue.GenWithStackByArgs(format)
} }
dv *= sign
} }
switch strings.ToUpper(unit) { switch strings.ToUpper(unit) {
case "MICROSECOND": case "MICROSECOND":
...@@ -1763,7 +1765,7 @@ func parseTimeValue(format string, index, cnt int) (int64, int64, int64, int64, ...@@ -1763,7 +1765,7 @@ func parseTimeValue(format string, index, cnt int) (int64, int64, int64, int64,
if err != nil { if err != nil {
return 0, 0, 0, 0, ErrIncorrectDatetimeValue.GenWithStackByArgs(originalFmt) return 0, 0, 0, 0, ErrIncorrectDatetimeValue.GenWithStackByArgs(originalFmt)
} }
microseconds, err := strconv.ParseInt(alignFrac(fields[MicrosecondIndex], MaxFsp), 10, 64) microseconds, err := strconv.ParseInt(alignFrac(fields[MicrosecondIndex], int(MaxFsp)), 10, 64)
if err != nil { if err != nil {
return 0, 0, 0, 0, ErrIncorrectDatetimeValue.GenWithStackByArgs(originalFmt) return 0, 0, 0, 0, ErrIncorrectDatetimeValue.GenWithStackByArgs(originalFmt)
} }
...@@ -1987,37 +1989,38 @@ func (t Time) convertDateFormat(b rune, buf *bytes.Buffer) error { ...@@ -1987,37 +1989,38 @@ func (t Time) convertDateFormat(b rune, buf *bytes.Buffer) error {
} }
buf.WriteString(MonthNames[m-1]) buf.WriteString(MonthNames[m-1])
case 'm': case 'm':
fmt.Fprintf(buf, "%02d", t.Time.Month()) buf.WriteString(FormatIntWidthN(t.Time.Month(), 2))
case 'c': case 'c':
fmt.Fprintf(buf, "%d", t.Time.Month()) buf.WriteString(strconv.FormatInt(int64(t.Time.Month()), 10))
case 'D': case 'D':
fmt.Fprintf(buf, "%d%s", t.Time.Day(), abbrDayOfMonth(t.Time.Day())) buf.WriteString(strconv.FormatInt(int64(t.Time.Day()), 10))
buf.WriteString(abbrDayOfMonth(t.Time.Day()))
case 'd': case 'd':
fmt.Fprintf(buf, "%02d", t.Time.Day()) buf.WriteString(FormatIntWidthN(t.Time.Day(), 2))
case 'e': case 'e':
fmt.Fprintf(buf, "%d", t.Time.Day()) buf.WriteString(strconv.FormatInt(int64(t.Time.Day()), 10))
case 'j': case 'j':
fmt.Fprintf(buf, "%03d", t.Time.YearDay()) fmt.Fprintf(buf, "%03d", t.Time.YearDay())
case 'H': case 'H':
fmt.Fprintf(buf, "%02d", t.Time.Hour()) buf.WriteString(FormatIntWidthN(t.Time.Hour(), 2))
case 'k': case 'k':
fmt.Fprintf(buf, "%d", t.Time.Hour()) buf.WriteString(strconv.FormatInt(int64(t.Time.Hour()), 10))
case 'h', 'I': case 'h', 'I':
t := t.Time.Hour() t := t.Time.Hour()
if t%12 == 0 { if t%12 == 0 {
fmt.Fprintf(buf, "%02d", 12) buf.WriteString("12")
} else { } else {
fmt.Fprintf(buf, "%02d", t%12) buf.WriteString(FormatIntWidthN(t%12, 2))
} }
case 'l': case 'l':
t := t.Time.Hour() t := t.Time.Hour()
if t%12 == 0 { if t%12 == 0 {
fmt.Fprintf(buf, "%d", 12) buf.WriteString("12")
} else { } else {
fmt.Fprintf(buf, "%d", t%12) buf.WriteString(strconv.FormatInt(int64(t%12), 10))
} }
case 'i': case 'i':
fmt.Fprintf(buf, "%02d", t.Time.Minute()) buf.WriteString(FormatIntWidthN(t.Time.Minute(), 2))
case 'p': case 'p':
hour := t.Time.Hour() hour := t.Time.Hour()
if hour/12%2 == 0 { if hour/12%2 == 0 {
...@@ -2041,46 +2044,46 @@ func (t Time) convertDateFormat(b rune, buf *bytes.Buffer) error { ...@@ -2041,46 +2044,46 @@ func (t Time) convertDateFormat(b rune, buf *bytes.Buffer) error {
case 'T': case 'T':
fmt.Fprintf(buf, "%02d:%02d:%02d", t.Time.Hour(), t.Time.Minute(), t.Time.Second()) fmt.Fprintf(buf, "%02d:%02d:%02d", t.Time.Hour(), t.Time.Minute(), t.Time.Second())
case 'S', 's': case 'S', 's':
fmt.Fprintf(buf, "%02d", t.Time.Second()) buf.WriteString(FormatIntWidthN(t.Time.Second(), 2))
case 'f': case 'f':
fmt.Fprintf(buf, "%06d", t.Time.Microsecond()) fmt.Fprintf(buf, "%06d", t.Time.Microsecond())
case 'U': case 'U':
w := t.Time.Week(0) w := t.Time.Week(0)
fmt.Fprintf(buf, "%02d", w) buf.WriteString(FormatIntWidthN(w, 2))
case 'u': case 'u':
w := t.Time.Week(1) w := t.Time.Week(1)
fmt.Fprintf(buf, "%02d", w) buf.WriteString(FormatIntWidthN(w, 2))
case 'V': case 'V':
w := t.Time.Week(2) w := t.Time.Week(2)
fmt.Fprintf(buf, "%02d", w) buf.WriteString(FormatIntWidthN(w, 2))
case 'v': case 'v':
_, w := t.Time.YearWeek(3) _, w := t.Time.YearWeek(3)
fmt.Fprintf(buf, "%02d", w) buf.WriteString(FormatIntWidthN(w, 2))
case 'a': case 'a':
weekday := t.Time.Weekday() weekday := t.Time.Weekday()
buf.WriteString(abbrevWeekdayName[weekday]) buf.WriteString(abbrevWeekdayName[weekday])
case 'W': case 'W':
buf.WriteString(t.Time.Weekday().String()) buf.WriteString(t.Time.Weekday().String())
case 'w': case 'w':
fmt.Fprintf(buf, "%d", t.Time.Weekday()) buf.WriteString(strconv.FormatInt(int64(t.Time.Weekday()), 10))
case 'X': case 'X':
year, _ := t.Time.YearWeek(2) year, _ := t.Time.YearWeek(2)
if year < 0 { if year < 0 {
fmt.Fprintf(buf, "%v", uint64(math.MaxUint32)) buf.WriteString(strconv.FormatUint(uint64(math.MaxUint32), 10))
} else { } else {
fmt.Fprintf(buf, "%04d", year) buf.WriteString(FormatIntWidthN(year, 4))
} }
case 'x': case 'x':
year, _ := t.Time.YearWeek(3) year, _ := t.Time.YearWeek(3)
if year < 0 { if year < 0 {
fmt.Fprintf(buf, "%v", uint64(math.MaxUint32)) buf.WriteString(strconv.FormatUint(uint64(math.MaxUint32), 10))
} else { } else {
fmt.Fprintf(buf, "%04d", year) buf.WriteString(FormatIntWidthN(year, 4))
} }
case 'Y': case 'Y':
fmt.Fprintf(buf, "%04d", t.Time.Year()) buf.WriteString(FormatIntWidthN(t.Time.Year(), 4))
case 'y': case 'y':
str := fmt.Sprintf("%04d", t.Time.Year()) str := FormatIntWidthN(t.Time.Year(), 4)
buf.WriteString(str[2:]) buf.WriteString(str[2:])
default: default:
buf.WriteRune(b) buf.WriteRune(b)
...@@ -2089,6 +2092,19 @@ func (t Time) convertDateFormat(b rune, buf *bytes.Buffer) error { ...@@ -2089,6 +2092,19 @@ func (t Time) convertDateFormat(b rune, buf *bytes.Buffer) error {
return nil return nil
} }
// FormatIntWidthN formats num in base 10, left-padding the result with '0'
// until it is at least n characters wide. If the plain decimal form already
// has n or more characters, it is returned unchanged.
func FormatIntWidthN(num, n int) string {
	str := strconv.FormatInt(int64(num), 10)
	for len(str) < n {
		str = "0" + str
	}
	return str
}
func abbrDayOfMonth(day int) string { func abbrDayOfMonth(day int) string {
var str string var str string
switch day { switch day {
...@@ -2338,7 +2354,7 @@ func hour24TwoDigits(t *MysqlTime, input string, ctx map[string]int) (string, bo ...@@ -2338,7 +2354,7 @@ func hour24TwoDigits(t *MysqlTime, input string, ctx map[string]int) (string, bo
if !succ || v >= 24 { if !succ || v >= 24 {
return input, false return input, false
} }
t.hour = v t.hour = uint32(v)
return input[2:], true return input[2:], true
} }
...@@ -2391,9 +2407,9 @@ func time12Hour(t *MysqlTime, input string, ctx map[string]int) (string, bool) { ...@@ -2391,9 +2407,9 @@ func time12Hour(t *MysqlTime, input string, ctx map[string]int) (string, bool) {
remain := skipWhiteSpace(input[8:]) remain := skipWhiteSpace(input[8:])
switch { switch {
case strings.HasPrefix(remain, "AM"): case strings.HasPrefix(remain, "AM"):
t.hour = hour t.hour = uint32(hour)
case strings.HasPrefix(remain, "PM"): case strings.HasPrefix(remain, "PM"):
t.hour = hour + 12 t.hour = uint32(hour + 12)
default: default:
return input, false return input, false
} }
...@@ -2426,7 +2442,7 @@ func time24Hour(t *MysqlTime, input string, ctx map[string]int) (string, bool) { ...@@ -2426,7 +2442,7 @@ func time24Hour(t *MysqlTime, input string, ctx map[string]int) (string, bool) {
return input, false return input, false
} }
t.hour = hour t.hour = uint32(hour)
t.minute = uint8(minute) t.minute = uint8(minute)
t.second = uint8(second) t.second = uint8(second)
return input[8:], true return input[8:], true
...@@ -2507,7 +2523,7 @@ func hour24Numeric(t *MysqlTime, input string, ctx map[string]int) (string, bool ...@@ -2507,7 +2523,7 @@ func hour24Numeric(t *MysqlTime, input string, ctx map[string]int) (string, bool
if !ok || v > 23 { if !ok || v > 23 {
return input, false return input, false
} }
t.hour = v t.hour = uint32(v)
ctx["%H"] = v ctx["%H"] = v
return input[length:], true return input[length:], true
} }
...@@ -2521,7 +2537,7 @@ func hour12Numeric(t *MysqlTime, input string, ctx map[string]int) (string, bool ...@@ -2521,7 +2537,7 @@ func hour12Numeric(t *MysqlTime, input string, ctx map[string]int) (string, bool
if !ok || v > 12 || v == 0 { if !ok || v > 12 || v == 0 {
return input, false return input, false
} }
t.hour = v t.hour = uint32(v)
return input[length:], true return input[length:], true
} }
......
...@@ -26,8 +26,10 @@ import ( ...@@ -26,8 +26,10 @@ import (
"go.uber.org/zap" "go.uber.org/zap"
) )
type commitDetailCtxKeyType struct{}
// CommitDetailCtxKey presents CommitDetail info key in context. // CommitDetailCtxKey presents CommitDetail info key in context.
const CommitDetailCtxKey = "commitDetail" var CommitDetailCtxKey = commitDetailCtxKeyType{}
// ExecDetails contains execution detail information. // ExecDetails contains execution detail information.
type ExecDetails struct { type ExecDetails struct {
......
...@@ -24,15 +24,8 @@ type MutableString string ...@@ -24,15 +24,8 @@ type MutableString string
// String converts slice to MutableString without copy. // String converts slice to MutableString without copy.
// The MutableString can be converts to string without copy. // The MutableString can be converts to string without copy.
// Use it at your own risk. // Use it at your own risk.
func String(b []byte) (s MutableString) { func String(b []byte) MutableString {
if len(b) == 0 { return *(*MutableString)(unsafe.Pointer(&b))
return ""
}
pbytes := (*reflect.SliceHeader)(unsafe.Pointer(&b))
pstring := (*reflect.StringHeader)(unsafe.Pointer(&s))
pstring.Data = pbytes.Data
pstring.Len = pbytes.Len
return
} }
// Slice converts string to slice without copy. // Slice converts string to slice without copy.
......
// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package logutil
import (
"bytes"
"encoding/hex"
"fmt"
"io"
"reflect"
"strings"
"github.com/golang/protobuf/proto"
)
// Hex defines a fmt.Stringer for proto.Message.
// We can't define the String() method on proto.Message, but we can wrap it.
// The returned value renders the message lazily (only when String is called),
// with struct fields listed by name and byte slices hex-encoded.
func Hex(msg proto.Message) fmt.Stringer {
	return hexStringer{msg}
}

// hexStringer wraps a proto.Message solely to attach the String method below.
type hexStringer struct {
	proto.Message
}
// String implements fmt.Stringer by pretty-printing the wrapped message.
func (h hexStringer) String() string {
	var out bytes.Buffer
	prettyPrint(&out, reflect.ValueOf(h.Message))
	return out.String()
}
func prettyPrint(w io.Writer, val reflect.Value) {
tp := val.Type()
switch val.Kind() {
case reflect.Slice:
elemType := tp.Elem()
if elemType.Kind() == reflect.Uint8 {
fmt.Fprintf(w, "%s", hex.EncodeToString(val.Bytes()))
} else {
fmt.Fprintf(w, "%s", val.Interface())
}
case reflect.Struct:
fmt.Fprintf(w, "{")
for i := 0; i < val.NumField(); i++ {
fv := val.Field(i)
ft := tp.Field(i)
if strings.HasPrefix(ft.Name, "XXX_") {
continue
}
if i != 0 {
fmt.Fprintf(w, " ")
}
fmt.Fprintf(w, "%s:", ft.Name)
prettyPrint(w, fv)
}
fmt.Fprintf(w, "}")
case reflect.Ptr:
if val.IsNil() {
fmt.Fprintf(w, "%v", val.Interface())
} else {
prettyPrint(w, reflect.Indirect(val))
}
default:
fmt.Fprintf(w, "%v", val.Interface())
}
}
...@@ -24,6 +24,8 @@ import ( ...@@ -24,6 +24,8 @@ import (
"strings" "strings"
"time" "time"
"github.com/opentracing/opentracing-go"
tlog "github.com/opentracing/opentracing-go/log"
"github.com/pingcap/errors" "github.com/pingcap/errors"
zaplog "github.com/pingcap/log" zaplog "github.com/pingcap/log"
log "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus"
...@@ -317,9 +319,9 @@ func SetLevel(level string) error { ...@@ -317,9 +319,9 @@ func SetLevel(level string) error {
return nil return nil
} }
type ctxKeyType int type ctxLogKeyType struct{}
const ctxLogKey ctxKeyType = iota var ctxLogKey = ctxLogKeyType{}
// Logger gets a contextual logger from current context. // Logger gets a contextual logger from current context.
// contextual logger will output common fields from context. // contextual logger will output common fields from context.
...@@ -356,3 +358,27 @@ func WithKeyValue(ctx context.Context, key, value string) context.Context { ...@@ -356,3 +358,27 @@ func WithKeyValue(ctx context.Context, key, value string) context.Context {
} }
return context.WithValue(ctx, ctxLogKey, logger.With(zap.String(key, value))) return context.WithValue(ctx, ctxLogKey, logger.With(zap.String(key, value)))
} }
// TraceEventKey presents the TraceEventKey in span log.
const TraceEventKey = "event"
// Event records event in current tracing span.
func Event(ctx context.Context, event string) {
if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil {
span.LogFields(tlog.String(TraceEventKey, event))
}
}
// Eventf records event in current tracing span with format support.
func Eventf(ctx context.Context, format string, args ...interface{}) {
if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil {
span.LogFields(tlog.String(TraceEventKey, fmt.Sprintf(format, args...)))
}
}
// SetTag sets tag kv-pair in current tracing span
func SetTag(ctx context.Context, key string, value interface{}) {
if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil {
span.SetTag(key, value)
}
}
...@@ -98,6 +98,18 @@ ...@@ -98,6 +98,18 @@
"revision": "e2ffdb16a802fe2bb95e2e35ff34f0e53aeef34f", "revision": "e2ffdb16a802fe2bb95e2e35ff34f0e53aeef34f",
"revisionTime": "2018-05-06T08:24:08Z" "revisionTime": "2018-05-06T08:24:08Z"
}, },
{
"checksumSHA1": "6EIQaeaWECn3zlechdGkqmIKld4=",
"path": "github.com/opentracing/opentracing-go",
"revision": "135aa78c6f95b4a199daf2f0470d231136cbbd0c",
"revisionTime": "2019-07-04T17:58:13Z"
},
{
"checksumSHA1": "tnkdNJbJxNKuPZMWapP1xhKIIGw=",
"path": "github.com/opentracing/opentracing-go/log",
"revision": "135aa78c6f95b4a199daf2f0470d231136cbbd0c",
"revisionTime": "2019-07-04T17:58:13Z"
},
{ {
"checksumSHA1": "M0UdRpCVjXiuie7PfJQPZ/V1pVI=", "checksumSHA1": "M0UdRpCVjXiuie7PfJQPZ/V1pVI=",
"path": "github.com/percona/go-mysql/query", "path": "github.com/percona/go-mysql/query",
...@@ -117,118 +129,118 @@ ...@@ -117,118 +129,118 @@
"revisionTime": "2019-03-07T07:54:52Z" "revisionTime": "2019-03-07T07:54:52Z"
}, },
{ {
"checksumSHA1": "8XbJFHOYoZvqf3Fq+J4l90DiGlM=", "checksumSHA1": "RK5vW/hPsPk0JDi1atCWaUR8iFo=",
"path": "github.com/pingcap/parser", "path": "github.com/pingcap/parser",
"revision": "5238015a66f827e0d0d01e9a1dc19e4e3338c5bf", "revision": "41d48df058643bdb2e24c64b1685c4e9ff6608f8",
"revisionTime": "2019-07-30T09:13:57Z" "revisionTime": "2019-08-22T02:41:27Z"
}, },
{ {
"checksumSHA1": "a+3CnBMiJTPiAKhRzxW5ybMR6IY=", "checksumSHA1": "WYPpAYqE/lpu4PBR9TCn6UigcTg=",
"path": "github.com/pingcap/parser/ast", "path": "github.com/pingcap/parser/ast",
"revision": "5238015a66f827e0d0d01e9a1dc19e4e3338c5bf", "revision": "41d48df058643bdb2e24c64b1685c4e9ff6608f8",
"revisionTime": "2019-07-30T09:13:57Z" "revisionTime": "2019-08-22T02:41:27Z"
}, },
{ {
"checksumSHA1": "xiv40YqnvHcbIhaEzJqjh5K7ehM=", "checksumSHA1": "xiv40YqnvHcbIhaEzJqjh5K7ehM=",
"path": "github.com/pingcap/parser/auth", "path": "github.com/pingcap/parser/auth",
"revision": "5238015a66f827e0d0d01e9a1dc19e4e3338c5bf", "revision": "41d48df058643bdb2e24c64b1685c4e9ff6608f8",
"revisionTime": "2019-07-30T09:13:57Z" "revisionTime": "2019-08-22T02:41:27Z"
}, },
{ {
"checksumSHA1": "EvDXpplklIXmKqLclzWzaN/uHKQ=", "checksumSHA1": "EvDXpplklIXmKqLclzWzaN/uHKQ=",
"path": "github.com/pingcap/parser/charset", "path": "github.com/pingcap/parser/charset",
"revision": "5238015a66f827e0d0d01e9a1dc19e4e3338c5bf", "revision": "41d48df058643bdb2e24c64b1685c4e9ff6608f8",
"revisionTime": "2019-07-30T09:13:57Z" "revisionTime": "2019-08-22T02:41:27Z"
}, },
{ {
"checksumSHA1": "Aao6Mul/qqogOwPwM2arBKZkYZs=", "checksumSHA1": "Aao6Mul/qqogOwPwM2arBKZkYZs=",
"path": "github.com/pingcap/parser/format", "path": "github.com/pingcap/parser/format",
"revision": "5238015a66f827e0d0d01e9a1dc19e4e3338c5bf", "revision": "41d48df058643bdb2e24c64b1685c4e9ff6608f8",
"revisionTime": "2019-07-30T09:13:57Z" "revisionTime": "2019-08-22T02:41:27Z"
}, },
{ {
"checksumSHA1": "f14oFKfX0pSkUM9w9m94eZG5vEw=", "checksumSHA1": "GAJ7IUg0t8DCKJbJQxJLkklEj2E=",
"path": "github.com/pingcap/parser/model", "path": "github.com/pingcap/parser/model",
"revision": "5238015a66f827e0d0d01e9a1dc19e4e3338c5bf", "revision": "41d48df058643bdb2e24c64b1685c4e9ff6608f8",
"revisionTime": "2019-07-30T09:13:57Z" "revisionTime": "2019-08-22T02:41:27Z"
}, },
{ {
"checksumSHA1": "JcR/7pmocSZK4K6tDK2zO54DJWg=", "checksumSHA1": "WMkc5bRIYYfQdu9lBlVGyKTGIyg=",
"path": "github.com/pingcap/parser/mysql", "path": "github.com/pingcap/parser/mysql",
"revision": "5238015a66f827e0d0d01e9a1dc19e4e3338c5bf", "revision": "41d48df058643bdb2e24c64b1685c4e9ff6608f8",
"revisionTime": "2019-07-30T09:13:57Z" "revisionTime": "2019-08-22T02:41:27Z"
}, },
{ {
"checksumSHA1": "olapD16WCMBU9vrA5PtlERGFfXw=", "checksumSHA1": "olapD16WCMBU9vrA5PtlERGFfXw=",
"path": "github.com/pingcap/parser/opcode", "path": "github.com/pingcap/parser/opcode",
"revision": "5238015a66f827e0d0d01e9a1dc19e4e3338c5bf", "revision": "41d48df058643bdb2e24c64b1685c4e9ff6608f8",
"revisionTime": "2019-07-30T09:13:57Z" "revisionTime": "2019-08-22T02:41:27Z"
}, },
{ {
"checksumSHA1": "L6rzy3sJU1RPf7AkJN+0zcwW/YY=", "checksumSHA1": "L6rzy3sJU1RPf7AkJN+0zcwW/YY=",
"path": "github.com/pingcap/parser/terror", "path": "github.com/pingcap/parser/terror",
"revision": "5238015a66f827e0d0d01e9a1dc19e4e3338c5bf", "revision": "41d48df058643bdb2e24c64b1685c4e9ff6608f8",
"revisionTime": "2019-07-30T09:13:57Z" "revisionTime": "2019-08-22T02:41:27Z"
}, },
{ {
"checksumSHA1": "nWkzWKjheFi0/Ov/0rhc4CUMZLo=", "checksumSHA1": "u1Lmm4Fa3su4ElZMN4w0hPzFZl4=",
"path": "github.com/pingcap/parser/types", "path": "github.com/pingcap/parser/types",
"revision": "5238015a66f827e0d0d01e9a1dc19e4e3338c5bf", "revision": "41d48df058643bdb2e24c64b1685c4e9ff6608f8",
"revisionTime": "2019-07-30T09:13:57Z" "revisionTime": "2019-08-22T02:41:27Z"
}, },
{ {
"checksumSHA1": "KHvXxhiZAHkE8APuMlaAXDOX6eU=", "checksumSHA1": "cbEwgTkDlGpIIIqmNAuWrxsUwKw=",
"path": "github.com/pingcap/tidb/sessionctx/stmtctx", "path": "github.com/pingcap/tidb/sessionctx/stmtctx",
"revision": "13778fe51b713f005e1de848e7994f0a8031678f", "revision": "6f76bbe1f75e01dc40f2d0478d484aa9df2b284c",
"revisionTime": "2019-07-31T03:50:10Z" "revisionTime": "2019-08-22T02:51:25Z"
}, },
{ {
"checksumSHA1": "6pIJUxO/VoKsIdWibgApSW91MRg=", "checksumSHA1": "erB64jt/DCEoRs+KrywwHGJG2/k=",
"path": "github.com/pingcap/tidb/types", "path": "github.com/pingcap/tidb/types",
"revision": "13778fe51b713f005e1de848e7994f0a8031678f", "revision": "6f76bbe1f75e01dc40f2d0478d484aa9df2b284c",
"revisionTime": "2019-07-31T03:50:10Z" "revisionTime": "2019-08-22T02:51:25Z"
}, },
{ {
"checksumSHA1": "gKBD02jzm/d7gn2kX7pXLi+M2ZY=", "checksumSHA1": "gKBD02jzm/d7gn2kX7pXLi+M2ZY=",
"path": "github.com/pingcap/tidb/types/json", "path": "github.com/pingcap/tidb/types/json",
"revision": "13778fe51b713f005e1de848e7994f0a8031678f", "revision": "6f76bbe1f75e01dc40f2d0478d484aa9df2b284c",
"revisionTime": "2019-07-31T03:50:10Z" "revisionTime": "2019-08-22T02:51:25Z"
}, },
{ {
"checksumSHA1": "45zWX5Q6D6aTEWtc4p/lbD9WD4o=", "checksumSHA1": "45zWX5Q6D6aTEWtc4p/lbD9WD4o=",
"path": "github.com/pingcap/tidb/types/parser_driver", "path": "github.com/pingcap/tidb/types/parser_driver",
"revision": "13778fe51b713f005e1de848e7994f0a8031678f", "revision": "6f76bbe1f75e01dc40f2d0478d484aa9df2b284c",
"revisionTime": "2019-07-31T03:50:10Z" "revisionTime": "2019-08-22T02:51:25Z"
}, },
{ {
"checksumSHA1": "dI3bZpUsujM1shEDvORNQj5FCN0=", "checksumSHA1": "q5aOzPGCVZNkrru6v6+uImWm1eA=",
"path": "github.com/pingcap/tidb/util/execdetails", "path": "github.com/pingcap/tidb/util/execdetails",
"revision": "13778fe51b713f005e1de848e7994f0a8031678f", "revision": "6f76bbe1f75e01dc40f2d0478d484aa9df2b284c",
"revisionTime": "2019-07-31T03:50:10Z" "revisionTime": "2019-08-22T02:51:25Z"
}, },
{ {
"checksumSHA1": "RdbHgQWMHjRtKjqPcTX81k1V3sw=", "checksumSHA1": "EFDXphVEI9ohnPky64fc+0lkRkw=",
"path": "github.com/pingcap/tidb/util/hack", "path": "github.com/pingcap/tidb/util/hack",
"revision": "13778fe51b713f005e1de848e7994f0a8031678f", "revision": "6f76bbe1f75e01dc40f2d0478d484aa9df2b284c",
"revisionTime": "2019-07-31T03:50:10Z" "revisionTime": "2019-08-22T02:51:25Z"
}, },
{ {
"checksumSHA1": "5DVxTRYAXrCkrtmTqi/fZfY/Zfk=", "checksumSHA1": "fDbwnQlRCKnr5y6MY799BEd4WlQ=",
"path": "github.com/pingcap/tidb/util/logutil", "path": "github.com/pingcap/tidb/util/logutil",
"revision": "13778fe51b713f005e1de848e7994f0a8031678f", "revision": "6f76bbe1f75e01dc40f2d0478d484aa9df2b284c",
"revisionTime": "2019-07-31T03:50:10Z" "revisionTime": "2019-08-22T02:51:25Z"
}, },
{ {
"checksumSHA1": "OveQu0ABBJmMEwmmthqSRQC2Ef0=", "checksumSHA1": "OveQu0ABBJmMEwmmthqSRQC2Ef0=",
"path": "github.com/pingcap/tidb/util/math", "path": "github.com/pingcap/tidb/util/math",
"revision": "13778fe51b713f005e1de848e7994f0a8031678f", "revision": "6f76bbe1f75e01dc40f2d0478d484aa9df2b284c",
"revisionTime": "2019-07-31T03:50:10Z" "revisionTime": "2019-08-22T02:51:25Z"
}, },
{ {
"checksumSHA1": "loL2JgZDLapEOgfM/XUJI5f0HVs=", "checksumSHA1": "loL2JgZDLapEOgfM/XUJI5f0HVs=",
"path": "github.com/pingcap/tidb/util/memory", "path": "github.com/pingcap/tidb/util/memory",
"revision": "13778fe51b713f005e1de848e7994f0a8031678f", "revision": "6f76bbe1f75e01dc40f2d0478d484aa9df2b284c",
"revisionTime": "2019-07-31T03:50:10Z" "revisionTime": "2019-08-22T02:51:25Z"
}, },
{ {
"checksumSHA1": "QPIBwDNUFF5Whrnd41S3mkKa4gQ=", "checksumSHA1": "QPIBwDNUFF5Whrnd41S3mkKa4gQ=",
...@@ -485,68 +497,68 @@ ...@@ -485,68 +497,68 @@
{ {
"checksumSHA1": "aKn1oKcY74N8TRLm3Ayt7Q4bbI4=", "checksumSHA1": "aKn1oKcY74N8TRLm3Ayt7Q4bbI4=",
"path": "vitess.io/vitess/go/bytes2", "path": "vitess.io/vitess/go/bytes2",
"revision": "f93c96c738d7d5bbbcdc03c828f8bf0a5ba16250", "revision": "b5207f0d590a8b596a9b465ee378be182459300f",
"revisionTime": "2019-07-30T06:18:30Z" "revisionTime": "2019-08-21T22:46:46Z"
}, },
{ {
"checksumSHA1": "bhE6CGQgZTIgLPp9lnvlKW/47xc=", "checksumSHA1": "bhE6CGQgZTIgLPp9lnvlKW/47xc=",
"path": "vitess.io/vitess/go/hack", "path": "vitess.io/vitess/go/hack",
"revision": "f93c96c738d7d5bbbcdc03c828f8bf0a5ba16250", "revision": "b5207f0d590a8b596a9b465ee378be182459300f",
"revisionTime": "2019-07-30T06:18:30Z" "revisionTime": "2019-08-21T22:46:46Z"
}, },
{ {
"checksumSHA1": "2m7CYdLr+epKNLqWaGHkinr3k7w=", "checksumSHA1": "8zh04M7R0JjzpE+w6/gxHdgZrJg=",
"path": "vitess.io/vitess/go/sqltypes", "path": "vitess.io/vitess/go/sqltypes",
"revision": "f93c96c738d7d5bbbcdc03c828f8bf0a5ba16250", "revision": "b5207f0d590a8b596a9b465ee378be182459300f",
"revisionTime": "2019-07-30T06:18:30Z" "revisionTime": "2019-08-21T22:46:46Z"
}, },
{ {
"checksumSHA1": "vAIRxI6MHsq3x1hLQwIyw5AvqtI=", "checksumSHA1": "vAIRxI6MHsq3x1hLQwIyw5AvqtI=",
"path": "vitess.io/vitess/go/vt/log", "path": "vitess.io/vitess/go/vt/log",
"revision": "f93c96c738d7d5bbbcdc03c828f8bf0a5ba16250", "revision": "b5207f0d590a8b596a9b465ee378be182459300f",
"revisionTime": "2019-07-30T06:18:30Z" "revisionTime": "2019-08-21T22:46:46Z"
}, },
{ {
"checksumSHA1": "/0K9CBbInkAhioqKX9ocBrJ6AKE=", "checksumSHA1": "//MHnGEq9xApvIMdwQaRrQf5ZWo=",
"path": "vitess.io/vitess/go/vt/proto/binlogdata", "path": "vitess.io/vitess/go/vt/proto/binlogdata",
"revision": "f93c96c738d7d5bbbcdc03c828f8bf0a5ba16250", "revision": "b5207f0d590a8b596a9b465ee378be182459300f",
"revisionTime": "2019-07-30T06:18:30Z" "revisionTime": "2019-08-21T22:46:46Z"
}, },
{ {
"checksumSHA1": "87Zndvk3Y+M+QxMx3uFa0iSbvWY=", "checksumSHA1": "87Zndvk3Y+M+QxMx3uFa0iSbvWY=",
"path": "vitess.io/vitess/go/vt/proto/query", "path": "vitess.io/vitess/go/vt/proto/query",
"revision": "f93c96c738d7d5bbbcdc03c828f8bf0a5ba16250", "revision": "b5207f0d590a8b596a9b465ee378be182459300f",
"revisionTime": "2019-07-30T06:18:30Z" "revisionTime": "2019-08-21T22:46:46Z"
}, },
{ {
"checksumSHA1": "xpcb9NfXMEeHhEPStbJntIfa5GQ=", "checksumSHA1": "xpcb9NfXMEeHhEPStbJntIfa5GQ=",
"path": "vitess.io/vitess/go/vt/proto/topodata", "path": "vitess.io/vitess/go/vt/proto/topodata",
"revision": "f93c96c738d7d5bbbcdc03c828f8bf0a5ba16250", "revision": "b5207f0d590a8b596a9b465ee378be182459300f",
"revisionTime": "2019-07-30T06:18:30Z" "revisionTime": "2019-08-21T22:46:46Z"
}, },
{ {
"checksumSHA1": "Bv8lucvoH9AnJSYiWX8MIrJl4zY=", "checksumSHA1": "Bv8lucvoH9AnJSYiWX8MIrJl4zY=",
"path": "vitess.io/vitess/go/vt/proto/vtgate", "path": "vitess.io/vitess/go/vt/proto/vtgate",
"revision": "f93c96c738d7d5bbbcdc03c828f8bf0a5ba16250", "revision": "b5207f0d590a8b596a9b465ee378be182459300f",
"revisionTime": "2019-07-30T06:18:30Z" "revisionTime": "2019-08-21T22:46:46Z"
}, },
{ {
"checksumSHA1": "qz32abYdmm9NfKTc++K0l1EvXXM=", "checksumSHA1": "qz32abYdmm9NfKTc++K0l1EvXXM=",
"path": "vitess.io/vitess/go/vt/proto/vtrpc", "path": "vitess.io/vitess/go/vt/proto/vtrpc",
"revision": "f93c96c738d7d5bbbcdc03c828f8bf0a5ba16250", "revision": "b5207f0d590a8b596a9b465ee378be182459300f",
"revisionTime": "2019-07-30T06:18:30Z" "revisionTime": "2019-08-21T22:46:46Z"
}, },
{ {
"checksumSHA1": "9Fy+Gm//g50wu30nICOF7HMq4po=", "checksumSHA1": "0SPe/oMz50OW+yC+DGV4UJpjZ3Y=",
"path": "vitess.io/vitess/go/vt/sqlparser", "path": "vitess.io/vitess/go/vt/sqlparser",
"revision": "f93c96c738d7d5bbbcdc03c828f8bf0a5ba16250", "revision": "b5207f0d590a8b596a9b465ee378be182459300f",
"revisionTime": "2019-07-30T06:18:30Z" "revisionTime": "2019-08-21T22:46:46Z"
}, },
{ {
"checksumSHA1": "z9+F/lA1Xrl5S16LKssUH8VL6hs=", "checksumSHA1": "z9+F/lA1Xrl5S16LKssUH8VL6hs=",
"path": "vitess.io/vitess/go/vt/vterrors", "path": "vitess.io/vitess/go/vt/vterrors",
"revision": "f93c96c738d7d5bbbcdc03c828f8bf0a5ba16250", "revision": "b5207f0d590a8b596a9b465ee378be182459300f",
"revisionTime": "2019-07-30T06:18:30Z" "revisionTime": "2019-08-21T22:46:46Z"
} }
], ],
"rootPath": "github.com/XiaoMi/soar" "rootPath": "github.com/XiaoMi/soar"
......
...@@ -167,6 +167,7 @@ var mysqlToType = map[int64]querypb.Type{ ...@@ -167,6 +167,7 @@ var mysqlToType = map[int64]querypb.Type{
11: Time, 11: Time,
12: Datetime, 12: Datetime,
13: Year, 13: Year,
15: VarChar,
16: Bit, 16: Bit,
245: TypeJSON, 245: TypeJSON,
246: Decimal, 246: Decimal,
......
...@@ -739,6 +739,9 @@ type DDL struct { ...@@ -739,6 +739,9 @@ type DDL struct {
// VindexCols is set for AddColVindexStr. // VindexCols is set for AddColVindexStr.
VindexCols []ColIdent VindexCols []ColIdent
// AutoIncSpec is set for AddAutoIncStr.
AutoIncSpec *AutoIncSpec
} }
// DDL strings. // DDL strings.
...@@ -755,6 +758,8 @@ const ( ...@@ -755,6 +758,8 @@ const (
DropVschemaTableStr = "drop vschema table" DropVschemaTableStr = "drop vschema table"
AddColVindexStr = "on table add vindex" AddColVindexStr = "on table add vindex"
DropColVindexStr = "on table drop vindex" DropColVindexStr = "on table drop vindex"
AddSequenceStr = "add sequence"
AddAutoIncStr = "add auto_increment"
// Vindex DDL param to specify the owner of a vindex // Vindex DDL param to specify the owner of a vindex
VindexOwnerStr = "owner" VindexOwnerStr = "owner"
...@@ -813,6 +818,10 @@ func (node *DDL) Format(buf *TrackedBuffer) { ...@@ -813,6 +818,10 @@ func (node *DDL) Format(buf *TrackedBuffer) {
} }
case DropColVindexStr: case DropColVindexStr:
buf.Myprintf("alter vschema on %v drop vindex %v", node.Table, node.VindexSpec.Name) buf.Myprintf("alter vschema on %v drop vindex %v", node.Table, node.VindexSpec.Name)
case AddSequenceStr:
buf.Myprintf("alter vschema add sequence %v", node.Table)
case AddAutoIncStr:
buf.Myprintf("alter vschema on %v add auto_increment %v", node.Table, node.AutoIncSpec)
default: default:
buf.Myprintf("%s table %v", node.Action, node.Table) buf.Myprintf("%s table %v", node.Action, node.Table)
} }
...@@ -1352,6 +1361,23 @@ type VindexSpec struct { ...@@ -1352,6 +1361,23 @@ type VindexSpec struct {
Params []VindexParam Params []VindexParam
} }
// AutoIncSpec defines an auto_increment value for an ADD AUTO_INCREMENT
// statement: Column is generated from the named Sequence table
// (rendered as "<column> using <sequence>").
type AutoIncSpec struct {
	Column   ColIdent  // column that receives auto-generated values
	Sequence TableName // sequence table named in the USING clause
}
// Format formats the node as "<column> using <sequence>".
func (node *AutoIncSpec) Format(buf *TrackedBuffer) {
	buf.Myprintf("%v using %v", node.Column, node.Sequence)
}
// walkSubtree visits the sequence table name and the column identifier.
func (node *AutoIncSpec) walkSubtree(visit Visit) error {
	return Walk(visit, node.Sequence, node.Column)
}
// ParseParams parses the vindex parameter list, pulling out the special-case // ParseParams parses the vindex parameter list, pulling out the special-case
// "owner" parameter // "owner" parameter
func (node *VindexSpec) ParseParams() (string, map[string]string) { func (node *VindexSpec) ParseParams() (string, map[string]string) {
......
...@@ -164,6 +164,7 @@ func skipToEnd(yylex interface{}) { ...@@ -164,6 +164,7 @@ func skipToEnd(yylex interface{}) {
%token <bytes> MAXVALUE PARTITION REORGANIZE LESS THAN PROCEDURE TRIGGER %token <bytes> MAXVALUE PARTITION REORGANIZE LESS THAN PROCEDURE TRIGGER
%token <bytes> VINDEX VINDEXES %token <bytes> VINDEX VINDEXES
%token <bytes> STATUS VARIABLES WARNINGS %token <bytes> STATUS VARIABLES WARNINGS
%token <bytes> SEQUENCE
// Transaction Tokens // Transaction Tokens
%token <bytes> BEGIN START TRANSACTION COMMIT ROLLBACK %token <bytes> BEGIN START TRANSACTION COMMIT ROLLBACK
...@@ -181,7 +182,7 @@ func skipToEnd(yylex interface{}) { ...@@ -181,7 +182,7 @@ func skipToEnd(yylex interface{}) {
%token <bytes> NULLX AUTO_INCREMENT APPROXNUM SIGNED UNSIGNED ZEROFILL %token <bytes> NULLX AUTO_INCREMENT APPROXNUM SIGNED UNSIGNED ZEROFILL
// Supported SHOW tokens // Supported SHOW tokens
%token <bytes> COLLATION DATABASES SCHEMAS TABLES VITESS_KEYSPACES VITESS_SHARDS VITESS_TABLETS VSCHEMA VSCHEMA_TABLES VITESS_TARGET FULL PROCESSLIST COLUMNS FIELDS ENGINES PLUGINS %token <bytes> COLLATION DATABASES TABLES VSCHEMA FULL PROCESSLIST COLUMNS FIELDS ENGINES PLUGINS
// SET tokens // SET tokens
%token <bytes> NAMES CHARSET GLOBAL SESSION ISOLATION LEVEL READ WRITE ONLY REPEATABLE COMMITTED UNCOMMITTED SERIALIZABLE %token <bytes> NAMES CHARSET GLOBAL SESSION ISOLATION LEVEL READ WRITE ONLY REPEATABLE COMMITTED UNCOMMITTED SERIALIZABLE
...@@ -1352,6 +1353,21 @@ alter_statement: ...@@ -1352,6 +1353,21 @@ alter_statement:
}, },
} }
} }
| ALTER VSCHEMA ADD SEQUENCE table_name
{
$$ = &DDL{Action: AddSequenceStr, Table: $5}
}
| ALTER VSCHEMA ON table_name ADD AUTO_INCREMENT sql_id USING table_name
{
$$ = &DDL{
Action: AddAutoIncStr,
Table: $4,
AutoIncSpec: &AutoIncSpec{
Column: $7,
Sequence: $9,
},
}
}
alter_object_type: alter_object_type:
COLUMN COLUMN
...@@ -1499,10 +1515,6 @@ show_statement: ...@@ -1499,10 +1515,6 @@ show_statement:
{ {
$$ = &Show{Type: string($2)} $$ = &Show{Type: string($2)}
} }
| SHOW SCHEMAS ddl_skip_to_end
{
$$ = &Show{Type: string($2)}
}
| SHOW ENGINES | SHOW ENGINES
{ {
$$ = &Show{Type: string($2)} $$ = &Show{Type: string($2)}
...@@ -1560,22 +1572,6 @@ show_statement: ...@@ -1560,22 +1572,6 @@ show_statement:
showCollationFilterOpt := $4 showCollationFilterOpt := $4
$$ = &Show{Type: string($2), ShowCollationFilterOpt: &showCollationFilterOpt} $$ = &Show{Type: string($2), ShowCollationFilterOpt: &showCollationFilterOpt}
} }
| SHOW VITESS_KEYSPACES
{
$$ = &Show{Type: string($2)}
}
| SHOW VITESS_SHARDS
{
$$ = &Show{Type: string($2)}
}
| SHOW VITESS_TABLETS
{
$$ = &Show{Type: string($2)}
}
| SHOW VITESS_TARGET
{
$$ = &Show{Type: string($2)}
}
| SHOW VSCHEMA TABLES | SHOW VSCHEMA TABLES
{ {
$$ = &Show{Type: string($2) + " " + string($3)} $$ = &Show{Type: string($2) + " " + string($3)}
...@@ -1597,6 +1593,10 @@ show_statement: ...@@ -1597,6 +1593,10 @@ show_statement:
* *
* SHOW BINARY LOGS * SHOW BINARY LOGS
* SHOW INVALID * SHOW INVALID
* SHOW VITESS_KEYSPACES
* SHOW VITESS_TABLETS
* SHOW VITESS_SHARDS
* SHOW VITESS_TARGET
*/ */
| SHOW ID ddl_skip_to_end | SHOW ID ddl_skip_to_end
{ {
...@@ -3364,7 +3364,7 @@ non_reserved_keyword: ...@@ -3364,7 +3364,7 @@ non_reserved_keyword:
| REPEATABLE | REPEATABLE
| RESTRICT | RESTRICT
| ROLLBACK | ROLLBACK
| SCHEMAS | SEQUENCE
| SESSION | SESSION
| SERIALIZABLE | SERIALIZABLE
| SHARE | SHARE
...@@ -3393,12 +3393,7 @@ non_reserved_keyword: ...@@ -3393,12 +3393,7 @@ non_reserved_keyword:
| VIEW | VIEW
| VINDEX | VINDEX
| VINDEXES | VINDEXES
| VITESS_KEYSPACES
| VITESS_SHARDS
| VITESS_TABLETS
| VSCHEMA | VSCHEMA
| VSCHEMA_TABLES
| VITESS_TARGET
| WARNINGS | WARNINGS
| WITH | WITH
| WRITE | WRITE
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册