S3 Select API Support for CSV (#6127)

Add support for trivial where clause cases
This commit is contained in:
Arjun Mishra
2018-08-15 03:30:19 -07:00
committed by kannappanr
parent 0e02328c98
commit 7c14cdb60e
59 changed files with 30860 additions and 3 deletions

9
vendor/github.com/xwb1989/sqlparser/CONTRIBUTORS.md generated vendored Normal file
View File

@@ -0,0 +1,9 @@
This project is originally a fork of [https://github.com/youtube/vitess](https://github.com/youtube/vitess)
Copyright Google Inc
# Contributors
Wenbin Xiao 2015
Started this project and maintained it.
Andrew Brampton 2017
Merged in multiple upstream fixes/changes.

201
vendor/github.com/xwb1989/sqlparser/LICENSE.md generated vendored Normal file
View File

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

22
vendor/github.com/xwb1989/sqlparser/Makefile generated vendored Normal file
View File

@@ -0,0 +1,22 @@
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Run make silently by default.
MAKEFLAGS = -s

# Regenerate the parser from the yacc grammar, then gofmt the output.
sql.go: sql.y
	goyacc -o sql.go sql.y
	gofmt -w sql.go

# Remove goyacc artifacts.
clean:
	rm -f y.output sql.go

150
vendor/github.com/xwb1989/sqlparser/README.md generated vendored Normal file
View File

@@ -0,0 +1,150 @@
# sqlparser [![Build Status](https://img.shields.io/travis/xwb1989/sqlparser.svg)](https://travis-ci.org/xwb1989/sqlparser) [![Coverage](https://img.shields.io/coveralls/xwb1989/sqlparser.svg)](https://coveralls.io/github/xwb1989/sqlparser) [![Report card](https://goreportcard.com/badge/github.com/xwb1989/sqlparser)](https://goreportcard.com/report/github.com/xwb1989/sqlparser) [![GoDoc](https://godoc.org/github.com/xwb1989/sqlparser?status.svg)](https://godoc.org/github.com/xwb1989/sqlparser)
Go package for parsing MySQL SQL queries.
## Notice
The backbone of this repo is extracted from [vitessio/vitess](https://github.com/vitessio/vitess).
Inside vitessio/vitess there is a very nicely written SQL parser. However, as it's not a self-contained application, I created this one.
It applies the same LICENSE as vitessio/vitess.
## Usage
```go
import (
"github.com/xwb1989/sqlparser"
)
```
Then use:
```go
sql := "SELECT * FROM table WHERE a = 'abc'"
stmt, err := sqlparser.Parse(sql)
if err != nil {
// Do something with the err
}
// Otherwise do something with stmt
switch stmt := stmt.(type) {
case *sqlparser.Select:
_ = stmt
case *sqlparser.Insert:
}
```
Alternatively, to read many queries from an io.Reader:
```go
r := strings.NewReader("INSERT INTO table1 VALUES (1, 'a'); INSERT INTO table2 VALUES (3, 4);")
tokens := sqlparser.NewTokenizer(r)
for {
stmt, err := sqlparser.ParseNext(tokens)
if err == io.EOF {
break
}
// Do something with stmt or err.
}
```
See [parse_test.go](https://github.com/xwb1989/sqlparser/blob/master/parse_test.go) for more examples, or read the [godoc](https://godoc.org/github.com/xwb1989/sqlparser).
## Porting Instructions
You only need the below if you plan to try and keep this library up to date with [vitessio/vitess](https://github.com/vitessio/vitess).
### Keeping up to date
```bash
shopt -s nullglob
VITESS=${GOPATH?}/src/vitess.io/vitess/go/
XWB1989=${GOPATH?}/src/github.com/xwb1989/sqlparser/
# Create patches for everything that changed
LASTIMPORT=1b7879cb91f1dfe1a2dfa06fea96e951e3a7aec5
for path in ${VITESS?}/{vt/sqlparser,sqltypes,bytes2,hack}; do
cd ${path}
git format-patch ${LASTIMPORT?} .
done;
# Apply patches to the dependencies
cd ${XWB1989?}
git am --directory dependency -p2 ${VITESS?}/{sqltypes,bytes2,hack}/*.patch
# Apply the main patches to the repo
cd ${XWB1989?}
git am -p4 ${VITESS?}/vt/sqlparser/*.patch
# If you encounter diff failures, manually fix them with
patch -p4 < .git/rebase-apply/patch
...
git add name_of_files
git am --continue
# Cleanup
rm ${VITESS?}/{sqltypes,bytes2,hack}/*.patch ${VITESS?}/*.patch
# and Finally update the LASTIMPORT in this README.
```
### Fresh install
TODO: Change these instructions to use git to copy the files, that'll make later patching easier.
```bash
VITESS=${GOPATH?}/src/vitess.io/vitess/go/
XWB1989=${GOPATH?}/src/github.com/xwb1989/sqlparser/
cd ${XWB1989?}
# Copy all the code
cp -pr ${VITESS?}/vt/sqlparser/ .
cp -pr ${VITESS?}/sqltypes dependency
cp -pr ${VITESS?}/bytes2 dependency
cp -pr ${VITESS?}/hack dependency
# Delete some code we haven't ported
rm dependency/sqltypes/arithmetic.go dependency/sqltypes/arithmetic_test.go dependency/sqltypes/event_token.go dependency/sqltypes/event_token_test.go dependency/sqltypes/proto3.go dependency/sqltypes/proto3_test.go dependency/sqltypes/query_response.go dependency/sqltypes/result.go dependency/sqltypes/result_test.go
# Some automated fixes
# Fix imports
sed -i '.bak' 's_vitess.io/vitess/go/vt/proto/query_github.com/xwb1989/sqlparser/dependency/querypb_g' *.go dependency/sqltypes/*.go
sed -i '.bak' 's_vitess.io/vitess/go/_github.com/xwb1989/sqlparser/dependency/_g' *.go dependency/sqltypes/*.go
# Copy the proto, but basically drop everything we don't want
cp -pr ${VITESS?}/vt/proto/query dependency/querypb
sed -i '.bak' 's_.*Descriptor.*__g' dependency/querypb/*.go
sed -i '.bak' 's_.*ProtoMessage.*__g' dependency/querypb/*.go
sed -i '.bak' 's/proto.CompactTextString(m)/"TODO"/g' dependency/querypb/*.go
sed -i '.bak' 's/proto.EnumName/EnumName/g' dependency/querypb/*.go
sed -i '.bak' 's/proto.Equal/reflect.DeepEqual/g' dependency/sqltypes/*.go
# Remove the error library
sed -i '.bak' 's/vterrors.Errorf([^,]*, /fmt.Errorf(/g' *.go dependency/sqltypes/*.go
sed -i '.bak' 's/vterrors.New([^,]*, /errors.New(/g' *.go dependency/sqltypes/*.go
```
### Testing
```bash
VITESS=${GOPATH?}/src/vitess.io/vitess/go/
XWB1989=${GOPATH?}/src/github.com/xwb1989/sqlparser/
cd ${XWB1989?}
# Test, fix and repeat
go test ./...
# Finally make some diffs (for later reference)
diff -u ${VITESS?}/sqltypes/ ${XWB1989?}/dependency/sqltypes/ > ${XWB1989?}/patches/sqltypes.patch
diff -u ${VITESS?}/bytes2/ ${XWB1989?}/dependency/bytes2/ > ${XWB1989?}/patches/bytes2.patch
diff -u ${VITESS?}/vt/proto/query/ ${XWB1989?}/dependency/querypb/ > ${XWB1989?}/patches/querypb.patch
diff -u ${VITESS?}/vt/sqlparser/ ${XWB1989?}/ > ${XWB1989?}/patches/sqlparser.patch
```

343
vendor/github.com/xwb1989/sqlparser/analyzer.go generated vendored Normal file
View File

@@ -0,0 +1,343 @@
/*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sqlparser
// analyzer.go contains utility analysis functions.
import (
"errors"
"fmt"
"strconv"
"strings"
"unicode"
"github.com/xwb1989/sqlparser/dependency/sqltypes"
)
// These constants are used to identify the SQL statement type.
// Values are assigned sequentially via iota, so declaration order is
// significant: do not reorder or insert entries without auditing code
// that compares or stores these numeric values (e.g. Preview, StmtType).
const (
	StmtSelect = iota
	StmtStream
	StmtInsert
	StmtReplace
	StmtUpdate
	StmtDelete
	StmtDDL
	StmtBegin
	StmtCommit
	StmtRollback
	StmtSet
	StmtShow
	StmtUse
	StmtOther
	StmtUnknown
	StmtComment
)
// Preview analyzes the beginning of the query using a simpler and faster
// textual comparison to identify the statement type. It returns one of the
// Stmt* constants; StmtUnknown if no keyword matches.
func Preview(sql string) int {
	trimmed := StripLeadingComments(sql)

	// Extract the first whitespace-delimited word, then drop any leading
	// non-letter runes so stray punctuation does not defeat the match.
	firstWord := trimmed
	if end := strings.IndexFunc(trimmed, unicode.IsSpace); end != -1 {
		firstWord = trimmed[:end]
	}
	firstWord = strings.TrimLeftFunc(firstWord, func(r rune) bool { return !unicode.IsLetter(r) })
	// Comparison is done in order of priority.
	loweredFirstWord := strings.ToLower(firstWord)
	switch loweredFirstWord {
	case "select":
		return StmtSelect
	case "stream":
		return StmtStream
	case "insert":
		return StmtInsert
	case "replace":
		return StmtReplace
	case "update":
		return StmtUpdate
	case "delete":
		return StmtDelete
	}
	// For the following statements it is not sufficient to rely
	// on loweredFirstWord. This is because they are not statements
	// in the grammar and we are relying on Preview to parse them.
	// For instance, we don't want: "BEGIN JUNK" to be parsed
	// as StmtBegin.
	trimmedNoComments, _ := SplitMarginComments(trimmed)
	switch strings.ToLower(trimmedNoComments) {
	case "begin", "start transaction":
		return StmtBegin
	case "commit":
		return StmtCommit
	case "rollback":
		return StmtRollback
	}
	// Remaining keywords are safe to classify by first word alone.
	switch loweredFirstWord {
	case "create", "alter", "rename", "drop", "truncate":
		return StmtDDL
	case "set":
		return StmtSet
	case "show":
		return StmtShow
	case "use":
		return StmtUse
	case "analyze", "describe", "desc", "explain", "repair", "optimize":
		return StmtOther
	}
	// A leading "/*!" is a MySQL version comment that survived comment
	// stripping (StripLeadingComments deliberately keeps /*! ... */).
	if strings.Index(trimmed, "/*!") == 0 {
		return StmtComment
	}
	return StmtUnknown
}
// StmtType returns the statement type as a string.
// Unrecognized values (including StmtUnknown and StmtComment) yield "UNKNOWN".
func StmtType(stmtType int) string {
	// Indexed composite literal keyed by the Stmt* constants; entries past
	// StmtOther are intentionally absent and fall through to "UNKNOWN".
	names := []string{
		StmtSelect:   "SELECT",
		StmtStream:   "STREAM",
		StmtInsert:   "INSERT",
		StmtReplace:  "REPLACE",
		StmtUpdate:   "UPDATE",
		StmtDelete:   "DELETE",
		StmtDDL:      "DDL",
		StmtBegin:    "BEGIN",
		StmtCommit:   "COMMIT",
		StmtRollback: "ROLLBACK",
		StmtSet:      "SET",
		StmtShow:     "SHOW",
		StmtUse:      "USE",
		StmtOther:    "OTHER",
	}
	if stmtType >= 0 && stmtType < len(names) && names[stmtType] != "" {
		return names[stmtType]
	}
	return "UNKNOWN"
}
// IsDML returns true if the query is an INSERT, UPDATE or DELETE statement.
// REPLACE is treated as DML as well.
func IsDML(sql string) bool {
	stmtType := Preview(sql)
	return stmtType == StmtInsert ||
		stmtType == StmtReplace ||
		stmtType == StmtUpdate ||
		stmtType == StmtDelete
}
// GetTableName returns the table name from the SimpleTableExpr
// only if it's a simple expression. Otherwise, it returns "".
func GetTableName(node SimpleTableExpr) TableIdent {
	tbl, isTable := node.(TableName)
	if !isTable || !tbl.Qualifier.IsEmpty() {
		// Sub-select or qualified '.' expression: no simple name available.
		return NewTableIdent("")
	}
	return tbl.Name
}
// IsColName returns true if the Expr is a *ColName.
func IsColName(node Expr) bool {
	switch node.(type) {
	case *ColName:
		return true
	default:
		return false
	}
}
// IsValue returns true if the Expr is a string, integral or value arg.
// NULL is not considered to be a value.
func IsValue(node Expr) bool {
	val, isSQLVal := node.(*SQLVal)
	if !isSQLVal {
		return false
	}
	switch val.Type {
	case StrVal, HexVal, IntVal, ValArg:
		return true
	default:
		return false
	}
}
// IsNull returns true if the Expr is SQL NULL.
func IsNull(node Expr) bool {
	_, isNull := node.(*NullVal)
	return isNull
}
// IsSimpleTuple returns true if the Expr is a ValTuple that
// contains simple values or if it's a list arg.
func IsSimpleTuple(node Expr) bool {
	if _, isListArg := node.(ListArg); isListArg {
		return true
	}
	tuple, isTuple := node.(ValTuple)
	if !isTuple {
		// It's a subquery.
		return false
	}
	for _, elem := range tuple {
		if !IsValue(elem) {
			return false
		}
	}
	return true
}
// NewPlanValue builds a sqltypes.PlanValue from an Expr.
// Bind variables become Keys, literals become Values, tuples become
// Values lists, and NULL becomes the zero PlanValue. Any other expression
// (including nested lists inside a tuple) is rejected with an error.
func NewPlanValue(node Expr) (sqltypes.PlanValue, error) {
	switch node := node.(type) {
	case *SQLVal:
		switch node.Type {
		case ValArg:
			// Bind variable: drop the single leading marker byte and use
			// the remainder as the key (presumably ':' — TODO confirm
			// against the tokenizer's ValArg format).
			return sqltypes.PlanValue{Key: string(node.Val[1:])}, nil
		case IntVal:
			n, err := sqltypes.NewIntegral(string(node.Val))
			if err != nil {
				return sqltypes.PlanValue{}, fmt.Errorf("%v", err)
			}
			return sqltypes.PlanValue{Value: n}, nil
		case StrVal:
			return sqltypes.PlanValue{Value: sqltypes.MakeTrusted(sqltypes.VarBinary, node.Val)}, nil
		case HexVal:
			// Hex literals are decoded to raw bytes before being wrapped.
			v, err := node.HexDecode()
			if err != nil {
				return sqltypes.PlanValue{}, fmt.Errorf("%v", err)
			}
			return sqltypes.PlanValue{Value: sqltypes.MakeTrusted(sqltypes.VarBinary, v)}, nil
		}
	case ListArg:
		// List bind variable: drop the two leading marker bytes
		// (presumably "::" — TODO confirm against the tokenizer).
		return sqltypes.PlanValue{ListKey: string(node[2:])}, nil
	case ValTuple:
		// Convert each element recursively; only scalar plan values are
		// allowed inside a tuple.
		pv := sqltypes.PlanValue{
			Values: make([]sqltypes.PlanValue, 0, len(node)),
		}
		for _, val := range node {
			innerpv, err := NewPlanValue(val)
			if err != nil {
				return sqltypes.PlanValue{}, err
			}
			if innerpv.ListKey != "" || innerpv.Values != nil {
				return sqltypes.PlanValue{}, errors.New("unsupported: nested lists")
			}
			pv.Values = append(pv.Values, innerpv)
		}
		return pv, nil
	case *NullVal:
		// SQL NULL maps to the zero PlanValue.
		return sqltypes.PlanValue{}, nil
	}
	return sqltypes.PlanValue{}, fmt.Errorf("expression is too complex '%v'", String(node))
}
// StringIn is a convenience function that returns
// true if str matches any of the values.
func StringIn(str string, values ...string) bool {
	for i := range values {
		if values[i] == str {
			return true
		}
	}
	return false
}
// SetKey is the extracted key from one SetExpr.
// It is used as the map key in the result of ExtractSetValues.
type SetKey struct {
	Key   string // variable name, lower-cased and stripped of any @@/scope prefix
	Scope string // scope string, e.g. SessionStr or GlobalStr
}
// ExtractSetValues returns a map of key-value pairs
// if the query is a SET statement. Values can be bool, int64 or string.
// Since set variable names are case insensitive, all keys are returned
// as lower case.
// The returned scope is the statement-level scope (lower-cased); per-variable
// scopes are recorded in each SetKey.
func ExtractSetValues(sql string) (keyValues map[SetKey]interface{}, scope string, err error) {
	stmt, err := Parse(sql)
	if err != nil {
		return nil, "", err
	}
	setStmt, ok := stmt.(*Set)
	if !ok {
		return nil, "", fmt.Errorf("ast did not yield *sqlparser.Set: %T", stmt)
	}
	result := make(map[SetKey]interface{})
	for _, expr := range setStmt.Exprs {
		// NOTE: this per-variable scope deliberately shadows the named
		// return value; the function-level scope is taken from
		// setStmt.Scope at the end.
		scope := SessionStr
		key := expr.Name.Lowered()
		// Strip a leading @@global./@@session./@@ prefix, recording the
		// scope it implies. Plain @@ defaults to session scope.
		switch {
		case strings.HasPrefix(key, "@@global."):
			scope = GlobalStr
			key = strings.TrimPrefix(key, "@@global.")
		case strings.HasPrefix(key, "@@session."):
			key = strings.TrimPrefix(key, "@@session.")
		case strings.HasPrefix(key, "@@"):
			key = strings.TrimPrefix(key, "@@")
		}
		if strings.HasPrefix(expr.Name.Lowered(), "@@") {
			// Mixing a statement-level scope (SET GLOBAL ...) with an
			// @@-prefixed variable scope is not supported.
			// NOTE(review): scope is initialized to SessionStr above, so
			// `scope != ""` looks always true here — confirm intent.
			if setStmt.Scope != "" && scope != "" {
				return nil, "", fmt.Errorf("unsupported in set: mixed using of variable scope")
			}
			// Re-tokenize the stripped name to normalize it.
			_, out := NewStringTokenizer(key).Scan()
			key = string(out)
		}
		setKey := SetKey{
			Key:   key,
			Scope: scope,
		}
		// Convert the right-hand side into a plain Go value.
		switch expr := expr.Expr.(type) {
		case *SQLVal:
			switch expr.Type {
			case StrVal:
				result[setKey] = strings.ToLower(string(expr.Val))
			case IntVal:
				num, err := strconv.ParseInt(string(expr.Val), 0, 64)
				if err != nil {
					return nil, "", err
				}
				result[setKey] = num
			default:
				return nil, "", fmt.Errorf("invalid value type: %v", String(expr))
			}
		case BoolVal:
			// Booleans are normalized to int64 0/1.
			var val int64
			if expr {
				val = 1
			}
			result[setKey] = val
		case *ColName:
			// Bare identifiers (e.g. SET names utf8) become their string form.
			result[setKey] = expr.Name.String()
		case *NullVal:
			result[setKey] = nil
		case *Default:
			result[setKey] = "default"
		default:
			return nil, "", fmt.Errorf("invalid syntax: %s", String(expr))
		}
	}
	return result, strings.ToLower(setStmt.Scope), nil
}

3450
vendor/github.com/xwb1989/sqlparser/ast.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

293
vendor/github.com/xwb1989/sqlparser/comments.go generated vendored Normal file
View File

@@ -0,0 +1,293 @@
/*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sqlparser
import (
"strconv"
"strings"
"unicode"
)
// Directive names recognized inside /*vt+ ... */ query comments.
// They are used as keys into the CommentDirectives map produced by
// ExtractCommentDirectives.
const (
	// DirectiveMultiShardAutocommit is the query comment directive to allow
	// single round trip autocommit with a multi-shard statement.
	DirectiveMultiShardAutocommit = "MULTI_SHARD_AUTOCOMMIT"
	// DirectiveSkipQueryPlanCache skips query plan cache when set.
	DirectiveSkipQueryPlanCache = "SKIP_QUERY_PLAN_CACHE"
	// DirectiveQueryTimeout sets a query timeout in vtgate. Only supported for SELECTS.
	DirectiveQueryTimeout = "QUERY_TIMEOUT_MS"
)
// isNonSpace reports whether r is anything other than a Unicode space
// character. It is the negation predicate used with strings.IndexFunc and
// strings.LastIndexFunc when scanning for visible characters.
func isNonSpace(r rune) bool {
	if unicode.IsSpace(r) {
		return false
	}
	return true
}
// leadingCommentEnd returns the first index after all leading comments, or
// 0 if there are no leading comments.
// Only /* ... */ comments count; whitespace between and after them is
// absorbed into the leading-comment span.
func leadingCommentEnd(text string) (end int) {
	hasComment := false
	pos := 0
	for pos < len(text) {
		// Eat up any whitespace. Trailing whitespace will be considered part of
		// the leading comments.
		nextVisibleOffset := strings.IndexFunc(text[pos:], isNonSpace)
		if nextVisibleOffset < 0 {
			break
		}
		pos += nextVisibleOffset
		remainingText := text[pos:]
		// Found visible characters. Look for '/*' at the beginning
		// and '*/' somewhere after that.
		// (Minimum well-formed comment is 4 bytes: "/**/".)
		if len(remainingText) < 4 || remainingText[:2] != "/*" {
			break
		}
		commentLength := 4 + strings.Index(remainingText[2:], "*/")
		if commentLength < 4 {
			// Missing end comment :/
			break
		}
		hasComment = true
		pos += commentLength
	}
	// Only report a span if at least one complete comment was consumed;
	// otherwise pure whitespace would be misreported as a comment.
	if hasComment {
		return pos
	}
	return 0
}
// trailingCommentStart returns the first index of trailing comments.
// If there are no trailing comments, returns the length of the input string.
// This is the mirror image of leadingCommentEnd, scanning from the right.
func trailingCommentStart(text string) (start int) {
	hasComment := false
	reducedLen := len(text)
	for reducedLen > 0 {
		// Eat up any whitespace. Leading whitespace will be considered part of
		// the trailing comments.
		nextReducedLen := strings.LastIndexFunc(text[:reducedLen], isNonSpace) + 1
		if nextReducedLen == 0 {
			break
		}
		reducedLen = nextReducedLen
		// The remaining text must end in "*/" and be at least "/**/" long.
		if reducedLen < 4 || text[reducedLen-2:reducedLen] != "*/" {
			break
		}
		// Find the beginning of the comment
		startCommentPos := strings.LastIndex(text[:reducedLen-2], "/*")
		if startCommentPos < 0 {
			// Badly formatted sql :/
			break
		}
		hasComment = true
		reducedLen = startCommentPos
	}
	// Only report a span if at least one complete comment was consumed.
	if hasComment {
		return reducedLen
	}
	return len(text)
}
// MarginComments holds the leading and trailing comments that surround a query.
// Both fields are produced by SplitMarginComments with the outermost
// whitespace trimmed off.
type MarginComments struct {
	Leading  string // comment text before the query
	Trailing string // comment text after the query
}
// SplitMarginComments pulls out any leading or trailing comments from a raw sql query.
// This function also trims leading (if there's a comment) and trailing whitespace.
func SplitMarginComments(sql string) (query string, comments MarginComments) {
	trailingStart := trailingCommentStart(sql)
	leadingEnd := leadingCommentEnd(sql[:trailingStart])
	leading := strings.TrimLeftFunc(sql[:leadingEnd], unicode.IsSpace)
	trailing := strings.TrimRightFunc(sql[trailingStart:], unicode.IsSpace)
	query = strings.TrimFunc(sql[leadingEnd:trailingStart], unicode.IsSpace)
	return query, MarginComments{Leading: leading, Trailing: trailing}
}
// StripLeadingComments trims the SQL string and removes any leading comments.
// Both /* ... */ and -- line comments are stripped, except MySQL version
// comments (/*! ... */), which are kept because they carry executable SQL.
func StripLeadingComments(sql string) string {
	sql = strings.TrimFunc(sql, unicode.IsSpace)
	for hasCommentPrefix(sql) {
		switch sql[0] {
		case '/':
			// Multi line comment
			index := strings.Index(sql, "*/")
			if index <= 1 {
				// No terminator (or it overlaps the opener): leave as-is.
				return sql
			}
			// don't strip /*! ... */ or /*!50700 ... */
			if len(sql) > 2 && sql[2] == '!' {
				return sql
			}
			sql = sql[index+2:]
		case '-':
			// Single line comment
			index := strings.Index(sql, "\n")
			if index == -1 {
				// Comment runs to end of input; nothing remains.
				return sql
			}
			sql = sql[index+1:]
		}
		// Re-trim so the next iteration sees the following token at index 0.
		sql = strings.TrimFunc(sql, unicode.IsSpace)
	}
	return sql
}
// hasCommentPrefix reports whether sql starts with a comment opener:
// either "/*" (multi-line) or "--" (single-line).
func hasCommentPrefix(sql string) bool {
	return strings.HasPrefix(sql, "/*") || strings.HasPrefix(sql, "--")
}
// ExtractMysqlComment extracts the version and SQL from a comment-only query
// such as /*!50708 sql here */. The input must already be a well-formed
// version comment; only the interior between "/*!" and "*/" is examined.
func ExtractMysqlComment(sql string) (version string, innerSQL string) {
	// Strip the "/*!" opener and the "*/" terminator.
	body := sql[3 : len(sql)-2]

	// The version is the run of leading digits, capped at five characters
	// (the callback stops on the first non-digit or on the sixth rune seen).
	seen := 0
	versionEnd := strings.IndexFunc(body, func(c rune) bool {
		seen++
		return !unicode.IsDigit(c) || seen == 6
	})

	version = body[:versionEnd]
	innerSQL = strings.TrimFunc(body[versionEnd:], unicode.IsSpace)
	return version, innerSQL
}
// commentDirectivePreamble marks a comment that carries execution
// directives, i.e. /*vt+ ... */.
const commentDirectivePreamble = "/*vt+"

// CommentDirectives is the parsed representation for execution directives
// conveyed in query comments. Values are bool, int, or string depending on
// how each directive parsed (see ExtractCommentDirectives).
type CommentDirectives map[string]interface{}
// ExtractCommentDirectives parses the comment list for any execution directives
// of the form:
//
//	/*vt+ OPTION_ONE=1 OPTION_TWO OPTION_THREE=abcd */
//
// It returns the map of the directive values or nil if there aren't any.
// A directive without a value is stored as boolean true; otherwise the value
// is parsed as int, then bool, then kept as a raw string.
func ExtractCommentDirectives(comments Comments) CommentDirectives {
	if comments == nil {
		return nil
	}
	var vals map[string]interface{}
	for _, comment := range comments {
		commentStr := string(comment)
		// Use HasPrefix rather than slicing commentStr[0:5]: a comment
		// shorter than the preamble would make the slice expression panic.
		if !strings.HasPrefix(commentStr, commentDirectivePreamble) {
			continue
		}
		// Lazily allocate the map so queries without directives return nil.
		if vals == nil {
			vals = make(map[string]interface{})
		}
		// Split on whitespace and ignore the first and last directive
		// since they contain the comment start/end
		directives := strings.Fields(commentStr)
		for i := 1; i < len(directives)-1; i++ {
			directive := directives[i]
			sep := strings.IndexByte(directive, '=')
			// No value is equivalent to a true boolean
			if sep == -1 {
				vals[directive] = true
				continue
			}
			strVal := directive[sep+1:]
			directive = directive[:sep]
			// Prefer int, then bool, then fall back to the raw string.
			intVal, err := strconv.Atoi(strVal)
			if err == nil {
				vals[directive] = intVal
				continue
			}
			boolVal, err := strconv.ParseBool(strVal)
			if err == nil {
				vals[directive] = boolVal
				continue
			}
			vals[directive] = strVal
		}
	}
	return vals
}
// IsSet checks the directive map for the named directive and returns
// true if the directive is set and has a true/false or 0/1 value.
func (d CommentDirectives) IsSet(key string) bool {
	if d == nil {
		return false
	}
	val, found := d[key]
	if !found {
		return false
	}
	// Only boolean and integer (== 1) values count as "set";
	// any other stored type yields false.
	switch v := val.(type) {
	case bool:
		return v
	case int:
		return v == 1
	default:
		return false
	}
}
// SkipQueryPlanCacheDirective returns true if skip query plan cache directive
// is set to true in the query's leading comments.
//
// The original four per-statement case bodies were identical; they are folded
// into a single comment extraction followed by one directive lookup.
func SkipQueryPlanCacheDirective(stmt Statement) bool {
	// Only these statement kinds carry a Comments field.
	var comments Comments
	switch stmt := stmt.(type) {
	case *Select:
		comments = stmt.Comments
	case *Insert:
		comments = stmt.Comments
	case *Update:
		comments = stmt.Comments
	case *Delete:
		comments = stmt.Comments
	default:
		return false
	}
	return ExtractCommentDirectives(comments).IsSet(DirectiveSkipQueryPlanCache)
}

View File

@@ -0,0 +1,65 @@
/*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package bytes2
// Buffer implements a subset of the write portion of
// bytes.Buffer, but more efficiently. This is meant to
// be used in very high QPS operations, especially for
// WriteByte, and without abstracting it as a Writer.
// Function signatures contain errors for compatibility,
// but they do not return errors.
type Buffer struct {
	bytes []byte
}

// NewBuffer is equivalent to bytes.NewBuffer.
func NewBuffer(initial []byte) *Buffer {
	return &Buffer{bytes: initial}
}

// Write is equivalent to bytes.Buffer.Write.
// It always succeeds; the error is always nil.
func (b *Buffer) Write(p []byte) (int, error) {
	b.bytes = append(b.bytes, p...)
	return len(p), nil
}

// WriteString is equivalent to bytes.Buffer.WriteString.
// It always succeeds; the error is always nil.
func (b *Buffer) WriteString(s string) (int, error) {
	b.bytes = append(b.bytes, s...)
	return len(s), nil
}

// WriteByte is equivalent to bytes.Buffer.WriteByte.
// It always succeeds; the error is always nil.
func (b *Buffer) WriteByte(c byte) error {
	b.bytes = append(b.bytes, c)
	return nil
}

// Bytes is equivalent to bytes.Buffer.Bytes.
// The returned slice aliases the internal storage.
func (b *Buffer) Bytes() []byte {
	return b.bytes
}

// String is equivalent to bytes.Buffer.String.
func (b *Buffer) String() string {
	return string(b.bytes)
}

// Len is equivalent to bytes.Buffer.Len.
func (b *Buffer) Len() int {
	return len(b.bytes)
}

View File

@@ -0,0 +1,79 @@
/*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package hack gives you some efficient functionality at the cost of
// breaking some Go rules.
package hack
import (
"reflect"
"unsafe"
)
// StringArena lets you consolidate allocations for a group of strings
// that have similar life length
type StringArena struct {
	buf []byte // backing storage; strings from NewString alias into it
	str string // string header aliased to buf's full capacity (see NewStringArena)
}

// NewStringArena creates an arena of the specified size.
//
// It points sa.str at sa.buf's backing array (Len set to buf's
// capacity) via reflect/unsafe header surgery, so NewString can later
// hand out sub-strings of the arena without copying.
func NewStringArena(size int) *StringArena {
	sa := &StringArena{buf: make([]byte, 0, size)}
	pbytes := (*reflect.SliceHeader)(unsafe.Pointer(&sa.buf))
	pstring := (*reflect.StringHeader)(unsafe.Pointer(&sa.str))
	pstring.Data = pbytes.Data
	pstring.Len = pbytes.Cap
	return sa
}

// NewString copies a byte slice into the arena and returns it as a string.
// If the arena is full, it returns a traditional go string.
func (sa *StringArena) NewString(b []byte) string {
	if len(b) == 0 {
		return ""
	}
	// Not enough room left: fall back to a regular allocation.
	if len(sa.buf)+len(b) > cap(sa.buf) {
		return string(b)
	}
	start := len(sa.buf)
	sa.buf = append(sa.buf, b...)
	// sa.str aliases sa.buf, so this substring shares the arena memory.
	return sa.str[start : start+len(b)]
}

// SpaceLeft returns the amount of space left in the arena.
func (sa *StringArena) SpaceLeft() int {
	return cap(sa.buf) - len(sa.buf)
}

// String force casts a []byte to a string.
// USE AT YOUR OWN RISK
//
// The result shares b's memory: mutating b afterwards mutates the
// "immutable" string, which violates Go's string contract.
func String(b []byte) (s string) {
	if len(b) == 0 {
		return ""
	}
	pbytes := (*reflect.SliceHeader)(unsafe.Pointer(&b))
	pstring := (*reflect.StringHeader)(unsafe.Pointer(&s))
	pstring.Data = pbytes.Data
	pstring.Len = pbytes.Len
	return
}

// StringPointer returns &s[0], which is not allowed in go
func StringPointer(s string) unsafe.Pointer {
	pstring := (*reflect.StringHeader)(unsafe.Pointer(&s))
	return unsafe.Pointer(pstring.Data)
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,266 @@
/*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sqltypes
import (
"errors"
"fmt"
"reflect"
"strconv"
"github.com/xwb1989/sqlparser/dependency/querypb"
)
// NullBindVariable is a bindvar with NULL value.
var NullBindVariable = &querypb.BindVariable{Type: querypb.Type_NULL_TYPE}

// ValueToProto converts Value to a *querypb.Value.
// The value's bytes are aliased, not copied.
func ValueToProto(v Value) *querypb.Value {
	return &querypb.Value{Type: v.typ, Value: v.val}
}

// ProtoToValue converts a *querypb.Value to a Value.
// The proto's bytes are aliased, not copied.
func ProtoToValue(v *querypb.Value) Value {
	return MakeTrusted(v.Type, v.Value)
}
// BuildBindVariables builds a map[string]*querypb.BindVariable from a map[string]interface{}.
// A nil or empty input yields a nil map. On conversion failure, the
// error is prefixed with the offending key.
func BuildBindVariables(in map[string]interface{}) (map[string]*querypb.BindVariable, error) {
	if len(in) == 0 {
		return nil, nil
	}
	result := make(map[string]*querypb.BindVariable, len(in))
	for name, val := range in {
		converted, err := BuildBindVariable(val)
		if err != nil {
			return nil, fmt.Errorf("%s: %v", name, err)
		}
		result[name] = converted
	}
	return result, nil
}
// Int32BindVariable converts an int32 to a bind var.
func Int32BindVariable(v int32) *querypb.BindVariable {
	return ValueBindVariable(NewInt32(v))
}

// Int64BindVariable converts an int64 to a bind var.
func Int64BindVariable(v int64) *querypb.BindVariable {
	return ValueBindVariable(NewInt64(v))
}

// Uint64BindVariable converts a uint64 to a bind var.
func Uint64BindVariable(v uint64) *querypb.BindVariable {
	return ValueBindVariable(NewUint64(v))
}

// Float64BindVariable converts a float64 to a bind var.
func Float64BindVariable(v float64) *querypb.BindVariable {
	return ValueBindVariable(NewFloat64(v))
}

// StringBindVariable converts a string to a bind var.
func StringBindVariable(v string) *querypb.BindVariable {
	return ValueBindVariable(NewVarChar(v))
}

// BytesBindVariable converts a []byte to a bind var.
// The bytes are aliased, not copied.
func BytesBindVariable(v []byte) *querypb.BindVariable {
	return &querypb.BindVariable{Type: VarBinary, Value: v}
}

// ValueBindVariable converts a Value to a bind var.
// The value's bytes are aliased, not copied.
func ValueBindVariable(v Value) *querypb.BindVariable {
	return &querypb.BindVariable{Type: v.typ, Value: v.val}
}
// BuildBindVariable builds a *querypb.BindVariable from a valid input type.
//
// Scalars (string, []byte, int, int64, uint64, float64, nil, Value and
// *querypb.BindVariable) become single-value bind variables. Slices
// become TUPLE bind variables with one value per element. Any other
// type yields an error.
func BuildBindVariable(v interface{}) (*querypb.BindVariable, error) {
	switch v := v.(type) {
	case string:
		return StringBindVariable(v), nil
	case []byte:
		return BytesBindVariable(v), nil
	case int:
		return &querypb.BindVariable{
			Type:  querypb.Type_INT64,
			Value: strconv.AppendInt(nil, int64(v), 10),
		}, nil
	case int64:
		return Int64BindVariable(v), nil
	case uint64:
		return Uint64BindVariable(v), nil
	case float64:
		return Float64BindVariable(v), nil
	case nil:
		return NullBindVariable, nil
	case Value:
		return ValueBindVariable(v), nil
	case *querypb.BindVariable:
		return v, nil
	case []interface{}:
		// Convert each element recursively, then copy type/value into
		// the tuple's backing array.
		values := make([]querypb.Value, len(v))
		for i, lv := range v {
			lbv, err := BuildBindVariable(lv)
			if err != nil {
				return nil, err
			}
			values[i].Type = lbv.Type
			values[i].Value = lbv.Value
		}
		return tupleBindVariable(values), nil
	case []string:
		values := make([]querypb.Value, len(v))
		for i, lv := range v {
			values[i].Type = querypb.Type_VARCHAR
			values[i].Value = []byte(lv)
		}
		return tupleBindVariable(values), nil
	case [][]byte:
		values := make([]querypb.Value, len(v))
		for i, lv := range v {
			values[i].Type = querypb.Type_VARBINARY
			values[i].Value = lv
		}
		return tupleBindVariable(values), nil
	case []int:
		values := make([]querypb.Value, len(v))
		for i, lv := range v {
			values[i].Type = querypb.Type_INT64
			values[i].Value = strconv.AppendInt(nil, int64(lv), 10)
		}
		return tupleBindVariable(values), nil
	case []int64:
		values := make([]querypb.Value, len(v))
		for i, lv := range v {
			values[i].Type = querypb.Type_INT64
			values[i].Value = strconv.AppendInt(nil, lv, 10)
		}
		return tupleBindVariable(values), nil
	case []uint64:
		values := make([]querypb.Value, len(v))
		for i, lv := range v {
			values[i].Type = querypb.Type_UINT64
			values[i].Value = strconv.AppendUint(nil, lv, 10)
		}
		return tupleBindVariable(values), nil
	case []float64:
		values := make([]querypb.Value, len(v))
		for i, lv := range v {
			values[i].Type = querypb.Type_FLOAT64
			values[i].Value = strconv.AppendFloat(nil, lv, 'g', -1, 64)
		}
		return tupleBindVariable(values), nil
	}
	return nil, fmt.Errorf("type %T not supported as bind var: %v", v, v)
}

// tupleBindVariable wraps pre-built values into a TUPLE bind variable.
// The Values pointers reference entries of the supplied slice, so the
// per-element storage is a single backing array (same layout as the
// previous inlined loops).
func tupleBindVariable(values []querypb.Value) *querypb.BindVariable {
	bv := &querypb.BindVariable{
		Type:   querypb.Type_TUPLE,
		Values: make([]*querypb.Value, len(values)),
	}
	for i := range values {
		bv.Values[i] = &values[i]
	}
	return bv
}
// ValidateBindVariables validates a map[string]*querypb.BindVariable.
// The first invalid entry found ends the scan; its error is prefixed
// with the offending key.
func ValidateBindVariables(bv map[string]*querypb.BindVariable) error {
	for name, variable := range bv {
		err := ValidateBindVariable(variable)
		if err != nil {
			return fmt.Errorf("%s: %v", name, err)
		}
	}
	return nil
}
// ValidateBindVariable returns an error if the bind variable has inconsistent
// fields.
func ValidateBindVariable(bv *querypb.BindVariable) error {
	if bv == nil {
		return errors.New("bind variable is nil")
	}
	if bv.Type != querypb.Type_TUPLE {
		// If NewValue succeeds, the value is valid.
		_, err := NewValue(bv.Type, bv.Value)
		return err
	}
	// TUPLE: must be non-empty, one level deep, with each member valid.
	if len(bv.Values) == 0 {
		return errors.New("empty tuple is not allowed")
	}
	for _, val := range bv.Values {
		if val.Type == querypb.Type_TUPLE {
			return errors.New("tuple not allowed inside another tuple")
		}
		if err := ValidateBindVariable(&querypb.BindVariable{Type: val.Type, Value: val.Value}); err != nil {
			return err
		}
	}
	return nil
}
// BindVariableToValue converts a bind var into a Value.
// TUPLE bind vars cannot be represented as a single Value and error out.
func BindVariableToValue(bv *querypb.BindVariable) (Value, error) {
	if bv.Type != querypb.Type_TUPLE {
		return MakeTrusted(bv.Type, bv.Value), nil
	}
	return NULL, errors.New("cannot convert a TUPLE bind var into a value")
}
// BindVariablesEqual compares two maps of bind variables.
// The comparison is a reflect.DeepEqual of the two maps wrapped in
// BoundQuery messages.
func BindVariablesEqual(x, y map[string]*querypb.BindVariable) bool {
	return reflect.DeepEqual(&querypb.BoundQuery{BindVariables: x}, &querypb.BoundQuery{BindVariables: y})
}
// CopyBindVariables returns a shallow-copy of the given bindVariables map.
// The *querypb.BindVariable values themselves are shared, not cloned.
func CopyBindVariables(bindVariables map[string]*querypb.BindVariable) map[string]*querypb.BindVariable {
	copied := make(map[string]*querypb.BindVariable, len(bindVariables))
	for name, bv := range bindVariables {
		copied[name] = bv
	}
	return copied
}

View File

@@ -0,0 +1,259 @@
/*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sqltypes
import (
"encoding/json"
"errors"
"fmt"
"github.com/xwb1989/sqlparser/dependency/querypb"
)
// PlanValue represents a value or a list of values for
// a column that will later be resolved using bind vars and used
// to perform plan actions like generating the final query or
// deciding on a route.
//
// Plan values are typically used as a slice ([]planValue)
// where each entry is for one column. For situations where
// the required output is a list of rows (like in the case
// of multi-value inserts), the representation is pivoted.
// For example, a statement like this:
// INSERT INTO t VALUES (1, 2), (3, 4)
// will be represented as follows:
// []PlanValue{
// Values: {1, 3},
// Values: {2, 4},
// }
//
// For WHERE clause items that contain a combination of
// equality expressions and IN clauses like this:
// WHERE pk1 = 1 AND pk2 IN (2, 3, 4)
// The plan values will be represented as follows:
// []PlanValue{
// Value: 1,
// Values: {2, 3, 4},
// }
// When converted into rows, columns with single values
// are replicated as the same for all rows:
// [][]Value{
// {1, 2},
// {1, 3},
// {1, 4},
// }
type PlanValue struct {
	Key     string      // name of a bind var holding a single value
	Value   Value       // literal single value
	ListKey string      // name of a bind var holding a list (TUPLE)
	Values  []PlanValue // literal list of values
}
// IsNull returns true if the PlanValue is NULL.
func (pv PlanValue) IsNull() bool {
	if pv.Key != "" || pv.ListKey != "" || pv.Values != nil {
		return false
	}
	return pv.Value.IsNull()
}
// IsList returns true if the PlanValue is a list.
func (pv PlanValue) IsList() bool {
	if pv.ListKey != "" {
		return true
	}
	return pv.Values != nil
}
// ResolveValue resolves a PlanValue as a single value based on the supplied bindvars.
// A fully-empty PlanValue resolves to NULL.
func (pv PlanValue) ResolveValue(bindVars map[string]*querypb.BindVariable) (Value, error) {
	if pv.Key != "" {
		bv, err := pv.lookupValue(bindVars)
		if err != nil {
			return NULL, err
		}
		return MakeTrusted(bv.Type, bv.Value), nil
	}
	if !pv.Value.IsNull() {
		return pv.Value, nil
	}
	if pv.ListKey != "" || pv.Values != nil {
		// This code is unreachable because the parser does not allow
		// multi-value constructs where a single value is expected.
		return NULL, errors.New("a list was supplied where a single value was expected")
	}
	return NULL, nil
}
// lookupValue fetches the single-value bind var named by pv.Key and
// rejects TUPLE bind vars.
func (pv PlanValue) lookupValue(bindVars map[string]*querypb.BindVariable) (*querypb.BindVariable, error) {
	bv, ok := bindVars[pv.Key]
	if !ok {
		return nil, fmt.Errorf("missing bind var %s", pv.Key)
	}
	if bv.Type == querypb.Type_TUPLE {
		// Report pv.Key — the bind var that was actually looked up.
		// (The original reported pv.ListKey, which is empty or
		// unrelated in a single-value lookup.)
		return nil, fmt.Errorf("TUPLE was supplied for single value bind var %s", pv.Key)
	}
	return bv, nil
}
// ResolveList resolves a PlanValue as a list of values based on the supplied bindvars.
func (pv PlanValue) ResolveList(bindVars map[string]*querypb.BindVariable) ([]Value, error) {
	if pv.ListKey != "" {
		bv, err := pv.lookupList(bindVars)
		if err != nil {
			return nil, err
		}
		resolved := make([]Value, 0, len(bv.Values))
		for _, val := range bv.Values {
			resolved = append(resolved, MakeTrusted(val.Type, val.Value))
		}
		return resolved, nil
	}
	if pv.Values != nil {
		resolved := make([]Value, 0, len(pv.Values))
		for _, item := range pv.Values {
			v, err := item.ResolveValue(bindVars)
			if err != nil {
				return nil, err
			}
			resolved = append(resolved, v)
		}
		return resolved, nil
	}
	// This code is unreachable because the parser does not allow
	// single value constructs where multiple values are expected.
	return nil, errors.New("a single value was supplied where a list was expected")
}
// lookupList fetches the TUPLE bind var named by pv.ListKey, rejecting
// single-value bind vars.
func (pv PlanValue) lookupList(bindVars map[string]*querypb.BindVariable) (*querypb.BindVariable, error) {
	bv, found := bindVars[pv.ListKey]
	if !found {
		return nil, fmt.Errorf("missing bind var %s", pv.ListKey)
	}
	if bv.Type != querypb.Type_TUPLE {
		return nil, fmt.Errorf("single value was supplied for TUPLE bind var %s", pv.ListKey)
	}
	return bv, nil
}
// MarshalJSON should be used only for testing.
func (pv PlanValue) MarshalJSON() ([]byte, error) {
	switch {
	case pv.Key != "":
		// Single bind var renders as ":name".
		return json.Marshal(":" + pv.Key)
	case !pv.Value.IsNull():
		// Integral literals render bare; everything else as a JSON string.
		if pv.Value.IsIntegral() {
			return pv.Value.ToBytes(), nil
		}
		return json.Marshal(pv.Value.ToString())
	case pv.ListKey != "":
		// List bind var renders as "::name".
		return json.Marshal("::" + pv.ListKey)
	case pv.Values != nil:
		return json.Marshal(pv.Values)
	}
	return []byte("null"), nil
}
// rowCount computes the number of rows implied by a []PlanValue: the
// common length of all list-valued entries. It errors if two lists
// disagree, and returns 1 if there are no lists at all.
func rowCount(pvs []PlanValue, bindVars map[string]*querypb.BindVariable) (int, error) {
	count := -1
	// setCount records l as the row count, or errors if it conflicts
	// with a previously recorded count.
	setCount := func(l int) error {
		switch count {
		case -1:
			count = l
			return nil
		case l:
			return nil
		default:
			return errors.New("mismatch in number of column values")
		}
	}
	for _, pv := range pvs {
		switch {
		case pv.Key != "" || !pv.Value.IsNull():
			// Single values don't constrain the row count.
			continue
		case pv.Values != nil:
			if err := setCount(len(pv.Values)); err != nil {
				return 0, err
			}
		case pv.ListKey != "":
			bv, err := pv.lookupList(bindVars)
			if err != nil {
				return 0, err
			}
			if err := setCount(len(bv.Values)); err != nil {
				return 0, err
			}
		}
	}
	if count == -1 {
		// If there were no lists inside, it was a single row.
		// Note that count can never be 0 because there is enough
		// protection at the top level: list bind vars must have
		// at least one value (enforced by vtgate), and AST lists
		// must have at least one value (enforced by the parser).
		// Also lists created internally after vtgate validation
		// ensure at least one value.
		// TODO(sougou): verify and change API to enforce this.
		return 1, nil
	}
	return count, nil
}
// ResolveRows resolves a []PlanValue as rows based on the supplied bindvars.
// Each PlanValue supplies one column: single values are replicated down
// every row, while lists must all share the common length computed by
// rowCount.
func ResolveRows(pvs []PlanValue, bindVars map[string]*querypb.BindVariable) ([][]Value, error) {
	count, err := rowCount(pvs, bindVars)
	if err != nil {
		return nil, err
	}
	// Allocate the rows.
	rows := make([][]Value, count)
	for i := range rows {
		rows[i] = make([]Value, len(pvs))
	}
	// Using j because we're resolving by columns.
	for j, pv := range pvs {
		switch {
		case pv.Key != "":
			bv, err := pv.lookupValue(bindVars)
			if err != nil {
				return nil, err
			}
			for i := range rows {
				rows[i][j] = MakeTrusted(bv.Type, bv.Value)
			}
		case !pv.Value.IsNull():
			for i := range rows {
				rows[i][j] = pv.Value
			}
		case pv.ListKey != "":
			bv, err := pv.lookupList(bindVars)
			if err != nil {
				// This code is unreachable because rowCount already checks this.
				return nil, err
			}
			for i := range rows {
				rows[i][j] = MakeTrusted(bv.Values[i].Type, bv.Values[i].Value)
			}
		case pv.Values != nil:
			for i := range rows {
				rows[i][j], err = pv.Values[i].ResolveValue(bindVars)
				if err != nil {
					return nil, err
				}
			}
			// default case is a NULL value, which the row values are already initialized to.
		}
	}
	return rows, nil
}

View File

@@ -0,0 +1,154 @@
/*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sqltypes
import (
querypb "github.com/xwb1989/sqlparser/dependency/querypb"
)
// Functions in this file should only be used for testing.
// This is an experiment to see if test code bloat can be
// reduced and readability improved.
/*
// MakeTestFields builds a []*querypb.Field for testing.
// fields := sqltypes.MakeTestFields(
// "a|b",
// "int64|varchar",
// )
// The field types are as defined in querypb and are case
// insensitive. Column delimiters must be used only to separate
// strings and not at the beginning or the end.
func MakeTestFields(names, types string) []*querypb.Field {
n := split(names)
t := split(types)
var fields []*querypb.Field
for i := range n {
fields = append(fields, &querypb.Field{
Name: n[i],
Type: querypb.Type(querypb.Type_value[strings.ToUpper(t[i])]),
})
}
return fields
}
// MakeTestResult builds a *sqltypes.Result object for testing.
// result := sqltypes.MakeTestResult(
// fields,
// " 1|a",
// "10|abcd",
// )
// The field type values are set as the types for the rows built.
// Spaces are trimmed from row values. "null" is treated as NULL.
func MakeTestResult(fields []*querypb.Field, rows ...string) *Result {
result := &Result{
Fields: fields,
}
if len(rows) > 0 {
result.Rows = make([][]Value, len(rows))
}
for i, row := range rows {
result.Rows[i] = make([]Value, len(fields))
for j, col := range split(row) {
if col == "null" {
continue
}
result.Rows[i][j] = MakeTrusted(fields[j].Type, []byte(col))
}
}
result.RowsAffected = uint64(len(result.Rows))
return result
}
// MakeTestStreamingResults builds a list of results for streaming.
// results := sqltypes.MakeStreamingResults(
// fields,
// "1|a",
// "2|b",
// "---",
// "c|c",
// )
// The first result contains only the fields. Subsequent results
// are built using the field types. Every input that starts with a "-"
// is treated as streaming delimiter for one result. A final
// delimiter must not be supplied.
func MakeTestStreamingResults(fields []*querypb.Field, rows ...string) []*Result {
var results []*Result
results = append(results, &Result{Fields: fields})
start := 0
cur := 0
// Add a final streaming delimiter to simplify the loop below.
rows = append(rows, "-")
for cur < len(rows) {
if rows[cur][0] != '-' {
cur++
continue
}
result := MakeTestResult(fields, rows[start:cur]...)
result.Fields = nil
result.RowsAffected = 0
results = append(results, result)
start = cur + 1
cur = start
}
return results
}
*/
// TestBindVariable makes a *querypb.BindVariable from
// an interface{}. It panics on invalid input.
// This function should only be used for testing.
func TestBindVariable(v interface{}) *querypb.BindVariable {
	if v == nil {
		return NullBindVariable
	}
	bv, err := BuildBindVariable(v)
	if err != nil {
		panic(err)
	}
	return bv
}
// TestValue builds a Value from typ and val.
// No validation is performed; the caller guarantees typ and val match.
// This function should only be used for testing.
func TestValue(typ querypb.Type, val string) Value {
	return MakeTrusted(typ, []byte(val))
}
/*
// PrintResults prints []*Results into a string.
// This function should only be used for testing.
func PrintResults(results []*Result) string {
b := new(bytes.Buffer)
for i, r := range results {
if i == 0 {
fmt.Fprintf(b, "%v", r)
continue
}
fmt.Fprintf(b, ", %v", r)
}
return b.String()
}
func split(str string) []string {
splits := strings.Split(str, "|")
for i, v := range splits {
splits[i] = strings.TrimSpace(v)
}
return splits
}
*/

View File

@@ -0,0 +1,288 @@
/*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sqltypes
import (
"fmt"
"github.com/xwb1989/sqlparser/dependency/querypb"
)
// This file provides wrappers and support
// functions for querypb.Type.
// These bit flags can be used to query on the
// common properties of types.
const (
	// Property flags copied from querypb so they can be tested
	// directly against int(t) by the Is* predicates below.
	flagIsIntegral = int(querypb.Flag_ISINTEGRAL)
	flagIsUnsigned = int(querypb.Flag_ISUNSIGNED)
	flagIsFloat    = int(querypb.Flag_ISFLOAT)
	flagIsQuoted   = int(querypb.Flag_ISQUOTED)
	flagIsText     = int(querypb.Flag_ISTEXT)
	flagIsBinary   = int(querypb.Flag_ISBINARY)
)
// IsIntegral returns true if querypb.Type is an integral
// (signed/unsigned) that can be represented using
// up to 64 binary bits.
// If you have a Value object, use its member function.
func IsIntegral(t querypb.Type) bool {
	return int(t)&flagIsIntegral == flagIsIntegral
}

// IsSigned returns true if querypb.Type is a signed integral.
// If you have a Value object, use its member function.
func IsSigned(t querypb.Type) bool {
	return int(t)&(flagIsIntegral|flagIsUnsigned) == flagIsIntegral
}

// IsUnsigned returns true if querypb.Type is an unsigned integral.
// Caution: this is not the same as !IsSigned.
// If you have a Value object, use its member function.
func IsUnsigned(t querypb.Type) bool {
	return int(t)&(flagIsIntegral|flagIsUnsigned) == flagIsIntegral|flagIsUnsigned
}

// IsFloat returns true if querypb.Type is a floating point.
// If you have a Value object, use its member function.
func IsFloat(t querypb.Type) bool {
	return int(t)&flagIsFloat == flagIsFloat
}

// IsQuoted returns true if querypb.Type is a quoted text or binary.
// If you have a Value object, use its member function.
func IsQuoted(t querypb.Type) bool {
	return int(t)&flagIsQuoted == flagIsQuoted
}

// IsText returns true if querypb.Type is a text.
// If you have a Value object, use its member function.
func IsText(t querypb.Type) bool {
	return int(t)&flagIsText == flagIsText
}

// IsBinary returns true if querypb.Type is a binary.
// If you have a Value object, use its member function.
func IsBinary(t querypb.Type) bool {
	return int(t)&flagIsBinary == flagIsBinary
}

// isNumber returns true if the type is any type of number.
func isNumber(t querypb.Type) bool {
	return IsIntegral(t) || IsFloat(t) || t == Decimal
}
// Vitess data types. These are idiomatically
// named synonyms for the querypb.Type values.
// Although these constants are interchangeable,
// they should be treated as different from querypb.Type.
// Use the synonyms only to refer to the type in Value.
// For proto variables, use the querypb.Type constants
// instead.
// The following conditions are non-overlapping
// and cover all types: IsSigned(), IsUnsigned(),
// IsFloat(), IsQuoted(), Null, Decimal, Expression.
// Also, IsIntegral() == (IsSigned()||IsUnsigned()).
// TestCategory needs to be updated accordingly if
// you add a new type.
// If IsBinary or IsText is true, then IsQuoted is
// also true. But there are IsQuoted types that are
// neither binary or text.
// querypb.Type_TUPLE is not included in this list
// because it's not a valid Value type.
// TODO(sougou): provide a categorization function
// that returns enums, which will allow for cleaner
// switch statements for those who want to cover types
// by their category.
const (
	// Type synonyms; see the block comment above for usage rules.
	Null       = querypb.Type_NULL_TYPE
	Int8       = querypb.Type_INT8
	Uint8      = querypb.Type_UINT8
	Int16      = querypb.Type_INT16
	Uint16     = querypb.Type_UINT16
	Int24      = querypb.Type_INT24
	Uint24     = querypb.Type_UINT24
	Int32      = querypb.Type_INT32
	Uint32     = querypb.Type_UINT32
	Int64      = querypb.Type_INT64
	Uint64     = querypb.Type_UINT64
	Float32    = querypb.Type_FLOAT32
	Float64    = querypb.Type_FLOAT64
	Timestamp  = querypb.Type_TIMESTAMP
	Date       = querypb.Type_DATE
	Time       = querypb.Type_TIME
	Datetime   = querypb.Type_DATETIME
	Year       = querypb.Type_YEAR
	Decimal    = querypb.Type_DECIMAL
	Text       = querypb.Type_TEXT
	Blob       = querypb.Type_BLOB
	VarChar    = querypb.Type_VARCHAR
	VarBinary  = querypb.Type_VARBINARY
	Char       = querypb.Type_CHAR
	Binary     = querypb.Type_BINARY
	Bit        = querypb.Type_BIT
	Enum       = querypb.Type_ENUM
	Set        = querypb.Type_SET
	Geometry   = querypb.Type_GEOMETRY
	TypeJSON   = querypb.Type_JSON
	Expression = querypb.Type_EXPRESSION
)
// bit-shift the mysql flags by two byte so we
// can merge them with the mysql or vitess types.
const (
	// Flag bits tested by modifyType against a column's flags value.
	mysqlUnsigned = 32
	mysqlBinary   = 128
	mysqlEnum     = 256
	mysqlSet      = 2048
)
// If you add to this map, make sure you add a test case
// in tabletserver/endtoend.
// Keys are the numeric MySQL column type codes; several MySQL text
// codes (249-252) collapse into the single vitess Text type.
var mysqlToType = map[int64]querypb.Type{
	1:   Int8,
	2:   Int16,
	3:   Int32,
	4:   Float32,
	5:   Float64,
	6:   Null,
	7:   Timestamp,
	8:   Int64,
	9:   Int24,
	10:  Date,
	11:  Time,
	12:  Datetime,
	13:  Year,
	16:  Bit,
	245: TypeJSON,
	246: Decimal,
	249: Text,
	250: Text,
	251: Text,
	252: Text,
	253: VarChar,
	254: Char,
	255: Geometry,
}
// modifyType modifies the vitess type based on the
// mysql flag. The function checks specific flags based
// on the type. This allows us to ignore stray flags
// that MySQL occasionally sets.
func modifyType(typ querypb.Type, flags int64) querypb.Type {
	unsigned := flags&mysqlUnsigned != 0
	binary := flags&mysqlBinary != 0
	switch typ {
	case Int8:
		if unsigned {
			return Uint8
		}
	case Int16:
		if unsigned {
			return Uint16
		}
	case Int32:
		if unsigned {
			return Uint32
		}
	case Int64:
		if unsigned {
			return Uint64
		}
	case Int24:
		if unsigned {
			return Uint24
		}
	case Text:
		if binary {
			return Blob
		}
	case VarChar:
		if binary {
			return VarBinary
		}
	case Char:
		// Char can be promoted to Binary, Enum or Set, in that
		// order of precedence.
		if binary {
			return Binary
		}
		if flags&mysqlEnum != 0 {
			return Enum
		}
		if flags&mysqlSet != 0 {
			return Set
		}
	}
	return typ
}
// MySQLToType computes the vitess type from mysql type and flags.
func MySQLToType(mysqlType, flags int64) (typ querypb.Type, err error) {
	base, found := mysqlToType[mysqlType]
	if !found {
		return 0, fmt.Errorf("unsupported type: %d", mysqlType)
	}
	return modifyType(base, flags), nil
}
// typeToMySQL is the reverse of mysqlToType.
// Where several vitess types share one MySQL code (e.g. Char, Binary,
// Enum and Set are all 254), the flags field disambiguates.
var typeToMySQL = map[querypb.Type]struct {
	typ   int64
	flags int64
}{
	Int8:      {typ: 1},
	Uint8:     {typ: 1, flags: mysqlUnsigned},
	Int16:     {typ: 2},
	Uint16:    {typ: 2, flags: mysqlUnsigned},
	Int32:     {typ: 3},
	Uint32:    {typ: 3, flags: mysqlUnsigned},
	Float32:   {typ: 4},
	Float64:   {typ: 5},
	Null:      {typ: 6, flags: mysqlBinary},
	Timestamp: {typ: 7},
	Int64:     {typ: 8},
	Uint64:    {typ: 8, flags: mysqlUnsigned},
	Int24:     {typ: 9},
	Uint24:    {typ: 9, flags: mysqlUnsigned},
	Date:      {typ: 10, flags: mysqlBinary},
	Time:      {typ: 11, flags: mysqlBinary},
	Datetime:  {typ: 12, flags: mysqlBinary},
	Year:      {typ: 13, flags: mysqlUnsigned},
	Bit:       {typ: 16, flags: mysqlUnsigned},
	TypeJSON:  {typ: 245},
	Decimal:   {typ: 246},
	Text:      {typ: 252},
	Blob:      {typ: 252, flags: mysqlBinary},
	VarChar:   {typ: 253},
	VarBinary: {typ: 253, flags: mysqlBinary},
	Char:      {typ: 254},
	Binary:    {typ: 254, flags: mysqlBinary},
	Enum:      {typ: 254, flags: mysqlEnum},
	Set:       {typ: 254, flags: mysqlSet},
	Geometry:  {typ: 255},
}
// TypeToMySQL returns the equivalent mysql type and flag for a vitess type.
// An unknown type yields the zero value for both results.
func TypeToMySQL(typ querypb.Type) (mysqlType, flags int64) {
	entry := typeToMySQL[typ]
	return entry.typ, entry.flags
}

View File

@@ -0,0 +1,376 @@
/*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package sqltypes implements interfaces and types that represent SQL values.
package sqltypes
import (
"encoding/base64"
"encoding/json"
"fmt"
"strconv"
"github.com/xwb1989/sqlparser/dependency/bytes2"
"github.com/xwb1989/sqlparser/dependency/hack"
"github.com/xwb1989/sqlparser/dependency/querypb"
)
var (
	// NULL represents the NULL value.
	NULL = Value{}
	// DontEscape tells you if a character should not be escaped.
	DontEscape = byte(255)
	// nullstr holds the SQL literal "null".
	nullstr = []byte("null")
)

// BinWriter interface is used for encoding values.
// Types like bytes.Buffer conform to this interface.
// We expect the writer objects to be in-memory buffers.
// So, we don't expect the write operations to fail.
type BinWriter interface {
	Write([]byte) (int, error)
}
// Value can store any SQL value. If the value represents
// an integral type, the bytes are always stored as a canonical
// representation that matches how MySQL returns such values.
type Value struct {
	typ querypb.Type // the SQL type of the value
	val []byte       // the raw bytes of the value
}
// NewValue builds a Value using typ and val. If the value and typ
// don't match, it returns an error.
func NewValue(typ querypb.Type, val []byte) (v Value, err error) {
	switch {
	case IsSigned(typ):
		if _, err := strconv.ParseInt(string(val), 0, 64); err != nil {
			return NULL, err
		}
		return MakeTrusted(typ, val), nil
	case IsUnsigned(typ):
		if _, err := strconv.ParseUint(string(val), 0, 64); err != nil {
			return NULL, err
		}
		return MakeTrusted(typ, val), nil
	case IsFloat(typ) || typ == Decimal:
		if _, err := strconv.ParseFloat(string(val), 64); err != nil {
			return NULL, err
		}
		return MakeTrusted(typ, val), nil
	case IsQuoted(typ) || typ == Null:
		return MakeTrusted(typ, val), nil
	}
	// All other types are unsafe or invalid.
	// Bug fix: the message used to say "MakeValue", a function that does
	// not exist; report the actual entry point.
	return NULL, fmt.Errorf("invalid type specified for NewValue: %v", typ)
}
// MakeTrusted makes a new Value based on the type.
// This function should only be used if you know the value
// and type conform to the rules. Every place this function is
// called, a comment is needed that explains why it's justified.
// Exceptions: The current package and mysql package do not need
// comments. Other packages can also use the function to create
// VarBinary or VarChar values.
func MakeTrusted(typ querypb.Type, val []byte) Value {
	if typ != Null {
		return Value{typ: typ, val: val}
	}
	return NULL
}
// NewInt64 builds an Int64 Value.
func NewInt64(v int64) Value {
	return MakeTrusted(Int64, []byte(strconv.FormatInt(v, 10)))
}
// NewInt32 builds an Int32 Value.
func NewInt32(v int32) Value {
	return MakeTrusted(Int32, strconv.AppendInt(nil, int64(v), 10))
}
// NewUint64 builds an Uint64 Value.
func NewUint64(v uint64) Value {
	return MakeTrusted(Uint64, []byte(strconv.FormatUint(v, 10)))
}
// NewFloat64 builds an Float64 Value.
func NewFloat64(v float64) Value {
	return MakeTrusted(Float64, []byte(strconv.FormatFloat(v, 'g', -1, 64)))
}
// NewVarChar builds a VarChar Value from the given string.
func NewVarChar(s string) Value {
	return MakeTrusted(VarChar, []byte(s))
}
// NewVarBinary builds a VarBinary Value.
// The input is a string because it's the most common use case.
func NewVarBinary(s string) Value {
	return MakeTrusted(VarBinary, []byte(s))
}
// NewIntegral builds an integral type from a string representation.
// The type will be Int64 or Uint64. Int64 will be preferred where possible.
func NewIntegral(val string) (n Value, err error) {
	if signed, serr := strconv.ParseInt(val, 0, 64); serr == nil {
		return MakeTrusted(Int64, strconv.AppendInt(nil, signed, 10)), nil
	}
	unsigned, err := strconv.ParseUint(val, 0, 64)
	if err != nil {
		return Value{}, err
	}
	return MakeTrusted(Uint64, strconv.AppendUint(nil, unsigned, 10)), nil
}
// InterfaceToValue builds a value from a go type.
// Supported types are nil, int64, uint64, float64,
// string and []byte.
// This function is deprecated. Use the type-specific
// functions instead.
func InterfaceToValue(goval interface{}) (Value, error) {
	switch v := goval.(type) {
	case nil:
		return NULL, nil
	case []byte:
		return MakeTrusted(VarBinary, v), nil
	case int64:
		return NewInt64(v), nil
	case uint64:
		return NewUint64(v), nil
	case float64:
		return NewFloat64(v), nil
	case string:
		return NewVarChar(v), nil
	}
	return NULL, fmt.Errorf("unexpected type %T: %v", goval, goval)
}
// Type returns the SQL type of Value.
func (v Value) Type() querypb.Type {
	return v.typ
}
// Raw returns the internal representation of the value. For newer types,
// this may not match MySQL's representation.
func (v Value) Raw() []byte {
	return v.val
}
// ToBytes returns the value as MySQL would return it as []byte.
// In contrast, Raw returns the internal representation of the Value, which may not
// match MySQL's representation for newer types.
// If the value is not convertible like in the case of Expression, it returns nil.
func (v Value) ToBytes() []byte {
	if v.typ != Expression {
		return v.val
	}
	return nil
}
// Len returns the length of the raw value in bytes.
func (v Value) Len() int {
	return len(v.val)
}
// ToString returns the value as MySQL would return it as string.
// If the value is not convertible like in the case of Expression,
// it returns the empty string.
func (v Value) ToString() string {
	if v.typ != Expression {
		return hack.String(v.val)
	}
	return ""
}
// String returns a printable version of the value.
func (v Value) String() string {
	switch {
	case v.typ == Null:
		return "NULL"
	case v.IsQuoted():
		return fmt.Sprintf("%v(%q)", v.typ, v.val)
	default:
		return fmt.Sprintf("%v(%s)", v.typ, v.val)
	}
}
// EncodeSQL encodes the value into an SQL statement. Can be binary.
func (v Value) EncodeSQL(b BinWriter) {
	if v.typ == Null {
		b.Write(nullstr)
	} else if v.IsQuoted() {
		encodeBytesSQL(v.val, b)
	} else {
		b.Write(v.val)
	}
}
// EncodeASCII encodes the value using 7-bit clean ascii bytes.
func (v Value) EncodeASCII(b BinWriter) {
	if v.typ == Null {
		b.Write(nullstr)
	} else if v.IsQuoted() {
		encodeBytesASCII(v.val, b)
	} else {
		b.Write(v.val)
	}
}
// IsNull returns true if Value is null.
func (v Value) IsNull() bool {
	return v.typ == Null
}

// IsIntegral returns true if Value is an integral (signed or unsigned).
func (v Value) IsIntegral() bool {
	return IsIntegral(v.typ)
}

// IsSigned returns true if Value is a signed integral.
func (v Value) IsSigned() bool {
	return IsSigned(v.typ)
}

// IsUnsigned returns true if Value is an unsigned integral.
func (v Value) IsUnsigned() bool {
	return IsUnsigned(v.typ)
}

// IsFloat returns true if Value is a float.
func (v Value) IsFloat() bool {
	return IsFloat(v.typ)
}

// IsQuoted returns true if Value must be SQL-quoted when encoded.
func (v Value) IsQuoted() bool {
	return IsQuoted(v.typ)
}

// IsText returns true if Value is a collatable text.
func (v Value) IsText() bool {
	return IsText(v.typ)
}

// IsBinary returns true if Value is binary.
func (v Value) IsBinary() bool {
	return IsBinary(v.typ)
}
// MarshalJSON should only be used for testing.
// It's not a complete implementation.
func (v Value) MarshalJSON() ([]byte, error) {
	if v.IsQuoted() {
		return json.Marshal(v.ToString())
	}
	if v.typ == Null {
		return nullstr, nil
	}
	return v.val, nil
}
// UnmarshalJSON should only be used for testing.
// It's not a complete implementation.
func (v *Value) UnmarshalJSON(b []byte) error {
	if len(b) == 0 {
		return fmt.Errorf("error unmarshaling empty bytes")
	}
	var (
		val interface{}
		err error
	)
	// Dispatch on the first byte to pick a concrete decode target.
	switch b[0] {
	case '-':
		var n int64
		err = json.Unmarshal(b, &n)
		val = n
	case '"':
		var s []byte
		err = json.Unmarshal(b, &s)
		val = s
	case 'n': // null
		err = json.Unmarshal(b, &val)
	default:
		var u uint64
		err = json.Unmarshal(b, &u)
		val = u
	}
	if err != nil {
		return err
	}
	*v, err = InterfaceToValue(val)
	return err
}
// encodeBytesSQL writes val as a single-quoted SQL string literal,
// backslash-escaping bytes according to SQLEncodeMap.
func encodeBytesSQL(val []byte, b BinWriter) {
	out := &bytes2.Buffer{}
	out.WriteByte('\'')
	for _, ch := range val {
		enc := SQLEncodeMap[ch]
		if enc == DontEscape {
			out.WriteByte(ch)
			continue
		}
		out.WriteByte('\\')
		out.WriteByte(enc)
	}
	out.WriteByte('\'')
	b.Write(out.Bytes())
}
// encodeBytesASCII writes val as a single-quoted, base64-encoded
// literal so the output contains only 7-bit clean ascii bytes.
func encodeBytesASCII(val []byte, b BinWriter) {
	out := &bytes2.Buffer{}
	out.WriteByte('\'')
	enc := base64.NewEncoder(base64.StdEncoding, out)
	enc.Write(val)
	enc.Close()
	out.WriteByte('\'')
	b.Write(out.Bytes())
}
// SQLEncodeMap specifies how to escape binary data with '\'.
// Complies to http://dev.mysql.com/doc/refman/5.1/en/string-syntax.html
var SQLEncodeMap [256]byte

// SQLDecodeMap is the reverse of SQLEncodeMap
var SQLDecodeMap [256]byte

// encodeRef maps each byte that must be escaped to the character that
// follows the backslash in its escape sequence.
var encodeRef = map[byte]byte{
	'\x00': '0',
	'\'':   '\'',
	'"':    '"',
	'\b':   'b',
	'\n':   'n',
	'\r':   'r',
	'\t':   't',
	26:     'Z', // ctl-Z
	'\\':   '\\',
}
// init populates the encode/decode tables: every byte defaults to
// DontEscape, then the pairs from encodeRef are installed.
func init() {
	for i := 0; i < 256; i++ {
		SQLEncodeMap[i] = DontEscape
		SQLDecodeMap[i] = DontEscape
	}
	for from, to := range encodeRef {
		SQLEncodeMap[from] = to
		SQLDecodeMap[to] = from
	}
}

99
vendor/github.com/xwb1989/sqlparser/encodable.go generated vendored Normal file
View File

@@ -0,0 +1,99 @@
/*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sqlparser
import (
"bytes"
"github.com/xwb1989/sqlparser/dependency/sqltypes"
)
// This file contains types that are 'Encodable'.

// Encodable defines the interface for types that can
// be custom-encoded into SQL.
type Encodable interface {
	// EncodeSQL appends the SQL representation of the value to buf.
	EncodeSQL(buf *bytes.Buffer)
}
// InsertValues is a custom SQL encoder for the values of
// an insert statement: one inner slice per row.
type InsertValues [][]sqltypes.Value
// EncodeSQL performs the SQL encoding for InsertValues, producing a
// comma-separated list of parenthesized rows.
func (iv InsertValues) EncodeSQL(buf *bytes.Buffer) {
	for rowNum, row := range iv {
		if rowNum > 0 {
			buf.WriteString(", ")
		}
		buf.WriteByte('(')
		for colNum, val := range row {
			if colNum > 0 {
				buf.WriteString(", ")
			}
			val.EncodeSQL(buf)
		}
		buf.WriteByte(')')
	}
}
// TupleEqualityList is for generating equality constraints
// for tables that have composite primary keys.
type TupleEqualityList struct {
	Columns []ColIdent         // the key columns
	Rows    [][]sqltypes.Value // one value per column, per row
}
// EncodeSQL generates the where clause constraints for the tuple
// equality: an IN list for a single column, OR-ed equalities otherwise.
func (tpl *TupleEqualityList) EncodeSQL(buf *bytes.Buffer) {
	if len(tpl.Columns) != 1 {
		tpl.encodeAsEquality(buf)
		return
	}
	tpl.encodeAsIn(buf)
}
// encodeAsIn emits "col in (v1, v2, ...)" for the single-column case.
func (tpl *TupleEqualityList) encodeAsIn(buf *bytes.Buffer) {
	Append(buf, tpl.Columns[0])
	buf.WriteString(" in (")
	for i, row := range tpl.Rows {
		if i > 0 {
			buf.WriteString(", ")
		}
		row[0].EncodeSQL(buf)
	}
	buf.WriteByte(')')
}
// encodeAsEquality emits "(c1 = v11 and c2 = v12) or (...)" for
// composite keys.
func (tpl *TupleEqualityList) encodeAsEquality(buf *bytes.Buffer) {
	for i, row := range tpl.Rows {
		if i > 0 {
			buf.WriteString(" or ")
		}
		buf.WriteString("(")
		for j, col := range tpl.Columns {
			if j > 0 {
				buf.WriteString(" and ")
			}
			Append(buf, col)
			buf.WriteString(" = ")
			row[j].EncodeSQL(buf)
		}
		buf.WriteByte(')')
	}
}

View File

@@ -0,0 +1,39 @@
/*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sqlparser
// FormatImpossibleQuery creates an impossible query in a TrackedBuffer.
// An impossible query is a modified version of a query where all selects have where clauses that are
// impossible for mysql to resolve. This is used in the vtgate and vttablet:
//
// - In the vtgate it's used for joins: if the first query returns no result, then vtgate uses the impossible
// query just to fetch field info from vttablet
// - In the vttablet, it's just an optimization: the field info is fetched once form MySQL, cached and reused
// for subsequent queries
func FormatImpossibleQuery(buf *TrackedBuffer, node SQLNode) {
	switch n := node.(type) {
	case *Select:
		buf.Myprintf("select %v from %v where 1 != 1", n.SelectExprs, n.From)
		if n.GroupBy != nil {
			n.GroupBy.Format(buf)
		}
	case *Union:
		buf.Myprintf("%v %s %v", n.Left, n.Type, n.Right)
	default:
		n.Format(buf)
	}
}

224
vendor/github.com/xwb1989/sqlparser/normalizer.go generated vendored Normal file
View File

@@ -0,0 +1,224 @@
/*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sqlparser
import (
"fmt"
"github.com/xwb1989/sqlparser/dependency/sqltypes"
"github.com/xwb1989/sqlparser/dependency/querypb"
)
// Normalize changes the statement to use bind values, and
// updates the bind vars to those values. The supplied prefix
// is used to generate the bind var names. The function ensures
// that there are no collisions with existing bind vars.
// Within Select constructs, bind vars are deduped. This allows
// us to identify vindex equality. Otherwise, every value is
// treated as distinct.
func Normalize(stmt Statement, bindVars map[string]*querypb.BindVariable, prefix string) {
	_ = Walk(newNormalizer(stmt, bindVars, prefix).WalkStatement, stmt)
}
// normalizer carries the state used while rewriting literals to bind vars.
type normalizer struct {
	stmt     Statement                        // statement being normalized
	bindVars map[string]*querypb.BindVariable // output: generated bind vars
	prefix   string                           // prefix for generated names
	reserved map[string]struct{}              // names already taken (existing + generated)
	counter  int                              // next numeric suffix to try
	vals     map[string]string                // value -> bindvar name, for dedup within a Select
}
// newNormalizer builds a normalizer whose reserved set is seeded with
// the bind vars already present in the statement.
func newNormalizer(stmt Statement, bindVars map[string]*querypb.BindVariable, prefix string) *normalizer {
	nz := &normalizer{
		stmt:     stmt,
		bindVars: bindVars,
		prefix:   prefix,
		counter:  1,
		reserved: GetBindvars(stmt),
		vals:     map[string]string{},
	}
	return nz
}
// WalkStatement is the top level walk function.
// If it encounters a Select, it switches to a mode
// where variables are deduped.
func (nz *normalizer) WalkStatement(node SQLNode) (bool, error) {
	if sel, ok := node.(*Select); ok {
		_ = Walk(nz.WalkSelect, sel)
		// The Select subtree was fully handled above; don't descend again.
		return false, nil
	}
	switch n := node.(type) {
	case *SQLVal:
		nz.convertSQLVal(n)
	case *ComparisonExpr:
		nz.convertComparison(n)
	}
	return true, nil
}
// WalkSelect normalizes the AST in Select mode (values are deduped).
func (nz *normalizer) WalkSelect(node SQLNode) (bool, error) {
	switch n := node.(type) {
	case *ComparisonExpr:
		nz.convertComparison(n)
	case *SQLVal:
		nz.convertSQLValDedup(n)
	}
	return true, nil
}
// convertSQLValDedup rewrites an SQLVal to a bind var, reusing an
// existing bind var when the same value was seen before in this Select.
func (nz *normalizer) convertSQLValDedup(node *SQLVal) {
	// If value is too long, don't dedup.
	// Such values are most likely not for vindexes.
	// We save a lot of CPU because we avoid building
	// the key for them.
	if len(node.Val) > 256 {
		nz.convertSQLVal(node)
		return
	}
	// Make the bindvar
	bval := nz.sqlToBindvar(node)
	if bval == nil {
		return
	}
	// Prefixing strings with "'" ensures that a string and a number
	// that have the same representation don't collide in the dedup map.
	key := string(node.Val)
	if bval.Type == sqltypes.VarBinary {
		key = "'" + key
	}
	bvname, seen := nz.vals[key]
	if !seen {
		// First occurrence of this value: mint a fresh bindvar.
		bvname = nz.newName()
		nz.vals[key] = bvname
		nz.bindVars[bvname] = bval
	}
	// Modify the AST node to a bindvar.
	node.Type = ValArg
	node.Val = append([]byte(":"), bvname...)
}
// convertSQLVal converts an SQLVal without the dedup.
func (nz *normalizer) convertSQLVal(node *SQLVal) {
	bval := nz.sqlToBindvar(node)
	if bval == nil {
		return
	}
	name := nz.newName()
	nz.bindVars[name] = bval
	node.Type = ValArg
	node.Val = append([]byte(":"), name...)
}
// convertComparison attempts to convert IN clauses to
// use the list bind var construct. If it fails, it returns
// with no change made. The walk function will then continue
// and iterate on converting each individual value into separate
// bind vars.
func (nz *normalizer) convertComparison(node *ComparisonExpr) {
	if node.Operator != InStr && node.Operator != NotInStr {
		return
	}
	tuple, ok := node.Right.(ValTuple)
	if !ok {
		return
	}
	// The RHS is a tuple of values: build a single list bindvar.
	bvals := &querypb.BindVariable{
		Type: querypb.Type_TUPLE,
	}
	for _, expr := range tuple {
		bval := nz.sqlToBindvar(expr)
		if bval == nil {
			// Not every element is convertible; leave the node untouched.
			return
		}
		bvals.Values = append(bvals.Values, &querypb.Value{
			Type:  bval.Type,
			Value: bval.Value,
		})
	}
	name := nz.newName()
	nz.bindVars[name] = bvals
	// Modify RHS to be a list bindvar.
	node.Right = ListArg(append([]byte("::"), name...))
}
// sqlToBindvar converts a literal SQLVal node into a bind variable.
// It returns nil for nodes that cannot be converted.
func (nz *normalizer) sqlToBindvar(node SQLNode) *querypb.BindVariable {
	sqlval, ok := node.(*SQLVal)
	if !ok {
		return nil
	}
	var typ querypb.Type
	switch sqlval.Type {
	case StrVal:
		typ = sqltypes.VarBinary
	case IntVal:
		typ = sqltypes.Int64
	case FloatVal:
		typ = sqltypes.Float64
	default:
		return nil
	}
	v, err := sqltypes.NewValue(typ, sqlval.Val)
	if err != nil {
		return nil
	}
	return sqltypes.ValueBindVariable(v)
}
// newName returns the next unused bind var name with nz.prefix,
// reserving it so later calls (and pre-existing query bind vars)
// cannot collide with it.
func (nz *normalizer) newName() string {
	for {
		newName := fmt.Sprintf("%s%d", nz.prefix, nz.counter)
		// Always advance the counter. The original only advanced it on a
		// collision, so every call re-probed from the same starting value
		// and hit all previously generated names — accidental O(n^2).
		// The generated sequence of names is unchanged.
		nz.counter++
		if _, ok := nz.reserved[newName]; !ok {
			nz.reserved[newName] = struct{}{}
			return newName
		}
	}
}
// GetBindvars returns a map of the bind vars referenced in the statement.
// TODO(sougou); This function gets called again from vtgate/planbuilder.
// Ideally, this should be done only once.
func GetBindvars(stmt Statement) map[string]struct{} {
	found := map[string]struct{}{}
	_ = Walk(func(node SQLNode) (kontinue bool, err error) {
		switch n := node.(type) {
		case *SQLVal:
			// ":name" -> strip the leading colon.
			if n.Type == ValArg {
				found[string(n.Val[1:])] = struct{}{}
			}
		case ListArg:
			// "::name" -> strip the leading double colon.
			found[string(n[2:])] = struct{}{}
		}
		return true, nil
	}, stmt)
	return found
}

119
vendor/github.com/xwb1989/sqlparser/parsed_query.go generated vendored Normal file
View File

@@ -0,0 +1,119 @@
/*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sqlparser
import (
"bytes"
"fmt"
"github.com/xwb1989/sqlparser/dependency/querypb"
"github.com/xwb1989/sqlparser/dependency/sqltypes"
)
// ParsedQuery represents a parsed query where
// bind locations are precomputed for fast substitutions.
type ParsedQuery struct {
	Query         string         // the query text with bind var placeholders
	bindLocations []bindLocation // positions of the placeholders in Query
}
// bindLocation is the byte offset and length of one bind var
// placeholder inside ParsedQuery.Query.
type bindLocation struct {
	offset, length int
}
// NewParsedQuery returns a ParsedQuery of the ast.
func NewParsedQuery(node SQLNode) *ParsedQuery {
	tb := NewTrackedBuffer(nil)
	tb.Myprintf("%v", node)
	return tb.ParsedQuery()
}
// GenerateQuery generates a query by substituting the specified
// bindVariables. The extras parameter specifies special parameters
// that can perform custom encoding.
func (pq *ParsedQuery) GenerateQuery(bindVariables map[string]*querypb.BindVariable, extras map[string]Encodable) ([]byte, error) {
	if len(pq.bindLocations) == 0 {
		return []byte(pq.Query), nil
	}
	var out bytes.Buffer
	out.Grow(len(pq.Query))
	pos := 0
	for _, loc := range pq.bindLocations {
		// Copy the literal text before this placeholder.
		out.WriteString(pq.Query[pos:loc.offset])
		name := pq.Query[loc.offset : loc.offset+loc.length]
		if enc, ok := extras[name[1:]]; ok {
			enc.EncodeSQL(&out)
		} else {
			bv, _, err := FetchBindVar(name, bindVariables)
			if err != nil {
				return nil, err
			}
			EncodeValue(&out, bv)
		}
		pos = loc.offset + loc.length
	}
	out.WriteString(pq.Query[pos:])
	return out.Bytes(), nil
}
// EncodeValue encodes one bind variable value into the query.
func EncodeValue(buf *bytes.Buffer, value *querypb.BindVariable) {
	if value.Type != querypb.Type_TUPLE {
		// Since we already check for TUPLE, we don't expect an error.
		v, _ := sqltypes.BindVariableToValue(value)
		v.EncodeSQL(buf)
		return
	}
	// It's a TUPLE: encode as a parenthesized, comma-separated list.
	buf.WriteByte('(')
	for i, bv := range value.Values {
		if i > 0 {
			buf.WriteString(", ")
		}
		sqltypes.ProtoToValue(bv).EncodeSQL(buf)
	}
	buf.WriteByte(')')
}
// FetchBindVar resolves the bind variable by fetching it from bindVariables.
// name arrives with its leading ':' (or '::' for list args).
func FetchBindVar(name string, bindVariables map[string]*querypb.BindVariable) (val *querypb.BindVariable, isList bool, err error) {
	name = name[1:]
	if name[0] == ':' {
		isList = true
		name = name[1:]
	}
	supplied, found := bindVariables[name]
	if !found {
		return nil, false, fmt.Errorf("missing bind var %s", name)
	}
	if !isList {
		if supplied.Type == querypb.Type_TUPLE {
			return nil, false, fmt.Errorf("unexpected arg type (TUPLE) for non-list key %s", name)
		}
		return supplied, false, nil
	}
	if supplied.Type != querypb.Type_TUPLE {
		return nil, false, fmt.Errorf("unexpected list arg type (%v) for key %s", supplied.Type, name)
	}
	if len(supplied.Values) == 0 {
		return nil, false, fmt.Errorf("empty list supplied for %s", name)
	}
	return supplied, true, nil
}

19
vendor/github.com/xwb1989/sqlparser/redact_query.go generated vendored Normal file
View File

@@ -0,0 +1,19 @@
package sqlparser
import querypb "github.com/xwb1989/sqlparser/dependency/querypb"
// RedactSQLQuery returns a sql string with the params stripped out for display
func RedactSQLQuery(sql string) (string, error) {
bv := map[string]*querypb.BindVariable{}
sqlStripped, comments := SplitMarginComments(sql)
stmt, err := Parse(sqlStripped)
if err != nil {
return "", err
}
prefix := "redacted"
Normalize(stmt, bv, prefix)
return comments.Leading + String(stmt) + comments.Trailing, nil
}

6136
vendor/github.com/xwb1989/sqlparser/sql.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

3159
vendor/github.com/xwb1989/sqlparser/sql.y generated vendored Normal file

File diff suppressed because it is too large Load Diff

950
vendor/github.com/xwb1989/sqlparser/token.go generated vendored Normal file
View File

@@ -0,0 +1,950 @@
/*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sqlparser
import (
"bytes"
"errors"
"fmt"
"io"
"github.com/xwb1989/sqlparser/dependency/bytes2"
"github.com/xwb1989/sqlparser/dependency/sqltypes"
)
const (
defaultBufSize = 4096
eofChar = 0x100
)
// Tokenizer is the struct used to generate SQL
// tokens for the parser.
type Tokenizer struct {
	InStream       io.Reader // optional byte source; nil when scanning an in-memory buffer
	AllowComments  bool      // when true, COMMENT tokens are surfaced to the parser
	ForceEOF       bool      // when true, Scan skips the rest of the statement and reports EOF
	lastChar       uint16    // one-character lookahead (eofChar at end of input)
	Position       int       // current scanner position, used in error messages
	lastToken      []byte    // most recently returned token value, used in error messages
	LastError      error     // parse error recorded by Error
	posVarIndex    int       // counter for generated positional bind vars (:v1, :v2, ...)
	ParseTree      Statement
	partialDDL     *DDL
	nesting        int
	multi          bool      // when true, ';' terminates scanning (multi-statement mode)
	specialComment *Tokenizer // sub-tokenizer for /*! ... */ MySQL-specific comments
	buf            []byte
	bufPos         int
	bufSize        int
}
// NewStringTokenizer creates a new Tokenizer for the
// sql string.
func NewStringTokenizer(sql string) *Tokenizer {
	src := []byte(sql)
	return &Tokenizer{
		bufSize: len(src),
		buf:     src,
	}
}
// NewTokenizer creates a new Tokenizer reading a sql
// string from the io.Reader.
func NewTokenizer(r io.Reader) *Tokenizer {
	tkn := &Tokenizer{InStream: r}
	tkn.buf = make([]byte, defaultBufSize)
	return tkn
}
// keywords is a map of mysql keywords that fall into two categories:
// 1) keywords considered reserved by MySQL
// 2) keywords for us to handle specially in sql.y
//
// Those marked as UNUSED are likely reserved keywords. We add them here so that
// when rewriting queries we can properly backtick quote them so they don't cause issues
//
// NOTE: If you add new keywords, add them also to the reserved_keywords or
// non_reserved_keywords grammar in sql.y -- this will allow the keyword to be used
// in identifiers. See the docs for each grammar to determine which one to put it into.
var keywords = map[string]int{
"accessible": UNUSED,
"add": ADD,
"against": AGAINST,
"all": ALL,
"alter": ALTER,
"analyze": ANALYZE,
"and": AND,
"as": AS,
"asc": ASC,
"asensitive": UNUSED,
"auto_increment": AUTO_INCREMENT,
"before": UNUSED,
"begin": BEGIN,
"between": BETWEEN,
"bigint": BIGINT,
"binary": BINARY,
"_binary": UNDERSCORE_BINARY,
"bit": BIT,
"blob": BLOB,
"bool": BOOL,
"boolean": BOOLEAN,
"both": UNUSED,
"by": BY,
"call": UNUSED,
"cascade": UNUSED,
"case": CASE,
"cast": CAST,
"change": UNUSED,
"char": CHAR,
"character": CHARACTER,
"charset": CHARSET,
"check": UNUSED,
"collate": COLLATE,
"column": COLUMN,
"comment": COMMENT_KEYWORD,
"committed": COMMITTED,
"commit": COMMIT,
"condition": UNUSED,
"constraint": CONSTRAINT,
"continue": UNUSED,
"convert": CONVERT,
"substr": SUBSTR,
"substring": SUBSTRING,
"create": CREATE,
"cross": CROSS,
"current_date": CURRENT_DATE,
"current_time": CURRENT_TIME,
"current_timestamp": CURRENT_TIMESTAMP,
"current_user": UNUSED,
"cursor": UNUSED,
"database": DATABASE,
"databases": DATABASES,
"day_hour": UNUSED,
"day_microsecond": UNUSED,
"day_minute": UNUSED,
"day_second": UNUSED,
"date": DATE,
"datetime": DATETIME,
"dec": UNUSED,
"decimal": DECIMAL,
"declare": UNUSED,
"default": DEFAULT,
"delayed": UNUSED,
"delete": DELETE,
"desc": DESC,
"describe": DESCRIBE,
"deterministic": UNUSED,
"distinct": DISTINCT,
"distinctrow": UNUSED,
"div": DIV,
"double": DOUBLE,
"drop": DROP,
"duplicate": DUPLICATE,
"each": UNUSED,
"else": ELSE,
"elseif": UNUSED,
"enclosed": UNUSED,
"end": END,
"enum": ENUM,
"escape": ESCAPE,
"escaped": UNUSED,
"exists": EXISTS,
"exit": UNUSED,
"explain": EXPLAIN,
"expansion": EXPANSION,
"extended": EXTENDED,
"false": FALSE,
"fetch": UNUSED,
"float": FLOAT_TYPE,
"float4": UNUSED,
"float8": UNUSED,
"for": FOR,
"force": FORCE,
"foreign": FOREIGN,
"from": FROM,
"full": FULL,
"fulltext": FULLTEXT,
"generated": UNUSED,
"geometry": GEOMETRY,
"geometrycollection": GEOMETRYCOLLECTION,
"get": UNUSED,
"global": GLOBAL,
"grant": UNUSED,
"group": GROUP,
"group_concat": GROUP_CONCAT,
"having": HAVING,
"high_priority": UNUSED,
"hour_microsecond": UNUSED,
"hour_minute": UNUSED,
"hour_second": UNUSED,
"if": IF,
"ignore": IGNORE,
"in": IN,
"index": INDEX,
"infile": UNUSED,
"inout": UNUSED,
"inner": INNER,
"insensitive": UNUSED,
"insert": INSERT,
"int": INT,
"int1": UNUSED,
"int2": UNUSED,
"int3": UNUSED,
"int4": UNUSED,
"int8": UNUSED,
"integer": INTEGER,
"interval": INTERVAL,
"into": INTO,
"io_after_gtids": UNUSED,
"is": IS,
"isolation": ISOLATION,
"iterate": UNUSED,
"join": JOIN,
"json": JSON,
"key": KEY,
"keys": KEYS,
"key_block_size": KEY_BLOCK_SIZE,
"kill": UNUSED,
"language": LANGUAGE,
"last_insert_id": LAST_INSERT_ID,
"leading": UNUSED,
"leave": UNUSED,
"left": LEFT,
"less": LESS,
"level": LEVEL,
"like": LIKE,
"limit": LIMIT,
"linear": UNUSED,
"lines": UNUSED,
"linestring": LINESTRING,
"load": UNUSED,
"localtime": LOCALTIME,
"localtimestamp": LOCALTIMESTAMP,
"lock": LOCK,
"long": UNUSED,
"longblob": LONGBLOB,
"longtext": LONGTEXT,
"loop": UNUSED,
"low_priority": UNUSED,
"master_bind": UNUSED,
"match": MATCH,
"maxvalue": MAXVALUE,
"mediumblob": MEDIUMBLOB,
"mediumint": MEDIUMINT,
"mediumtext": MEDIUMTEXT,
"middleint": UNUSED,
"minute_microsecond": UNUSED,
"minute_second": UNUSED,
"mod": MOD,
"mode": MODE,
"modifies": UNUSED,
"multilinestring": MULTILINESTRING,
"multipoint": MULTIPOINT,
"multipolygon": MULTIPOLYGON,
"names": NAMES,
"natural": NATURAL,
"nchar": NCHAR,
"next": NEXT,
"not": NOT,
"no_write_to_binlog": UNUSED,
"null": NULL,
"numeric": NUMERIC,
"offset": OFFSET,
"on": ON,
"only": ONLY,
"optimize": OPTIMIZE,
"optimizer_costs": UNUSED,
"option": UNUSED,
"optionally": UNUSED,
"or": OR,
"order": ORDER,
"out": UNUSED,
"outer": OUTER,
"outfile": UNUSED,
"partition": PARTITION,
"point": POINT,
"polygon": POLYGON,
"precision": UNUSED,
"primary": PRIMARY,
"processlist": PROCESSLIST,
"procedure": PROCEDURE,
"query": QUERY,
"range": UNUSED,
"read": READ,
"reads": UNUSED,
"read_write": UNUSED,
"real": REAL,
"references": UNUSED,
"regexp": REGEXP,
"release": UNUSED,
"rename": RENAME,
"reorganize": REORGANIZE,
"repair": REPAIR,
"repeat": UNUSED,
"repeatable": REPEATABLE,
"replace": REPLACE,
"require": UNUSED,
"resignal": UNUSED,
"restrict": UNUSED,
"return": UNUSED,
"revoke": UNUSED,
"right": RIGHT,
"rlike": REGEXP,
"rollback": ROLLBACK,
"schema": SCHEMA,
"schemas": UNUSED,
"second_microsecond": UNUSED,
"select": SELECT,
"sensitive": UNUSED,
"separator": SEPARATOR,
"serializable": SERIALIZABLE,
"session": SESSION,
"set": SET,
"share": SHARE,
"show": SHOW,
"signal": UNUSED,
"signed": SIGNED,
"smallint": SMALLINT,
"spatial": SPATIAL,
"specific": UNUSED,
"sql": UNUSED,
"sqlexception": UNUSED,
"sqlstate": UNUSED,
"sqlwarning": UNUSED,
"sql_big_result": UNUSED,
"sql_cache": SQL_CACHE,
"sql_calc_found_rows": UNUSED,
"sql_no_cache": SQL_NO_CACHE,
"sql_small_result": UNUSED,
"ssl": UNUSED,
"start": START,
"starting": UNUSED,
"status": STATUS,
"stored": UNUSED,
"straight_join": STRAIGHT_JOIN,
"stream": STREAM,
"table": TABLE,
"tables": TABLES,
"terminated": UNUSED,
"text": TEXT,
"than": THAN,
"then": THEN,
"time": TIME,
"timestamp": TIMESTAMP,
"tinyblob": TINYBLOB,
"tinyint": TINYINT,
"tinytext": TINYTEXT,
"to": TO,
"trailing": UNUSED,
"transaction": TRANSACTION,
"trigger": TRIGGER,
"true": TRUE,
"truncate": TRUNCATE,
"uncommitted": UNCOMMITTED,
"undo": UNUSED,
"union": UNION,
"unique": UNIQUE,
"unlock": UNUSED,
"unsigned": UNSIGNED,
"update": UPDATE,
"usage": UNUSED,
"use": USE,
"using": USING,
"utc_date": UTC_DATE,
"utc_time": UTC_TIME,
"utc_timestamp": UTC_TIMESTAMP,
"values": VALUES,
"variables": VARIABLES,
"varbinary": VARBINARY,
"varchar": VARCHAR,
"varcharacter": UNUSED,
"varying": UNUSED,
"virtual": UNUSED,
"vindex": VINDEX,
"vindexes": VINDEXES,
"view": VIEW,
"vitess_keyspaces": VITESS_KEYSPACES,
"vitess_shards": VITESS_SHARDS,
"vitess_tablets": VITESS_TABLETS,
"vschema_tables": VSCHEMA_TABLES,
"when": WHEN,
"where": WHERE,
"while": UNUSED,
"with": WITH,
"write": WRITE,
"xor": UNUSED,
"year": YEAR,
"year_month": UNUSED,
"zerofill": ZEROFILL,
}
// keywordStrings contains the reverse mapping of token to keyword strings
var keywordStrings = map[int]string{}

// init fills keywordStrings from the keywords table, skipping
// placeholder UNUSED entries.
func init() {
	for str, id := range keywords {
		if id != UNUSED {
			keywordStrings[id] = str
		}
	}
}
// KeywordString returns the string corresponding to the given keyword,
// or "" if the token is not a keyword.
func KeywordString(id int) string {
	if str, ok := keywordStrings[id]; ok {
		return str
	}
	return ""
}
// Lex returns the next token from the Tokenizer.
// This function is used by go yacc.
func (tkn *Tokenizer) Lex(lval *yySymType) int {
	typ, val := tkn.Scan()
	// Skip comment tokens unless the parser asked to see them.
	for typ == COMMENT {
		if tkn.AllowComments {
			break
		}
		typ, val = tkn.Scan()
	}
	lval.bytes = val
	tkn.lastToken = val
	return typ
}
// Error is called by go yacc if there's a parsing error.
func (tkn *Tokenizer) Error(err string) {
	buf := &bytes2.Buffer{}
	if tkn.lastToken == nil {
		fmt.Fprintf(buf, "%s at position %v", err, tkn.Position)
	} else {
		fmt.Fprintf(buf, "%s at position %v near '%s'", err, tkn.Position, tkn.lastToken)
	}
	tkn.LastError = errors.New(buf.String())
	// Try and re-sync to the next statement
	if tkn.lastChar != ';' {
		tkn.skipStatement()
	}
}
// Scan scans the tokenizer for the next token and returns
// the token type and an optional value.
func (tkn *Tokenizer) Scan() (int, []byte) {
	if tkn.specialComment != nil {
		// Enter specialComment scan mode.
		// for scanning such kind of comment: /*! MySQL-specific code */
		specialComment := tkn.specialComment
		tok, val := specialComment.Scan()
		if tok != 0 {
			// return the specialComment scan result as the result
			return tok, val
		}
		// leave specialComment scan mode after all stream consumed.
		tkn.specialComment = nil
	}
	if tkn.lastChar == 0 {
		// First call: prime lastChar from the input.
		tkn.next()
	}
	if tkn.ForceEOF {
		// The parser requested an early end: consume the rest of
		// the statement and report EOF.
		tkn.skipStatement()
		return 0, nil
	}
	tkn.skipBlank()
	switch ch := tkn.lastChar; {
	case isLetter(ch):
		tkn.next()
		if ch == 'X' || ch == 'x' {
			if tkn.lastChar == '\'' {
				// x'...' hex literal.
				tkn.next()
				return tkn.scanHex()
			}
		}
		if ch == 'B' || ch == 'b' {
			if tkn.lastChar == '\'' {
				// b'...' bit literal.
				tkn.next()
				return tkn.scanBitLiteral()
			}
		}
		// A "@@" prefix marks a database system variable.
		isDbSystemVariable := false
		if ch == '@' && tkn.lastChar == '@' {
			isDbSystemVariable = true
		}
		return tkn.scanIdentifier(byte(ch), isDbSystemVariable)
	case isDigit(ch):
		return tkn.scanNumber(false)
	case ch == ':':
		return tkn.scanBindVar()
	case ch == ';' && tkn.multi:
		// In multi-statement mode a semicolon ends the statement.
		return 0, nil
	default:
		tkn.next()
		switch ch {
		case eofChar:
			return 0, nil
		case '=', ',', ';', '(', ')', '+', '*', '%', '^', '~':
			// Single-character tokens map to their own char code.
			return int(ch), nil
		case '&':
			if tkn.lastChar == '&' {
				tkn.next()
				return AND, nil
			}
			return int(ch), nil
		case '|':
			if tkn.lastChar == '|' {
				tkn.next()
				return OR, nil
			}
			return int(ch), nil
		case '?':
			// '?' becomes a generated positional bind variable :v1, :v2, ...
			tkn.posVarIndex++
			buf := new(bytes2.Buffer)
			fmt.Fprintf(buf, ":v%d", tkn.posVarIndex)
			return VALUE_ARG, buf.Bytes()
		case '.':
			if isDigit(tkn.lastChar) {
				// ".5"-style float literal.
				return tkn.scanNumber(true)
			}
			return int(ch), nil
		case '/':
			switch tkn.lastChar {
			case '/':
				tkn.next()
				return tkn.scanCommentType1("//")
			case '*':
				tkn.next()
				switch tkn.lastChar {
				case '!':
					// MySQL-specific comment: /*! ... */
					return tkn.scanMySQLSpecificComment()
				default:
					return tkn.scanCommentType2()
				}
			default:
				return int(ch), nil
			}
		case '#':
			return tkn.scanCommentType1("#")
		case '-':
			switch tkn.lastChar {
			case '-':
				tkn.next()
				return tkn.scanCommentType1("--")
			case '>':
				// JSON extraction operators: -> and ->>.
				tkn.next()
				if tkn.lastChar == '>' {
					tkn.next()
					return JSON_UNQUOTE_EXTRACT_OP, nil
				}
				return JSON_EXTRACT_OP, nil
			}
			return int(ch), nil
		case '<':
			switch tkn.lastChar {
			case '>':
				tkn.next()
				return NE, nil
			case '<':
				tkn.next()
				return SHIFT_LEFT, nil
			case '=':
				tkn.next()
				switch tkn.lastChar {
				case '>':
					// <=> is the null-safe equality operator.
					tkn.next()
					return NULL_SAFE_EQUAL, nil
				default:
					return LE, nil
				}
			default:
				return int(ch), nil
			}
		case '>':
			switch tkn.lastChar {
			case '=':
				tkn.next()
				return GE, nil
			case '>':
				tkn.next()
				return SHIFT_RIGHT, nil
			default:
				return int(ch), nil
			}
		case '!':
			if tkn.lastChar == '=' {
				tkn.next()
				return NE, nil
			}
			return int(ch), nil
		case '\'', '"':
			return tkn.scanString(ch, STRING)
		case '`':
			// Backtick-quoted identifier.
			return tkn.scanLiteralIdentifier()
		default:
			return LEX_ERROR, []byte{byte(ch)}
		}
	}
}
// skipStatement scans until the EOF, or end of statement (';'), is
// encountered, leaving the terminator unconsumed.
func (tkn *Tokenizer) skipStatement() {
	for {
		if ch := tkn.lastChar; ch == ';' || ch == eofChar {
			return
		}
		tkn.next()
	}
}
// skipBlank advances past whitespace: spaces, tabs, and line breaks.
func (tkn *Tokenizer) skipBlank() {
	for {
		switch tkn.lastChar {
		case ' ', '\n', '\r', '\t':
			tkn.next()
		default:
			return
		}
	}
}
// scanIdentifier scans an identifier or keyword starting with firstByte.
// When isDbSystemVariable is set, the extra characters accepted by
// isCarat are also allowed (for @@-style system variable names).
// Keywords are recognized case-insensitively and returned lowercased.
func (tkn *Tokenizer) scanIdentifier(firstByte byte, isDbSystemVariable bool) (int, []byte) {
	buffer := &bytes2.Buffer{}
	buffer.WriteByte(firstByte)
	for {
		ch := tkn.lastChar
		if !isLetter(ch) && !isDigit(ch) && !(isDbSystemVariable && isCarat(ch)) {
			break
		}
		buffer.WriteByte(byte(ch))
		tkn.next()
	}
	lowered := bytes.ToLower(buffer.Bytes())
	loweredStr := string(lowered)
	if keywordID, found := keywords[loweredStr]; found {
		return keywordID, lowered
	}
	// dual must always be case-insensitive
	if loweredStr == "dual" {
		return ID, lowered
	}
	return ID, buffer.Bytes()
}
// scanHex scans the digits of an x'...' hex literal; the caller has
// already consumed the opening quote. The literal must be closed by a
// quote and contain an even number of digits (whole bytes).
func (tkn *Tokenizer) scanHex() (int, []byte) {
	buffer := &bytes2.Buffer{}
	tkn.scanMantissa(16, buffer)
	digits := buffer.Bytes()
	if tkn.lastChar != '\'' {
		// Missing closing quote.
		return LEX_ERROR, digits
	}
	tkn.next()
	if len(digits)%2 != 0 {
		// Odd digit count cannot encode whole bytes.
		return LEX_ERROR, digits
	}
	return HEX, digits
}
// scanBitLiteral scans the digits of a b'...' bit literal; the caller
// has already consumed the opening quote.
func (tkn *Tokenizer) scanBitLiteral() (int, []byte) {
	buffer := &bytes2.Buffer{}
	tkn.scanMantissa(2, buffer)
	digits := buffer.Bytes()
	if tkn.lastChar != '\'' {
		// Missing closing quote.
		return LEX_ERROR, digits
	}
	tkn.next()
	return BIT_LITERAL, digits
}
// scanLiteralIdentifier scans a backtick-quoted identifier; the caller
// has already consumed the opening backtick. A doubled backtick inside
// the identifier is an escaped literal backtick. An empty identifier or
// EOF before the closing backtick is an error.
func (tkn *Tokenizer) scanLiteralIdentifier() (int, []byte) {
	buffer := &bytes2.Buffer{}
	for {
		switch tkn.lastChar {
		case '`':
			tkn.next()
			if tkn.lastChar != '`' {
				// Single backtick: end of the identifier.
				if buffer.Len() == 0 {
					return LEX_ERROR, buffer.Bytes()
				}
				return ID, buffer.Bytes()
			}
			// Doubled backtick escapes a literal one.
			buffer.WriteByte('`')
			tkn.next()
		case eofChar:
			// Premature EOF.
			return LEX_ERROR, buffer.Bytes()
		default:
			buffer.WriteByte(byte(tkn.lastChar))
			tkn.next()
		}
	}
}
// scanBindVar scans a ':'-prefixed bind variable (VALUE_ARG) or a
// "::"-prefixed list bind variable (LIST_ARG). The name must start with
// a letter and may continue with letters, digits, or dots.
func (tkn *Tokenizer) scanBindVar() (int, []byte) {
	buffer := &bytes2.Buffer{}
	buffer.WriteByte(byte(tkn.lastChar))
	tkn.next()
	token := VALUE_ARG
	if tkn.lastChar == ':' {
		// "::" introduces a list bind variable.
		token = LIST_ARG
		buffer.WriteByte(':')
		tkn.next()
	}
	if !isLetter(tkn.lastChar) {
		return LEX_ERROR, buffer.Bytes()
	}
	for {
		ch := tkn.lastChar
		if !isLetter(ch) && !isDigit(ch) && ch != '.' {
			break
		}
		buffer.WriteByte(byte(ch))
		tkn.next()
	}
	return token, buffer.Bytes()
}
// scanMantissa consumes consecutive digits valid in the given base into
// buffer, stopping at the first character that is not such a digit.
func (tkn *Tokenizer) scanMantissa(base int, buffer *bytes2.Buffer) {
	for digitVal(tkn.lastChar) < base {
		tkn.consumeNext(buffer)
	}
}
// scanNumber scans an INTEGRAL, FLOAT, or HEXNUM literal.
// seenDecimalPoint is true when the caller has already consumed a
// leading '.', so only the fractional part and exponent remain.
// A letter immediately following the number is a lex error.
func (tkn *Tokenizer) scanNumber(seenDecimalPoint bool) (int, []byte) {
	token := INTEGRAL
	buffer := &bytes2.Buffer{}
	if seenDecimalPoint {
		token = FLOAT
		buffer.WriteByte('.')
		tkn.scanMantissa(10, buffer)
		goto exponent
	}
	// 0x construct.
	if tkn.lastChar == '0' {
		tkn.consumeNext(buffer)
		if tkn.lastChar == 'x' || tkn.lastChar == 'X' {
			token = HEXNUM
			tkn.consumeNext(buffer)
			tkn.scanMantissa(16, buffer)
			goto exit
		}
	}
	tkn.scanMantissa(10, buffer)
	if tkn.lastChar == '.' {
		token = FLOAT
		tkn.consumeNext(buffer)
		tkn.scanMantissa(10, buffer)
	}
exponent:
	// Optional scientific-notation exponent with optional sign.
	if tkn.lastChar == 'e' || tkn.lastChar == 'E' {
		token = FLOAT
		tkn.consumeNext(buffer)
		if tkn.lastChar == '+' || tkn.lastChar == '-' {
			tkn.consumeNext(buffer)
		}
		tkn.scanMantissa(10, buffer)
	}
exit:
	// A letter cannot immediately follow a number.
	if isLetter(tkn.lastChar) {
		return LEX_ERROR, buffer.Bytes()
	}
	return token, buffer.Bytes()
}
// scanString scans a string literal delimited by delim (' or "),
// decoding backslash escapes and doubled delimiters, and returns the
// given token type with the decoded contents. The opening delimiter has
// already been consumed by the caller.
func (tkn *Tokenizer) scanString(delim uint16, typ int) (int, []byte) {
	var buffer bytes2.Buffer
	for {
		ch := tkn.lastChar
		if ch == eofChar {
			// Unterminated string.
			return LEX_ERROR, buffer.Bytes()
		}
		if ch != delim && ch != '\\' {
			buffer.WriteByte(byte(ch))
			// Scan ahead to the next interesting character.
			start := tkn.bufPos
			for ; tkn.bufPos < tkn.bufSize; tkn.bufPos++ {
				ch = uint16(tkn.buf[tkn.bufPos])
				if ch == delim || ch == '\\' {
					break
				}
			}
			// Bulk-copy the uninteresting run in one write.
			buffer.Write(tkn.buf[start:tkn.bufPos])
			tkn.Position += (tkn.bufPos - start)
			if tkn.bufPos >= tkn.bufSize {
				// Reached the end of the buffer without finding a delim or
				// escape character.
				tkn.next()
				continue
			}
			tkn.bufPos++
			tkn.Position++
		}
		tkn.next() // Read one past the delim or escape character.
		if ch == '\\' {
			if tkn.lastChar == eofChar {
				// String terminates mid escape character.
				return LEX_ERROR, buffer.Bytes()
			}
			// Decode the escape; characters the decode map marks as
			// DontEscape pass through unchanged.
			if decodedChar := sqltypes.SQLDecodeMap[byte(tkn.lastChar)]; decodedChar == sqltypes.DontEscape {
				ch = tkn.lastChar
			} else {
				ch = uint16(decodedChar)
			}
		} else if ch == delim && tkn.lastChar != delim {
			// Correctly terminated string, which is not a double delim.
			break
		}
		// Doubled delimiter or decoded escape: emit one character.
		buffer.WriteByte(byte(ch))
		tkn.next()
	}
	return typ, buffer.Bytes()
}
// scanCommentType1 scans a single-line comment introduced by prefix
// ("//", "--", or "#"), consuming through the terminating newline or up
// to EOF, and returns the full comment text including the prefix.
func (tkn *Tokenizer) scanCommentType1(prefix string) (int, []byte) {
	buffer := &bytes2.Buffer{}
	buffer.WriteString(prefix)
	for tkn.lastChar != eofChar {
		ch := tkn.lastChar
		tkn.consumeNext(buffer)
		if ch == '\n' {
			// The newline is included in the comment.
			break
		}
	}
	return COMMENT, buffer.Bytes()
}
// scanCommentType2 scans a /* ... */ block comment; the caller has
// already consumed the opening "/*". EOF before the closing "*/" is a
// lex error.
func (tkn *Tokenizer) scanCommentType2() (int, []byte) {
	buffer := &bytes2.Buffer{}
	buffer.WriteString("/*")
	for {
		if tkn.lastChar == eofChar {
			// Comment never closed.
			return LEX_ERROR, buffer.Bytes()
		}
		if tkn.lastChar != '*' {
			tkn.consumeNext(buffer)
			continue
		}
		tkn.consumeNext(buffer)
		if tkn.lastChar == '/' {
			tkn.consumeNext(buffer)
			return COMMENT, buffer.Bytes()
		}
	}
}
// scanMySQLSpecificComment scans a /*! ... */ comment (the caller has
// consumed "/*" and seen the '!'), extracts the embedded SQL, and
// re-enters Scan with a nested tokenizer over that SQL so its tokens
// are emitted as if they appeared inline.
func (tkn *Tokenizer) scanMySQLSpecificComment() (int, []byte) {
	buffer := &bytes2.Buffer{}
	buffer.WriteString("/*!")
	tkn.next()
	for {
		if tkn.lastChar == '*' {
			tkn.consumeNext(buffer)
			if tkn.lastChar == '/' {
				tkn.consumeNext(buffer)
				break
			}
			continue
		}
		if tkn.lastChar == eofChar {
			// Comment never closed.
			return LEX_ERROR, buffer.Bytes()
		}
		tkn.consumeNext(buffer)
	}
	// Strip the comment markers and tokenize the contained SQL via
	// the specialComment mode handled at the top of Scan.
	_, sql := ExtractMysqlComment(buffer.String())
	tkn.specialComment = NewStringTokenizer(sql)
	return tkn.Scan()
}
// consumeNext copies the current character into buffer and advances the
// tokenizer. Callers must check for eofChar before calling; reaching
// EOF here indicates a logic error.
func (tkn *Tokenizer) consumeNext(buffer *bytes2.Buffer) {
	if tkn.lastChar == eofChar {
		// This should never happen.
		panic("unexpected EOF")
	}
	buffer.WriteByte(byte(tkn.lastChar))
	tkn.next()
}
// next advances the tokenizer by one character, refilling the read
// buffer from InStream when it is exhausted. At end of input, lastChar
// is set to eofChar and Position stops advancing.
func (tkn *Tokenizer) next() {
	if tkn.bufPos >= tkn.bufSize && tkn.InStream != nil {
		// Try and refill the buffer
		var err error
		tkn.bufPos = 0
		if tkn.bufSize, err = tkn.InStream.Read(tkn.buf); err != io.EOF && err != nil {
			tkn.LastError = err
		}
	}
	if tkn.bufPos >= tkn.bufSize {
		if tkn.lastChar != eofChar {
			// Advance Position only on the first transition to EOF.
			tkn.Position++
			tkn.lastChar = eofChar
		}
	} else {
		tkn.Position++
		tkn.lastChar = uint16(tkn.buf[tkn.bufPos])
		tkn.bufPos++
	}
}
// reset clears any internal parse state so the Tokenizer can be reused
// for another statement. Note it does not touch the input buffer
// position or lastChar.
func (tkn *Tokenizer) reset() {
	tkn.ParseTree = nil
	tkn.partialDDL = nil
	tkn.specialComment = nil
	tkn.posVarIndex = 0
	tkn.nesting = 0
	tkn.ForceEOF = false
}
// isLetter reports whether ch can start an identifier: an ASCII letter,
// underscore, or the '@' used by user/system variables.
func isLetter(ch uint16) bool {
	switch {
	case ch >= 'a' && ch <= 'z':
		return true
	case ch >= 'A' && ch <= 'Z':
		return true
	case ch == '_', ch == '@':
		return true
	}
	return false
}
// isCarat reports whether ch is one of the extra characters permitted
// inside @@-style system variable names: '.', single quote, double
// quote, or backtick.
func isCarat(ch uint16) bool {
	switch ch {
	case '.', '\'', '"', '`':
		return true
	default:
		return false
	}
}
// digitVal returns the numeric value of ch as a digit (0-9, a-f, A-F),
// or 16 — larger than any legal digit value — when ch is not a digit.
func digitVal(ch uint16) int {
	if ch >= '0' && ch <= '9' {
		return int(ch - '0')
	}
	if ch >= 'a' && ch <= 'f' {
		return int(ch-'a') + 10
	}
	if ch >= 'A' && ch <= 'F' {
		return int(ch-'A') + 10
	}
	return 16
}
// isDigit reports whether ch is an ASCII decimal digit.
func isDigit(ch uint16) bool {
	return ch >= '0' && ch <= '9'
}

140
vendor/github.com/xwb1989/sqlparser/tracked_buffer.go generated vendored Normal file
View File

@@ -0,0 +1,140 @@
/*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sqlparser
import (
"bytes"
"fmt"
)
// NodeFormatter defines the signature of a custom node formatter
// function that can be given to TrackedBuffer for code generation.
// It receives the buffer to write into and the node to render.
type NodeFormatter func(buf *TrackedBuffer, node SQLNode)
// TrackedBuffer is used to rebuild a query from the ast.
// bindLocations keeps track of locations in the buffer that
// use bind variables for efficient future substitutions.
// nodeFormatter is the formatting function the buffer will
// use to format a node. By default(nil), it's FormatNode.
// But you can supply a different formatting function if you
// want to generate a query that's different from the default.
type TrackedBuffer struct {
	*bytes.Buffer
	// bindLocations records the offset/length of each bind-variable
	// argument written via WriteArg.
	bindLocations []bindLocation
	// nodeFormatter, when non-nil, overrides the node's own Format
	// method for %v verbs in Myprintf.
	nodeFormatter NodeFormatter
}
// NewTrackedBuffer creates a new TrackedBuffer that formats nodes with
// nodeFormatter; pass nil to use each node's default Format method.
func NewTrackedBuffer(nodeFormatter NodeFormatter) *TrackedBuffer {
	tb := &TrackedBuffer{Buffer: new(bytes.Buffer)}
	tb.nodeFormatter = nodeFormatter
	return tb
}
// WriteNode function, initiates the writing of a single SQLNode tree by passing
// through to Myprintf with a default format string. It returns the
// buffer to allow chaining.
func (buf *TrackedBuffer) WriteNode(node SQLNode) *TrackedBuffer {
	buf.Myprintf("%v", node)
	return buf
}
// Myprintf mimics fmt.Fprintf(buf, ...), but limited to Node(%v),
// Node.Value(%s) and string(%s). It also allows a %a for a value argument, in
// which case it adds tracking info for future substitutions.
//
// The name must be something other than the usual Printf() to avoid "go vet"
// warnings due to our custom format specifiers.
func (buf *TrackedBuffer) Myprintf(format string, values ...interface{}) {
	end := len(format)
	fieldnum := 0
	for i := 0; i < end; {
		// Copy the literal text up to the next '%' verb verbatim.
		lasti := i
		for i < end && format[i] != '%' {
			i++
		}
		if i > lasti {
			buf.WriteString(format[lasti:i])
		}
		if i >= end {
			break
		}
		i++ // '%'
		switch format[i] {
		case 'c':
			// Single character, supplied as byte or rune.
			switch v := values[fieldnum].(type) {
			case byte:
				buf.WriteByte(v)
			case rune:
				buf.WriteRune(v)
			default:
				panic(fmt.Sprintf("unexpected TrackedBuffer type %T", v))
			}
		case 's':
			// Raw text, supplied as []byte or string.
			switch v := values[fieldnum].(type) {
			case []byte:
				buf.Write(v)
			case string:
				buf.WriteString(v)
			default:
				panic(fmt.Sprintf("unexpected TrackedBuffer type %T", v))
			}
		case 'v':
			// SQLNode, rendered via the custom formatter when set.
			node := values[fieldnum].(SQLNode)
			if buf.nodeFormatter == nil {
				node.Format(buf)
			} else {
				buf.nodeFormatter(buf, node)
			}
		case 'a':
			// Bind-variable argument: written with tracking info.
			buf.WriteArg(values[fieldnum].(string))
		default:
			panic("unexpected")
		}
		fieldnum++
		i++
	}
}
// WriteArg writes a value argument into the buffer along with
// tracking information for future substitutions. arg must contain
// the ":" or "::" prefix.
func (buf *TrackedBuffer) WriteArg(arg string) {
	loc := bindLocation{
		offset: buf.Len(),
		length: len(arg),
	}
	buf.bindLocations = append(buf.bindLocations, loc)
	buf.WriteString(arg)
}
// ParsedQuery returns a ParsedQuery that contains bind
// locations for easy substitution. The result shares this buffer's
// bindLocations slice rather than copying it.
func (buf *TrackedBuffer) ParsedQuery() *ParsedQuery {
	return &ParsedQuery{Query: buf.String(), bindLocations: buf.bindLocations}
}
// HasBindVars returns true if the parsed query uses bind vars.
func (buf *TrackedBuffer) HasBindVars() bool {
	return len(buf.bindLocations) > 0
}
// BuildParsedQuery builds a ParsedQuery from the input format string
// and values, using the default node formatter.
func BuildParsedQuery(in string, vars ...interface{}) *ParsedQuery {
	tb := NewTrackedBuffer(nil)
	tb.Myprintf(in, vars...)
	return tb.ParsedQuery()
}